rootly-mcp-server 2.1.0-py3-none-any.whl → 2.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,6 +5,7 @@ This module implements a server that dynamically generates MCP tools based on
 the Rootly API's OpenAPI (Swagger) specification using FastMCP's OpenAPI integration.
 """
 
+import asyncio
 import json
 import logging
 import os
@@ -163,131 +164,9 @@ def strip_heavy_nested_data(data: dict[str, Any]) -> dict[str, Any]:
                 # Replace with just count
                 rels[rel_key] = {"count": len(rels[rel_key]["data"])}
 
-    # Process "included" section (common in shifts/alerts with user data)
-    if "included" in data and isinstance(data["included"], list):
-        for item in data["included"]:
-            if item.get("type") == "users":
-                # Keep only essential user fields
-                if "attributes" in item:
-                    attrs = item["attributes"]
-                    keep_fields = {"name", "email", "phone", "time_zone", "full_name"}
-                    item["attributes"] = {k: v for k, v in attrs.items() if k in keep_fields}
-                # Strip heavy relationships
-                if "relationships" in item:
-                    for rel_key in [
-                        "schedules",
-                        "notification_rules",
-                        "teams",
-                        "devices",
-                        "email_addresses",
-                        "phone_numbers",
-                    ]:
-                        if rel_key in item["relationships"]:
-                            rel_data = item["relationships"][rel_key]
-                            if isinstance(rel_data, dict) and "data" in rel_data:
-                                data_list = rel_data.get("data", [])
-                                if isinstance(data_list, list):
-                                    item["relationships"][rel_key] = {"count": len(data_list)}
-
-    # Process alerts in data list
-    if "data" in data and isinstance(data["data"], list):
-        for item in data["data"]:
-            if item.get("type") == "alerts":
-                # Strip heavy attributes from alerts
-                if "attributes" in item:
-                    attrs = item["attributes"]
-                    # Remove heavy fields - raw data, embedded objects, integration fields
-                    heavy_fields = [
-                        "data",  # Raw alert payload from source - very large
-                        "labels",
-                        "external_url",
-                        "pagerduty_incident_id",
-                        "pagerduty_incident_url",
-                        "opsgenie_alert_id",
-                        "opsgenie_alert_url",
-                        "deduplication_key",
-                    ]
-                    for field in heavy_fields:
-                        attrs.pop(field, None)
-
-                    # Simplify embedded objects to just IDs/counts
-                    # groups - keep only group_ids
-                    if "groups" in attrs:
-                        attrs.pop("groups", None)
-                    # environments - keep only environment_ids
-                    if "environments" in attrs:
-                        attrs.pop("environments", None)
-                    # services - keep only service_ids
-                    if "services" in attrs:
-                        attrs.pop("services", None)
-                    # incidents - embedded incident objects
-                    if "incidents" in attrs:
-                        attrs.pop("incidents", None)
-                    # responders - embedded responder objects
-                    if "responders" in attrs:
-                        attrs.pop("responders", None)
-                    # notified_users - embedded user objects
-                    if "notified_users" in attrs:
-                        attrs.pop("notified_users", None)
-                    # alerting_targets - embedded target objects
-                    if "alerting_targets" in attrs:
-                        attrs.pop("alerting_targets", None)
-                    # alert_urgency - keep only alert_urgency_id
-                    if "alert_urgency" in attrs:
-                        attrs.pop("alert_urgency", None)
-                    # alert_field_values - embedded custom field values
-                    if "alert_field_values" in attrs:
-                        attrs.pop("alert_field_values", None)
-
-                # Strip heavy relationships
-                if "relationships" in item:
-                    rels = item["relationships"]
-                    for rel_key in ["events", "subscribers", "alerts"]:
-                        if (
-                            rel_key in rels
-                            and isinstance(rels[rel_key], dict)
-                            and "data" in rels[rel_key]
-                        ):
-                            data_list = rels[rel_key].get("data", [])
-                            if isinstance(data_list, list):
-                                rels[rel_key] = {"count": len(data_list)}
-
     return data
 
 
-class ProcessedResponse:
-    """Wrapper around httpx.Response that processes JSON to reduce payload size."""
-
-    def __init__(self, response: httpx.Response):
-        self._response = response
-        self._processed_json = None
-
-    def json(self, **kwargs):
-        """Parse JSON and strip heavy nested data."""
-        if self._processed_json is None:
-            raw_data = self._response.json(**kwargs)
-            self._processed_json = strip_heavy_nested_data(raw_data)
-        return self._processed_json
-
-    def __getattr__(self, name):
-        """Delegate all other attributes to the wrapped response."""
-        return getattr(self._response, name)
-
-
-class ResponseProcessingClient(httpx.AsyncClient):
-    """AsyncClient subclass that wraps responses to reduce payload size.
-
-    This is necessary because FastMCP.from_openapi() uses the client directly,
-    bypassing any wrapper class. By subclassing httpx.AsyncClient, we ensure
-    all responses go through our processing.
-    """
-
-    async def request(self, method, url, **kwargs):
-        """Override request to wrap response with ProcessedResponse."""
-        response = await super().request(method, url, **kwargs)
-        return ProcessedResponse(response)
-
-
 class MCPError:
     """Enhanced error handling for MCP protocol compliance."""
 
@@ -468,7 +347,7 @@ class AuthenticatedHTTPXClient:
         if self._api_token:
             headers["Authorization"] = f"Bearer {self._api_token}"
 
-        self.client = ResponseProcessingClient(
+        self.client = httpx.AsyncClient(
             base_url=base_url,
             headers=headers,
             timeout=30.0,
@@ -500,16 +379,13 @@ class AuthenticatedHTTPXClient:
         return transformed
 
     async def request(self, method: str, url: str, **kwargs):
-        """Override request to transform parameters and wrap response for payload reduction."""
+        """Override request to transform parameters."""
        # Transform query parameters
         if "params" in kwargs:
             kwargs["params"] = self._transform_params(kwargs["params"])
 
-        # Call the underlying client's request method
-        response = await self.client.request(method, url, **kwargs)
-
-        # Wrap response to process JSON and reduce payload size
-        return ProcessedResponse(response)
+        # Call the underlying client's request method and let it handle everything
+        return await self.client.request(method, url, **kwargs)
 
     async def get(self, url: str, **kwargs):
         """Proxy to request with GET method."""
@@ -612,9 +488,12 @@ def create_rootly_mcp_server(
 
     # Create the MCP server using OpenAPI integration
     # By default, all routes become tools which is what we want
+    # NOTE: We pass http_client (the wrapper) instead of http_client.client (the inner httpx client)
+    # so that parameter transformation (e.g., filter_status -> filter[status]) is applied.
+    # The wrapper implements the same interface as httpx.AsyncClient (duck typing).
     mcp = FastMCP.from_openapi(
         openapi_spec=filtered_spec,
-        client=http_client.client,
+        client=http_client,  # type: ignore[arg-type]
         name=name,
         timeout=30.0,
         tags={"rootly", "incident-management"},
@@ -2141,6 +2020,1029 @@ def create_rootly_mcp_server(
             start_time, end_time, schedule_ids, severity, status, tags
         )
 
+    # Cache for lookup maps (TTL: 5 minutes)
+    _lookup_maps_cache: dict[str, Any] = {
+        "data": None,
+        "timestamp": 0.0,
+        "ttl_seconds": 300,  # 5 minutes
+    }
+    _lookup_maps_lock = asyncio.Lock()
+
+    # Helper function to fetch users and schedules for enrichment
+    async def _fetch_users_and_schedules_maps() -> tuple[dict, dict, dict]:
+        """Fetch all users, schedules, and teams to build lookup maps.
+
+        Results are cached for 5 minutes to avoid repeated API calls.
+        """
+        import time
+
+        # Check cache (fast path without lock)
+        now = time.time()
+        if (
+            _lookup_maps_cache["data"] is not None
+            and (now - _lookup_maps_cache["timestamp"]) < _lookup_maps_cache["ttl_seconds"]
+        ):
+            return _lookup_maps_cache["data"]
+
+        # Acquire lock to prevent concurrent fetches
+        async with _lookup_maps_lock:
+            # Re-check cache after acquiring lock
+            now = time.time()
+            if (
+                _lookup_maps_cache["data"] is not None
+                and (now - _lookup_maps_cache["timestamp"]) < _lookup_maps_cache["ttl_seconds"]
+            ):
+                return _lookup_maps_cache["data"]
+
+            users_map = {}
+            schedules_map = {}
+            teams_map = {}
+
+            # Fetch all users with pagination
+            page = 1
+            while page <= 10:
+                users_response = await make_authenticated_request(
+                    "GET", "/v1/users", params={"page[size]": 100, "page[number]": page}
+                )
+                if users_response and users_response.status_code == 200:
+                    users_data = users_response.json()
+                    for user in users_data.get("data", []):
+                        users_map[user.get("id")] = user
+                    if len(users_data.get("data", [])) < 100:
+                        break
+                    page += 1
+                else:
+                    break
+
+            # Fetch all schedules with pagination
+            page = 1
+            while page <= 10:
+                schedules_response = await make_authenticated_request(
+                    "GET", "/v1/schedules", params={"page[size]": 100, "page[number]": page}
+                )
+                if schedules_response and schedules_response.status_code == 200:
+                    schedules_data = schedules_response.json()
+                    for schedule in schedules_data.get("data", []):
+                        schedules_map[schedule.get("id")] = schedule
+                    if len(schedules_data.get("data", [])) < 100:
+                        break
+                    page += 1
+                else:
+                    break
+
+            # Fetch all teams with pagination
+            page = 1
+            while page <= 10:
+                teams_response = await make_authenticated_request(
+                    "GET", "/v1/teams", params={"page[size]": 100, "page[number]": page}
+                )
+                if teams_response and teams_response.status_code == 200:
+                    teams_data = teams_response.json()
+                    for team in teams_data.get("data", []):
+                        teams_map[team.get("id")] = team
+                    if len(teams_data.get("data", [])) < 100:
+                        break
+                    page += 1
+                else:
+                    break
+
+            # Cache the result
+            result = (users_map, schedules_map, teams_map)
+            _lookup_maps_cache["data"] = result
+            _lookup_maps_cache["timestamp"] = now
+
+            return result
+
+    @mcp.tool()
+    async def list_shifts(
+        from_date: Annotated[
+            str,
+            Field(
+                description="Start date/time for shift query (ISO 8601 format, e.g., '2026-02-09T00:00:00Z')"
+            ),
+        ],
+        to_date: Annotated[
+            str,
+            Field(
+                description="End date/time for shift query (ISO 8601 format, e.g., '2026-02-15T23:59:59Z')"
+            ),
+        ],
+        user_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated list of user IDs to filter by (e.g., '2381,94178'). Only returns shifts for these users."
+            ),
+        ] = "",
+        schedule_ids: Annotated[
+            str,
+            Field(description="Comma-separated list of schedule IDs to filter by (optional)"),
+        ] = "",
+        include_user_details: Annotated[
+            bool,
+            Field(description="Include user name and email in response (default: True)"),
+        ] = True,
+    ) -> dict:
+        """
+        List on-call shifts with proper user filtering and enriched data.
+
+        Unlike the raw API, this tool:
+        - Actually filters by user_ids (client-side filtering)
+        - Includes user_name, user_email, schedule_name, team_name
+        - Calculates total_hours for each shift
+
+        Use this instead of the auto-generated listShifts when you need user filtering.
+        """
+        try:
+            from datetime import datetime
+
+            # Build query parameters
+            params: dict[str, Any] = {
+                "from": from_date,
+                "to": to_date,
+                "include": "user,on_call_role,schedule_rotation",
+                "page[size]": 100,
+            }
+
+            if schedule_ids:
+                schedule_id_list = [sid.strip() for sid in schedule_ids.split(",") if sid.strip()]
+                params["schedule_ids[]"] = schedule_id_list
+
+            # Parse user_ids for filtering
+            user_id_filter = set()
+            if user_ids:
+                user_id_filter = {uid.strip() for uid in user_ids.split(",") if uid.strip()}
+
+            # Fetch lookup maps for enrichment
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Build schedule -> team mapping
+            schedule_to_team = {}
+            for schedule_id, schedule in schedules_map.items():
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                if owner_group_ids:
+                    team_id = owner_group_ids[0]
+                    team = teams_map.get(team_id, {})
+                    schedule_to_team[schedule_id] = {
+                        "team_id": team_id,
+                        "team_name": team.get("attributes", {}).get("name", "Unknown Team"),
+                    }
+
+            # Fetch all shifts with pagination
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                # Update users_map from included data
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Process and filter shifts
+            enriched_shifts = []
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                relationships = shift.get("relationships", {})
+
+                # Get user info
+                user_rel = relationships.get("user", {}).get("data") or {}
+                user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not user_id:
+                    continue
+
+                # Apply user_ids filter
+                if user_id_filter and str(user_id) not in user_id_filter:
+                    continue
+
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+                user_email = user_attrs.get("email", "")
+
+                # Get schedule info
+                schedule_id = attrs.get("schedule_id")
+                schedule_info = schedules_map.get(schedule_id, {})
+                schedule_name = schedule_info.get("attributes", {}).get("name", "Unknown Schedule")
+
+                # Get team info
+                team_info = schedule_to_team.get(schedule_id, {})
+                team_name = team_info.get("team_name", "Unknown Team")
+
+                # Calculate total hours
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                total_hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        total_hours = round((end_dt - start_dt).total_seconds() / 3600, 2)
+                    except (ValueError, AttributeError):
+                        pass
+
+                enriched_shift = {
+                    "shift_id": shift.get("id"),
+                    "user_id": user_id,
+                    "schedule_id": schedule_id,
+                    "starts_at": starts_at,
+                    "ends_at": ends_at,
+                    "is_override": attrs.get("is_override", False),
+                    "total_hours": total_hours,
+                }
+
+                if include_user_details:
+                    enriched_shift["user_name"] = user_name
+                    enriched_shift["user_email"] = user_email
+                    enriched_shift["schedule_name"] = schedule_name
+                    enriched_shift["team_name"] = team_name
+
+                enriched_shifts.append(enriched_shift)
+
+            return {
+                "period": {"from": from_date, "to": to_date},
+                "total_shifts": len(enriched_shifts),
+                "filters_applied": {
+                    "user_ids": list(user_id_filter) if user_id_filter else None,
+                    "schedule_ids": schedule_ids if schedule_ids else None,
+                },
+                "shifts": enriched_shifts,
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to list shifts: {error_message}",
+                error_type,
+                details={
+                    "params": {"from": from_date, "to": to_date},
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
+    @mcp.tool()
+    async def get_oncall_schedule_summary(
+        start_date: Annotated[
+            str,
+            Field(description="Start date (ISO 8601, e.g., '2026-02-09')"),
+        ],
+        end_date: Annotated[
+            str,
+            Field(description="End date (ISO 8601, e.g., '2026-02-15')"),
+        ],
+        schedule_ids: Annotated[
+            str,
+            Field(description="Comma-separated schedule IDs to filter (optional)"),
+        ] = "",
+        team_ids: Annotated[
+            str,
+            Field(description="Comma-separated team IDs to filter (optional)"),
+        ] = "",
+        include_user_ids: Annotated[
+            bool,
+            Field(description="Include numeric user IDs for cross-platform correlation"),
+        ] = True,
+    ) -> dict:
+        """
+        Get compact on-call schedule summary for a date range.
+
+        Returns one entry per user per schedule (not raw shifts), with
+        aggregated hours. Optimized for AI agent context windows.
+
+        Use this instead of listShifts when you need:
+        - Aggregated hours per responder
+        - Schedule coverage overview
+        - Responder load analysis with warnings
+        """
+        try:
+            from collections import defaultdict
+            from datetime import datetime
+
+            # Parse filter IDs
+            schedule_id_filter = set()
+            if schedule_ids:
+                schedule_id_filter = {sid.strip() for sid in schedule_ids.split(",") if sid.strip()}
+
+            team_id_filter = set()
+            if team_ids:
+                team_id_filter = {tid.strip() for tid in team_ids.split(",") if tid.strip()}
+
+            # Fetch lookup maps
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Build schedule -> team mapping and apply team filter
+            schedule_to_team = {}
+            filtered_schedule_ids = set()
+            for schedule_id, schedule in schedules_map.items():
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                team_id = owner_group_ids[0] if owner_group_ids else None
+                team = teams_map.get(team_id, {}) if team_id else {}
+                team_name = team.get("attributes", {}).get("name", "Unknown Team")
+
+                schedule_to_team[schedule_id] = {
+                    "team_id": team_id,
+                    "team_name": team_name,
+                    "schedule_name": schedule.get("attributes", {}).get("name", "Unknown Schedule"),
+                }
+
+                # Apply filters
+                if schedule_id_filter and schedule_id not in schedule_id_filter:
+                    continue
+                if team_id_filter and (not team_id or team_id not in team_id_filter):
+                    continue
+                filtered_schedule_ids.add(schedule_id)
+
+            # If no filters, include all schedules
+            if not schedule_id_filter and not team_id_filter:
+                filtered_schedule_ids = set(schedules_map.keys())
+
+            # Fetch shifts
+            params: dict[str, Any] = {
+                "from": f"{start_date}T00:00:00Z" if "T" not in start_date else start_date,
+                "to": f"{end_date}T23:59:59Z" if "T" not in end_date else end_date,
+                "include": "user,on_call_role",
+                "page[size]": 100,
+            }
+
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                # Update users_map from included data
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Aggregate by schedule and user
+            schedule_coverage: dict[str, dict] = defaultdict(
+                lambda: {
+                    "schedule_name": "",
+                    "team_name": "",
+                    "responders": defaultdict(
+                        lambda: {
+                            "user_name": "",
+                            "user_id": None,
+                            "total_hours": 0.0,
+                            "shift_count": 0,
+                            "is_override": False,
+                        }
+                    ),
+                }
+            )
+
+            responder_load: dict[str, dict] = defaultdict(
+                lambda: {
+                    "user_name": "",
+                    "user_id": None,
+                    "total_hours": 0.0,
+                    "schedules": set(),
+                }
+            )
+
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                schedule_id = attrs.get("schedule_id")
+
+                # Apply schedule filter
+                if filtered_schedule_ids and schedule_id not in filtered_schedule_ids:
+                    continue
+
+                # Get user info
+                relationships = shift.get("relationships", {})
+                user_rel = relationships.get("user", {}).get("data") or {}
+                user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not user_id:
+                    continue
+
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+
+                # Get schedule/team info
+                sched_info = schedule_to_team.get(schedule_id, {})
+                schedule_name = sched_info.get("schedule_name", "Unknown Schedule")
+                team_name = sched_info.get("team_name", "Unknown Team")
+
+                # Calculate hours
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        hours = (end_dt - start_dt).total_seconds() / 3600
+                    except (ValueError, AttributeError):
+                        pass
+
+                is_override = attrs.get("is_override", False)
+
+                # Update schedule coverage
+                sched_data = schedule_coverage[schedule_id]
+                sched_data["schedule_name"] = schedule_name
+                sched_data["team_name"] = team_name
+
+                user_key = str(user_id)
+                sched_data["responders"][user_key]["user_name"] = user_name
+                sched_data["responders"][user_key]["user_id"] = user_id
+                sched_data["responders"][user_key]["total_hours"] += hours
+                sched_data["responders"][user_key]["shift_count"] += 1
+                if is_override:
+                    sched_data["responders"][user_key]["is_override"] = True
+
+                # Update responder load
+                responder_load[user_key]["user_name"] = user_name
+                responder_load[user_key]["user_id"] = user_id
+                responder_load[user_key]["total_hours"] += hours
+                responder_load[user_key]["schedules"].add(schedule_name)
+
+            # Format schedule coverage
+            formatted_coverage = []
+            for _schedule_id, sched_data in schedule_coverage.items():
+                responders_list = []
+                for _user_key, resp_data in sched_data["responders"].items():
+                    responder = {
+                        "user_name": resp_data["user_name"],
+                        "total_hours": round(resp_data["total_hours"], 1),
+                        "shift_count": resp_data["shift_count"],
+                        "is_override": resp_data["is_override"],
+                    }
+                    if include_user_ids:
+                        responder["user_id"] = resp_data["user_id"]
+                    responders_list.append(responder)
+
+                # Sort by hours descending
+                responders_list.sort(key=lambda x: x["total_hours"], reverse=True)
+
+                formatted_coverage.append(
+                    {
+                        "schedule_name": sched_data["schedule_name"],
+                        "team_name": sched_data["team_name"],
+                        "responders": responders_list,
+                    }
+                )
+
+            # Format responder load with warnings
+            formatted_load = []
+            for _user_key, load_data in responder_load.items():
+                schedules_list = list(load_data["schedules"])
+                hours = round(load_data["total_hours"], 1)
+
+                responder_entry = {
+                    "user_name": load_data["user_name"],
+                    "total_hours": hours,
+                    "schedules": schedules_list,
+                }
+                if include_user_ids:
+                    responder_entry["user_id"] = load_data["user_id"]
+
+                # Add warnings for high load
+                if len(schedules_list) >= 4:
+                    responder_entry["warning"] = (
+                        f"High load: {len(schedules_list)} concurrent schedules"
+                    )
+                elif hours >= 168:  # 7 days * 24 hours
+                    responder_entry["warning"] = f"High load: {hours} hours in period"
+
+                formatted_load.append(responder_entry)
+
+            # Sort by hours descending
+            formatted_load.sort(key=lambda x: x["total_hours"], reverse=True)
+            formatted_coverage.sort(key=lambda x: x["schedule_name"])
+
+            return {
+                "period": {"start": start_date, "end": end_date},
+                "total_schedules": len(formatted_coverage),
+                "total_responders": len(formatted_load),
+                "schedule_coverage": formatted_coverage,
+                "responder_load": formatted_load,
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to get on-call schedule summary: {error_message}",
+                error_type,
+                details={
+                    "params": {"start_date": start_date, "end_date": end_date},
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
+    @mcp.tool()
+    async def check_responder_availability(
+        start_date: Annotated[
+            str,
+            Field(description="Start date (ISO 8601, e.g., '2026-02-09')"),
+        ],
+        end_date: Annotated[
+            str,
+            Field(description="End date (ISO 8601, e.g., '2026-02-15')"),
+        ],
+        user_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated Rootly user IDs to check (e.g., '2381,94178,27965')"
+            ),
+        ],
+    ) -> dict:
+        """
+        Check if specific users are scheduled for on-call in a date range.
+
+        Use this to verify if at-risk users (from On-Call Health) are scheduled,
+        or to check availability before assigning new shifts.
+
+        Returns scheduled users with their shifts and total hours,
+        plus users who are not scheduled.
+        """
+        try:
+            from datetime import datetime
+
+            if not user_ids:
+                return MCPError.tool_error(
+                    "user_ids parameter is required",
+                    "validation_error",
+                )
+
+            # Parse user IDs
+            user_id_list = [uid.strip() for uid in user_ids.split(",") if uid.strip()]
+            user_id_set = set(user_id_list)
+
+            # Fetch lookup maps
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Build schedule -> team mapping
+            schedule_to_team = {}
+            for schedule_id, schedule in schedules_map.items():
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                if owner_group_ids:
+                    team_id = owner_group_ids[0]
+                    team = teams_map.get(team_id, {})
+                    schedule_to_team[schedule_id] = {
+                        "schedule_name": schedule.get("attributes", {}).get("name", "Unknown"),
+                        "team_name": team.get("attributes", {}).get("name", "Unknown Team"),
+                    }
+
+            # Fetch shifts
+            params: dict[str, Any] = {
+                "from": f"{start_date}T00:00:00Z" if "T" not in start_date else start_date,
+                "to": f"{end_date}T23:59:59Z" if "T" not in end_date else end_date,
+                "include": "user,on_call_role",
+                "page[size]": 100,
+            }
+
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                # Update users_map from included data
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Group shifts by user
+            user_shifts: dict[str, list] = {uid: [] for uid in user_id_list}
+            user_hours: dict[str, float] = dict.fromkeys(user_id_list, 0.0)
+
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                relationships = shift.get("relationships", {})
+
+                user_rel = relationships.get("user", {}).get("data") or {}
+                raw_user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not raw_user_id:
+                    continue
+
+                user_id = str(raw_user_id)
+
+                if user_id not in user_id_set:
+                    continue
+
+                schedule_id = attrs.get("schedule_id")
+                sched_info = schedule_to_team.get(schedule_id, {})
+
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        hours = round((end_dt - start_dt).total_seconds() / 3600, 1)
+                    except (ValueError, AttributeError):
+                        pass
+
+                user_shifts[user_id].append(
+                    {
+                        "schedule_name": sched_info.get("schedule_name", "Unknown"),
+                        "starts_at": starts_at,
+                        "ends_at": ends_at,
+                        "hours": hours,
+                    }
+                )
+                user_hours[user_id] += hours
+
+            # Format results
+            scheduled = []
+            not_scheduled = []
+
+            for user_id in user_id_list:
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+
+                shifts = user_shifts.get(user_id, [])
+                if shifts:
+                    scheduled.append(
+                        {
+                            "user_id": int(user_id) if user_id.isdigit() else user_id,
+                            "user_name": user_name,
+                            "total_hours": round(user_hours[user_id], 1),
+                            "shifts": shifts,
+                        }
+                    )
+                else:
+                    not_scheduled.append(
+                        {
+                            "user_id": int(user_id) if user_id.isdigit() else user_id,
+                            "user_name": user_name,
+                        }
+                    )
+
+            # Sort scheduled by hours descending
+            scheduled.sort(key=lambda x: x["total_hours"], reverse=True)
+
+            return {
+                "period": {"start": start_date, "end": end_date},
+                "checked_users": len(user_id_list),
+                "scheduled": scheduled,
+                "not_scheduled": not_scheduled,
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to check responder availability: {error_message}",
+                error_type,
+                details={
+                    "params": {
+                        "start_date": start_date,
+                        "end_date": end_date,
+                        "user_ids": user_ids,
+                    },
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
+    @mcp.tool()
+    async def create_override_recommendation(
+        schedule_id: Annotated[
+            str,
+            Field(description="Schedule ID to create override for"),
+        ],
+        original_user_id: Annotated[
+            int,
+            Field(description="User ID being replaced"),
+        ],
+        start_date: Annotated[
+            str,
+            Field(description="Override start (ISO 8601, e.g., '2026-02-09')"),
+        ],
+        end_date: Annotated[
+            str,
+            Field(description="Override end (ISO 8601, e.g., '2026-02-15')"),
+        ],
+        exclude_user_ids: Annotated[
+            str,
+            Field(description="Comma-separated user IDs to exclude (e.g., other at-risk users)"),
+        ] = "",
+    ) -> dict:
+        """
+        Recommend replacement responders for an override shift.
+
+        Finds users in the same schedule rotation who are not already
+        heavily loaded during the period.
+
+        Returns recommended replacements sorted by current load (lowest first),
+        plus a ready-to-use override payload for the top recommendation.
+        """
+        try:
+            from datetime import datetime
+
+            # Parse exclusions
+            exclude_set = set()
+            if exclude_user_ids:
+                exclude_set = {uid.strip() for uid in exclude_user_ids.split(",") if uid.strip()}
+            exclude_set.add(str(original_user_id))
+
+            # Fetch lookup maps
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Get schedule info
+            schedule = schedules_map.get(schedule_id, {})
+            schedule_name = schedule.get("attributes", {}).get("name", "Unknown Schedule")
+
+            # Get original user info
+            original_user = users_map.get(str(original_user_id), {})
+            original_user_attrs = original_user.get("attributes", {})
+            original_user_name = (
+                original_user_attrs.get("full_name") or original_user_attrs.get("name") or "Unknown"
+            )
+
+            # Fetch schedule rotations to find rotation users
+            rotation_users = set()
+
+            # First, get the schedule to find its rotations
+            schedule_response = await make_authenticated_request(
+                "GET", f"/v1/schedules/{schedule_id}"
+            )
+
+            if schedule_response and schedule_response.status_code == 200:
+                import asyncio
+
+                schedule_data = schedule_response.json()
+                schedule_obj = schedule_data.get("data", {})
+                relationships = schedule_obj.get("relationships", {})
+
+                # Get schedule rotations
+                rotations = relationships.get("schedule_rotations", {}).get("data", [])
+                rotation_ids = [r.get("id") for r in rotations if r.get("id")]
+
+                # Fetch all rotation users in parallel
+                if rotation_ids:
+
+                    async def fetch_rotation_users(rotation_id: str):
+                        response = await make_authenticated_request(
+                            "GET",
+                            f"/v1/schedule_rotations/{rotation_id}/schedule_rotation_users",
+                            params={"page[size]": 100},
+                        )
+                        if response and response.status_code == 200:
+                            return response.json().get("data", [])
+                        return []
+
+                    # Execute all rotation user fetches in parallel
+                    rotation_results = await asyncio.gather(
+                        *[fetch_rotation_users(rid) for rid in rotation_ids], return_exceptions=True
+                    )
+
+                    # Process results
+                    for result in rotation_results:
+                        if isinstance(result, list):
+                            for ru in result:
+                                user_rel = (
+                                    ru.get("relationships", {}).get("user", {}).get("data", {})
+                                )
+                                user_id = user_rel.get("id")
+                                if user_id:
+                                    rotation_users.add(str(user_id))
+
+            # Fetch shifts to calculate current load for rotation users
+            params: dict[str, Any] = {
+                "from": f"{start_date}T00:00:00Z" if "T" not in start_date else start_date,
+                "to": f"{end_date}T23:59:59Z" if "T" not in end_date else end_date,
+                "include": "user",
+                "page[size]": 100,
+            }
+
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Calculate load per user
+            user_load: dict[str, float] = {}
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                relationships = shift.get("relationships", {})
+
+                user_rel = relationships.get("user", {}).get("data") or {}
+                raw_user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not raw_user_id:
+                    continue
+
+                user_id = str(raw_user_id)
+
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        hours = (end_dt - start_dt).total_seconds() / 3600
+                    except (ValueError, AttributeError):
+                        pass
+
+                user_load[user_id] = user_load.get(user_id, 0.0) + hours
+
+            # Find recommendations from rotation users
+            recommendations = []
+            for user_id in rotation_users:
+                if user_id in exclude_set:
+                    continue
+
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+
+                current_hours = round(user_load.get(user_id, 0.0), 1)
+
+                # Generate reason based on load
+                if current_hours == 0:
+                    reason = "Already in rotation, no current load"
+                elif current_hours < 24:
+                    reason = "Already in rotation, low load"
+                elif current_hours < 48:
+                    reason = "Same team, moderate availability"
+                else:
+                    reason = "In rotation, but higher load"
+
+                recommendations.append(
+                    {
+                        "user_id": int(user_id) if user_id.isdigit() else user_id,
+                        "user_name": user_name,
+                        "current_hours_in_period": current_hours,
+                        "reason": reason,
+                    }
+                )
+
+            # Sort by load (lowest first)
+            recommendations.sort(key=lambda x: x["current_hours_in_period"])
+
+            # Build override payload for top recommendation
+            override_payload = None
+            if recommendations:
+                top_rec = recommendations[0]
+                # Format dates for API
+                override_starts = f"{start_date}T00:00:00Z" if "T" not in start_date else start_date
+                override_ends = f"{end_date}T23:59:59Z" if "T" not in end_date else end_date
+
+                override_payload = {
+                    "schedule_id": schedule_id,
+                    "user_id": top_rec["user_id"],
+                    "starts_at": override_starts,
+                    "ends_at": override_ends,
+                }
+
+            # Build response with optional warning
+            response = {
+                "schedule_name": schedule_name,
+                "original_user": {
+                    "id": original_user_id,
+                    "name": original_user_name,
+                },
+                "period": {
+                    "start": start_date,
+                    "end": end_date,
+                },
+                "recommended_replacements": recommendations[:5],  # Top 5
+                "override_payload": override_payload,
+            }
+
+            # Add warning if no recommendations available
+            if not rotation_users:
+                response["warning"] = (
+                    "No rotation users found for this schedule. The schedule may not have any rotations configured."
+                )
+            elif not recommendations:
+                response["warning"] = (
+                    "All rotation users are either excluded or the original user. No recommendations available."
+                )
+
+            return response
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to create override recommendation: {error_message}",
+                error_type,
+                details={
+                    "params": {
+                        "schedule_id": schedule_id,
+                        "original_user_id": original_user_id,
+                    },
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
     # Add MCP resources for incidents and teams
     @mcp.resource("incident://{incident_id}")
     async def get_incident_resource(incident_id: str):
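
The `_fetch_users_and_schedules_maps` helper added above combines a single-entry TTL cache with double-checked locking: a lock-free freshness check on the hot path, then a re-check under an `asyncio.Lock` so that concurrent tool calls trigger at most one refetch. A generic sketch of that pattern (class name and API are illustrative, not part of the package):

```python
import asyncio
import time
from typing import Any, Awaitable, Callable, Optional


class AsyncTTLCache:
    """Single-entry TTL cache with double-checked locking (illustrative)."""

    def __init__(self, ttl_seconds: float = 300.0):
        self._ttl = ttl_seconds
        self._value: Optional[Any] = None
        self._timestamp = 0.0
        self._lock = asyncio.Lock()

    def _fresh(self) -> bool:
        return self._value is not None and (time.time() - self._timestamp) < self._ttl

    async def get_or_fetch(self, fetch: Callable[[], Awaitable[Any]]) -> Any:
        # Fast path: no lock needed while the cached value is still fresh.
        if self._fresh():
            return self._value
        async with self._lock:
            # Re-check after acquiring the lock; another task may have
            # refreshed the cache while this one was waiting.
            if self._fresh():
                return self._value
            self._value = await fetch()
            self._timestamp = time.time()
            return self._value
```

Because the fast path reads without the lock, a caller can briefly see a value that is about to be refreshed; for lookup data with a 5-minute TTL that staleness is an acceptable trade for avoiding lock contention on every tool call.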
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rootly-mcp-server
-Version: 2.1.0
+Version: 2.1.2
 Summary: Secure Model Context Protocol server for Rootly APIs with AI SRE capabilities, comprehensive error handling, and input validation
 Project-URL: Homepage, https://github.com/Rootly-AI-Labs/Rootly-MCP-server
 Project-URL: Issues, https://github.com/Rootly-AI-Labs/Rootly-MCP-server/issues
@@ -5,14 +5,14 @@ rootly_mcp_server/exceptions.py,sha256=67J_wlfOICg87eUipbkARzn_6u_Io82L-5cVnk2UP
 rootly_mcp_server/monitoring.py,sha256=k1X7vK65FOTrCrOsLUXrFm6AJxKpXt_a0PzL6xdPuVU,11681
 rootly_mcp_server/pagination.py,sha256=2hZSO4DLUEJZbdF8oDfIt2_7X_XGBG1jIxN8VGmeJBE,2420
 rootly_mcp_server/security.py,sha256=YkMoVALZ3XaKnMu3yF5kVf3SW_jdKHllSMwVLk1OlX0,11556
-rootly_mcp_server/server.py,sha256=tJLRJgurdFlq-7m7jCsRYCeD1LFUbLff0YQ7yUQJWVc,115527
+rootly_mcp_server/server.py,sha256=IM_0HQZdWB9PmMc9Si4OKW4MFbzijvtrXQpFdFjleI0,150221
 rootly_mcp_server/smart_utils.py,sha256=c7S-8H151GfmDw6dZBDdLH_cCmR1qiXkKEYSKc0WwUY,23481
 rootly_mcp_server/texttest.json,sha256=KV9m13kWugmW1VEpU80Irp50uCcLgJtV1YT-JzMogQg,154182
 rootly_mcp_server/utils.py,sha256=TWG1MaaFKrU1phRhU6FgHuZAEv91JOe_1w0L2OrPJMY,4406
 rootly_mcp_server/validators.py,sha256=z1Lvel2SpOFLo1cPdQGSrX2ySt6zqR42w0R6QV9c2Cc,4092
 rootly_mcp_server/data/__init__.py,sha256=KdWD6hiRssHXt0Ywgj3wjNHY1sx-XSPEqVHqrTArf54,143
-rootly_mcp_server-2.1.0.dist-info/METADATA,sha256=0fXm7kY3z3npobc9BeEm-P0R6Et3yRGAisb-Wcr40vM,13560
-rootly_mcp_server-2.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-rootly_mcp_server-2.1.0.dist-info/entry_points.txt,sha256=NE33b8VgigVPGBkboyo6pvN1Vz35HZtLybxMO4Q03PI,70
-rootly_mcp_server-2.1.0.dist-info/licenses/LICENSE,sha256=c9w9ZZGl14r54tsP40oaq5adTVX_HMNHozPIH2ymzmw,11341
-rootly_mcp_server-2.1.0.dist-info/RECORD,,
+rootly_mcp_server-2.1.2.dist-info/METADATA,sha256=6HU6hf7I1n90bKQDVx1s84fGSC91DCss4D4TZxMtNKc,13560
+rootly_mcp_server-2.1.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+rootly_mcp_server-2.1.2.dist-info/entry_points.txt,sha256=NE33b8VgigVPGBkboyo6pvN1Vz35HZtLybxMO4Q03PI,70
+rootly_mcp_server-2.1.2.dist-info/licenses/LICENSE,sha256=c9w9ZZGl14r54tsP40oaq5adTVX_HMNHozPIH2ymzmw,11341
+rootly_mcp_server-2.1.2.dist-info/RECORD,,