rootly-mcp-server 2.1.1__py3-none-any.whl → 2.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rootly_mcp_server/och_client.py +71 -0
- rootly_mcp_server/server.py +1323 -0
- {rootly_mcp_server-2.1.1.dist-info → rootly_mcp_server-2.1.3.dist-info}/METADATA +38 -164
- {rootly_mcp_server-2.1.1.dist-info → rootly_mcp_server-2.1.3.dist-info}/RECORD +7 -6
- {rootly_mcp_server-2.1.1.dist-info → rootly_mcp_server-2.1.3.dist-info}/WHEEL +0 -0
- {rootly_mcp_server-2.1.1.dist-info → rootly_mcp_server-2.1.3.dist-info}/entry_points.txt +0 -0
- {rootly_mcp_server-2.1.1.dist-info → rootly_mcp_server-2.1.3.dist-info}/licenses/LICENSE +0 -0
rootly_mcp_server/server.py
CHANGED
@@ -5,10 +5,12 @@ This module implements a server that dynamically generates MCP tools based on
 the Rootly API's OpenAPI (Swagger) specification using FastMCP's OpenAPI integration.
 """
 
+import asyncio
 import json
 import logging
 import os
 from copy import deepcopy
+from datetime import datetime
 from pathlib import Path
 from typing import Annotated, Any
 
@@ -17,6 +19,7 @@ import requests
 from fastmcp import FastMCP
 from pydantic import Field
 
+from .och_client import OnCallHealthClient
 from .smart_utils import SolutionExtractor, TextSimilarityAnalyzer
 from .utils import sanitize_parameters_in_spec
 
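Note on the new `datetime` import: the added tools parse Rootly's ISO 8601 timestamps with `datetime.fromisoformat`, which only accepts a literal trailing "Z" from Python 3.11 onward, so the diffed code normalizes it to an explicit UTC offset first. A minimal illustration of the pattern (sample timestamps are illustrative, not from the package):

from datetime import datetime

# fromisoformat() rejects a trailing "Z" before Python 3.11,
# so normalize it to "+00:00" as the diffed code does.
starts_at = "2026-02-09T00:00:00Z"
ends_at = "2026-02-09T12:00:00Z"
start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
print((end_dt - start_dt).total_seconds() / 3600)  # 12.0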
@@ -2019,6 +2022,1029 @@ def create_rootly_mcp_server(
             start_time, end_time, schedule_ids, severity, status, tags
         )
 
+    # Cache for lookup maps (TTL: 5 minutes)
+    _lookup_maps_cache: dict[str, Any] = {
+        "data": None,
+        "timestamp": 0.0,
+        "ttl_seconds": 300,  # 5 minutes
+    }
+    _lookup_maps_lock = asyncio.Lock()
+
+    # Helper function to fetch users and schedules for enrichment
+    async def _fetch_users_and_schedules_maps() -> tuple[dict, dict, dict]:
+        """Fetch all users, schedules, and teams to build lookup maps.
+
+        Results are cached for 5 minutes to avoid repeated API calls.
+        """
+        import time
+
+        # Check cache (fast path without lock)
+        now = time.time()
+        if (
+            _lookup_maps_cache["data"] is not None
+            and (now - _lookup_maps_cache["timestamp"]) < _lookup_maps_cache["ttl_seconds"]
+        ):
+            return _lookup_maps_cache["data"]
+
+        # Acquire lock to prevent concurrent fetches
+        async with _lookup_maps_lock:
+            # Re-check cache after acquiring lock
+            now = time.time()
+            if (
+                _lookup_maps_cache["data"] is not None
+                and (now - _lookup_maps_cache["timestamp"]) < _lookup_maps_cache["ttl_seconds"]
+            ):
+                return _lookup_maps_cache["data"]
+
+            users_map = {}
+            schedules_map = {}
+            teams_map = {}
+
+            # Fetch all users with pagination
+            page = 1
+            while page <= 10:
+                users_response = await make_authenticated_request(
+                    "GET", "/v1/users", params={"page[size]": 100, "page[number]": page}
+                )
+                if users_response and users_response.status_code == 200:
+                    users_data = users_response.json()
+                    for user in users_data.get("data", []):
+                        users_map[user.get("id")] = user
+                    if len(users_data.get("data", [])) < 100:
+                        break
+                    page += 1
+                else:
+                    break
+
+            # Fetch all schedules with pagination
+            page = 1
+            while page <= 10:
+                schedules_response = await make_authenticated_request(
+                    "GET", "/v1/schedules", params={"page[size]": 100, "page[number]": page}
+                )
+                if schedules_response and schedules_response.status_code == 200:
+                    schedules_data = schedules_response.json()
+                    for schedule in schedules_data.get("data", []):
+                        schedules_map[schedule.get("id")] = schedule
+                    if len(schedules_data.get("data", [])) < 100:
+                        break
+                    page += 1
+                else:
+                    break
+
+            # Fetch all teams with pagination
+            page = 1
+            while page <= 10:
+                teams_response = await make_authenticated_request(
+                    "GET", "/v1/teams", params={"page[size]": 100, "page[number]": page}
+                )
+                if teams_response and teams_response.status_code == 200:
+                    teams_data = teams_response.json()
+                    for team in teams_data.get("data", []):
+                        teams_map[team.get("id")] = team
+                    if len(teams_data.get("data", [])) < 100:
+                        break
+                    page += 1
+                else:
+                    break
+
+            # Cache the result
+            result = (users_map, schedules_map, teams_map)
+            _lookup_maps_cache["data"] = result
+            _lookup_maps_cache["timestamp"] = now
+
+            return result
+
+    @mcp.tool()
+    async def list_shifts(
+        from_date: Annotated[
+            str,
+            Field(
+                description="Start date/time for shift query (ISO 8601 format, e.g., '2026-02-09T00:00:00Z')"
+            ),
+        ],
+        to_date: Annotated[
+            str,
+            Field(
+                description="End date/time for shift query (ISO 8601 format, e.g., '2026-02-15T23:59:59Z')"
+            ),
+        ],
+        user_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated list of user IDs to filter by (e.g., '2381,94178'). Only returns shifts for these users."
+            ),
+        ] = "",
+        schedule_ids: Annotated[
+            str,
+            Field(description="Comma-separated list of schedule IDs to filter by (optional)"),
+        ] = "",
+        include_user_details: Annotated[
+            bool,
+            Field(description="Include user name and email in response (default: True)"),
+        ] = True,
+    ) -> dict:
+        """
+        List on-call shifts with proper user filtering and enriched data.
+
+        Unlike the raw API, this tool:
+        - Actually filters by user_ids (client-side filtering)
+        - Includes user_name, user_email, schedule_name, team_name
+        - Calculates total_hours for each shift
+
+        Use this instead of the auto-generated listShifts when you need user filtering.
+        """
+        try:
+            from datetime import datetime
+
+            # Build query parameters
+            params: dict[str, Any] = {
+                "from": from_date,
+                "to": to_date,
+                "include": "user,on_call_role,schedule_rotation",
+                "page[size]": 100,
+            }
+
+            if schedule_ids:
+                schedule_id_list = [sid.strip() for sid in schedule_ids.split(",") if sid.strip()]
+                params["schedule_ids[]"] = schedule_id_list
+
+            # Parse user_ids for filtering
+            user_id_filter = set()
+            if user_ids:
+                user_id_filter = {uid.strip() for uid in user_ids.split(",") if uid.strip()}
+
+            # Fetch lookup maps for enrichment
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Build schedule -> team mapping
+            schedule_to_team = {}
+            for schedule_id, schedule in schedules_map.items():
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                if owner_group_ids:
+                    team_id = owner_group_ids[0]
+                    team = teams_map.get(team_id, {})
+                    schedule_to_team[schedule_id] = {
+                        "team_id": team_id,
+                        "team_name": team.get("attributes", {}).get("name", "Unknown Team"),
+                    }
+
+            # Fetch all shifts with pagination
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                # Update users_map from included data
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Process and filter shifts
+            enriched_shifts = []
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                relationships = shift.get("relationships", {})
+
+                # Get user info
+                user_rel = relationships.get("user", {}).get("data") or {}
+                user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not user_id:
+                    continue
+
+                # Apply user_ids filter
+                if user_id_filter and str(user_id) not in user_id_filter:
+                    continue
+
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+                user_email = user_attrs.get("email", "")
+
+                # Get schedule info
+                schedule_id = attrs.get("schedule_id")
+                schedule_info = schedules_map.get(schedule_id, {})
+                schedule_name = schedule_info.get("attributes", {}).get("name", "Unknown Schedule")
+
+                # Get team info
+                team_info = schedule_to_team.get(schedule_id, {})
+                team_name = team_info.get("team_name", "Unknown Team")
+
+                # Calculate total hours
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                total_hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        total_hours = round((end_dt - start_dt).total_seconds() / 3600, 2)
+                    except (ValueError, AttributeError):
+                        pass
+
+                enriched_shift = {
+                    "shift_id": shift.get("id"),
+                    "user_id": user_id,
+                    "schedule_id": schedule_id,
+                    "starts_at": starts_at,
+                    "ends_at": ends_at,
+                    "is_override": attrs.get("is_override", False),
+                    "total_hours": total_hours,
+                }
+
+                if include_user_details:
+                    enriched_shift["user_name"] = user_name
+                    enriched_shift["user_email"] = user_email
+                    enriched_shift["schedule_name"] = schedule_name
+                    enriched_shift["team_name"] = team_name
+
+                enriched_shifts.append(enriched_shift)
+
+            return {
+                "period": {"from": from_date, "to": to_date},
+                "total_shifts": len(enriched_shifts),
+                "filters_applied": {
+                    "user_ids": list(user_id_filter) if user_id_filter else None,
+                    "schedule_ids": schedule_ids if schedule_ids else None,
+                },
+                "shifts": enriched_shifts,
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to list shifts: {error_message}",
+                error_type,
+                details={
+                    "params": {"from": from_date, "to": to_date},
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
+    @mcp.tool()
+    async def get_oncall_schedule_summary(
+        start_date: Annotated[
+            str,
+            Field(description="Start date (ISO 8601, e.g., '2026-02-09')"),
+        ],
+        end_date: Annotated[
+            str,
+            Field(description="End date (ISO 8601, e.g., '2026-02-15')"),
+        ],
+        schedule_ids: Annotated[
+            str,
+            Field(description="Comma-separated schedule IDs to filter (optional)"),
+        ] = "",
+        team_ids: Annotated[
+            str,
+            Field(description="Comma-separated team IDs to filter (optional)"),
+        ] = "",
+        include_user_ids: Annotated[
+            bool,
+            Field(description="Include numeric user IDs for cross-platform correlation"),
+        ] = True,
+    ) -> dict:
+        """
+        Get compact on-call schedule summary for a date range.
+
+        Returns one entry per user per schedule (not raw shifts), with
+        aggregated hours. Optimized for AI agent context windows.
+
+        Use this instead of listShifts when you need:
+        - Aggregated hours per responder
+        - Schedule coverage overview
+        - Responder load analysis with warnings
+        """
+        try:
+            from collections import defaultdict
+            from datetime import datetime
+
+            # Parse filter IDs
+            schedule_id_filter = set()
+            if schedule_ids:
+                schedule_id_filter = {sid.strip() for sid in schedule_ids.split(",") if sid.strip()}
+
+            team_id_filter = set()
+            if team_ids:
+                team_id_filter = {tid.strip() for tid in team_ids.split(",") if tid.strip()}
+
+            # Fetch lookup maps
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Build schedule -> team mapping and apply team filter
+            schedule_to_team = {}
+            filtered_schedule_ids = set()
+            for schedule_id, schedule in schedules_map.items():
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                team_id = owner_group_ids[0] if owner_group_ids else None
+                team = teams_map.get(team_id, {}) if team_id else {}
+                team_name = team.get("attributes", {}).get("name", "Unknown Team")
+
+                schedule_to_team[schedule_id] = {
+                    "team_id": team_id,
+                    "team_name": team_name,
+                    "schedule_name": schedule.get("attributes", {}).get("name", "Unknown Schedule"),
+                }
+
+                # Apply filters
+                if schedule_id_filter and schedule_id not in schedule_id_filter:
+                    continue
+                if team_id_filter and (not team_id or team_id not in team_id_filter):
+                    continue
+                filtered_schedule_ids.add(schedule_id)
+
+            # If no filters, include all schedules
+            if not schedule_id_filter and not team_id_filter:
+                filtered_schedule_ids = set(schedules_map.keys())
+
+            # Fetch shifts
+            params: dict[str, Any] = {
+                "from": f"{start_date}T00:00:00Z" if "T" not in start_date else start_date,
+                "to": f"{end_date}T23:59:59Z" if "T" not in end_date else end_date,
+                "include": "user,on_call_role",
+                "page[size]": 100,
+            }
+
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                # Update users_map from included data
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Aggregate by schedule and user
+            schedule_coverage: dict[str, dict] = defaultdict(
+                lambda: {
+                    "schedule_name": "",
+                    "team_name": "",
+                    "responders": defaultdict(
+                        lambda: {
+                            "user_name": "",
+                            "user_id": None,
+                            "total_hours": 0.0,
+                            "shift_count": 0,
+                            "is_override": False,
+                        }
+                    ),
+                }
+            )
+
+            responder_load: dict[str, dict] = defaultdict(
+                lambda: {
+                    "user_name": "",
+                    "user_id": None,
+                    "total_hours": 0.0,
+                    "schedules": set(),
+                }
+            )
+
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                schedule_id = attrs.get("schedule_id")
+
+                # Apply schedule filter
+                if filtered_schedule_ids and schedule_id not in filtered_schedule_ids:
+                    continue
+
+                # Get user info
+                relationships = shift.get("relationships", {})
+                user_rel = relationships.get("user", {}).get("data") or {}
+                user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not user_id:
+                    continue
+
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+
+                # Get schedule/team info
+                sched_info = schedule_to_team.get(schedule_id, {})
+                schedule_name = sched_info.get("schedule_name", "Unknown Schedule")
+                team_name = sched_info.get("team_name", "Unknown Team")
+
+                # Calculate hours
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        hours = (end_dt - start_dt).total_seconds() / 3600
+                    except (ValueError, AttributeError):
+                        pass
+
+                is_override = attrs.get("is_override", False)
+
+                # Update schedule coverage
+                sched_data = schedule_coverage[schedule_id]
+                sched_data["schedule_name"] = schedule_name
+                sched_data["team_name"] = team_name
+
+                user_key = str(user_id)
+                sched_data["responders"][user_key]["user_name"] = user_name
+                sched_data["responders"][user_key]["user_id"] = user_id
+                sched_data["responders"][user_key]["total_hours"] += hours
+                sched_data["responders"][user_key]["shift_count"] += 1
+                if is_override:
+                    sched_data["responders"][user_key]["is_override"] = True
+
+                # Update responder load
+                responder_load[user_key]["user_name"] = user_name
+                responder_load[user_key]["user_id"] = user_id
+                responder_load[user_key]["total_hours"] += hours
+                responder_load[user_key]["schedules"].add(schedule_name)
+
+            # Format schedule coverage
+            formatted_coverage = []
+            for _schedule_id, sched_data in schedule_coverage.items():
+                responders_list = []
+                for _user_key, resp_data in sched_data["responders"].items():
+                    responder = {
+                        "user_name": resp_data["user_name"],
+                        "total_hours": round(resp_data["total_hours"], 1),
+                        "shift_count": resp_data["shift_count"],
+                        "is_override": resp_data["is_override"],
+                    }
+                    if include_user_ids:
+                        responder["user_id"] = resp_data["user_id"]
+                    responders_list.append(responder)
+
+                # Sort by hours descending
+                responders_list.sort(key=lambda x: x["total_hours"], reverse=True)
+
+                formatted_coverage.append(
+                    {
+                        "schedule_name": sched_data["schedule_name"],
+                        "team_name": sched_data["team_name"],
+                        "responders": responders_list,
+                    }
+                )
+
+            # Format responder load with warnings
+            formatted_load = []
+            for _user_key, load_data in responder_load.items():
+                schedules_list = list(load_data["schedules"])
+                hours = round(load_data["total_hours"], 1)
+
+                responder_entry = {
+                    "user_name": load_data["user_name"],
+                    "total_hours": hours,
+                    "schedules": schedules_list,
+                }
+                if include_user_ids:
+                    responder_entry["user_id"] = load_data["user_id"]
+
+                # Add warnings for high load
+                if len(schedules_list) >= 4:
+                    responder_entry["warning"] = (
+                        f"High load: {len(schedules_list)} concurrent schedules"
+                    )
+                elif hours >= 168:  # 7 days * 24 hours
+                    responder_entry["warning"] = f"High load: {hours} hours in period"
+
+                formatted_load.append(responder_entry)
+
+            # Sort by hours descending
+            formatted_load.sort(key=lambda x: x["total_hours"], reverse=True)
+            formatted_coverage.sort(key=lambda x: x["schedule_name"])
+
+            return {
+                "period": {"start": start_date, "end": end_date},
+                "total_schedules": len(formatted_coverage),
+                "total_responders": len(formatted_load),
+                "schedule_coverage": formatted_coverage,
+                "responder_load": formatted_load,
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to get on-call schedule summary: {error_message}",
+                error_type,
+                details={
+                    "params": {"start_date": start_date, "end_date": end_date},
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
+    @mcp.tool()
+    async def check_responder_availability(
+        start_date: Annotated[
+            str,
+            Field(description="Start date (ISO 8601, e.g., '2026-02-09')"),
+        ],
+        end_date: Annotated[
+            str,
+            Field(description="End date (ISO 8601, e.g., '2026-02-15')"),
+        ],
+        user_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated Rootly user IDs to check (e.g., '2381,94178,27965')"
+            ),
+        ],
+    ) -> dict:
+        """
+        Check if specific users are scheduled for on-call in a date range.
+
+        Use this to verify if at-risk users (from On-Call Health) are scheduled,
+        or to check availability before assigning new shifts.
+
+        Returns scheduled users with their shifts and total hours,
+        plus users who are not scheduled.
+        """
+        try:
+            from datetime import datetime
+
+            if not user_ids:
+                return MCPError.tool_error(
+                    "user_ids parameter is required",
+                    "validation_error",
+                )
+
+            # Parse user IDs
+            user_id_list = [uid.strip() for uid in user_ids.split(",") if uid.strip()]
+            user_id_set = set(user_id_list)
+
+            # Fetch lookup maps
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Build schedule -> team mapping
+            schedule_to_team = {}
+            for schedule_id, schedule in schedules_map.items():
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                if owner_group_ids:
+                    team_id = owner_group_ids[0]
+                    team = teams_map.get(team_id, {})
+                    schedule_to_team[schedule_id] = {
+                        "schedule_name": schedule.get("attributes", {}).get("name", "Unknown"),
+                        "team_name": team.get("attributes", {}).get("name", "Unknown Team"),
+                    }
+
+            # Fetch shifts
+            params: dict[str, Any] = {
+                "from": f"{start_date}T00:00:00Z" if "T" not in start_date else start_date,
+                "to": f"{end_date}T23:59:59Z" if "T" not in end_date else end_date,
+                "include": "user,on_call_role",
+                "page[size]": 100,
+            }
+
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                # Update users_map from included data
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Group shifts by user
+            user_shifts: dict[str, list] = {uid: [] for uid in user_id_list}
+            user_hours: dict[str, float] = dict.fromkeys(user_id_list, 0.0)
+
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                relationships = shift.get("relationships", {})
+
+                user_rel = relationships.get("user", {}).get("data") or {}
+                raw_user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not raw_user_id:
+                    continue
+
+                user_id = str(raw_user_id)
+
+                if user_id not in user_id_set:
+                    continue
+
+                schedule_id = attrs.get("schedule_id")
+                sched_info = schedule_to_team.get(schedule_id, {})
+
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        hours = round((end_dt - start_dt).total_seconds() / 3600, 1)
+                    except (ValueError, AttributeError):
+                        pass
+
+                user_shifts[user_id].append(
+                    {
+                        "schedule_name": sched_info.get("schedule_name", "Unknown"),
+                        "starts_at": starts_at,
+                        "ends_at": ends_at,
+                        "hours": hours,
+                    }
+                )
+                user_hours[user_id] += hours
+
+            # Format results
+            scheduled = []
+            not_scheduled = []
+
+            for user_id in user_id_list:
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+
+                shifts = user_shifts.get(user_id, [])
+                if shifts:
+                    scheduled.append(
+                        {
+                            "user_id": int(user_id) if user_id.isdigit() else user_id,
+                            "user_name": user_name,
+                            "total_hours": round(user_hours[user_id], 1),
+                            "shifts": shifts,
+                        }
+                    )
+                else:
+                    not_scheduled.append(
+                        {
+                            "user_id": int(user_id) if user_id.isdigit() else user_id,
+                            "user_name": user_name,
+                        }
+                    )
+
+            # Sort scheduled by hours descending
+            scheduled.sort(key=lambda x: x["total_hours"], reverse=True)
+
+            return {
+                "period": {"start": start_date, "end": end_date},
+                "checked_users": len(user_id_list),
+                "scheduled": scheduled,
+                "not_scheduled": not_scheduled,
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to check responder availability: {error_message}",
+                error_type,
+                details={
+                    "params": {
+                        "start_date": start_date,
+                        "end_date": end_date,
+                        "user_ids": user_ids,
+                    },
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
+    @mcp.tool()
+    async def create_override_recommendation(
+        schedule_id: Annotated[
+            str,
+            Field(description="Schedule ID to create override for"),
+        ],
+        original_user_id: Annotated[
+            int,
+            Field(description="User ID being replaced"),
+        ],
+        start_date: Annotated[
+            str,
+            Field(description="Override start (ISO 8601, e.g., '2026-02-09')"),
+        ],
+        end_date: Annotated[
+            str,
+            Field(description="Override end (ISO 8601, e.g., '2026-02-15')"),
+        ],
+        exclude_user_ids: Annotated[
+            str,
+            Field(description="Comma-separated user IDs to exclude (e.g., other at-risk users)"),
+        ] = "",
+    ) -> dict:
+        """
+        Recommend replacement responders for an override shift.
+
+        Finds users in the same schedule rotation who are not already
+        heavily loaded during the period.
+
+        Returns recommended replacements sorted by current load (lowest first),
+        plus a ready-to-use override payload for the top recommendation.
+        """
+        try:
+            from datetime import datetime
+
+            # Parse exclusions
+            exclude_set = set()
+            if exclude_user_ids:
+                exclude_set = {uid.strip() for uid in exclude_user_ids.split(",") if uid.strip()}
+            exclude_set.add(str(original_user_id))
+
+            # Fetch lookup maps
+            users_map, schedules_map, teams_map = await _fetch_users_and_schedules_maps()
+
+            # Get schedule info
+            schedule = schedules_map.get(schedule_id, {})
+            schedule_name = schedule.get("attributes", {}).get("name", "Unknown Schedule")
+
+            # Get original user info
+            original_user = users_map.get(str(original_user_id), {})
+            original_user_attrs = original_user.get("attributes", {})
+            original_user_name = (
+                original_user_attrs.get("full_name") or original_user_attrs.get("name") or "Unknown"
+            )
+
+            # Fetch schedule rotations to find rotation users
+            rotation_users = set()
+
+            # First, get the schedule to find its rotations
+            schedule_response = await make_authenticated_request(
+                "GET", f"/v1/schedules/{schedule_id}"
+            )
+
+            if schedule_response and schedule_response.status_code == 200:
+                import asyncio
+
+                schedule_data = schedule_response.json()
+                schedule_obj = schedule_data.get("data", {})
+                relationships = schedule_obj.get("relationships", {})
+
+                # Get schedule rotations
+                rotations = relationships.get("schedule_rotations", {}).get("data", [])
+                rotation_ids = [r.get("id") for r in rotations if r.get("id")]
+
+                # Fetch all rotation users in parallel
+                if rotation_ids:
+
+                    async def fetch_rotation_users(rotation_id: str):
+                        response = await make_authenticated_request(
+                            "GET",
+                            f"/v1/schedule_rotations/{rotation_id}/schedule_rotation_users",
+                            params={"page[size]": 100},
+                        )
+                        if response and response.status_code == 200:
+                            return response.json().get("data", [])
+                        return []
+
+                    # Execute all rotation user fetches in parallel
+                    rotation_results = await asyncio.gather(
+                        *[fetch_rotation_users(rid) for rid in rotation_ids], return_exceptions=True
+                    )
+
+                    # Process results
+                    for result in rotation_results:
+                        if isinstance(result, list):
+                            for ru in result:
+                                user_rel = (
+                                    ru.get("relationships", {}).get("user", {}).get("data", {})
+                                )
+                                user_id = user_rel.get("id")
+                                if user_id:
+                                    rotation_users.add(str(user_id))
+
+            # Fetch shifts to calculate current load for rotation users
+            params: dict[str, Any] = {
+                "from": f"{start_date}T00:00:00Z" if "T" not in start_date else start_date,
+                "to": f"{end_date}T23:59:59Z" if "T" not in end_date else end_date,
+                "include": "user",
+                "page[size]": 100,
+            }
+
+            all_shifts = []
+            page = 1
+            while page <= 10:
+                params["page[number]"] = page
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    break
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # Calculate load per user
+            user_load: dict[str, float] = {}
+            for shift in all_shifts:
+                attrs = shift.get("attributes", {})
+                relationships = shift.get("relationships", {})
+
+                user_rel = relationships.get("user", {}).get("data") or {}
+                raw_user_id = user_rel.get("id")
+
+                # Skip shifts without a user
+                if not raw_user_id:
+                    continue
+
+                user_id = str(raw_user_id)
+
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                hours = 0.0
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        hours = (end_dt - start_dt).total_seconds() / 3600
+                    except (ValueError, AttributeError):
+                        pass
+
+                user_load[user_id] = user_load.get(user_id, 0.0) + hours
+
+            # Find recommendations from rotation users
+            recommendations = []
+            for user_id in rotation_users:
+                if user_id in exclude_set:
+                    continue
+
+                user_info = users_map.get(user_id, {})
+                user_attrs = user_info.get("attributes", {})
+                user_name = user_attrs.get("full_name") or user_attrs.get("name") or "Unknown"
+
+                current_hours = round(user_load.get(user_id, 0.0), 1)
+
+                # Generate reason based on load
+                if current_hours == 0:
+                    reason = "Already in rotation, no current load"
+                elif current_hours < 24:
+                    reason = "Already in rotation, low load"
+                elif current_hours < 48:
+                    reason = "Same team, moderate availability"
+                else:
+                    reason = "In rotation, but higher load"
+
+                recommendations.append(
+                    {
+                        "user_id": int(user_id) if user_id.isdigit() else user_id,
+                        "user_name": user_name,
+                        "current_hours_in_period": current_hours,
+                        "reason": reason,
+                    }
+                )
+
+            # Sort by load (lowest first)
+            recommendations.sort(key=lambda x: x["current_hours_in_period"])
+
+            # Build override payload for top recommendation
+            override_payload = None
+            if recommendations:
+                top_rec = recommendations[0]
+                # Format dates for API
+                override_starts = f"{start_date}T00:00:00Z" if "T" not in start_date else start_date
+                override_ends = f"{end_date}T23:59:59Z" if "T" not in end_date else end_date
+
+                override_payload = {
+                    "schedule_id": schedule_id,
+                    "user_id": top_rec["user_id"],
+                    "starts_at": override_starts,
+                    "ends_at": override_ends,
+                }
+
+            # Build response with optional warning
+            response = {
+                "schedule_name": schedule_name,
+                "original_user": {
+                    "id": original_user_id,
+                    "name": original_user_name,
+                },
+                "period": {
+                    "start": start_date,
+                    "end": end_date,
+                },
+                "recommended_replacements": recommendations[:5],  # Top 5
+                "override_payload": override_payload,
+            }
+
+            # Add warning if no recommendations available
+            if not rotation_users:
+                response["warning"] = (
+                    "No rotation users found for this schedule. The schedule may not have any rotations configured."
+                )
+            elif not recommendations:
+                response["warning"] = (
+                    "All rotation users are either excluded or the original user. No recommendations available."
+                )
+
+            return response
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to create override recommendation: {error_message}",
+                error_type,
+                details={
+                    "params": {
+                        "schedule_id": schedule_id,
+                        "original_user_id": original_user_id,
+                    },
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
     # Add MCP resources for incidents and teams
     @mcp.resource("incident://{incident_id}")
     async def get_incident_resource(incident_id: str):
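The hunk above centers on `_fetch_users_and_schedules_maps`, which wraps three paginated listing calls in a five-minute TTL cache guarded by an `asyncio.Lock`, re-checking the cache after the lock is acquired so concurrent tool calls trigger at most one refetch. A standalone sketch of that double-checked caching pattern (the `fetch` callable and names here are hypothetical, not from the package):

import asyncio
import time
from typing import Any

_cache: dict[str, Any] = {"data": None, "timestamp": 0.0, "ttl_seconds": 300}
_lock = asyncio.Lock()

async def cached(fetch) -> Any:
    # Fast path: serve a fresh cached value without taking the lock.
    now = time.time()
    if _cache["data"] is not None and now - _cache["timestamp"] < _cache["ttl_seconds"]:
        return _cache["data"]
    async with _lock:
        # Double-check: another coroutine may have refreshed the cache
        # while this one was waiting on the lock.
        now = time.time()
        if _cache["data"] is not None and now - _cache["timestamp"] < _cache["ttl_seconds"]:
            return _cache["data"]
        _cache["data"] = await fetch()
        _cache["timestamp"] = now
        return _cache["data"]

The lock-free fast path keeps the common case cheap; the re-check under the lock is what prevents a thundering herd of identical API fetches when several tools wake up with a stale cache at once.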
@@ -2131,6 +3157,303 @@ Updated: {attributes.get("updated_at", "N/A")}"""
             "mimeType": "text/plain",
         }
 
+    @mcp.tool()
+    async def check_oncall_burnout_risk(
+        start_date: Annotated[
+            str,
+            Field(description="Start date for the on-call period (ISO 8601, e.g., '2026-02-09')"),
+        ],
+        end_date: Annotated[
+            str,
+            Field(description="End date for the on-call period (ISO 8601, e.g., '2026-02-15')"),
+        ],
+        och_analysis_id: Annotated[
+            int | None,
+            Field(
+                description="On-Call Health analysis ID. If not provided, uses the latest analysis"
+            ),
+        ] = None,
+        och_threshold: Annotated[
+            float,
+            Field(description="OCH score threshold for at-risk classification (default: 50.0)"),
+        ] = 50.0,
+        include_replacements: Annotated[
+            bool,
+            Field(description="Include recommended replacement responders (default: true)"),
+        ] = True,
+    ) -> dict:
+        """Check if any at-risk responders (based on On-Call Health burnout analysis) are scheduled for on-call.
+
+        Integrates with On-Call Health (oncallhealth.ai) to identify responders at risk of burnout
+        and checks if they are scheduled during the specified period. Optionally recommends
+        safe replacement responders.
+
+        Requires ONCALLHEALTH_API_KEY environment variable.
+        """
+        try:
+            # Validate OCH API key is configured
+            if not os.environ.get("ONCALLHEALTH_API_KEY"):
+                raise PermissionError(
+                    "ONCALLHEALTH_API_KEY environment variable required. "
+                    "Get your key from oncallhealth.ai/settings/api-keys"
+                )
+
+            och_client = OnCallHealthClient()
+
+            # 1. Get OCH analysis (by ID or latest)
+            try:
+                if och_analysis_id:
+                    analysis = await och_client.get_analysis(och_analysis_id)
+                else:
+                    analysis = await och_client.get_latest_analysis()
+                    och_analysis_id = analysis.get("id")
+            except httpx.HTTPStatusError as e:
+                raise ConnectionError(f"Failed to fetch On-Call Health data: {e}")
+            except ValueError as e:
+                raise ValueError(str(e))
+
+            # 2. Extract at-risk and safe users
+            at_risk_users, safe_users = och_client.extract_at_risk_users(
+                analysis, threshold=och_threshold
+            )
+
+            if not at_risk_users:
+                return {
+                    "period": {"start": start_date, "end": end_date},
+                    "och_analysis_id": och_analysis_id,
+                    "och_threshold": och_threshold,
+                    "at_risk_scheduled": [],
+                    "at_risk_not_scheduled": [],
+                    "recommended_replacements": [],
+                    "summary": {
+                        "total_at_risk": 0,
+                        "at_risk_scheduled": 0,
+                        "action_required": False,
+                        "message": "No users above burnout threshold.",
+                    },
+                }
+
+            # 3. Get shifts for the period
+            all_shifts = []
+            users_map = {}
+            schedules_map = {}
+
+            # Fetch lookup maps
+            lookup_users, lookup_schedules, lookup_teams = await _fetch_users_and_schedules_maps()
+            users_map.update({str(k): v for k, v in lookup_users.items()})
+            schedules_map.update({str(k): v for k, v in lookup_schedules.items()})
+
+            # Fetch shifts
+            page = 1
+            while page <= 10:
+                shifts_response = await make_authenticated_request(
+                    "GET",
+                    "/v1/shifts",
+                    params={
+                        "filter[starts_at_lte]": (
+                            end_date if "T" in end_date else f"{end_date}T23:59:59Z"
+                        ),
+                        "filter[ends_at_gte]": (
+                            start_date if "T" in start_date else f"{start_date}T00:00:00Z"
+                        ),
+                        "page[size]": 100,
+                        "page[number]": page,
+                        "include": "user,schedule",
+                    },
+                )
+                if shifts_response is None:
+                    break
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[str(resource.get("id"))] = resource
+                    elif resource.get("type") == "schedules":
+                        schedules_map[str(resource.get("id"))] = resource
+
+                if not shifts:
+                    break
+
+                all_shifts.extend(shifts)
+
+                meta = shifts_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+                if page >= total_pages:
+                    break
+                page += 1
+
+            # 5. Correlate: which at-risk users are scheduled?
+            at_risk_scheduled = []
+            at_risk_not_scheduled = []
+
+            for user in at_risk_users:
+                rootly_id = user.get("rootly_user_id")
+                if not rootly_id:
+                    continue
+
+                rootly_id_str = str(rootly_id)
+
+                # Find shifts for this user
+                user_shifts = []
+                for shift in all_shifts:
+                    relationships = shift.get("relationships", {})
+                    user_rel = relationships.get("user", {}).get("data") or {}
+                    shift_user_id = str(user_rel.get("id", ""))
+
+                    if shift_user_id == rootly_id_str:
+                        attrs = shift.get("attributes", {})
+                        schedule_rel = relationships.get("schedule", {}).get("data") or {}
+                        schedule_id = str(schedule_rel.get("id", ""))
+                        schedule_info = schedules_map.get(schedule_id, {})
+                        schedule_name = schedule_info.get("attributes", {}).get("name", "Unknown")
+
+                        starts_at = attrs.get("starts_at")
+                        ends_at = attrs.get("ends_at")
+                        hours = 0.0
+                        if starts_at and ends_at:
+                            try:
+                                start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                                end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                                hours = (end_dt - start_dt).total_seconds() / 3600
+                            except (ValueError, AttributeError):
+                                pass
+
+                        user_shifts.append(
+                            {
+                                "schedule_id": schedule_id,
+                                "schedule_name": schedule_name,
+                                "starts_at": starts_at,
+                                "ends_at": ends_at,
+                                "hours": round(hours, 1),
+                            }
+                        )
+
+                if user_shifts:
+                    total_hours = sum(s["hours"] for s in user_shifts)
+                    at_risk_scheduled.append(
+                        {
+                            "user_name": user["user_name"],
+                            "user_id": int(rootly_id),
+                            "och_score": user["och_score"],
+                            "risk_level": user["risk_level"],
+                            "burnout_score": user["burnout_score"],
+                            "total_hours": round(total_hours, 1),
+                            "shifts": user_shifts,
+                        }
+                    )
+                else:
+                    at_risk_not_scheduled.append(
+                        {
+                            "user_name": user["user_name"],
+                            "user_id": int(rootly_id) if rootly_id else None,
+                            "och_score": user["och_score"],
+                            "risk_level": user["risk_level"],
+                        }
+                    )
+
+            # 6. Get recommended replacements (if requested)
+            recommended_replacements = []
+            if include_replacements and safe_users:
+                safe_rootly_ids = [
+                    str(u["rootly_user_id"]) for u in safe_users[:10] if u.get("rootly_user_id")
+                ]
+
+                if safe_rootly_ids:
+                    # Calculate current hours for safe users
+                    for user in safe_users[:5]:
+                        rootly_id = user.get("rootly_user_id")
+                        if not rootly_id:
+                            continue
+
+                        rootly_id_str = str(rootly_id)
+                        user_hours = 0.0
+
+                        for shift in all_shifts:
+                            relationships = shift.get("relationships", {})
+                            user_rel = relationships.get("user", {}).get("data") or {}
+                            shift_user_id = str(user_rel.get("id", ""))
+
+                            if shift_user_id == rootly_id_str:
+                                attrs = shift.get("attributes", {})
+                                starts_at = attrs.get("starts_at")
+                                ends_at = attrs.get("ends_at")
+                                if starts_at and ends_at:
+                                    try:
+                                        start_dt = datetime.fromisoformat(
+                                            starts_at.replace("Z", "+00:00")
+                                        )
+                                        end_dt = datetime.fromisoformat(
+                                            ends_at.replace("Z", "+00:00")
+                                        )
+                                        user_hours += (end_dt - start_dt).total_seconds() / 3600
+                                    except (ValueError, AttributeError):
+                                        pass
+
+                        recommended_replacements.append(
+                            {
+                                "user_name": user["user_name"],
+                                "user_id": int(rootly_id),
+                                "och_score": user["och_score"],
+                                "risk_level": user["risk_level"],
+                                "current_hours_in_period": round(user_hours, 1),
+                            }
+                        )
+
+            # 7. Build summary
+            total_scheduled_hours = sum(u["total_hours"] for u in at_risk_scheduled)
+            action_required = len(at_risk_scheduled) > 0
+
+            if action_required:
+                message = (
+                    f"{len(at_risk_scheduled)} at-risk user(s) scheduled for "
+                    f"{total_scheduled_hours} hours. Consider reassignment."
+                )
+            else:
+                message = "No at-risk users are scheduled for the period."
+
+            return {
+                "period": {"start": start_date, "end": end_date},
+                "och_analysis_id": och_analysis_id,
+                "och_threshold": och_threshold,
+                "at_risk_scheduled": at_risk_scheduled,
+                "at_risk_not_scheduled": at_risk_not_scheduled,
+                "recommended_replacements": recommended_replacements,
+                "summary": {
+                    "total_at_risk": len(at_risk_users),
+                    "at_risk_scheduled": len(at_risk_scheduled),
+                    "action_required": action_required,
+                    "message": message,
+                },
+            }
+
+        except PermissionError as e:
+            return MCPError.tool_error(str(e), "permission_error")
+        except ConnectionError as e:
+            return MCPError.tool_error(str(e), "connection_error")
+        except ValueError as e:
+            return MCPError.tool_error(str(e), "validation_error")
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to check burnout risk: {error_message}",
+                error_type,
+                details={
+                    "params": {
+                        "start_date": start_date,
+                        "end_date": end_date,
+                        "och_analysis_id": och_analysis_id,
+                    },
+                    "exception_type": type(e).__name__,
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
     # Log server creation (tool count will be shown when tools are accessed)
     logger.info("Created Rootly MCP Server successfully")
     return mcp
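For reference, all five new tools are registered on the FastMCP instance returned by create_rootly_mcp_server, so they can be exercised with FastMCP's in-memory client. A rough sketch, assuming FastMCP 2.x client semantics and that create_rootly_mcp_server needs no arguments beyond environment-based configuration (e.g., a Rootly API token); neither assumption is shown in this diff:

import asyncio
from fastmcp import Client
from rootly_mcp_server.server import create_rootly_mcp_server

async def main():
    server = create_rootly_mcp_server()  # assumed: configured via environment
    async with Client(server) as client:  # in-memory transport, no network hop
        result = await client.call_tool(
            "list_shifts",
            {"from_date": "2026-02-09T00:00:00Z", "to_date": "2026-02-15T23:59:59Z"},
        )
        print(result)

asyncio.run(main())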