rootly-mcp-server 2.0.13__py3-none-any.whl → 2.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -153,6 +153,22 @@ DEFAULT_ALLOWED_PATHS = [
  # Status pages
  "/status_pages",
  "/status_pages/{status_page_id}",
+ # On-call schedules and shifts
+ "/schedules",
+ "/schedules/{schedule_id}",
+ "/schedules/{schedule_id}/shifts",
+ "/shifts",
+ "/schedule_rotations/{schedule_rotation_id}",
+ "/schedule_rotations/{schedule_rotation_id}/schedule_rotation_users",
+ "/schedule_rotations/{schedule_rotation_id}/schedule_rotation_active_days",
+ # On-call overrides
+ "/schedules/{schedule_id}/override_shifts",
+ "/override_shifts/{override_shift_id}",
+ # On-call shadows and roles
+ "/schedules/{schedule_id}/on_call_shadows",
+ "/on_call_shadows/{on_call_shadow_id}",
+ "/on_call_roles",
+ "/on_call_roles/{on_call_role_id}",
  ]
 
 
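The new entries are URI templates, where a `{schedule_id}`-style segment stands for one path parameter. As a minimal illustration (the server's actual matching logic is not shown in this diff), such a template can be checked against a concrete request path like so:

    import re

    def path_allowed(path: str, allowed_templates: list[str]) -> bool:
        # Treat each {placeholder} as exactly one path segment; the templates
        # above contain no regex metacharacters, so no escaping is needed here.
        for template in allowed_templates:
            pattern = re.sub(r"\{[^}]+\}", "[^/]+", template)
            if re.fullmatch(pattern, path):
                return True
        return False

    # path_allowed("/schedules/42/shifts", ["/schedules/{schedule_id}/shifts"]) -> True
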
@@ -394,6 +410,7 @@ def create_rootly_mcp_server(
  "page[size]": page_size, # Use requested page size (already limited to max 20)
  "page[number]": page_number,
  "include": "",
+ "fields[incidents]": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
  }
  if query:
  params["filter[search]"] = query
@@ -418,6 +435,7 @@ def create_rootly_mcp_server(
  "page[size]": effective_page_size,
  "page[number]": current_page,
  "include": "",
+ "fields[incidents]": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
  }
  if query:
  params["filter[search]"] = query
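Both incident-search code paths now pin a JSON:API sparse fieldset, so every page fetch carries only the nine listed attributes. For illustration, this is how the params dict serializes with the standard library (the exact encoding depends on the HTTP client in use):

    from urllib.parse import urlencode

    params = {
        "page[size]": 10,
        "page[number]": 1,
        "include": "",
        "fields[incidents]": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
    }
    print(urlencode(params))
    # page%5Bsize%5D=10&page%5Bnumber%5D=1&include=&fields%5Bincidents%5D=id%2Ctitle%2C...
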
@@ -485,42 +503,66 @@ def create_rootly_mcp_server(
 
  @mcp.tool()
  async def find_related_incidents(
- incident_id: str,
- similarity_threshold: Annotated[float, Field(description="Minimum similarity score (0.0-1.0)", ge=0.0, le=1.0)] = 0.3,
- max_results: Annotated[int, Field(description="Maximum number of related incidents to return", ge=1, le=20)] = 5
+ incident_id: str = "",
+ incident_description: str = "",
+ similarity_threshold: Annotated[float, Field(description="Minimum similarity score (0.0-1.0)", ge=0.0, le=1.0)] = 0.15,
+ max_results: Annotated[int, Field(description="Maximum number of related incidents to return", ge=1, le=20)] = 5,
+ status_filter: Annotated[str, Field(description="Filter incidents by status (empty for all, 'resolved', 'investigating', etc.)")] = ""
  ) -> dict:
- """Find historically similar incidents to help with context and resolution strategies."""
+ """Find similar incidents to help with context and resolution strategies. Provide either incident_id OR incident_description (e.g., 'website is down', 'database timeout errors'). Use status_filter to limit to specific incident statuses or leave empty for all incidents."""
  try:
- # Get the target incident details
- target_response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
- target_response.raise_for_status()
- target_incident_data = target_response.json()
- target_incident = target_incident_data.get("data", {})
+ target_incident = {}
 
- if not target_incident:
- return MCPError.tool_error("Incident not found", "not_found")
+ if incident_id:
+ # Get the target incident details by ID
+ target_response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
+ target_response.raise_for_status()
+ target_incident_data = target_response.json()
+ target_incident = target_incident_data.get("data", {})
+
+ if not target_incident:
+ return MCPError.tool_error("Incident not found", "not_found")
+
+ elif incident_description:
+ # Create synthetic incident for analysis from descriptive text
+ target_incident = {
+ "id": "synthetic",
+ "attributes": {
+ "title": incident_description,
+ "summary": incident_description,
+ "description": incident_description
+ }
+ }
+ else:
+ return MCPError.tool_error("Must provide either incident_id or incident_description", "validation_error")
 
- # Get historical incidents for comparison (resolved incidents from last 6 months)
- historical_response = await make_authenticated_request("GET", "/v1/incidents", params={
+ # Get historical incidents for comparison
+ params = {
  "page[size]": 100, # Get more incidents for better matching
  "page[number]": 1,
- "filter[status]": "resolved", # Only look at resolved incidents
  "include": ""
- })
+ }
+
+ # Only add status filter if specified
+ if status_filter:
+ params["filter[status]"] = status_filter
+
+ historical_response = await make_authenticated_request("GET", "/v1/incidents", params=params)
  historical_response.raise_for_status()
  historical_data = historical_response.json()
  historical_incidents = historical_data.get("data", [])
 
- # Filter out the target incident itself
- historical_incidents = [inc for inc in historical_incidents if str(inc.get('id')) != str(incident_id)]
+ # Filter out the target incident itself if it exists
+ if incident_id:
+ historical_incidents = [inc for inc in historical_incidents if str(inc.get('id')) != str(incident_id)]
 
  if not historical_incidents:
  return {
  "related_incidents": [],
  "message": "No historical incidents found for comparison",
  "target_incident": {
- "id": incident_id,
- "title": target_incident.get("attributes", {}).get("title", "")
+ "id": incident_id or "synthetic",
+ "title": target_incident.get("attributes", {}).get("title", incident_description)
  }
  }
 
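With the widened signature, the tool accepts either a concrete incident ID or free text, which it wraps in a synthetic incident record before scoring. A hedged usage sketch (`client` is a hypothetical MCP client object, not part of this package):

    # Neighbours of a known incident, across all statuses:
    await client.call_tool("find_related_incidents", {"incident_id": "12345"})

    # Or start from a plain-text description, restricted to resolved incidents:
    await client.call_tool("find_related_incidents", {
        "incident_description": "database timeout errors",
        "status_filter": "resolved",
    })

Note the default similarity_threshold also dropped from 0.3 to 0.15, so more loosely related incidents now clear the bar.
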
@@ -548,8 +590,8 @@ def create_rootly_mcp_server(
 
  return {
  "target_incident": {
- "id": incident_id,
- "title": target_incident.get("attributes", {}).get("title", "")
+ "id": incident_id or "synthetic",
+ "title": target_incident.get("attributes", {}).get("title", incident_description)
  },
  "related_incidents": related_incidents,
  "total_found": len(filtered_incidents),
@@ -566,9 +608,10 @@ def create_rootly_mcp_server(
  incident_id: str = "",
  incident_title: str = "",
  incident_description: str = "",
- max_solutions: Annotated[int, Field(description="Maximum number of solution suggestions", ge=1, le=10)] = 3
+ max_solutions: Annotated[int, Field(description="Maximum number of solution suggestions", ge=1, le=10)] = 3,
+ status_filter: Annotated[str, Field(description="Filter incidents by status (default 'resolved', empty for all, 'investigating', etc.)")] = "resolved"
  ) -> dict:
- """Suggest solutions based on similar resolved incidents. Provide either incident_id OR title/description."""
+ """Suggest solutions based on similar incidents. Provide either incident_id OR title/description. Defaults to resolved incidents for solution mining, but can search all statuses."""
  try:
  target_incident = {}
 
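Unlike find_related_incidents, which now defaults to all statuses, suggest_solutions keeps `status_filter="resolved"` as its default so that solution mining stays anchored to incidents that actually reached a fix. A sketch with the same hypothetical client as above:

    # Default: mine resolved incidents only.
    await client.call_tool("suggest_solutions", {"incident_title": "API latency spike"})

    # An explicitly empty status_filter widens the search to every status.
    await client.call_tool("suggest_solutions", {
        "incident_title": "API latency spike",
        "status_filter": "",
    })
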
@@ -595,13 +638,18 @@ def create_rootly_mcp_server(
  else:
  return MCPError.tool_error("Must provide either incident_id or incident_title/description", "validation_error")
 
- # Get resolved incidents for solution mining
- historical_response = await make_authenticated_request("GET", "/v1/incidents", params={
+ # Get incidents for solution mining
+ params = {
  "page[size]": 150, # Get more incidents for better solution matching
  "page[number]": 1,
- "filter[status]": "resolved",
  "include": ""
- })
+ }
+
+ # Only add status filter if specified
+ if status_filter:
+ params["filter[status]"] = status_filter
+
+ historical_response = await make_authenticated_request("GET", "/v1/incidents", params=params)
  historical_response.raise_for_status()
  historical_data = historical_response.json()
  historical_incidents = historical_data.get("data", [])
@@ -611,9 +659,10 @@ def create_rootly_mcp_server(
  historical_incidents = [inc for inc in historical_incidents if str(inc.get('id')) != str(incident_id)]
 
  if not historical_incidents:
+ status_msg = f" with status '{status_filter}'" if status_filter else ""
  return {
  "solutions": [],
- "message": "No historical resolved incidents found for solution mining"
+ "message": f"No historical incidents found{status_msg} for solution mining"
  }
 
  # Find similar incidents
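Both tools share the same conditional-filter pattern: when `status_filter` is empty, the `filter[status]` key is omitted from the query entirely rather than sent as an empty value. Condensed:

    def build_params(status_filter: str = "") -> dict:
        params = {"page[size]": 150, "page[number]": 1, "include": ""}
        if status_filter:  # omit the key entirely when no filtering is wanted
            params["filter[status]"] = status_filter
        return params

    assert "filter[status]" not in build_params()
    assert build_params("resolved")["filter[status]"] == "resolved"
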
@@ -652,6 +701,945 @@ def create_rootly_mcp_server(
  error_type, error_message = MCPError.categorize_error(e)
  return MCPError.tool_error(f"Failed to suggest solutions: {error_message}", error_type)
 
+ @mcp.tool()
+ async def get_oncall_shift_metrics(
+ start_date: Annotated[str, Field(description="Start date for metrics (ISO 8601 format, e.g., '2025-10-01' or '2025-10-01T00:00:00Z')")],
+ end_date: Annotated[str, Field(description="End date for metrics (ISO 8601 format, e.g., '2025-10-31' or '2025-10-31T23:59:59Z')")],
+ user_ids: Annotated[str, Field(description="Comma-separated list of user IDs to filter by (optional)")] = "",
+ schedule_ids: Annotated[str, Field(description="Comma-separated list of schedule IDs to filter by (optional)")] = "",
+ team_ids: Annotated[str, Field(description="Comma-separated list of team IDs to filter by (requires querying schedules first)")] = "",
+ group_by: Annotated[str, Field(description="Group results by: 'user', 'schedule', 'team', or 'none'")] = "user"
+ ) -> dict:
+ """
+ Get on-call shift metrics for a specified time period. Returns shift counts, total hours,
+ and other statistics grouped by user, schedule, or team.
+
+ Examples:
+ - Monthly report: start_date='2025-10-01', end_date='2025-10-31'
+ - Specific user: start_date='2025-10-01', end_date='2025-10-31', user_ids='123,456'
+ - Specific team: team_ids='team-1' (will query schedules for that team first)
+ """
+ try:
+ from datetime import datetime, timedelta
+ from collections import defaultdict
+ from typing import Any, Dict
+
+ # Build query parameters
+ params: Dict[str, Any] = {
+ "from": start_date,
+ "to": end_date,
+ }
+
+ # Fetch schedules (schedules don't have team relationship, they have owner_group_ids)
+ schedules_response = await make_authenticated_request("GET", "/v1/schedules", params={"page[size]": 100})
+
+ if schedules_response is None:
+ return MCPError.tool_error("Failed to get schedules: API request returned None", "execution_error")
+
+ schedules_response.raise_for_status()
+ schedules_data = schedules_response.json()
+
+ all_schedules = schedules_data.get("data", [])
+
+ # Collect all unique team IDs from schedules' owner_group_ids
+ team_ids_set = set()
+ for schedule in all_schedules:
+ owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+ team_ids_set.update(owner_group_ids)
+
+ # Fetch all teams
+ teams_map = {}
+ if team_ids_set:
+ teams_response = await make_authenticated_request("GET", "/v1/teams", params={"page[size]": 100})
+ if teams_response and teams_response.status_code == 200:
+ teams_data = teams_response.json()
+ for team in teams_data.get("data", []):
+ teams_map[team.get("id")] = team
+
+ # Build schedule -> team mapping
+ schedule_to_team_map = {}
+ for schedule in all_schedules:
+ schedule_id = schedule.get("id")
+ schedule_name = schedule.get("attributes", {}).get("name", "Unknown")
+ owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+
+ # Use the first owner group as the primary team
+ if owner_group_ids:
+ team_id = owner_group_ids[0]
+ team_attrs = teams_map.get(team_id, {}).get("attributes", {})
+ team_name = team_attrs.get("name", "Unknown Team")
+ schedule_to_team_map[schedule_id] = {
+ "team_id": team_id,
+ "team_name": team_name,
+ "schedule_name": schedule_name
+ }
+
+ # Handle team filtering (requires multi-step query)
+ target_schedule_ids = []
+ if team_ids:
+ team_id_list = [tid.strip() for tid in team_ids.split(",") if tid.strip()]
+
+ # Filter schedules by team
+ for schedule_id, team_info in schedule_to_team_map.items():
+ if str(team_info["team_id"]) in team_id_list:
+ target_schedule_ids.append(schedule_id)
+
+ # Apply schedule filtering
+ if schedule_ids:
+ schedule_id_list = [sid.strip() for sid in schedule_ids.split(",") if sid.strip()]
+ target_schedule_ids.extend(schedule_id_list)
+
+ if target_schedule_ids:
+ params["schedule_ids[]"] = target_schedule_ids
+
+ # Apply user filtering
+ if user_ids:
+ user_id_list = [uid.strip() for uid in user_ids.split(",") if uid.strip()]
+ params["user_ids[]"] = user_id_list
+
+ # Include relationships for richer data
+ params["include"] = "user,shift_override,on_call_role,schedule_rotation"
+
+ # Query shifts
+ try:
+ shifts_response = await make_authenticated_request("GET", "/v1/shifts", params=params)
+
+ if shifts_response is None:
+ return MCPError.tool_error("Failed to get shifts: API request returned None", "execution_error")
+
+ shifts_response.raise_for_status()
+ shifts_data = shifts_response.json()
+
+ if shifts_data is None:
+ return MCPError.tool_error("Failed to get shifts: API returned null/empty response", "execution_error", details={"status": shifts_response.status_code})
+
+ shifts = shifts_data.get("data", [])
+ included = shifts_data.get("included", [])
+ except AttributeError as e:
+ return MCPError.tool_error(f"Failed to get shifts: Response object error - {str(e)}", "execution_error", details={"params": params})
+ except Exception as e:
+ return MCPError.tool_error(f"Failed to get shifts: {str(e)}", "execution_error", details={"params": params, "error_type": type(e).__name__})
+
+ # Build lookup maps for included resources
+ users_map = {}
+ on_call_roles_map = {}
+ for resource in included:
+ if resource.get("type") == "users":
+ users_map[resource.get("id")] = resource
+ elif resource.get("type") == "on_call_roles":
+ on_call_roles_map[resource.get("id")] = resource
+
+ # Calculate metrics
+ metrics: Dict[str, Dict[str, Any]] = defaultdict(lambda: {
+ "shift_count": 0,
+ "total_hours": 0.0,
+ "override_count": 0,
+ "regular_count": 0,
+ "primary_count": 0,
+ "secondary_count": 0,
+ "primary_hours": 0.0,
+ "secondary_hours": 0.0,
+ "unknown_role_count": 0,
+ "unique_days": set(),
+ "shifts": []
+ })
+
+ for shift in shifts:
+ attrs = shift.get("attributes", {})
+ relationships = shift.get("relationships", {})
+
+ # Parse timestamps
+ starts_at = attrs.get("starts_at")
+ ends_at = attrs.get("ends_at")
+ is_override = attrs.get("is_override", False)
+ schedule_id = attrs.get("schedule_id")
+
+ # Calculate shift duration in hours and track unique days
+ duration_hours = 0.0
+ shift_days = set()
+ if starts_at and ends_at:
+ try:
+ start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+ end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+ duration_hours = (end_dt - start_dt).total_seconds() / 3600
+
+ # Track all unique calendar days this shift spans
+ shift_start_date = start_dt.date()
+ shift_end_date = end_dt.date()
+ while shift_start_date <= shift_end_date:
+ shift_days.add(shift_start_date)
+ shift_start_date += timedelta(days=1)
+ except (ValueError, AttributeError):
+ pass
+
+ # Get user info
+ user_rel = relationships.get("user", {}).get("data") or {}
+ user_id = user_rel.get("id")
+ user_name = "Unknown"
+ user_email = ""
+
+ if user_id and user_id in users_map:
+ user_attrs = users_map[user_id].get("attributes", {})
+ user_name = user_attrs.get("full_name") or user_attrs.get("email", "Unknown")
+ user_email = user_attrs.get("email", "")
+
+ # Get on-call role info (primary vs secondary)
+ role_rel = relationships.get("on_call_role", {}).get("data") or {}
+ role_id = role_rel.get("id")
+ role_name = "unknown"
+ is_primary = False
+
+ if role_id and role_id in on_call_roles_map:
+ role_attrs = on_call_roles_map[role_id].get("attributes", {})
+ role_name = role_attrs.get("name", "").lower()
+ # Typically primary roles contain "primary" and secondary contain "secondary"
+ # Common patterns: "Primary", "Secondary", "L1", "L2", etc.
+ is_primary = "primary" in role_name or role_name == "l1" or role_name == "p1"
+
+ # Determine grouping key
+ if group_by == "user":
+ key = f"{user_id}|{user_name}"
+ elif group_by == "schedule":
+ schedule_info = schedule_to_team_map.get(schedule_id, {})
+ schedule_name = schedule_info.get("schedule_name", f"schedule_{schedule_id}")
+ key = f"{schedule_id}|{schedule_name}"
+ elif group_by == "team":
+ team_info = schedule_to_team_map.get(schedule_id, {})
+ if team_info:
+ team_id = team_info["team_id"]
+ team_name = team_info["team_name"]
+ key = f"{team_id}|{team_name}"
+ else:
+ key = "unknown_team|Unknown Team"
+ else:
+ key = "all"
+
+ # Update metrics
+ metrics[key]["shift_count"] += 1
+ metrics[key]["total_hours"] += duration_hours
+
+ if is_override:
+ metrics[key]["override_count"] += 1
+ else:
+ metrics[key]["regular_count"] += 1
+
+ # Track primary vs secondary
+ if role_id:
+ if is_primary:
+ metrics[key]["primary_count"] += 1
+ metrics[key]["primary_hours"] += duration_hours
+ else:
+ metrics[key]["secondary_count"] += 1
+ metrics[key]["secondary_hours"] += duration_hours
+ else:
+ metrics[key]["unknown_role_count"] += 1
+
+ # Track unique days
+ metrics[key]["unique_days"].update(shift_days)
+
+ metrics[key]["shifts"].append({
+ "shift_id": shift.get("id"),
+ "starts_at": starts_at,
+ "ends_at": ends_at,
+ "duration_hours": round(duration_hours, 2),
+ "is_override": is_override,
+ "schedule_id": schedule_id,
+ "user_id": user_id,
+ "user_name": user_name,
+ "user_email": user_email,
+ "role_name": role_name,
+ "is_primary": is_primary
+ })
+
+ # Format results
+ results = []
+ for key, data in metrics.items():
+ if group_by == "user":
+ user_id, user_name = key.split("|", 1)
+ result = {
+ "user_id": user_id,
+ "user_name": user_name,
+ "shift_count": data["shift_count"],
+ "days_on_call": len(data["unique_days"]),
+ "total_hours": round(data["total_hours"], 2),
+ "regular_shifts": data["regular_count"],
+ "override_shifts": data["override_count"],
+ "primary_shifts": data["primary_count"],
+ "secondary_shifts": data["secondary_count"],
+ "primary_hours": round(data["primary_hours"], 2),
+ "secondary_hours": round(data["secondary_hours"], 2),
+ "unknown_role_shifts": data["unknown_role_count"],
+ }
+ elif group_by == "schedule":
+ schedule_id, schedule_name = key.split("|", 1)
+ result = {
+ "schedule_id": schedule_id,
+ "schedule_name": schedule_name,
+ "shift_count": data["shift_count"],
+ "days_on_call": len(data["unique_days"]),
+ "total_hours": round(data["total_hours"], 2),
+ "regular_shifts": data["regular_count"],
+ "override_shifts": data["override_count"],
+ "primary_shifts": data["primary_count"],
+ "secondary_shifts": data["secondary_count"],
+ "primary_hours": round(data["primary_hours"], 2),
+ "secondary_hours": round(data["secondary_hours"], 2),
+ "unknown_role_shifts": data["unknown_role_count"],
+ }
+ elif group_by == "team":
+ team_id, team_name = key.split("|", 1)
+ result = {
+ "team_id": team_id,
+ "team_name": team_name,
+ "shift_count": data["shift_count"],
+ "days_on_call": len(data["unique_days"]),
+ "total_hours": round(data["total_hours"], 2),
+ "regular_shifts": data["regular_count"],
+ "override_shifts": data["override_count"],
+ "primary_shifts": data["primary_count"],
+ "secondary_shifts": data["secondary_count"],
+ "primary_hours": round(data["primary_hours"], 2),
+ "secondary_hours": round(data["secondary_hours"], 2),
+ "unknown_role_shifts": data["unknown_role_count"],
+ }
+ else:
+ result = {
+ "group_key": key,
+ "shift_count": data["shift_count"],
+ "days_on_call": len(data["unique_days"]),
+ "total_hours": round(data["total_hours"], 2),
+ "regular_shifts": data["regular_count"],
+ "override_shifts": data["override_count"],
+ "primary_shifts": data["primary_count"],
+ "secondary_shifts": data["secondary_count"],
+ "primary_hours": round(data["primary_hours"], 2),
+ "secondary_hours": round(data["secondary_hours"], 2),
+ "unknown_role_shifts": data["unknown_role_count"],
+ }
+
+ results.append(result)
+
+ # Sort by shift count descending
+ results.sort(key=lambda x: x["shift_count"], reverse=True)
+
+ return {
+ "period": {
+ "start_date": start_date,
+ "end_date": end_date
+ },
+ "total_shifts": len(shifts),
+ "grouped_by": group_by,
+ "metrics": results,
+ "summary": {
+ "total_hours": round(sum(m["total_hours"] for m in results), 2),
+ "total_regular_shifts": sum(m["regular_shifts"] for m in results),
+ "total_override_shifts": sum(m["override_shifts"] for m in results),
+ "unique_people": len(results) if group_by == "user" else None
+ }
+ }
+
+ except Exception as e:
+ import traceback
+ error_type, error_message = MCPError.categorize_error(e)
+ return MCPError.tool_error(
+ f"Failed to get on-call shift metrics: {error_message}",
+ error_type,
+ details={
+ "params": {"start_date": start_date, "end_date": end_date},
+ "exception_type": type(e).__name__,
+ "exception_str": str(e),
+ "traceback": traceback.format_exc()
+ }
+ )
+
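Internally the tool groups on a composite "{id}|{name}" string key and splits it back apart with key.split("|", 1) when formatting results. A hedged invocation sketch (same hypothetical client as above):

    result = await client.call_tool("get_oncall_shift_metrics", {
        "start_date": "2025-10-01",
        "end_date": "2025-10-31",
        "group_by": "user",
    })
    # Each entry in result["metrics"] then carries per-user totals, roughly:
    # {"user_id": "123", "user_name": "Ada Example", "shift_count": 4,
    #  "days_on_call": 9, "total_hours": 96.0, "primary_shifts": 3, ...}
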
+ @mcp.tool()
+ async def get_oncall_handoff_summary(
+ team_ids: Annotated[str, Field(description="Comma-separated list of team IDs to filter schedules (optional)")] = "",
+ schedule_ids: Annotated[str, Field(description="Comma-separated list of schedule IDs (optional)")] = "",
+ timezone: Annotated[str, Field(description="Timezone to use for display and filtering (e.g., 'America/Los_Angeles', 'Europe/London', 'Asia/Tokyo'). IMPORTANT: If user mentions a city, location, or region (e.g., 'Toronto', 'APAC', 'my time'), infer the appropriate IANA timezone. Defaults to UTC if not specified.")] = "UTC",
+ filter_by_region: Annotated[bool, Field(description="If True, only show on-call for people whose shifts are during business hours (9am-5pm) in the specified timezone. Defaults to False.")] = False,
+ include_incidents: Annotated[bool, Field(description="If True, fetch incidents for each shift (slower). If False, only show on-call info (faster). Defaults to False for better performance.")] = False
+ ) -> dict:
+ """
+ Get current on-call handoff summary. Shows who's currently on-call and who's next.
+ Optionally fetch incidents (set include_incidents=True, but slower).
+
+ Timezone handling: If user mentions their location/timezone, infer it (e.g., "Toronto" → "America/Toronto",
+ "my time" → ask clarifying question or use a common timezone).
+
+ Regional filtering: Use timezone + filter_by_region=True to see only people on-call
+ during business hours in that region (e.g., timezone='Asia/Tokyo', filter_by_region=True
+ shows only APAC on-call during APAC business hours).
+
+ Performance: By default, incidents are NOT fetched for faster response. Set include_incidents=True
+ to fetch incidents for each shift (slower, may timeout with many schedules).
+
+ Useful for:
+ - Quick on-call status checks
+ - Daily handoff meetings
+ - Regional on-call status (APAC, EU, Americas)
+ - Team coordination across timezones
+ """
+ try:
+ from datetime import datetime, timedelta
+ from zoneinfo import ZoneInfo
+
+ # Validate and set timezone
+ try:
+ tz = ZoneInfo(timezone)
+ except Exception:
+ tz = ZoneInfo("UTC") # Fallback to UTC if invalid timezone
+
+ now = datetime.now(tz)
+
+ def convert_to_timezone(iso_string: str) -> str:
+ """Convert ISO timestamp to target timezone."""
+ if not iso_string:
+ return iso_string
+ try:
+ dt = datetime.fromisoformat(iso_string.replace("Z", "+00:00"))
+ dt_converted = dt.astimezone(tz)
+ return dt_converted.isoformat()
+ except (ValueError, AttributeError):
+ return iso_string # Return original if conversion fails
+
+ # Fetch schedules with team info (with pagination)
+ all_schedules = []
+ page = 1
+ max_pages = 5 # Schedules shouldn't have many pages
+
+ while page <= max_pages:
+ schedules_response = await make_authenticated_request("GET", "/v1/schedules", params={"page[size]": 100, "page[number]": page})
+ if not schedules_response:
+ return MCPError.tool_error("Failed to fetch schedules - no response from API", "execution_error")
+
+ if schedules_response.status_code != 200:
+ return MCPError.tool_error(
+ f"Failed to fetch schedules - API returned status {schedules_response.status_code}",
+ "execution_error",
+ details={"status_code": schedules_response.status_code}
+ )
+
+ schedules_data = schedules_response.json()
+ page_schedules = schedules_data.get("data", [])
+
+ if not page_schedules:
+ break
+
+ all_schedules.extend(page_schedules)
+
+ # Check if there are more pages
+ meta = schedules_data.get("meta", {})
+ total_pages = meta.get("total_pages", 1)
+
+ if page >= total_pages:
+ break
+
+ page += 1
+
+ # Build team mapping
+ team_ids_set = set()
+ for schedule in all_schedules:
+ owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+ team_ids_set.update(owner_group_ids)
+
+ teams_map = {}
+ if team_ids_set:
+ teams_response = await make_authenticated_request("GET", "/v1/teams", params={"page[size]": 100})
+ if teams_response and teams_response.status_code == 200:
+ teams_data = teams_response.json()
+ for team in teams_data.get("data", []):
+ teams_map[team.get("id")] = team
+
+ # Filter schedules
+ target_schedules = []
+ team_filter = [tid.strip() for tid in team_ids.split(",") if tid.strip()] if team_ids else []
+ schedule_filter = [sid.strip() for sid in schedule_ids.split(",") if sid.strip()] if schedule_ids else []
+
+ for schedule in all_schedules:
+ schedule_id = schedule.get("id")
+ owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+
+ # Apply filters
+ if schedule_filter and schedule_id not in schedule_filter:
+ continue
+ if team_filter and not any(str(tgid) in team_filter for tgid in owner_group_ids):
+ continue
+
+ target_schedules.append(schedule)
+
+ # Get current and upcoming shifts for each schedule
+ handoff_data = []
+ for schedule in target_schedules:
+ schedule_id = schedule.get("id")
+ schedule_attrs = schedule.get("attributes", {})
+ schedule_name = schedule_attrs.get("name", "Unknown Schedule")
+ owner_group_ids = schedule_attrs.get("owner_group_ids", [])
+
+ # Get team info
+ team_name = "No Team"
+ if owner_group_ids:
+ team_id = owner_group_ids[0]
+ team_attrs = teams_map.get(team_id, {}).get("attributes", {})
+ team_name = team_attrs.get("name", "Unknown Team")
+
+ # Query shifts for this schedule
+ shifts_response = await make_authenticated_request(
+ "GET",
+ "/v1/shifts",
+ params={
+ "schedule_ids[]": [schedule_id],
+ "filter[starts_at][gte]": (now - timedelta(days=1)).isoformat(),
+ "filter[starts_at][lte]": (now + timedelta(days=7)).isoformat(),
+ "include": "user,on_call_role",
+ "page[size]": 50
+ }
+ )
+
+ if not shifts_response:
+ continue
+
+ shifts_data = shifts_response.json()
+ shifts = shifts_data.get("data", [])
+ included = shifts_data.get("included", [])
+
+ # Build user and role maps
+ users_map = {}
+ roles_map = {}
+ for resource in included:
+ if resource.get("type") == "users":
+ users_map[resource.get("id")] = resource
+ elif resource.get("type") == "on_call_roles":
+ roles_map[resource.get("id")] = resource
+
+ # Find current and next shifts
+ current_shift = None
+ next_shift = None
+
+ for shift in sorted(shifts, key=lambda s: s.get("attributes", {}).get("starts_at", "")):
+ attrs = shift.get("attributes", {})
+ starts_at_str = attrs.get("starts_at")
+ ends_at_str = attrs.get("ends_at")
+
+ if not starts_at_str or not ends_at_str:
+ continue
+
+ try:
+ starts_at = datetime.fromisoformat(starts_at_str.replace("Z", "+00:00"))
+ ends_at = datetime.fromisoformat(ends_at_str.replace("Z", "+00:00"))
+
+ # Current shift: ongoing now
+ if starts_at <= now <= ends_at:
+ current_shift = shift
+ # Next shift: starts after now and no current shift found yet
+ elif starts_at > now and not next_shift:
+ next_shift = shift
+
+ except (ValueError, AttributeError):
+ continue
+
+ # Build response for this schedule
+ schedule_info = {
+ "schedule_id": schedule_id,
+ "schedule_name": schedule_name,
+ "team_name": team_name,
+ "current_oncall": None,
+ "next_oncall": None
+ }
+
+ if current_shift:
+ current_attrs = current_shift.get("attributes", {})
+ current_rels = current_shift.get("relationships", {})
+ user_data = (current_rels.get("user", {}).get("data") or {})
+ user_id = user_data.get("id")
+ role_data = (current_rels.get("on_call_role", {}).get("data") or {})
+ role_id = role_data.get("id")
+
+ user_name = "Unknown"
+ if user_id and user_id in users_map:
+ user_attrs = users_map[user_id].get("attributes", {})
+ user_name = user_attrs.get("full_name") or user_attrs.get("email", "Unknown")
+
+ role_name = "Unknown Role"
+ if role_id and role_id in roles_map:
+ role_attrs = roles_map[role_id].get("attributes", {})
+ role_name = role_attrs.get("name", "Unknown Role")
+
+ schedule_info["current_oncall"] = {
+ "user_name": user_name,
+ "user_id": user_id,
+ "role": role_name,
+ "starts_at": convert_to_timezone(current_attrs.get("starts_at")),
+ "ends_at": convert_to_timezone(current_attrs.get("ends_at")),
+ "is_override": current_attrs.get("is_override", False)
+ }
+
+ if next_shift:
+ next_attrs = next_shift.get("attributes", {})
+ next_rels = next_shift.get("relationships", {})
+ user_data = (next_rels.get("user", {}).get("data") or {})
+ user_id = user_data.get("id")
+ role_data = (next_rels.get("on_call_role", {}).get("data") or {})
+ role_id = role_data.get("id")
+
+ user_name = "Unknown"
+ if user_id and user_id in users_map:
+ user_attrs = users_map[user_id].get("attributes", {})
+ user_name = user_attrs.get("full_name") or user_attrs.get("email", "Unknown")
+
+ role_name = "Unknown Role"
+ if role_id and role_id in roles_map:
+ role_attrs = roles_map[role_id].get("attributes", {})
+ role_name = role_attrs.get("name", "Unknown Role")
+
+ schedule_info["next_oncall"] = {
+ "user_name": user_name,
+ "user_id": user_id,
+ "role": role_name,
+ "starts_at": convert_to_timezone(next_attrs.get("starts_at")),
+ "ends_at": convert_to_timezone(next_attrs.get("ends_at")),
+ "is_override": next_attrs.get("is_override", False)
+ }
+
+ handoff_data.append(schedule_info)
+
+ # Filter by region if requested
+ if filter_by_region:
+ # Define business hours (9am-5pm) in the target timezone
+ business_start_hour = 9
+ business_end_hour = 17
+
+ # Create datetime objects for today's business hours in target timezone
+ today_business_start = now.replace(hour=business_start_hour, minute=0, second=0, microsecond=0)
+ today_business_end = now.replace(hour=business_end_hour, minute=0, second=0, microsecond=0)
+
+ # Filter schedules where current shift overlaps with business hours
+ filtered_data = []
+ for schedule_info in handoff_data:
+ current_oncall = schedule_info.get("current_oncall")
+ if current_oncall:
+ # Parse shift times (already in target timezone)
+ shift_start_str = current_oncall.get("starts_at")
+ shift_end_str = current_oncall.get("ends_at")
+
+ if shift_start_str and shift_end_str:
+ try:
+ shift_start = datetime.fromisoformat(shift_start_str.replace("Z", "+00:00"))
+ shift_end = datetime.fromisoformat(shift_end_str.replace("Z", "+00:00"))
+
+ # Check if shift overlaps with today's business hours
+ # Shift overlaps if: shift_start < business_end AND shift_end > business_start
+ if shift_start < today_business_end and shift_end > today_business_start:
+ filtered_data.append(schedule_info)
+ except (ValueError, AttributeError):
+ # Skip if we can't parse times
+ continue
+
+ handoff_data = filtered_data
+
+ # Fetch incidents for each current shift (only if requested)
+ if include_incidents:
+ for schedule_info in handoff_data:
+ current_oncall = schedule_info.get("current_oncall")
+ if current_oncall:
+ shift_start = current_oncall["starts_at"]
+ shift_end = current_oncall["ends_at"]
+
+ incidents_result = await _fetch_shift_incidents_internal(
+ start_time=shift_start,
+ end_time=shift_end,
+ schedule_ids="",
+ severity="",
+ status="",
+ tags=""
+ )
+
+ schedule_info["shift_incidents"] = incidents_result if incidents_result.get("success") else None
+ else:
+ schedule_info["shift_incidents"] = None
+ else:
+ # Skip incident fetching for better performance
+ for schedule_info in handoff_data:
+ schedule_info["shift_incidents"] = None
+
+ return {
+ "success": True,
+ "timestamp": now.isoformat(),
+ "timezone": timezone,
+ "schedules": handoff_data,
+ "summary": {
+ "total_schedules": len(handoff_data),
+ "schedules_with_current_oncall": sum(1 for s in handoff_data if s["current_oncall"]),
+ "schedules_with_next_oncall": sum(1 for s in handoff_data if s["next_oncall"]),
+ "total_incidents": sum(
+ s.get("shift_incidents", {}).get("summary", {}).get("total_incidents", 0)
+ for s in handoff_data
+ if s.get("shift_incidents")
+ )
+ }
+ }
+
+ except Exception as e:
+ import traceback
+ error_type, error_message = MCPError.categorize_error(e)
+ return MCPError.tool_error(
+ f"Failed to get on-call handoff summary: {error_message}",
+ error_type,
+ details={
+ "exception_type": type(e).__name__,
+ "exception_str": str(e),
+ "traceback": traceback.format_exc()
+ }
+ )
+
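A usage sketch for the handoff tool (hypothetical client again). The IANA timezone drives both timestamp display and, with filter_by_region=True, the 9am-5pm business-hours overlap check:

    # Who is on call right now during Tokyo business hours:
    await client.call_tool("get_oncall_handoff_summary", {
        "timezone": "Asia/Tokyo",
        "filter_by_region": True,
    })

    # Slower variant that also attaches each current shift's incidents:
    await client.call_tool("get_oncall_handoff_summary", {"include_incidents": True})
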
+ async def _fetch_shift_incidents_internal(
+ start_time: str,
+ end_time: str,
+ schedule_ids: str = "",
+ severity: str = "",
+ status: str = "",
+ tags: str = ""
+ ) -> dict:
+ """Internal helper to fetch incidents - used by both get_shift_incidents and get_oncall_handoff_summary."""
+ try:
+ from datetime import datetime
+
+ # Build query parameters
+ # Fetch incidents that:
+ # 1. Were created during the shift (created_at in range)
+ # 2. OR are currently active/unresolved (started but not resolved yet)
+ params = {
+ "page[size]": 100,
+ "sort": "-created_at"
+ }
+
+ # Get incidents created during shift OR still active
+ # We'll fetch all incidents and filter in-memory for active ones
+ params["filter[started_at][lte]"] = end_time # Started before shift ended
+
+ # Add severity filter if provided
+ if severity:
+ params["filter[severity]"] = severity.lower()
+
+ # Add status filter if provided
+ if status:
+ params["filter[status]"] = status.lower()
+
+ # Add tags filter if provided
+ if tags:
+ tag_list = [t.strip() for t in tags.split(",") if t.strip()]
+ if tag_list:
+ params["filter[tags][]"] = tag_list
+
+ # Query incidents with pagination
+ all_incidents = []
+ page = 1
+ max_pages = 10 # Safety limit to prevent infinite loops
+
+ while page <= max_pages:
+ params["page[number]"] = page
+ incidents_response = await make_authenticated_request("GET", "/v1/incidents", params=params)
+
+ if not incidents_response:
+ return MCPError.tool_error("Failed to fetch incidents - no response from API", "execution_error")
+
+ if incidents_response.status_code != 200:
+ return MCPError.tool_error(
+ f"Failed to fetch incidents - API returned status {incidents_response.status_code}",
+ "execution_error",
+ details={"status_code": incidents_response.status_code, "time_range": f"{start_time} to {end_time}"}
+ )
+
+ incidents_data = incidents_response.json()
+ page_incidents = incidents_data.get("data", [])
+
+ if not page_incidents:
+ break # No more data
+
+ all_incidents.extend(page_incidents)
+
+ # Check if there are more pages
+ meta = incidents_data.get("meta", {})
+ total_pages = meta.get("total_pages", 1)
+
+ if page >= total_pages:
+ break # Reached the last page
+
+ page += 1
+
+ # Filter incidents to include:
+ # 1. Created during shift (created_at between start_time and end_time)
+ # 2. Currently active (started but not resolved, regardless of when created)
+ from datetime import timezone as dt_timezone
+ shift_start_dt = datetime.fromisoformat(start_time.replace("Z", "+00:00"))
+ shift_end_dt = datetime.fromisoformat(end_time.replace("Z", "+00:00"))
+ now_dt = datetime.now(dt_timezone.utc)
+
+ # Format incidents for handoff summary
+ incidents_summary = []
+ for incident in all_incidents:
+ incident_id = incident.get("id")
+ attrs = incident.get("attributes", {})
+
+ # Check if incident is relevant to this shift
+ created_at = attrs.get("created_at")
+ started_at = attrs.get("started_at")
+ resolved_at = attrs.get("resolved_at")
+
+ # Parse timestamps
+ try:
+ created_dt = datetime.fromisoformat(created_at.replace("Z", "+00:00")) if created_at else None
+ started_dt = datetime.fromisoformat(started_at.replace("Z", "+00:00")) if started_at else None
+ resolved_dt = datetime.fromisoformat(resolved_at.replace("Z", "+00:00")) if resolved_at else None
+ except (ValueError, AttributeError):
+ continue # Skip if we can't parse dates
+
+ # Include incident if:
+ # 1. Created during shift
+ # 2. Started during shift
+ # 3. Resolved during shift
+ # 4. Currently active (not resolved and started before now)
+ include_incident = False
+
+ if created_dt and shift_start_dt <= created_dt <= shift_end_dt:
+ include_incident = True # Created during shift
+
+ if started_dt and shift_start_dt <= started_dt <= shift_end_dt:
+ include_incident = True # Started during shift
+
+ if resolved_dt and shift_start_dt <= resolved_dt <= shift_end_dt:
+ include_incident = True # Resolved during shift
+
+ if not resolved_dt and started_dt and started_dt <= now_dt:
+ include_incident = True # Currently active
+
+ if not include_incident:
+ continue
+
+ # Calculate duration if resolved
+ duration_minutes = None
+ if started_dt and resolved_dt:
+ duration_minutes = int((resolved_dt - started_dt).total_seconds() / 60)
+
+ # Build narrative summary
+ narrative_parts = []
+
+ # What happened
+ title = attrs.get("title", "Untitled Incident")
+ severity = attrs.get("severity", "unknown")
+ narrative_parts.append(f"[{severity.upper()}] {title}")
+
+ # When and duration
+ if started_at:
+ narrative_parts.append(f"Started at {started_at}")
+ if resolved_at:
+ narrative_parts.append(f"Resolved at {resolved_at}")
+ if duration_minutes:
+ narrative_parts.append(f"Duration: {duration_minutes} minutes")
+ elif attrs.get("status"):
+ narrative_parts.append(f"Status: {attrs.get('status')}")
+
+ # What was the issue
+ if attrs.get("summary"):
+ narrative_parts.append(f"Details: {attrs.get('summary')}")
+
+ # Impact
+ if attrs.get("customer_impact_summary"):
+ narrative_parts.append(f"Impact: {attrs.get('customer_impact_summary')}")
+
+ # Resolution (if available)
+ if attrs.get("mitigation"):
+ narrative_parts.append(f"Resolution: {attrs.get('mitigation')}")
+ elif attrs.get("action_items_count") and attrs.get("action_items_count") > 0:
+ narrative_parts.append(f"Action items created: {attrs.get('action_items_count')}")
+
+ narrative = " | ".join(narrative_parts)
+
+ incidents_summary.append({
+ "incident_id": incident_id,
+ "title": attrs.get("title", "Untitled Incident"),
+ "severity": attrs.get("severity"),
+ "status": attrs.get("status"),
+ "started_at": started_at,
+ "resolved_at": resolved_at,
+ "duration_minutes": duration_minutes,
+ "summary": attrs.get("summary"),
+ "impact": attrs.get("customer_impact_summary"),
+ "mitigation": attrs.get("mitigation"),
+ "narrative": narrative,
+ "incident_url": attrs.get("incident_url")
+ })
+
+ # Group by severity
+ by_severity = {}
+ for inc in incidents_summary:
+ sev = inc["severity"] or "unknown"
+ if sev not in by_severity:
+ by_severity[sev] = []
+ by_severity[sev].append(inc)
+
+ # Calculate statistics
+ total_incidents = len(incidents_summary)
+ resolved_count = sum(1 for inc in incidents_summary if inc["resolved_at"])
+ ongoing_count = total_incidents - resolved_count
+
+ avg_resolution_time = None
+ durations = [inc["duration_minutes"] for inc in incidents_summary if inc["duration_minutes"]]
+ if durations:
+ avg_resolution_time = int(sum(durations) / len(durations))
+
+ return {
+ "success": True,
+ "period": {
+ "start_time": start_time,
+ "end_time": end_time
+ },
+ "summary": {
+ "total_incidents": total_incidents,
+ "resolved": resolved_count,
+ "ongoing": ongoing_count,
+ "average_resolution_minutes": avg_resolution_time,
+ "by_severity": {k: len(v) for k, v in by_severity.items()}
+ },
+ "incidents": incidents_summary
+ }
+
+ except Exception as e:
+ import traceback
+ error_type, error_message = MCPError.categorize_error(e)
+ return MCPError.tool_error(
+ f"Failed to get shift incidents: {error_message}",
+ error_type,
+ details={
+ "params": {"start_time": start_time, "end_time": end_time},
+ "exception_type": type(e).__name__,
+ "exception_str": str(e),
+ "traceback": traceback.format_exc()
+ }
+ )
+
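The helper's four include_incident branches reduce to "touched the shift window, or still open". The same logic, condensed for reference (datetime arguments may be None, as above):

    def relevant(created, started, resolved, shift_start, shift_end, now):
        in_window = lambda dt: dt is not None and shift_start <= dt <= shift_end
        still_open = resolved is None and started is not None and started <= now
        return in_window(created) or in_window(started) or in_window(resolved) or still_open
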
+ @mcp.tool()
+ async def get_shift_incidents(
+ start_time: Annotated[str, Field(description="Start time for incident search (ISO 8601 format, e.g., '2025-10-01T00:00:00Z')")],
+ end_time: Annotated[str, Field(description="End time for incident search (ISO 8601 format, e.g., '2025-10-01T23:59:59Z')")],
+ schedule_ids: Annotated[str, Field(description="Comma-separated list of schedule IDs to filter incidents (optional)")] = "",
+ severity: Annotated[str, Field(description="Filter by severity: 'critical', 'high', 'medium', 'low' (optional)")] = "",
+ status: Annotated[str, Field(description="Filter by status: 'started', 'detected', 'acknowledged', 'investigating', 'identified', 'monitoring', 'resolved', 'cancelled' (optional)")] = "",
+ tags: Annotated[str, Field(description="Comma-separated list of tag slugs to filter incidents (optional)")] = ""
+ ) -> dict:
+ """
+ Get incidents and alerts that occurred during a specific shift or time period.
+
+ Useful for:
+ - Shift handoff summaries showing what happened during the shift
+ - Post-shift debriefs and reporting
+ - Incident analysis by time period
+ - Understanding team workload during specific shifts
+
+ Returns incident details including severity, status, duration, and basic summary.
+ """
+ return await _fetch_shift_incidents_internal(start_time, end_time, schedule_ids, severity, status, tags)
+
  # Add MCP resources for incidents and teams
  @mcp.resource("incident://{incident_id}")
  async def get_incident_resource(incident_id: str):
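The public tool is a thin wrapper over the helper above, so its return shape is the helper's. Sketch (hypothetical client):

    report = await client.call_tool("get_shift_incidents", {
        "start_time": "2025-10-01T00:00:00Z",
        "end_time": "2025-10-01T23:59:59Z",
        "severity": "critical",
    })
    # report["summary"] -> {"total_incidents": ..., "resolved": ..., "ongoing": ...,
    #                       "average_resolution_minutes": ..., "by_severity": {...}}
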
@@ -988,6 +1976,66 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  "description": "Page number to retrieve"
  }
  })
+
+ # Add sparse fieldsets for alerts endpoints to reduce payload size
+ if "alert" in path.lower():
+ # Add fields[alerts] parameter with essential fields only - make it required with default
+ operation["parameters"].append({
+ "name": "fields[alerts]",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "id,summary,status,started_at,ended_at,short_id,alert_urgency_id,source,noise",
+ "description": "Comma-separated list of alert fields to include (reduces payload size)"
+ }
+ })
+
+ # Add include parameter for alerts endpoints to minimize relationships
+ if "alert" in path.lower():
+ # Check if include parameter already exists
+ include_param_exists = any(param.get("name") == "include" for param in operation["parameters"])
+ if not include_param_exists:
+ operation["parameters"].append({
+ "name": "include",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "",
+ "description": "Related resources to include (empty for minimal payload)"
+ }
+ })
+
+ # Add sparse fieldsets for incidents endpoints to reduce payload size
+ if "incident" in path.lower():
+ # Add fields[incidents] parameter with essential fields only - make it required with default
+ operation["parameters"].append({
+ "name": "fields[incidents]",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
+ "description": "Comma-separated list of incident fields to include (reduces payload size)"
+ }
+ })
+
+ # Add include parameter for incidents endpoints to minimize relationships
+ if "incident" in path.lower():
+ # Check if include parameter already exists
+ include_param_exists = any(param.get("name") == "include" for param in operation["parameters"])
+ if not include_param_exists:
+ operation["parameters"].append({
+ "name": "include",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "",
+ "description": "Related resources to include (empty for minimal payload)"
+ }
+ })
 
  # Also clean up any remaining broken references in components
  if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
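The practical effect is that every generated incident/alert tool now sends a sparse fieldset and an empty include by default. A sketch of the equivalent raw request (assuming an httpx client; the generated tooling may differ):

    import httpx

    async def list_incidents(client: httpx.AsyncClient):
        # Defaults injected by _filter_openapi_spec keep the payload small.
        return await client.get("/v1/incidents", params={
            "fields[incidents]": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
            "include": "",
        })
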
@@ -1002,6 +2050,52 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  logger.warning(f"Removing schema with broken references: {schema_name}")
  del schemas[schema_name]
 
+ # Clean up any operation-level references to removed schemas
+ removed_schemas = set()
+ if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
+ removed_schemas = {"new_workflow", "update_workflow", "workflow", "workflow_task",
+ "workflow_response", "workflow_list", "new_workflow_task",
+ "update_workflow_task", "workflow_task_response", "workflow_task_list"}
+
+ for path, path_item in filtered_spec.get("paths", {}).items():
+ for method, operation in path_item.items():
+ if method.lower() not in ["get", "post", "put", "delete", "patch"]:
+ continue
+
+ # Clean request body references
+ if "requestBody" in operation:
+ request_body = operation["requestBody"]
+ if "content" in request_body:
+ for content_type, content_info in request_body["content"].items():
+ if "schema" in content_info and "$ref" in content_info["schema"]:
+ ref_path = content_info["schema"]["$ref"]
+ schema_name = ref_path.split("/")[-1]
+ if schema_name in removed_schemas:
+ # Replace with generic object schema
+ content_info["schema"] = {
+ "type": "object",
+ "description": "Request data for this endpoint",
+ "additionalProperties": True
+ }
+ logger.debug(f"Cleaned broken reference in {method.upper()} {path} request body: {ref_path}")
+
+ # Clean response references
+ if "responses" in operation:
+ for status_code, response in operation["responses"].items():
+ if "content" in response:
+ for content_type, content_info in response["content"].items():
+ if "schema" in content_info and "$ref" in content_info["schema"]:
+ ref_path = content_info["schema"]["$ref"]
+ schema_name = ref_path.split("/")[-1]
+ if schema_name in removed_schemas:
+ # Replace with generic object schema
+ content_info["schema"] = {
+ "type": "object",
+ "description": "Response data from this endpoint",
+ "additionalProperties": True
+ }
+ logger.debug(f"Cleaned broken reference in {method.upper()} {path} response: {ref_path}")
+
  return filtered_spec
 
 
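Before/after view of what the cleanup pass does to one operation (values from the diff):

    # Before: a request body pointing at a removed workflow schema
    content_info = {"schema": {"$ref": "#/components/schemas/new_workflow"}}

    # After: the $ref is swapped for a permissive generic object schema
    content_info["schema"] = {
        "type": "object",
        "description": "Request data for this endpoint",
        "additionalProperties": True,
    }
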
@@ -1013,8 +2107,23 @@ def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
  broken_refs = [
  "incident_trigger_params",
  "new_workflow",
- "update_workflow",
- "workflow"
+ "update_workflow",
+ "workflow",
+ "new_workflow_task",
+ "update_workflow_task",
+ "workflow_task",
+ "workflow_task_response",
+ "workflow_task_list",
+ "workflow_response",
+ "workflow_list",
+ "workflow_custom_field_selection_response",
+ "workflow_custom_field_selection_list",
+ "workflow_form_field_condition_response",
+ "workflow_form_field_condition_list",
+ "workflow_group_response",
+ "workflow_group_list",
+ "workflow_run_response",
+ "workflow_runs_list"
  ]
  if any(broken_ref in ref_path for broken_ref in broken_refs):
  return True