rootly-mcp-server 2.0.10__py3-none-any.whl → 2.0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rootly_mcp_server/__init__.py +1 -1
- rootly_mcp_server/client.py +1 -1
- rootly_mcp_server/server.py +392 -18
- rootly_mcp_server/smart_utils.py +519 -0
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.12.dist-info}/METADATA +52 -10
- rootly_mcp_server-2.0.12.dist-info/RECORD +12 -0
- rootly_mcp_server/routemap_server.py +0 -206
- rootly_mcp_server/test_client.py +0 -150
- rootly_mcp_server-2.0.10.dist-info/RECORD +0 -13
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.12.dist-info}/WHEEL +0 -0
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.12.dist-info}/entry_points.txt +0 -0
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.12.dist-info}/licenses/LICENSE +0 -0
rootly_mcp_server/__init__.py
CHANGED
rootly_mcp_server/client.py
CHANGED
@@ -121,7 +121,7 @@ class RootlyClient:
             # Add response details if available
             if hasattr(e, "response") and e.response is not None:
                 try:
-                    error_response["status_code"] = e.response.status_code
+                    error_response["status_code"] = str(e.response.status_code)
                     error_response["response_text"] = e.response.text
                 except Exception:
                     pass
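
The only functional change in client.py casts the HTTP status code to a string before storing it on the error payload, presumably so the field has a consistent JSON-friendly type. A minimal sketch of the resulting payload shape; only status_code and response_text are confirmed by the hunk above, the other details are illustrative:

    # Illustrative only: error_response shape after this change.
    error_response = {
        "status_code": str(404),            # now always a string, e.g. "404"
        "response_text": '{"errors": []}',  # raw body of the failed response
    }
    assert isinstance(error_response["status_code"], str)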
rootly_mcp_server/server.py
CHANGED
@@ -19,13 +19,104 @@ from fastmcp import FastMCP
 from pydantic import Field
 
 from .utils import sanitize_parameters_in_spec
+from .smart_utils import TextSimilarityAnalyzer, SolutionExtractor
 
 # Set up logger
 logger = logging.getLogger(__name__)
 
+
+class MCPError:
+    """Enhanced error handling for MCP protocol compliance."""
+
+    @staticmethod
+    def protocol_error(code: int, message: str, data: Optional[Dict] = None):
+        """Create a JSON-RPC protocol-level error response."""
+        error_response = {
+            "jsonrpc": "2.0",
+            "error": {
+                "code": code,
+                "message": message
+            }
+        }
+        if data:
+            error_response["error"]["data"] = data
+        return error_response
+
+    @staticmethod
+    def tool_error(error_message: str, error_type: str = "execution_error", details: Optional[Dict] = None):
+        """Create a tool-level error response (returned as successful tool result)."""
+        error_response = {
+            "error": True,
+            "error_type": error_type,
+            "message": error_message
+        }
+        if details:
+            error_response["details"] = details
+        return error_response
+
+    @staticmethod
+    def categorize_error(exception: Exception) -> tuple[str, str]:
+        """Categorize an exception into error type and appropriate message."""
+        error_str = str(exception)
+        exception_type = type(exception).__name__
+
+        # Authentication/Authorization errors
+        if any(keyword in error_str.lower() for keyword in ["401", "unauthorized", "authentication", "token", "forbidden"]):
+            return "authentication_error", f"Authentication failed: {error_str}"
+
+        # Network/Connection errors
+        if any(keyword in exception_type.lower() for keyword in ["connection", "timeout", "network"]):
+            return "network_error", f"Network error: {error_str}"
+
+        # HTTP errors
+        if "40" in error_str[:10]:  # 4xx client errors
+            return "client_error", f"Client error: {error_str}"
+        elif "50" in error_str[:10]:  # 5xx server errors
+            return "server_error", f"Server error: {error_str}"
+
+        # Validation errors
+        if any(keyword in exception_type.lower() for keyword in ["validation", "pydantic", "field"]):
+            return "validation_error", f"Input validation error: {error_str}"
+
+        # Generic execution errors
+        return "execution_error", f"Tool execution error: {error_str}"
+
 # Default Swagger URL
 SWAGGER_URL = "https://rootly-heroku.s3.amazonaws.com/swagger/v1/swagger.json"
 
+# Default allowed API paths
+def _generate_recommendation(solution_data: dict) -> str:
+    """Generate a high-level recommendation based on solution analysis."""
+    solutions = solution_data.get("solutions", [])
+    avg_time = solution_data.get("average_resolution_time")
+
+    if not solutions:
+        return "No similar incidents found. This may be a novel issue requiring escalation."
+
+    recommendation_parts = []
+
+    # Time expectation
+    if avg_time:
+        if avg_time < 1:
+            recommendation_parts.append("Similar incidents typically resolve quickly (< 1 hour).")
+        elif avg_time > 4:
+            recommendation_parts.append("Similar incidents typically require more time (> 4 hours).")
+
+    # Top solution
+    if solutions:
+        top_solution = solutions[0]
+        if top_solution.get("suggested_actions"):
+            actions = top_solution["suggested_actions"][:2]  # Top 2 actions
+            recommendation_parts.append(f"Consider trying: {', '.join(actions)}")
+
+    # Pattern insights
+    patterns = solution_data.get("common_patterns", [])
+    if patterns:
+        recommendation_parts.append(f"Common patterns: {patterns[0]}")
+
+    return " ".join(recommendation_parts) if recommendation_parts else "Review similar incidents above for resolution guidance."
+
+
 # Default allowed API paths
 DEFAULT_ALLOWED_PATHS = [
     "/incidents/{incident_id}/alerts",
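
The new MCPError helper separates JSON-RPC protocol errors (protocol_error) from tool-level failures that are returned as ordinary tool results (tool_error), while categorize_error maps exceptions onto a small taxonomy by keyword-matching the message and exception type. A rough sketch of how the pieces compose, using the definitions from the hunk above with an invented sample exception:

    # Sketch: exercising MCPError as defined above.
    try:
        raise ConnectionError("connection refused by api.rootly.com")
    except Exception as exc:
        error_type, message = MCPError.categorize_error(exc)
        # "ConnectionError" matches the network keyword check, so:
        #   error_type == "network_error"
        payload = MCPError.tool_error(message, error_type)
        #   payload == {"error": True, "error_type": "network_error", "message": "Network error: ..."}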
@@ -230,7 +321,7 @@ def create_rootly_mcp_server(
     # By default, all routes become tools which is what we want
     mcp = FastMCP.from_openapi(
         openapi_spec=filtered_spec,
-        client=http_client,
+        client=http_client.client,
         name=name,
         timeout=30.0,
         tags={"rootly", "incident-management"},
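
FastMCP.from_openapi is now handed the underlying HTTP client rather than the Rootly wrapper object, which implies the wrapper exposes the raw async client as a .client attribute. A simplified sketch of that arrangement, assuming httpx and an invented constructor (the real class lives in rootly_mcp_server/client.py):

    import httpx

    class RootlyClient:  # simplified sketch, not the package's actual implementation
        def __init__(self, base_url: str, api_token: str):
            # This attribute is what FastMCP.from_openapi(..., client=http_client.client, ...) receives.
            self.client = httpx.AsyncClient(
                base_url=base_url,
                headers={"Authorization": f"Bearer {api_token}"},
            )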
@@ -289,7 +380,7 @@ def create_rootly_mcp_server(
         query: Annotated[str, Field(description="Search query to filter incidents by title/summary")] = "",
         page_size: Annotated[int, Field(description="Number of results per page (max: 20)", ge=1, le=20)] = 10,
         page_number: Annotated[int, Field(description="Page number to retrieve (use 0 for all pages)", ge=0)] = 1,
-        max_results: Annotated[int, Field(description="Maximum total results when fetching all pages (ignored if page_number > 0)", ge=1, le=
+        max_results: Annotated[int, Field(description="Maximum total results when fetching all pages (ignored if page_number > 0)", ge=1, le=10)] = 5,
     ) -> dict:
         """
         Search incidents with flexible pagination control.
@@ -312,7 +403,8 @@ def create_rootly_mcp_server(
                 response.raise_for_status()
                 return response.json()
             except Exception as e:
-
+                error_type, error_message = MCPError.categorize_error(e)
+                return MCPError.tool_error(error_message, error_type)
 
         # Multi-page mode (page_number = 0)
         all_incidents = []
@@ -362,9 +454,11 @@ def create_rootly_mcp_server(
                     break
 
             except Exception as e:
-                # Re-raise authentication or critical errors
+                # Re-raise authentication or critical errors for immediate handling
                 if "401" in str(e) or "Unauthorized" in str(e) or "authentication" in str(e).lower():
-
+                    error_type, error_message = MCPError.categorize_error(e)
+                    return MCPError.tool_error(error_message, error_type)
+                # For other errors, break loop and return partial results
                 break
 
         # Limit to max_results
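
Inside the multi-page loop, authentication failures now return an MCPError.tool_error payload immediately, while other exceptions break the loop so partial results can still be returned. A self-contained sketch of how a caller can tell an error payload from a normal result; the sample payload mirrors the tool_error shape above, the helper name is invented:

    def handle_tool_result(result: dict) -> str:
        """Distinguish MCPError.tool_error payloads from normal tool results."""
        if result.get("error") is True:
            return f"tool failed ({result['error_type']}): {result['message']}"
        return "ok"

    print(handle_tool_result({
        "error": True,
        "error_type": "authentication_error",
        "message": "Authentication failed: 401 Unauthorized",
    }))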
@@ -382,7 +476,287 @@ def create_rootly_mcp_server(
                 }
             }
         except Exception as e:
-
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(error_message, error_type)
+
+    # Initialize smart analysis tools
+    similarity_analyzer = TextSimilarityAnalyzer()
+    solution_extractor = SolutionExtractor()
+
+    @mcp.tool()
+    async def find_related_incidents(
+        incident_id: str,
+        similarity_threshold: Annotated[float, Field(description="Minimum similarity score (0.0-1.0)", ge=0.0, le=1.0)] = 0.3,
+        max_results: Annotated[int, Field(description="Maximum number of related incidents to return", ge=1, le=20)] = 5
+    ) -> dict:
+        """Find historically similar incidents to help with context and resolution strategies."""
+        try:
+            # Get the target incident details
+            target_response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
+            target_response.raise_for_status()
+            target_incident_data = target_response.json()
+            target_incident = target_incident_data.get("data", {})
+
+            if not target_incident:
+                return MCPError.tool_error("Incident not found", "not_found")
+
+            # Get historical incidents for comparison (resolved incidents from last 6 months)
+            historical_response = await make_authenticated_request("GET", "/v1/incidents", params={
+                "page[size]": 100,  # Get more incidents for better matching
+                "page[number]": 1,
+                "filter[status]": "resolved",  # Only look at resolved incidents
+                "include": ""
+            })
+            historical_response.raise_for_status()
+            historical_data = historical_response.json()
+            historical_incidents = historical_data.get("data", [])
+
+            # Filter out the target incident itself
+            historical_incidents = [inc for inc in historical_incidents if str(inc.get('id')) != str(incident_id)]
+
+            if not historical_incidents:
+                return {
+                    "related_incidents": [],
+                    "message": "No historical incidents found for comparison",
+                    "target_incident": {
+                        "id": incident_id,
+                        "title": target_incident.get("attributes", {}).get("title", "")
+                    }
+                }
+
+            # Calculate similarities
+            similar_incidents = similarity_analyzer.calculate_similarity(historical_incidents, target_incident)
+
+            # Filter by threshold and limit results
+            filtered_incidents = [
+                inc for inc in similar_incidents
+                if inc.similarity_score >= similarity_threshold
+            ][:max_results]
+
+            # Format response
+            related_incidents = []
+            for incident in filtered_incidents:
+                related_incidents.append({
+                    "incident_id": incident.incident_id,
+                    "title": incident.title,
+                    "similarity_score": round(incident.similarity_score, 3),
+                    "matched_services": incident.matched_services,
+                    "matched_keywords": incident.matched_keywords,
+                    "resolution_summary": incident.resolution_summary,
+                    "resolution_time_hours": incident.resolution_time_hours
+                })
+
+            return {
+                "target_incident": {
+                    "id": incident_id,
+                    "title": target_incident.get("attributes", {}).get("title", "")
+                },
+                "related_incidents": related_incidents,
+                "total_found": len(filtered_incidents),
+                "similarity_threshold": similarity_threshold,
+                "analysis_summary": f"Found {len(filtered_incidents)} similar incidents out of {len(historical_incidents)} historical incidents"
+            }
+
+        except Exception as e:
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(f"Failed to find related incidents: {error_message}", error_type)
+
+    @mcp.tool()
+    async def suggest_solutions(
+        incident_id: str = "",
+        incident_title: str = "",
+        incident_description: str = "",
+        max_solutions: Annotated[int, Field(description="Maximum number of solution suggestions", ge=1, le=10)] = 3
+    ) -> dict:
+        """Suggest solutions based on similar resolved incidents. Provide either incident_id OR title/description."""
+        try:
+            target_incident = {}
+
+            if incident_id:
+                # Get incident details by ID
+                response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
+                response.raise_for_status()
+                incident_data = response.json()
+                target_incident = incident_data.get("data", {})
+
+                if not target_incident:
+                    return MCPError.tool_error("Incident not found", "not_found")
+
+            elif incident_title or incident_description:
+                # Create synthetic incident for analysis
+                target_incident = {
+                    "id": "synthetic",
+                    "attributes": {
+                        "title": incident_title,
+                        "summary": incident_description,
+                        "description": incident_description
+                    }
+                }
+            else:
+                return MCPError.tool_error("Must provide either incident_id or incident_title/description", "validation_error")
+
+            # Get resolved incidents for solution mining
+            historical_response = await make_authenticated_request("GET", "/v1/incidents", params={
+                "page[size]": 150,  # Get more incidents for better solution matching
+                "page[number]": 1,
+                "filter[status]": "resolved",
+                "include": ""
+            })
+            historical_response.raise_for_status()
+            historical_data = historical_response.json()
+            historical_incidents = historical_data.get("data", [])
+
+            # Filter out target incident if it exists
+            if incident_id:
+                historical_incidents = [inc for inc in historical_incidents if str(inc.get('id')) != str(incident_id)]
+
+            if not historical_incidents:
+                return {
+                    "solutions": [],
+                    "message": "No historical resolved incidents found for solution mining"
+                }
+
+            # Find similar incidents
+            similar_incidents = similarity_analyzer.calculate_similarity(historical_incidents, target_incident)
+
+            # Filter to reasonably similar incidents (lower threshold for solution suggestions)
+            relevant_incidents = [inc for inc in similar_incidents if inc.similarity_score >= 0.2][:max_solutions * 2]
+
+            if not relevant_incidents:
+                return {
+                    "solutions": [],
+                    "message": "No sufficiently similar incidents found for solution suggestions",
+                    "suggestion": "This appears to be a unique incident. Consider escalating or consulting documentation."
+                }
+
+            # Extract solutions
+            solution_data = solution_extractor.extract_solutions(relevant_incidents)
+
+            # Format response
+            return {
+                "target_incident": {
+                    "id": incident_id or "synthetic",
+                    "title": target_incident.get("attributes", {}).get("title", incident_title),
+                    "description": target_incident.get("attributes", {}).get("summary", incident_description)
+                },
+                "solutions": solution_data["solutions"][:max_solutions],
+                "insights": {
+                    "common_patterns": solution_data["common_patterns"],
+                    "average_resolution_time_hours": solution_data["average_resolution_time"],
+                    "total_similar_incidents": solution_data["total_similar_incidents"]
+                },
+                "recommendation": _generate_recommendation(solution_data)
+            }
+
+        except Exception as e:
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(f"Failed to suggest solutions: {error_message}", error_type)
+
+    # Add MCP resources for incidents and teams
+    @mcp.resource("incident://{incident_id}")
+    async def get_incident_resource(incident_id: str):
+        """Expose incident details as an MCP resource for easy reference and context."""
+        try:
+            response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
+            response.raise_for_status()
+            incident_data = response.json()
+
+            # Format incident data as readable text
+            incident = incident_data.get("data", {})
+            attributes = incident.get("attributes", {})
+
+            text_content = f"""Incident #{incident_id}
+Title: {attributes.get('title', 'N/A')}
+Status: {attributes.get('status', 'N/A')}
+Severity: {attributes.get('severity', 'N/A')}
+Created: {attributes.get('created_at', 'N/A')}
+Updated: {attributes.get('updated_at', 'N/A')}
+Summary: {attributes.get('summary', 'N/A')}
+URL: {attributes.get('url', 'N/A')}"""
+
+            return {
+                "uri": f"incident://{incident_id}",
+                "name": f"Incident #{incident_id}",
+                "text": text_content,
+                "mimeType": "text/plain"
+            }
+        except Exception as e:
+            error_type, error_message = MCPError.categorize_error(e)
+            return {
+                "uri": f"incident://{incident_id}",
+                "name": f"Incident #{incident_id} (Error)",
+                "text": f"Error ({error_type}): {error_message}",
+                "mimeType": "text/plain"
+            }
+
+    @mcp.resource("team://{team_id}")
+    async def get_team_resource(team_id: str):
+        """Expose team details as an MCP resource for easy reference and context."""
+        try:
+            response = await make_authenticated_request("GET", f"/v1/teams/{team_id}")
+            response.raise_for_status()
+            team_data = response.json()
+
+            # Format team data as readable text
+            team = team_data.get("data", {})
+            attributes = team.get("attributes", {})
+
+            text_content = f"""Team #{team_id}
+Name: {attributes.get('name', 'N/A')}
+Color: {attributes.get('color', 'N/A')}
+Slug: {attributes.get('slug', 'N/A')}
+Created: {attributes.get('created_at', 'N/A')}
+Updated: {attributes.get('updated_at', 'N/A')}"""
+
+            return {
+                "uri": f"team://{team_id}",
+                "name": f"Team: {attributes.get('name', team_id)}",
+                "text": text_content,
+                "mimeType": "text/plain"
+            }
+        except Exception as e:
+            error_type, error_message = MCPError.categorize_error(e)
+            return {
+                "uri": f"team://{team_id}",
+                "name": f"Team #{team_id} (Error)",
+                "text": f"Error ({error_type}): {error_message}",
+                "mimeType": "text/plain"
+            }
+
+    @mcp.resource("rootly://incidents")
+    async def list_incidents_resource():
+        """List recent incidents as an MCP resource for quick reference."""
+        try:
+            response = await make_authenticated_request("GET", "/v1/incidents", params={
+                "page[size]": 10,
+                "page[number]": 1,
+                "include": ""
+            })
+            response.raise_for_status()
+            data = response.json()
+
+            incidents = data.get("data", [])
+            text_lines = ["Recent Incidents:\n"]
+
+            for incident in incidents:
+                attrs = incident.get("attributes", {})
+                text_lines.append(f"• #{incident.get('id', 'N/A')} - {attrs.get('title', 'N/A')} [{attrs.get('status', 'N/A')}]")
+
+            return {
+                "uri": "rootly://incidents",
+                "name": "Recent Incidents",
+                "text": "\n".join(text_lines),
+                "mimeType": "text/plain"
+            }
+        except Exception as e:
+            error_type, error_message = MCPError.categorize_error(e)
+            return {
+                "uri": "rootly://incidents",
+                "name": "Recent Incidents (Error)",
+                "text": f"Error ({error_type}): {error_message}",
+                "mimeType": "text/plain"
+            }
+
 
     # Log server creation (tool count will be shown when tools are accessed)
     logger.info("Created Rootly MCP Server successfully")
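
Both new tools fetch a page of resolved incidents, score them with TextSimilarityAnalyzer.calculate_similarity, and shape the result for the caller, while the three resources expose read-only plain-text views. A sketch of the response shape a client can expect from find_related_incidents, with keys taken from the hunk above and purely illustrative values:

    example_response = {
        "target_incident": {"id": "12345", "title": "Checkout latency spike"},
        "related_incidents": [
            {
                "incident_id": "11872",
                "title": "Checkout latency after deploy",
                "similarity_score": 0.742,
                "matched_services": ["checkout"],
                "matched_keywords": ["latency", "deploy"],
                "resolution_summary": "Rolled back the offending deploy",
                "resolution_time_hours": 1.5,
            }
        ],
        "total_found": 1,
        "similarity_threshold": 0.3,
        "analysis_summary": "Found 1 similar incidents out of 99 historical incidents",
    }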
@@ -422,18 +796,18 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
     current_dir = Path.cwd()
 
     # Check current directory first
-
-    if
-    logger.info(f"Found Swagger file at {
-    with open(
+    local_swagger_path = current_dir / "swagger.json"
+    if local_swagger_path.is_file():
+        logger.info(f"Found Swagger file at {local_swagger_path}")
+        with open(local_swagger_path, "r", encoding="utf-8") as f:
             return json.load(f)
 
     # Check parent directories
     for parent in current_dir.parents:
-
-        if
-        logger.info(f"Found Swagger file at {
-        with open(
+        parent_swagger_path = parent / "swagger.json"
+        if parent_swagger_path.is_file():
+            logger.info(f"Found Swagger file at {parent_swagger_path}")
+            with open(parent_swagger_path, "r", encoding="utf-8") as f:
                 return json.load(f)
 
     # If the file wasn't found, fetch it from the URL and save it
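
The rewritten lookup binds explicit Path variables in place of the truncated expressions removed above, making the resolution order: swagger.json in the current directory, then in each parent directory, then a download from SWAGGER_URL. A condensed sketch of that order; the helper name is invented and not part of the module:

    from pathlib import Path

    def resolve_swagger_path() -> Path | None:
        """Sketch of the lookup order used by _load_swagger_spec."""
        for directory in [Path.cwd(), *Path.cwd().parents]:
            candidate = directory / "swagger.json"
            if candidate.is_file():
                return candidate
        return None  # the real code falls back to _fetch_swagger_from_url()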
@@ -441,12 +815,12 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
     swagger_spec = _fetch_swagger_from_url()
 
     # Save the fetched spec to the current directory
-
-    logger.info(f"Saving Swagger file to {
+    save_swagger_path = current_dir / "swagger.json"
+    logger.info(f"Saving Swagger file to {save_swagger_path}")
     try:
-        with open(
+        with open(save_swagger_path, "w", encoding="utf-8") as f:
             json.dump(swagger_spec, f)
-        logger.info(f"Saved Swagger file to {
+        logger.info(f"Saved Swagger file to {save_swagger_path}")
     except Exception as e:
         logger.warning(f"Failed to save Swagger file: {e}")