rootly-mcp-server 2.0.10__py3-none-any.whl → 2.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rootly_mcp_server/client.py +1 -1
- rootly_mcp_server/server.py +392 -18
- rootly_mcp_server/smart_utils.py +398 -0
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.11.dist-info}/METADATA +52 -10
- rootly_mcp_server-2.0.11.dist-info/RECORD +12 -0
- rootly_mcp_server/routemap_server.py +0 -206
- rootly_mcp_server/test_client.py +0 -150
- rootly_mcp_server-2.0.10.dist-info/RECORD +0 -13
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.11.dist-info}/WHEEL +0 -0
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.11.dist-info}/entry_points.txt +0 -0
- {rootly_mcp_server-2.0.10.dist-info → rootly_mcp_server-2.0.11.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,398 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Smart utility functions for AI-powered incident analysis.
|
|
3
|
+
|
|
4
|
+
This module provides text similarity, pattern matching, and intelligent analysis
|
|
5
|
+
functions for implementing smart incident management features.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import re
|
|
9
|
+
import logging
|
|
10
|
+
from typing import List, Dict, Optional, Any
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
|
|
14
|
+
# Check ML library availability
|
|
15
|
+
import importlib.util
|
|
16
|
+
ML_AVAILABLE = (
|
|
17
|
+
importlib.util.find_spec("sklearn.feature_extraction.text") is not None and
|
|
18
|
+
importlib.util.find_spec("sklearn.metrics.pairwise") is not None
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class IncidentSimilarity:
    """Similarity match between a target incident and one historical incident.

    Produced by TextSimilarityAnalyzer.calculate_similarity and consumed by
    SolutionExtractor.extract_solutions.
    """
    # ID of the matched historical incident (stringified).
    incident_id: str
    # Title of the matched incident ('Unknown' when absent).
    title: str
    # Combined score: base text similarity plus service/error bonuses, capped at 1.0.
    similarity_score: float
    # Service names appearing in both the target's and this incident's text.
    matched_services: List[str]
    # Words shared by both incidents' preprocessed text (capped at 5).
    matched_keywords: List[str]
    # Resolution/summary text of the matched incident, if any.
    resolution_summary: str = ""
    # Hours from created_at to resolved_at/updated_at, when both timestamps parse.
    resolution_time_hours: Optional[float] = None
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class TextSimilarityAnalyzer:
    """Analyzes text similarity between incidents using TF-IDF and cosine similarity.

    When scikit-learn is unavailable (ML_AVAILABLE is False) or fewer than two
    candidate incidents are supplied, a Jaccard keyword-overlap fallback is used.
    """

    def __init__(self):
        if not ML_AVAILABLE:
            logger.warning("scikit-learn not available. Text similarity will use basic keyword matching.")
        # Placeholders for fitted state; the similarity methods below currently
        # vectorize per call and do not populate these.
        self.vectorizer = None
        self.incident_vectors = None
        self.incident_metadata = {}

    def preprocess_text(self, text: Optional[str]) -> str:
        """Lowercase, strip punctuation, collapse whitespace, and drop stopwords."""
        if not text:
            return ""
        text = text.lower()
        # Keep word chars, whitespace, hyphens and dots so service-like tokens
        # (auth-service, api.com) survive preprocessing.
        text = re.sub(r'[^\w\s\-\.]', ' ', text)
        text = re.sub(r'\s+', ' ', text)
        # Minimal stopword set; one-character tokens are dropped as noise.
        stopwords = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'is', 'are', 'was', 'were'}
        words = text.split()
        text = ' '.join(word for word in words if word not in stopwords and len(word) > 1)
        return text.strip()

    def extract_services(self, text: str) -> List[str]:
        """Extract likely service names from incident text (e.g. 'auth' from 'auth-service')."""
        services: List[str] = []
        service_patterns = [
            r'\b(\w+)-(?:service|api|app|server|db)\b',  # service-api, auth-service
            r'\b(\w+)(?:service|api|app|server|db)\b',   # paymentapi, authservice
            r'\b(\w+)\.(?:service|api|app|com)\b',       # auth.service, api.com
        ]
        text_lower = text.lower()
        for pattern in service_patterns:
            services.extend(re.findall(pattern, text_lower))
        # Remove duplicates while preserving first-seen order.
        return list(dict.fromkeys(services))

    def extract_error_patterns(self, text: str) -> List[str]:
        """Classify coarse error categories present in the incident text."""
        patterns: List[str] = []
        text_lower = text.lower()  # hoisted: was lowercased once per category check

        # HTTP 4xx/5xx status codes become e.g. "http-503".
        patterns.extend(f"http-{code}" for code in re.findall(r'\b[45]\d\d\b', text))

        if re.search(r'\b(?:connection|timeout|database|db)\b', text_lower):
            patterns.append("database-error")
        if re.search(r'\b(?:memory|cpu|disk|resource)\b', text_lower):
            patterns.append("resource-error")
        # NOTE: "connection" intentionally appears in both the database and
        # network checks, so one incident can carry both labels.
        if re.search(r'\b(?:network|dns|connection|unreachable)\b', text_lower):
            patterns.append("network-error")

        return patterns

    def calculate_similarity(self, incidents: List[Dict], target_incident: Dict) -> List["IncidentSimilarity"]:
        """Score historical incidents against the target, best match first."""
        if not incidents:
            return []

        target_text = self._combine_incident_text(target_incident)
        target_services = self.extract_services(target_text)
        target_errors = self.extract_error_patterns(target_text)

        # TF-IDF needs at least two documents to be meaningful; otherwise
        # (or without scikit-learn) fall back to keyword overlap.
        if ML_AVAILABLE and len(incidents) > 1:
            similarities = self._calculate_tfidf_similarity(incidents, target_incident, target_text, target_services, target_errors)
        else:
            similarities = self._calculate_keyword_similarity(incidents, target_incident, target_text, target_services, target_errors)

        return sorted(similarities, key=lambda x: x.similarity_score, reverse=True)

    def _combine_incident_text(self, incident: Dict) -> str:
        """Join title/summary/description (nested attributes and root level) into one preprocessed string."""
        attributes = incident.get('attributes', {})
        fields = ('title', 'summary', 'description')
        text_parts = [attributes.get(f, '') for f in fields]
        # Root-level fields kept for backward compatibility with older payloads.
        text_parts += [incident.get(f, '') for f in fields]
        combined = ' '.join(part for part in text_parts if part)
        return self.preprocess_text(combined)

    def _calculate_tfidf_similarity(self, incidents: List[Dict], target_incident: Dict,
                                    target_text: str, target_services: List[str],
                                    target_errors: List[str]) -> List["IncidentSimilarity"]:
        """Score incidents with TF-IDF cosine similarity plus service/error-pattern bonuses."""
        if not ML_AVAILABLE:
            return []

        # Imported lazily so the module loads without scikit-learn installed.
        from sklearn.feature_extraction.text import TfidfVectorizer
        from sklearn.metrics.pairwise import cosine_similarity

        incident_texts = [self._combine_incident_text(inc) for inc in incidents]
        all_texts = incident_texts + [target_text]

        vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))
        tfidf_matrix = vectorizer.fit_transform(all_texts)

        # Target was appended last, so compare row -1 against all others.
        target_vector = tfidf_matrix[-1]
        similarities = cosine_similarity(target_vector, tfidf_matrix[:-1]).flatten()

        # Hoisted: was rebuilt per incident inside the loop.
        target_service_set = set(target_services)
        target_error_set = set(target_errors)

        results = []
        for i, incident in enumerate(incidents):
            if similarities[i] <= 0.1:  # skip weak matches
                continue

            shared_services = target_service_set & set(self.extract_services(incident_texts[i]))
            shared_errors = target_error_set & set(self.extract_error_patterns(incident_texts[i]))

            # Bonus weights for overlapping services / error categories,
            # capped so the final score stays within [0, 1].
            final_score = min(1.0, similarities[i] + 0.1 * len(shared_services) + 0.15 * len(shared_errors))

            results.append(IncidentSimilarity(
                incident_id=str(incident.get('id', '')),
                title=incident.get('attributes', {}).get('title', 'Unknown'),
                similarity_score=final_score,
                matched_services=sorted(shared_services),  # sorted for deterministic output
                matched_keywords=self._extract_common_keywords(target_text, incident_texts[i]),
                resolution_summary=incident.get('attributes', {}).get('summary', ''),
                resolution_time_hours=self._calculate_resolution_time(incident)
            ))

        return results

    def _calculate_keyword_similarity(self, incidents: List[Dict], target_incident: Dict,
                                      target_text: str, target_services: List[str],
                                      target_errors: List[str]) -> List["IncidentSimilarity"]:
        """Fallback Jaccard word-overlap scoring when ML libraries are unavailable."""
        target_words = set(target_text.split())
        # Hoisted: was rebuilt per incident inside the loop.
        target_service_set = set(target_services)
        target_error_set = set(target_errors)

        results = []
        for incident in incidents:
            incident_text = self._combine_incident_text(incident)
            incident_words = set(incident_text.split())
            shared_services = target_service_set & set(self.extract_services(incident_text))
            shared_errors = target_error_set & set(self.extract_error_patterns(incident_text))

            # Jaccard similarity over word sets; 0 when both texts are empty.
            union = target_words | incident_words
            word_similarity = len(target_words & incident_words) / len(union) if union else 0

            # Higher bonus weights than the TF-IDF path to compensate for the
            # weaker base signal; capped at 1.0.
            final_score = min(1.0, word_similarity + 0.2 * len(shared_services) + 0.25 * len(shared_errors))

            if final_score > 0.15:  # only include reasonable matches
                results.append(IncidentSimilarity(
                    incident_id=str(incident.get('id', '')),
                    title=incident.get('attributes', {}).get('title', 'Unknown'),
                    similarity_score=final_score,
                    matched_services=sorted(shared_services),
                    # sorted: raw set-intersection order varies with hash
                    # randomization, making output nondeterministic across runs
                    matched_keywords=sorted(target_words & incident_words)[:5],  # top 5 matches
                    resolution_summary=incident.get('attributes', {}).get('summary', ''),
                    resolution_time_hours=self._calculate_resolution_time(incident)
                ))

        return results

    def _extract_common_keywords(self, text1: str, text2: str) -> List[str]:
        """Return up to 5 shared words longer than 2 chars, sorted for determinism."""
        common = set(text1.split()) & set(text2.split())
        return sorted(word for word in common if len(word) > 2)[:5]

    def _calculate_resolution_time(self, incident: Dict) -> Optional[float]:
        """Hours between created_at and resolved_at/updated_at, or None if unavailable."""
        try:
            attributes = incident.get('attributes', {})
            created_at = attributes.get('created_at')
            # Fall back to updated_at when no explicit resolution timestamp.
            resolved_at = attributes.get('resolved_at') or attributes.get('updated_at')

            if created_at and resolved_at:
                # Timestamps use a trailing 'Z'; fromisoformat (pre-3.11)
                # requires an explicit UTC offset instead.
                created = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
                resolved = datetime.fromisoformat(resolved_at.replace('Z', '+00:00'))
                return (resolved - created).total_seconds() / 3600  # seconds -> hours
        except Exception:
            # Best-effort: malformed or missing timestamps yield no estimate.
            pass

        return None
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
class SolutionExtractor:
    """Extract and format solution information from resolved incidents."""

    # (verb, pattern) pairs: each regex captures the action's target text and
    # the verb is the normalized action word used in the suggestion string.
    _ACTION_PATTERNS = [
        ("restart", r'restart(?:ed)?\s+(\w+(?:\s+\w+)*)'),
        ("clear", r'clear(?:ed)?\s+(\w+(?:\s+\w+)*)'),
        ("update", r'update(?:d)?\s+(\w+(?:\s+\w+)*)'),
        ("fix", r'fix(?:ed)?\s+(\w+(?:\s+\w+)*)'),
        ("rollback", r'roll(?:ed)?\s+back\s+(\w+(?:\s+\w+)*)'),
        ("scale", r'scale(?:d)?\s+(\w+(?:\s+\w+)*)'),
        ("deploy", r'deploy(?:ed)?\s+(\w+(?:\s+\w+)*)'),
    ]

    def extract_solutions(self, similar_incidents: List["IncidentSimilarity"]) -> Dict[str, Any]:
        """Summarize up to 5 similar incidents into actionable solution data.

        Returns a dict with keys: "solutions", "common_patterns",
        "average_resolution_time" (hours, rounded to 2 decimals or None) and
        "total_similar_incidents".
        """
        if not similar_incidents:
            return {
                "solutions": [],
                "common_patterns": [],
                "average_resolution_time": None,
                "total_similar_incidents": 0
            }

        solutions = []
        resolution_times = []
        all_keywords = []

        for incident in similar_incidents[:5]:  # top 5 most similar
            solution_info = {
                "incident_id": incident.incident_id,
                "title": incident.title,
                "similarity": round(incident.similarity_score, 3),
                "matched_services": incident.matched_services,
                "resolution_summary": incident.resolution_summary or "No resolution summary available",
                "resolution_time_hours": incident.resolution_time_hours
            }

            # Mine concrete actions ("restart X", "clear Y", ...) out of the
            # free-text resolution summary.
            solution_steps = self._extract_action_items(incident.resolution_summary)
            if solution_steps:
                solution_info["suggested_actions"] = solution_steps

            solutions.append(solution_info)

            # 'is not None' (not truthiness): a legitimate 0.0-hour resolution
            # must still count toward the average.
            if incident.resolution_time_hours is not None:
                resolution_times.append(incident.resolution_time_hours)

            all_keywords.extend(incident.matched_keywords)

        avg_resolution = sum(resolution_times) / len(resolution_times) if resolution_times else None
        common_patterns = self._identify_common_patterns(all_keywords, similar_incidents)

        return {
            "solutions": solutions,
            "common_patterns": common_patterns,
            # 'is not None': an average of exactly 0.0 hours is still a value.
            "average_resolution_time": round(avg_resolution, 2) if avg_resolution is not None else None,
            "total_similar_incidents": len(similar_incidents)
        }

    def _extract_action_items(self, resolution_text: str) -> List[str]:
        """Extract up to 5 potential action items from resolution text."""
        if not resolution_text:
            return []

        actions = []
        text_lower = resolution_text.lower()

        # Table-driven extraction replaces the former if/elif chain that
        # re-inspected each pattern's source; output format is unchanged.
        for verb, pattern in self._ACTION_PATTERNS:
            for match in re.findall(pattern, text_lower):
                actions.append(f"{verb} {match}".strip())

        # Also keep whole sentences that explicitly mention steps/actions/fixes.
        if 'step' in text_lower or 'action' in text_lower:
            for sentence in resolution_text.split('.'):
                if any(word in sentence.lower() for word in ['step', 'action', 'fix', 'solution']):
                    actions.append(sentence.strip())

        return actions[:5]  # limit to top 5 actions

    def _identify_common_patterns(self, keywords: List[str], incidents: List["IncidentSimilarity"]) -> List[str]:
        """Identify service, keyword, and resolution-time patterns across similar incidents."""
        patterns = []

        # Services that show up in at least two of the similar incidents;
        # sorted so the message is deterministic across runs.
        all_services = []
        for incident in incidents:
            all_services.extend(incident.matched_services)
        if all_services:
            common_services = sorted(s for s in set(all_services) if all_services.count(s) >= 2)
            if common_services:
                patterns.append(f"Common services affected: {', '.join(common_services)}")

        # Keywords (longer than 3 chars) repeated across incidents.
        if keywords:
            keyword_counts = {}
            for keyword in keywords:
                keyword_counts[keyword] = keyword_counts.get(keyword, 0) + 1
            frequent_keywords = [k for k, v in keyword_counts.items() if v >= 2 and len(k) > 3]
            if frequent_keywords:
                patterns.append(f"Common keywords: {', '.join(frequent_keywords[:3])}")

        # Characterize typical resolution time when any incident reports one.
        resolution_times = [inc.resolution_time_hours for inc in incidents if inc.resolution_time_hours is not None]
        if resolution_times:
            avg_time = sum(resolution_times) / len(resolution_times)
            if avg_time < 1:
                patterns.append("These incidents typically resolve quickly (< 1 hour)")
            elif avg_time > 4:
                patterns.append("These incidents typically take longer to resolve (> 4 hours)")
            else:
                patterns.append(f"These incidents typically resolve in {avg_time:.1f} hours")

        return patterns
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: rootly-mcp-server
|
|
3
|
-
Version: 2.0.10
|
|
3
|
+
Version: 2.0.11
|
|
4
4
|
Summary: A Model Context Protocol server for Rootly APIs using OpenAPI spec
|
|
5
5
|
Project-URL: Homepage, https://github.com/Rootly-AI-Labs/Rootly-MCP-server
|
|
6
6
|
Project-URL: Issues, https://github.com/Rootly-AI-Labs/Rootly-MCP-server/issues
|
|
@@ -17,8 +17,10 @@ Requires-Python: >=3.12
|
|
|
17
17
|
Requires-Dist: brotli>=1.0.0
|
|
18
18
|
Requires-Dist: fastmcp>=2.9.0
|
|
19
19
|
Requires-Dist: httpx>=0.24.0
|
|
20
|
+
Requires-Dist: numpy>=1.24.0
|
|
20
21
|
Requires-Dist: pydantic>=2.0.0
|
|
21
22
|
Requires-Dist: requests>=2.28.0
|
|
23
|
+
Requires-Dist: scikit-learn>=1.3.0
|
|
22
24
|
Provides-Extra: dev
|
|
23
25
|
Requires-Dist: black>=23.0.0; extra == 'dev'
|
|
24
26
|
Requires-Dist: isort>=5.0.0; extra == 'dev'
|
|
@@ -43,9 +45,7 @@ An MCP server for the [Rootly API](https://docs.rootly.com/api-reference/overvie
|
|
|
43
45
|
|
|
44
46
|
## Installation
|
|
45
47
|
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
Configure your MCP-compatible editor (tested with Cursor and Windsurf) with the following:
|
|
48
|
+
Configure your MCP-compatible editor (tested with Cursor) with one of the configurations below. The package will be automatically downloaded and installed when you first open your editor.
|
|
49
49
|
|
|
50
50
|
### With uv
|
|
51
51
|
|
|
@@ -69,7 +69,7 @@ Configure your MCP-compatible editor (tested with Cursor and Windsurf) with the
|
|
|
69
69
|
}
|
|
70
70
|
```
|
|
71
71
|
|
|
72
|
-
### With
|
|
72
|
+
### With uvx
|
|
73
73
|
|
|
74
74
|
```json
|
|
75
75
|
{
|
|
@@ -139,6 +139,11 @@ Alternatively, connect directly to our hosted MCP server:
|
|
|
139
139
|
- **Dynamic Tool Generation**: Automatically creates MCP resources from Rootly's OpenAPI (Swagger) specification
|
|
140
140
|
- **Smart Pagination**: Defaults to 10 items per request for incident endpoints to prevent context window overflow
|
|
141
141
|
- **API Filtering**: Limits exposed API endpoints for security and performance
|
|
142
|
+
- **AI-Powered Incident Analysis**: Smart tools that learn from historical incident data
|
|
143
|
+
- **`find_related_incidents`**: Uses TF-IDF similarity analysis to find historically similar incidents
|
|
144
|
+
- **`suggest_solutions`**: Mines past incident resolutions to recommend actionable solutions
|
|
145
|
+
- **MCP Resources**: Exposes incident and team data as structured resources for easy AI reference
|
|
146
|
+
- **Intelligent Pattern Recognition**: Automatically identifies services, error types, and resolution patterns
|
|
142
147
|
|
|
143
148
|
### Whitelisted Endpoints
|
|
144
149
|
|
|
@@ -180,10 +185,49 @@ By default, the following Rootly API endpoints are exposed to the AI agent (see
|
|
|
180
185
|
We limit exposed API paths for two key reasons:
|
|
181
186
|
|
|
182
187
|
1. **Context Management**: Rootly's comprehensive API can overwhelm AI agents, affecting their ability to perform simple tasks effectively
|
|
183
|
-
2. **Security**:
|
|
188
|
+
2. **Security**: Controls which information and actions are accessible through the MCP server
|
|
184
189
|
|
|
185
190
|
To expose additional paths, modify the `allowed_paths` variable in `src/rootly_mcp_server/server.py`.
|
|
186
191
|
|
|
192
|
+
### AI-Powered Smart Tools
|
|
193
|
+
|
|
194
|
+
The MCP server includes intelligent tools that analyze historical incident data to provide actionable insights:
|
|
195
|
+
|
|
196
|
+
#### `find_related_incidents`
|
|
197
|
+
Finds historically similar incidents using machine learning text analysis:
|
|
198
|
+
```
|
|
199
|
+
find_related_incidents(incident_id="12345", similarity_threshold=0.3, max_results=5)
|
|
200
|
+
```
|
|
201
|
+
- **Input**: Incident ID, similarity threshold (0.0-1.0), max results
|
|
202
|
+
- **Output**: Similar incidents with confidence scores, matched services, and resolution times
|
|
203
|
+
- **Use Case**: Get context from past incidents to understand patterns and solutions
|
|
204
|
+
|
|
205
|
+
#### `suggest_solutions`
|
|
206
|
+
Recommends solutions by analyzing how similar incidents were resolved:
|
|
207
|
+
```
|
|
208
|
+
suggest_solutions(incident_id="12345", max_solutions=3)
|
|
209
|
+
# OR for new incidents:
|
|
210
|
+
suggest_solutions(incident_title="Payment API errors", incident_description="Users getting 500 errors during checkout")
|
|
211
|
+
```
|
|
212
|
+
- **Input**: Either incident ID OR title/description text
|
|
213
|
+
- **Output**: Actionable solution recommendations with confidence scores and time estimates
|
|
214
|
+
- **Use Case**: Get AI-powered suggestions based on successful past resolutions
|
|
215
|
+
|
|
216
|
+
#### How It Works
|
|
217
|
+
- **Text Similarity**: Uses TF-IDF vectorization and cosine similarity (scikit-learn)
|
|
218
|
+
- **Service Detection**: Automatically identifies affected services from incident text
|
|
219
|
+
- **Pattern Recognition**: Finds common error types, resolution patterns, and time estimates
|
|
220
|
+
- **Fallback Mode**: Works without ML libraries using keyword-based similarity
|
|
221
|
+
- **Solution Mining**: Extracts actionable steps from resolution summaries
|
|
222
|
+
|
|
223
|
+
#### Data Requirements
|
|
224
|
+
For optimal results, ensure your Rootly incidents have descriptive:
|
|
225
|
+
- **Titles**: Clear, specific incident descriptions
|
|
226
|
+
- **Summaries**: Detailed resolution steps when closing incidents
|
|
227
|
+
- **Service Tags**: Proper service identification
|
|
228
|
+
|
|
229
|
+
Example good resolution summary: `"Restarted auth-service, cleared Redis cache, and increased connection pool from 10 to 50"`
|
|
230
|
+
|
|
187
231
|
## About Rootly AI Labs
|
|
188
232
|
|
|
189
233
|
This project was developed by [Rootly AI Labs](https://labs.rootly.ai/), where we're building the future of system reliability and operational excellence. As an open-source incubator, we share ideas, experiment, and rapidly prototype solutions that benefit the entire community.
|
|
@@ -219,9 +263,7 @@ uv pip install <package>
|
|
|
219
263
|
|
|
220
264
|
### 3. Verify Installation
|
|
221
265
|
|
|
222
|
-
|
|
266
|
+
The server should now be ready to use with your MCP-compatible editor.
|
|
223
267
|
|
|
224
|
-
|
|
225
|
-
python src/rootly_mcp_server/test_client.py
|
|
226
|
-
```
|
|
268
|
+
**For developers:** Additional testing tools are available in the `tests/` directory.
|
|
227
269
|
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
rootly_mcp_server/__init__.py,sha256=6pLh19IFyqE-Cve9zergkD-X_yApEkInREKmRa73T6s,628
|
|
2
|
+
rootly_mcp_server/__main__.py,sha256=_F4p65_VjnN84RtmEdESVLLH0tO5tL9qBfb2Xdvbj2E,6480
|
|
3
|
+
rootly_mcp_server/client.py,sha256=uit-YijR7OAJtysBoclqnublEDVkFfcb29wSzhpBv44,4686
|
|
4
|
+
rootly_mcp_server/server.py,sha256=5NyGWUOjz1C1kFbAbu2iMNfuKo53_Sq254vF0cEUSHE,41358
|
|
5
|
+
rootly_mcp_server/smart_utils.py,sha256=B0to9o55PMwQPLFT6GZAg_S_Nt4pKFRQq0AXBL-GJp8,17442
|
|
6
|
+
rootly_mcp_server/utils.py,sha256=NyxdcDiFGlV2a8eBO4lKgZg0D7Gxr6xUIB0YyJGgpPA,4165
|
|
7
|
+
rootly_mcp_server/data/__init__.py,sha256=fO8a0bQnRVEoRMHKvhFzj10bhoaw7VsI51czc2MsUm4,143
|
|
8
|
+
rootly_mcp_server-2.0.11.dist-info/METADATA,sha256=fGFaM6E_5DJ1gdvFfuw1yyNBeJZMLdsDM56gRnDhy38,8722
|
|
9
|
+
rootly_mcp_server-2.0.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
10
|
+
rootly_mcp_server-2.0.11.dist-info/entry_points.txt,sha256=NE33b8VgigVPGBkboyo6pvN1Vz35HZtLybxMO4Q03PI,70
|
|
11
|
+
rootly_mcp_server-2.0.11.dist-info/licenses/LICENSE,sha256=c9w9ZZGl14r54tsP40oaq5adTVX_HMNHozPIH2ymzmw,11341
|
|
12
|
+
rootly_mcp_server-2.0.11.dist-info/RECORD,,
|