rootly-mcp-server 2.0.11__py3-none-any.whl → 2.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rootly_mcp_server/__init__.py
@@ -13,7 +13,7 @@ Features:
 from .server import RootlyMCPServer
 from .client import RootlyClient
 
-__version__ = "2.0.1"
+__version__ = "2.0.12"
 __all__ = [
     'RootlyMCPServer',
     'RootlyClient',
rootly_mcp_server/server.py
@@ -391,7 +391,7 @@ def create_rootly_mcp_server(
         # Single page mode
         if page_number > 0:
             params = {
-                "page[size]": min(page_size, 5),  # Keep responses very small to avoid errors
+                "page[size]": page_size,  # Use requested page size (already limited to max 20)
                 "page[number]": page_number,
                 "include": "",
             }
@@ -409,7 +409,7 @@ def create_rootly_mcp_server(
         # Multi-page mode (page_number = 0)
         all_incidents = []
         current_page = 1
-        effective_page_size = min(page_size, 5)  # Keep responses very small to avoid errors
+        effective_page_size = page_size  # Use requested page size (already limited to max 20)
         max_pages = 10  # Safety limit to prevent infinite loops
 
         try:
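Both hunks above drop the hard min(page_size, 5) cap and pass the caller's value straight through; the inline comments assume page_size was already limited to 20 before reaching these lines. A minimal sketch of such a clamp, with a hypothetical helper name and bounds taken from those comments (the actual enforcement point is not shown in this diff):

    # Assumed upstream clamp; name and placement are illustrative only.
    def clamp_page_size(page_size: int, maximum: int = 20) -> int:
        return max(1, min(page_size, maximum))

    clamp_page_size(50)  # -> 20
    clamp_page_size(0)   # -> 1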
@@ -922,6 +922,73 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                         "description": param.get("description", "Parameter value")
                     }
 
+            # Add/modify pagination limits to alerts and incident-related endpoints to prevent infinite loops
+            if method.lower() == "get" and ("alerts" in path.lower() or "incident" in path.lower()):
+                if "parameters" not in operation:
+                    operation["parameters"] = []
+
+                # Find existing pagination parameters and update them with limits
+                page_size_param = None
+                page_number_param = None
+
+                for param in operation["parameters"]:
+                    if param.get("name") == "page[size]":
+                        page_size_param = param
+                    elif param.get("name") == "page[number]":
+                        page_number_param = param
+
+                # Update or add page[size] parameter with limits
+                if page_size_param:
+                    # Update existing parameter with limits
+                    if "schema" not in page_size_param:
+                        page_size_param["schema"] = {}
+                    page_size_param["schema"].update({
+                        "type": "integer",
+                        "default": 10,
+                        "minimum": 1,
+                        "maximum": 20,
+                        "description": "Number of results per page (max: 20)"
+                    })
+                else:
+                    # Add new parameter
+                    operation["parameters"].append({
+                        "name": "page[size]",
+                        "in": "query",
+                        "required": False,
+                        "schema": {
+                            "type": "integer",
+                            "default": 10,
+                            "minimum": 1,
+                            "maximum": 20,
+                            "description": "Number of results per page (max: 20)"
+                        }
+                    })
+
+                # Update or add page[number] parameter with defaults
+                if page_number_param:
+                    # Update existing parameter
+                    if "schema" not in page_number_param:
+                        page_number_param["schema"] = {}
+                    page_number_param["schema"].update({
+                        "type": "integer",
+                        "default": 1,
+                        "minimum": 1,
+                        "description": "Page number to retrieve"
+                    })
+                else:
+                    # Add new parameter
+                    operation["parameters"].append({
+                        "name": "page[number]",
+                        "in": "query",
+                        "required": False,
+                        "schema": {
+                            "type": "integer",
+                            "default": 1,
+                            "minimum": 1,
+                            "description": "Page number to retrieve"
+                        }
+                    })
+
     # Also clean up any remaining broken references in components
     if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
         schemas = filtered_spec["components"]["schemas"]
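For reference, a self-contained sketch of the "update existing parameter" branch above, applied to a toy operation dict rather than the real Rootly OpenAPI spec (the starting schema and endpoint are invented):

    operation = {
        "parameters": [{"name": "page[size]", "in": "query", "schema": {"type": "integer"}}]
    }
    for param in operation["parameters"]:
        if param.get("name") == "page[size]":
            param.setdefault("schema", {}).update({
                "type": "integer", "default": 10, "minimum": 1, "maximum": 20,
                "description": "Number of results per page (max: 20)",
            })
    print(operation["parameters"][0]["schema"]["maximum"])  # 20

The diff itself uses an explicit "schema" existence check rather than setdefault; the resulting schema is the same.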
rootly_mcp_server/smart_utils.py
@@ -13,10 +13,13 @@ from datetime import datetime
 
 # Check ML library availability
 import importlib.util
-ML_AVAILABLE = (
-    importlib.util.find_spec("sklearn.feature_extraction.text") is not None and
-    importlib.util.find_spec("sklearn.metrics.pairwise") is not None
-)
+try:
+    ML_AVAILABLE = (
+        importlib.util.find_spec("sklearn.feature_extraction.text") is not None and
+        importlib.util.find_spec("sklearn.metrics.pairwise") is not None
+    )
+except (ImportError, ModuleNotFoundError):
+    ML_AVAILABLE = False
 
 logger = logging.getLogger(__name__)
 
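The try/except matters because importlib.util.find_spec() imports the parent package when given a dotted name, so it raises ModuleNotFoundError (a subclass of ImportError) rather than returning None when scikit-learn itself is absent. A quick demonstration with a deliberately bogus package name:

    import importlib.util

    try:
        importlib.util.find_spec("definitely_not_installed_pkg.submodule")
    except ModuleNotFoundError:
        print("find_spec raised instead of returning None")  # this branch runs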
@@ -73,13 +76,27 @@ class TextSimilarityAnalyzer:
             r'\b(\w+)-(?:service|api|app|server|db)\b',  # service-api, auth-service
             r'\b(\w+)(?:service|api|app|server|db)\b',   # paymentapi, authservice
             r'\b(\w+)\.(?:service|api|app|com)\b',       # auth.service, api.com
+            r'\b(\w+)\s+(?:api|service|app|server|db)\b',  # payment api, auth service
+        ]
+
+        # Known service names (exact matches)
+        known_services = [
+            'elasticsearch', 'elastic', 'kibana', 'redis', 'postgres', 'mysql',
+            'mongodb', 'kafka', 'rabbitmq', 'nginx', 'apache', 'docker', 'kubernetes'
         ]
 
         text_lower = text.lower()
+
+        # Extract pattern-based services
         for pattern in service_patterns:
             matches = re.findall(pattern, text_lower)
             services.extend(matches)
 
+        # Extract known services (with word boundaries to avoid false positives)
+        for service in known_services:
+            if re.search(r'\b' + re.escape(service) + r'\b', text_lower):
+                services.append(service)
+
         # Remove duplicates while preserving order
         return list(dict.fromkeys(services))
 
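A self-contained illustration of the additions above, reusing the new whitespace pattern and the word-boundary lookup on an invented incident title (only a subset of the known-service list is shown):

    import re

    text_lower = "payment api is timing out and elasticsearch cluster is red"
    services = []
    for pattern in [r'\b(\w+)\s+(?:api|service|app|server|db)\b']:
        services.extend(re.findall(pattern, text_lower))
    for service in ['elasticsearch', 'elastic', 'redis']:
        if re.search(r'\b' + re.escape(service) + r'\b', text_lower):
            services.append(service)
    print(list(dict.fromkeys(services)))  # ['payment', 'elasticsearch']

Note that 'elastic' is not appended here: the \b boundary keeps it from matching inside 'elasticsearch', which is the false-positive case the new comment refers to.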
@@ -128,18 +145,26 @@ class TextSimilarityAnalyzer:
         """Combine incident title, description, and other text fields."""
         text_parts = []
 
-        # Get text from incident attributes
+        # Get text from incident attributes (preferred)
         attributes = incident.get('attributes', {})
-        text_parts.append(attributes.get('title', ''))
-        text_parts.append(attributes.get('summary', ''))
-        text_parts.append(attributes.get('description', ''))
-
-        # Also check root level for backward compatibility
-        text_parts.append(incident.get('title', ''))
-        text_parts.append(incident.get('summary', ''))
-        text_parts.append(incident.get('description', ''))
-
-        combined = ' '.join([part for part in text_parts if part])
+        title = attributes.get('title', '')
+        summary = attributes.get('summary', '')
+        description = attributes.get('description', '')
+
+        # Fallback to root level if attributes are empty
+        if not title:
+            title = incident.get('title', '')
+        if not summary:
+            summary = incident.get('summary', '')
+        if not description:
+            description = incident.get('description', '')
+
+        # Add non-empty parts, avoiding duplication
+        for part in [title, summary, description]:
+            if part and part not in text_parts:
+                text_parts.append(part)
+
+        combined = ' '.join(text_parts)
         return self.preprocess_text(combined)
 
     def _calculate_tfidf_similarity(self, incidents: List[Dict], target_incident: Dict,
@@ -175,7 +200,15 @@ class TextSimilarityAnalyzer:
             service_bonus = len(set(target_services) & set(incident_services)) * 0.1
             error_bonus = len(set(target_errors) & set(incident_errors)) * 0.15
 
-            final_score = min(1.0, similarities[i] + service_bonus + error_bonus)
+            # Exact match bonus for identical preprocessed text
+            exact_match_bonus = 0.0
+            if target_text and incident_texts[i] and target_text.strip() == incident_texts[i].strip():
+                exact_match_bonus = 0.3  # Strong bonus for exact matches
+
+            # Partial matching bonus using fuzzy keyword similarity
+            partial_bonus = self._calculate_partial_similarity_bonus(target_text, incident_texts[i])
+
+            final_score = min(1.0, similarities[i] + service_bonus + error_bonus + exact_match_bonus + partial_bonus)
 
             results.append(IncidentSimilarity(
                 incident_id=str(incident.get('id', '')),
@@ -212,7 +245,15 @@ class TextSimilarityAnalyzer:
             service_bonus = len(set(target_services) & set(incident_services)) * 0.2
             error_bonus = len(set(target_errors) & set(incident_errors)) * 0.25
 
-            final_score = min(1.0, word_similarity + service_bonus + error_bonus)
+            # Exact match bonus for identical preprocessed text
+            exact_match_bonus = 0.0
+            if target_text and incident_text and target_text.strip() == incident_text.strip():
+                exact_match_bonus = 0.4  # Strong bonus for exact matches in keyword mode
+
+            # Partial matching bonus using fuzzy keyword similarity
+            partial_bonus = self._calculate_partial_similarity_bonus(target_text, incident_text)
+
+            final_score = min(1.0, word_similarity + service_bonus + error_bonus + exact_match_bonus + partial_bonus)
 
             if final_score > 0.15:  # Only include reasonable matches
                 results.append(IncidentSimilarity(
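Illustrative arithmetic for the keyword-mode score above, with invented component values (the same capping applies to the TF-IDF path in the previous hunk):

    word_similarity   = 0.30
    service_bonus     = 1 * 0.2    # one shared service
    error_bonus       = 0 * 0.25   # no shared error patterns
    exact_match_bonus = 0.0        # texts are not identical
    partial_bonus     = 0.15       # fuzzy-match bonus at its cap
    final_score = min(1.0, word_similarity + service_bonus + error_bonus
                      + exact_match_bonus + partial_bonus)   # 0.65, above the 0.15 cutoff

The min(1.0, ...) cap matters more now: with the two new bonuses stacked on the existing ones, the raw sum can exceed 1.0.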
@@ -228,14 +269,94 @@ class TextSimilarityAnalyzer:
         return results
 
     def _extract_common_keywords(self, text1: str, text2: str) -> List[str]:
-        """Extract common meaningful keywords between two texts."""
+        """Extract common meaningful keywords between two texts with fuzzy matching."""
+        words1 = set(text1.split())
+        words2 = set(text2.split())
+
+        # Exact matches
+        exact_common = words1 & words2
+
+        # Fuzzy matches for partial similarity
+        fuzzy_common = []
+        for word1 in words1:
+            if len(word1) > 3:  # Only check longer words
+                for word2 in words2:
+                    if len(word2) > 3 and word1 != word2:
+                        # Check if words share significant substring (fuzzy matching)
+                        if self._words_similar(word1, word2):
+                            fuzzy_common.append(f"{word1}~{word2}")
+
+        # Combine exact and fuzzy matches
+        all_matches = list(exact_common) + fuzzy_common
+        meaningful = [word for word in all_matches if len(word.split('~')[0]) > 2]
+        return meaningful[:8]  # Increased to show more matches
+
+    def _words_similar(self, word1: str, word2: str) -> bool:
+        """Check if two words are similar enough to be considered related."""
+        # Handle common variations
+        variations = {
+            'elastic': ['elasticsearch', 'elk'],
+            'payment': ['payments', 'pay', 'billing'],
+            'database': ['db', 'postgres', 'mysql', 'mongo'],
+            'timeout': ['timeouts', 'timed-out', 'timing-out'],
+            'service': ['services', 'svc', 'api', 'app'],
+            'error': ['errors', 'err', 'failure', 'failed', 'failing'],
+            'down': ['outage', 'offline', 'unavailable']
+        }
+
+        # Check if words are variations of each other
+        for base, variants in variations.items():
+            if (word1 == base and word2 in variants) or (word2 == base and word1 in variants):
+                return True
+            if word1 in variants and word2 in variants:
+                return True
+
+        # Check substring similarity (at least 70% overlap for longer words)
+        if len(word1) >= 5 and len(word2) >= 5:
+            shorter = min(word1, word2, key=len)
+            longer = max(word1, word2, key=len)
+            if shorter in longer and len(shorter) / len(longer) >= 0.7:
+                return True
+
+        # Check if one word starts with the other (for prefixed services)
+        if len(word1) >= 4 and len(word2) >= 4:
+            if word1.startswith(word2) or word2.startswith(word1):
+                return True
+
+        return False
+
+    def _calculate_partial_similarity_bonus(self, text1: str, text2: str) -> float:
+        """Calculate bonus for partial/fuzzy keyword matches."""
+        if not text1 or not text2:
+            return 0.0
+
         words1 = set(text1.split())
         words2 = set(text2.split())
-        common = words1 & words2
 
-        # Filter out very short words and return top matches
-        meaningful = [word for word in common if len(word) > 2]
-        return meaningful[:5]
+        fuzzy_matches = 0
+
+        # Count meaningful words that could be compared
+        meaningful_words1 = [w for w in words1 if len(w) > 3]
+        meaningful_words2 = [w for w in words2 if len(w) > 3]
+
+        if not meaningful_words1 or not meaningful_words2:
+            return 0.0
+
+        # Count fuzzy matches
+        for word1 in meaningful_words1:
+            for word2 in meaningful_words2:
+                if word1 != word2 and self._words_similar(word1, word2):
+                    fuzzy_matches += 1
+                    break  # Only count each target word once
+
+        # Calculate bonus based on fuzzy match ratio
+        if fuzzy_matches > 0:
+            # Use the smaller meaningful word set as denominator for conservative bonus
+            total_possible_matches = min(len(meaningful_words1), len(meaningful_words2))
+            bonus_ratio = fuzzy_matches / total_possible_matches
+            return min(0.15, bonus_ratio * 0.3)  # Max 0.15 bonus for partial matches
+
+        return 0.0
 
     def _calculate_resolution_time(self, incident: Dict) -> Optional[float]:
         """Calculate resolution time in hours if timestamps are available."""
rootly_mcp_server-2.0.11.dist-info/METADATA → rootly_mcp_server-2.0.13.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rootly-mcp-server
-Version: 2.0.11
+Version: 2.0.13
 Summary: A Model Context Protocol server for Rootly APIs using OpenAPI spec
 Project-URL: Homepage, https://github.com/Rootly-AI-Labs/Rootly-MCP-server
 Project-URL: Issues, https://github.com/Rootly-AI-Labs/Rootly-MCP-server/issues
rootly_mcp_server-2.0.13.dist-info/RECORD (added)
@@ -0,0 +1,12 @@
+rootly_mcp_server/__init__.py,sha256=rvIuqIyuzgC7b9qSnylrdDP2zPO-7Ou9AoblR6re1co,629
+rootly_mcp_server/__main__.py,sha256=_F4p65_VjnN84RtmEdESVLLH0tO5tL9qBfb2Xdvbj2E,6480
+rootly_mcp_server/client.py,sha256=uit-YijR7OAJtysBoclqnublEDVkFfcb29wSzhpBv44,4686
+rootly_mcp_server/server.py,sha256=BX4bRTlzUBI0xNha-owy9FRSmBCpVRM2qgSv5m5SHzE,44413
+rootly_mcp_server/smart_utils.py,sha256=lvGN9ITyJjBkm7ejpYagd8VWodLKnC6FmwECfCOcGwM,22973
+rootly_mcp_server/utils.py,sha256=NyxdcDiFGlV2a8eBO4lKgZg0D7Gxr6xUIB0YyJGgpPA,4165
+rootly_mcp_server/data/__init__.py,sha256=fO8a0bQnRVEoRMHKvhFzj10bhoaw7VsI51czc2MsUm4,143
+rootly_mcp_server-2.0.13.dist-info/METADATA,sha256=86--37XF6dDVqGyzqdmNGbaq0gEzRtGC_gJrtoduQXY,8722
+rootly_mcp_server-2.0.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+rootly_mcp_server-2.0.13.dist-info/entry_points.txt,sha256=NE33b8VgigVPGBkboyo6pvN1Vz35HZtLybxMO4Q03PI,70
+rootly_mcp_server-2.0.13.dist-info/licenses/LICENSE,sha256=c9w9ZZGl14r54tsP40oaq5adTVX_HMNHozPIH2ymzmw,11341
+rootly_mcp_server-2.0.13.dist-info/RECORD,,
rootly_mcp_server-2.0.11.dist-info/RECORD (removed)
@@ -1,12 +0,0 @@
-rootly_mcp_server/__init__.py,sha256=6pLh19IFyqE-Cve9zergkD-X_yApEkInREKmRa73T6s,628
-rootly_mcp_server/__main__.py,sha256=_F4p65_VjnN84RtmEdESVLLH0tO5tL9qBfb2Xdvbj2E,6480
-rootly_mcp_server/client.py,sha256=uit-YijR7OAJtysBoclqnublEDVkFfcb29wSzhpBv44,4686
-rootly_mcp_server/server.py,sha256=5NyGWUOjz1C1kFbAbu2iMNfuKo53_Sq254vF0cEUSHE,41358
-rootly_mcp_server/smart_utils.py,sha256=B0to9o55PMwQPLFT6GZAg_S_Nt4pKFRQq0AXBL-GJp8,17442
-rootly_mcp_server/utils.py,sha256=NyxdcDiFGlV2a8eBO4lKgZg0D7Gxr6xUIB0YyJGgpPA,4165
-rootly_mcp_server/data/__init__.py,sha256=fO8a0bQnRVEoRMHKvhFzj10bhoaw7VsI51czc2MsUm4,143
-rootly_mcp_server-2.0.11.dist-info/METADATA,sha256=fGFaM6E_5DJ1gdvFfuw1yyNBeJZMLdsDM56gRnDhy38,8722
-rootly_mcp_server-2.0.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-rootly_mcp_server-2.0.11.dist-info/entry_points.txt,sha256=NE33b8VgigVPGBkboyo6pvN1Vz35HZtLybxMO4Q03PI,70
-rootly_mcp_server-2.0.11.dist-info/licenses/LICENSE,sha256=c9w9ZZGl14r54tsP40oaq5adTVX_HMNHozPIH2ymzmw,11341
-rootly_mcp_server-2.0.11.dist-info/RECORD,,