rootly-mcp-server 2.0.11.tar.gz → 2.0.12.tar.gz

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (31)
  1. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/PKG-INFO +1 -1
  2. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/pyproject.toml +1 -1
  3. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/__init__.py +1 -1
  4. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/smart_utils.py +143 -22
  5. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/integration/local/test_smart_tools.py +70 -3
  6. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/unit/test_smart_utils.py +1 -1
  7. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/.github/workflows/pypi-release.yml +0 -0
  8. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/.github/workflows/test.yml +0 -0
  9. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/.gitignore +0 -0
  10. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/.semaphore/deploy.yml +0 -0
  11. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/.semaphore/semaphore.yml +0 -0
  12. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/.semaphore/update-task-definition.sh +0 -0
  13. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/Dockerfile +0 -0
  14. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/LICENSE +0 -0
  15. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/README.md +0 -0
  16. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/rootly-mcp-server-demo.gif +0 -0
  17. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/rootly_openapi.json +0 -0
  18. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/__main__.py +0 -0
  19. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/client.py +0 -0
  20. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/data/__init__.py +0 -0
  21. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/server.py +0 -0
  22. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/utils.py +0 -0
  23. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/README.md +0 -0
  24. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/conftest.py +0 -0
  25. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/integration/local/test_basic.py +0 -0
  26. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/integration/remote/test_essential.py +0 -0
  27. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/test_client.py +0 -0
  28. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/unit/test_authentication.py +0 -0
  29. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/unit/test_server.py +0 -0
  30. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/unit/test_tools.py +0 -0
  31. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/uv.lock +0 -0
{rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rootly-mcp-server
-Version: 2.0.11
+Version: 2.0.12
 Summary: A Model Context Protocol server for Rootly APIs using OpenAPI spec
 Project-URL: Homepage, https://github.com/Rootly-AI-Labs/Rootly-MCP-server
 Project-URL: Issues, https://github.com/Rootly-AI-Labs/Rootly-MCP-server/issues
{rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "rootly-mcp-server"
-version = "2.0.11"
+version = "2.0.12"
 description = "A Model Context Protocol server for Rootly APIs using OpenAPI spec"
 readme = "README.md"
 requires-python = ">=3.12"
{rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/__init__.py
@@ -13,7 +13,7 @@ Features:
 from .server import RootlyMCPServer
 from .client import RootlyClient
 
-__version__ = "2.0.1"
+__version__ = "2.0.12"
 __all__ = [
     'RootlyMCPServer',
     'RootlyClient',
{rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/src/rootly_mcp_server/smart_utils.py
@@ -13,10 +13,13 @@ from datetime import datetime
 
 # Check ML library availability
 import importlib.util
-ML_AVAILABLE = (
-    importlib.util.find_spec("sklearn.feature_extraction.text") is not None and
-    importlib.util.find_spec("sklearn.metrics.pairwise") is not None
-)
+try:
+    ML_AVAILABLE = (
+        importlib.util.find_spec("sklearn.feature_extraction.text") is not None and
+        importlib.util.find_spec("sklearn.metrics.pairwise") is not None
+    )
+except (ImportError, ModuleNotFoundError):
+    ML_AVAILABLE = False
 
 logger = logging.getLogger(__name__)
 
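Why the guard matters: for a dotted name, importlib.util.find_spec imports the parent package before locating the submodule, so a missing or broken scikit-learn install raises ModuleNotFoundError instead of returning None. A minimal standalone sketch of the same guarded probe (probe_module is a hypothetical helper, not part of this package):

import importlib.util

def probe_module(dotted_name: str) -> bool:
    """Return True only if the module spec can be located without raising."""
    try:
        # For a dotted name, find_spec imports the parent package first,
        # so a missing top-level package raises ModuleNotFoundError here
        # instead of returning None.
        return importlib.util.find_spec(dotted_name) is not None
    except (ImportError, ModuleNotFoundError):
        return False

ML_AVAILABLE = (probe_module("sklearn.feature_extraction.text")
                and probe_module("sklearn.metrics.pairwise"))
print(ML_AVAILABLE)  # False in an environment without scikit-learn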
@@ -73,13 +76,27 @@ class TextSimilarityAnalyzer:
             r'\b(\w+)-(?:service|api|app|server|db)\b',  # service-api, auth-service
             r'\b(\w+)(?:service|api|app|server|db)\b',   # paymentapi, authservice
             r'\b(\w+)\.(?:service|api|app|com)\b',       # auth.service, api.com
+            r'\b(\w+)\s+(?:api|service|app|server|db)\b',  # payment api, auth service
+        ]
+
+        # Known service names (exact matches)
+        known_services = [
+            'elasticsearch', 'elastic', 'kibana', 'redis', 'postgres', 'mysql',
+            'mongodb', 'kafka', 'rabbitmq', 'nginx', 'apache', 'docker', 'kubernetes'
         ]
 
         text_lower = text.lower()
+
+        # Extract pattern-based services
         for pattern in service_patterns:
             matches = re.findall(pattern, text_lower)
             services.extend(matches)
 
+        # Extract known services (with word boundaries to avoid false positives)
+        for service in known_services:
+            if re.search(r'\b' + re.escape(service) + r'\b', text_lower):
+                services.append(service)
+
         # Remove duplicates while preserving order
         return list(dict.fromkeys(services))
 
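The extractor now layers an exact-match allow-list on top of the regex patterns. A self-contained sketch of the combined behavior, with the lists abbreviated from the hunk above (an illustration, not the package's public API):

import re

service_patterns = [
    r'\b(\w+)-(?:service|api|app|server|db)\b',    # auth-service
    r'\b(\w+)(?:service|api|app|server|db)\b',     # paymentapi
    r'\b(\w+)\.(?:service|api|app|com)\b',         # auth.service
    r'\b(\w+)\s+(?:api|service|app|server|db)\b',  # payment api
]
known_services = ['elasticsearch', 'elastic', 'redis', 'postgres']

def extract_services(text: str) -> list[str]:
    text_lower = text.lower()
    services = []
    for pattern in service_patterns:
        services.extend(re.findall(pattern, text_lower))
    for service in known_services:
        # \b on both sides: "elastic" does not fire inside "elasticsearch"
        if re.search(r'\b' + re.escape(service) + r'\b', text_lower):
            services.append(service)
    return list(dict.fromkeys(services))  # dedupe, order-preserving

print(extract_services("Payment API down, elasticsearch cluster red"))
# ['payment', 'elasticsearch']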
@@ -128,18 +145,26 @@ class TextSimilarityAnalyzer:
         """Combine incident title, description, and other text fields."""
         text_parts = []
 
-        # Get text from incident attributes
+        # Get text from incident attributes (preferred)
         attributes = incident.get('attributes', {})
-        text_parts.append(attributes.get('title', ''))
-        text_parts.append(attributes.get('summary', ''))
-        text_parts.append(attributes.get('description', ''))
-
-        # Also check root level for backward compatibility
-        text_parts.append(incident.get('title', ''))
-        text_parts.append(incident.get('summary', ''))
-        text_parts.append(incident.get('description', ''))
-
-        combined = ' '.join([part for part in text_parts if part])
+        title = attributes.get('title', '')
+        summary = attributes.get('summary', '')
+        description = attributes.get('description', '')
+
+        # Fallback to root level if attributes are empty
+        if not title:
+            title = incident.get('title', '')
+        if not summary:
+            summary = incident.get('summary', '')
+        if not description:
+            description = incident.get('description', '')
+
+        # Add non-empty parts, avoiding duplication
+        for part in [title, summary, description]:
+            if part and part not in text_parts:
+                text_parts.append(part)
+
+        combined = ' '.join(text_parts)
         return self.preprocess_text(combined)
 
     def _calculate_tfidf_similarity(self, incidents: List[Dict], target_incident: Dict,
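The net effect of this rewrite: a root-level field is used only when the corresponding attributes field is empty, so incidents that carry the same title in both places are no longer counted twice. A small sketch of that fallback rule in isolation (merge_incident_fields is a hypothetical name, assuming the same dict shape):

def merge_incident_fields(incident: dict) -> str:
    attributes = incident.get('attributes', {})
    parts = []
    for field in ('title', 'summary', 'description'):
        # Prefer the nested attribute; fall back to the root key only if empty.
        value = attributes.get(field, '') or incident.get(field, '')
        if value and value not in parts:  # skip duplicates across fields
            parts.append(value)
    return ' '.join(parts)

incident = {"title": "DB outage", "attributes": {"title": "DB outage"}}
print(merge_incident_fields(incident))  # "DB outage" -- no longer doubled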
@@ -175,7 +200,15 @@ class TextSimilarityAnalyzer:
             service_bonus = len(set(target_services) & set(incident_services)) * 0.1
             error_bonus = len(set(target_errors) & set(incident_errors)) * 0.15
 
-            final_score = min(1.0, similarities[i] + service_bonus + error_bonus)
+            # Exact match bonus for identical preprocessed text
+            exact_match_bonus = 0.0
+            if target_text and incident_texts[i] and target_text.strip() == incident_texts[i].strip():
+                exact_match_bonus = 0.3  # Strong bonus for exact matches
+
+            # Partial matching bonus using fuzzy keyword similarity
+            partial_bonus = self._calculate_partial_similarity_bonus(target_text, incident_texts[i])
+
+            final_score = min(1.0, similarities[i] + service_bonus + error_bonus + exact_match_bonus + partial_bonus)
 
             results.append(IncidentSimilarity(
                 incident_id=str(incident.get('id', '')),
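To make the score composition concrete, here is the arithmetic with illustrative inputs (the weights are the ones in the hunk above; the input values are made up):

# Illustrative values only; weights copied from the hunk above.
cosine = 0.45                 # TF-IDF similarity for incident i
service_bonus = 1 * 0.1       # one shared service name
error_bonus = 1 * 0.15        # one shared error pattern
exact_match_bonus = 0.3       # identical preprocessed text
partial_bonus = 0.10          # fuzzy keyword overlap (itself capped at 0.15)

final_score = min(1.0, cosine + service_bonus + error_bonus
                  + exact_match_bonus + partial_bonus)
print(final_score)  # 1.0 -- the min() cap keeps stacked bonuses in range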
@@ -212,7 +245,15 @@ class TextSimilarityAnalyzer:
             service_bonus = len(set(target_services) & set(incident_services)) * 0.2
             error_bonus = len(set(target_errors) & set(incident_errors)) * 0.25
 
-            final_score = min(1.0, word_similarity + service_bonus + error_bonus)
+            # Exact match bonus for identical preprocessed text
+            exact_match_bonus = 0.0
+            if target_text and incident_text and target_text.strip() == incident_text.strip():
+                exact_match_bonus = 0.4  # Strong bonus for exact matches in keyword mode
+
+            # Partial matching bonus using fuzzy keyword similarity
+            partial_bonus = self._calculate_partial_similarity_bonus(target_text, incident_text)
+
+            final_score = min(1.0, word_similarity + service_bonus + error_bonus + exact_match_bonus + partial_bonus)
 
             if final_score > 0.15:  # Only include reasonable matches
                 results.append(IncidentSimilarity(
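This is the keyword-mode path, presumably the fallback when scikit-learn (and thus TF-IDF) is unavailable; the bonuses are weighted more heavily here (0.2/0.25/0.4 versus 0.1/0.15/0.3 above) to compensate for the weaker base signal, and the 0.15 floor then drops noise matches. A quick worked check of the floor, with made-up numbers:

# Illustrative numbers only: weak word overlap, no shared services/errors,
# no exact match, small fuzzy bonus.
final_score = min(1.0, 0.08 + 0.0 + 0.0 + 0.0 + 0.05)
print(final_score > 0.15)  # False -- this match is dropped from the results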
@@ -228,14 +269,94 @@ class TextSimilarityAnalyzer:
         return results
 
     def _extract_common_keywords(self, text1: str, text2: str) -> List[str]:
-        """Extract common meaningful keywords between two texts."""
+        """Extract common meaningful keywords between two texts with fuzzy matching."""
+        words1 = set(text1.split())
+        words2 = set(text2.split())
+
+        # Exact matches
+        exact_common = words1 & words2
+
+        # Fuzzy matches for partial similarity
+        fuzzy_common = []
+        for word1 in words1:
+            if len(word1) > 3:  # Only check longer words
+                for word2 in words2:
+                    if len(word2) > 3 and word1 != word2:
+                        # Check if words share significant substring (fuzzy matching)
+                        if self._words_similar(word1, word2):
+                            fuzzy_common.append(f"{word1}~{word2}")
+
+        # Combine exact and fuzzy matches
+        all_matches = list(exact_common) + fuzzy_common
+        meaningful = [word for word in all_matches if len(word.split('~')[0]) > 2]
+        return meaningful[:8]  # Increased to show more matches
+
+    def _words_similar(self, word1: str, word2: str) -> bool:
+        """Check if two words are similar enough to be considered related."""
+        # Handle common variations
+        variations = {
+            'elastic': ['elasticsearch', 'elk'],
+            'payment': ['payments', 'pay', 'billing'],
+            'database': ['db', 'postgres', 'mysql', 'mongo'],
+            'timeout': ['timeouts', 'timed-out', 'timing-out'],
+            'service': ['services', 'svc', 'api', 'app'],
+            'error': ['errors', 'err', 'failure', 'failed', 'failing'],
+            'down': ['outage', 'offline', 'unavailable']
+        }
+
+        # Check if words are variations of each other
+        for base, variants in variations.items():
+            if (word1 == base and word2 in variants) or (word2 == base and word1 in variants):
+                return True
+            if word1 in variants and word2 in variants:
+                return True
+
+        # Check substring similarity (at least 70% overlap for longer words)
+        if len(word1) >= 5 and len(word2) >= 5:
+            shorter = min(word1, word2, key=len)
+            longer = max(word1, word2, key=len)
+            if shorter in longer and len(shorter) / len(longer) >= 0.7:
+                return True
+
+        # Check if one word starts with the other (for prefixed services)
+        if len(word1) >= 4 and len(word2) >= 4:
+            if word1.startswith(word2) or word2.startswith(word1):
+                return True
+
+        return False
+
+    def _calculate_partial_similarity_bonus(self, text1: str, text2: str) -> float:
+        """Calculate bonus for partial/fuzzy keyword matches."""
+        if not text1 or not text2:
+            return 0.0
+
         words1 = set(text1.split())
         words2 = set(text2.split())
-        common = words1 & words2
 
-        # Filter out very short words and return top matches
-        meaningful = [word for word in common if len(word) > 2]
-        return meaningful[:5]
+        fuzzy_matches = 0
+
+        # Count meaningful words that could be compared
+        meaningful_words1 = [w for w in words1 if len(w) > 3]
+        meaningful_words2 = [w for w in words2 if len(w) > 3]
+
+        if not meaningful_words1 or not meaningful_words2:
+            return 0.0
+
+        # Count fuzzy matches
+        for word1 in meaningful_words1:
+            for word2 in meaningful_words2:
+                if word1 != word2 and self._words_similar(word1, word2):
+                    fuzzy_matches += 1
+                    break  # Only count each target word once
+
+        # Calculate bonus based on fuzzy match ratio
+        if fuzzy_matches > 0:
+            # Use the smaller meaningful word set as denominator for conservative bonus
+            total_possible_matches = min(len(meaningful_words1), len(meaningful_words2))
+            bonus_ratio = fuzzy_matches / total_possible_matches
+            return min(0.15, bonus_ratio * 0.3)  # Max 0.15 bonus for partial matches
+
+        return 0.0
 
     def _calculate_resolution_time(self, incident: Dict) -> Optional[float]:
         """Calculate resolution time in hours if timestamps are available."""
{rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/integration/local/test_smart_tools.py
@@ -107,10 +107,14 @@ class TestSmartToolsIntegration:
         payment_incidents = [inc for inc in similar_incidents if "payment" in inc.title.lower()]
         assert len(payment_incidents) >= 2  # Should find both payment incidents
 
-        # Check that similarity scores are reasonable
+        # Check that similarity scores are reasonable (updated for partial matching bonuses)
         top_incident = similar_incidents[0]
         assert top_incident.similarity_score > 0.1
         assert top_incident.incident_id in ["1001", "1003"]  # Should be a payment incident
+
+        # Check that matched services are detected
+        payment_matches = [inc for inc in similar_incidents if "payment" in inc.matched_services]
+        assert len(payment_matches) > 0  # Should detect payment service matches
 
     async def test_suggest_solutions_with_incident_id(self, server_with_smart_tools, mock_target_incident, mock_historical_incidents):
         """Test solution suggestions using incident ID."""
@@ -250,7 +254,9 @@ class TestSmartToolsIntegration:
             ("authapi connection failed", ["auth"]),
             ("user.service timeout", ["user"]),
             ("Error in notification-api and billing-service", ["notification", "billing"]),
-            ("postgres-db connection issue", ["postgres"])
+            ("postgres-db connection issue", ["postgres"]),
+            ("elasticsearch cluster failing", ["elasticsearch"]),  # New test for known services
+            ("elastic search timeout", ["elastic"]),  # Test partial matching
         ]
 
         for text, expected_services in test_cases:
@@ -258,4 +264,65 @@ class TestSmartToolsIntegration:
 
             for expected_service in expected_services:
                 assert expected_service in services, \
-                    f"Expected service '{expected_service}' not found in {services} for text '{text}'"
+                    f"Expected service '{expected_service}' not found in {services} for text '{text}'"
+
+    def test_partial_matching_improvements(self):
+        """Test partial/fuzzy matching for related but not identical incidents."""
+        from rootly_mcp_server.smart_utils import TextSimilarityAnalyzer
+
+        analyzer = TextSimilarityAnalyzer()
+
+        # Test cases for partial matching
+        target_incident = {
+            "id": "target",
+            "attributes": {
+                "title": "Payment API timeout errors",
+                "summary": "Users experiencing payment failures due to API timeouts"
+            }
+        }
+
+        historical_incidents = [
+            {
+                "id": "similar1",
+                "attributes": {
+                    "title": "Payment service timeouts",
+                    "summary": "Payments API timing out for users"
+                }
+            },
+            {
+                "id": "similar2",
+                "attributes": {
+                    "title": "Billing API errors",
+                    "summary": "Users unable to complete payments due to errors"
+                }
+            },
+            {
+                "id": "unrelated",
+                "attributes": {
+                    "title": "Auth service down",
+                    "summary": "Login failures for all users"
+                }
+            }
+        ]
+
+        similar_incidents = analyzer.calculate_similarity(historical_incidents, target_incident)
+
+        # Should find payment-related incidents with partial matching
+        payment_related = [inc for inc in similar_incidents
+                           if inc.incident_id in ["similar1", "similar2"]]
+        auth_related = [inc for inc in similar_incidents
+                        if inc.incident_id == "unrelated"]
+
+        # Payment incidents should have higher scores than auth incident
+        if payment_related and auth_related:
+            max_payment_score = max(inc.similarity_score for inc in payment_related)
+            max_auth_score = max(inc.similarity_score for inc in auth_related)
+            assert max_payment_score > max_auth_score, \
+                f"Payment similarity ({max_payment_score}) should be higher than auth ({max_auth_score})"
+
+        # Check that fuzzy keywords are detected
+        if payment_related:
+            top_match = max(payment_related, key=lambda x: x.similarity_score)
+            # Should detect partial matches like "payment~payments" or "timeout~timeouts"
+            # Note: This might be 0 if exact matches exist, which is also valid
+            assert top_match.similarity_score > 0.1, "Should have reasonable similarity score for payment incidents"
{rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.12}/tests/unit/test_smart_utils.py
@@ -247,7 +247,7 @@ class TestTextSimilarityAnalyzer:
         assert "timeout" in common
         assert "error" in common
         assert "service" in common
-        assert len(common) <= 5  # Should limit to top 5
+        assert len(common) <= 8  # Should limit to top 8 (increased for fuzzy matching)
 
 
 class TestSolutionExtractor: