rootly-mcp-server 2.0.11__tar.gz → 2.0.13__tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (31)
  1. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/PKG-INFO +1 -1
  2. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/pyproject.toml +1 -1
  3. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/src/rootly_mcp_server/__init__.py +1 -1
  4. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/src/rootly_mcp_server/server.py +69 -2
  5. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/src/rootly_mcp_server/smart_utils.py +143 -22
  6. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/integration/local/test_smart_tools.py +70 -3
  7. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/unit/test_server.py +90 -0
  8. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/unit/test_smart_utils.py +1 -1
  9. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/uv.lock +1 -1
  10. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/.github/workflows/pypi-release.yml +0 -0
  11. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/.github/workflows/test.yml +0 -0
  12. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/.gitignore +0 -0
  13. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/.semaphore/deploy.yml +0 -0
  14. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/.semaphore/semaphore.yml +0 -0
  15. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/.semaphore/update-task-definition.sh +0 -0
  16. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/Dockerfile +0 -0
  17. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/LICENSE +0 -0
  18. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/README.md +0 -0
  19. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/rootly-mcp-server-demo.gif +0 -0
  20. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/rootly_openapi.json +0 -0
  21. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/src/rootly_mcp_server/__main__.py +0 -0
  22. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/src/rootly_mcp_server/client.py +0 -0
  23. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/src/rootly_mcp_server/data/__init__.py +0 -0
  24. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/src/rootly_mcp_server/utils.py +0 -0
  25. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/README.md +0 -0
  26. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/conftest.py +0 -0
  27. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/integration/local/test_basic.py +0 -0
  28. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/integration/remote/test_essential.py +0 -0
  29. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/test_client.py +0 -0
  30. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/unit/test_authentication.py +0 -0
  31. {rootly_mcp_server-2.0.11 → rootly_mcp_server-2.0.13}/tests/unit/test_tools.py +0 -0
--- rootly_mcp_server-2.0.11/PKG-INFO
+++ rootly_mcp_server-2.0.13/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rootly-mcp-server
- Version: 2.0.11
+ Version: 2.0.13
  Summary: A Model Context Protocol server for Rootly APIs using OpenAPI spec
  Project-URL: Homepage, https://github.com/Rootly-AI-Labs/Rootly-MCP-server
  Project-URL: Issues, https://github.com/Rootly-AI-Labs/Rootly-MCP-server/issues
--- rootly_mcp_server-2.0.11/pyproject.toml
+++ rootly_mcp_server-2.0.13/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "rootly-mcp-server"
- version = "2.0.11"
+ version = "2.0.13"
  description = "A Model Context Protocol server for Rootly APIs using OpenAPI spec"
  readme = "README.md"
  requires-python = ">=3.12"
--- rootly_mcp_server-2.0.11/src/rootly_mcp_server/__init__.py
+++ rootly_mcp_server-2.0.13/src/rootly_mcp_server/__init__.py
@@ -13,7 +13,7 @@ Features:
  from .server import RootlyMCPServer
  from .client import RootlyClient
 
- __version__ = "2.0.1"
+ __version__ = "2.0.12"
  __all__ = [
      'RootlyMCPServer',
      'RootlyClient',
--- rootly_mcp_server-2.0.11/src/rootly_mcp_server/server.py
+++ rootly_mcp_server-2.0.13/src/rootly_mcp_server/server.py
@@ -391,7 +391,7 @@ def create_rootly_mcp_server(
      # Single page mode
      if page_number > 0:
          params = {
-             "page[size]": min(page_size, 5), # Keep responses very small to avoid errors
+             "page[size]": page_size, # Use requested page size (already limited to max 20)
              "page[number]": page_number,
              "include": "",
          }
@@ -409,7 +409,7 @@ def create_rootly_mcp_server(
      # Multi-page mode (page_number = 0)
      all_incidents = []
      current_page = 1
-     effective_page_size = min(page_size, 5) # Keep responses very small to avoid errors
+     effective_page_size = page_size # Use requested page size (already limited to max 20)
      max_pages = 10 # Safety limit to prevent infinite loops
 
      try:
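
Editor's note (not part of the diff): the multi-page path these limits guard is a bounded loop. Below is a minimal sketch of that shape, with a hypothetical fetch_page stub standing in for the real Rootly API call:

    from typing import List

    def fetch_page(size: int, number: int) -> List[dict]:
        # Hypothetical stub for the Rootly API call; data runs out after page 3.
        return [{"id": f"{number}-{i}"} for i in range(size)] if number <= 3 else []

    all_incidents: List[dict] = []
    current_page = 1
    effective_page_size = 20  # requested size now honored, capped at 20 by the spec
    max_pages = 10            # safety limit to prevent infinite loops

    while current_page <= max_pages:
        batch = fetch_page(size=effective_page_size, number=current_page)
        if not batch:
            break
        all_incidents.extend(batch)
        current_page += 1

    print(len(all_incidents))  # 60 here; worst case 10 pages x 20 = 200 items

With the old min(page_size, 5) clamp the worst case was 50 items per call; trusting the schema-enforced maximum of 20 raises that to 200.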
@@ -922,6 +922,73 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                      "description": param.get("description", "Parameter value")
                  }
 
+             # Add/modify pagination limits to alerts and incident-related endpoints to prevent infinite loops
+             if method.lower() == "get" and ("alerts" in path.lower() or "incident" in path.lower()):
+                 if "parameters" not in operation:
+                     operation["parameters"] = []
+
+                 # Find existing pagination parameters and update them with limits
+                 page_size_param = None
+                 page_number_param = None
+
+                 for param in operation["parameters"]:
+                     if param.get("name") == "page[size]":
+                         page_size_param = param
+                     elif param.get("name") == "page[number]":
+                         page_number_param = param
+
+                 # Update or add page[size] parameter with limits
+                 if page_size_param:
+                     # Update existing parameter with limits
+                     if "schema" not in page_size_param:
+                         page_size_param["schema"] = {}
+                     page_size_param["schema"].update({
+                         "type": "integer",
+                         "default": 10,
+                         "minimum": 1,
+                         "maximum": 20,
+                         "description": "Number of results per page (max: 20)"
+                     })
+                 else:
+                     # Add new parameter
+                     operation["parameters"].append({
+                         "name": "page[size]",
+                         "in": "query",
+                         "required": False,
+                         "schema": {
+                             "type": "integer",
+                             "default": 10,
+                             "minimum": 1,
+                             "maximum": 20,
+                             "description": "Number of results per page (max: 20)"
+                         }
+                     })
+
+                 # Update or add page[number] parameter with defaults
+                 if page_number_param:
+                     # Update existing parameter
+                     if "schema" not in page_number_param:
+                         page_number_param["schema"] = {}
+                     page_number_param["schema"].update({
+                         "type": "integer",
+                         "default": 1,
+                         "minimum": 1,
+                         "description": "Page number to retrieve"
+                     })
+                 else:
+                     # Add new parameter
+                     operation["parameters"].append({
+                         "name": "page[number]",
+                         "in": "query",
+                         "required": False,
+                         "schema": {
+                             "type": "integer",
+                             "default": 1,
+                             "minimum": 1,
+                             "description": "Page number to retrieve"
+                         }
+                     })
+
      # Also clean up any remaining broken references in components
      if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
          schemas = filtered_spec["components"]["schemas"]
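
Editor's note (not part of the diff): an illustrative sketch of what the injected schema buys. Once the filtered spec advertises the bounds, an oversized page[size] can be clamped before a request goes out; only the field names here mirror the code above, the rest is hypothetical:

    # Hypothetical minimal operation object.
    operation = {"operationId": "listIncidents", "parameters": []}
    operation["parameters"].append({
        "name": "page[size]",
        "in": "query",
        "required": False,
        "schema": {"type": "integer", "default": 10, "minimum": 1, "maximum": 20},
    })

    schema = operation["parameters"][0]["schema"]
    requested = 100  # an over-eager client
    effective = max(schema["minimum"], min(requested, schema["maximum"]))
    print(effective)  # 20

Since the filtered spec is what tool generation consumes, the bounds presumably travel with every generated tool rather than being re-implemented per handler.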
--- rootly_mcp_server-2.0.11/src/rootly_mcp_server/smart_utils.py
+++ rootly_mcp_server-2.0.13/src/rootly_mcp_server/smart_utils.py
@@ -13,10 +13,13 @@ from datetime import datetime
 
  # Check ML library availability
  import importlib.util
- ML_AVAILABLE = (
-     importlib.util.find_spec("sklearn.feature_extraction.text") is not None and
-     importlib.util.find_spec("sklearn.metrics.pairwise") is not None
- )
+ try:
+     ML_AVAILABLE = (
+         importlib.util.find_spec("sklearn.feature_extraction.text") is not None and
+         importlib.util.find_spec("sklearn.metrics.pairwise") is not None
+     )
+ except (ImportError, ModuleNotFoundError):
+     ML_AVAILABLE = False
 
  logger = logging.getLogger(__name__)
 
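Editor's note (not part of the diff): the try/except is not redundant. For a dotted name, importlib.util.find_spec imports the parent package to resolve the submodule, so it raises ModuleNotFoundError when scikit-learn itself is absent instead of returning None. A standalone sketch of the pattern:

    import importlib.util

    def safe_find(name: str) -> bool:
        # find_spec("pkg.sub") imports "pkg" first; a missing parent raises
        # ModuleNotFoundError rather than yielding None.
        try:
            return importlib.util.find_spec(name) is not None
        except (ImportError, ModuleNotFoundError):
            return False

    print(safe_find("sklearn.metrics.pairwise"))  # False when scikit-learn is not installed
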
@@ -73,13 +76,27 @@ class TextSimilarityAnalyzer:
              r'\b(\w+)-(?:service|api|app|server|db)\b', # service-api, auth-service
              r'\b(\w+)(?:service|api|app|server|db)\b', # paymentapi, authservice
              r'\b(\w+)\.(?:service|api|app|com)\b', # auth.service, api.com
+             r'\b(\w+)\s+(?:api|service|app|server|db)\b', # payment api, auth service
+         ]
+
+         # Known service names (exact matches)
+         known_services = [
+             'elasticsearch', 'elastic', 'kibana', 'redis', 'postgres', 'mysql',
+             'mongodb', 'kafka', 'rabbitmq', 'nginx', 'apache', 'docker', 'kubernetes'
          ]
 
          text_lower = text.lower()
+
+         # Extract pattern-based services
          for pattern in service_patterns:
              matches = re.findall(pattern, text_lower)
              services.extend(matches)
 
+         # Extract known services (with word boundaries to avoid false positives)
+         for service in known_services:
+             if re.search(r'\b' + re.escape(service) + r'\b', text_lower):
+                 services.append(service)
+
          # Remove duplicates while preserving order
          return list(dict.fromkeys(services))
 
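Editor's note (not part of the diff): a quick sketch of what the new whitespace pattern and the known-service list each catch. The enclosing method name lies outside this hunk, so plain re calls are shown instead of the package API:

    import re

    text = "payment api timeouts after the elasticsearch cluster restarted".lower()

    # New pattern: service word separated by whitespace from its suffix.
    pattern = r'\b(\w+)\s+(?:api|service|app|server|db)\b'
    print(re.findall(pattern, text))  # ['payment']

    # Known-service scan with word boundaries, as added above.
    known = ['elasticsearch', 'redis']
    print([s for s in known if re.search(r'\b' + re.escape(s) + r'\b', text)])
    # ['elasticsearch']
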
@@ -128,18 +145,26 @@ class TextSimilarityAnalyzer:
          """Combine incident title, description, and other text fields."""
          text_parts = []
 
-         # Get text from incident attributes
+         # Get text from incident attributes (preferred)
          attributes = incident.get('attributes', {})
-         text_parts.append(attributes.get('title', ''))
-         text_parts.append(attributes.get('summary', ''))
-         text_parts.append(attributes.get('description', ''))
-
-         # Also check root level for backward compatibility
-         text_parts.append(incident.get('title', ''))
-         text_parts.append(incident.get('summary', ''))
-         text_parts.append(incident.get('description', ''))
-
-         combined = ' '.join([part for part in text_parts if part])
+         title = attributes.get('title', '')
+         summary = attributes.get('summary', '')
+         description = attributes.get('description', '')
+
+         # Fallback to root level if attributes are empty
+         if not title:
+             title = incident.get('title', '')
+         if not summary:
+             summary = incident.get('summary', '')
+         if not description:
+             description = incident.get('description', '')
+
+         # Add non-empty parts, avoiding duplication
+         for part in [title, summary, description]:
+             if part and part not in text_parts:
+                 text_parts.append(part)
+
+         combined = ' '.join(text_parts)
          return self.preprocess_text(combined)
 
      def _calculate_tfidf_similarity(self, incidents: List[Dict], target_incident: Dict,
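
Editor's note (not part of the diff): an illustration of the behavior change with a hypothetical incident payload. The old code appended attribute-level and root-level fields unconditionally, so a title mirrored at both levels was counted twice; the new fallback-plus-dedup keeps each part once:

    incident = {
        "title": "Payment API down",
        "attributes": {"title": "Payment API down", "summary": "Checkout failing"},
    }

    attrs = incident.get("attributes", {})
    title = attrs.get("title", "") or incident.get("title", "")
    summary = attrs.get("summary", "") or incident.get("summary", "")

    parts = []
    for part in (title, summary):
        if part and part not in parts:
            parts.append(part)

    print(" ".join(parts))
    # old behavior: "Payment API down Checkout failing Payment API down"
    # new behavior: "Payment API down Checkout failing"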
@@ -175,7 +200,15 @@
              service_bonus = len(set(target_services) & set(incident_services)) * 0.1
              error_bonus = len(set(target_errors) & set(incident_errors)) * 0.15
 
-             final_score = min(1.0, similarities[i] + service_bonus + error_bonus)
+             # Exact match bonus for identical preprocessed text
+             exact_match_bonus = 0.0
+             if target_text and incident_texts[i] and target_text.strip() == incident_texts[i].strip():
+                 exact_match_bonus = 0.3 # Strong bonus for exact matches
+
+             # Partial matching bonus using fuzzy keyword similarity
+             partial_bonus = self._calculate_partial_similarity_bonus(target_text, incident_texts[i])
+
+             final_score = min(1.0, similarities[i] + service_bonus + error_bonus + exact_match_bonus + partial_bonus)
 
              results.append(IncidentSimilarity(
                  incident_id=str(incident.get('id', '')),
@@ -212,7 +245,15 @@
              service_bonus = len(set(target_services) & set(incident_services)) * 0.2
              error_bonus = len(set(target_errors) & set(incident_errors)) * 0.25
 
-             final_score = min(1.0, word_similarity + service_bonus + error_bonus)
+             # Exact match bonus for identical preprocessed text
+             exact_match_bonus = 0.0
+             if target_text and incident_text and target_text.strip() == incident_text.strip():
+                 exact_match_bonus = 0.4 # Strong bonus for exact matches in keyword mode
+
+             # Partial matching bonus using fuzzy keyword similarity
+             partial_bonus = self._calculate_partial_similarity_bonus(target_text, incident_text)
+
+             final_score = min(1.0, word_similarity + service_bonus + error_bonus + exact_match_bonus + partial_bonus)
 
              if final_score > 0.15: # Only include reasonable matches
                  results.append(IncidentSimilarity(
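
Editor's note (not part of the diff): a worked example with hypothetical numbers showing how the keyword-mode components combine under the 1.0 cap:

    word_similarity = 0.30     # base word-overlap score
    service_bonus = 1 * 0.2    # one shared service
    error_bonus = 0 * 0.25     # no shared error patterns
    exact_match_bonus = 0.0    # texts differ
    partial_bonus = 0.10       # fuzzy keyword matches (itself capped at 0.15)

    final_score = min(1.0, word_similarity + service_bonus + error_bonus
                      + exact_match_bonus + partial_bonus)
    print(final_score)  # 0.6 -- kept, since it clears the 0.15 threshold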
@@ -228,14 +269,94 @@
          return results
 
      def _extract_common_keywords(self, text1: str, text2: str) -> List[str]:
-         """Extract common meaningful keywords between two texts."""
+         """Extract common meaningful keywords between two texts with fuzzy matching."""
+         words1 = set(text1.split())
+         words2 = set(text2.split())
+
+         # Exact matches
+         exact_common = words1 & words2
+
+         # Fuzzy matches for partial similarity
+         fuzzy_common = []
+         for word1 in words1:
+             if len(word1) > 3: # Only check longer words
+                 for word2 in words2:
+                     if len(word2) > 3 and word1 != word2:
+                         # Check if words share significant substring (fuzzy matching)
+                         if self._words_similar(word1, word2):
+                             fuzzy_common.append(f"{word1}~{word2}")
+
+         # Combine exact and fuzzy matches
+         all_matches = list(exact_common) + fuzzy_common
+         meaningful = [word for word in all_matches if len(word.split('~')[0]) > 2]
+         return meaningful[:8] # Increased to show more matches
+
+     def _words_similar(self, word1: str, word2: str) -> bool:
+         """Check if two words are similar enough to be considered related."""
+         # Handle common variations
+         variations = {
+             'elastic': ['elasticsearch', 'elk'],
+             'payment': ['payments', 'pay', 'billing'],
+             'database': ['db', 'postgres', 'mysql', 'mongo'],
+             'timeout': ['timeouts', 'timed-out', 'timing-out'],
+             'service': ['services', 'svc', 'api', 'app'],
+             'error': ['errors', 'err', 'failure', 'failed', 'failing'],
+             'down': ['outage', 'offline', 'unavailable']
+         }
+
+         # Check if words are variations of each other
+         for base, variants in variations.items():
+             if (word1 == base and word2 in variants) or (word2 == base and word1 in variants):
+                 return True
+             if word1 in variants and word2 in variants:
+                 return True
+
+         # Check substring similarity (at least 70% overlap for longer words)
+         if len(word1) >= 5 and len(word2) >= 5:
+             shorter = min(word1, word2, key=len)
+             longer = max(word1, word2, key=len)
+             if shorter in longer and len(shorter) / len(longer) >= 0.7:
+                 return True
+
+         # Check if one word starts with the other (for prefixed services)
+         if len(word1) >= 4 and len(word2) >= 4:
+             if word1.startswith(word2) or word2.startswith(word1):
+                 return True
+
+         return False
+
+     def _calculate_partial_similarity_bonus(self, text1: str, text2: str) -> float:
+         """Calculate bonus for partial/fuzzy keyword matches."""
+         if not text1 or not text2:
+             return 0.0
+
          words1 = set(text1.split())
          words2 = set(text2.split())
-         common = words1 & words2
 
-         # Filter out very short words and return top matches
-         meaningful = [word for word in common if len(word) > 2]
-         return meaningful[:5]
+         fuzzy_matches = 0
+
+         # Count meaningful words that could be compared
+         meaningful_words1 = [w for w in words1 if len(w) > 3]
+         meaningful_words2 = [w for w in words2 if len(w) > 3]
+
+         if not meaningful_words1 or not meaningful_words2:
+             return 0.0
+
+         # Count fuzzy matches
+         for word1 in meaningful_words1:
+             for word2 in meaningful_words2:
+                 if word1 != word2 and self._words_similar(word1, word2):
+                     fuzzy_matches += 1
+                     break # Only count each target word once
+
+         # Calculate bonus based on fuzzy match ratio
+         if fuzzy_matches > 0:
+             # Use the smaller meaningful word set as denominator for conservative bonus
+             total_possible_matches = min(len(meaningful_words1), len(meaningful_words2))
+             bonus_ratio = fuzzy_matches / total_possible_matches
+             return min(0.15, bonus_ratio * 0.3) # Max 0.15 bonus for partial matches
+
+         return 0.0
 
      def _calculate_resolution_time(self, incident: Dict) -> Optional[float]:
          """Calculate resolution time in hours if timestamps are available."""
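
Editor's note (not part of the diff): the substring and prefix rules can be exercised in isolation. A standalone sketch that mirrors, rather than imports, the package method:

    def words_similar(w1: str, w2: str) -> bool:
        # 70% substring-overlap rule for words of length >= 5
        if len(w1) >= 5 and len(w2) >= 5:
            shorter, longer = sorted((w1, w2), key=len)
            if shorter in longer and len(shorter) / len(longer) >= 0.7:
                return True
        # Prefix rule for words of length >= 4
        if len(w1) >= 4 and len(w2) >= 4:
            return w1.startswith(w2) or w2.startswith(w1)
        return False

    print(words_similar("timeout", "timeouts"))       # True: 7/8 = 0.875 overlap
    print(words_similar("elastic", "elasticsearch"))  # True via prefix (7/13 is below 0.7)
    print(words_similar("error", "outage"))           # False without the variations table

In the real method the variations table is consulted first, so pairs like ('down', 'outage') still match there.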
--- rootly_mcp_server-2.0.11/tests/integration/local/test_smart_tools.py
+++ rootly_mcp_server-2.0.13/tests/integration/local/test_smart_tools.py
@@ -107,10 +107,14 @@ class TestSmartToolsIntegration:
          payment_incidents = [inc for inc in similar_incidents if "payment" in inc.title.lower()]
          assert len(payment_incidents) >= 2 # Should find both payment incidents
 
-         # Check that similarity scores are reasonable
+         # Check that similarity scores are reasonable (updated for partial matching bonuses)
          top_incident = similar_incidents[0]
          assert top_incident.similarity_score > 0.1
          assert top_incident.incident_id in ["1001", "1003"] # Should be a payment incident
+
+         # Check that matched services are detected
+         payment_matches = [inc for inc in similar_incidents if "payment" in inc.matched_services]
+         assert len(payment_matches) > 0 # Should detect payment service matches
 
      async def test_suggest_solutions_with_incident_id(self, server_with_smart_tools, mock_target_incident, mock_historical_incidents):
          """Test solution suggestions using incident ID."""
@@ -250,7 +254,9 @@
              ("authapi connection failed", ["auth"]),
              ("user.service timeout", ["user"]),
              ("Error in notification-api and billing-service", ["notification", "billing"]),
-             ("postgres-db connection issue", ["postgres"])
+             ("postgres-db connection issue", ["postgres"]),
+             ("elasticsearch cluster failing", ["elasticsearch"]), # New test for known services
+             ("elastic search timeout", ["elastic"]), # Test partial matching
          ]
 
          for text, expected_services in test_cases:
@@ -258,4 +264,65 @@
 
          for expected_service in expected_services:
              assert expected_service in services, \
-                 f"Expected service '{expected_service}' not found in {services} for text '{text}'"
+                 f"Expected service '{expected_service}' not found in {services} for text '{text}'"
+
+     def test_partial_matching_improvements(self):
+         """Test partial/fuzzy matching for related but not identical incidents."""
+         from rootly_mcp_server.smart_utils import TextSimilarityAnalyzer
+
+         analyzer = TextSimilarityAnalyzer()
+
+         # Test cases for partial matching
+         target_incident = {
+             "id": "target",
+             "attributes": {
+                 "title": "Payment API timeout errors",
+                 "summary": "Users experiencing payment failures due to API timeouts"
+             }
+         }
+
+         historical_incidents = [
+             {
+                 "id": "similar1",
+                 "attributes": {
+                     "title": "Payment service timeouts",
+                     "summary": "Payments API timing out for users"
+                 }
+             },
+             {
+                 "id": "similar2",
+                 "attributes": {
+                     "title": "Billing API errors",
+                     "summary": "Users unable to complete payments due to errors"
+                 }
+             },
+             {
+                 "id": "unrelated",
+                 "attributes": {
+                     "title": "Auth service down",
+                     "summary": "Login failures for all users"
+                 }
+             }
+         ]
+
+         similar_incidents = analyzer.calculate_similarity(historical_incidents, target_incident)
+
+         # Should find payment-related incidents with partial matching
+         payment_related = [inc for inc in similar_incidents
+                            if inc.incident_id in ["similar1", "similar2"]]
+         auth_related = [inc for inc in similar_incidents
+                         if inc.incident_id == "unrelated"]
+
+         # Payment incidents should have higher scores than auth incident
+         if payment_related and auth_related:
+             max_payment_score = max(inc.similarity_score for inc in payment_related)
+             max_auth_score = max(inc.similarity_score for inc in auth_related)
+             assert max_payment_score > max_auth_score, \
+                 f"Payment similarity ({max_payment_score}) should be higher than auth ({max_auth_score})"
+
+         # Check that fuzzy keywords are detected
+         if payment_related:
+             top_match = max(payment_related, key=lambda x: x.similarity_score)
+             # Should detect partial matches like "payment~payments" or "timeout~timeouts"
+             # Note: This might be 0 if exact matches exist, which is also valid
+             assert top_match.similarity_score > 0.1, "Should have reasonable similarity score for payment incidents"
--- rootly_mcp_server-2.0.11/tests/unit/test_server.py
+++ rootly_mcp_server-2.0.13/tests/unit/test_server.py
@@ -237,6 +237,19 @@ class TestOpenAPISpecFiltering:
          assert "/teams" in filtered_spec["paths"]
          assert "/forbidden" not in filtered_spec["paths"]
 
+         # Verify pagination parameters were added to /incidents endpoint
+         incidents_get = filtered_spec["paths"]["/incidents"]["get"]
+         assert "parameters" in incidents_get
+         param_names = [p["name"] for p in incidents_get["parameters"]]
+         assert "page[size]" in param_names
+         assert "page[number]" in param_names
+
+         # Verify /teams endpoint does not get pagination (doesn't contain "incidents" or "alerts")
+         teams_get = filtered_spec["paths"]["/teams"]["get"]
+         if "parameters" in teams_get:
+             param_names = [p["name"] for p in teams_get["parameters"]]
+             assert "page[size]" not in param_names
+
          # Verify other properties are preserved
          assert filtered_spec["openapi"] == original_spec["openapi"]
          assert filtered_spec["info"] == original_spec["info"]
@@ -276,6 +289,83 @@
          assert "servers" in filtered_spec
          assert "components" in filtered_spec
          assert filtered_spec["servers"] == original_spec["servers"]
+
+         # Verify pagination parameters were added to /incidents endpoint
+         incidents_get = filtered_spec["paths"]["/incidents"]["get"]
+         assert "parameters" in incidents_get
+         param_names = [p["name"] for p in incidents_get["parameters"]]
+         assert "page[size]" in param_names
+         assert "page[number]" in param_names
+
+     def test_filter_spec_adds_pagination_to_alerts(self):
+         """Test that pagination parameters are added to alerts endpoints."""
+         original_spec = {
+             "openapi": "3.0.0",
+             "info": {"title": "Test API", "version": "1.0.0"},
+             "paths": {
+                 "/alerts": {"get": {"operationId": "listAlerts"}},
+                 "/incidents/123/alerts": {"get": {"operationId": "listIncidentAlerts"}},
+                 "/users": {"get": {"operationId": "listUsers"}},
+             },
+             "components": {"schemas": {}}
+         }
+
+         allowed_paths = ["/alerts", "/incidents/123/alerts", "/users"]
+         filtered_spec = _filter_openapi_spec(original_spec, allowed_paths)
+
+         # Verify pagination was added to alerts endpoints
+         alerts_get = filtered_spec["paths"]["/alerts"]["get"]
+         assert "parameters" in alerts_get
+         param_names = [p["name"] for p in alerts_get["parameters"]]
+         assert "page[size]" in param_names
+         assert "page[number]" in param_names
+
+         incident_alerts_get = filtered_spec["paths"]["/incidents/123/alerts"]["get"]
+         assert "parameters" in incident_alerts_get
+         param_names = [p["name"] for p in incident_alerts_get["parameters"]]
+         assert "page[size]" in param_names
+         assert "page[number]" in param_names
+
+         # Verify pagination was NOT added to /users (no "incident" or "alerts" in path)
+         users_get = filtered_spec["paths"]["/users"]["get"]
+         if "parameters" in users_get:
+             param_names = [p["name"] for p in users_get["parameters"]]
+             assert "page[size]" not in param_names
+
+     def test_filter_spec_adds_pagination_to_incident_types(self):
+         """Test that pagination parameters are added to incident-related endpoints."""
+         original_spec = {
+             "openapi": "3.0.0",
+             "info": {"title": "Test API", "version": "1.0.0"},
+             "paths": {
+                 "/incident_types": {"get": {"operationId": "listIncidentTypes"}},
+                 "/incident_action_items": {"get": {"operationId": "listIncidentActionItems"}},
+                 "/services": {"get": {"operationId": "listServices"}},
+             },
+             "components": {"schemas": {}}
+         }
+
+         allowed_paths = ["/incident_types", "/incident_action_items", "/services"]
+         filtered_spec = _filter_openapi_spec(original_spec, allowed_paths)
+
+         # Verify pagination was added to incident-related endpoints
+         incident_types_get = filtered_spec["paths"]["/incident_types"]["get"]
+         assert "parameters" in incident_types_get
+         param_names = [p["name"] for p in incident_types_get["parameters"]]
+         assert "page[size]" in param_names
+         assert "page[number]" in param_names
+
+         incident_action_items_get = filtered_spec["paths"]["/incident_action_items"]["get"]
+         assert "parameters" in incident_action_items_get
+         param_names = [p["name"] for p in incident_action_items_get["parameters"]]
+         assert "page[size]" in param_names
+         assert "page[number]" in param_names
+
+         # Verify pagination was NOT added to /services (no "incident" or "alerts" in path)
+         services_get = filtered_spec["paths"]["/services"]["get"]
+         if "parameters" in services_get:
+             param_names = [p["name"] for p in services_get["parameters"]]
+             assert "page[size]" not in param_names
 
 
      @pytest.mark.unit
--- rootly_mcp_server-2.0.11/tests/unit/test_smart_utils.py
+++ rootly_mcp_server-2.0.13/tests/unit/test_smart_utils.py
@@ -247,7 +247,7 @@ class TestTextSimilarityAnalyzer:
          assert "timeout" in common
          assert "error" in common
          assert "service" in common
-         assert len(common) <= 5 # Should limit to top 5
+         assert len(common) <= 8 # Should limit to top 8 (increased for fuzzy matching)
 
 
  class TestSolutionExtractor:
--- rootly_mcp_server-2.0.11/uv.lock
+++ rootly_mcp_server-2.0.13/uv.lock
@@ -873,7 +873,7 @@ wheels = [
 
  [[package]]
  name = "rootly-mcp-server"
- version = "2.0.10"
+ version = "2.0.12"
  source = { editable = "." }
  dependencies = [
      { name = "brotli" },