mcp-instana 0.6.2__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/METADATA +179 -120
- {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/RECORD +28 -21
- src/application/application_alert_config.py +397 -146
- src/application/application_analyze.py +597 -597
- src/application/application_call_group.py +528 -0
- src/application/application_catalog.py +0 -8
- src/application/application_global_alert_config.py +255 -38
- src/application/application_metrics.py +377 -237
- src/application/application_resources.py +414 -365
- src/application/application_settings.py +605 -1651
- src/application/application_topology.py +62 -62
- src/core/custom_dashboard_smart_router_tool.py +135 -0
- src/core/server.py +92 -119
- src/core/smart_router_tool.py +574 -0
- src/core/utils.py +17 -8
- src/custom_dashboard/custom_dashboard_tools.py +422 -0
- src/infrastructure/elicitation_handler.py +338 -0
- src/infrastructure/entity_registry.py +329 -0
- src/infrastructure/infrastructure_analyze_new.py +600 -0
- src/infrastructure/{infrastructure_analyze.py → infrastructure_analyze_old.py} +1 -16
- src/infrastructure/infrastructure_catalog.py +7 -28
- src/infrastructure/infrastructure_metrics.py +93 -17
- src/infrastructure/infrastructure_resources.py +5 -20
- src/infrastructure/infrastructure_topology.py +2 -8
- src/prompts/application/application_settings.py +58 -0
- {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/WHEEL +0 -0
- {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/entry_points.txt +0 -0
- {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
src/application/application_call_group.py
@@ -0,0 +1,528 @@
+"""
+Application Call Group MCP Tools Module
+
+This module provides application call group metrics-specific MCP tools for Instana monitoring.
+"""
+
+import logging
+from datetime import datetime
+from typing import Any, Dict, List, Optional, Union
+
+from mcp.types import ToolAnnotations
+
+from src.prompts import mcp
+
+# Import the necessary classes from the SDK
+try:
+    from instana_client.api.application_analyze_api import ApplicationAnalyzeApi
+    from instana_client.models.get_call_groups import GetCallGroups
+except ImportError as e:
+    import logging
+    logger = logging.getLogger(__name__)
+    logger.error(f"Error importing Instana SDK: {e}", exc_info=True)
+    raise
+
+from src.core.utils import BaseInstanaClient, register_as_tool, with_header_auth
+
+# Configure logger for this module
+logger = logging.getLogger(__name__)
+
+class ApplicationCallGroupMCPTools(BaseInstanaClient):
+    """Tools for application call group metrics in Instana MCP."""
+
+    def __init__(self, read_token: str, base_url: str):
+        """Initialize the Application Call Group MCP tools client."""
+        super().__init__(read_token=read_token, base_url=base_url)
+
+    # @register_as_tool decorator commented out - not exposed as MCP tool
+    # @register_as_tool(
+    #     title="Get Grouped Calls Metrics",
+    #     annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
+    # )
+    @with_header_auth(ApplicationAnalyzeApi)
+    async def get_grouped_calls_metrics(
+        self,
+        metrics: Optional[List[Dict[str, Any]]] = None,
+        time_frame: Optional[Dict[str, int]] = None,
+        group: Optional[Dict[str, str]] = None,
+        tag_filter_expression: Optional[Dict[str, Any]] = None,
+        include_internal: Optional[bool] = False,
+        include_synthetic: Optional[bool] = False,
+        order: Optional[Dict[str, str]] = None,
+        pagination: Optional[Dict[str, int]] = None,
+        fill_time_series: Optional[bool] = None,
+        ctx=None,
+        api_client=None
+    ) -> Dict[str, Any]:
+        """
+        Get grouped calls metrics.
+
+        This API endpoint retrieves metrics for calls grouped by tags (e.g., service name, endpoint name).
+        Use this to analyze call patterns, latency, and errors across different services or endpoints.
+
+        Args:
+            metrics: List of metrics to retrieve with their aggregations
+                Example: [
+                    {"metric": "calls", "aggregation": "SUM"},
+                    {"metric": "latency", "aggregation": "P75", "granularity": 360}
+                ]
+            time_frame: Time range for metrics
+                Example: {"to": 1688366990000, "windowSize": 600000}
+            group: Grouping configuration
+                Example: {"groupbyTag": "service.name", "groupbyTagEntity": "DESTINATION"}
+            tag_filter_expression: Filter expression for tags
+                Example: {
+                    "type": "EXPRESSION",
+                    "logicalOperator": "AND",
+                    "elements": [
+                        {
+                            "type": "TAG_FILTER",
+                            "name": "call.type",
+                            "operator": "EQUALS",
+                            "entity": "NOT_APPLICABLE",
+                            "value": "DATABASE"
+                        }
+                    ]
+                }
+            include_internal: Whether to include internal calls (default: False)
+            include_synthetic: Whether to include synthetic calls (default: False)
+            order: Ordering configuration
+                Example: {"by": "calls", "direction": "DESC"}
+            pagination: Pagination configuration
+                Example: {"retrievalSize": 20}
+            fill_time_series: Whether to fill missing data points with zeroes
+            ctx: The MCP context (optional)
+
+        Returns:
+            Dictionary containing grouped call metrics data or error information
+        """
+        try:
+            logger.debug(f"get_grouped_calls_metrics called with metrics={metrics}, group={group}")
+
+            # Two-Pass Elicitation: Check for required and recommended parameters
+            elicitation_request = self._check_elicitation_for_call_group_metrics(
+                metrics, time_frame, group
+            )
+            if elicitation_request:
+                logger.info("Elicitation needed for call group metrics")
+                return elicitation_request
+
+            # Set default time range if not provided
+            if not time_frame:
+                to_time = int(datetime.now().timestamp() * 1000)
+                time_frame = {
+                    "to": to_time,
+                    "windowSize": 3600000  # Default to 1 hour
+                }
+
+            # Set default metrics if not provided
+            if not metrics:
+                metrics = [
+                    {
+                        "metric": "calls",
+                        "aggregation": "SUM"
+                    },
+                    {
+                        "metric": "latency",
+                        "aggregation": "MEAN"
+                    }
+                ]
+
+            # Set default group if not provided
+            if not group:
+                group = {
+                    "groupbyTag": "service.name",
+                    "groupbyTagEntity": "DESTINATION"
+                }
+
+            # Create the GetCallGroups object with ONLY group and metrics
+            # The SDK model only accepts these two parameters
+            query_params = {}
+            if group:
+                query_params["group"] = group
+            if metrics:
+                query_params["metrics"] = metrics
+
+            logger.debug(f"Creating GetCallGroups with params: {query_params}")
+            get_call_groups = GetCallGroups(**query_params)
+            logger.debug("Successfully created GetCallGroups object")
+
+            # Call the get_call_group method from the SDK
+            # Note: Other parameters like time_frame, tag_filter_expression, etc.
+            # might need to be passed as separate parameters to the API call
+            # For now, following the exact pattern from application_analyze.py
+            logger.debug("Calling get_call_group with GetCallGroups object")
+            result = api_client.get_call_group(
+                get_call_groups=get_call_groups
+            )
+
+            # Convert the result to a dictionary
+            if hasattr(result, 'to_dict'):
+                result_dict = result.to_dict()
+            else:
+                # If it's already a dict or another format, use it as is
+                result_dict = result
+
+            # 🔍 DEBUG: Log the API response structure and data
+            logger.info("=" * 80)
+            logger.info("📥 INSTANA API RESPONSE DEBUG - CALL GROUPS")
+            logger.info("=" * 80)
+            logger.info(f"Response Type: {type(result_dict)}")
+            logger.info(f"Response Keys: {result_dict.keys() if isinstance(result_dict, dict) else 'N/A'}")
+
+            # Log detailed structure for each group
+            if isinstance(result_dict, dict) and 'items' in result_dict:
+                logger.info(f"Number of groups: {len(result_dict['items'])}")
+                for idx, item in enumerate(result_dict['items'][:3]):  # Log first 3 items
+                    logger.info(f"\nGroup {idx}:")
+                    logger.info(f"  Keys: {item.keys() if isinstance(item, dict) else 'N/A'}")
+                    if isinstance(item, dict):
+                        if 'metrics' in item:
+                            logger.info(f"  Metrics: {item['metrics'].keys() if isinstance(item['metrics'], dict) else item['metrics']}")
+
+            logger.info("=" * 80)
+            logger.debug(f"Full Result: {result_dict}")
+
+            # Post-process the response to make it more LLM-friendly
+            processed_result = self._process_metrics_response(result_dict)
+
+            # Check if we should aggregate results (no grouping needed in output)
+            # This happens when group is provided but user wants overall metrics only
+            if group and self._should_aggregate_results(metrics, group):
+                processed_result = self._aggregate_grouped_results(processed_result, metrics)
+
+            return processed_result
+        except Exception as e:
+            logger.error(f"Error in get_grouped_calls_metrics: {e}", exc_info=True)
+            return {"error": f"Failed to get grouped calls metrics: {e!s}"}
+
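For context, a minimal sketch (not part of the diff) of how the new method might be exercised once the package is installed. The token and URL are placeholders, and it assumes the @with_header_auth decorator supplies api_client when the coroutine runs:

import asyncio

from src.application.application_call_group import ApplicationCallGroupMCPTools

# Placeholders, not real credentials; with_header_auth is assumed to
# inject `api_client` when the coroutine executes.
client = ApplicationCallGroupMCPTools(
    read_token="<INSTANA_API_TOKEN>",
    base_url="https://example.instana.io",
)

result = asyncio.run(client.get_grouped_calls_metrics(
    metrics=[
        {"metric": "calls", "aggregation": "SUM"},
        {"metric": "latency", "aggregation": "MEAN"},
    ],
    time_frame={"windowSize": 3600000},  # last hour, mirroring the method's default
    group={"groupbyTag": "service.name", "groupbyTagEntity": "DESTINATION"},
))
print(result.get("summary"))  # top-level summary added by the post-processing step

Passing all three of metrics, time_frame, and group keeps the call from short-circuiting into the elicitation path defined further down.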
+    def _process_metrics_response(self, result_dict: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Process the metrics response to extract values from nested arrays.
+
+        The API returns metrics in format:
+        "metrics": {
+            "errors.mean": [[timestamp, value]],
+            "calls.sum": [[timestamp, value]]
+        }
+
+        This function extracts the actual values and adds a human-readable summary.
+
+        Args:
+            result_dict: Raw API response
+
+        Returns:
+            Processed response with extracted metric values
+        """
+        try:
+            if not isinstance(result_dict, dict) or 'items' not in result_dict:
+                return result_dict
+
+            processed_items = []
+            for item in result_dict.get('items', []):
+                if not isinstance(item, dict):
+                    processed_items.append(item)
+                    continue
+
+                processed_item = item.copy()
+
+                # Extract metric values from nested arrays
+                if 'metrics' in item and isinstance(item['metrics'], dict):
+                    extracted_metrics = {}
+                    metric_summary = {}
+
+                    for metric_key, metric_data in item['metrics'].items():
+                        # metric_key format: "metric_name.aggregation" (e.g., "errors.mean", "calls.sum")
+                        if isinstance(metric_data, list) and len(metric_data) > 0:
+                            # Extract the latest value from [timestamp, value] pairs
+                            if isinstance(metric_data[0], list) and len(metric_data[0]) >= 2:
+                                timestamp, value = metric_data[0][0], metric_data[0][1]
+                                extracted_metrics[metric_key] = {
+                                    "timestamp": timestamp,
+                                    "value": value,
+                                    "raw_data": metric_data  # Keep original for reference
+                                }
+
+                                # Create human-readable summary
+                                metric_name = metric_key.split('.')[0]
+                                if metric_name == 'errors':
+                                    metric_summary['error_rate'] = f"{value * 100:.2f}%"
+                                    metric_summary['error_rate_decimal'] = value
+                                elif metric_name == 'calls':
+                                    metric_summary['total_calls'] = int(value)
+                                elif metric_name == 'latency':
+                                    metric_summary['latency_ms'] = f"{value:.2f}ms"
+                                elif metric_name == 'erroneousCalls':
+                                    metric_summary['erroneous_calls'] = int(value)
+
+                    processed_item['metrics_extracted'] = extracted_metrics
+                    processed_item['metrics_summary'] = metric_summary
+
+                    # Add interpretation note
+                    if metric_summary:
+                        interpretation = []
+                        if 'error_rate' in metric_summary:
+                            interpretation.append(f"Error Rate: {metric_summary['error_rate']}")
+                        if 'total_calls' in metric_summary:
+                            interpretation.append(f"Total Calls: {metric_summary['total_calls']}")
+                        if 'erroneous_calls' in metric_summary:
+                            interpretation.append(f"Erroneous Calls: {metric_summary['erroneous_calls']}")
+                        if 'latency_ms' in metric_summary:
+                            interpretation.append(f"Latency: {metric_summary['latency_ms']}")
+
+                        processed_item['interpretation'] = " | ".join(interpretation)
+
+                processed_items.append(processed_item)
+
+            result_dict['items'] = processed_items
+
+            # Add a summary at the top level
+            if processed_items:
+                result_dict['summary'] = {
+                    "total_groups": len(processed_items),
+                    "note": "Check 'metrics_summary' and 'interpretation' fields in each item for human-readable values"
+                }
+
+            return result_dict
+
+        except Exception as e:
+            logger.error(f"Error processing metrics response: {e}", exc_info=True)
+            # Return original if processing fails
+            return result_dict
+
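To make the post-processing concrete, here is a hypothetical raw item (values invented for illustration) and the fields the method would add to it, reusing the sketched `client` from above:

raw = {
    "items": [{
        "name": "checkout-service",
        "metrics": {
            "calls.sum": [[1688366990000, 1234.0]],
            "errors.mean": [[1688366990000, 0.025]],
        },
    }]
}
processed = client._process_metrics_response(raw)
# processed["items"][0]["metrics_summary"]
#   -> {"total_calls": 1234, "error_rate": "2.50%", "error_rate_decimal": 0.025}
# processed["items"][0]["interpretation"]
#   -> "Error Rate: 2.50% | Total Calls: 1234"
# processed["summary"]["total_groups"] -> 1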
+    def _should_aggregate_results(
+        self,
+        metrics: Optional[List[Dict[str, Any]]],
+        group: Optional[Dict[str, str]]
+    ) -> bool:
+        """
+        Determine if results should be aggregated (no grouping in output).
+
+        This is true when:
+        - Only MEAN latency is requested (no other metrics)
+        - Group is by endpoint.name (which we want to aggregate away)
+
+        Args:
+            metrics: List of metrics requested
+            group: Grouping configuration
+
+        Returns:
+            True if results should be aggregated, False otherwise
+        """
+        if not metrics or not group:
+            return False
+
+        # Check if only latency MEAN is requested
+        if len(metrics) == 1:
+            metric = metrics[0]
+            if (metric.get("metric") == "latency" and
+                    metric.get("aggregation") == "MEAN"):
+                # Check if grouping by endpoint.name
+                if group.get("groupbyTag") == "endpoint.name":
+                    return True
+
+        return False
+
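Read literally, this heuristic fires for exactly one request shape; a quick check against the hypothetical client from the earlier sketch:

metrics = [{"metric": "latency", "aggregation": "MEAN"}]
group = {"groupbyTag": "endpoint.name", "groupbyTagEntity": "DESTINATION"}
assert client._should_aggregate_results(metrics, group) is True

# Any other metric list or groupbyTag falls through to False:
assert client._should_aggregate_results(metrics, {"groupbyTag": "service.name"}) is False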
+    def _aggregate_grouped_results(
+        self,
+        result_dict: Dict[str, Any],
+        metrics: List[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """
+        Aggregate grouped results into a single overall metric.
+
+        This calculates the overall mean latency across all endpoints/services
+        by averaging the individual group values.
+
+        Args:
+            result_dict: Processed API response with grouped data
+            metrics: List of metrics that were requested
+
+        Returns:
+            Aggregated result with overall metrics only
+        """
+        try:
+            if not isinstance(result_dict, dict) or 'items' not in result_dict:
+                return result_dict
+
+            items = result_dict.get('items', [])
+            if not items:
+                return {
+                    "aggregated": True,
+                    "message": "No data available for the specified filters",
+                    "overall_metrics": {}
+                }
+
+            # Aggregate metrics across all groups
+            aggregated_metrics = {}
+            metric_counts = {}
+
+            for item in items:
+                if not isinstance(item, dict):
+                    continue
+
+                # Use the extracted metrics if available
+                metrics_data = item.get('metrics_extracted', item.get('metrics', {}))
+
+                for metric_key, metric_info in metrics_data.items():
+                    if metric_key not in aggregated_metrics:
+                        aggregated_metrics[metric_key] = 0
+                        metric_counts[metric_key] = 0
+
+                    # Extract value from different possible formats
+                    if isinstance(metric_info, dict):
+                        value = metric_info.get('value', 0)
+                    elif isinstance(metric_info, (int, float)):
+                        value = metric_info
+                    else:
+                        continue
+
+                    aggregated_metrics[metric_key] += value
+                    metric_counts[metric_key] += 1
+
+            # Calculate averages for MEAN aggregations
+            overall_metrics = {}
+            for metric_key, total in aggregated_metrics.items():
+                count = metric_counts.get(metric_key, 1)
+                if count > 0:
+                    # For latency.mean, we want the average of all endpoint means
+                    if 'latency' in metric_key.lower() and 'mean' in metric_key.lower():
+                        overall_metrics[metric_key] = {
+                            "value": total / count,
+                            "unit": "ms",
+                            "aggregation": "MEAN",
+                            "note": f"Average across {count} endpoints"
+                        }
+                    else:
+                        overall_metrics[metric_key] = {
+                            "value": total / count,
+                            "aggregation": "MEAN",
+                            "note": f"Average across {count} groups"
+                        }
+
+            # Create a simplified response
+            aggregated_result = {
+                "aggregated": True,
+                "message": "Results aggregated across all groups",
+                "total_groups_analyzed": len(items),
+                "overall_metrics": overall_metrics,
+                "original_group_count": len(items)
+            }
+
+            # Add human-readable summary
+            if 'latency.mean' in overall_metrics:
+                latency_value = overall_metrics['latency.mean']['value']
+                aggregated_result['summary'] = f"Overall mean latency: {latency_value:.2f}ms across {len(items)} endpoints"
+
+            logger.info(f"Aggregated {len(items)} groups into overall metrics")
+            return aggregated_result
+
+        except Exception as e:
+            logger.error(f"Error aggregating grouped results: {e}", exc_info=True)
+            # Return original if aggregation fails
+            return result_dict
+
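One design note: the division is by group count rather than call volume, so the result is an unweighted mean of per-endpoint means. A toy calculation:

endpoint_means = [12.0, 40.0, 8.0]  # latency.mean per endpoint, in ms
overall = sum(endpoint_means) / len(endpoint_means)
print(overall)  # 20.0; a busy 40 ms endpoint counts the same as a quiet 8 ms one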
+    def _check_elicitation_for_call_group_metrics(
+        self,
+        metrics: Optional[List[Dict[str, str]]],
+        time_frame: Optional[Dict[str, int]],
+        group: Optional[Dict[str, str]]
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Check for required and recommended parameters (Two-Pass Elicitation).
+
+        Args:
+            metrics: Metrics list if provided
+            time_frame: Time frame if provided
+            group: Group configuration if provided
+
+        Returns:
+            Elicitation request dict if parameters are missing, None otherwise
+        """
+        missing_params = []
+
+        # Check for REQUIRED parameters
+        if not metrics:
+            missing_params.append({
+                "name": "metrics",
+                "description": "List of metric names with aggregations (REQUIRED)",
+                "examples": [
+                    {"metric": "calls", "aggregation": "SUM"},
+                    {"metric": "latency", "aggregation": "MEAN"},
+                    {"metric": "errors", "aggregation": "SUM"},
+                    {"metric": "latency", "aggregation": "P75", "granularity": 360}
+                ],
+                "type": "list"
+            })
+
+        # Check for RECOMMENDED parameters
+        if not time_frame:
+            missing_params.append({
+                "name": "time_frame",
+                "description": "Time range for metrics (RECOMMENDED)",
+                "examples": [
+                    {"windowSize": 3600000},  # Last hour
+                    {"windowSize": 86400000},  # Last 24 hours
+                    {"to": 1688366990000, "windowSize": 600000}  # Specific time
+                ],
+                "type": "dict",
+                "note": "If not provided, defaults to last hour"
+            })
+
+        if not group:
+            missing_params.append({
+                "name": "group",
+                "description": "Grouping configuration (RECOMMENDED)",
+                "examples": [
+                    {"groupbyTag": "service.name", "groupbyTagEntity": "DESTINATION"},
+                    {"groupbyTag": "endpoint.name", "groupbyTagEntity": "DESTINATION"},
+                    {"groupbyTag": "call.type", "groupbyTagEntity": "NOT_APPLICABLE"}
+                ],
+                "type": "dict",
+                "note": "If not provided, defaults to grouping by service.name"
+            })
+
+        # If any required or recommended parameters are missing, return elicitation request
+        if missing_params:
+            return self._create_elicitation_request(missing_params)
+
+        return None
+
+    def _create_elicitation_request(self, missing_params: list) -> Dict[str, Any]:
+        """
+        Create an elicitation request following MCP pattern.
+
+        Args:
+            missing_params: List of missing parameter descriptions
+
+        Returns:
+            Elicitation request dict
+        """
+        # Build simple, user-friendly parameter descriptions
+        param_lines = []
+        for param in missing_params:
+            # Format examples in a simple, readable way
+            if param['name'] == 'metrics':
+                examples = "calls, latency, errors, erroneousCalls"
+            elif param['name'] == 'time_frame':
+                examples = "last hour, last 24 hours, last 10 minutes"
+            elif param['name'] == 'group':
+                examples = "service.name, endpoint.name, call.type"
+            else:
+                examples = ", ".join([str(ex) for ex in param["examples"][:3]])
+
+            param_lines.append(f"{param['name']}: {examples}")
+
+        message = (
+            "I need:\n\n"
+            + "\n".join(param_lines)
+        )
+
+        return {
+            "elicitation_needed": True,
+            "message": message,
+            "missing_parameters": [p["name"] for p in missing_params],
+            "parameter_details": missing_params,
+            "instructions": "Call get_grouped_calls_metrics again with these parameters filled in."
+        }
+
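Tracing the two elicitation helpers end to end: a call with all three parameters missing produces roughly the dict below (a sketch against the hypothetical client; for these three parameter names the method only reads the "name" keys):

request = client._create_elicitation_request([
    {"name": "metrics", "examples": []},
    {"name": "time_frame", "examples": []},
    {"name": "group", "examples": []},
])
# request["message"]
#   -> "I need:\n\nmetrics: calls, latency, errors, erroneousCalls\n"
#      "time_frame: last hour, last 24 hours, last 10 minutes\n"
#      "group: service.name, endpoint.name, call.type"
# request["missing_parameters"] -> ["metrics", "time_frame", "group"]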
src/application/application_catalog.py
@@ -41,10 +41,6 @@ class ApplicationCatalogMCPTools(BaseInstanaClient):
         """Initialize the Application Catalog MCP tools client."""
         super().__init__(read_token=read_token, base_url=base_url)
 
-    @register_as_tool(
-        title="Get Application Tag Catalog",
-        annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
-    )
     @with_header_auth(ApplicationCatalogApi)
     async def get_application_tag_catalog(self,
         use_case: Optional[str] = None,
@@ -112,10 +108,6 @@ class ApplicationCatalogMCPTools(BaseInstanaClient):
             return {"error": f"Failed to get application catalog: {e!s}"}
 
 
-    @register_as_tool(
-        title="Get Application Metric Catalog",
-        annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
-    )
     @with_header_auth(ApplicationCatalogApi)
     async def get_application_metric_catalog(self, ctx=None, api_client=None) -> Dict[str, Any]:
         """