mcp-instana 0.3.1__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {mcp_instana-0.3.1.dist-info → mcp_instana-0.7.0.dist-info}/METADATA +186 -311
- {mcp_instana-0.3.1.dist-info → mcp_instana-0.7.0.dist-info}/RECORD +30 -22
- {mcp_instana-0.3.1.dist-info → mcp_instana-0.7.0.dist-info}/WHEEL +1 -1
- src/application/application_alert_config.py +393 -136
- src/application/application_analyze.py +597 -594
- src/application/application_call_group.py +528 -0
- src/application/application_catalog.py +0 -8
- src/application/application_global_alert_config.py +275 -57
- src/application/application_metrics.py +377 -237
- src/application/application_resources.py +414 -325
- src/application/application_settings.py +608 -1530
- src/application/application_topology.py +62 -62
- src/core/custom_dashboard_smart_router_tool.py +135 -0
- src/core/server.py +95 -119
- src/core/smart_router_tool.py +574 -0
- src/core/utils.py +17 -8
- src/custom_dashboard/custom_dashboard_tools.py +422 -0
- src/event/events_tools.py +57 -9
- src/infrastructure/elicitation_handler.py +338 -0
- src/infrastructure/entity_registry.py +329 -0
- src/infrastructure/infrastructure_analyze_new.py +600 -0
- src/infrastructure/{infrastructure_analyze.py → infrastructure_analyze_old.py} +1 -16
- src/infrastructure/infrastructure_catalog.py +37 -32
- src/infrastructure/infrastructure_metrics.py +93 -16
- src/infrastructure/infrastructure_resources.py +6 -24
- src/infrastructure/infrastructure_topology.py +29 -23
- src/observability.py +29 -0
- src/prompts/application/application_settings.py +58 -0
- {mcp_instana-0.3.1.dist-info → mcp_instana-0.7.0.dist-info}/entry_points.txt +0 -0
- {mcp_instana-0.3.1.dist-info → mcp_instana-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
src/application/application_metrics.py

```diff
@@ -41,10 +41,11 @@ class ApplicationMetricsMCPTools(BaseInstanaClient):
         """Initialize the Application Metrics MCP tools client."""
         super().__init__(read_token=read_token, base_url=base_url)

-    @register_as_tool
- [2 removed lines whose content was not captured in this rendering]
-    )
+    # @register_as_tool decorator commented out - not exposed as MCP tool
+    # @register_as_tool(
+    #     title="Get Application Data Metrics V2",
+    #     annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
+    # )
     @with_header_auth(ApplicationMetricsApi)
     async def get_application_data_metrics_v2(self,
                                               metrics: Optional[List[Dict[str, Any]]] = None,
```
```diff
@@ -75,6 +76,14 @@ class ApplicationMetricsMCPTools(BaseInstanaClient):
         try:
             logger.debug(f"get_application_data_metrics_v2 called with application_id={application_id}, service_id={service_id}, endpoint_id={endpoint_id}")

+            # Two-Pass Elicitation: Check for required and recommended parameters
+            elicitation_request = self._check_elicitation_for_app_metrics(
+                metrics, time_frame, application_id, service_id, endpoint_id
+            )
+            if elicitation_request:
+                logger.info("Elicitation needed for application metrics")
+                return elicitation_request
+
             # Set default time range if not provided
             if not time_frame:
                 to_time = int(datetime.now().timestamp() * 1000)
```
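With this change, the tool returns an elicitation request instead of calling the Instana API when required parameters are missing, so the caller is expected to fill in the gaps and invoke the tool a second time. Below is a minimal caller-side sketch of that two-pass flow; the `tools` instance, application ID, and timestamps are illustrative assumptions, not values taken from the package.

```python
# Hypothetical caller sketch - `tools` would be an ApplicationMetricsMCPTools
# instance; the application ID and timestamps are placeholders.
async def fetch_calls_metric(tools):
    # First pass: omit `metrics` on purpose; the tool should answer with an
    # elicitation request instead of an Instana API result.
    first = await tools.get_application_data_metrics_v2(application_id="app-123")
    if not (isinstance(first, dict) and first.get("elicitation_needed")):
        return first  # nothing was missing; this is already the metrics payload

    print(first["message"])  # e.g. "I need:\n\nmetrics: latency, calls, ..."

    # Second pass: repeat the call with the requested parameters supplied.
    return await tools.get_application_data_metrics_v2(
        application_id="app-123",
        metrics=[{"metric": "calls", "aggregation": "SUM"}],
        time_frame={"from": 1717995600000, "to": 1717999200000},
    )
```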
```diff
@@ -122,254 +131,385 @@ class ApplicationMetricsMCPTools(BaseInstanaClient):
                 # If it's already a dict or another format, use it as is
                 result_dict = result

-
+            # 🔍 DEBUG: Log the API response structure and data
+            logger.info("=" * 80)
+            logger.info("📥 INSTANA API RESPONSE DEBUG")
+            logger.info("=" * 80)
+            logger.info(f"Response Type: {type(result_dict)}")
+            logger.info(f"Response Keys: {result_dict.keys() if isinstance(result_dict, dict) else 'N/A'}")
+
+            # Log detailed structure for each metric
+            if isinstance(result_dict, dict) and 'items' in result_dict:
+                logger.info(f"Number of items: {len(result_dict['items'])}")
+                for idx, item in enumerate(result_dict['items'][:3]):  # Log first 3 items
+                    logger.info(f"\nItem {idx}:")
+                    logger.info(f"  Keys: {item.keys() if isinstance(item, dict) else 'N/A'}")
+                    if isinstance(item, dict):
+                        if 'metrics' in item:
+                            logger.info(f"  Metrics: {item['metrics'].keys() if isinstance(item['metrics'], dict) else item['metrics']}")
+                            # Log actual metric values
+                            for metric_name, metric_data in (item['metrics'].items() if isinstance(item['metrics'], dict) else []):
+                                logger.info(f"    {metric_name}:")
+                                if isinstance(metric_data, dict):
+                                    logger.info(f"      Keys: {metric_data.keys()}")
+                                    if 'values' in metric_data:
+                                        values = metric_data['values']
+                                        logger.info(f"      Number of data points: {len(values) if isinstance(values, list) else 'N/A'}")
+                                        if isinstance(values, list) and len(values) > 0:
+                                            logger.info(f"      First value: {values[0]}")
+                                            logger.info(f"      Last value: {values[-1]}")
+                                            # Calculate sum if it's a list of numbers
+                                            try:
+                                                numeric_values = [v[1] if isinstance(v, list) and len(v) > 1 else v for v in values]
+                                                total = sum(numeric_values)
+                                                logger.info(f"      ⚠️ SUM of all data points: {total}")
+                                            except (TypeError, ValueError):
+                                                pass
+                                    if 'aggregation' in metric_data:
+                                        logger.info(f"      Aggregation: {metric_data['aggregation']}")
+                                else:
+                                    logger.info(f"    Value: {metric_data}")
+
+            logger.info("=" * 80)
+            logger.debug(f"Full Result: {result_dict}")
             return result_dict
         except Exception as e:
             logger.error(f"Error in get_application_data_metrics_v2: {e}", exc_info=True)
             return {"error": f"Failed to get application data metrics: {e!s}"}

```
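The debug block above walks an assumed response layout: a top-level `items` list, a `metrics` mapping per item, and per-metric `values` that may be `[timestamp, value]` pairs (the sum takes `v[1]` from each pair). A small illustration of that assumed shape follows; the metric key and numbers are invented for the example, and real Instana payloads may differ. The hunk continues after the sketch.

```python
# Illustrative payload only - field names mirror what the logging code reads.
sample_result_dict = {
    "items": [
        {
            "metrics": {
                "calls.sum": {
                    "values": [[1717995600000, 12], [1717995660000, 7]],  # [ts_ms, value]
                    "aggregation": "SUM",
                }
            }
        }
    ]
}

values = sample_result_dict["items"][0]["metrics"]["calls.sum"]["values"]
numeric_values = [v[1] if isinstance(v, list) and len(v) > 1 else v for v in values]
assert sum(numeric_values) == 19  # matches the "SUM of all data points" the log prints
```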
```diff
-    @register_as_tool(
- [2 removed lines whose content was not captured in this rendering]
-    )
-    @with_header_auth(ApplicationMetricsApi)
-    async def get_application_metrics(self,
- [5 removed lines whose content was not captured in this rendering]
+    # @register_as_tool(
+    #     title="Get Application Metrics",
+    #     annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
+    # )
+    # @with_header_auth(ApplicationMetricsApi)
+    # async def get_application_metrics(self,
+    #                        application_id: Optional[str] = None,
+    #                        metrics: Optional[List[Dict[str, str]]] = None,
+    #                        time_frame: Optional[Dict[str, int]] = None,
+    #                        fill_time_series: Optional[bool] = True,
+    #                        ctx=None, api_client=None) -> Dict[str, Any]:
+    #     """
+    #     Get metrics for a specific application.
+
+    #     This API endpoint retrieves one or more supported aggregations of metrics for an Application Perspective.
+
+    #     Args:
+    #         application_id: Application ID to get metrics for (single application)
+    #         metrics: List of metrics to retrieve with their aggregations
+    #                 Example: [{"metric": "latency", "aggregation": "MEAN"}]
+    #         time_frame: Dictionary with 'from' and 'to' timestamps in milliseconds
+    #                 Example: {"from": 1617994800000, "to": 1618081200000}
+    #         fill_time_series: Whether to fill missing data points with timestamp and value 0
+    #         ctx: The MCP context (optional)
+
+    #     Returns:
+    #         Dictionary containing application metrics data or error information
+    #     """
+    #     try:
+    #         logger.debug(f"get_application_metrics called with application_id={application_id}")
+
+    #         # Set default time range if not provided
+    #         if not time_frame:
+    #             to_time = int(datetime.now().timestamp() * 1000)
+    #             from_time = to_time - (60 * 60 * 1000)  # Default to 1 hour
+    #             time_frame = {
+    #                 "from": from_time,
+    #                 "to": to_time
+    #             }
+
+    #         # Set default metrics if not provided
+    #         if not metrics:
+    #             metrics = [
+    #                 {
+    #                     "metric": "latency",
+    #                     "aggregation": "MEAN"
+    #                 }
+    #             ]
+
+    #         # Create the request body
+    #         request_body = {
+    #             "metrics": metrics,
+    #             "timeFrame": time_frame
+    #         }
+
+    #         # Add application ID if provided
+    #         if application_id:
+    #             request_body["applicationId"] = application_id
+
+    #         # Create the GetApplications object
+    #         get_applications = GetApplications(**request_body)
+
+    #         # Call the get_application_metrics method from the SDK
+    #         result = api_client.get_application_metrics(
+    #             fill_time_series=fill_time_series,
+    #             get_applications=get_applications
+    #         )
+
+    #         # Convert the result to a dictionary
+    #         if hasattr(result, 'to_dict'):
+    #             result_dict = result.to_dict()
+    #         else:
+    #             # If it's already a dict or another format, use it as is
+    #             result_dict = result
+
+    #         logger.debug(f"Result from get_application_metrics: {result_dict}")
+    #         return result_dict
+    #     except Exception as e:
+    #         logger.error(f"Error in get_application_metrics: {e}", exc_info=True)
+    #         return {"error": f"Failed to get application metrics: {e!s}"}
+
+    # @register_as_tool(
+    #     title="Get Endpoints Metrics",
+    #     annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
+    # )
+    # @with_header_auth(ApplicationMetricsApi)
+    # async def get_endpoints_metrics(self,
+    #                        endpoint_id: Optional[str] = None,
+    #                        metrics: Optional[List[Dict[str, str]]] = None,
+    #                        time_frame: Optional[Dict[str, int]] = None,
+    #                        fill_time_series: Optional[bool] = True,
+    #                        ctx=None, api_client=None) -> Dict[str, Any]:
+    #     """
+    #     Get metrics for a specific endpoint.
+
+    #     This API endpoint retrieves one or more supported aggregations of metrics for an Endpoint.
+
+    #     Args:
+    #         endpoint_id: Endpoint ID to get metrics for (single endpoint)
+    #         metrics: List of metrics to retrieve with their aggregations
+    #                 Example: [{"metric": "latency", "aggregation": "MEAN"}]
+    #         time_frame: Dictionary with 'from' and 'to' timestamps in milliseconds
+    #                 Example: {"from": 1617994800000, "to": 1618081200000}
+    #         fill_time_series: Whether to fill missing data points with timestamp and value 0
+    #         ctx: The MCP context (optional)
+
+    #     Returns:
+    #         Dictionary containing endpoint metrics data or error information
+    #     """
+    #     try:
+    #         logger.debug(f"get_endpoints_metrics called with endpoint_id={endpoint_id}")
+
+    #         # Set default time range if not provided
+    #         if not time_frame:
+    #             to_time = int(datetime.now().timestamp() * 1000)
+    #             from_time = to_time - (60 * 60 * 1000)  # Default to 1 hour
+    #             time_frame = {
+    #                 "from": from_time,
+    #                 "to": to_time
+    #             }
+
+    #         # Set default metrics if not provided
+    #         if not metrics:
+    #             metrics = [
+    #                 {
+    #                     "metric": "latency",
+    #                     "aggregation": "MEAN"
+    #                 }
+    #             ]
+
+    #         # Create the request body
+    #         request_body = {
+    #             "metrics": metrics,
+    #             "timeFrame": time_frame
+    #         }
+
+    #         # Add endpoint ID if provided
+    #         if endpoint_id:
+    #             request_body["endpointId"] = endpoint_id
+
+    #         # Create the GetEndpoints object
+    #         get_endpoints = GetEndpoints(**request_body)
+
+    #         # Call the get_endpoints_metrics method from the SDK
+    #         result = api_client.get_endpoints_metrics(
+    #             fill_time_series=fill_time_series,
+    #             get_endpoints=get_endpoints
+    #         )
+
+    #         # Convert the result to a dictionary
+    #         if hasattr(result, 'to_dict'):
+    #             result_dict = result.to_dict()
+    #         else:
+    #             # If it's already a dict or another format, use it as is
+    #             result_dict = result
+
+    #         logger.debug(f"Result from get_endpoints_metrics: {result_dict}")
+    #         return result_dict
+    #     except Exception as e:
+    #         logger.error(f"Error in get_endpoints_metrics: {e}", exc_info=True)
+    #         return {"error": f"Failed to get endpoints metrics: {e!s}"}
+
+    # @register_as_tool(
+    #     title="Get Services Metrics",
+    #     annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
+    # )
+    # @with_header_auth(ApplicationMetricsApi)
+    # async def get_services_metrics(self,
+    #                        service_id: Optional[str] = None,
+    #                        metrics: Optional[List[Dict[str, str]]] = None,
+    #                        time_frame: Optional[Dict[str, int]] = None,
+    #                        fill_time_series: Optional[bool] = True,
+    #                        include_snapshot_ids: Optional[bool] = False,
+    #                        ctx=None, api_client=None) -> Dict[str, Any]:
+    #     """
+    #     Get metrics for a specific service.
+
+    #     This API endpoint retrieves one or more supported aggregations of metrics for a Service.
+
+    #     Args:
+    #         service_id: Service ID to get metrics for (single service)
+    #         metrics: List of metrics to retrieve with their aggregations
+    #                 Example: [{"metric": "latency", "aggregation": "MEAN"}]
+    #         time_frame: Dictionary with 'from' and 'to' timestamps in milliseconds
+    #                 Example: {"from": 1617994800000, "to": 1618081200000}
+    #         fill_time_series: Whether to fill missing data points with timestamp and value 0
+    #         include_snapshot_ids: Whether to include snapshot IDs in the results
+    #         ctx: The MCP context (optional)
+
+    #     Returns:
+    #         Dictionary containing service metrics data or error information
+    #     """
+    #     try:
+    #         logger.debug(f"get_services_metrics called with service_id={service_id}")
+
+    #         # Set default time range if not provided
+    #         if not time_frame:
+    #             to_time = int(datetime.now().timestamp() * 1000)
+    #             from_time = to_time - (60 * 60 * 1000)  # Default to 1 hour
+    #             time_frame = {
+    #                 "from": from_time,
+    #                 "to": to_time
+    #             }
+
+    #         # Set default metrics if not provided
+    #         if not metrics:
+    #             metrics = [
+    #                 {
+    #                     "metric": "latency",
+    #                     "aggregation": "MEAN"
+    #                 }
+    #             ]
+
+    #         # Create the request body
+    #         request_body = {
+    #             "metrics": metrics,
+    #             "timeFrame": time_frame
+    #         }
+
+    #         # Add service ID if provided
+    #         if service_id:
+    #             request_body["serviceId"] = service_id
+
+    #         # Create the GetServices object
+    #         get_services = GetServices(**request_body)
+
+    #         # Call the get_services_metrics method from the SDK
+    #         result = api_client.get_services_metrics(
+    #             fill_time_series=fill_time_series,
+    #             include_snapshot_ids=include_snapshot_ids,
+    #             get_services=get_services
+    #         )
+
+    #         # Convert the result to a dictionary
+    #         if hasattr(result, 'to_dict'):
+    #             result_dict = result.to_dict()
+    #         else:
+    #             # If it's already a dict or another format, use it as is
+    #             result_dict = result
+
+    #         logger.debug(f"Result from get_services_metrics: {result_dict}")
+    #         return result_dict
+    #     except Exception as e:
+    #         logger.error(f"Error in get_services_metrics: {e}", exc_info=True)
+
+    def _check_elicitation_for_app_metrics(
+        self,
+        metrics: Optional[List[Dict[str, str]]],
+        time_frame: Optional[Dict[str, int]],
+        application_id: Optional[str],
+        service_id: Optional[str],
+        endpoint_id: Optional[str]
+    ) -> Optional[Dict[str, Any]]:
         """
- [2 removed lines whose content was not captured in this rendering]
-        This API endpoint retrieves one or more supported aggregations of metrics for an Application Perspective.
+        Check for required and recommended parameters (Two-Pass Elicitation).

         Args:
- [5 removed lines whose content was not captured in this rendering]
-            fill_time_series: Whether to fill missing data points with timestamp and value 0
-            ctx: The MCP context (optional)
+            metrics: Metrics list if provided
+            time_frame: Time frame if provided
+            application_id: Application ID if provided
+            service_id: Service ID if provided
+            endpoint_id: Endpoint ID if provided

         Returns:
-
+            Elicitation request dict if parameters are missing, None otherwise
         """
- [8 removed lines whose content was not captured in this rendering]
-                    "from": from_time,
-                    "to": to_time
- [25 removed lines whose content was not captured in this rendering]
-            result = api_client.get_application_metrics(
-                fill_time_series=fill_time_series,
-                get_applications=get_applications
-            )
-
-            # Convert the result to a dictionary
-            if hasattr(result, 'to_dict'):
-                result_dict = result.to_dict()
-            else:
-                # If it's already a dict or another format, use it as is
-                result_dict = result
-
-            logger.debug(f"Result from get_application_metrics: {result_dict}")
-            return result_dict
-        except Exception as e:
-            logger.error(f"Error in get_application_metrics: {e}", exc_info=True)
-            return {"error": f"Failed to get application metrics: {e!s}"}
-
-    @register_as_tool(
-        title="Get Endpoints Metrics",
-        annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
-    )
-    @with_header_auth(ApplicationMetricsApi)
-    async def get_endpoints_metrics(self,
-                           endpoint_ids: Optional[List[str]] = None,
-                           metrics: Optional[List[Dict[str, str]]] = None,
-                           time_frame: Optional[Dict[str, int]] = None,
-                           fill_time_series: Optional[bool] = True,
-                           ctx=None, api_client=None) -> Dict[str, Any]:
+        missing_params = []
+
+        # Check for REQUIRED parameters
+        if not metrics:
+            missing_params.append({
+                "name": "metrics",
+                "description": "List of metric names with aggregations (REQUIRED)",
+                "examples": [
+                    {"metric": "calls", "aggregation": "SUM"},
+                    {"metric": "latency", "aggregation": "MEAN"},
+                    {"metric": "errors", "aggregation": "SUM"}
+                ],
+                "type": "list"
+            })
+
+        # Check for RECOMMENDED parameters
+        if not time_frame:
+            missing_params.append({
+                "name": "time_frame",
+                "description": "Time range for metrics (RECOMMENDED)",
+                "examples": [
+                    {"windowSize": 3600000},  # Last hour
+                    {"windowSize": 86400000}  # Last 24 hours
+                ],
+                "type": "dict",
+                "note": "If not provided, defaults to last hour"
+            })
+
+        # If any required or recommended parameters are missing, return elicitation request
+        if missing_params:
+            return self._create_elicitation_request(missing_params)
+
+        return None
+
+    def _create_elicitation_request(self, missing_params: list) -> Dict[str, Any]:
         """
- [2 removed lines whose content was not captured in this rendering]
-        This API endpoint retrieves one or more supported aggregations of metrics for an Endpoint.
+        Create an elicitation request following MCP pattern.

         Args:
-
-            metrics: List of metrics to retrieve with their aggregations
-                    Example: [{"metric": "latency", "aggregation": "MEAN"}]
-            time_frame: Dictionary with 'from' and 'to' timestamps in milliseconds
-                    Example: {"from": 1617994800000, "to": 1618081200000}
-            fill_time_series: Whether to fill missing data points with timestamp and value 0
-            ctx: The MCP context (optional)
+            missing_params: List of missing parameter descriptions

         Returns:
-
+            Elicitation request dict
         """
- [3 removed lines whose content was not captured in this rendering]
-            # Set default time range if not provided
-            if not time_frame:
- [3 removed lines whose content was not captured in this rendering]
-                    "from": from_time,
-                    "to": to_time
-                }
-
-            # Set default metrics if not provided
-            if not metrics:
-                metrics = [
-                    {
-                        "metric": "latency",
-                        "aggregation": "MEAN"
-                    }
-                ]
-
-            # Create the request body
-            request_body = {
-                "metrics": metrics,
-                "timeFrame": time_frame
-            }
-
-            # Add endpoint IDs if provided
-            if endpoint_ids:
-                request_body["endpointIds"] = endpoint_ids
-
-            # Create the GetEndpoints object
-            get_endpoints = GetEndpoints(**request_body)
-
-            # Call the get_endpoints_metrics method from the SDK
-            result = api_client.get_endpoints_metrics(
-                fill_time_series=fill_time_series,
-                get_endpoints=get_endpoints
-            )
-
-            # Convert the result to a dictionary
-            if hasattr(result, 'to_dict'):
-                result_dict = result.to_dict()
+        # Build simple, user-friendly parameter descriptions
+        param_lines = []
+        for param in missing_params:
+            # Format examples in a simple, readable way
+            if param['name'] == 'metrics':
+                examples = "latency, calls, errors, erroneousCalls"
+            elif param['name'] == 'time_frame':
+                examples = "last hour, last 24 hours, last 10 minutes"
             else:
- [17 removed lines whose content was not captured in this rendering]
-                           time_frame: Optional[Dict[str, int]] = None,
-                           fill_time_series: Optional[bool] = True,
-                           include_snapshot_ids: Optional[bool] = False,
-                           ctx=None, api_client=None) -> Dict[str, Any]:
-        """
-        Get metrics for specific services.
-
-        This API endpoint retrieves one or more supported aggregations of metrics for a Service.
-
-        Args:
-            service_ids: List of service IDs to get metrics for
-            metrics: List of metrics to retrieve with their aggregations
-                    Example: [{"metric": "latency", "aggregation": "MEAN"}]
-            time_frame: Dictionary with 'from' and 'to' timestamps in milliseconds
-                    Example: {"from": 1617994800000, "to": 1618081200000}
-            fill_time_series: Whether to fill missing data points with timestamp and value 0
-            include_snapshot_ids: Whether to include snapshot IDs in the results
-            ctx: The MCP context (optional)
-
-        Returns:
-            Dictionary containing service metrics data or error information
-        """
-        try:
-            logger.debug(f"get_services_metrics called with service_ids={service_ids}")
-
-            # Set default time range if not provided
-            if not time_frame:
-                to_time = int(datetime.now().timestamp() * 1000)
-                from_time = to_time - (60 * 60 * 1000)  # Default to 1 hour
-                time_frame = {
-                    "from": from_time,
-                    "to": to_time
-                }
-
-            # Set default metrics if not provided
-            if not metrics:
-                metrics = [
-                    {
-                        "metric": "latency",
-                        "aggregation": "MEAN"
-                    }
-                ]
-
-            # Create the request body
-            request_body = {
-                "metrics": metrics,
-                "timeFrame": time_frame
-            }
-
-            # Add service IDs if provided
-            if service_ids:
-                request_body["serviceIds"] = service_ids
-
-            # Create the GetServices object
-            get_services = GetServices(**request_body)
-
-            # Call the get_services_metrics method from the SDK
-            result = api_client.get_services_metrics(
-                fill_time_series=fill_time_series,
-                include_snapshot_ids=include_snapshot_ids,
-                get_services=get_services
-            )
-
-            # Convert the result to a dictionary
-            if hasattr(result, 'to_dict'):
-                result_dict = result.to_dict()
-            else:
-                # If it's already a dict or another format, use it as is
-                result_dict = result
-
-            logger.debug(f"Result from get_services_metrics: {result_dict}")
-            return result_dict
-        except Exception as e:
-            logger.error(f"Error in get_services_metrics: {e}", exc_info=True)
-            return {"error": f"Failed to get services metrics: {e!s}"}
+                examples = ", ".join([str(ex) for ex in param["examples"][:3]])
+
+            param_lines.append(f"{param['name']}: {examples}")
+
+        message = (
+            "I need:\n\n"
+            + "\n".join(param_lines)
+        )
+
+        return {
+            "elicitation_needed": True,
+            "message": message,
+            "missing_parameters": [p["name"] for p in missing_params],
+            "parameter_details": missing_params,
+            "instructions": "Call query_instana_metrics again with these parameters filled in."
+        }
+        # return {"error": f"Failed to get services metrics: {e!s}"}
```
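Taken together, `_check_elicitation_for_app_metrics` and `_create_elicitation_request` turn a call that is missing both `metrics` and `time_frame` into a structured prompt rather than an API error. The sketch below shows the dictionary a caller would get back in that case, reconstructed from the added code above; treat it as illustrative rather than a contract.

```python
# Shape of the elicitation response when both parameters are missing,
# derived from the hunk above (values come from the hard-coded examples).
elicitation_response = {
    "elicitation_needed": True,
    "message": (
        "I need:\n\n"
        "metrics: latency, calls, errors, erroneousCalls\n"
        "time_frame: last hour, last 24 hours, last 10 minutes"
    ),
    "missing_parameters": ["metrics", "time_frame"],
    "parameter_details": [
        {
            "name": "metrics",
            "description": "List of metric names with aggregations (REQUIRED)",
            "examples": [
                {"metric": "calls", "aggregation": "SUM"},
                {"metric": "latency", "aggregation": "MEAN"},
                {"metric": "errors", "aggregation": "SUM"},
            ],
            "type": "list",
        },
        {
            "name": "time_frame",
            "description": "Time range for metrics (RECOMMENDED)",
            "examples": [{"windowSize": 3600000}, {"windowSize": 86400000}],
            "type": "dict",
            "note": "If not provided, defaults to last hour",
        },
    ],
    "instructions": "Call query_instana_metrics again with these parameters filled in.",
}
```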