mcp-instana 0.6.2__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/METADATA +179 -120
  2. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/RECORD +28 -21
  3. src/application/application_alert_config.py +397 -146
  4. src/application/application_analyze.py +597 -597
  5. src/application/application_call_group.py +528 -0
  6. src/application/application_catalog.py +0 -8
  7. src/application/application_global_alert_config.py +255 -38
  8. src/application/application_metrics.py +377 -237
  9. src/application/application_resources.py +414 -365
  10. src/application/application_settings.py +605 -1651
  11. src/application/application_topology.py +62 -62
  12. src/core/custom_dashboard_smart_router_tool.py +135 -0
  13. src/core/server.py +92 -119
  14. src/core/smart_router_tool.py +574 -0
  15. src/core/utils.py +17 -8
  16. src/custom_dashboard/custom_dashboard_tools.py +422 -0
  17. src/infrastructure/elicitation_handler.py +338 -0
  18. src/infrastructure/entity_registry.py +329 -0
  19. src/infrastructure/infrastructure_analyze_new.py +600 -0
  20. src/infrastructure/{infrastructure_analyze.py → infrastructure_analyze_old.py} +1 -16
  21. src/infrastructure/infrastructure_catalog.py +7 -28
  22. src/infrastructure/infrastructure_metrics.py +93 -17
  23. src/infrastructure/infrastructure_resources.py +5 -20
  24. src/infrastructure/infrastructure_topology.py +2 -8
  25. src/prompts/application/application_settings.py +58 -0
  26. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/WHEEL +0 -0
  27. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/entry_points.txt +0 -0
  28. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
@@ -54,600 +54,600 @@ class ApplicationAnalyzeMCPTools(BaseInstanaClient):
54
54
  logger.error(f"Error initializing ApplicationAnalyzeApi: {e}", exc_info=True)
55
55
  raise
56
56
 
57
- @register_as_tool(
58
- title="Get Call Details",
59
- annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
60
- )
61
- @with_header_auth(ApplicationAnalyzeApi)
62
- async def get_call_details(
63
- self,
64
- trace_id: str,
65
- call_id: str,
66
- ctx=None,
67
- api_client=None
68
- ) -> Dict[str, Any]:
69
- """
70
- Get details of a specific call in a trace.
71
- This tool is to retrieve a vast information about a call present in a trace.
72
-
73
- Args:
74
- trace_id (str): The ID of the trace.
75
- call_id (str): The ID of the call.
76
- ctx: Optional context for the request.
77
-
78
- Returns:
79
- Dict[str, Any]: Details of the specified call.
80
- """
81
- try:
82
- if not trace_id or not call_id:
83
- logger.warning("Both trace_id and call_id must be provided")
84
- return {"error": "Both trace_id and call_id must be provided"}
85
-
86
- logger.debug(f"Fetching call details for trace_id={trace_id}, call_id={call_id}")
87
- result = api_client.get_call_details_without_preload_content(
88
- trace_id=trace_id,
89
- call_id=call_id
90
- )
91
-
92
- import json
93
-
94
- try:
95
- response_text = result.data.decode('utf-8')
96
- result_dict = json.loads(response_text)
97
- logger.debug("Successfully retrieved call details")
98
- return result_dict
99
-
100
- # Convert the result to a dictionary
101
- except (json.JSONDecodeError, AttributeError) as json_err:
102
- error_message = f"Failed to parse JSON response: {json_err}"
103
- logger.error(error_message)
104
- return {"error": error_message}
105
-
106
- except Exception as e:
107
- logger.error(f"Error getting call details: {e}", exc_info=True)
108
- return {"error": f"Failed to get call details: {e!s}"}
109
-
110
- @register_as_tool(
111
- title="Get Trace Details",
112
- annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
113
- )
114
- @with_header_auth(ApplicationAnalyzeApi)
115
- async def get_trace_details(
116
- self,
117
- id: str,
118
- retrievalSize: Optional[int] = None,
119
- offset: Optional[int] = None,
120
- ingestionTime: Optional[int] = None,
121
- ctx=None,
122
- api_client=None
123
- ) -> Dict[str, Any]:
124
- """
125
- Get details of a specific trace.
126
- This tool is to retrive comprehensive details of a particular trace.
127
- Args:
128
- id (str): The ID of the trace.
129
- retrievalSize (Optional[int]):The number of records to retrieve in a single request.
130
- Minimum value is 1 and maximum value is 10000.
131
- offset (Optional[int]): The number of records to be skipped from the ingestionTime.
132
- ingestionTime (Optional[int]): The timestamp indicating the starting point from which data was ingested.
133
- ctx: Optional context for the request.
134
- Returns:
135
- Dict[str, Any]: Details of the specified trace.
136
- """
137
-
138
- try:
139
- if not id:
140
- logger.warning("Trace ID must be provided")
141
- return {"error": "Trace ID must be provided"}
142
-
143
- if offset is not None and ingestionTime is None:
144
- logger.warning("If offset is provided, ingestionTime must also be provided")
145
- return {"error": "If offset is provided, ingestionTime must also be provided"}
146
-
147
- if retrievalSize is not None and (retrievalSize < 1 or retrievalSize > 10000):
148
- logger.warning(f"retrievalSize must be between 1 and 10000, got: {retrievalSize}")
149
- return {"error": "retrievalSize must be between 1 and 10000"}
150
-
151
- logger.debug(f"Fetching trace details for id={id}")
152
- result = api_client.get_trace_download(
153
- id=id,
154
- retrieval_size=retrievalSize,
155
- offset=offset,
156
- ingestion_time=ingestionTime
157
- )
158
-
159
- # Convert the result to a dictionary
160
- if hasattr(result, 'to_dict'):
161
- result_dict = result.to_dict()
162
- else:
163
- # If it's already a dict or another format, use it as is
164
- result_dict = result
165
-
166
- logger.debug(f"Result from get_trace_details: {result_dict}")
167
- # Ensure we return a dictionary
168
- return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
169
-
170
- except Exception as e:
171
- logger.error(f"Error getting trace details: {e}", exc_info=True)
172
- return {"error": f"Failed to get trace details: {e!s}"}
173
-
174
-
175
- @register_as_tool(
176
- title="Get All Traces",
177
- annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
178
- )
179
- @with_header_auth(ApplicationAnalyzeApi)
180
- async def get_all_traces(
181
- self,
182
- payload: Optional[Union[Dict[str, Any], str]]=None,
183
- api_client = None,
184
- ctx=None
185
- ) -> Dict[str, Any]:
186
- """
187
- Get all traces.
188
- This tool endpoint retrieves the metrics for traces.
189
-
190
- Sample payload: {
191
- "includeInternal": false,
192
- "includeSynthetic": false,
193
- "pagination": {
194
- "retrievalSize": 1
195
- },
196
- "tagFilterExpression": {
197
- "type": "EXPRESSION",
198
- "logicalOperator": "AND",
199
- "elements": [
200
- {
201
- "type": "TAG_FILTER",
202
- "name": "endpoint.name",
203
- "operator": "EQUALS",
204
- "entity": "DESTINATION",
205
- "value": "GET /"
206
- },
207
- {
208
- "type": "TAG_FILTER",
209
- "name": "service.name",
210
- "operator": "EQUALS",
211
- "entity": "DESTINATION",
212
- "value": "groundskeeper"
213
- }
214
- ]
215
- },
216
- "order": {
217
- "by": "traceLabel",
218
- "direction": "DESC"
219
- }
220
- }
221
-
222
- Returns:
223
- Dict[str, Any]: List of traces matching the criteria.
224
- """
225
- try:
226
- # Parse the payload if it's a string
227
- if isinstance(payload, str):
228
- logger.debug("Payload is a string, attempting to parse")
229
- try:
230
- import json
231
- try:
232
- parsed_payload = json.loads(payload)
233
- logger.debug("Successfully parsed payload as JSON")
234
- request_body = parsed_payload
235
- except json.JSONDecodeError as e:
236
- logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
237
-
238
- # Try replacing single quotes with double quotes
239
- fixed_payload = payload.replace("'", "\"")
240
- try:
241
- parsed_payload = json.loads(fixed_payload)
242
- logger.debug("Successfully parsed fixed JSON")
243
- request_body = parsed_payload
244
- except json.JSONDecodeError:
245
- # Try as Python literal
246
- import ast
247
- try:
248
- parsed_payload = ast.literal_eval(payload)
249
- logger.debug("Successfully parsed payload as Python literal")
250
- request_body = parsed_payload
251
- except (SyntaxError, ValueError) as e2:
252
- logger.debug(f"Failed to parse payload string: {e2}")
253
- return {"error": f"Invalid payload format: {e2}", "payload": payload}
254
- except Exception as e:
255
- logger.debug(f"Error parsing payload string: {e}")
256
- return {"error": f"Failed to parse payload: {e}", "payload": payload}
257
- else:
258
- # If payload is already a dictionary, use it directly
259
- logger.debug("Using provided payload dictionary")
260
- request_body = payload
261
-
262
- # Import the GetTraces class
263
- try:
264
- from instana_client.models.get_traces import (
265
- GetTraces,
266
- )
267
- from instana_client.models.group import Group
268
- logger.debug("Successfully imported GetTraces")
269
- except ImportError as e:
270
- logger.debug(f"Error importing GetTraces: {e}")
271
- return {"error": f"Failed to import GetTraces: {e!s}"}
272
-
273
- # Create an GetTraces object from the request body
274
- try:
275
- query_params = {}
276
- if request_body and "tag_filter_expression" in request_body:
277
- query_params["tag_filter_expression"] = request_body["tag_filter_expression"]
278
- logger.debug(f"Creating get_traces with params: {query_params}")
279
- config_object = GetTraces(**query_params)
280
- logger.debug("Successfully got traces")
281
- except Exception as e:
282
- logger.debug(f"Error creating get_traces: {e}")
283
- return {"error": f"Failed to get tracest: {e!s}"}
284
-
285
- # Call the get_traces method from the SDK
286
- logger.debug("Calling get_traces with config object")
287
- result = api_client.get_traces(
288
- get_traces=config_object
289
- )
290
- # Convert the result to a dictionary
291
- if hasattr(result, 'to_dict'):
292
- result_dict = result.to_dict()
293
- else:
294
- # If it's already a dict or another format, use it as is
295
- result_dict = result or {
296
- "success": True,
297
- "message": "Get traces"
298
- }
299
-
300
- logger.debug(f"Result from get_traces: {result_dict}")
301
- return result_dict
302
- except Exception as e:
303
- logger.error(f"Error in get_traces: {e}")
304
- return {"error": f"Failed to get traces: {e!s}"}
305
-
306
- @register_as_tool(
307
- title="Get Grouped Trace Metrics",
308
- annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
309
- )
310
- @with_header_auth(ApplicationAnalyzeApi)
311
- async def get_grouped_trace_metrics(
312
- self,
313
- payload: Optional[Union[Dict[str, Any], str]]=None,
314
- fill_time_series: Optional[bool] = None,
315
- api_client=None,
316
- ctx=None
317
- ) -> Dict[str, Any]:
318
- """
319
- The API endpoint retrieves metrics for traces that are grouped in the endpoint or service name.
320
- This tool Get grouped trace metrics (by endpoint or service name).
321
-
322
- Args:
323
- fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
324
- Sample Payload: {
325
- "group": {
326
- "groupbyTag": "trace.endpoint.name",
327
- "groupbyTagEntity": "NOT_APPLICABLE"
328
- },
329
- "metrics": [
330
- {
331
- "aggregation": "SUM",
332
- "metric": "latency"
333
- }
334
- ],
335
- "order": {
336
- "by": "latency",
337
- "direction": "ASC"
338
- },
339
- "pagination": {
340
- "retrievalSize": 20
341
- },
342
- "tagFilterExpression": {
343
- "type": "EXPRESSION",
344
- "logicalOperator": "AND",
345
- "elements": [
346
- {
347
- "type": "TAG_FILTER",
348
- "name": "call.type",
349
- "operator": "EQUALS",
350
- "entity": "NOT_APPLICABLE",
351
- "value": "DATABASE"
352
- },
353
- {
354
- "type": "TAG_FILTER",
355
- "name": "service.name",
356
- "operator": "EQUALS",
357
- "entity": "DESTINATION",
358
- "value": "ratings"
359
- }
360
- ]
361
- }
362
- }
363
- ctx: Optional execution context.
364
-
365
- Returns:
366
- Dict[str, Any]: Grouped trace metrics result.
367
- """
368
- try:
369
- # Parse the payload if it's a string
370
- if isinstance(payload, str):
371
- logger.debug("Payload is a string, attempting to parse")
372
- try:
373
- import json
374
- try:
375
- parsed_payload = json.loads(payload)
376
- logger.debug("Successfully parsed payload as JSON")
377
- request_body = parsed_payload
378
- except json.JSONDecodeError as e:
379
- logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
380
-
381
- # Try replacing single quotes with double quotes
382
- fixed_payload = payload.replace("'", "\"")
383
- try:
384
- parsed_payload = json.loads(fixed_payload)
385
- logger.debug("Successfully parsed fixed JSON")
386
- request_body = parsed_payload
387
- except json.JSONDecodeError:
388
- # Try as Python literal
389
- import ast
390
- try:
391
- parsed_payload = ast.literal_eval(payload)
392
- logger.debug("Successfully parsed payload as Python literal")
393
- request_body = parsed_payload
394
- except (SyntaxError, ValueError) as e2:
395
- logger.debug(f"Failed to parse payload string: {e2}")
396
- return {"error": f"Invalid payload format: {e2}", "payload": payload}
397
- except Exception as e:
398
- logger.debug(f"Error parsing payload string: {e}")
399
- return {"error": f"Failed to parse payload: {e}", "payload": payload}
400
- else:
401
- # If payload is already a dictionary, use it directly
402
- logger.debug("Using provided payload dictionary")
403
- request_body = payload
404
-
405
- # Import the GetTraceGroups class
406
- try:
407
- from instana_client.models.get_trace_groups import (
408
- GetTraceGroups,
409
- )
410
- from instana_client.models.group import Group
411
- logger.debug("Successfully imported GetTraceGroups")
412
- except ImportError as e:
413
- logger.debug(f"Error importing GetTraceGroups: {e}")
414
- return {"error": f"Failed to import GetTraceGroups: {e!s}"}
415
-
416
- # Create an GetTraceGroups object from the request body
417
- try:
418
- query_params = {}
419
- if request_body and "group" in request_body:
420
- query_params["group"] = request_body["group"]
421
- if request_body and "metrics" in request_body:
422
- query_params["metrics"] = request_body["metrics"]
423
- if request_body and "tag_filter_expression" in request_body:
424
- query_params["tag_filter_expression"] = request_body["tag_filter_expression"]
425
- logger.debug(f"Creating GetTraceGroups with params: {query_params}")
426
- config_object = GetTraceGroups(**query_params)
427
- logger.debug("Successfully created endpoint config object")
428
- except Exception as e:
429
- logger.debug(f"Error creating GetTraceGroups: {e}")
430
- return {"error": f"Failed to create config object: {e!s}"}
431
-
432
- # Call the create_endpoint_config method from the SDK
433
- logger.debug("Calling create_endpoint_config with config object")
434
- result = api_client.get_trace_groups(
435
- get_trace_groups=config_object
436
- )
437
- # Convert the result to a dictionary
438
- if hasattr(result, 'to_dict'):
439
- result_dict = result.to_dict()
440
- else:
441
- # If it's already a dict or another format, use it as is
442
- result_dict = result or {
443
- "success": True,
444
- "message": "Grouped trace metrics"
445
- }
446
-
447
- logger.debug(f"Result from get_grouped_trace_metrics: {result_dict}")
448
- return result_dict
449
- except Exception as e:
450
- logger.error(f"Error in get_grouped_trace_metrics: {e}")
451
- return {"error": f"Failed to get grouped trace metrics: {e!s}"}
452
-
453
- @register_as_tool(
454
- title="Get Grouped Calls Metrics",
455
- annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
456
- )
457
- @with_header_auth(ApplicationAnalyzeApi)
458
- async def get_grouped_calls_metrics(
459
- self,
460
- fillTimeSeries: Optional[str] = None,
461
- payload: Optional[Union[Dict[str, Any], str]]=None,
462
- api_client = None,
463
- ctx=None
464
- ) -> Dict[str, Any]:
465
- """
466
- Get grouped calls metrics.
467
- This endpoint retrieves the metrics for calls.
468
-
469
- Args:
470
- fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
471
- Sample payload: {
472
- "group": {
473
- "groupbyTag": "service.name",
474
- "groupbyTagEntity": "DESTINATION"
475
- },
476
- "metrics": [
477
- {
478
- "aggregation": "SUM",
479
- "metric": "calls"
480
- },
481
- {
482
- "aggregation": "P75",
483
- "metric": "latency",
484
- "granularity": 360
485
- }
486
- ],
487
- "includeInternal": false,
488
- "includeSynthetic": false,
489
- "order": {
490
- "by": "calls",
491
- "direction": "DESC"
492
- },
493
- "pagination": {
494
- "retrievalSize": 20
495
- },
496
- "tagFilterExpression": {
497
- "type": "EXPRESSION",
498
- "logicalOperator": "AND",
499
- "elements": [
500
- {
501
- "type": "TAG_FILTER",
502
- "name": "call.type",
503
- "operator": "EQUALS",
504
- "entity": "NOT_APPLICABLE",
505
- "value": "DATABASE"
506
- },
507
- {
508
- "type": "TAG_FILTER",
509
- "name": "service.name",
510
- "operator": "EQUALS",
511
- "entity": "DESTINATION",
512
- "value": "ratings"
513
- }
514
- ]
515
- },
516
- "timeFrame": {
517
- "to": "1688366990000",
518
- "windowSize": "600000"
519
- }
520
- }
521
- ctx: Optional execution context.
522
-
523
- Returns:
524
- Dict[str, Any]: Grouped trace metrics result.
525
- """
526
- try:
527
- # Parse the payload if it's a string
528
- if isinstance(payload, str):
529
- logger.debug("Payload is a string, attempting to parse")
530
- try:
531
- import json
532
- try:
533
- parsed_payload = json.loads(payload)
534
- logger.debug("Successfully parsed payload as JSON")
535
- request_body = parsed_payload
536
- except json.JSONDecodeError as e:
537
- logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
538
-
539
- # Try replacing single quotes with double quotes
540
- fixed_payload = payload.replace("'", "\"")
541
- try:
542
- parsed_payload = json.loads(fixed_payload)
543
- logger.debug("Successfully parsed fixed JSON")
544
- request_body = parsed_payload
545
- except json.JSONDecodeError:
546
- # Try as Python literal
547
- import ast
548
- try:
549
- parsed_payload = ast.literal_eval(payload)
550
- logger.debug("Successfully parsed payload as Python literal")
551
- request_body = parsed_payload
552
- except (SyntaxError, ValueError) as e2:
553
- logger.debug(f"Failed to parse payload string: {e2}")
554
- return {"error": f"Invalid payload format: {e2}", "payload": payload}
555
- except Exception as e:
556
- logger.debug(f"Error parsing payload string: {e}")
557
- return {"error": f"Failed to parse payload: {e}", "payload": payload}
558
- else:
559
- # If payload is already a dictionary, use it directly
560
- logger.debug("Using provided payload dictionary")
561
- request_body = payload
562
-
563
- # Import the GetCallGroups class
564
- try:
565
- from instana_client.models.get_call_groups import (
566
- GetCallGroups,
567
- )
568
- from instana_client.models.group import Group
569
- logger.debug("Successfully imported GetCallGroups")
570
- except ImportError as e:
571
- logger.debug(f"Error importing GetCallGroups: {e}")
572
- return {"error": f"Failed to import GetCallGroups: {e!s}"}
573
-
574
- # Create an GetCallGroups object from the request body
575
- try:
576
- query_params = {}
577
- if request_body and "group" in request_body:
578
- query_params["group"] = request_body["group"]
579
- if request_body and "metrics" in request_body:
580
- query_params["metrics"] = request_body["metrics"]
581
- logger.debug(f"Creating GetCallGroups with params: {query_params}")
582
- config_object = GetCallGroups(**query_params)
583
- logger.debug("Successfully created endpoint config object")
584
- except Exception as e:
585
- logger.error(f"Error creating GetCallGroups: {e}")
586
- return {"error": f"Failed to create config object: {e!s}"}
587
-
588
- # Call the get_call_groups method from the SDK
589
- logger.debug("Calling get_call_groups with config object")
590
- result = api_client.get_call_group(
591
- get_call_groups=config_object
592
- )
593
- # Convert the result to a dictionary
594
- if hasattr(result, 'to_dict'):
595
- result_dict = result.to_dict()
596
- else:
597
- # If it's already a dict or another format, use it as is
598
- result_dict = result or {
599
- "success": True,
600
- "message": "Get Grouped call"
601
- }
602
-
603
- logger.debug(f"Result from get_call_group: {result_dict}")
604
- return result_dict
605
- except Exception as e:
606
- logger.error(f"Error in get_call_group: {e}")
607
- return {"error": f"Failed to get grouped call: {e!s}"}
608
-
609
-
610
- @register_as_tool(
611
- title="Get Correlated Traces",
612
- annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
613
- )
614
- @with_header_auth(ApplicationAnalyzeApi)
615
- async def get_correlated_traces(
616
- self,
617
- correlation_id: str,
618
- api_client = None,
619
- ctx=None
620
- ) -> Dict[str, Any]:
621
- """
622
- Resolve Trace IDs from Monitoring Beacons.
623
- Resolves backend trace IDs using correlation IDs from website and mobile app monitoring beacons.
624
-
625
- Args:
626
- correlation_id: Here, the `backendTraceId` is typically used which can be obtained from the `Get all beacons` API endpoint for website and mobile app monitoring. For XHR, fetch, or HTTP beacons, the `beaconId` retrieved from the same API endpoint can also serve as the `correlationId`.(required)
627
- ctx: Optional execution context.
628
- Returns:
629
- Dict[str, Any]: Grouped trace metrics result.
630
- """
631
- try:
632
- logger.debug("Calling backend correlation API")
633
- if not correlation_id:
634
- error_msg = "Correlation ID must be provided"
635
- logger.warning(error_msg)
636
- return {"error": error_msg}
637
-
638
- result = api_client.get_correlated_traces(
639
- correlation_id=correlation_id
640
- )
641
-
642
- result_dict = result.to_dict() if hasattr(result, 'to_dict') else result
643
-
644
- logger.debug(f"Result from get_correlated_traces: {result_dict}")
645
- # If result is a list, convert it to a dictionary
646
- if isinstance(result_dict, list):
647
- return {"traces": result_dict}
648
- # Otherwise ensure we return a dictionary
649
- return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
650
-
651
- except Exception as e:
652
- logger.error(f"Error in get_correlated_traces: {e}", exc_info=True)
653
- return {"error": f"Failed to get correlated traces: {e!s}"}
57
+ # @register_as_tool(
58
+ # title="Get Call Details",
59
+ # annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
60
+ # )
61
+ # @with_header_auth(ApplicationAnalyzeApi)
62
+ # async def get_call_details(
63
+ # self,
64
+ # trace_id: str,
65
+ # call_id: str,
66
+ # ctx=None,
67
+ # api_client=None
68
+ # ) -> Dict[str, Any]:
69
+ # """
70
+ # Get details of a specific call in a trace.
71
+ # This tool is to retrieve a vast information about a call present in a trace.
72
+
73
+ # Args:
74
+ # trace_id (str): The ID of the trace.
75
+ # call_id (str): The ID of the call.
76
+ # ctx: Optional context for the request.
77
+
78
+ # Returns:
79
+ # Dict[str, Any]: Details of the specified call.
80
+ # """
81
+ # try:
82
+ # if not trace_id or not call_id:
83
+ # logger.warning("Both trace_id and call_id must be provided")
84
+ # return {"error": "Both trace_id and call_id must be provided"}
85
+
86
+ # logger.debug(f"Fetching call details for trace_id={trace_id}, call_id={call_id}")
87
+ # result = api_client.get_call_details_without_preload_content(
88
+ # trace_id=trace_id,
89
+ # call_id=call_id
90
+ # )
91
+
92
+ # import json
93
+
94
+ # try:
95
+ # response_text = result.data.decode('utf-8')
96
+ # result_dict = json.loads(response_text)
97
+ # logger.debug("Successfully retrieved call details")
98
+ # return result_dict
99
+
100
+ # # Convert the result to a dictionary
101
+ # except (json.JSONDecodeError, AttributeError) as json_err:
102
+ # error_message = f"Failed to parse JSON response: {json_err}"
103
+ # logger.error(error_message)
104
+ # return {"error": error_message}
105
+
106
+ # except Exception as e:
107
+ # logger.error(f"Error getting call details: {e}", exc_info=True)
108
+ # return {"error": f"Failed to get call details: {e!s}"}
109
+
110
+ # @register_as_tool(
111
+ # title="Get Trace Details",
112
+ # annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
113
+ # )
114
+ # @with_header_auth(ApplicationAnalyzeApi)
115
+ # async def get_trace_details(
116
+ # self,
117
+ # id: str,
118
+ # retrievalSize: Optional[int] = None,
119
+ # offset: Optional[int] = None,
120
+ # ingestionTime: Optional[int] = None,
121
+ # ctx=None,
122
+ # api_client=None
123
+ # ) -> Dict[str, Any]:
124
+ # """
125
+ # Get details of a specific trace.
126
+ # This tool is to retrive comprehensive details of a particular trace.
127
+ # Args:
128
+ # id (str): The ID of the trace.
129
+ # retrievalSize (Optional[int]):The number of records to retrieve in a single request.
130
+ # Minimum value is 1 and maximum value is 10000.
131
+ # offset (Optional[int]): The number of records to be skipped from the ingestionTime.
132
+ # ingestionTime (Optional[int]): The timestamp indicating the starting point from which data was ingested.
133
+ # ctx: Optional context for the request.
134
+ # Returns:
135
+ # Dict[str, Any]: Details of the specified trace.
136
+ # """
137
+
138
+ # try:
139
+ # if not id:
140
+ # logger.warning("Trace ID must be provided")
141
+ # return {"error": "Trace ID must be provided"}
142
+
143
+ # if offset is not None and ingestionTime is None:
144
+ # logger.warning("If offset is provided, ingestionTime must also be provided")
145
+ # return {"error": "If offset is provided, ingestionTime must also be provided"}
146
+
147
+ # if retrievalSize is not None and (retrievalSize < 1 or retrievalSize > 10000):
148
+ # logger.warning(f"retrievalSize must be between 1 and 10000, got: {retrievalSize}")
149
+ # return {"error": "retrievalSize must be between 1 and 10000"}
150
+
151
+ # logger.debug(f"Fetching trace details for id={id}")
152
+ # result = api_client.get_trace_download(
153
+ # id=id,
154
+ # retrieval_size=retrievalSize,
155
+ # offset=offset,
156
+ # ingestion_time=ingestionTime
157
+ # )
158
+
159
+ # # Convert the result to a dictionary
160
+ # if hasattr(result, 'to_dict'):
161
+ # result_dict = result.to_dict()
162
+ # else:
163
+ # # If it's already a dict or another format, use it as is
164
+ # result_dict = result
165
+
166
+ # logger.debug(f"Result from get_trace_details: {result_dict}")
167
+ # # Ensure we return a dictionary
168
+ # return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
169
+
170
+ # except Exception as e:
171
+ # logger.error(f"Error getting trace details: {e}", exc_info=True)
172
+ # return {"error": f"Failed to get trace details: {e!s}"}
173
+
174
+
175
+ # @register_as_tool(
176
+ # title="Get All Traces",
177
+ # annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
178
+ # )
179
+ # @with_header_auth(ApplicationAnalyzeApi)
180
+ # async def get_all_traces(
181
+ # self,
182
+ # payload: Optional[Union[Dict[str, Any], str]]=None,
183
+ # api_client = None,
184
+ # ctx=None
185
+ # ) -> Dict[str, Any]:
186
+ # """
187
+ # Get all traces.
188
+ # This tool endpoint retrieves the metrics for traces.
189
+
190
+ # Sample payload: {
191
+ # "includeInternal": false,
192
+ # "includeSynthetic": false,
193
+ # "pagination": {
194
+ # "retrievalSize": 1
195
+ # },
196
+ # "tagFilterExpression": {
197
+ # "type": "EXPRESSION",
198
+ # "logicalOperator": "AND",
199
+ # "elements": [
200
+ # {
201
+ # "type": "TAG_FILTER",
202
+ # "name": "endpoint.name",
203
+ # "operator": "EQUALS",
204
+ # "entity": "DESTINATION",
205
+ # "value": "GET /"
206
+ # },
207
+ # {
208
+ # "type": "TAG_FILTER",
209
+ # "name": "service.name",
210
+ # "operator": "EQUALS",
211
+ # "entity": "DESTINATION",
212
+ # "value": "groundskeeper"
213
+ # }
214
+ # ]
215
+ # },
216
+ # "order": {
217
+ # "by": "traceLabel",
218
+ # "direction": "DESC"
219
+ # }
220
+ # }
221
+
222
+ # Returns:
223
+ # Dict[str, Any]: List of traces matching the criteria.
224
+ # """
225
+ # try:
226
+ # # Parse the payload if it's a string
227
+ # if isinstance(payload, str):
228
+ # logger.debug("Payload is a string, attempting to parse")
229
+ # try:
230
+ # import json
231
+ # try:
232
+ # parsed_payload = json.loads(payload)
233
+ # logger.debug("Successfully parsed payload as JSON")
234
+ # request_body = parsed_payload
235
+ # except json.JSONDecodeError as e:
236
+ # logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
237
+
238
+ # # Try replacing single quotes with double quotes
239
+ # fixed_payload = payload.replace("'", "\"")
240
+ # try:
241
+ # parsed_payload = json.loads(fixed_payload)
242
+ # logger.debug("Successfully parsed fixed JSON")
243
+ # request_body = parsed_payload
244
+ # except json.JSONDecodeError:
245
+ # # Try as Python literal
246
+ # import ast
247
+ # try:
248
+ # parsed_payload = ast.literal_eval(payload)
249
+ # logger.debug("Successfully parsed payload as Python literal")
250
+ # request_body = parsed_payload
251
+ # except (SyntaxError, ValueError) as e2:
252
+ # logger.debug(f"Failed to parse payload string: {e2}")
253
+ # return {"error": f"Invalid payload format: {e2}", "payload": payload}
254
+ # except Exception as e:
255
+ # logger.debug(f"Error parsing payload string: {e}")
256
+ # return {"error": f"Failed to parse payload: {e}", "payload": payload}
257
+ # else:
258
+ # # If payload is already a dictionary, use it directly
259
+ # logger.debug("Using provided payload dictionary")
260
+ # request_body = payload
261
+
262
+ # # Import the GetTraces class
263
+ # try:
264
+ # from instana_client.models.get_traces import (
265
+ # GetTraces,
266
+ # )
267
+ # from instana_client.models.group import Group
268
+ # logger.debug("Successfully imported GetTraces")
269
+ # except ImportError as e:
270
+ # logger.debug(f"Error importing GetTraces: {e}")
271
+ # return {"error": f"Failed to import GetTraces: {e!s}"}
272
+
273
+ # # Create an GetTraces object from the request body
274
+ # try:
275
+ # query_params = {}
276
+ # if request_body and "tag_filter_expression" in request_body:
277
+ # query_params["tag_filter_expression"] = request_body["tag_filter_expression"]
278
+ # logger.debug(f"Creating get_traces with params: {query_params}")
279
+ # config_object = GetTraces(**query_params)
280
+ # logger.debug("Successfully got traces")
281
+ # except Exception as e:
282
+ # logger.debug(f"Error creating get_traces: {e}")
283
+ # return {"error": f"Failed to get tracest: {e!s}"}
284
+
285
+ # # Call the get_traces method from the SDK
286
+ # logger.debug("Calling get_traces with config object")
287
+ # result = api_client.get_traces(
288
+ # get_traces=config_object
289
+ # )
290
+ # # Convert the result to a dictionary
291
+ # if hasattr(result, 'to_dict'):
292
+ # result_dict = result.to_dict()
293
+ # else:
294
+ # # If it's already a dict or another format, use it as is
295
+ # result_dict = result or {
296
+ # "success": True,
297
+ # "message": "Get traces"
298
+ # }
299
+
300
+ # logger.debug(f"Result from get_traces: {result_dict}")
301
+ # return result_dict
302
+ # except Exception as e:
303
+ # logger.error(f"Error in get_traces: {e}")
304
+ # return {"error": f"Failed to get traces: {e!s}"}
305
+
306
+ # @register_as_tool(
307
+ # title="Get Grouped Trace Metrics",
308
+ # annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
309
+ # )
310
+ # @with_header_auth(ApplicationAnalyzeApi)
311
+ # async def get_grouped_trace_metrics(
312
+ # self,
313
+ # payload: Optional[Union[Dict[str, Any], str]]=None,
314
+ # fill_time_series: Optional[bool] = None,
315
+ # api_client=None,
316
+ # ctx=None
317
+ # ) -> Dict[str, Any]:
318
+ # """
319
+ # The API endpoint retrieves metrics for traces that are grouped in the endpoint or service name.
320
+ # This tool Get grouped trace metrics (by endpoint or service name).
321
+
322
+ # Args:
323
+ # fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
324
+ # Sample Payload: {
325
+ # "group": {
326
+ # "groupbyTag": "trace.endpoint.name",
327
+ # "groupbyTagEntity": "NOT_APPLICABLE"
328
+ # },
329
+ # "metrics": [
330
+ # {
331
+ # "aggregation": "SUM",
332
+ # "metric": "latency"
333
+ # }
334
+ # ],
335
+ # "order": {
336
+ # "by": "latency",
337
+ # "direction": "ASC"
338
+ # },
339
+ # "pagination": {
340
+ # "retrievalSize": 20
341
+ # },
342
+ # "tagFilterExpression": {
343
+ # "type": "EXPRESSION",
344
+ # "logicalOperator": "AND",
345
+ # "elements": [
346
+ # {
347
+ # "type": "TAG_FILTER",
348
+ # "name": "call.type",
349
+ # "operator": "EQUALS",
350
+ # "entity": "NOT_APPLICABLE",
351
+ # "value": "DATABASE"
352
+ # },
353
+ # {
354
+ # "type": "TAG_FILTER",
355
+ # "name": "service.name",
356
+ # "operator": "EQUALS",
357
+ # "entity": "DESTINATION",
358
+ # "value": "ratings"
359
+ # }
360
+ # ]
361
+ # }
362
+ # }
363
+ # ctx: Optional execution context.
364
+
365
+ # Returns:
366
+ # Dict[str, Any]: Grouped trace metrics result.
367
+ # """
368
+ # try:
369
+ # # Parse the payload if it's a string
370
+ # if isinstance(payload, str):
371
+ # logger.debug("Payload is a string, attempting to parse")
372
+ # try:
373
+ # import json
374
+ # try:
375
+ # parsed_payload = json.loads(payload)
376
+ # logger.debug("Successfully parsed payload as JSON")
377
+ # request_body = parsed_payload
378
+ # except json.JSONDecodeError as e:
379
+ # logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
380
+
381
+ # # Try replacing single quotes with double quotes
382
+ # fixed_payload = payload.replace("'", "\"")
383
+ # try:
384
+ # parsed_payload = json.loads(fixed_payload)
385
+ # logger.debug("Successfully parsed fixed JSON")
386
+ # request_body = parsed_payload
387
+ # except json.JSONDecodeError:
388
+ # # Try as Python literal
389
+ # import ast
390
+ # try:
391
+ # parsed_payload = ast.literal_eval(payload)
392
+ # logger.debug("Successfully parsed payload as Python literal")
393
+ # request_body = parsed_payload
394
+ # except (SyntaxError, ValueError) as e2:
395
+ # logger.debug(f"Failed to parse payload string: {e2}")
396
+ # return {"error": f"Invalid payload format: {e2}", "payload": payload}
397
+ # except Exception as e:
398
+ # logger.debug(f"Error parsing payload string: {e}")
399
+ # return {"error": f"Failed to parse payload: {e}", "payload": payload}
400
+ # else:
401
+ # # If payload is already a dictionary, use it directly
402
+ # logger.debug("Using provided payload dictionary")
403
+ # request_body = payload
404
+
405
+ # # Import the GetTraceGroups class
406
+ # try:
407
+ # from instana_client.models.get_trace_groups import (
408
+ # GetTraceGroups,
409
+ # )
410
+ # from instana_client.models.group import Group
411
+ # logger.debug("Successfully imported GetTraceGroups")
412
+ # except ImportError as e:
413
+ # logger.debug(f"Error importing GetTraceGroups: {e}")
414
+ # return {"error": f"Failed to import GetTraceGroups: {e!s}"}
415
+
416
+ # # Create a GetTraceGroups object from the request body
417
+ # try:
418
+ # query_params = {}
419
+ # if request_body and "group" in request_body:
420
+ # query_params["group"] = request_body["group"]
421
+ # if request_body and "metrics" in request_body:
422
+ # query_params["metrics"] = request_body["metrics"]
423
+ # if request_body and "tag_filter_expression" in request_body:
424
+ # query_params["tag_filter_expression"] = request_body["tag_filter_expression"]
425
+ # logger.debug(f"Creating GetTraceGroups with params: {query_params}")
426
+ # config_object = GetTraceGroups(**query_params)
427
+ # logger.debug("Successfully created GetTraceGroups config object")
428
+ # except Exception as e:
429
+ # logger.debug(f"Error creating GetTraceGroups: {e}")
430
+ # return {"error": f"Failed to create config object: {e!s}"}
431
+
432
+ # # Call the get_trace_groups method from the SDK
433
+ # logger.debug("Calling get_trace_groups with config object")
434
+ # result = api_client.get_trace_groups(
435
+ # get_trace_groups=config_object
436
+ # )
437
+ # # Convert the result to a dictionary
438
+ # if hasattr(result, 'to_dict'):
439
+ # result_dict = result.to_dict()
440
+ # else:
441
+ # # If it's already a dict or another format, use it as is
442
+ # result_dict = result or {
443
+ # "success": True,
444
+ # "message": "Grouped trace metrics"
445
+ # }
446
+
447
+ # logger.debug(f"Result from get_grouped_trace_metrics: {result_dict}")
448
+ # return result_dict
449
+ # except Exception as e:
450
+ # logger.error(f"Error in get_grouped_trace_metrics: {e}")
451
+ # return {"error": f"Failed to get grouped trace metrics: {e!s}"}
452
+
453
+ # # @register_as_tool(
454
+ # # title="Get Grouped Calls Metrics",
455
+ # # annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
456
+ # # )
457
+ # # @with_header_auth(ApplicationAnalyzeApi)
458
+ # # async def get_grouped_calls_metrics(
459
+ # # self,
460
+ # # fillTimeSeries: Optional[str] = None,
461
+ # # payload: Optional[Union[Dict[str, Any], str]]=None,
462
+ # # api_client = None,
463
+ # # ctx=None
464
+ # # ) -> Dict[str, Any]:
465
+ # # """
466
+ # # Get grouped calls metrics.
467
+ # # This endpoint retrieves the metrics for calls.
468
+
469
+ # # Args:
470
+ # # fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
471
+ # # Sample payload: {
472
+ # # "group": {
473
+ # # "groupbyTag": "service.name",
474
+ # # "groupbyTagEntity": "DESTINATION"
475
+ # # },
476
+ # # "metrics": [
477
+ # # {
478
+ # # "aggregation": "SUM",
479
+ # # "metric": "calls"
480
+ # # },
481
+ # # {
482
+ # # "aggregation": "P75",
483
+ # # "metric": "latency",
484
+ # # "granularity": 360
485
+ # # }
486
+ # # ],
487
+ # # "includeInternal": false,
488
+ # # "includeSynthetic": false,
489
+ # # "order": {
490
+ # # "by": "calls",
491
+ # # "direction": "DESC"
492
+ # # },
493
+ # # "pagination": {
494
+ # # "retrievalSize": 20
495
+ # # },
496
+ # # "tagFilterExpression": {
497
+ # # "type": "EXPRESSION",
498
+ # # "logicalOperator": "AND",
499
+ # # "elements": [
500
+ # # {
501
+ # # "type": "TAG_FILTER",
502
+ # # "name": "call.type",
503
+ # # "operator": "EQUALS",
504
+ # # "entity": "NOT_APPLICABLE",
505
+ # # "value": "DATABASE"
506
+ # # },
507
+ # # {
508
+ # # "type": "TAG_FILTER",
509
+ # # "name": "service.name",
510
+ # # "operator": "EQUALS",
511
+ # # "entity": "DESTINATION",
512
+ # # "value": "ratings"
513
+ # # }
514
+ # # ]
515
+ # # },
516
+ # # "timeFrame": {
517
+ # # "to": "1688366990000",
518
+ # # "windowSize": "600000"
519
+ # # }
520
+ # # }
521
+ # # ctx: Optional execution context.
522
+
523
+ # # Returns:
524
+ # # Dict[str, Any]: Grouped calls metrics result.
525
+ # # """
526
+ # # try:
527
+ # # # Parse the payload if it's a string
528
+ # # if isinstance(payload, str):
529
+ # # logger.debug("Payload is a string, attempting to parse")
530
+ # # try:
531
+ # # import json
532
+ # # try:
533
+ # # parsed_payload = json.loads(payload)
534
+ # # logger.debug("Successfully parsed payload as JSON")
535
+ # # request_body = parsed_payload
536
+ # # except json.JSONDecodeError as e:
537
+ # # logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
538
+
539
+ # # # Try replacing single quotes with double quotes
540
+ # # fixed_payload = payload.replace("'", "\"")
541
+ # # try:
542
+ # # parsed_payload = json.loads(fixed_payload)
543
+ # # logger.debug("Successfully parsed fixed JSON")
544
+ # # request_body = parsed_payload
545
+ # # except json.JSONDecodeError:
546
+ # # # Try as Python literal
547
+ # # import ast
548
+ # # try:
549
+ # # parsed_payload = ast.literal_eval(payload)
550
+ # # logger.debug("Successfully parsed payload as Python literal")
551
+ # # request_body = parsed_payload
552
+ # # except (SyntaxError, ValueError) as e2:
553
+ # # logger.debug(f"Failed to parse payload string: {e2}")
554
+ # # return {"error": f"Invalid payload format: {e2}", "payload": payload}
555
+ # # except Exception as e:
556
+ # # logger.debug(f"Error parsing payload string: {e}")
557
+ # # return {"error": f"Failed to parse payload: {e}", "payload": payload}
558
+ # # else:
559
+ # # # If payload is already a dictionary, use it directly
560
+ # # logger.debug("Using provided payload dictionary")
561
+ # # request_body = payload
562
+
563
+ # # # Import the GetCallGroups class
564
+ # # try:
565
+ # # from instana_client.models.get_call_groups import (
566
+ # # GetCallGroups,
567
+ # # )
568
+ # # from instana_client.models.group import Group
569
+ # # logger.debug("Successfully imported GetCallGroups")
570
+ # # except ImportError as e:
571
+ # # logger.debug(f"Error importing GetCallGroups: {e}")
572
+ # # return {"error": f"Failed to import GetCallGroups: {e!s}"}
573
+
574
+ # # # Create a GetCallGroups object from the request body
575
+ # # try:
576
+ # # query_params = {}
577
+ # # if request_body and "group" in request_body:
578
+ # # query_params["group"] = request_body["group"]
579
+ # # if request_body and "metrics" in request_body:
580
+ # # query_params["metrics"] = request_body["metrics"]
581
+ # # logger.debug(f"Creating GetCallGroups with params: {query_params}")
582
+ # # config_object = GetCallGroups(**query_params)
583
+ # # logger.debug("Successfully created GetCallGroups config object")
584
+ # # except Exception as e:
585
+ # # logger.error(f"Error creating GetCallGroups: {e}")
586
+ # # return {"error": f"Failed to create config object: {e!s}"}
587
+
588
+ # # # Call the get_call_groups method from the SDK
589
+ # # logger.debug("Calling get_call_groups with config object")
590
+ # # result = api_client.get_call_group(
591
+ # # get_call_groups=config_object
592
+ # # )
593
+ # # # Convert the result to a dictionary
594
+ # # if hasattr(result, 'to_dict'):
595
+ # # result_dict = result.to_dict()
596
+ # # else:
597
+ # # # If it's already a dict or another format, use it as is
598
+ # # result_dict = result or {
599
+ # # "success": True,
600
+ # # "message": "Get Grouped call"
601
+ # # }
602
+
603
+ # # logger.debug(f"Result from get_call_group: {result_dict}")
604
+ # # return result_dict
605
+ # # except Exception as e:
606
+ # # logger.error(f"Error in get_call_group: {e}")
607
+ # # return {"error": f"Failed to get grouped call: {e!s}"}
608
+
609
+
610
+ # @register_as_tool(
611
+ # title="Get Correlated Traces",
612
+ # annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
613
+ # )
614
+ # @with_header_auth(ApplicationAnalyzeApi)
615
+ # async def get_correlated_traces(
616
+ # self,
617
+ # correlation_id: str,
618
+ # api_client = None,
619
+ # ctx=None
620
+ # ) -> Dict[str, Any]:
621
+ # """
622
+ # Resolve Trace IDs from Monitoring Beacons.
623
+ # Resolves backend trace IDs using correlation IDs from website and mobile app monitoring beacons.
624
+
625
+ # Args:
626
+ # correlation_id: Here, the `backendTraceId` is typically used which can be obtained from the `Get all beacons` API endpoint for website and mobile app monitoring. For XHR, fetch, or HTTP beacons, the `beaconId` retrieved from the same API endpoint can also serve as the `correlationId`.(required)
627
+ # ctx: Optional execution context.
628
+ # Returns:
629
+ # Dict[str, Any]: Correlated traces result.
630
+ # """
631
+ # try:
632
+ # logger.debug("Calling backend correlation API")
633
+ # if not correlation_id:
634
+ # error_msg = "Correlation ID must be provided"
635
+ # logger.warning(error_msg)
636
+ # return {"error": error_msg}
637
+
638
+ # result = api_client.get_correlated_traces(
639
+ # correlation_id=correlation_id
640
+ # )
641
+
642
+ # result_dict = result.to_dict() if hasattr(result, 'to_dict') else result
643
+
644
+ # logger.debug(f"Result from get_correlated_traces: {result_dict}")
645
+ # # If result is a list, convert it to a dictionary
646
+ # if isinstance(result_dict, list):
647
+ # return {"traces": result_dict}
648
+ # # Otherwise ensure we return a dictionary
649
+ # return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
650
+
651
+ # except Exception as e:
652
+ # logger.error(f"Error in get_correlated_traces: {e}", exc_info=True)
653
+ # return {"error": f"Failed to get correlated traces: {e!s}"}