mcp-instana 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67) hide show
  1. mcp_instana-0.2.0.dist-info/METADATA +1229 -0
  2. mcp_instana-0.2.0.dist-info/RECORD +59 -0
  3. {mcp_instana-0.1.0.dist-info → mcp_instana-0.2.0.dist-info}/WHEEL +1 -1
  4. mcp_instana-0.2.0.dist-info/entry_points.txt +4 -0
  5. mcp_instana-0.1.0.dist-info/LICENSE → mcp_instana-0.2.0.dist-info/licenses/LICENSE.md +3 -3
  6. src/application/__init__.py +1 -0
  7. src/{client/application_alert_config_mcp_tools.py → application/application_alert_config.py} +251 -273
  8. src/application/application_analyze.py +628 -0
  9. src/application/application_catalog.py +155 -0
  10. src/application/application_global_alert_config.py +653 -0
  11. src/{client/application_metrics_mcp_tools.py → application/application_metrics.py} +113 -131
  12. src/{client/application_resources_mcp_tools.py → application/application_resources.py} +131 -151
  13. src/application/application_settings.py +1731 -0
  14. src/application/application_topology.py +111 -0
  15. src/automation/action_catalog.py +416 -0
  16. src/automation/action_history.py +338 -0
  17. src/core/__init__.py +1 -0
  18. src/core/server.py +586 -0
  19. src/core/utils.py +213 -0
  20. src/event/__init__.py +1 -0
  21. src/event/events_tools.py +850 -0
  22. src/infrastructure/__init__.py +1 -0
  23. src/{client/infrastructure_analyze_mcp_tools.py → infrastructure/infrastructure_analyze.py} +207 -206
  24. src/{client/infrastructure_catalog_mcp_tools.py → infrastructure/infrastructure_catalog.py} +197 -265
  25. src/infrastructure/infrastructure_metrics.py +171 -0
  26. src/{client/infrastructure_resources_mcp_tools.py → infrastructure/infrastructure_resources.py} +198 -227
  27. src/{client/infrastructure_topology_mcp_tools.py → infrastructure/infrastructure_topology.py} +110 -109
  28. src/log/__init__.py +1 -0
  29. src/log/log_alert_configuration.py +331 -0
  30. src/prompts/__init__.py +16 -0
  31. src/prompts/application/__init__.py +1 -0
  32. src/prompts/application/application_alerts.py +54 -0
  33. src/prompts/application/application_catalog.py +26 -0
  34. src/prompts/application/application_metrics.py +57 -0
  35. src/prompts/application/application_resources.py +26 -0
  36. src/prompts/application/application_settings.py +75 -0
  37. src/prompts/application/application_topology.py +30 -0
  38. src/prompts/events/__init__.py +1 -0
  39. src/prompts/events/events_tools.py +161 -0
  40. src/prompts/infrastructure/infrastructure_analyze.py +72 -0
  41. src/prompts/infrastructure/infrastructure_catalog.py +53 -0
  42. src/prompts/infrastructure/infrastructure_metrics.py +45 -0
  43. src/prompts/infrastructure/infrastructure_resources.py +74 -0
  44. src/prompts/infrastructure/infrastructure_topology.py +38 -0
  45. src/prompts/settings/__init__.py +0 -0
  46. src/prompts/settings/custom_dashboard.py +157 -0
  47. src/prompts/website/__init__.py +1 -0
  48. src/prompts/website/website_analyze.py +35 -0
  49. src/prompts/website/website_catalog.py +40 -0
  50. src/prompts/website/website_configuration.py +105 -0
  51. src/prompts/website/website_metrics.py +34 -0
  52. src/settings/__init__.py +1 -0
  53. src/settings/custom_dashboard_tools.py +417 -0
  54. src/website/__init__.py +0 -0
  55. src/website/website_analyze.py +433 -0
  56. src/website/website_catalog.py +171 -0
  57. src/website/website_configuration.py +770 -0
  58. src/website/website_metrics.py +241 -0
  59. mcp_instana-0.1.0.dist-info/METADATA +0 -649
  60. mcp_instana-0.1.0.dist-info/RECORD +0 -19
  61. mcp_instana-0.1.0.dist-info/entry_points.txt +0 -3
  62. src/client/What is the sum of queue depth for all q +0 -55
  63. src/client/events_mcp_tools.py +0 -531
  64. src/client/instana_client_base.py +0 -93
  65. src/client/log_alert_configuration_mcp_tools.py +0 -316
  66. src/client/show the top 5 services with the highest +0 -28
  67. src/mcp_server.py +0 -343
@@ -0,0 +1,628 @@
1
+ """
2
+ Application Analyze MCP Tools Module
3
+
4
+ This module provides application analyze tool functionality for Instana monitoring.
5
+ """
6
+
7
+ import logging
8
+ from typing import Any, Dict, List, Optional, Union
9
+
10
+ # Import the necessary classes from the SDK
11
+ try:
12
+ from instana_client.api.application_analyze_api import ApplicationAnalyzeApi
13
+ from instana_client.api_client import ApiClient
14
+ from instana_client.configuration import Configuration
15
+ from instana_client.models.get_call_groups import GetCallGroups
16
+ from instana_client.models.get_traces import GetTraces
17
+
18
+ except ImportError:
19
+ import logging
20
+ logger = logging.getLogger(__name__)
21
+ logger.error("Failed to import application analyze API", exc_info=True)
22
+ raise
23
+
24
+ from src.core.utils import BaseInstanaClient, register_as_tool, with_header_auth
25
+
26
+ # Configure logger for this module
27
+ logger = logging.getLogger(__name__)
28
+
29
class ApplicationAnalyzeMCPTools(BaseInstanaClient):
    """Tools for application analyze in Instana MCP."""

    def __init__(self, read_token: str, base_url: str):
        """Initialize the Application Analyze MCP tools client.

        Args:
            read_token: Instana API token used for authentication.
            base_url: Base URL of the Instana backend.
        """
        super().__init__(read_token=read_token, base_url=base_url)

        try:
            # Build an SDK configuration that points at the Instana backend
            # and authenticates via the 'apiToken' API-key scheme.
            configuration = Configuration()
            configuration.host = base_url
            configuration.api_key['ApiKeyAuth'] = read_token
            configuration.api_key_prefix['ApiKeyAuth'] = 'apiToken'

            # Wrap the configuration in an API client and hand it to the
            # SDK's ApplicationAnalyzeApi, which the tool methods delegate to.
            self.analyze_api = ApplicationAnalyzeApi(
                api_client=ApiClient(configuration=configuration)
            )
        except Exception as e:
            logger.error(f"Error initializing ApplicationAnalyzeApi: {e}", exc_info=True)
            raise
52
+
53
+ @register_as_tool
54
+ @with_header_auth(ApplicationAnalyzeApi)
55
+ async def get_call_details(
56
+ self,
57
+ trace_id: str,
58
+ call_id: str,
59
+ ctx=None,
60
+ api_client=None
61
+ ) -> Dict[str, Any]:
62
+ """
63
+ Get details of a specific call in a trace.
64
+ This tool is to retrieve a vast information about a call present in a trace.
65
+
66
+ Args:
67
+ trace_id (str): The ID of the trace.
68
+ call_id (str): The ID of the call.
69
+ ctx: Optional context for the request.
70
+
71
+ Returns:
72
+ Dict[str, Any]: Details of the specified call.
73
+ """
74
+ try:
75
+ if not trace_id or not call_id:
76
+ logger.warning("Both trace_id and call_id must be provided")
77
+ return {"error": "Both trace_id and call_id must be provided"}
78
+
79
+ logger.debug(f"Fetching call details for trace_id={trace_id}, call_id={call_id}")
80
+ result = api_client.get_call_details(
81
+ trace_id=trace_id,
82
+ call_id=call_id
83
+ )
84
+
85
+ # Convert the result to a dictionary
86
+ if hasattr(result, 'to_dict'):
87
+ result_dict = result.to_dict()
88
+ else:
89
+ # If it's already a dict or another format, use it as is
90
+ result_dict = result
91
+
92
+ logger.debug(f"Result from get_call_details: {result_dict}")
93
+ # Ensure we return a dictionary
94
+ return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
95
+
96
+ except Exception as e:
97
+ logger.error(f"Error getting call details: {e}", exc_info=True)
98
+ return {"error": f"Failed to get call details: {e!s}"}
99
+
100
+ @register_as_tool
101
+ @with_header_auth(ApplicationAnalyzeApi)
102
+ async def get_trace_details(
103
+ self,
104
+ id: str,
105
+ retrievalSize: Optional[int] = None,
106
+ offset: Optional[int] = None,
107
+ ingestionTime: Optional[int] = None,
108
+ ctx=None,
109
+ api_client=None
110
+ ) -> Dict[str, Any]:
111
+ """
112
+ Get details of a specific trace.
113
+ This tool is to retrive comprehensive details of a particular trace.
114
+ Args:
115
+ id (str): The ID of the trace.
116
+ retrievalSize (Optional[int]):The number of records to retrieve in a single request.
117
+ Minimum value is 1 and maximum value is 10000.
118
+ offset (Optional[int]): The number of records to be skipped from the ingestionTime.
119
+ ingestionTime (Optional[int]): The timestamp indicating the starting point from which data was ingested.
120
+ ctx: Optional context for the request.
121
+ Returns:
122
+ Dict[str, Any]: Details of the specified trace.
123
+ """
124
+
125
+ try:
126
+ if not id:
127
+ logger.warning("Trace ID must be provided")
128
+ return {"error": "Trace ID must be provided"}
129
+
130
+ if offset is not None and ingestionTime is None:
131
+ logger.warning("If offset is provided, ingestionTime must also be provided")
132
+ return {"error": "If offset is provided, ingestionTime must also be provided"}
133
+
134
+ if retrievalSize is not None and (retrievalSize < 1 or retrievalSize > 10000):
135
+ logger.warning(f"retrievalSize must be between 1 and 10000, got: {retrievalSize}")
136
+ return {"error": "retrievalSize must be between 1 and 10000"}
137
+
138
+ logger.debug(f"Fetching trace details for id={id}")
139
+ result = api_client.get_trace_download(
140
+ id=id,
141
+ retrieval_size=retrievalSize,
142
+ offset=offset,
143
+ ingestion_time=ingestionTime
144
+ )
145
+
146
+ # Convert the result to a dictionary
147
+ if hasattr(result, 'to_dict'):
148
+ result_dict = result.to_dict()
149
+ else:
150
+ # If it's already a dict or another format, use it as is
151
+ result_dict = result
152
+
153
+ logger.debug(f"Result from get_trace_details: {result_dict}")
154
+ # Ensure we return a dictionary
155
+ return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
156
+
157
+ except Exception as e:
158
+ logger.error(f"Error getting trace details: {e}", exc_info=True)
159
+ return {"error": f"Failed to get trace details: {e!s}"}
160
+
161
+
162
+ @register_as_tool
163
+ @with_header_auth(ApplicationAnalyzeApi)
164
+ async def get_all_traces(
165
+ self,
166
+ payload: Optional[Union[Dict[str, Any], str]]=None,
167
+ api_client = None,
168
+ ctx=None
169
+ ) -> Dict[str, Any]:
170
+ """
171
+ Get all traces.
172
+ This tool endpoint retrieves the metrics for traces.
173
+
174
+ Sample payload: {
175
+ "includeInternal": false,
176
+ "includeSynthetic": false,
177
+ "pagination": {
178
+ "retrievalSize": 1
179
+ },
180
+ "tagFilterExpression": {
181
+ "type": "EXPRESSION",
182
+ "logicalOperator": "AND",
183
+ "elements": [
184
+ {
185
+ "type": "TAG_FILTER",
186
+ "name": "endpoint.name",
187
+ "operator": "EQUALS",
188
+ "entity": "DESTINATION",
189
+ "value": "GET /"
190
+ },
191
+ {
192
+ "type": "TAG_FILTER",
193
+ "name": "service.name",
194
+ "operator": "EQUALS",
195
+ "entity": "DESTINATION",
196
+ "value": "groundskeeper"
197
+ }
198
+ ]
199
+ },
200
+ "order": {
201
+ "by": "traceLabel",
202
+ "direction": "DESC"
203
+ }
204
+ }
205
+
206
+ Returns:
207
+ Dict[str, Any]: List of traces matching the criteria.
208
+ """
209
+ try:
210
+ # Parse the payload if it's a string
211
+ if isinstance(payload, str):
212
+ logger.debug("Payload is a string, attempting to parse")
213
+ try:
214
+ import json
215
+ try:
216
+ parsed_payload = json.loads(payload)
217
+ logger.debug("Successfully parsed payload as JSON")
218
+ request_body = parsed_payload
219
+ except json.JSONDecodeError as e:
220
+ logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
221
+
222
+ # Try replacing single quotes with double quotes
223
+ fixed_payload = payload.replace("'", "\"")
224
+ try:
225
+ parsed_payload = json.loads(fixed_payload)
226
+ logger.debug("Successfully parsed fixed JSON")
227
+ request_body = parsed_payload
228
+ except json.JSONDecodeError:
229
+ # Try as Python literal
230
+ import ast
231
+ try:
232
+ parsed_payload = ast.literal_eval(payload)
233
+ logger.debug("Successfully parsed payload as Python literal")
234
+ request_body = parsed_payload
235
+ except (SyntaxError, ValueError) as e2:
236
+ logger.debug(f"Failed to parse payload string: {e2}")
237
+ return {"error": f"Invalid payload format: {e2}", "payload": payload}
238
+ except Exception as e:
239
+ logger.debug(f"Error parsing payload string: {e}")
240
+ return {"error": f"Failed to parse payload: {e}", "payload": payload}
241
+ else:
242
+ # If payload is already a dictionary, use it directly
243
+ logger.debug("Using provided payload dictionary")
244
+ request_body = payload
245
+
246
+ # Import the GetTraces class
247
+ try:
248
+ from instana_client.models.get_traces import (
249
+ GetTraces,
250
+ )
251
+ from instana_client.models.group import Group
252
+ logger.debug("Successfully imported GetTraces")
253
+ except ImportError as e:
254
+ logger.debug(f"Error importing GetTraces: {e}")
255
+ return {"error": f"Failed to import GetTraces: {e!s}"}
256
+
257
+ # Create an GetTraces object from the request body
258
+ try:
259
+ query_params = {}
260
+ if request_body and "tag_filter_expression" in request_body:
261
+ query_params["tag_filter_expression"] = request_body["tag_filter_expression"]
262
+ logger.debug(f"Creating get_traces with params: {query_params}")
263
+ config_object = GetTraces(**query_params)
264
+ logger.debug("Successfully got traces")
265
+ except Exception as e:
266
+ logger.debug(f"Error creating get_traces: {e}")
267
+ return {"error": f"Failed to get tracest: {e!s}"}
268
+
269
+ # Call the get_traces method from the SDK
270
+ logger.debug("Calling get_traces with config object")
271
+ result = api_client.get_traces(
272
+ get_traces=config_object
273
+ )
274
+ # Convert the result to a dictionary
275
+ if hasattr(result, 'to_dict'):
276
+ result_dict = result.to_dict()
277
+ else:
278
+ # If it's already a dict or another format, use it as is
279
+ result_dict = result or {
280
+ "success": True,
281
+ "message": "Get traces"
282
+ }
283
+
284
+ logger.debug(f"Result from get_traces: {result_dict}")
285
+ return result_dict
286
+ except Exception as e:
287
+ logger.error(f"Error in get_traces: {e}")
288
+ return {"error": f"Failed to get traces: {e!s}"}
289
+
290
+ @register_as_tool
291
+ @with_header_auth(ApplicationAnalyzeApi)
292
+ async def get_grouped_trace_metrics(
293
+ self,
294
+ payload: Optional[Union[Dict[str, Any], str]]=None,
295
+ fill_time_series: Optional[bool] = None,
296
+ api_client=None,
297
+ ctx=None
298
+ ) -> Dict[str, Any]:
299
+ """
300
+ The API endpoint retrieves metrics for traces that are grouped in the endpoint or service name.
301
+ This tool Get grouped trace metrics (by endpoint or service name).
302
+
303
+ Args:
304
+ fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
305
+ Sample Payload: {
306
+ "group": {
307
+ "groupbyTag": "trace.endpoint.name",
308
+ "groupbyTagEntity": "NOT_APPLICABLE"
309
+ },
310
+ "metrics": [
311
+ {
312
+ "aggregation": "SUM",
313
+ "metric": "latency"
314
+ }
315
+ ],
316
+ "order": {
317
+ "by": "latency",
318
+ "direction": "ASC"
319
+ },
320
+ "pagination": {
321
+ "retrievalSize": 20
322
+ },
323
+ "tagFilterExpression": {
324
+ "type": "EXPRESSION",
325
+ "logicalOperator": "AND",
326
+ "elements": [
327
+ {
328
+ "type": "TAG_FILTER",
329
+ "name": "call.type",
330
+ "operator": "EQUALS",
331
+ "entity": "NOT_APPLICABLE",
332
+ "value": "DATABASE"
333
+ },
334
+ {
335
+ "type": "TAG_FILTER",
336
+ "name": "service.name",
337
+ "operator": "EQUALS",
338
+ "entity": "DESTINATION",
339
+ "value": "ratings"
340
+ }
341
+ ]
342
+ }
343
+ }
344
+ ctx: Optional execution context.
345
+
346
+ Returns:
347
+ Dict[str, Any]: Grouped trace metrics result.
348
+ """
349
+ try:
350
+ # Parse the payload if it's a string
351
+ if isinstance(payload, str):
352
+ logger.debug("Payload is a string, attempting to parse")
353
+ try:
354
+ import json
355
+ try:
356
+ parsed_payload = json.loads(payload)
357
+ logger.debug("Successfully parsed payload as JSON")
358
+ request_body = parsed_payload
359
+ except json.JSONDecodeError as e:
360
+ logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
361
+
362
+ # Try replacing single quotes with double quotes
363
+ fixed_payload = payload.replace("'", "\"")
364
+ try:
365
+ parsed_payload = json.loads(fixed_payload)
366
+ logger.debug("Successfully parsed fixed JSON")
367
+ request_body = parsed_payload
368
+ except json.JSONDecodeError:
369
+ # Try as Python literal
370
+ import ast
371
+ try:
372
+ parsed_payload = ast.literal_eval(payload)
373
+ logger.debug("Successfully parsed payload as Python literal")
374
+ request_body = parsed_payload
375
+ except (SyntaxError, ValueError) as e2:
376
+ logger.debug(f"Failed to parse payload string: {e2}")
377
+ return {"error": f"Invalid payload format: {e2}", "payload": payload}
378
+ except Exception as e:
379
+ logger.debug(f"Error parsing payload string: {e}")
380
+ return {"error": f"Failed to parse payload: {e}", "payload": payload}
381
+ else:
382
+ # If payload is already a dictionary, use it directly
383
+ logger.debug("Using provided payload dictionary")
384
+ request_body = payload
385
+
386
+ # Import the GetTraceGroups class
387
+ try:
388
+ from instana_client.models.get_trace_groups import (
389
+ GetTraceGroups,
390
+ )
391
+ from instana_client.models.group import Group
392
+ logger.debug("Successfully imported GetTraceGroups")
393
+ except ImportError as e:
394
+ logger.debug(f"Error importing GetTraceGroups: {e}")
395
+ return {"error": f"Failed to import GetTraceGroups: {e!s}"}
396
+
397
+ # Create an GetTraceGroups object from the request body
398
+ try:
399
+ query_params = {}
400
+ if request_body and "group" in request_body:
401
+ query_params["group"] = request_body["group"]
402
+ if request_body and "metrics" in request_body:
403
+ query_params["metrics"] = request_body["metrics"]
404
+ if request_body and "tag_filter_expression" in request_body:
405
+ query_params["tag_filter_expression"] = request_body["tag_filter_expression"]
406
+ logger.debug(f"Creating GetTraceGroups with params: {query_params}")
407
+ config_object = GetTraceGroups(**query_params)
408
+ logger.debug("Successfully created endpoint config object")
409
+ except Exception as e:
410
+ logger.debug(f"Error creating GetTraceGroups: {e}")
411
+ return {"error": f"Failed to create config object: {e!s}"}
412
+
413
+ # Call the create_endpoint_config method from the SDK
414
+ logger.debug("Calling create_endpoint_config with config object")
415
+ result = api_client.get_trace_groups(
416
+ get_trace_groups=config_object
417
+ )
418
+ # Convert the result to a dictionary
419
+ if hasattr(result, 'to_dict'):
420
+ result_dict = result.to_dict()
421
+ else:
422
+ # If it's already a dict or another format, use it as is
423
+ result_dict = result or {
424
+ "success": True,
425
+ "message": "Grouped trace metrics"
426
+ }
427
+
428
+ logger.debug(f"Result from get_grouped_trace_metrics: {result_dict}")
429
+ return result_dict
430
+ except Exception as e:
431
+ logger.error(f"Error in get_grouped_trace_metrics: {e}")
432
+ return {"error": f"Failed to get grouped trace metrics: {e!s}"}
433
+
434
+ @register_as_tool
435
+ @with_header_auth(ApplicationAnalyzeApi)
436
+ async def get_grouped_calls_metrics(
437
+ self,
438
+ fillTimeSeries: Optional[str] = None,
439
+ payload: Optional[Union[Dict[str, Any], str]]=None,
440
+ api_client = None,
441
+ ctx=None
442
+ ) -> Dict[str, Any]:
443
+ """
444
+ Get grouped calls metrics.
445
+ This endpoint retrieves the metrics for calls.
446
+
447
+ Args:
448
+ fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
449
+ Sample payload: {
450
+ "group": {
451
+ "groupbyTag": "service.name",
452
+ "groupbyTagEntity": "DESTINATION"
453
+ },
454
+ "metrics": [
455
+ {
456
+ "aggregation": "SUM",
457
+ "metric": "calls"
458
+ },
459
+ {
460
+ "aggregation": "P75",
461
+ "metric": "latency",
462
+ "granularity": 360
463
+ }
464
+ ],
465
+ "includeInternal": false,
466
+ "includeSynthetic": false,
467
+ "order": {
468
+ "by": "calls",
469
+ "direction": "DESC"
470
+ },
471
+ "pagination": {
472
+ "retrievalSize": 20
473
+ },
474
+ "tagFilterExpression": {
475
+ "type": "EXPRESSION",
476
+ "logicalOperator": "AND",
477
+ "elements": [
478
+ {
479
+ "type": "TAG_FILTER",
480
+ "name": "call.type",
481
+ "operator": "EQUALS",
482
+ "entity": "NOT_APPLICABLE",
483
+ "value": "DATABASE"
484
+ },
485
+ {
486
+ "type": "TAG_FILTER",
487
+ "name": "service.name",
488
+ "operator": "EQUALS",
489
+ "entity": "DESTINATION",
490
+ "value": "ratings"
491
+ }
492
+ ]
493
+ },
494
+ "timeFrame": {
495
+ "to": "1688366990000",
496
+ "windowSize": "600000"
497
+ }
498
+ }
499
+ ctx: Optional execution context.
500
+
501
+ Returns:
502
+ Dict[str, Any]: Grouped trace metrics result.
503
+ """
504
+ try:
505
+ # Parse the payload if it's a string
506
+ if isinstance(payload, str):
507
+ logger.debug("Payload is a string, attempting to parse")
508
+ try:
509
+ import json
510
+ try:
511
+ parsed_payload = json.loads(payload)
512
+ logger.debug("Successfully parsed payload as JSON")
513
+ request_body = parsed_payload
514
+ except json.JSONDecodeError as e:
515
+ logger.debug(f"JSON parsing failed: {e}, trying with quotes replaced")
516
+
517
+ # Try replacing single quotes with double quotes
518
+ fixed_payload = payload.replace("'", "\"")
519
+ try:
520
+ parsed_payload = json.loads(fixed_payload)
521
+ logger.debug("Successfully parsed fixed JSON")
522
+ request_body = parsed_payload
523
+ except json.JSONDecodeError:
524
+ # Try as Python literal
525
+ import ast
526
+ try:
527
+ parsed_payload = ast.literal_eval(payload)
528
+ logger.debug("Successfully parsed payload as Python literal")
529
+ request_body = parsed_payload
530
+ except (SyntaxError, ValueError) as e2:
531
+ logger.debug(f"Failed to parse payload string: {e2}")
532
+ return {"error": f"Invalid payload format: {e2}", "payload": payload}
533
+ except Exception as e:
534
+ logger.debug(f"Error parsing payload string: {e}")
535
+ return {"error": f"Failed to parse payload: {e}", "payload": payload}
536
+ else:
537
+ # If payload is already a dictionary, use it directly
538
+ logger.debug("Using provided payload dictionary")
539
+ request_body = payload
540
+
541
+ # Import the GetCallGroups class
542
+ try:
543
+ from instana_client.models.get_call_groups import (
544
+ GetCallGroups,
545
+ )
546
+ from instana_client.models.group import Group
547
+ logger.debug("Successfully imported GetCallGroups")
548
+ except ImportError as e:
549
+ logger.debug(f"Error importing GetCallGroups: {e}")
550
+ return {"error": f"Failed to import GetCallGroups: {e!s}"}
551
+
552
+ # Create an GetCallGroups object from the request body
553
+ try:
554
+ query_params = {}
555
+ if request_body and "group" in request_body:
556
+ query_params["group"] = request_body["group"]
557
+ if request_body and "metrics" in request_body:
558
+ query_params["metrics"] = request_body["metrics"]
559
+ logger.debug(f"Creating GetCallGroups with params: {query_params}")
560
+ config_object = GetCallGroups(**query_params)
561
+ logger.debug("Successfully created endpoint config object")
562
+ except Exception as e:
563
+ logger.error(f"Error creating GetCallGroups: {e}")
564
+ return {"error": f"Failed to create config object: {e!s}"}
565
+
566
+ # Call the get_call_groups method from the SDK
567
+ logger.debug("Calling get_call_groups with config object")
568
+ result = api_client.get_call_group(
569
+ get_call_groups=config_object
570
+ )
571
+ # Convert the result to a dictionary
572
+ if hasattr(result, 'to_dict'):
573
+ result_dict = result.to_dict()
574
+ else:
575
+ # If it's already a dict or another format, use it as is
576
+ result_dict = result or {
577
+ "success": True,
578
+ "message": "Get Grouped call"
579
+ }
580
+
581
+ logger.debug(f"Result from get_call_group: {result_dict}")
582
+ return result_dict
583
+ except Exception as e:
584
+ logger.error(f"Error in get_call_group: {e}")
585
+ return {"error": f"Failed to get grouped call: {e!s}"}
586
+
587
+
588
+ @register_as_tool
589
+ @with_header_auth(ApplicationAnalyzeApi)
590
+ async def get_correlated_traces(
591
+ self,
592
+ correlation_id: str,
593
+ api_client = None,
594
+ ctx=None
595
+ ) -> Dict[str, Any]:
596
+ """
597
+ Resolve Trace IDs from Monitoring Beacons.
598
+ Resolves backend trace IDs using correlation IDs from website and mobile app monitoring beacons.
599
+
600
+ Args:
601
+ correlation_id: Here, the `backendTraceId` is typically used which can be obtained from the `Get all beacons` API endpoint for website and mobile app monitoring. For XHR, fetch, or HTTP beacons, the `beaconId` retrieved from the same API endpoint can also serve as the `correlationId`.(required)
602
+ ctx: Optional execution context.
603
+ Returns:
604
+ Dict[str, Any]: Grouped trace metrics result.
605
+ """
606
+ try:
607
+ logger.debug("Calling backend correlation API")
608
+ if not correlation_id:
609
+ error_msg = "Correlation ID must be provided"
610
+ logger.warning(error_msg)
611
+ return {"error": error_msg}
612
+
613
+ result = api_client.get_correlated_traces(
614
+ correlation_id=correlation_id
615
+ )
616
+
617
+ result_dict = result.to_dict() if hasattr(result, 'to_dict') else result
618
+
619
+ logger.debug(f"Result from get_correlated_traces: {result_dict}")
620
+ # If result is a list, convert it to a dictionary
621
+ if isinstance(result_dict, list):
622
+ return {"traces": result_dict}
623
+ # Otherwise ensure we return a dictionary
624
+ return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
625
+
626
+ except Exception as e:
627
+ logger.error(f"Error in get_correlated_traces: {e}", exc_info=True)
628
+ return {"error": f"Failed to get correlated traces: {e!s}"}