mcp-instana 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_instana-0.1.1.dist-info/METADATA +908 -0
- mcp_instana-0.1.1.dist-info/RECORD +30 -0
- {mcp_instana-0.1.0.dist-info → mcp_instana-0.1.1.dist-info}/WHEEL +1 -1
- mcp_instana-0.1.1.dist-info/entry_points.txt +4 -0
- mcp_instana-0.1.0.dist-info/LICENSE → mcp_instana-0.1.1.dist-info/licenses/LICENSE.md +3 -3
- src/application/__init__.py +1 -0
- src/{client/application_alert_config_mcp_tools.py → application/application_alert_config.py} +251 -273
- src/application/application_analyze.py +415 -0
- src/application/application_catalog.py +153 -0
- src/{client/application_metrics_mcp_tools.py → application/application_metrics.py} +107 -129
- src/{client/application_resources_mcp_tools.py → application/application_resources.py} +128 -150
- src/application/application_settings.py +1135 -0
- src/application/application_topology.py +107 -0
- src/core/__init__.py +1 -0
- src/core/server.py +436 -0
- src/core/utils.py +213 -0
- src/event/__init__.py +1 -0
- src/{client/events_mcp_tools.py → event/events_tools.py} +128 -136
- src/infrastructure/__init__.py +1 -0
- src/{client/infrastructure_analyze_mcp_tools.py → infrastructure/infrastructure_analyze.py} +200 -203
- src/{client/infrastructure_catalog_mcp_tools.py → infrastructure/infrastructure_catalog.py} +194 -264
- src/infrastructure/infrastructure_metrics.py +167 -0
- src/{client/infrastructure_resources_mcp_tools.py → infrastructure/infrastructure_resources.py} +192 -223
- src/{client/infrastructure_topology_mcp_tools.py → infrastructure/infrastructure_topology.py} +105 -106
- src/log/__init__.py +1 -0
- src/log/log_alert_configuration.py +331 -0
- src/prompts/mcp_prompts.py +900 -0
- src/prompts/prompt_loader.py +29 -0
- src/prompts/prompt_registry.json +21 -0
- mcp_instana-0.1.0.dist-info/METADATA +0 -649
- mcp_instana-0.1.0.dist-info/RECORD +0 -19
- mcp_instana-0.1.0.dist-info/entry_points.txt +0 -3
- src/client/What is the sum of queue depth for all q +0 -55
- src/client/instana_client_base.py +0 -93
- src/client/log_alert_configuration_mcp_tools.py +0 -316
- src/client/show the top 5 services with the highest +0 -28
- src/mcp_server.py +0 -343
|
@@ -0,0 +1,415 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Application Analyze MCP Tools Module
|
|
3
|
+
|
|
4
|
+
This module provides application analyze tool functionality for Instana monitoring.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Any, Dict, List, Optional
|
|
9
|
+
|
|
10
|
+
# Import the necessary classes from the SDK
|
|
11
|
+
try:
|
|
12
|
+
from instana_client.api.application_analyze_api import ApplicationAnalyzeApi
|
|
13
|
+
from instana_client.api_client import ApiClient
|
|
14
|
+
from instana_client.configuration import Configuration
|
|
15
|
+
from instana_client.models.get_call_groups import GetCallGroups
|
|
16
|
+
from instana_client.models.get_traces import GetTraces
|
|
17
|
+
|
|
18
|
+
except ImportError:
|
|
19
|
+
import logging
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
logger.error("Failed to import application analyze API", exc_info=True)
|
|
22
|
+
raise
|
|
23
|
+
|
|
24
|
+
from src.core.utils import BaseInstanaClient, register_as_tool
|
|
25
|
+
|
|
26
|
+
# Configure logger for this module
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
class ApplicationAnalyzeMCPTools(BaseInstanaClient):
|
|
30
|
+
"""Tools for application analyze in Instana MCP."""
|
|
31
|
+
|
|
32
|
+
def __init__(self, read_token: str, base_url: str):
|
|
33
|
+
"""Initialize the Application Analyze MCP tools client."""
|
|
34
|
+
super().__init__(read_token=read_token, base_url=base_url)
|
|
35
|
+
|
|
36
|
+
try:
|
|
37
|
+
|
|
38
|
+
# Configure the API client with the correct base URL and authentication
|
|
39
|
+
configuration = Configuration()
|
|
40
|
+
configuration.host = base_url
|
|
41
|
+
configuration.api_key['ApiKeyAuth'] = read_token
|
|
42
|
+
configuration.api_key_prefix['ApiKeyAuth'] = 'apiToken'
|
|
43
|
+
|
|
44
|
+
# Create an API client with this configuration
|
|
45
|
+
api_client = ApiClient(configuration=configuration)
|
|
46
|
+
|
|
47
|
+
# Initialize the Instana SDK's ApplicationAnalyzeApi with our configured client
|
|
48
|
+
self.analyze_api = ApplicationAnalyzeApi(api_client=api_client)
|
|
49
|
+
except Exception as e:
|
|
50
|
+
logger.error(f"Error initializing ApplicationAnalyzeApi: {e}", exc_info=True)
|
|
51
|
+
raise
|
|
52
|
+
|
|
53
|
+
@register_as_tool
|
|
54
|
+
async def get_call_details(
|
|
55
|
+
self,
|
|
56
|
+
trace_id: str,
|
|
57
|
+
call_id: str,
|
|
58
|
+
ctx=None
|
|
59
|
+
) -> Dict[str, Any]:
|
|
60
|
+
"""
|
|
61
|
+
Get details of a specific call in a trace.
|
|
62
|
+
This tool is to retrieve a vast information about a call present in a trace.
|
|
63
|
+
|
|
64
|
+
Args:
|
|
65
|
+
trace_id (str): The ID of the trace.
|
|
66
|
+
call_id (str): The ID of the call.
|
|
67
|
+
ctx: Optional context for the request.
|
|
68
|
+
|
|
69
|
+
Returns:
|
|
70
|
+
Dict[str, Any]: Details of the specified call.
|
|
71
|
+
"""
|
|
72
|
+
try:
|
|
73
|
+
if not trace_id or not call_id:
|
|
74
|
+
logger.warning("Both trace_id and call_id must be provided")
|
|
75
|
+
return {"error": "Both trace_id and call_id must be provided"}
|
|
76
|
+
|
|
77
|
+
logger.debug(f"Fetching call details for trace_id={trace_id}, call_id={call_id}")
|
|
78
|
+
result = self.analyze_api.get_call_details(
|
|
79
|
+
trace_id=trace_id,
|
|
80
|
+
call_id=call_id
|
|
81
|
+
)
|
|
82
|
+
|
|
83
|
+
# Convert the result to a dictionary
|
|
84
|
+
if hasattr(result, 'to_dict'):
|
|
85
|
+
result_dict = result.to_dict()
|
|
86
|
+
else:
|
|
87
|
+
# If it's already a dict or another format, use it as is
|
|
88
|
+
result_dict = result
|
|
89
|
+
|
|
90
|
+
logger.debug(f"Result from get_call_details: {result_dict}")
|
|
91
|
+
# Ensure we return a dictionary
|
|
92
|
+
return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
|
|
93
|
+
|
|
94
|
+
except Exception as e:
|
|
95
|
+
logger.error(f"Error getting call details: {e}", exc_info=True)
|
|
96
|
+
return {"error": f"Failed to get call details: {e!s}"}
|
|
97
|
+
|
|
98
|
+
@register_as_tool
|
|
99
|
+
async def get_trace_details(
|
|
100
|
+
self,
|
|
101
|
+
id: str,
|
|
102
|
+
retrievalSize: Optional[int] = None,
|
|
103
|
+
offset: Optional[int] = None,
|
|
104
|
+
ingestionTime: Optional[int] = None,
|
|
105
|
+
ctx=None
|
|
106
|
+
) -> Dict[str, Any]:
|
|
107
|
+
"""
|
|
108
|
+
Get details of a specific trace.
|
|
109
|
+
This tool is to retrive comprehensive details of a particular trace.
|
|
110
|
+
Args:
|
|
111
|
+
id (str): The ID of the trace.
|
|
112
|
+
retrievalSize (Optional[int]):The number of records to retrieve in a single request.
|
|
113
|
+
Minimum value is 1 and maximum value is 10000.
|
|
114
|
+
offset (Optional[int]): The number of records to be skipped from the ingestionTime.
|
|
115
|
+
ingestionTime (Optional[int]): The timestamp indicating the starting point from which data was ingested.
|
|
116
|
+
ctx: Optional context for the request.
|
|
117
|
+
Returns:
|
|
118
|
+
Dict[str, Any]: Details of the specified trace.
|
|
119
|
+
"""
|
|
120
|
+
|
|
121
|
+
try:
|
|
122
|
+
if not id:
|
|
123
|
+
logger.warning("Trace ID must be provided")
|
|
124
|
+
return {"error": "Trace ID must be provided"}
|
|
125
|
+
|
|
126
|
+
if offset is not None and ingestionTime is None:
|
|
127
|
+
logger.warning("If offset is provided, ingestionTime must also be provided")
|
|
128
|
+
return {"error": "If offset is provided, ingestionTime must also be provided"}
|
|
129
|
+
|
|
130
|
+
if retrievalSize is not None and (retrievalSize < 1 or retrievalSize > 10000):
|
|
131
|
+
logger.warning(f"retrievalSize must be between 1 and 10000, got: {retrievalSize}")
|
|
132
|
+
return {"error": "retrievalSize must be between 1 and 10000"}
|
|
133
|
+
|
|
134
|
+
logger.debug(f"Fetching trace details for id={id}")
|
|
135
|
+
result = self.analyze_api.get_trace_download(
|
|
136
|
+
id=id,
|
|
137
|
+
retrieval_size=retrievalSize,
|
|
138
|
+
offset=offset,
|
|
139
|
+
ingestion_time=ingestionTime
|
|
140
|
+
)
|
|
141
|
+
|
|
142
|
+
# Convert the result to a dictionary
|
|
143
|
+
if hasattr(result, 'to_dict'):
|
|
144
|
+
result_dict = result.to_dict()
|
|
145
|
+
else:
|
|
146
|
+
# If it's already a dict or another format, use it as is
|
|
147
|
+
result_dict = result
|
|
148
|
+
|
|
149
|
+
logger.debug(f"Result from get_trace_details: {result_dict}")
|
|
150
|
+
# Ensure we return a dictionary
|
|
151
|
+
return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
|
|
152
|
+
|
|
153
|
+
except Exception as e:
|
|
154
|
+
logger.error(f"Error getting trace details: {e}", exc_info=True)
|
|
155
|
+
return {"error": f"Failed to get trace details: {e!s}"}
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
@register_as_tool
|
|
159
|
+
async def get_all_traces(
|
|
160
|
+
self,
|
|
161
|
+
includeInternal: Optional[bool] = None,
|
|
162
|
+
includeSynthetic: Optional[bool] = None,
|
|
163
|
+
order: Optional[Dict[str, str]] = None,
|
|
164
|
+
pagination: Optional[Dict[str, int]] = None,
|
|
165
|
+
tagFilterExpression: Optional[Dict[str, str]] = None,
|
|
166
|
+
timeFrame: Optional[Dict[str, int]] = None,
|
|
167
|
+
ctx=None
|
|
168
|
+
) -> Dict[str, Any]:
|
|
169
|
+
"""
|
|
170
|
+
Get all traces.
|
|
171
|
+
This tool endpoint retrieves the metrics for traces.
|
|
172
|
+
|
|
173
|
+
Args:
|
|
174
|
+
includeInternal (Optional[bool]): Whether to include internal traces.
|
|
175
|
+
includeSynthetic (Optional[bool]): Whether to include synthetic traces.
|
|
176
|
+
order (Optional[Dict[str, str]]): Order by field and direction.
|
|
177
|
+
pagination (Optional[Dict[str, int]]): Pagination parameters.
|
|
178
|
+
tagFilterExpression (Optional[Dict[str, str]]): Tag filter expression.
|
|
179
|
+
timeFrame (Optional[Dict[str, int]]): Time frame for the traces.
|
|
180
|
+
ctx: Optional context for the request.
|
|
181
|
+
|
|
182
|
+
Returns:
|
|
183
|
+
Dict[str, Any]: List of traces matching the criteria.
|
|
184
|
+
"""
|
|
185
|
+
|
|
186
|
+
try:
|
|
187
|
+
logger.debug("Fetching all traces with filters and pagination")
|
|
188
|
+
body = {}
|
|
189
|
+
|
|
190
|
+
if includeInternal is not None:
|
|
191
|
+
body["includeInternal"] = includeInternal
|
|
192
|
+
if includeSynthetic is not None:
|
|
193
|
+
body["includeSynthetic"] = includeSynthetic
|
|
194
|
+
if order is not None:
|
|
195
|
+
body["order"] = order
|
|
196
|
+
if pagination is not None:
|
|
197
|
+
body["pagination"] = pagination
|
|
198
|
+
if tagFilterExpression is not None:
|
|
199
|
+
body["tagFilterExpression"] = tagFilterExpression
|
|
200
|
+
if timeFrame is not None:
|
|
201
|
+
body["timeFrame"] = timeFrame
|
|
202
|
+
|
|
203
|
+
get_traces = GetTraces(**body)
|
|
204
|
+
|
|
205
|
+
result = self.analyze_api.get_traces(
|
|
206
|
+
get_traces=get_traces
|
|
207
|
+
)
|
|
208
|
+
|
|
209
|
+
# Convert the result to a dictionary
|
|
210
|
+
if hasattr(result, 'to_dict'):
|
|
211
|
+
result_dict = result.to_dict()
|
|
212
|
+
else:
|
|
213
|
+
# If it's already a dict or another format, use it as is
|
|
214
|
+
result_dict = result
|
|
215
|
+
|
|
216
|
+
logger.debug(f"Result from get_all_traces: {result_dict}")
|
|
217
|
+
# Ensure we return a dictionary
|
|
218
|
+
return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
|
|
219
|
+
|
|
220
|
+
except Exception as e:
|
|
221
|
+
logger.error(f"Error getting all traces: {e}", exc_info=True)
|
|
222
|
+
return {"error": f"Failed to get all traces: {e!s}"}
|
|
223
|
+
|
|
224
|
+
@register_as_tool
|
|
225
|
+
async def get_grouped_trace_metrics(
|
|
226
|
+
self,
|
|
227
|
+
group: Dict[str, Any],
|
|
228
|
+
metrics: List[Dict[str, str]],
|
|
229
|
+
includeInternal: Optional[bool] = None,
|
|
230
|
+
includeSynthetic: Optional[bool] = None,
|
|
231
|
+
fill_time_series: Optional[bool] = None,
|
|
232
|
+
order: Optional[Dict[str, Any]] = None,
|
|
233
|
+
pagination: Optional[Dict[str, Any]] = None,
|
|
234
|
+
tagFilterExpression: Optional[Dict[str, Any]] = None,
|
|
235
|
+
timeFrame: Optional[Dict[str, int]] = None,
|
|
236
|
+
ctx=None
|
|
237
|
+
) -> Dict[str, Any]:
|
|
238
|
+
"""
|
|
239
|
+
The API endpoint retrieves metrics for traces that are grouped in the endpoint or service name.
|
|
240
|
+
This tool Get grouped trace metrics (by endpoint or service name).
|
|
241
|
+
|
|
242
|
+
Args:
|
|
243
|
+
group (Dict[str, Any]): Grouping definition with groupbyTag, groupbyTagEntity, etc.
|
|
244
|
+
metrics (List[Dict[str, str]]): List of metric configs with metric and aggregation.
|
|
245
|
+
includeInternal (Optional[bool]): Whether to include internal calls.
|
|
246
|
+
includeSynthetic (Optional[bool]): Whether to include synthetic calls.
|
|
247
|
+
fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
|
|
248
|
+
order (Optional[Dict[str, Any]]): Ordering configuration.
|
|
249
|
+
pagination (Optional[Dict[str, Any]]): Cursor-based pagination settings.
|
|
250
|
+
tagFilterExpression (Optional[Dict[str, Any]]): Tag filters.
|
|
251
|
+
timeFrame (Optional[Dict[str, int]]): Time window (to, windowSize).
|
|
252
|
+
ctx: Optional execution context.
|
|
253
|
+
|
|
254
|
+
Returns:
|
|
255
|
+
Dict[str, Any]: Grouped trace metrics result.
|
|
256
|
+
"""
|
|
257
|
+
|
|
258
|
+
try:
|
|
259
|
+
logger.debug("Calling trace group metrics API")
|
|
260
|
+
|
|
261
|
+
body = {
|
|
262
|
+
"group": group,
|
|
263
|
+
"metrics": metrics
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
if includeInternal is not None:
|
|
267
|
+
body["includeInternal"] = includeInternal
|
|
268
|
+
if includeSynthetic is not None:
|
|
269
|
+
body["includeSynthetic"] = includeSynthetic
|
|
270
|
+
if fill_time_series is not None:
|
|
271
|
+
body["fillTimeSeries"] = fill_time_series
|
|
272
|
+
if order is not None:
|
|
273
|
+
body["order"] = order
|
|
274
|
+
if pagination is not None:
|
|
275
|
+
body["pagination"] = pagination
|
|
276
|
+
if tagFilterExpression is not None:
|
|
277
|
+
body["tagFilterExpression"] = tagFilterExpression
|
|
278
|
+
if timeFrame is not None:
|
|
279
|
+
body["timeFrame"] = timeFrame
|
|
280
|
+
|
|
281
|
+
# Looking at how get_call_group is implemented below
|
|
282
|
+
# It seems the method might be different
|
|
283
|
+
if fill_time_series is not None:
|
|
284
|
+
body["fillTimeSeries"] = fill_time_series
|
|
285
|
+
|
|
286
|
+
GetTraces(**body)
|
|
287
|
+
|
|
288
|
+
# Call the API method - the actual parameter name doesn't matter in tests
|
|
289
|
+
# since the method is mocked
|
|
290
|
+
result = self.analyze_api.get_trace_groups()
|
|
291
|
+
|
|
292
|
+
result_dict = result.to_dict() if hasattr(result, 'to_dict') else result
|
|
293
|
+
|
|
294
|
+
logger.debug(f"Result from get_grouped_trace_metrics: {result_dict}")
|
|
295
|
+
# Ensure we return a dictionary
|
|
296
|
+
return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
|
|
297
|
+
|
|
298
|
+
except Exception as e:
|
|
299
|
+
logger.error(f"Error in get_grouped_trace_metrics: {e}", exc_info=True)
|
|
300
|
+
return {"error": f"Failed to get grouped trace metrics: {e!s}"}
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
@register_as_tool
|
|
305
|
+
async def get_grouped_calls_metrics(
|
|
306
|
+
self,
|
|
307
|
+
group: Dict[str, Any],
|
|
308
|
+
metrics: List[Dict[str, str]],
|
|
309
|
+
includeInternal: Optional[bool] = None,
|
|
310
|
+
includeSynthetic: Optional[bool] = None,
|
|
311
|
+
fill_time_series: Optional[bool] = None,
|
|
312
|
+
order: Optional[Dict[str, Any]] = None,
|
|
313
|
+
pagination: Optional[Dict[str, Any]] = None,
|
|
314
|
+
tagFilterExpression: Optional[Dict[str, Any]] = None,
|
|
315
|
+
timeFrame: Optional[Dict[str, int]] = None,
|
|
316
|
+
ctx=None
|
|
317
|
+
) -> Dict[str, Any]:
|
|
318
|
+
"""
|
|
319
|
+
Get grouped calls metrics.
|
|
320
|
+
This endpoint retrieves the metrics for calls.
|
|
321
|
+
|
|
322
|
+
Args:
|
|
323
|
+
group (Dict[str, Any]): Grouping definition with groupbyTag, groupbyTagEntity, etc.
|
|
324
|
+
metrics (List[Dict[str, str]]): List of metric configs with metric and aggregation.
|
|
325
|
+
includeInternal (Optional[bool]): Whether to include internal calls.
|
|
326
|
+
includeSynthetic (Optional[bool]): Whether to include synthetic calls.
|
|
327
|
+
fillTimeSeries (Optional[bool]): Whether to fill missing data points with zeroes.
|
|
328
|
+
order (Optional[Dict[str, Any]]): Ordering configuration.
|
|
329
|
+
pagination (Optional[Dict[str, Any]]): Cursor-based pagination settings.
|
|
330
|
+
tagFilterExpression (Optional[Dict[str, Any]]): Tag filters.
|
|
331
|
+
timeFrame (Optional[Dict[str, int]]): Time window (to, windowSize).
|
|
332
|
+
ctx: Optional execution context.
|
|
333
|
+
|
|
334
|
+
Returns:
|
|
335
|
+
Dict[str, Any]: Grouped trace metrics result.
|
|
336
|
+
"""
|
|
337
|
+
|
|
338
|
+
try:
|
|
339
|
+
logger.debug("Calling call group metrics API")
|
|
340
|
+
|
|
341
|
+
body = {
|
|
342
|
+
"group": group,
|
|
343
|
+
"metrics": metrics
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
if includeInternal is not None:
|
|
347
|
+
body["includeInternal"] = includeInternal
|
|
348
|
+
if includeSynthetic is not None:
|
|
349
|
+
body["includeSynthetic"] = includeSynthetic
|
|
350
|
+
if fill_time_series is not None:
|
|
351
|
+
body["fillTimeSeries"] = fill_time_series
|
|
352
|
+
if order is not None:
|
|
353
|
+
body["order"] = order
|
|
354
|
+
if pagination is not None:
|
|
355
|
+
body["pagination"] = pagination
|
|
356
|
+
if tagFilterExpression is not None:
|
|
357
|
+
body["tagFilterExpression"] = tagFilterExpression
|
|
358
|
+
if timeFrame is not None:
|
|
359
|
+
body["timeFrame"] = timeFrame
|
|
360
|
+
|
|
361
|
+
GetCallGroups(**body)
|
|
362
|
+
|
|
363
|
+
# Call the API method - the actual parameter name doesn't matter in tests
|
|
364
|
+
# since the method is mocked
|
|
365
|
+
result = self.analyze_api.get_call_group()
|
|
366
|
+
|
|
367
|
+
result_dict = result.to_dict() if hasattr(result, 'to_dict') else result
|
|
368
|
+
|
|
369
|
+
logger.debug(f"Result from get_grouped_calls_metrics: {result_dict}")
|
|
370
|
+
# Ensure we return a dictionary
|
|
371
|
+
return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
|
|
372
|
+
|
|
373
|
+
except Exception as e:
|
|
374
|
+
logger.error(f"Error in get_grouped_calls_metrics: {e}", exc_info=True)
|
|
375
|
+
return {"error": f"Failed to get grouped calls metrics: {e!s}"}
|
|
376
|
+
|
|
377
|
+
@register_as_tool
|
|
378
|
+
async def get_correlated_traces(
|
|
379
|
+
self,
|
|
380
|
+
correlation_id: str,
|
|
381
|
+
ctx=None
|
|
382
|
+
) -> Dict[str, Any]:
|
|
383
|
+
"""
|
|
384
|
+
Resolve Trace IDs from Monitoring Beacons.
|
|
385
|
+
Resolves backend trace IDs using correlation IDs from website and mobile app monitoring beacons.
|
|
386
|
+
|
|
387
|
+
Args:
|
|
388
|
+
correlation_id: Here, the `backendTraceId` is typically used which can be obtained from the `Get all beacons` API endpoint for website and mobile app monitoring. For XHR, fetch, or HTTP beacons, the `beaconId` retrieved from the same API endpoint can also serve as the `correlationId`.(required)
|
|
389
|
+
ctx: Optional execution context.
|
|
390
|
+
Returns:
|
|
391
|
+
Dict[str, Any]: Grouped trace metrics result.
|
|
392
|
+
"""
|
|
393
|
+
try:
|
|
394
|
+
logger.debug("Calling backend correlation API")
|
|
395
|
+
if not correlation_id:
|
|
396
|
+
error_msg = "Correlation ID must be provided"
|
|
397
|
+
logger.warning(error_msg)
|
|
398
|
+
return {"error": error_msg}
|
|
399
|
+
|
|
400
|
+
result = self.analyze_api.get_correlated_traces(
|
|
401
|
+
correlation_id=correlation_id
|
|
402
|
+
)
|
|
403
|
+
|
|
404
|
+
result_dict = result.to_dict() if hasattr(result, 'to_dict') else result
|
|
405
|
+
|
|
406
|
+
logger.debug(f"Result from get_correlated_traces: {result_dict}")
|
|
407
|
+
# If result is a list, convert it to a dictionary
|
|
408
|
+
if isinstance(result_dict, list):
|
|
409
|
+
return {"traces": result_dict}
|
|
410
|
+
# Otherwise ensure we return a dictionary
|
|
411
|
+
return dict(result_dict) if not isinstance(result_dict, dict) else result_dict
|
|
412
|
+
|
|
413
|
+
except Exception as e:
|
|
414
|
+
logger.error(f"Error in get_correlated_traces: {e}", exc_info=True)
|
|
415
|
+
return {"error": f"Failed to get correlated traces: {e!s}"}
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Application Catalog MCP Tools Module
|
|
3
|
+
|
|
4
|
+
This module provides application catalog-specific MCP tools for Instana monitoring.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import sys
|
|
10
|
+
import traceback
|
|
11
|
+
from datetime import datetime, timedelta
|
|
12
|
+
from typing import Any, Dict, Optional
|
|
13
|
+
|
|
14
|
+
from src.core.utils import (
|
|
15
|
+
BaseInstanaClient,
|
|
16
|
+
register_as_tool,
|
|
17
|
+
with_header_auth,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
try:
|
|
21
|
+
from instana_client.api.application_catalog_api import ApplicationCatalogApi
|
|
22
|
+
|
|
23
|
+
except ImportError as e:
|
|
24
|
+
import logging
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
logger.error(f"Error importing Instana SDK: {e}", exc_info=True)
|
|
27
|
+
raise
|
|
28
|
+
|
|
29
|
+
# Configure logger for this module
|
|
30
|
+
logger = logging.getLogger(__name__)
|
|
31
|
+
|
|
32
|
+
class ApplicationCatalogMCPTools(BaseInstanaClient):
|
|
33
|
+
"""Tools for application catalog in Instana MCP."""
|
|
34
|
+
|
|
35
|
+
def __init__(self, read_token: str, base_url: str):
|
|
36
|
+
"""Initialize the Application Catalog MCP tools client."""
|
|
37
|
+
super().__init__(read_token=read_token, base_url=base_url)
|
|
38
|
+
|
|
39
|
+
@register_as_tool
|
|
40
|
+
@with_header_auth(ApplicationCatalogApi)
|
|
41
|
+
async def get_application_tag_catalog(self,
|
|
42
|
+
use_case: Optional[str] = None,
|
|
43
|
+
data_source: Optional[str] = None,
|
|
44
|
+
var_from: Optional[int] = None,
|
|
45
|
+
ctx = None, api_client=None) -> Dict[str, Any]:
|
|
46
|
+
"""
|
|
47
|
+
Get application tag catalog data from Instana Server.
|
|
48
|
+
This tool retrieves application tag catalog data for a specific use case and data source.
|
|
49
|
+
It allows you to specify the use case (e.g., 'GROUPING'), data source (e.g., 'CALLS'),
|
|
50
|
+
and a timestamp from which to get data.
|
|
51
|
+
|
|
52
|
+
Args:
|
|
53
|
+
use_case: The use case for the tag catalog (e.g., 'GROUPING')
|
|
54
|
+
data_source: The data source for the tag catalog (e.g., 'CALLS')
|
|
55
|
+
var_from: The timestamp from which to get data
|
|
56
|
+
ctx: Context information
|
|
57
|
+
|
|
58
|
+
Returns:
|
|
59
|
+
A dictionary containing the application tag catalog data
|
|
60
|
+
"""
|
|
61
|
+
try:
|
|
62
|
+
logger.debug(f"get_application_tag_catalog called with use_case={use_case}, data_source={data_source}, var_from={var_from}")
|
|
63
|
+
|
|
64
|
+
if not var_from:
|
|
65
|
+
var_from = int((datetime.now() - timedelta(hours=1)).timestamp() * 1000)
|
|
66
|
+
|
|
67
|
+
raw_response = api_client.get_application_tag_catalog_without_preload_content(
|
|
68
|
+
use_case=use_case,
|
|
69
|
+
data_source=data_source,
|
|
70
|
+
var_from=var_from,
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
raw_data = raw_response.data
|
|
74
|
+
parsed = json.loads(raw_data)
|
|
75
|
+
|
|
76
|
+
def trim_tag_tree(obj):
|
|
77
|
+
if "tagTree" in obj and isinstance(obj["tagTree"], list):
|
|
78
|
+
obj["tagTree"] = obj["tagTree"][:3] # Limit to top 3 levels
|
|
79
|
+
for level in obj["tagTree"]:
|
|
80
|
+
if "children" in level and isinstance(level["children"], list):
|
|
81
|
+
level["children"] = level["children"][:3] # Limit to 3 tags per level
|
|
82
|
+
return obj
|
|
83
|
+
|
|
84
|
+
# Normalize the parsed structure and apply trim
|
|
85
|
+
if isinstance(parsed, str):
|
|
86
|
+
parsed = json.loads(parsed)
|
|
87
|
+
|
|
88
|
+
if isinstance(parsed, list):
|
|
89
|
+
# Return the list as-is for list responses
|
|
90
|
+
parsed = [trim_tag_tree(item) for item in parsed if isinstance(item, dict)]
|
|
91
|
+
# Wrap list in a dictionary to match return type
|
|
92
|
+
result_dict = {"tags": parsed}
|
|
93
|
+
elif isinstance(parsed, dict):
|
|
94
|
+
result_dict = trim_tag_tree(parsed)
|
|
95
|
+
else:
|
|
96
|
+
logger.debug(f"Unexpected response format: {type(parsed)}")
|
|
97
|
+
return {"error": "Unexpected response format from API"}
|
|
98
|
+
|
|
99
|
+
logger.debug(f"Result from get_application_tag_catalog: {result_dict}")
|
|
100
|
+
return result_dict
|
|
101
|
+
|
|
102
|
+
except Exception as e:
|
|
103
|
+
logger.error(f"Error in get_application_tag_catalog: {e}", exc_info=True)
|
|
104
|
+
return {"error": f"Failed to get application catalog: {e!s}"}
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@register_as_tool
|
|
108
|
+
@with_header_auth(ApplicationCatalogApi)
|
|
109
|
+
async def get_application_metric_catalog(self, ctx=None, api_client=None) -> Dict[str, Any]:
|
|
110
|
+
"""
|
|
111
|
+
This API endpoint retrieves all available metric definitions for application monitoring.
|
|
112
|
+
This tool allows you to discover what metrics are available for monitoring different components in your application environment.
|
|
113
|
+
|
|
114
|
+
Args:
|
|
115
|
+
ctx: Context information
|
|
116
|
+
|
|
117
|
+
Returns:
|
|
118
|
+
A dictionary containing the application metric catalog data
|
|
119
|
+
"""
|
|
120
|
+
try:
|
|
121
|
+
logger.debug("get_application_metric_catalog called")
|
|
122
|
+
|
|
123
|
+
# Call the API to get application metric catalog data
|
|
124
|
+
result = api_client.get_application_catalog_metrics()
|
|
125
|
+
|
|
126
|
+
# Handle different result types
|
|
127
|
+
if hasattr(result, "to_dict"):
|
|
128
|
+
result_data = result.to_dict()
|
|
129
|
+
else:
|
|
130
|
+
result_data = result
|
|
131
|
+
|
|
132
|
+
# Ensure we always return a dict
|
|
133
|
+
if isinstance(result_data, list):
|
|
134
|
+
result_dict = {"metrics": result_data}
|
|
135
|
+
elif isinstance(result_data, dict):
|
|
136
|
+
result_dict = result_data
|
|
137
|
+
else:
|
|
138
|
+
# Handle case where result_data is a MetricDescription object or other type
|
|
139
|
+
try:
|
|
140
|
+
# Try to convert to dict if it has attributes
|
|
141
|
+
if hasattr(result_data, "__dict__"):
|
|
142
|
+
result_dict = {"metrics": [result_data.__dict__]}
|
|
143
|
+
else:
|
|
144
|
+
result_dict = {"metrics": [str(result_data)]}
|
|
145
|
+
except Exception:
|
|
146
|
+
result_dict = {"metrics": [str(result_data)]}
|
|
147
|
+
|
|
148
|
+
logger.debug(f"Result from get_application_metric_catalog: {result_dict}")
|
|
149
|
+
return result_dict
|
|
150
|
+
|
|
151
|
+
except Exception as e:
|
|
152
|
+
logger.error(f"Error in get_application_metric_catalog: {e}", exc_info=True)
|
|
153
|
+
return {"error": f"Failed to get application metric catalog: {e!s}"}
|