mcp_instana-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_instana-0.1.0.dist-info/LICENSE +201 -0
- mcp_instana-0.1.0.dist-info/METADATA +649 -0
- mcp_instana-0.1.0.dist-info/RECORD +19 -0
- mcp_instana-0.1.0.dist-info/WHEEL +4 -0
- mcp_instana-0.1.0.dist-info/entry_points.txt +3 -0
- src/__init__.py +0 -0
- src/client/What is the sum of queue depth for all q +55 -0
- src/client/application_alert_config_mcp_tools.py +680 -0
- src/client/application_metrics_mcp_tools.py +377 -0
- src/client/application_resources_mcp_tools.py +391 -0
- src/client/events_mcp_tools.py +531 -0
- src/client/infrastructure_analyze_mcp_tools.py +634 -0
- src/client/infrastructure_catalog_mcp_tools.py +624 -0
- src/client/infrastructure_resources_mcp_tools.py +653 -0
- src/client/infrastructure_topology_mcp_tools.py +319 -0
- src/client/instana_client_base.py +93 -0
- src/client/log_alert_configuration_mcp_tools.py +316 -0
- src/client/show the top 5 services with the highest +28 -0
- src/mcp_server.py +343 -0
src/client/infrastructure_analyze_mcp_tools.py (new file)

@@ -0,0 +1,634 @@
"""
Infrastructure Analyze MCP Tools Module

This module provides infrastructure analysis-specific MCP tools for Instana monitoring.
"""

import sys
import traceback
from typing import Dict, Any, Optional, List, Union
from datetime import datetime

# Import the necessary classes from the SDK
try:
    from instana_client.api.infrastructure_analyze_api import InfrastructureAnalyzeApi
    from instana_client.api_client import ApiClient
    from instana_client.configuration import Configuration
    from instana_client.models.get_available_metrics_query import GetAvailableMetricsQuery
    from instana_client.models.get_available_plugins_query import GetAvailablePluginsQuery
    from instana_client.models.get_infrastructure_query import GetInfrastructureQuery
    from instana_client.models.get_infrastructure_groups_query import GetInfrastructureGroupsQuery
except ImportError as e:
    print(f"Error importing Instana SDK: {e}", file=sys.stderr)
    traceback.print_exc(file=sys.stderr)
    raise

from .instana_client_base import BaseInstanaClient, register_as_tool

# Helper function for debug printing
def debug_print(*args, **kwargs):
    """Print debug information to stderr instead of stdout"""
    print(*args, file=sys.stderr, **kwargs)

class InfrastructureAnalyzeMCPTools(BaseInstanaClient):
    """Tools for infrastructure analysis in Instana MCP."""

    def __init__(self, read_token: str, base_url: str):
        """Initialize the Infrastructure Analyze MCP tools client."""
        super().__init__(read_token=read_token, base_url=base_url)

        try:
            # Configure the API client with the correct base URL and authentication
            configuration = Configuration()
            configuration.host = base_url
            configuration.api_key['ApiKeyAuth'] = read_token
            configuration.api_key_prefix['ApiKeyAuth'] = 'apiToken'

            # Create an API client with this configuration
            api_client = ApiClient(configuration=configuration)

            # Initialize the Instana SDK's InfrastructureAnalyzeApi with our configured client
            self.analyze_api = InfrastructureAnalyzeApi(api_client=api_client)
        except Exception as e:
            debug_print(f"Error initializing InfrastructureAnalyzeApi: {e}")
            traceback.print_exc(file=sys.stderr)
            raise
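
Not part of the packaged file: a minimal sketch of how this class might be constructed outside an MCP server, assuming the Instana API token and tenant base URL are supplied via environment variables. The variable names and import path below are illustrative, not defined by the package.

    # Illustrative usage sketch -- env var names and import path are assumptions.
    import os
    from src.client.infrastructure_analyze_mcp_tools import InfrastructureAnalyzeMCPTools

    tools = InfrastructureAnalyzeMCPTools(
        read_token=os.environ["INSTANA_API_TOKEN"],   # hypothetical variable name
        base_url=os.environ["INSTANA_BASE_URL"],      # e.g. "https://<tenant>-<unit>.instana.io"
    )
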
    @register_as_tool
    async def get_available_metrics(self,
                                    payload: Optional[Union[Dict[str, Any], str]] = None,
                                    ctx=None) -> Dict[str, Any]:
        """
        Get available metrics for infrastructure monitoring.

        This tool retrieves information about available metrics for a specific entity type.
        You can use this to discover what metrics are available for monitoring different components in your environment.

        Sample payload:
        {
            "timeFrame": {
                "from": 1743920395000,
                "to": 1743923995000,
                "windowSize": 3600000
            },
            "tagFilterExpression": {
                "type": "EXPRESSION",
                "logicalOperator": "AND",
                "elements": []
            },
            "query": "",
            "type": "jvmRuntimePlatform"
        }

        Args:
            payload: Complete request payload as a dictionary or a JSON string
            ctx: The MCP context (optional)

        Returns:
            Dictionary containing available metrics or error information
        """
        try:
            debug_print(f"get_available_metrics called with payload={payload}")

            # If payload is a string, try to parse it as JSON
            if isinstance(payload, str):
                debug_print(f"Payload is a string, attempting to parse")
                try:
                    import json
                    try:
                        parsed_payload = json.loads(payload)
                        debug_print(f"Successfully parsed payload as JSON")
                        request_body = parsed_payload
                    except json.JSONDecodeError as e:
                        debug_print(f"JSON parsing failed: {e}, trying with quotes replaced")

                        # Try replacing single quotes with double quotes
                        fixed_payload = payload.replace("'", "\"")
                        try:
                            parsed_payload = json.loads(fixed_payload)
                            debug_print(f"Successfully parsed fixed JSON")
                            request_body = parsed_payload
                        except json.JSONDecodeError:
                            # Try as Python literal
                            import ast
                            try:
                                parsed_payload = ast.literal_eval(payload)
                                debug_print(f"Successfully parsed payload as Python literal")
                                request_body = parsed_payload
                            except (SyntaxError, ValueError) as e2:
                                debug_print(f"Failed to parse payload string: {e2}")
                                return {"error": f"Invalid payload format: {e2}", "payload": payload}
                except Exception as e:
                    debug_print(f"Error parsing payload string: {e}")
                    return {"error": f"Failed to parse payload: {e}", "payload": payload}
            else:
                # If payload is already a dictionary, use it directly
                debug_print(f"Using provided payload dictionary")
                request_body = payload

            debug_print(f"Final request body: {request_body}")

            # Import the GetAvailableMetricsQuery class
            try:
                from instana_client.models.get_available_metrics_query import GetAvailableMetricsQuery
                debug_print("Successfully imported GetAvailableMetricsQuery")
            except ImportError as e:
                debug_print(f"Error importing GetAvailableMetricsQuery: {e}")
                return {"error": f"Failed to import GetAvailableMetricsQuery: {str(e)}"}

            # Create a GetAvailableMetricsQuery object from the request body
            try:
                # Extract parameters from the request body
                query_params = {}

                # Handle timeFrame
                if request_body and "timeFrame" in request_body:
                    time_frame = {}
                    if "to" in request_body["timeFrame"]:
                        time_frame["to"] = request_body["timeFrame"]["to"]
                    if "from" in request_body["timeFrame"]:
                        time_frame["from"] = request_body["timeFrame"]["from"]
                    if "windowSize" in request_body["timeFrame"]:
                        time_frame["windowSize"] = request_body["timeFrame"]["windowSize"]
                    query_params["timeFrame"] = time_frame

                # Handle other parameters
                if request_body and "query" in request_body:
                    query_params["query"] = request_body["query"]

                if request_body and "type" in request_body:
                    query_params["type"] = request_body["type"]

                if request_body and "tagFilterExpression" in request_body:
                    query_params["tagFilterExpression"] = request_body["tagFilterExpression"]

                debug_print(f"Creating GetAvailableMetricsQuery with params: {query_params}")
                query_object = GetAvailableMetricsQuery(**query_params)
                debug_print(f"Successfully created query object: {query_object}")
            except Exception as e:
                debug_print(f"Error creating GetAvailableMetricsQuery: {e}")
                return {"error": f"Failed to create query object: {str(e)}"}

            # Call the get_available_metrics method from the SDK with the query object
            debug_print("Calling get_available_metrics with query object")
            result = self.analyze_api.get_available_metrics(get_available_metrics_query=query_object)

            # Convert the result to a dictionary
            if hasattr(result, 'to_dict'):
                result_dict = result.to_dict()
            else:
                # If it's already a dict or another format, use it as is
                result_dict = result

            debug_print(f"Result from get_available_metrics: {result_dict}")
            return result_dict
        except Exception as e:
            debug_print(f"Error in get_available_metrics: {e}")
            traceback.print_exc(file=sys.stderr)
            return {"error": f"Failed to get available metrics: {str(e)}"}
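
Not part of the packaged file: a usage sketch for the tool above, reusing the sample payload from its docstring. `tools` is the instance from the earlier sketch; the method accepts either a dict or a JSON string and normalizes it before building the query object.

    # Illustrative call, mirroring the docstring's sample payload.
    import asyncio

    metrics_payload = {
        "timeFrame": {"from": 1743920395000, "to": 1743923995000, "windowSize": 3600000},
        "tagFilterExpression": {"type": "EXPRESSION", "logicalOperator": "AND", "elements": []},
        "query": "",
        "type": "jvmRuntimePlatform",
    }
    result = asyncio.run(tools.get_available_metrics(payload=metrics_payload))
    print(result)   # available metrics for jvmRuntimePlatform, or a dict with an "error" key
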
    @register_as_tool
    async def get_entities(self,
                           payload: Optional[Union[Dict[str, Any], str]] = None,
                           ctx=None) -> Dict[str, Any]:
        """
        Get infrastructure entities for a given entity type along with requested metrics.

        Example question this tool can answer: "What were the memory used and the number of
        blocked threads for the jvmRuntimePlatform entity over the last hour?"

        This tool retrieves entities of a specific type (e.g., hosts, processes, containers) along with
        their metrics. You can filter the results using tag filters and paginate through large result sets.

        Sample payload:
        {
            "tagFilterExpression": {
                "type": "TAG_FILTER",
                "entity": "NOT_APPLICABLE",
                "name": "label",
                "operator": "EQUALS",
                "value": "custom-metrics.jar"
            },
            "timeFrame": {
                "to": 1743923995000,
                "windowSize": 3600000
            },
            "pagination": {
                "retrievalSize": 200
            },
            "type": "jvmRuntimePlatform",
            "metrics": [
                {"metric": "memory.used", "granularity": 3600000, "aggregation": "MAX"},
                {"metric": "memory.used", "granularity": 600000, "aggregation": "MAX"},
                {"metric": "threads.blocked", "granularity": 3600000, "aggregation": "MEAN"},
                {"metric": "threads.blocked", "granularity": 600000, "aggregation": "MEAN"}
            ]
        }

        Args:
            payload: Complete request payload as a dictionary or a JSON string
            ctx: The MCP context (optional)

        Returns:
            Dictionary containing infrastructure entities and their metrics or error information
        """
        try:
            debug_print(f"get_entities called with payload={payload}")

            # If payload is a string, try to parse it as JSON
            if isinstance(payload, str):
                debug_print(f"Payload is a string, attempting to parse")
                try:
                    import json
                    try:
                        parsed_payload = json.loads(payload)
                        debug_print(f"Successfully parsed payload as JSON")
                        request_body = parsed_payload
                    except json.JSONDecodeError as e:
                        debug_print(f"JSON parsing failed: {e}, trying with quotes replaced")

                        # Try replacing single quotes with double quotes
                        fixed_payload = payload.replace("'", "\"")
                        try:
                            parsed_payload = json.loads(fixed_payload)
                            debug_print(f"Successfully parsed fixed JSON")
                            request_body = parsed_payload
                        except json.JSONDecodeError:
                            # Try as Python literal
                            import ast
                            try:
                                parsed_payload = ast.literal_eval(payload)
                                debug_print(f"Successfully parsed payload as Python literal")
                                request_body = parsed_payload
                            except (SyntaxError, ValueError) as e2:
                                debug_print(f"Failed to parse payload string: {e2}")
                                return {"error": f"Invalid payload format: {e2}", "payload": payload}
                except Exception as e:
                    debug_print(f"Error parsing payload string: {e}")
                    return {"error": f"Failed to parse payload: {e}", "payload": payload}
            else:
                # If payload is already a dictionary, use it directly
                debug_print(f"Using provided payload dictionary")
                request_body = payload

            debug_print(f"Final request body: {request_body}")

            # Create the GetInfrastructureQuery object
            try:
                # Create the query object directly from the request body
                get_infra_query = GetInfrastructureQuery(**request_body)
                debug_print("Successfully created GetInfrastructureQuery object")
            except Exception as model_error:
                error_msg = f"Failed to create GetInfrastructureQuery object: {model_error}"
                debug_print(error_msg)
                return {"error": error_msg, "request_body": request_body}

            # Call the get_entities method from the SDK
            debug_print("Calling API method get_entities")
            result = self.analyze_api.get_entities(
                get_infrastructure_query=get_infra_query
            )

            # Convert the result to a dictionary
            if hasattr(result, 'to_dict'):
                result_dict = result.to_dict()
            else:
                # If it's already a dict or another format, use it as is
                result_dict = result

            debug_print(f"Result from get_entities: {result_dict}")
            return result_dict
        except Exception as e:
            debug_print(f"Error in get_entities: {e}")
            traceback.print_exc(file=sys.stderr)
            return {"error": f"Failed to get entities: {str(e)}"}
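
Not part of the packaged file: a usage sketch for get_entities based on the docstring's sample payload, shortened to one metric per aggregation. Because the payload is passed straight into GetInfrastructureQuery(**request_body), the keys must be ones the instana_client model accepts; the values below simply repeat the docstring's example.

    # Illustrative call: memory and blocked-thread metrics for one JVM process.
    entities_payload = {
        "tagFilterExpression": {
            "type": "TAG_FILTER",
            "entity": "NOT_APPLICABLE",
            "name": "label",
            "operator": "EQUALS",
            "value": "custom-metrics.jar",
        },
        "timeFrame": {"to": 1743923995000, "windowSize": 3600000},
        "pagination": {"retrievalSize": 200},
        "type": "jvmRuntimePlatform",
        "metrics": [
            {"metric": "memory.used", "granularity": 3600000, "aggregation": "MAX"},
            {"metric": "threads.blocked", "granularity": 3600000, "aggregation": "MEAN"},
        ],
    }
    entities = asyncio.run(tools.get_entities(payload=entities_payload))
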
    @register_as_tool
    async def get_aggregated_entity_groups(self,
                                           payload: Optional[Union[Dict[str, Any], str]] = None,
                                           ctx=None) -> Dict[str, Any]:
        """
        Get grouped infrastructure entities with aggregated metrics.

        This tool groups entities of a specific type by specified tags and aggregates metrics for these groups.
        For example, you can group hosts by their region and get average CPU usage per region.

        Sample payload:
        {
            "timeFrame": {
                "to": 1743923995000,
                "windowSize": 3600000
            },
            "tagFilterExpression": {
                "type": "EXPRESSION",
                "logicalOperator": "AND",
                "elements": []
            },
            "pagination": {
                "retrievalSize": 20
            },
            "groupBy": ["host.name"],
            "type": "jvmRuntimePlatform",
            "metrics": [
                {"metric": "memory.used", "granularity": 3600000, "aggregation": "MEAN"},
                {"metric": "memory.used", "granularity": 600000, "aggregation": "MEAN"},
                {"metric": "threads.blocked", "granularity": 3600000, "aggregation": "MEAN"},
                {"metric": "threads.blocked", "granularity": 600000, "aggregation": "MEAN"}
            ],
            "order": {
                "by": "label",
                "direction": "ASC"
            }
        }

        Args:
            payload: Complete request payload as a dictionary or a JSON string
            ctx: The MCP context (optional)

        Returns:
            Dictionary containing grouped entities and their aggregated metrics or error information
        """
        try:
            debug_print(f"get_aggregated_entity_groups called with payload={payload}")

            # If no payload is provided, return an error
            if not payload:
                return {"error": "Payload is required for this operation"}

            # If payload is a string, try to parse it as JSON
            if isinstance(payload, str):
                debug_print(f"Payload is a string, attempting to parse")
                try:
                    import json
                    try:
                        parsed_payload = json.loads(payload)
                        debug_print(f"Successfully parsed payload as JSON")
                        request_body = parsed_payload
                    except json.JSONDecodeError as e:
                        debug_print(f"JSON parsing failed: {e}, trying with quotes replaced")

                        # Try replacing single quotes with double quotes
                        fixed_payload = payload.replace("'", "\"")
                        try:
                            parsed_payload = json.loads(fixed_payload)
                            debug_print(f"Successfully parsed fixed JSON")
                            request_body = parsed_payload
                        except json.JSONDecodeError:
                            # Try as Python literal
                            import ast
                            try:
                                parsed_payload = ast.literal_eval(payload)
                                debug_print(f"Successfully parsed payload as Python literal")
                                request_body = parsed_payload
                            except (SyntaxError, ValueError) as e2:
                                debug_print(f"Failed to parse payload string: {e2}")
                                return {"error": f"Invalid payload format: {e2}", "payload": payload}
                except Exception as e:
                    debug_print(f"Error parsing payload string: {e}")
                    return {"error": f"Failed to parse payload: {e}", "payload": payload}
            else:
                # If payload is already a dictionary, use it directly
                debug_print(f"Using provided payload dictionary")
                request_body = payload

            debug_print(f"Final request body: {request_body}")

            # Create the GetInfrastructureGroupsQuery object
            try:
                # Import the model class
                from instana_client.models.get_infrastructure_groups_query import GetInfrastructureGroupsQuery

                # Create the query object
                get_groups_query = GetInfrastructureGroupsQuery(**request_body)
                debug_print("Successfully created GetInfrastructureGroupsQuery object")
            except Exception as model_error:
                error_msg = f"Failed to create GetInfrastructureGroupsQuery object: {model_error}"
                debug_print(error_msg)
                return {"error": error_msg, "request_body": request_body}

            # Call the get_entity_groups method from the SDK
            debug_print("Calling API method get_entity_groups")
            try:
                # Use the without_preload_content version to get the raw response
                response = self.analyze_api.get_entity_groups_without_preload_content(
                    get_infrastructure_groups_query=get_groups_query
                )

                # Check if the response was successful
                if response.status != 200:
                    error_message = f"Failed to get entity groups: HTTP {response.status}"
                    debug_print(error_message)
                    return {"error": error_message}

                # Read the response content
                response_text = response.data.decode('utf-8')

                # Parse the response as JSON
                import json
                result_dict = json.loads(response_text)

                debug_print(f"Successfully parsed raw response")

                # Create a summarized version of the results
                return self._summarize_entity_groups_result(result_dict, request_body)
            except Exception as api_error:
                error_msg = f"API call failed: {api_error}"
                debug_print(error_msg)
                return {"error": error_msg}

        except Exception as e:
            debug_print(f"Error in get_aggregated_entity_groups: {e}")
            traceback.print_exc(file=sys.stderr)
            return {"error": f"Failed to get aggregated entity groups: {str(e)}"}
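
Not part of the packaged file: a usage sketch for get_aggregated_entity_groups. Unlike the other tools, this one does not return the raw API response; it funnels the parsed response through the _summarize_entity_groups_result helper defined next, so the caller only gets back the host names derived from the first groupBy tag.

    # Illustrative call: group JVMs by host and summarize the hosts found.
    groups_payload = {
        "timeFrame": {"to": 1743923995000, "windowSize": 3600000},
        "tagFilterExpression": {"type": "EXPRESSION", "logicalOperator": "AND", "elements": []},
        "pagination": {"retrievalSize": 20},
        "groupBy": ["host.name"],
        "type": "jvmRuntimePlatform",
        "metrics": [{"metric": "memory.used", "granularity": 3600000, "aggregation": "MEAN"}],
        "order": {"by": "label", "direction": "ASC"},
    }
    summary = asyncio.run(tools.get_aggregated_entity_groups(payload=groups_payload))
    # summary has the shape {"hosts": [...], "count": N, "summary": "Found N hosts: ..."}
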
    def _summarize_entity_groups_result(self, result_dict, query_body):
        """
        Create a summarized version of the entity groups result.

        Args:
            result_dict: The full API response
            query_body: The query body used to make the request

        Returns:
            A summarized version of the results
        """
        try:
            # Check if there's an error in the result
            if isinstance(result_dict, dict) and "error" in result_dict:
                return result_dict

            # Extract the group by tag
            group_by_tag = None
            if "groupBy" in query_body and isinstance(query_body["groupBy"], list) and len(query_body["groupBy"]) > 0:
                group_by_tag = query_body["groupBy"][0]

            # Extract host names if available
            host_names = []

            # Process each item in the results
            if "items" in result_dict and isinstance(result_dict["items"], list):
                for item in result_dict["items"]:
                    # Extract the host name
                    if "tags" in item and isinstance(item["tags"], dict) and group_by_tag in item["tags"]:
                        # Get the tag value, ensuring it's a string
                        tag_value = item["tags"][group_by_tag]
                        if isinstance(tag_value, str):
                            host_name = tag_value
                        elif isinstance(tag_value, dict) and "name" in tag_value:
                            # Handle case where tag value is a dictionary with a name field
                            host_name = tag_value["name"]
                        else:
                            # Convert other types to string
                            host_name = str(tag_value)

                        if host_name not in host_names:
                            host_names.append(host_name)

            # Sort host names alphabetically
            host_names.sort()

            # Create the exact format requested
            summary = {
                "hosts": host_names,
                "count": len(host_names),
                "summary": f"Found {len(host_names)} hosts: {', '.join(host_names)}"
            }

            return summary
        except Exception as e:
            debug_print(f"Error in _summarize_entity_groups_result: {e}")
            traceback.print_exc(file=sys.stderr)
            # If summarization fails, return an error message
            return {
                "error": f"Failed to summarize results: {str(e)}"
            }
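
Not part of the packaged file: a small sketch showing what the summarizer above produces for a synthetic API response; only the fields the helper actually reads are included.

    # Illustrative: duplicate host names are collapsed and the result is sorted.
    fake_response = {
        "items": [
            {"tags": {"host.name": "host-b"}},
            {"tags": {"host.name": "host-a"}},
            {"tags": {"host.name": "host-a"}},
        ]
    }
    print(tools._summarize_entity_groups_result(fake_response, {"groupBy": ["host.name"]}))
    # -> {'hosts': ['host-a', 'host-b'], 'count': 2, 'summary': 'Found 2 hosts: host-a, host-b'}
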
    @register_as_tool
    async def get_available_plugins(self,
                                    payload: Optional[Union[Dict[str, Any], str]] = None,
                                    ctx=None) -> Dict[str, Any]:
        """
        Get available plugins for infrastructure monitoring.

        This tool retrieves information about available plugins for infrastructure monitoring.
        You can use this to discover what types of entities can be monitored in your environment.

        Sample payload:
        {
            "timeFrame": {
                "to": 1743923995000,
                "windowSize": 3600000
            },
            "tagFilterExpression": {
                "type": "EXPRESSION",
                "logicalOperator": "AND",
                "elements": []
            },
            "query": "java",
            "offline": false
        }

        Args:
            payload: Complete request payload as a dictionary or a JSON string
            ctx: The MCP context (optional)

        Returns:
            Dictionary containing available plugins or error information
        """
        try:
            debug_print(f"get_available_plugins called with payload={payload}")

            # If payload is a string, try to parse it as JSON
            if isinstance(payload, str):
                debug_print(f"Payload is a string, attempting to parse")
                try:
                    import json
                    try:
                        parsed_payload = json.loads(payload)
                        debug_print(f"Successfully parsed payload as JSON")
                        request_body = parsed_payload
                    except json.JSONDecodeError as e:
                        debug_print(f"JSON parsing failed: {e}, trying with quotes replaced")

                        # Try replacing single quotes with double quotes
                        fixed_payload = payload.replace("'", "\"")
                        try:
                            parsed_payload = json.loads(fixed_payload)
                            debug_print(f"Successfully parsed fixed JSON")
                            request_body = parsed_payload
                        except json.JSONDecodeError:
                            # Try as Python literal
                            import ast
                            try:
                                parsed_payload = ast.literal_eval(payload)
                                debug_print(f"Successfully parsed payload as Python literal")
                                request_body = parsed_payload
                            except (SyntaxError, ValueError) as e2:
                                debug_print(f"Failed to parse payload string: {e2}")
                                return {"error": f"Invalid payload format: {e2}", "payload": payload}
                except Exception as e:
                    debug_print(f"Error parsing payload string: {e}")
                    return {"error": f"Failed to parse payload: {e}", "payload": payload}
            else:
                # If payload is already a dictionary, use it directly
                debug_print(f"Using provided payload dictionary")
                request_body = payload

            debug_print(f"Final request body: {request_body}")

            # Import the GetAvailablePluginsQuery class
            try:
                from instana_client.models.get_available_plugins_query import GetAvailablePluginsQuery
                debug_print("Successfully imported GetAvailablePluginsQuery")
            except ImportError as e:
                debug_print(f"Error importing GetAvailablePluginsQuery: {e}")
                return {"error": f"Failed to import GetAvailablePluginsQuery: {str(e)}"}

            # Create a GetAvailablePluginsQuery object from the request body
            try:
                # Extract parameters from the request body
                query_params = {}

                # Handle timeFrame
                if request_body and "timeFrame" in request_body:
                    time_frame = {}
                    if "to" in request_body["timeFrame"]:
                        time_frame["to"] = request_body["timeFrame"]["to"]
                    if "windowSize" in request_body["timeFrame"]:
                        time_frame["windowSize"] = request_body["timeFrame"]["windowSize"]
                    query_params["timeFrame"] = time_frame

                # Handle other parameters
                if request_body and "query" in request_body:
                    query_params["query"] = request_body["query"]

                if request_body and "offline" in request_body:
                    query_params["offline"] = request_body["offline"]

                if request_body and "tagFilterExpression" in request_body:
                    query_params["tagFilterExpression"] = request_body["tagFilterExpression"]

                debug_print(f"Creating GetAvailablePluginsQuery with params: {query_params}")
                query_object = GetAvailablePluginsQuery(**query_params)
                debug_print(f"Successfully created query object: {query_object}")
            except Exception as e:
                debug_print(f"Error creating GetAvailablePluginsQuery: {e}")
                return {"error": f"Failed to create query object: {str(e)}"}

            # Call the get_available_plugins method from the SDK with the query object
            debug_print("Calling get_available_plugins with query object")
            result = self.analyze_api.get_available_plugins(get_available_plugins_query=query_object)

            # Convert the result to a dictionary
            if hasattr(result, 'to_dict'):
                result_dict = result.to_dict()
            else:
                # If it's already a dict or another format, use it as is
                result_dict = result

            debug_print(f"Result from get_available_plugins: {result_dict}")
            return result_dict
        except Exception as e:
            debug_print(f"Error in get_available_plugins: {e}")
            traceback.print_exc(file=sys.stderr)
            return {"error": f"Failed to get available plugins: {str(e)}"}