mcp-instana 0.6.2__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/METADATA +179 -120
  2. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/RECORD +28 -21
  3. src/application/application_alert_config.py +397 -146
  4. src/application/application_analyze.py +597 -597
  5. src/application/application_call_group.py +528 -0
  6. src/application/application_catalog.py +0 -8
  7. src/application/application_global_alert_config.py +255 -38
  8. src/application/application_metrics.py +377 -237
  9. src/application/application_resources.py +414 -365
  10. src/application/application_settings.py +605 -1651
  11. src/application/application_topology.py +62 -62
  12. src/core/custom_dashboard_smart_router_tool.py +135 -0
  13. src/core/server.py +92 -119
  14. src/core/smart_router_tool.py +574 -0
  15. src/core/utils.py +17 -8
  16. src/custom_dashboard/custom_dashboard_tools.py +422 -0
  17. src/infrastructure/elicitation_handler.py +338 -0
  18. src/infrastructure/entity_registry.py +329 -0
  19. src/infrastructure/infrastructure_analyze_new.py +600 -0
  20. src/infrastructure/{infrastructure_analyze.py → infrastructure_analyze_old.py} +1 -16
  21. src/infrastructure/infrastructure_catalog.py +7 -28
  22. src/infrastructure/infrastructure_metrics.py +93 -17
  23. src/infrastructure/infrastructure_resources.py +5 -20
  24. src/infrastructure/infrastructure_topology.py +2 -8
  25. src/prompts/application/application_settings.py +58 -0
  26. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/WHEEL +0 -0
  27. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/entry_points.txt +0 -0
  28. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
@@ -0,0 +1,600 @@
+ """
+ Infrastructure Analyze Tool - Option 2 Architecture
+
+ This tool implements the complete Option 2 flow:
+ 1. LLM provides high-level intent (entity, metric, filters)
+ 2. Server normalizes intent
+ 3. Server resolves exact constants from schemas
+ 4. Server handles elicitation if ambiguous
+ 5. Server compiles complete payload
+ 6. Server calls Instana API
+
+ Key benefit: LLM never sees schema complexity, reducing tokens by 99.4%
+ """
+
+ import logging
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ try:
+     from instana_client.api.infrastructure_analyze_api import InfrastructureAnalyzeApi
+ except ImportError as e:
+     logger = logging.getLogger(__name__)
+     logger.error(f"Error importing Instana SDK: {e}", exc_info=True)
+     raise
+
+ from mcp.types import EmbeddedResource, TextContent, ToolAnnotations
+
+ from src.core.utils import BaseInstanaClient, register_as_tool, with_header_auth
+ from src.infrastructure.elicitation_handler import ElicitationHandler
+ from src.infrastructure.entity_registry import EntityCapabilityRegistry
+
+ logger = logging.getLogger(__name__)
+
+
+ class InfrastructureAnalyzeOption2(BaseInstanaClient):
+     """
+     Infrastructure analyze tool using Option 2 architecture.
+
+     This tool demonstrates the complete server-side payload assembly approach:
+     - LLM provides simple intent (~300 tokens)
+     - Server resolves all constants from schemas (~48,000 tokens saved)
+     - Zero hallucination on metric/tag names
+     """
+
+     def __init__(self, read_token: str, base_url: str, schema_dir: Optional[Path] = None):
+         """
+         Initialize the Infrastructure Analyze Option 2 tool.
+
+         Args:
+             read_token: Instana API read token
+             base_url: Instana API base URL
+             schema_dir: Path to schema directory (defaults to ../schema)
+         """
+         super().__init__(read_token=read_token, base_url=base_url)
+
+         # Initialize Option 2 components
+         if schema_dir is None:
+             # Default to schema directory relative to this file
+             schema_dir = Path(__file__).parent.parent.parent / "schema"
+
+         self.registry = EntityCapabilityRegistry(schema_dir)
+         self.elicitation_handler = ElicitationHandler()
+
+         logger.info(f"Initialized Option 2 tool with {len(self.registry.get_entity_types())} entity types")
+
+     @register_as_tool(
+         title="Analyze Infrastructure with Elicitation (Two-Pass)",
+         annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
+     )
+     @with_header_auth(InfrastructureAnalyzeApi)
+     async def analyze_infrastructure_elicitation(
+         self,
+         intent: Optional[str] = None,
+         entity: Optional[str] = None,
+         selections: Optional[Dict[str, Any]] = None,
+         ctx=None,
+         api_client=None
+     ) -> List[Any]:
+         """
+         Two-pass infrastructure analysis using machine-facing elicitation.
+
+         **Pass 1 - Intent to Schema:**
+         Provide intent and entity hint. Server returns full schema via MCP elicitation.
+
+         Parameters:
+         - intent: Natural language query (e.g., "maximum heap size of JVM on host galactica1")
+         - entity: Entity hint (e.g., "jvm", "kubernetes", "docker")
+
+         **Pass 2 - Selections to Results:**
+         Provide exact selections from schema. Server builds payload and calls API.
+
+         Parameters:
+         - selections: Dict with:
+             - entity_type: Exact entity type from schema (e.g., "jvmRuntimePlatform")
+             - metrics: Array of exact metric names from schema (e.g., ["jvm.heap.maxSize", "jvm.heap.used"])
+             - aggregation: Aggregation type (e.g., "max", "mean", "sum")
+             - filters: List of dicts with name/value pairs (e.g., [{"name": "host.name", "value": "galactica1"}])
+             - groupBy: (optional) Array of tag names to group entities by (e.g., ["host.name"], ["kubernetes.namespace.name"])
+             - timeRange: (optional) Time range string (e.g., "1h", "30m", "2h", "1d"). Default: "1h"
+             - order: (optional) Dict with "by" (metric name) and "direction" ("ASC" or "DESC")
+
+         Returns:
+             List of MCP content blocks (TextContent or EmbeddedResource)
+         """
+         try:
+             # Route based on input
+             if intent is not None and entity is not None:
+                 # Pass 1: Intent → Elicitation
+                 return await self._handle_pass1_intent({"intent": intent, "entity": entity})
+             elif selections is not None:
+                 # Pass 2: Selections → API Call
+                 return await self._handle_pass2_selections({"selections": selections}, api_client)
+             else:
+                 return [TextContent(
+                     type="text",
+                     text="Error: Invalid input. Provide either (intent + entity) for Pass 1, or (selections) for Pass 2."
+                 )]
+         except Exception as e:
+             logger.error(f"Error in analyze_infrastructure_elicitation: {e}", exc_info=True)
+             return [TextContent(
+                 type="text",
+                 text=f"Error: {e!s}"
+             )]
+
+     async def _handle_pass1_intent(self, arguments: Dict[str, Any]) -> List[Any]:
+         """
+         Handle Pass 1: Intent → Schema Elicitation
+
+         Args:
+             arguments: Dict with 'intent' and 'entity' keys
+
+         Returns:
+             List of MCP content blocks with schema
+         """
+         intent = arguments.get("intent", "")
+         entity_hint = arguments.get("entity", "").lower()
+         intent_lower = intent.lower()
+
+         logger.info(f"Pass 1: intent='{intent}', entity_hint='{entity_hint}'")
+
+         # Entity type resolution with priority (more specific first)
+         # Check for specific matches first, then fall back to general ones
+         entity_type = None
+
+         # Priority 1: Specific multi-word matches
+         if "kubernetes deployment" in entity_hint or "k8s deployment" in entity_hint:
+             entity_type = "kubernetesDeployment"
+         elif "kubernetes pod" in entity_hint or "k8s pod" in entity_hint:
+             entity_type = "kubernetesPod"
+         elif "docker container" in entity_hint:
+             entity_type = "dockerContainer"
+         elif "ibm mq" in entity_hint or "ibmmq" in entity_hint:
+             entity_type = "ibmMqQueue"
+         elif "db2 database" in entity_hint:
+             entity_type = "db2Database"
+         # Priority 2: Single-word specific matches
+         elif "deployment" in entity_hint:
+             entity_type = "kubernetesDeployment"
+         elif "pod" in entity_hint:
+             entity_type = "kubernetesPod"
+         elif "jvm" in entity_hint or "java" in entity_hint:
+             entity_type = "jvmRuntimePlatform"
+         elif "docker" in entity_hint or "container" in entity_hint:
+             entity_type = "dockerContainer"
+         elif "mq" in entity_hint or "queue" in entity_hint:
+             entity_type = "ibmMqQueue"
+         elif "db2" in entity_hint or "database" in entity_hint:
+             entity_type = "db2Database"
+         elif "host" in entity_hint or "server" in entity_hint or "machine" in entity_hint:
+             entity_type = "host"
+         # Priority 3: Generic kubernetes - use intent context to disambiguate
+         elif "kubernetes" in entity_hint or "k8s" in entity_hint:
+             # Smart disambiguation based on intent keywords
+             deployment_keywords = ["deployment", "replica", "availabletodesiredreplica", "desiredreplica", "availablereplica"]
+             pod_keywords = ["pod", "restart", "container"]
+
+             # Check intent for deployment-specific keywords
+             if any(keyword in intent_lower for keyword in deployment_keywords):
+                 entity_type = "kubernetesDeployment"
+                 logger.info("Resolved ambiguous 'kubernetes' to 'kubernetesDeployment' based on intent keywords")
+             # Check intent for pod-specific keywords
+             elif any(keyword in intent_lower for keyword in pod_keywords):
+                 entity_type = "kubernetesPod"
+                 logger.info("Resolved ambiguous 'kubernetes' to 'kubernetesPod' based on intent keywords")
+             else:
+                 # Still ambiguous - ask for clarification
+                 return [TextContent(
+                     type="text",
+                     text="Ambiguous entity type 'kubernetes'. Please specify: 'kubernetes pod' or 'kubernetes deployment'"
+                 )]
+
+         if not entity_type:
+             return [TextContent(
+                 type="text",
+                 text=f"Error: Unknown entity type '{entity_hint}'. Supported: jvm, kubernetes pod, kubernetes deployment, docker, ibmmq, db2, host"
+             )]
+
+         logger.info(f"Resolved entity type: {entity_type}")
+
+         # Load full schema
+         schema = self.registry.get_full_schema(entity_type)
+         if not schema:
+             return [TextContent(
+                 type="text",
+                 text=f"Error: Could not load schema for {entity_type}"
+             )]
+
+         logger.info(f"Loaded schema for {entity_type}")
+
+         # Create elicitation response
+         elicitation_content = self.elicitation_handler.create_schema_elicitation(
+             entity_type=entity_type,
+             schema=schema,
+             intent=intent
+         )
+
+         logger.info(f"Created elicitation with {len(elicitation_content)} content blocks")
+
+         return elicitation_content
+
+     async def _handle_pass2_selections(
+         self,
+         arguments: Dict[str, Any],
+         api_client
+     ) -> List[Any]:
+         """
+         Handle Pass 2: Selections → API Call
+
+         Args:
+             arguments: Dict with 'selections' key containing user selections
+             api_client: Instana API client
+
+         Returns:
+             List with TextContent containing formatted results
+         """
+         selections = arguments.get("selections", {})
+
+         entity_type = selections.get("entity_type")
+         # Support both "metrics" (correct) and "metric" (backward compatibility)
+         metrics = selections.get("metrics") or selections.get("metric", [])
+         aggregation = selections.get("aggregation", "mean")
+         filters = selections.get("filters", [])
+         group_by = selections.get("groupBy", [])
+         order = selections.get("order")  # Optional: {"by": "metric_name", "direction": "ASC|DESC"}
+         time_range = selections.get("timeRange", "1h")  # Default to 1 hour
+         pagination = selections.get("pagination", {})  # Optional: {"page": 1, "pageSize": 20} or {"offset": 0, "limit": 20}
+
+         # Ensure metrics is a list
+         if not isinstance(metrics, list):
+             metrics = [metrics] if metrics else []
+
+         # Ensure groupBy is a list
+         if not isinstance(group_by, list):
+             group_by = [group_by] if group_by else []
+
+         logger.info(f"Pass 2: entity_type={entity_type}, metrics={metrics}, aggregation={aggregation}, groupBy={group_by}, order={order}, timeRange={time_range}")
+
+         if not entity_type or not metrics:
+             return [TextContent(
+                 type="text",
+                 text="Error: selections must include 'entity_type' and 'metrics' (array)"
+             )]
+
+         # Validate groupBy if provided
+         if group_by and len(group_by) > 5:
+             return [TextContent(
+                 type="text",
+                 text="Error: groupBy can have a maximum of 5 tag names"
+             )]
+
+         # Build the payload directly for the PoC (a dedicated payload compiler can replace this later)
+         from instana_client.models.cursor_pagination import CursorPagination
+         from instana_client.models.get_infrastructure_groups_query import (
+             GetInfrastructureGroupsQuery,
+         )
+         from instana_client.models.get_infrastructure_query import (
+             GetInfrastructureQuery,
+         )
+         from instana_client.models.infra_metric_configuration import (
+             InfraMetricConfiguration,
+         )
+         from instana_client.models.order import Order
+         from instana_client.models.simple_metric_configuration import (
+             SimpleMetricConfiguration,
+         )
+         from instana_client.models.tag_filter import TagFilter
+         from instana_client.models.tag_filter_all_of_value import TagFilterAllOfValue
+         from instana_client.models.tag_filter_expression import TagFilterExpression
+         from instana_client.models.time_frame import TimeFrame
+
+         # Parse time range - supports relative, absolute timestamps, and human-readable dates
+         def parse_time_range(time_input):
+             """
+             Convert time range to TimeFrame parameters.
+
+             Supports three formats:
+             1. Relative: "1h", "30m", "2h", "1d" -> returns (window_size_ms, None)
+             2. Absolute (timestamps): {"from": timestamp_ms, "to": timestamp_ms} -> returns (window_size_ms, to_ms)
+             3. Absolute (date strings): {"from": "2026-01-24 12:25:00", "to": "2026-01-24 14:40:00"} -> returns (window_size_ms, to_ms)
+
+             Returns:
+                 tuple: (window_size_ms, to_timestamp_ms or None)
+             """
+             from datetime import datetime
+
+             # Handle dict format for absolute time range
+             if isinstance(time_input, dict):
+                 from_val = time_input.get("from")
+                 to_val = time_input.get("to")
+
+                 if from_val is not None and to_val is not None:
+                     # Check if values are strings (date format) or integers (timestamps)
+                     if isinstance(from_val, str) and isinstance(to_val, str):
+                         # Parse date strings to timestamps
+                         try:
+                             # Support multiple date formats
+                             date_formats = [
+                                 "%Y-%m-%d %H:%M:%S",
+                                 "%Y-%m-%d %H:%M",
+                                 "%Y-%m-%dT%H:%M:%S",
+                                 "%Y-%m-%dT%H:%M:%SZ",
+                                 "%d-%B-%Y %H:%M",  # e.g., "24-January-2026 12:25"
+                                 "%d-%b-%Y %H:%M",  # e.g., "24-Jan-2026 12:25"
+                             ]
+
+                             from_dt = None
+                             to_dt = None
+
+                             for fmt in date_formats:
+                                 try:
+                                     from_dt = datetime.strptime(from_val, fmt)
+                                     to_dt = datetime.strptime(to_val, fmt)
+                                     break
+                                 except ValueError:
+                                     continue
+
+                             if from_dt is None or to_dt is None:
+                                 logger.error(f"Could not parse date strings: from='{from_val}', to='{to_val}'")
+                                 return (3600000, None)
+
+                             # Use system's local timezone for timestamp conversion
+                             from_ts = int(from_dt.timestamp() * 1000)
+                             to_ts = int(to_dt.timestamp() * 1000)
+                             window_size_ms = to_ts - from_ts
+
+                             logger.info(f"Parsed date strings (local timezone): from='{from_val}' ({from_ts}), to='{to_val}' ({to_ts}), window={window_size_ms}ms")
+                             return (window_size_ms, to_ts)
+
+                         except Exception as e:
+                             logger.error(f"Error parsing date strings: {e}")
+                             return (3600000, None)
+                     else:
+                         # Assume numeric timestamps
+                         from_ts = int(from_val)
+                         to_ts = int(to_val)
+                         window_size_ms = to_ts - from_ts
+                         logger.info(f"Parsed timestamp range: from={from_ts}, to={to_ts}, window={window_size_ms}ms")
+                         return (window_size_ms, to_ts)
+                 else:
+                     logger.warning("Absolute time range missing 'from' or 'to', defaulting to 1h")
+                     return (3600000, None)
+
+             # Handle string format for relative time range
+             time_str = str(time_input).lower().strip()
+             if time_str.endswith('h'):
+                 hours = int(time_str[:-1])
+                 window_size_ms = hours * 3600000
+             elif time_str.endswith('m'):
+                 minutes = int(time_str[:-1])
+                 window_size_ms = minutes * 60000
+             elif time_str.endswith('d'):
+                 days = int(time_str[:-1])
+                 window_size_ms = days * 86400000
+             else:
+                 # Default to 1 hour if format not recognized
+                 window_size_ms = 3600000
+
+             logger.info(f"Parsed relative time range '{time_str}' to {window_size_ms}ms")
+             return (window_size_ms, None)
+
+         window_size_ms, to_timestamp = parse_time_range(time_range)
+
+         # Build tag filter expression
+         if filters and len(filters) > 0:
+             # Single filter case - use TagFilter directly
+             if len(filters) == 1:
+                 filter_dict = filters[0]
+                 filter_name = filter_dict.get("name")
+                 filter_value = filter_dict.get("value")
+
+                 tag_filter_expression = TagFilter(
+                     type="TAG_FILTER",
+                     entity="NOT_APPLICABLE",
+                     name=filter_name,
+                     operator="EQUALS",
+                     value=TagFilterAllOfValue(filter_value)
+                 )
+             else:
+                 # Multiple filters - use TagFilterExpression with AND
+                 tag_filter_elements = []
+                 for filter_dict in filters:
+                     filter_name = filter_dict.get("name")
+                     filter_value = filter_dict.get("value")
+                     if filter_name and filter_value:
+                         tag_filter_elements.append(TagFilter(
+                             type="TAG_FILTER",
+                             entity="NOT_APPLICABLE",
+                             name=filter_name,
+                             operator="EQUALS",
+                             value=TagFilterAllOfValue(filter_value)
+                         ))
+
+                 tag_filter_expression = TagFilterExpression(
+                     type="EXPRESSION",
+                     logical_operator="AND",
+                     elements=tag_filter_elements
+                 )
+         else:
+             # No filters - use empty expression
+             tag_filter_expression = TagFilterExpression(
+                 type="EXPRESSION",
+                 logical_operator="AND",
+                 elements=[]
+             )
+
+         # Build metrics array with proper Pydantic objects
+         # Note: granularity is intentionally omitted as it causes data discrepancies with UI
+         infra_metrics = []
+         for metric_name in metrics:
+             metric_config = SimpleMetricConfiguration(
+                 metric=metric_name,
+                 aggregation=aggregation.upper()
+             )
+             infra_metric = InfraMetricConfiguration(actual_instance=metric_config)
+             infra_metrics.append(infra_metric)
+
+         logger.info(f"Built {len(infra_metrics)} metric configurations (without granularity)")
+
+         # Build Order object if provided
+         order_obj = None
+         if order and isinstance(order, dict):
+             order_by = order.get("by")
+             order_direction = order.get("direction", "DESC").upper()
+             if order_by:
+                 # Ensure order.by is in format "metricName.AGGREGATION"
+                 # If LLM provides just metric name, append aggregation
+                 if "." not in order_by and aggregation:
+                     order_by = f"{order_by}.{aggregation.upper()}"
+                     logger.info(f"Appended aggregation to order.by: {order_by}")
+
+                 order_obj = Order(by=order_by, direction=order_direction)
+                 logger.info(f"Built Order: by={order_by}, direction={order_direction}")
+
+         # Build pagination object
+         # Supports pagination formats: {"page": 1, "pageSize": 20} or {"offset": 0, "limit": 20}
+         page_size = 50  # Default
+         offset = None
+
+         if pagination and isinstance(pagination, dict):
+             # Handle pageSize or limit
+             page_size = pagination.get("pageSize") or pagination.get("limit", 50)
+
+             # Handle page number (convert to offset)
+             page = pagination.get("page")
+             if page is not None and page > 0:
+                 offset = (page - 1) * page_size
+                 logger.info(f"Converted page {page} to offset {offset}")
+
+             # Handle direct offset
+             if "offset" in pagination:
+                 offset = pagination.get("offset", 0)
+
+         logger.info(f"Pagination: pageSize={page_size}, offset={offset}")
+
+         # Build CursorPagination object
+         if offset is not None and offset > 0:
+             cursor_pagination = CursorPagination(retrieval_size=page_size, offset=offset)
+         else:
+             cursor_pagination = CursorPagination(retrieval_size=page_size)
+
+         # Build TimeFrame object - supports both relative and absolute time ranges
+         if to_timestamp is not None:
+             # Absolute time range: from specific timestamp to another timestamp
+             time_frame = TimeFrame(window_size=window_size_ms, to=to_timestamp)
+             logger.info(f"Built absolute TimeFrame: window_size={window_size_ms}ms, to={to_timestamp}")
+         else:
+             # Relative time range: last N hours/minutes/days from now
+             time_frame = TimeFrame(window_size=window_size_ms)
+             logger.info(f"Built relative TimeFrame: window_size={window_size_ms}ms")
+
+         # Build query with proper Pydantic objects - conditional based on groupBy
+         if group_by and len(group_by) > 0:
+             # Use GetInfrastructureGroupsQuery for grouped queries
+             query = GetInfrastructureGroupsQuery(
+                 group_by=group_by,  # Required for groups API
+                 type=entity_type,
+                 metrics=infra_metrics,
+                 tag_filter_expression=tag_filter_expression,  # Required field
+                 time_frame=time_frame,
+                 pagination=cursor_pagination,
+                 order=order_obj  # Optional
+             )
+             logger.info(f"Built GROUPS query payload for {entity_type} with groupBy={group_by}")
+         else:
+             # Use GetInfrastructureQuery for non-grouped queries (existing behavior)
+             query = GetInfrastructureQuery(
+                 type=entity_type,
+                 metrics=infra_metrics,
+                 tag_filter_expression=tag_filter_expression,  # Required field
+                 time_frame=time_frame,
+                 pagination=cursor_pagination,
+                 order=order_obj  # Optional
+             )
+             logger.info(f"Built ENTITIES query payload for {entity_type}")
+
+         # Log the complete payload being sent to the API
+         import json
+         try:
+             payload_dict = query.to_dict()
+             logger.info("=" * 80)
+             logger.info("FINAL API PAYLOAD:")
+             logger.info(json.dumps(payload_dict, indent=2))
+             logger.info("=" * 80)
+         except Exception as e:
+             logger.warning(f"Could not serialize payload for logging: {e}")
+
+         # Call Instana API - conditional based on groupBy
+         try:
+             if group_by and len(group_by) > 0:
+                 logger.info("Calling Instana API: get_entity_groups_without_preload_content (with groupBy)")
+                 raw_response = api_client.get_entity_groups_without_preload_content(
+                     get_infrastructure_groups_query=query
+                 )
+             else:
+                 logger.info("Calling Instana API: get_entities_without_preload_content (no groupBy)")
+                 raw_response = api_client.get_entities_without_preload_content(
+                     get_infrastructure_query=query
+                 )
+             logger.info("API call successful, processing response...")
+
+             # Parse response (json is already imported above)
+             response_text = raw_response.data.decode('utf-8')
+             result_dict = json.loads(response_text)
+
+             items = result_dict.get("items", [])
+             logger.info(f"API returned {len(items)} items")
+
+             # Format results - different format for grouped vs individual entities
+             if not items:
+                 result_text = f"No {entity_type} entities found matching your criteria."
+             elif group_by and len(group_by) > 0:
+                 # Grouped results format
+                 result_text = f"Found {len(items)} groups (grouped by {', '.join(group_by)}):\n\n"
+                 for idx, item in enumerate(items, 1):  # Show all groups
+                     # For groups, extract the group key from 'tags' field
+                     tags = item.get("tags", {})
+                     count = item.get("count", 0)
+                     metrics_data = item.get("metrics", {})
+
+                     # Build group label from tags (the groupBy fields)
+                     group_parts = []
+                     for tag_name in group_by:
+                         tag_value = tags.get(tag_name, "unknown")
+                         group_parts.append(f"{tag_name}={tag_value}")
+                     group_label = ", ".join(group_parts)
+
+                     result_text += f"{idx}. Group: {group_label} (count: {count})\n"
+                     if metrics_data:
+                         for metric_key, metric_value in metrics_data.items():
+                             # Check if metric_key matches any of the requested metrics
+                             if any(requested_metric in metric_key for requested_metric in metrics):
+                                 result_text += f" {metric_key}: {metric_value}\n"
+                     result_text += "\n"
+             else:
+                 # Individual entities format - return all entities
+                 result_text = f"Found {len(items)} {entity_type} entities:\n\n"
+                 for idx, item in enumerate(items, 1):  # Show all entities
+                     label = item.get("label", "unknown")
+                     metrics_data = item.get("metrics", {})
+
+                     result_text += f"{idx}. {label}\n"
+                     if metrics_data:
+                         for metric_key, metric_value in metrics_data.items():
+                             # Check if metric_key matches any of the requested metrics
+                             if any(requested_metric in metric_key for requested_metric in metrics):
+                                 result_text += f" {metric_key}: {metric_value}\n"
+                     result_text += "\n"
+
+             return [TextContent(type="text", text=result_text)]
+
+         except Exception as e:
+             logger.error(f"API call failed: {e}", exc_info=True)
+             return [TextContent(
+                 type="text",
+                 text=f"Error calling Instana API: {e!s}"
+             )]
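
To make the two-pass contract concrete, here is a minimal sketch of the argument payloads the new tool accepts, assembled from the docstrings above; the host and metric values are illustrative examples, not part of the diff:

    # Pass 1: high-level intent only - the server answers with the full entity
    # schema via MCP elicitation, and no Instana API call is made yet.
    pass1_args = {"intent": "maximum heap size of JVM on host galactica1", "entity": "jvm"}

    # Pass 2: exact selections copied from the elicited schema - the server
    # compiles the Pydantic payload and calls the Instana API.
    pass2_args = {
        "selections": {
            "entity_type": "jvmRuntimePlatform",
            "metrics": ["jvm.heap.maxSize"],
            "aggregation": "max",
            "filters": [{"name": "host.name", "value": "galactica1"}],
            "groupBy": ["host.name"],
            "timeRange": "1h",  # or {"from": ..., "to": ...} for an absolute range
            "order": {"by": "jvm.heap.maxSize", "direction": "DESC"},
        }
    }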
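The time-range parser returns (window_size_ms, to_timestamp_ms) tuples. It is a nested helper, so it cannot be imported directly, but if it were lifted to module scope its contract could be pinned down with checks like these, re-derived from the arithmetic in the hunk:

    # Relative ranges: window only; the API anchors the window to "now".
    assert parse_time_range("30m") == (30 * 60000, None)    # 1,800,000 ms
    assert parse_time_range("2h") == (2 * 3600000, None)    # 7,200,000 ms
    assert parse_time_range("1d") == (86400000, None)
    assert parse_time_range("oops") == (3600000, None)      # unrecognized -> 1h default

    # Absolute ranges: window is the difference, and the end timestamp is kept.
    assert parse_time_range(
        {"from": 1_700_000_000_000, "to": 1_700_003_600_000}
    ) == (3600000, 1_700_003_600_000)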
@@ -52,10 +52,7 @@ class InfrastructureAnalyzeMCPTools(BaseInstanaClient):
          """Initialize the Infrastructure Analyze MCP tools client."""
          super().__init__(read_token=read_token, base_url=base_url)

-     @register_as_tool(
-         title="Get Available Metrics",
-         annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
-     )
+     # @register_as_tool(...) # Disabled for future reference
      @with_header_auth(InfrastructureAnalyzeApi)
      async def get_available_metrics(self,
                                      payload: Optional[Union[Dict[str, Any], str]] = None,
@@ -191,10 +188,6 @@ class InfrastructureAnalyzeMCPTools(BaseInstanaClient):
              logger.error(f"Error in get_available_metrics: {e}", exc_info=True)
              return {"error": f"Failed to get available metrics: {e!s}"}

-     @register_as_tool(
-         title="Get Entities",
-         annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
-     )
      @with_header_auth(InfrastructureAnalyzeApi)
      async def get_entities(self,
                             payload: Optional[Union[Dict[str, Any], str]] = None,
@@ -309,10 +302,6 @@ class InfrastructureAnalyzeMCPTools(BaseInstanaClient):
              logger.error(f"Error in get_entities: {e}", exc_info=True)
              return {"error": f"Failed to get entities: {e!s}"}

-     @register_as_tool(
-         title="Get Aggregated Entity Groups",
-         annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
-     )
      @with_header_auth(InfrastructureAnalyzeApi)
      async def get_aggregated_entity_groups(self,
                                             payload: Optional[Union[Dict[str, Any], str]] = None,
@@ -513,10 +502,6 @@ class InfrastructureAnalyzeMCPTools(BaseInstanaClient):
                  "error": f"Failed to summarize results: {e!s}"
              }

-     @register_as_tool(
-         title="Get Available Plugins",
-         annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False)
-     )
      @with_header_auth(InfrastructureAnalyzeApi)
      async def get_available_plugins(self,
                                      payload: Optional[Union[Dict[str, Any], str]] = None,
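
The four hunks above all make the same change: the @register_as_tool decorator is removed (the first hunk leaves a "# Disabled for future reference" comment) while @with_header_auth stays, so each method keeps its auth wiring but is no longer published as an MCP tool. The real register_as_tool lives in src/core/utils.py and its implementation is not shown in this diff; a toy stand-in illustrates the mechanism:

    TOOLS = {}  # toy registry - hypothetical, for illustration only

    def register_as_tool(title, annotations=None):
        def wrap(fn):
            TOOLS[title] = fn  # publishing happens at decoration time
            return fn          # the function itself is unchanged
        return wrap

    class Demo:
        @register_as_tool(title="Get Available Metrics")
        def registered(self):
            pass

        # @register_as_tool(...)  # commented out, as in the hunks above
        def unregistered(self):
            pass  # still callable internally, just not exposed as a tool

    assert "Get Available Metrics" in TOOLS and len(TOOLS) == 1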