mcp-instana 0.6.2__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/METADATA +179 -120
  2. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/RECORD +28 -21
  3. src/application/application_alert_config.py +397 -146
  4. src/application/application_analyze.py +597 -597
  5. src/application/application_call_group.py +528 -0
  6. src/application/application_catalog.py +0 -8
  7. src/application/application_global_alert_config.py +255 -38
  8. src/application/application_metrics.py +377 -237
  9. src/application/application_resources.py +414 -365
  10. src/application/application_settings.py +605 -1651
  11. src/application/application_topology.py +62 -62
  12. src/core/custom_dashboard_smart_router_tool.py +135 -0
  13. src/core/server.py +92 -119
  14. src/core/smart_router_tool.py +574 -0
  15. src/core/utils.py +17 -8
  16. src/custom_dashboard/custom_dashboard_tools.py +422 -0
  17. src/infrastructure/elicitation_handler.py +338 -0
  18. src/infrastructure/entity_registry.py +329 -0
  19. src/infrastructure/infrastructure_analyze_new.py +600 -0
  20. src/infrastructure/{infrastructure_analyze.py → infrastructure_analyze_old.py} +1 -16
  21. src/infrastructure/infrastructure_catalog.py +7 -28
  22. src/infrastructure/infrastructure_metrics.py +93 -17
  23. src/infrastructure/infrastructure_resources.py +5 -20
  24. src/infrastructure/infrastructure_topology.py +2 -8
  25. src/prompts/application/application_settings.py +58 -0
  26. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/WHEEL +0 -0
  27. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/entry_points.txt +0 -0
  28. {mcp_instana-0.6.2.dist-info → mcp_instana-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
@@ -0,0 +1,338 @@
+ """
+ Elicitation Handler Module
+
+ Handles ambiguity resolution when multiple options exist.
+ Asks user to choose when intent is unclear.
+ """
+
+ import json
+ import logging
+ from dataclasses import dataclass
+ from typing import Any, Dict, List, Optional
+
+ from mcp.types import EmbeddedResource, TextContent, TextResourceContents
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class ElicitationRequest:
+     """
+     Structured elicitation request to present to user.
+     """
+     type: str  # "choice", "clarification", "missing_parameter"
+     message: str
+     options: List[Dict[str, str]]
+     context: Dict[str, Any]
+
+
+ class ElicitationHandler:
+     """
+     Handles ambiguity resolution via elicitation.
+
+     Responsibilities:
+     1. Detect when intent is ambiguous
+     2. Create user-friendly choice requests
+     3. Preserve context for follow-up
+     """
+
+     def check_ambiguity(self, normalized_intent, registry, resolved_metrics: Optional[List[str]] = None) -> Optional[ElicitationRequest]:
+         """
+         Check if intent is ambiguous and needs elicitation.
+
+         Args:
+             normalized_intent: NormalizedIntent object
+             registry: EntityCapabilityRegistry instance
+             resolved_metrics: List of resolved metric names (if any)
+
+         Returns:
+             ElicitationRequest if ambiguous, None otherwise
+         """
+
+         # Check 1: Unknown entity
+         if normalized_intent.entity_class == "unknown":
+             return self._create_unknown_entity_elicitation(normalized_intent)
+
+         # Check 2: Unknown metric
+         if normalized_intent.metric_category == "unknown":
+             return self._create_unknown_metric_elicitation(normalized_intent, registry)
+
+         # Check 3: Multiple possible metric matches
+         if resolved_metrics and len(resolved_metrics) > 1:
+             return self._create_multiple_metrics_elicitation(normalized_intent, resolved_metrics)
+
+         # Check 4: No metric found
+         if resolved_metrics is not None and len(resolved_metrics) == 0:
+             return self._create_no_metric_elicitation(normalized_intent, registry)
+
+         # No ambiguity detected
+         return None
+
+     def _create_unknown_entity_elicitation(self, normalized_intent) -> ElicitationRequest:
+         """
+         Create ElicitationRequest for unknown entity type.
+         """
+         return ElicitationRequest(
+             type="clarification",
+             message=f"I don't recognize the entity '{normalized_intent.entity_class}'. "
+                     f"Please specify one of the supported entity types:",
+             options=[
+                 {"label": "Kubernetes Pod", "value": "kubernetes pod"},
+                 {"label": "Kubernetes Deployment", "value": "kubernetes deployment"},
+                 {"label": "Docker Containers", "value": "docker container"},
+                 {"label": "JVM Applications", "value": "jvm application"},
+                 {"label": "DB2 Database", "value": "db2 database"},
+                 {"label": "IBM MQ Queues", "value": "ibm mq queues"},
+             ],
+             context={"normalized_intent": normalized_intent}
+         )
+
+     def _create_unknown_metric_elicitation(
+         self,
+         normalized_intent,
+         registry
+     ) -> ElicitationRequest:
+         """
+         Create elicitation for unknown metric.
+         """
+         # Try to get entity capability to show available metrics
+         capability = registry.resolve(normalized_intent.entity_class, normalized_intent.entity_kind)
+
+         if capability:
+             # Show some common metrics for this entity
+             common_metrics = capability.metrics[:10]  # Show first 10 metrics
+             options = [{"label": metric, "value": metric} for metric in common_metrics]
+
+             return ElicitationRequest(
+                 type="choice",
+                 message=f"I don't recognize the metric '{normalized_intent.metric_category}'. "
+                         f"Here are some available metrics for {capability.entity_type}:",
+                 options=options,
+                 context={
+                     "normalized_intent": normalized_intent,
+                     "entity_type": capability.entity_type
+                 }
+             )
+         else:
+             return ElicitationRequest(
+                 type="clarification",
+                 message=f"I don't recognize the metric '{normalized_intent.metric_category}'. "
+                         f"Please provide a valid metric name.",
+                 options=[],
+                 context={"normalized_intent": normalized_intent}
+             )
+
+     def _create_multiple_metrics_elicitation(self, normalized_intent, resolved_metrics: List[str]) -> ElicitationRequest:
+         """
+         Create elicitation for multiple metrics match.
+         """
+         options = [{"label": metric, "value": metric} for metric in resolved_metrics]
+
+         return ElicitationRequest(
+             type="choice",
+             message=f"Multiple metrics match '{normalized_intent.metric_category}'. "
+                     f"Please select the correct one:",
+             options=options,
+             context={
+                 "normalized_intent": normalized_intent,
+                 "resolved_metrics": resolved_metrics
+             }
+         )
+
+     def _create_no_metric_elicitation(
+         self,
+         normalized_intent,
+         registry
+     ) -> ElicitationRequest:
+         """
+         Create elicitation when no metrics found.
+         """
+         capability = registry.resolve(
+             normalized_intent.entity_class,
+             normalized_intent.entity_kind
+         )
+
+         if capability:
+             # Show all available metrics
+             options = [
+                 {"label": metric, "value": metric}
+                 for metric in capability.metrics[:20]  # First 20
+             ]
+
+             return ElicitationRequest(
+                 type="choice",
+                 message=f"No metrics found matching '{normalized_intent.metric_category}' "
+                         f"for {capability.entity_type}. Available metrics:",
+                 options=options,
+                 context={
+                     "normalized_intent": normalized_intent,
+                     "entity_type": capability.entity_type
+                 }
+             )
+
+         return ElicitationRequest(
+             type="clarification",
+             message=f"Could not find metrics for '{normalized_intent.metric_category}'.",
+             options=[],
+             context={"normalized_intent": normalized_intent}
+         )
+
+     def create_schema_elicitation(self, entity_type: str, schema: dict, intent: str) -> list:
+         """
+         Create machine-facing elicitation with full schema.
+
+         Returns MCP-compliant list of content blocks for LLM.
+
+         Args:
+             entity_type: Entity type (e.g., "jvmRuntimePlatform")
+             schema: Complete schema dict from entity_registry
+             intent: User's original query
+
+         Returns:
+             List of MCP content blocks: [TextContent, EmbeddedResource]
+         """
+         # Extract schema info for instruction text
+         metrics = schema.get("parameters", {}).get("metrics", {}).get("metric", [])
+         tag_filters_params = schema.get("parameters", {}).get("tagFilterElements", {})
+         tag_filters = tag_filters_params.get("enum", []) if isinstance(tag_filters_params, dict) else []
+         aggregations = schema.get("parameters", {}).get("metrics", {}).get("aggregation", {}).get("enum", [])
+
+         # Create instruction text
+         instruction_text = f"""Based on your query: "{intent}"
+ I'm providing the complete schema for {entity_type}.
+
+ **Schema Summary:**
+ - {len(metrics)} available metrics
+ - {len(tag_filters)} available tag filters
+ - Aggregations: {', '.join(aggregations) if aggregations else 'N/A'}
+
+ **CRITICAL RULES - YOU MUST FOLLOW THESE:**
+ 1. ⚠️ ONLY use names that EXACTLY match the schema below - NO assumptions, NO variations, NO constructions
+ 2. ⚠️ If user says "label.node name", search the schema for filters containing BOTH "label" AND "node"
+ 3. ⚠️ NEVER infer or construct filter/metric names - copy them EXACTLY from the schema
+ 4. ⚠️ If you cannot find an exact match in the schema, ask the user for clarification
+ 5. ⚠️ The schema is your ONLY source of truth - do not use external knowledge
+ 6. ⚠️ **NEVER add "groupBy" unless user EXPLICITLY uses grouping keywords** (see examples below)
+
+ **EXAMPLE - CORRECT BEHAVIOR:**
+ User query: "pods with label.node name = worker3"
+ Your process:
+ Step 1: Search schema filters for "label" AND "node"
+ Step 2: Find exact match: "kubernetes.pod.label.node_name"
+ Step 3: Use EXACTLY: {{"name": "kubernetes.pod.label.node_name", "value": "worker3"}}
+
+ **EXAMPLE - INCORRECT BEHAVIOR (DO NOT DO THIS):**
+ User query: "pods with label.node name = worker3"
+ ❌ WRONG: Assume it means "kubernetes.node.name" (this is a node property, not a pod label)
+ ❌ WRONG: Construct "kubernetes.pod.label.node" (incomplete name)
+ ❌ WRONG: Use "node.name" (too short, not in schema)
+ ✅ CORRECT: Use "kubernetes.pod.label.node_name" (exact match from schema)
+
+ **GROUPBY RULES - WHEN TO USE AND WHEN NOT TO USE:**
+
+ ⚠️ **DO NOT use "groupBy" unless user EXPLICITLY asks for it with these keywords:**
+ - "group by", "grouped by", "per", "for each", "by host", "by namespace", "break down by"
+
+ **CORRECT Examples (NO groupBy):**
+ ✅ "Show me CPU usage for hosts" → NO groupBy (aggregated result across all hosts)
+ ✅ "What is memory usage of pods in production?" → NO groupBy (aggregated result)
+ ✅ "Get free memory of host with CPU model Intel Xeon" → NO groupBy (aggregated result)
+ ✅ "List hosts with high disk usage" → NO groupBy (aggregated result)
+
+ **CORRECT Examples (WITH groupBy):**
+ ✅ "Show me CPU usage grouped by host" → groupBy: ["host.name"]
+ ✅ "What is memory usage per namespace?" → groupBy: ["kubernetes.namespace.name"]
+ ✅ "Get CPU for each pod" → groupBy: ["kubernetes.pod.name"]
+ ✅ "Break down memory by host and cluster" → groupBy: ["host.name", "kubernetes.cluster.name"]
+
+ **INCORRECT Examples (DO NOT DO THIS):**
+ ❌ "Show me CPU usage for hosts" → groupBy: ["host.name"] ← WRONG! No grouping keyword used
+ ❌ "Get memory of pods" → groupBy: ["kubernetes.pod.name"] ← WRONG! No "per" or "each" keyword
+ ❌ "List hosts with high CPU" → groupBy: ["host.name"] ← WRONG! User wants aggregated list, not breakdown
+
+ **Instructions:**
+ 1. Review the embedded schema resource below carefully
+ 2. Select one or more exact metric names from the schema (up to 10 metrics)
+ 3. Select the aggregation type based on the user's intent:
+    - "mean" for average/typical/normal values
+    - "sum" for total/combined/count/cumulative values
+    - "max" for highest/maximum/peak/largest values
+    - "min" for lowest/minimum/smallest values
+ 4. For filters: Search the schema for exact matches to user's terms
+    - If user says "label.X", look for filters containing "label" and "X"
+    - If user says "node name", look for filters containing "node" and "name"
+    - Copy the COMPLETE filter name from schema (e.g., "kubernetes.pod.label.node_name")
+ 5. **ONLY include "groupBy" if user EXPLICITLY uses grouping keywords:**
+    - Keywords: "group by", "grouped by", "per", "for each", "by host", "by namespace", "break down by"
+    - If NO grouping keyword → DO NOT include "groupBy" in your response
+    - If grouping keyword present → select groupBy tags from schema (max 5)
+ 6. If user specifies time range, include timeRange in one of two formats:
+    - Relative: "1h", "30m", "2h", "1d" (last N hours/minutes/days from now)
+    - Absolute: {{"from": "YYYY-MM-DD HH:MM:SS", "to": "YYYY-MM-DD HH:MM:SS"}} (specific time window)
+    - IMPORTANT: For absolute times, use date STRING format, NOT Unix timestamps
+    - The server will handle date parsing - you just provide readable dates
+ 7. If user asks to sort/order results (e.g., "highest first", "sorted by CPU"), include order
+    - CRITICAL: "order.by" must be in format "metricName.AGGREGATION" (e.g., "queueDepth.MEAN", "cpu.used.MAX")
+    - NOT just the metric name alone
+
+ **Return your selections in this exact JSON format:**
+ {{
+   "selectedMetrics": ["exact.metric.name.1", "exact.metric.name.2"],
+   "aggregation": "mean|max|min|sum",
+   "filters": [
+     {{"name": "exact.tag.filter.name.from.schema", "value": "your_value"}}
+   ],
+   "groupBy": ["exact.tag.name.1", "exact.tag.name.2"],
+   "timeRange": "1h|30m|2h|1d" OR {{"from": "2026-01-24 12:25:00", "to": "2026-01-24 14:40:00"}},
+   "order": {{"by": "metric.name.AGGREGATION", "direction": "ASC|DESC"}},
+   "pagination": {{"page": 1, "pageSize": 20}} OR {{"offset": 0, "limit": 20}}
+ }}
+
+ **IMPORTANT REMINDERS:**
+ - Use EXACT names from the schema - copy/paste them, do not type from memory
+ - "selectedMetrics" must be an array, even for a single metric: ["metric.name"]
+ - You can select multiple metrics if the user asks for them (e.g., "runnable and waiting threads")
+ - **"groupBy" - CRITICAL RULE:**
+   - ⚠️ DO NOT include "groupBy" unless user uses explicit grouping keywords
+   - ⚠️ Grouping keywords: "group by", "grouped by", "per", "for each", "by host", "by namespace", "break down by"
+   - ⚠️ If user just asks "show me X" or "get X" or "list X" → NO groupBy (return aggregated result)
+   - ⚠️ If user asks "show me X per Y" or "X grouped by Y" → YES groupBy: ["Y"]
+ - "groupBy" uses tag filter names from the schema (max 5 tags)
+ - "timeRange" is optional - supports two formats:
+   - Relative (default): "1h", "30m", "2h", "1d" for last N hours/minutes/days from now
+   - Absolute: {{"from": "YYYY-MM-DD HH:MM:SS", "to": "YYYY-MM-DD HH:MM:SS"}} for specific time window
+   - ⚠️ CRITICAL: For absolute times, use DATE STRINGS, NOT Unix timestamps
+   - ⚠️ DO NOT calculate timestamps yourself - the server will parse the date strings
+   - Example relative: "2h" means last 2 hours from now
+   - Example absolute: {{"from": "2026-01-24 12:25:00", "to": "2026-01-24 14:40:00"}}
+   - Supported date formats: "YYYY-MM-DD HH:MM:SS", "DD-Month-YYYY HH:MM" (e.g., "24-January-2026 12:25")
+ - "order" is optional - only include if user asks to sort results
+   - "by" MUST be in format "metricName.AGGREGATION" (e.g., "queueDepth.MEAN", "cpu.requests.MAX")
+   - "direction" is ASC or DESC
+   - Example: {{"by": "queueDepth.MEAN", "direction": "DESC"}} for "order by average queue depth descending"
+ - "pagination" is optional - controls how many results to return
+   - Format 1 (page-based): {{"page": 1, "pageSize": 20}} - page 1 with 20 items per page
+   - Format 2 (offset-based): {{"offset": 0, "limit": 20}} - skip 0 items, return 20 items
+   - Format 3 (size only): {{"pageSize": 20}} - return first 20 items
+   - Default: 50 items if not specified
+   - Example: {{"page": 2, "pageSize": 10}} for "show me page 2 with 10 items per page"
+ - When in doubt about a filter name, search the schema carefully or ask for clarification
+ """
+
+         # Create TextContent (Pydantic model)
+         text_content = TextContent(type="text", text=instruction_text)
+
+         # Create EmbeddedResource (Pydantic model)
+         schema_resource = EmbeddedResource(
+             type="resource",
+             resource=TextResourceContents(
+                 uri=f"schema://{entity_type}",
+                 mimeType="application/json",
+                 text=json.dumps(schema, indent=2)
+             )
+         )
+
+         logger.info(f"Created schema elicitation for {entity_type}: {len(metrics)} metrics, {len(tag_filters)} tags")
+
+         # Return list of the MCP content blocks
+         return [text_content, schema_resource]
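A minimal usage sketch for the handler above (not part of the diff): the import path is assumed from the file list, and the FakeIntent stand-in below only models the attributes check_ambiguity actually reads; the real NormalizedIntent type lives elsewhere in the package.

from dataclasses import dataclass

from src.infrastructure.elicitation_handler import ElicitationHandler  # import path assumed from the file list above


@dataclass
class FakeIntent:
    """Illustrative stand-in for the NormalizedIntent object the docstrings mention."""
    entity_class: str = "kubernetes"
    entity_kind: str = "pod"
    metric_category: str = "cpu"


handler = ElicitationHandler()

# Two candidate metrics, so check_ambiguity takes the "multiple matches" path (Check 3).
request = handler.check_ambiguity(
    FakeIntent(),
    registry=None,  # the registry is not consulted on this code path
    resolved_metrics=["cpuRequests", "cpuLimits"],
)
if request:
    print(request.type)      # "choice"
    print(request.message)   # "Multiple metrics match 'cpu'. Please select the correct one:"
    for option in request.options:
        print("-", option["label"])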
@@ -0,0 +1,329 @@
+ """
+ Entity Capability Registry Module
+
+ Manages entity type capabilities (metrics, tags, filters).
+ Loads from schema files and provides exact constant resolution.
+ """
+ import json
+ import logging
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Dict, List, Optional
+
+ logger = logging.getLogger(__name__)
+
+ @dataclass
+ class EntityCapability:
+     """
+     Represents capabilities of an entity type.
+     Contains all metrics and tag filters available for that entity.
+     """
+     entity_type: str    # e.g., "kubernetesPod"
+     api_endpoint: str   # e.g., "/api/infrastructure-monitoring/analyze/entities"
+     metrics: List[str]  # e.g., ["cpuRequests", "cpuLimits", ...]
+     tag_filters: List[str]  # e.g., ["kubernetes.namespace.name", ...]
+     aggregations: List[str]  # e.g., ["mean", "sum", "max", "min"]
+
+ class EntityCapabilityRegistry:
+     """
+     Central registry for entity type capabilities.
+
+     Responsibilities:
+     1. Load schema files from disk
+     2. Cache entity capabilities
+     3. Resolve entity types from normalized intent
+     4. Find exact metric names from categories
+     5. Find exact tag filter names from simple names
+     """
+
+     # Mapping from normalized (class, kind) to entity type
+     ENTITY_TYPE_MAPPING = {
+         ("kubernetes", "pod"): "kubernetesPod",
+         ("kubernetes", "deployment"): "kubernetesDeployment",
+         ("jvm", "runtime"): "jvmRuntimePlatform",
+         ("docker", "container"): "docker",
+         ("db2", "database"): "db2Database",
+         ("ibmmq", "queue"): "ibmMqQueue",
+         ("host", "host"): "host",
+         ("infrastructure", "host"): "host",
+         ("server", "host"): "host",
+     }
+
+     def __init__(self, schema_dir: Path) -> None:
+         """
+         Initialize the registry.
+         Args:
+             schema_dir: Path to directory containing schema JSON files
+         """
+
+         self.schema_dir = Path(schema_dir)
+         self._cache: Dict[str, EntityCapability] = {}
+         self._load_schemas()
+
+     def _load_schemas(self) -> None:
+         """
+         Load all entity schemas from JSON files.
+
+         Looks for files matching pattern: *_schema.json
+         """
+
+         if not self.schema_dir.exists():
+             logger.warning(f"Schema directory does not exist: {self.schema_dir}")
+             return
+
+         schema_files = list(self.schema_dir.glob("*_schema.json"))
+         logger.info(f"Found {len(schema_files)} schema files in {self.schema_dir}")
+
+         for schema_file in schema_files:
+             try:
+                 with open(schema_file, 'r') as f:
+                     schema = json.load(f)
+                 self._parse_schema(schema, schema_file.name)
+
+             except Exception as e:
+                 logger.error(f"Error loading schema {schema_file}: {e}")
+
+     def _parse_schema(self, schema: Dict, filename: str) -> None:
+         """
+         Parse a schema JSON and create EntityCapability.
+
+         Args:
+             schema: Parsed JSON schema
+             filename: Name of the schema file (for logging)
+         """
+
+         try:
+             entity_type = schema.get("type")
+             if not entity_type:
+                 logger.warning(f"Schema {filename} missing 'type' field")
+                 return
+
+             # Extract metrics
+             metrics = []
+             if "parameters" in schema and "metrics" in schema["parameters"]:
+                 metrics_data = schema["parameters"]["metrics"]
+                 if "metric" in metrics_data:
+                     metrics = metrics_data["metric"]
+
+             # Extract tag filters
+             tag_filters = []
+             if "parameters" in schema and "tagFilterElements" in schema["parameters"]:
+                 tag_data = schema["parameters"]["tagFilterElements"]
+                 if "enum" in tag_data:
+                     tag_filters = tag_data["enum"]
+
+             # Extract aggregations
+             aggregations = []
+             if "parameters" in schema and "metrics" in schema["parameters"]:
+                 metrics_data = schema["parameters"]["metrics"]
+                 if "aggregation" in metrics_data and "enum" in metrics_data["aggregation"]:
+                     aggregations = metrics_data["aggregation"]["enum"]
+
+             # Get API endpoint
+             api_endpoint = schema.get("api_endpoint", "")
+
+             # Create EntityCapability
+             capability = EntityCapability(
+                 entity_type=entity_type,
+                 metrics=metrics,
+                 aggregations=aggregations,
+                 tag_filters=tag_filters,
+                 api_endpoint=api_endpoint
+             )
+
+             self._cache[entity_type] = capability
+             logger.info(f"Loaded {entity_type}: {len(metrics)} metrics, {len(tag_filters)} tag filters, {len(aggregations)} aggregations")
+         except Exception as e:
+             logger.error(f"Error loading schema {filename}: {e!s}")
+
+     def resolve(self, entity_class: str, entity_kind: str) -> Optional[EntityCapability]:
+         """
+         Resolve normalized intent to entity capability.
+
+         Args:
+             entity_class: Entity class from normalizer (e.g., "kubernetes")
+             entity_kind: Entity kind from normalizer (e.g., "pod")
+
+         Returns:
+             EntityCapability if found, None otherwise
+
+         Examples:
+             resolve("kubernetes", "pod") → kubernetesPod capability
+             resolve("jvm", "runtime") → jvmRuntimePlatform capability
+         """
+         # Look up entity type from mapping
+         entity_type = self.ENTITY_TYPE_MAPPING.get((entity_class, entity_kind))
+         if not entity_type:
+             logger.warning(f"No entity type mapping found for {entity_class}/{entity_kind}")
+             return None
+
+         # Get capability from cache
+         capability = self._cache.get(entity_type)
+         if not capability:
+             logger.warning(f"No capability found for entity type {entity_type}")
+             return None
+         return capability
+
+     def find_metric(self,
+                     entity_type: str,
+                     metric_category: str,
+                     aggregation: Optional[str] = None) -> Optional[str]:
+         """
+         Find exact metric name from category.
+
+         This is the KEY method that resolves categories to exact metrics!
+
+         Args:
+             entity_type: Entity type (e.g., "kubernetesPod")
+             metric_category: Metric category from normalizer (e.g., "cpu")
+             aggregation: Optional aggregation hint (e.g., "usage")
+
+         Returns:
+             Exact metric name if found, None otherwise
+
+         Examples:
+             find_metric("kubernetesPod", "cpu", "usage") → "cpuRequests"
+             find_metric("jvmRuntimePlatform", "memory", "usage") → "memory.used"
+             find_metric("kubernetesPod", "threads", "blocked") → None (not applicable)
+         """
+         capability = self._cache.get(entity_type)
+         if not capability:
+             logger.warning(f"No capability found for entity type {entity_type}")
+             return None
+
+         # Search for metrics containing the category
+         matches = []
+         category_lower = metric_category.lower()
+         for metric in capability.metrics:
+             metric_lower = metric.lower()
+             # Check if category is contained in metric name
+             if category_lower in metric_lower:
+                 matches.append(metric)
+         if not matches:
+             logger.debug(f"No metrics found for category {metric_category} in entity type {entity_type}")
+             return None
+
+         # If only one match, return it
+         if len(matches) == 1:
+             logger.debug(f"Found exact match: {matches[0]}")
+             return matches[0]
+
+         # Multiple matches - try to narrow down with the aggregation
+         if aggregation:
+             agg_lower = aggregation.lower()
+             for match in matches:
+                 if agg_lower in match.lower():
+                     logger.debug(f"Found match with aggregation: {match}")
+                     return match
+
+         # Return first match (caller can handle multiple matches via elicitation)
+         logger.debug(f"Multiple matches found, returning first: {matches[0]}")
+         return matches[0]
+
+     def find_all_matching_metrics(self, entity_type: str, metric_category: str) -> List[str]:
+         """
+         Find ALL metrics matching a category.
+
+         Used by elicitation handler when multiple matches exist.
+
+         Args:
+             entity_type: Entity type
+             metric_category: Metric category
+
+         Returns:
+             List of all matching metric names
+         """
+         capability = self._cache.get(entity_type)
+         if not capability:
+             return []
+         matches = []
+         category_lower = metric_category.lower()
+         for metric in capability.metrics:
+             if category_lower in metric.lower():
+                 matches.append(metric)
+         return matches
+
+     def find_tag_filter(self, entity_type: str, filter_name: str) -> Optional[str]:
+         """
+         Find exact tag filter name from simple name.
+
+         Args:
+             entity_type: Entity type (e.g., "kubernetesPod")
+             filter_name: Simple filter name (e.g., "namespace")
+
+         Returns:
+             Exact tag filter name if found, None otherwise
+
+         Examples:
+             find_tag_filter("kubernetesPod", "namespace") → "kubernetes.namespace.name"
+             find_tag_filter("kubernetesPod", "cluster") → "kubernetes.cluster.name"
+             find_tag_filter("jvmRuntimePlatform", "host") → "host.name"
+         """
+         capability = self._cache.get(entity_type)
+         if not capability:
+             return None
+
+         # Search for tag filters containing the filter name
+         filter_lower = filter_name.lower()
+         for tag in capability.tag_filters:
+             tag_lower = tag.lower()
+             # Check if filter name is in tag
+             if filter_lower in tag_lower:
+                 logger.debug(f"Found tag filter {tag} for entity type {entity_type} and filter name {filter_name}")
+                 return tag
+
+         logger.debug(f"No tag filter found for entity type {entity_type} and filter name {filter_name}")
+         return None
+
+     def get_all_metrics(self, entity_type: str) -> List[str]:
+         """
+         Get all available metrics for an entity type.
+
+         Used for displaying options to user.
+         """
+         capability = self._cache.get(entity_type)
+         return capability.metrics if capability else []
+
+     def get_all_tag_filters(self, entity_type: str) -> List[str]:
+         """
+         Get all available tag filters for an entity type.
+
+         Used for displaying options to user.
+         """
+         capability = self._cache.get(entity_type)
+         return capability.tag_filters if capability else []
+
+     def get_entity_types(self) -> List[str]:
+         """Get list of all loaded entity types."""
+         return list(self._cache.keys())
+
+     def get_full_schema(self, entity_type: str) -> Optional[dict]:
+         """
+         Get complete raw schema for an entity type.
+
+         Used for machine-facing elicitation - returns the full schema
+         that will be passed to LLM for selection.
+
+         Args:
+             entity_type: Entity type (e.g., "jvmRuntimePlatform")
+
+         Returns:
+             Complete schema dict if found, None otherwise
+         """
+
+         if entity_type not in self._cache:
+             logger.warning(f"Entity type {entity_type} not found in registry")
+             return None
+         schema_file = self.schema_dir / f"{entity_type}_schema.json"
+
+         if not schema_file.exists():
+             logger.error(f"Schema file not found: {schema_file}")
+             return None
+
+         try:
+             with open(schema_file, 'r') as f:
+                 schema = json.load(f)
+             return schema
+         except Exception as e:
+             logger.error(f"Error loading schema file {schema_file}: {e}")
+             return None
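A minimal usage sketch for the registry above (not part of the diff): the schema contents, the temporary directory, and the import path are assumptions for illustration; it only covers the fields _parse_schema reads, and a real Instana schema file would typically define many more metrics and tag filters.

import json
import tempfile
from pathlib import Path

from src.infrastructure.entity_registry import EntityCapabilityRegistry  # import path assumed from the file list above

# Invented minimal schema covering only the fields _parse_schema reads.
example_schema = {
    "type": "kubernetesPod",
    "api_endpoint": "/api/infrastructure-monitoring/analyze/entities",
    "parameters": {
        "metrics": {
            "metric": ["cpuRequests", "cpuLimits", "memoryRequests"],
            "aggregation": {"enum": ["mean", "sum", "max", "min"]},
        },
        "tagFilterElements": {"enum": ["kubernetes.namespace.name", "kubernetes.cluster.name"]},
    },
}

# Write the schema to a throwaway directory so _load_schemas can pick it up via *_schema.json.
schema_dir = Path(tempfile.mkdtemp())
(schema_dir / "kubernetesPod_schema.json").write_text(json.dumps(example_schema))

registry = EntityCapabilityRegistry(schema_dir)
capability = registry.resolve("kubernetes", "pod")             # kubernetesPod capability via ENTITY_TYPE_MAPPING
print(capability.api_endpoint)                                 # "/api/infrastructure-monitoring/analyze/entities"
print(registry.find_metric("kubernetesPod", "cpu"))            # "cpuRequests" - first of the two "cpu" matches
print(registry.find_tag_filter("kubernetesPod", "namespace"))  # "kubernetes.namespace.name"
print(registry.get_full_schema("kubernetesPod")["type"])       # reloads the raw JSON from disk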