iflow-mcp_drdroidlab-grafana-mcp-server 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grafana_mcp_server/__init__.py +0 -0
- grafana_mcp_server/config.yaml +19 -0
- grafana_mcp_server/mcp_server.py +643 -0
- grafana_mcp_server/processor/__init__.py +0 -0
- grafana_mcp_server/processor/grafana_processor.py +771 -0
- grafana_mcp_server/processor/processor.py +0 -0
- grafana_mcp_server/stdio_server.py +36 -0
- iflow_mcp_drdroidlab_grafana_mcp_server-0.1.0.dist-info/METADATA +290 -0
- iflow_mcp_drdroidlab_grafana_mcp_server-0.1.0.dist-info/RECORD +12 -0
- iflow_mcp_drdroidlab_grafana_mcp_server-0.1.0.dist-info/WHEEL +4 -0
- iflow_mcp_drdroidlab_grafana_mcp_server-0.1.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_drdroidlab_grafana_mcp_server-0.1.0.dist-info/licenses/LICENSE +21 -0
The hunk below is the new file grafana_mcp_server/processor/grafana_processor.py (771 added lines):
@@ -0,0 +1,771 @@
import datetime
import json
import logging
import re
from typing import Any, Optional

import requests

logger = logging.getLogger(__name__)


class Processor:
    """Base processor interface"""

    def get_connection(self):
        pass

    def test_connection(self):
        pass


class GrafanaApiProcessor(Processor):
    """
    Grafana API processor for handling Grafana API interactions.
    Uses API key authentication.
    """

    def __init__(self, grafana_host, grafana_api_key, ssl_verify="true"):
        """
        Initialize Grafana API processor.

        Args:
            grafana_host: Grafana instance URL (e.g., https://grafana.example.com)
            grafana_api_key: API key for authentication
            ssl_verify: Whether to verify SSL certificates ("true" or "false")
        """
        self.__host = grafana_host.rstrip("/")  # Remove trailing slash
        self.__api_key = grafana_api_key
        self.__ssl_verify = not (ssl_verify and ssl_verify.lower() == "false")
        self.headers = {
            "Authorization": f"Bearer {self.__api_key}",
            "Content-Type": "application/json",
        }

        logger.info(f"Initialized Grafana processor with host: {self.__host}")

    def get_connection(self):
        """Return connection details for debugging"""
        return {
            "host": self.__host,
            "ssl_verify": self.__ssl_verify,
            "auth_method": "api_key",
            "headers": {k: v for k, v in self.headers.items() if k != "Authorization"},
        }

    def test_connection(self):
        """
        Test connection to Grafana API to verify configuration and connectivity.
        Uses the /api/datasources endpoint to verify API access.

        Returns:
            bool: True if connection successful

        Raises:
            Exception: If connection fails with details about the failure
        """
        try:
            url = f"{self.__host}/api/datasources"
            logger.info(f"Testing Grafana connection to: {url}")

            response = requests.get(url, headers=self.headers, verify=self.__ssl_verify, timeout=20)
            if response and response.status_code == 200:
                logger.info("Successfully connected to Grafana API")
                return True
            else:
                status_code = response.status_code if response else None
                raise Exception(f"Failed to connect with Grafana. Status Code: {status_code}. Response Text: {response.text}")
        except Exception as e:
            logger.error(f"Exception occurred while fetching grafana data sources with error: {e}")
            raise e
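A minimal usage sketch for the constructor and connection check above (illustrative only, not part of grafana_processor.py; the host and API key are placeholders, and the import path follows the package layout listed at the top of this diff):

from grafana_mcp_server.processor.grafana_processor import GrafanaApiProcessor

processor = GrafanaApiProcessor(
    grafana_host="https://grafana.example.com",  # placeholder URL
    grafana_api_key="glsa_xxxxxxxx",             # placeholder service-account token
    ssl_verify="true",
)
processor.test_connection()  # returns True, or raises with the Grafana status code and body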

    def _get_time_range(self, start_time=None, end_time=None, duration=None, default_hours=3):
        """
        Returns (start_dt, end_dt) as UTC datetimes.
        - If start_time and end_time are provided, use those.
        - Else if duration is provided, use (now - duration, now).
        - Else, use (now - default_hours, now).
        """
        now_dt = datetime.datetime.now(datetime.timezone.utc)
        if start_time and end_time:
            start_dt = self._parse_time(start_time)
            end_dt = self._parse_time(end_time)
            if not start_dt or not end_dt:
                start_dt = now_dt - datetime.timedelta(hours=default_hours)
                end_dt = now_dt
        elif duration:
            dur_ms = self._parse_duration(duration)
            if dur_ms is None:
                dur_ms = default_hours * 60 * 60 * 1000
            start_dt = now_dt - datetime.timedelta(milliseconds=dur_ms)
            end_dt = now_dt
        else:
            start_dt = now_dt - datetime.timedelta(hours=default_hours)
            end_dt = now_dt
        return start_dt, end_dt

    def _parse_duration(self, duration_str):
        """Parse duration string like '2h', '90m' into milliseconds."""
        if not duration_str or not isinstance(duration_str, str):
            return None
        match = re.match(r"^(\d+)([smhd])$", duration_str.strip().lower())
        if match:
            value, unit = match.groups()
            value = int(value)
            if unit == "s":
                return value * 1000
            elif unit == "m":
                return value * 60 * 1000
            elif unit == "h":
                return value * 60 * 60 * 1000
            elif unit == "d":
                return value * 24 * 60 * 60 * 1000
        try:
            # fallback: try to parse as integer minutes
            value = int(duration_str)
            return value * 60 * 1000
        except Exception as e:
            logger.error(f"_parse_duration: Exception parsing '{duration_str}': {e}")
            return None

    def _parse_time(self, time_str):
        """
        Parse a time string in RFC3339, 'now', or 'now-2h', 'now-30m', etc. Returns a UTC datetime.
        """
        if not time_str or not isinstance(time_str, str):
            logger.error(f"_parse_time: Invalid input (not a string): {time_str}")
            return None
        time_str_orig = time_str
        time_str = time_str.strip().lower()
        if time_str.startswith("now"):
            if "-" in time_str:
                match = re.match(r"now-(\d+)([smhd])", time_str)
                if match:
                    value, unit = match.groups()
                    value = int(value)
                    if unit == "s":
                        delta = datetime.timedelta(seconds=value)
                    elif unit == "m":
                        delta = datetime.timedelta(minutes=value)
                    elif unit == "h":
                        delta = datetime.timedelta(hours=value)
                    elif unit == "d":
                        delta = datetime.timedelta(days=value)
                    else:
                        delta = datetime.timedelta()
                    logger.debug(f"_parse_time: Parsed relative time '{time_str_orig}' as now - {value}{unit}")
                    return datetime.datetime.now(datetime.timezone.utc) - delta
            logger.debug(f"_parse_time: Parsed 'now' as current UTC time for input '{time_str_orig}'")
            return datetime.datetime.now(datetime.timezone.utc)
        else:
            try:
                # Try parsing as RFC3339 or other datetime formats
                dt = datetime.datetime.fromisoformat(time_str_orig.replace("Z", "+00:00"))
                if dt.tzinfo is None:
                    dt = dt.replace(tzinfo=datetime.timezone.utc)
                logger.debug(f"_parse_time: Successfully parsed '{time_str_orig}' as {dt.isoformat()}")
                return dt.astimezone(datetime.timezone.utc)
            except Exception as e:
                logger.error(f"_parse_time: Exception parsing '{time_str_orig}': {e}")
                return None
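The three helpers above normalise every time input to a UTC (start, end) pair. A quick sketch of how the accepted formats resolve, calling the private helpers directly (illustrative only; `processor` is the instance from the first sketch):

processor._get_time_range()                                      # default window: last 3 hours
processor._get_time_range(duration="90m")                        # (now - 90 minutes, now)
processor._get_time_range(start_time="now-2h", end_time="now")   # relative 'now-...' strings
processor._get_time_range(
    start_time="2023-01-01T00:00:00Z",
    end_time="2023-01-01T06:00:00Z",
)                                                                # absolute RFC3339 window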

    def grafana_promql_query(
        self,
        datasource_uid: str,
        query: str,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        duration: Optional[str] = None,
    ) -> dict[str, Any]:
        """
        Executes PromQL queries against Grafana's Prometheus datasource.

        Args:
            datasource_uid: Prometheus datasource UID
            query: PromQL query string
            start_time: Start time in RFC3339 or relative string (e.g., 'now-2h', '2023-01-01T00:00:00Z')
            end_time: End time in RFC3339 or relative string (e.g., 'now-2h', '2023-01-01T00:00:00Z')
            duration: Duration string for the time window (e.g., '2h', '90m')

        Returns:
            Dict containing query results with optimized time series data
        """
        try:
            # Use standardized time range logic
            start_dt, end_dt = self._get_time_range(start_time, end_time, duration, default_hours=3)

            # Convert to milliseconds since epoch (Grafana format)
            start_ms = int(start_dt.timestamp() * 1000)
            end_ms = int(end_dt.timestamp() * 1000)

            payload = {
                "queries": [
                    {
                        "refId": "A",
                        "expr": query,
                        "editorMode": "code",
                        "legendFormat": "__auto",
                        "range": True,
                        "exemplar": False,
                        "requestId": "A",
                        "utcOffsetSec": 0,
                        "scopes": [],
                        "adhocFilters": [],
                        "interval": "",
                        "datasource": {"type": "prometheus", "uid": datasource_uid},
                        "intervalMs": 30000,
                        "maxDataPoints": 1000,
                    }
                ],
                "from": str(start_ms),
                "to": str(end_ms),
            }

            url = f"{self.__host}/api/ds/query"
            logger.info(f"Executing PromQL query: {query} from {start_dt.isoformat()} to {end_dt.isoformat()}")

            response = requests.post(
                url,
                headers=self.headers,
                json=payload,
                verify=self.__ssl_verify,
                timeout=30,
            )

            if response.status_code == 200:
                data = response.json()
                # Optimize time series data to reduce token size
                optimized_data = self._optimize_time_series_data(data)
                return {
                    "status": "success",
                    "query": query,
                    "start_time": start_dt.isoformat(),
                    "end_time": end_dt.isoformat(),
                    "duration": duration,
                    "results": optimized_data,
                }
            else:
                raise Exception(f"PromQL query failed. Status: {response.status_code}, Response: {response.text}")

        except Exception as e:
            logger.error(f"Error executing PromQL query: {e!s}")
            raise e
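A hedged usage sketch for the PromQL wrapper above (the datasource UID and expression are placeholders; the wrapper posts to Grafana's /api/ds/query endpoint exactly as shown in the method):

result = processor.grafana_promql_query(
    datasource_uid="prometheus-uid",             # placeholder Prometheus datasource UID
    query='sum(rate(http_requests_total[5m]))',  # placeholder PromQL expression
    duration="2h",
)
print(result["status"], result["start_time"], result["end_time"])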

    def grafana_loki_query(
        self,
        datasource_uid: str,
        query: str,
        duration: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        limit: int = 100,
    ) -> dict[str, Any]:
        """
        Queries Grafana Loki for log data.

        Args:
            query: Loki query string
            duration: Time duration (e.g., '5m', '1h', '2d') - overrides start_time/end_time if provided
            start_time: Start time in RFC3339 or relative string (e.g., 'now-2h', '2023-01-01T00:00:00Z')
            end_time: End time in RFC3339 or relative string (e.g., 'now-2h', '2023-01-01T00:00:00Z')
            limit: Maximum number of log entries to return

        Returns:
            Dict containing log data from Loki datasource
        """
        try:
            # Use standardized time range logic
            start_dt, end_dt = self._get_time_range(start_time, end_time, duration, default_hours=1)

            # Convert to milliseconds since epoch (Grafana format)
            start_ms = int(start_dt.timestamp() * 1000)
            end_ms = int(end_dt.timestamp() * 1000)

            payload = {
                "queries": [
                    {
                        "refId": "A",
                        "expr": query,
                        "datasource": {"type": "loki", "uid": datasource_uid},
                        "maxLines": limit,
                    }
                ],
                "from": str(start_ms),
                "to": str(end_ms),
            }

            url = f"{self.__host}/api/ds/query"
            logger.info(f"Executing Loki query: {query} from {start_dt.isoformat()} to {end_dt.isoformat()}")

            response = requests.post(
                url,
                headers=self.headers,
                json=payload,
                verify=self.__ssl_verify,
                timeout=30,
            )

            if response.status_code == 200:
                data = response.json()
                return {
                    "status": "success",
                    "query": query,
                    "start_time": start_dt.isoformat(),
                    "end_time": end_dt.isoformat(),
                    "duration": duration,
                    "limit": limit,
                    "results": data,
                }
            else:
                raise Exception(f"Loki query failed. Status: {response.status_code}, Response: {response.text}")

        except Exception as e:
            logger.error(f"Error executing Loki query: {e!s}")
            raise e
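The Loki wrapper follows the same pattern with a one-hour default window and a line limit. A sketch (UID and LogQL selector are placeholders):

logs = processor.grafana_loki_query(
    datasource_uid="loki-uid",            # placeholder Loki datasource UID
    query='{app="checkout"} |= "error"',  # placeholder LogQL query
    duration="1h",
    limit=50,
)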

    def grafana_get_dashboard_config_details(self, dashboard_uid: str) -> dict[str, Any]:
        """
        Retrieves dashboard configuration details from the database.

        Args:
            dashboard_uid: Dashboard UID

        Returns:
            Dict containing dashboard configuration metadata
        """
        try:
            # This would typically query a database, but for now we'll use Grafana API
            url = f"{self.__host}/api/dashboards/uid/{dashboard_uid}"
            logger.info(f"Fetching dashboard config for UID: {dashboard_uid}")

            response = requests.get(url, headers=self.headers, verify=self.__ssl_verify, timeout=20)

            if response.status_code == 200:
                dashboard_data = response.json()
                return {
                    "status": "success",
                    "dashboard_uid": dashboard_uid,
                    "dashboard": dashboard_data.get("dashboard", {}),
                    "meta": dashboard_data.get("meta", {}),
                }
            else:
                raise Exception(f"Failed to fetch dashboard config. Status: {response.status_code}, Response: {response.text}")

        except Exception as e:
            logger.error(f"Error fetching dashboard config: {e!s}")
            raise e
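A sketch showing how the returned dashboard JSON can be used to discover panel IDs before calling the panel-query helper defined next (the dashboard UID is a placeholder):

config = processor.grafana_get_dashboard_config_details("dashboard-uid")
for panel in config["dashboard"].get("panels", []):
    print(panel.get("id"), panel.get("title"), panel.get("type"))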

    def grafana_query_dashboard_panels(
        self,
        dashboard_uid: str,
        panel_ids: list[int],
        template_variables: Optional[dict[str, str]] = None,
    ) -> dict[str, Any]:
        """
        Executes queries for specific dashboard panels.

        Args:
            dashboard_uid: Dashboard UID
            panel_ids: List of panel IDs to query (max 4)
            template_variables: Template variables for the dashboard

        Returns:
            Dict containing panel data with optimized metrics
        """
        try:
            if len(panel_ids) > 4:
                raise ValueError("Maximum 4 panels can be queried at once")

            logger.info(f"Querying dashboard panels: {dashboard_uid}, panel_ids: {panel_ids}")

            # First get dashboard configuration
            dashboard_url = f"{self.__host}/api/dashboards/uid/{dashboard_uid}"
            dashboard_response = requests.get(
                dashboard_url,
                headers=self.headers,
                verify=self.__ssl_verify,
                timeout=20,
            )

            if dashboard_response.status_code != 200:
                raise Exception(f"Failed to fetch dashboard. Status: {dashboard_response.status_code}")

            dashboard_data = dashboard_response.json()
            dashboard = dashboard_data.get("dashboard", {})

            # Handle both old and new dashboard structures
            panels = dashboard.get("panels", [])
            if not panels:
                # Try to get panels from rows (newer dashboard structure)
                rows = dashboard.get("rows", [])
                for row in rows:
                    row_panels = row.get("panels", [])
                    panels.extend(row_panels)

            logger.info(f"Found {len(panels)} panels in dashboard")

            # Filter panels by requested IDs
            target_panels = [panel for panel in panels if panel.get("id") in panel_ids]

            if not target_panels:
                logger.warning(f"No panels found with IDs: {panel_ids}")
                logger.info(f"Available panel IDs: {[panel.get('id') for panel in panels]}")
                raise Exception(f"No panels found with IDs: {panel_ids}")

            logger.info(f"Found {len(target_panels)} target panels")

            # Execute queries for each panel
            panel_results = []
            for panel in target_panels:
                logger.info(f"Processing panel {panel.get('id')}: {panel.get('title', 'Unknown')}")
                panel_result = self._execute_panel_query(panel, template_variables or {})
                panel_results.append(
                    {
                        "panel_id": panel.get("id"),
                        "title": panel.get("title"),
                        "type": panel.get("type"),
                        "data": panel_result,
                    }
                )

            return {
                "status": "success",
                "dashboard_uid": dashboard_uid,
                "panel_ids": panel_ids,
                "template_variables": template_variables,
                "results": panel_results,
            }

        except Exception as e:
            logger.error(f"Error querying dashboard panels: {e!s}")
            raise e
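A usage sketch for the panel helper above (UID, panel IDs, and the variable map are placeholders; at most four panel IDs are accepted per call):

panels = processor.grafana_query_dashboard_panels(
    dashboard_uid="dashboard-uid",
    panel_ids=[2, 4],
    template_variables={"namespace": "production"},  # substituted into $namespace / ${namespace}
)
for item in panels["results"]:
    print(item["panel_id"], item["title"], item["data"].get("status") or item["data"].get("error"))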

    def grafana_fetch_dashboard_variable_label_values(
        self, datasource_uid: str, label_name: str, metric_match_filter: Optional[str] = None
    ) -> dict[str, Any]:
        """
        Fetches label values for dashboard variables from Prometheus datasource.

        Args:
            datasource_uid: Prometheus datasource UID
            label_name: Label name to fetch values for (e.g., "instance", "job")
            metric_match_filter: Optional metric name filter (e.g., "up", "node_cpu_seconds_total")

        Returns:
            Dict containing list of available label values
        """
        try:
            url = f"{self.__host}/api/datasources/proxy/uid/{datasource_uid}/api/v1/label/{label_name}/values"
            params = {}

            if metric_match_filter:
                params["match[]"] = metric_match_filter

            logger.info(f"Fetching label values for: {label_name} from Prometheus API")

            response = requests.get(url, headers=self.headers, params=params, verify=self.__ssl_verify, timeout=20)

            if response and response.status_code == 200:
                label_values = response.json().get("data", [])

                return {
                    "status": "success",
                    "datasource_uid": datasource_uid,
                    "label_name": label_name,
                    "metric_match_filter": metric_match_filter,
                    "values": label_values,
                }
            else:
                status_code = response.status_code if response else None
                error_msg = (
                    f"Failed to fetch label values for {label_name}. Status: {status_code}, Response: {response.text if response else 'No response'}"
                )
                logger.error(error_msg)
                raise Exception(error_msg)

        except Exception as e:
            logger.error(f"Exception occurred while fetching promql metric labels for {label_name} with error: {e}")
            raise e
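A sketch for the label-values helper, which proxies Prometheus' /api/v1/label/<name>/values endpoint through Grafana (the UID is a placeholder):

values = processor.grafana_fetch_dashboard_variable_label_values(
    datasource_uid="prometheus-uid",
    label_name="namespace",
    metric_match_filter="up",  # optional series selector
)
print(values["values"])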

    def grafana_fetch_dashboard_variables(self, dashboard_uid: str) -> dict[str, Any]:
        """
        Fetches all variables and their values from a Grafana dashboard.

        Args:
            dashboard_uid: Dashboard UID

        Returns:
            Dict containing dashboard variables and their values
        """
        try:
            url = f"{self.__host}/api/dashboards/uid/{dashboard_uid}"
            logger.info(f"Fetching dashboard variables for UID: {dashboard_uid}")

            response = requests.get(url, headers=self.headers, verify=self.__ssl_verify, timeout=20)

            if response.status_code == 200:
                dashboard_data = response.json()
                dashboard = dashboard_data.get("dashboard", {})
                templating = dashboard.get("templating", {})
                variables = templating.get("list", [])

                # Extract variable information
                variable_details = []
                for var in variables:
                    variable_details.append(
                        {
                            "name": var.get("name"),
                            "type": var.get("type"),
                            "current_value": var.get("current", {}).get("value"),
                            "options": var.get("options", []),
                            "query": var.get("query"),
                            "definition": var.get("definition"),
                        }
                    )

                return {
                    "status": "success",
                    "dashboard_uid": dashboard_uid,
                    "variables": variable_details,
                }
            else:
                raise Exception(f"Failed to fetch dashboard variables. Status: {response.status_code}, Response: {response.text}")

        except Exception as e:
            logger.error(f"Error fetching dashboard variables: {e!s}")
            raise e
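A sketch tying the variable helpers together: read a dashboard's template variables and feed their current values into a panel query. Multi-value variables are skipped here because the substitution in _execute_panel_query expects strings (UID and panel ID are placeholders):

vars_info = processor.grafana_fetch_dashboard_variables("dashboard-uid")
current = {
    v["name"]: v["current_value"]
    for v in vars_info["variables"]
    if isinstance(v["current_value"], str)
}
processor.grafana_query_dashboard_panels("dashboard-uid", panel_ids=[1], template_variables=current)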

    def grafana_fetch_all_dashboards(self, limit: int = 100) -> dict[str, Any]:
        """
        Fetches all dashboards from Grafana.

        Args:
            limit: Maximum number of dashboards to return

        Returns:
            Dict containing list of dashboards with basic information
        """
        try:
            url = f"{self.__host}/api/search"
            params = {"limit": limit}
            logger.info(f"Fetching all dashboards (limit: {limit})")

            response = requests.get(
                url,
                headers=self.headers,
                params=params,
                verify=self.__ssl_verify,
                timeout=20,
            )

            if response.status_code == 200:
                dashboards = response.json()
                # Extract relevant information
                dashboard_list = []
                for dashboard in dashboards:
                    dashboard_list.append(
                        {
                            "uid": dashboard.get("uid"),
                            "title": dashboard.get("title"),
                            "type": dashboard.get("type"),
                            "url": dashboard.get("url"),
                            "folder_title": dashboard.get("folderTitle"),
                            "folder_uid": dashboard.get("folderUid"),
                            "tags": dashboard.get("tags", []),
                            "is_starred": dashboard.get("isStarred", False),
                        }
                    )

                return {
                    "status": "success",
                    "total_count": len(dashboard_list),
                    "limit": limit,
                    "dashboards": dashboard_list,
                }
            else:
                raise Exception(f"Failed to fetch dashboards. Status: {response.status_code}, Response: {response.text}")

        except Exception as e:
            logger.error(f"Error fetching dashboards: {e!s}")
            raise e

    def grafana_fetch_datasources(self) -> dict[str, Any]:
        """
        Fetches all datasources from Grafana.

        Returns:
            Dict containing list of datasources
        """
        try:
            url = f"{self.__host}/api/datasources"
            logger.info("Fetching all datasources")

            response = requests.get(url, headers=self.headers, verify=self.__ssl_verify, timeout=20)

            if response.status_code == 200:
                datasources = response.json()
                # Extract relevant information
                datasource_list = []
                for ds in datasources:
                    datasource_list.append(
                        {
                            "id": ds.get("id"),
                            "uid": ds.get("uid"),
                            "name": ds.get("name"),
                            "type": ds.get("type"),
                            "url": ds.get("url"),
                            "access": ds.get("access"),
                            "database": ds.get("database"),
                            "is_default": ds.get("isDefault", False),
                            "json_data": ds.get("jsonData", {}),
                            "secure_json_data": dict.fromkeys(ds.get("secureJsonData", {}).keys(), "***"),
                        }
                    )

                return {
                    "status": "success",
                    "total_count": len(datasource_list),
                    "datasources": datasource_list,
                }
            else:
                raise Exception(f"Failed to fetch datasources. Status: {response.status_code}, Response: {response.text}")

        except Exception as e:
            logger.error(f"Error fetching datasources: {e!s}")
            raise e
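A discovery sketch combining the search and datasource helpers: pick the first Prometheus datasource (assumes one exists), list a few dashboards, and run an ad-hoc query against it (illustrative only):

datasources = processor.grafana_fetch_datasources()["datasources"]
prom_uid = next(ds["uid"] for ds in datasources if ds["type"] == "prometheus")
dashboards = processor.grafana_fetch_all_dashboards(limit=20)["dashboards"]
print([d["title"] for d in dashboards])
processor.grafana_promql_query(prom_uid, "up", duration="30m")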

    def grafana_fetch_folders(self) -> dict[str, Any]:
        """
        Fetches all folders from Grafana.

        Returns:
            Dict containing list of folders
        """
        try:
            url = f"{self.__host}/api/folders"
            logger.info("Fetching all folders")

            response = requests.get(url, headers=self.headers, verify=self.__ssl_verify, timeout=20)

            if response.status_code == 200:
                folders = response.json()
                # Extract relevant information
                folder_list = []
                for folder in folders:
                    folder_list.append(
                        {
                            "id": folder.get("id"),
                            "uid": folder.get("uid"),
                            "title": folder.get("title"),
                            "url": folder.get("url"),
                            "has_acl": folder.get("hasAcl", False),
                            "can_save": folder.get("canSave", False),
                            "can_edit": folder.get("canEdit", False),
                            "can_admin": folder.get("canAdmin", False),
                            "created": folder.get("created"),
                            "updated": folder.get("updated"),
                            "created_by": folder.get("createdBy"),
                            "updated_by": folder.get("updatedBy"),
                            "version": folder.get("version"),
                        }
                    )

                return {
                    "status": "success",
                    "total_count": len(folder_list),
                    "folders": folder_list,
                }
            else:
                raise Exception(f"Failed to fetch folders. Status: {response.status_code}, Response: {response.text}")

        except Exception as e:
            logger.error(f"Error fetching folders: {e!s}")
            raise e

    def _optimize_time_series_data(self, data: dict[str, Any]) -> dict[str, Any]:
        """Optimize time series data to reduce token size"""
        try:
            # Sample data points if there are too many
            for result in data.get("results", {}).values():
                if "frames" in result:
                    for frame in result["frames"]:
                        if "data" in frame and "values" in frame["data"]:
                            values = frame["data"]["values"]
                            if len(values) > 0 and len(values[0]) > 1000:
                                # Sample every 10th point
                                for i in range(len(values)):
                                    values[i] = values[i][::10]
            return data
        except Exception as e:
            logger.warning(f"Error optimizing time series data: {e}")
            return data
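A standalone sketch of the downsampling rule above: when a frame carries more than 1000 points, every column is thinned to every 10th sample. The fake frame structure below is illustrative only:

frame = {"data": {"values": [list(range(5000)), list(range(5000))]}}  # timestamps, values
fake_response = {"results": {"A": {"frames": [frame]}}}
processor._optimize_time_series_data(fake_response)
print(len(frame["data"]["values"][0]))  # 500 points remain after [::10] sampling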

    def _execute_panel_query(self, panel: dict[str, Any], template_variables: dict[str, str]) -> dict[str, Any]:
        """Execute query for a specific panel"""
        try:
            logger.info(f"Executing panel query for panel: {panel.get('title', 'Unknown')}")
            logger.debug(f"Panel structure: {json.dumps(panel, indent=2)}")

            targets = panel.get("targets", [])
            if not targets:
                logger.warning(f"No targets found for panel: {panel.get('title', 'Unknown')}")
                return {"error": "No targets found for panel"}

            # For now, execute the first target
            target = targets[0]
            logger.debug(f"Target structure: {json.dumps(target, indent=2)}")

            # Extract query expression
            query = target.get("expr", "")
            if not query:
                logger.warning(f"No query expression found in target for panel: {panel.get('title', 'Unknown')}")
                return {"error": "No query expression found in target"}

            # Extract datasource information
            datasource = target.get("datasource", {})
            logger.debug(f"Datasource info: {datasource}")

            # Handle different datasource formats
            datasource_uid = None
            if isinstance(datasource, str):
                datasource_uid = datasource
            elif isinstance(datasource, dict):
                datasource_uid = datasource.get("uid")
                if not datasource_uid:
                    datasource_uid = datasource.get("id")  # Fallback to id
            else:
                logger.warning(f"Unexpected datasource format: {type(datasource)}")
                return {"error": f"Unexpected datasource format: {type(datasource)}"}

            if not datasource_uid:
                logger.warning(f"No datasource UID found for panel: {panel.get('title', 'Unknown')}")
                # Try to get datasource from panel level
                panel_datasource = panel.get("datasource", {})
                if isinstance(panel_datasource, dict):
                    datasource_uid = panel_datasource.get("uid") or panel_datasource.get("id")
                elif isinstance(panel_datasource, str):
                    datasource_uid = panel_datasource

            if not datasource_uid:
                return {"error": "No datasource UID found"}

            logger.info(f"Executing query: {query} with datasource: {datasource_uid}")

            # Apply template variables - fix the replacement pattern
            original_query = query
            for var_name, var_value in template_variables.items():
                # Replace both $var and ${var} patterns
                query = query.replace(f"${var_name}", var_value)
                query = query.replace(f"${{{var_name}}}", var_value)

            if original_query != query:
                logger.info(f"Applied template variables. Original: {original_query}, Modified: {query}")

            # Execute the query with a reasonable time range
            result = self.grafana_promql_query(datasource_uid, query, duration="1h")

            return result

        except Exception as e:
            logger.error(f"Error executing panel query: {e}")
            return {"error": str(e)}
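The $var / ${var} handling in _execute_panel_query is plain string replacement. A standalone sketch of that step (the expression and variable map are illustrative):

query = 'sum(rate(http_requests_total{namespace="$namespace"}[5m])) / ${factor}'
template_variables = {"namespace": "production", "factor": "2"}
for var_name, var_value in template_variables.items():
    query = query.replace(f"${var_name}", var_value)
    query = query.replace(f"${{{var_name}}}", var_value)
print(query)  # sum(rate(http_requests_total{namespace="production"}[5m])) / 2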