holmesgpt 0.13.3a0__py3-none-any.whl → 0.14.1__py3-none-any.whl
This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in that registry.
Potentially problematic release.
This version of holmesgpt might be problematic.
- holmes/__init__.py +1 -1
- holmes/clients/robusta_client.py +15 -4
- holmes/common/env_vars.py +8 -1
- holmes/config.py +66 -139
- holmes/core/investigation.py +1 -2
- holmes/core/llm.py +295 -52
- holmes/core/models.py +2 -0
- holmes/core/safeguards.py +4 -4
- holmes/core/supabase_dal.py +14 -8
- holmes/core/tool_calling_llm.py +202 -177
- holmes/core/tools.py +260 -25
- holmes/core/tools_utils/data_types.py +81 -0
- holmes/core/tools_utils/tool_context_window_limiter.py +33 -0
- holmes/core/tools_utils/tool_executor.py +2 -2
- holmes/core/toolset_manager.py +150 -3
- holmes/core/tracing.py +6 -1
- holmes/core/transformers/__init__.py +23 -0
- holmes/core/transformers/base.py +62 -0
- holmes/core/transformers/llm_summarize.py +174 -0
- holmes/core/transformers/registry.py +122 -0
- holmes/core/transformers/transformer.py +31 -0
- holmes/main.py +5 -0
- holmes/plugins/prompts/_fetch_logs.jinja2 +10 -1
- holmes/plugins/toolsets/aks-node-health.yaml +46 -0
- holmes/plugins/toolsets/aks.yaml +64 -0
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +17 -15
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +8 -4
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +3 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +3 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +4 -4
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +7 -3
- holmes/plugins/toolsets/bash/bash_toolset.py +6 -6
- holmes/plugins/toolsets/bash/common/bash.py +7 -7
- holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
- holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
- holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_general.py +345 -207
- holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +190 -19
- holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +96 -32
- holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +10 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +21 -22
- holmes/plugins/toolsets/git.py +22 -22
- holmes/plugins/toolsets/grafana/common.py +14 -2
- holmes/plugins/toolsets/grafana/grafana_tempo_api.py +473 -0
- holmes/plugins/toolsets/grafana/toolset_grafana.py +4 -4
- holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +5 -4
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +246 -11
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +662 -290
- holmes/plugins/toolsets/grafana/trace_parser.py +1 -1
- holmes/plugins/toolsets/internet/internet.py +3 -3
- holmes/plugins/toolsets/internet/notion.py +3 -3
- holmes/plugins/toolsets/investigator/core_investigation.py +3 -3
- holmes/plugins/toolsets/kafka.py +18 -18
- holmes/plugins/toolsets/kubernetes.yaml +58 -0
- holmes/plugins/toolsets/kubernetes_logs.py +6 -6
- holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
- holmes/plugins/toolsets/logging_utils/logging_api.py +1 -1
- holmes/plugins/toolsets/mcp/toolset_mcp.py +4 -4
- holmes/plugins/toolsets/newrelic.py +8 -8
- holmes/plugins/toolsets/opensearch/opensearch.py +5 -5
- holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +10 -10
- holmes/plugins/toolsets/prometheus/prometheus.py +841 -351
- holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +39 -2
- holmes/plugins/toolsets/prometheus/utils.py +28 -0
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +6 -4
- holmes/plugins/toolsets/robusta/robusta.py +10 -10
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +4 -4
- holmes/plugins/toolsets/servicenow/servicenow.py +6 -6
- holmes/plugins/toolsets/utils.py +88 -0
- holmes/utils/config_utils.py +91 -0
- holmes/utils/env.py +7 -0
- holmes/utils/holmes_status.py +2 -1
- holmes/utils/sentry_helper.py +41 -0
- holmes/utils/stream.py +9 -0
- {holmesgpt-0.13.3a0.dist-info → holmesgpt-0.14.1.dist-info}/METADATA +11 -15
- {holmesgpt-0.13.3a0.dist-info → holmesgpt-0.14.1.dist-info}/RECORD +85 -75
- holmes/plugins/toolsets/grafana/tempo_api.py +0 -124
- {holmesgpt-0.13.3a0.dist-info → holmesgpt-0.14.1.dist-info}/LICENSE.txt +0 -0
- {holmesgpt-0.13.3a0.dist-info → holmesgpt-0.14.1.dist-info}/WHEEL +0 -0
- {holmesgpt-0.13.3a0.dist-info → holmesgpt-0.14.1.dist-info}/entry_points.txt +0 -0
holmes/plugins/toolsets/grafana/grafana_tempo_api.py (new file)
@@ -0,0 +1,473 @@
+"""Grafana Tempo API wrapper for querying traces and metrics."""
+
+import logging
+from typing import Any, Dict, Optional, Union
+from urllib.parse import quote
+
+import backoff
+import requests  # type: ignore
+
+from holmes.plugins.toolsets.grafana.common import (
+    GrafanaTempoConfig,
+    build_headers,
+    get_base_url,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+class TempoAPIError(Exception):
+    """Custom exception for Tempo API errors with detailed response information."""
+
+    def __init__(self, status_code: int, response_text: str, url: str):
+        self.status_code = status_code
+        self.response_text = response_text
+        self.url = url
+
+        # Try to extract error message from JSON response
+        try:
+            import json
+
+            error_data = json.loads(response_text)
+            # Tempo may return errors in different formats
+            error_message = (
+                error_data.get("error")
+                or error_data.get("message")
+                or error_data.get("errorType")
+                or response_text
+            )
+        except (json.JSONDecodeError, TypeError):
+            error_message = response_text
+
+        super().__init__(f"Tempo API error {status_code}: {error_message}")
+
+
+class GrafanaTempoAPI:
+    """Python wrapper for Grafana Tempo REST API.
+
+    This class provides a clean interface to all Tempo API endpoints,
+    supporting both GET and POST methods based on configuration.
+    """
+
+    def __init__(self, config: GrafanaTempoConfig, use_post: bool = False):
+        """Initialize the Tempo API wrapper.
+
+        Args:
+            config: GrafanaTempoConfig instance with connection details
+            use_post: If True, use POST method for API calls. Defaults to False (GET).
+        """
+        self.config = config
+        self.base_url = get_base_url(config)
+        self.headers = build_headers(config.api_key, config.headers)
+        self.use_post = use_post
+
+    def _make_request(
+        self,
+        endpoint: str,
+        params: Optional[Dict[str, Any]] = None,
+        path_params: Optional[Dict[str, str]] = None,
+        timeout: int = 30,
+        retries: int = 3,
+    ) -> Dict[str, Any]:
+        """Make HTTP request to Tempo API with retry logic.
+
+        Args:
+            endpoint: API endpoint path (e.g., "/api/echo")
+            params: Query parameters (GET) or body parameters (POST)
+            path_params: Parameters to substitute in the endpoint path
+            timeout: Request timeout in seconds
+            retries: Number of retry attempts
+
+        Returns:
+            JSON response from the API
+
+        Raises:
+            Exception: If the request fails after all retries
+        """
+        # Format endpoint with path parameters
+        if path_params:
+            for key, value in path_params.items():
+                endpoint = endpoint.replace(f"{{{key}}}", quote(str(value), safe=""))
+
+        url = f"{self.base_url}{endpoint}"
+
+        @backoff.on_exception(
+            backoff.expo,
+            requests.exceptions.RequestException,
+            max_tries=retries,
+            giveup=lambda e: isinstance(e, requests.exceptions.HTTPError)
+            and getattr(e, "response", None) is not None
+            and e.response.status_code < 500,
+        )
+        def make_request():
+            if self.use_post:
+                # POST request with JSON body
+                response = requests.post(
+                    url,
+                    headers=self.headers,
+                    json=params or {},
+                    timeout=timeout,
+                )
+            else:
+                # GET request with query parameters
+                response = requests.get(
+                    url,
+                    headers=self.headers,
+                    params=params,
+                    timeout=timeout,
+                )
+            response.raise_for_status()
+            return response.json()
+
+        try:
+            return make_request()
+        except requests.exceptions.HTTPError as e:
+            # Extract detailed error message from response
+            response = e.response
+            if response is not None:
+                logger.error(
+                    f"HTTP error {response.status_code} for {url}: {response.text}"
+                )
+                raise TempoAPIError(
+                    status_code=response.status_code,
+                    response_text=response.text,
+                    url=url,
+                )
+            else:
+                logger.error(f"Request failed for {url}: {e}")
+                raise
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Request failed for {url}: {e}")
+            raise
+
+    def query_echo_endpoint(self) -> bool:
+        """Query the echo endpoint to check Tempo status.
+
+        API Endpoint: GET /api/echo
+        HTTP Method: GET (or POST if use_post=True)
+
+        Returns:
+            bool: True if endpoint returns 200 status code, False otherwise
+        """
+        url = f"{self.base_url}/api/echo"
+
+        try:
+            if self.use_post:
+                response = requests.post(
+                    url,
+                    headers=self.headers,
+                    timeout=30,
+                )
+            else:
+                response = requests.get(
+                    url,
+                    headers=self.headers,
+                    timeout=30,
+                )
+
+            # Just check status code, don't try to parse JSON
+            return response.status_code == 200
+
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Request failed for {url}: {e}")
+            return False
+
+    def query_trace_by_id_v2(
+        self,
+        trace_id: str,
+        start: Optional[int] = None,
+        end: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Query a trace by its ID.
+
+        API Endpoint: GET /api/v2/traces/{trace_id}
+        HTTP Method: GET (or POST if use_post=True)
+
+        Args:
+            trace_id: The trace ID to retrieve
+            start: Optional start time in Unix epoch seconds
+            end: Optional end time in Unix epoch seconds
+
+        Returns:
+            dict: OpenTelemetry format trace data
+        """
+        params = {}
+        if start is not None:
+            params["start"] = str(start)
+        if end is not None:
+            params["end"] = str(end)
+
+        return self._make_request(
+            "/api/v2/traces/{trace_id}",
+            params=params,
+            path_params={"trace_id": trace_id},
+        )
+
+    def _search_traces_common(
+        self,
+        search_params: Dict[str, Any],
+        limit: Optional[int] = None,
+        start: Optional[int] = None,
+        end: Optional[int] = None,
+        spss: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Common search implementation for both tag and TraceQL searches.
+
+        Args:
+            search_params: The search-specific parameters (tags or q)
+            limit: Optional max number of traces to return
+            start: Optional start time in Unix epoch seconds
+            end: Optional end time in Unix epoch seconds
+            spss: Optional spans per span set
+
+        Returns:
+            dict: Search results with trace metadata
+        """
+        params = search_params.copy()
+
+        if limit is not None:
+            params["limit"] = str(limit)
+        if start is not None:
+            params["start"] = str(start)
+        if end is not None:
+            params["end"] = str(end)
+        if spss is not None:
+            params["spss"] = str(spss)
+
+        return self._make_request("/api/search", params=params)
+
+    def search_traces_by_tags(
+        self,
+        tags: str,
+        min_duration: Optional[str] = None,
+        max_duration: Optional[str] = None,
+        limit: Optional[int] = None,
+        start: Optional[int] = None,
+        end: Optional[int] = None,
+        spss: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Search for traces using tag-based search.
+
+        API Endpoint: GET /api/search
+        HTTP Method: GET (or POST if use_post=True)
+
+        Args:
+            tags: logfmt-encoded span/process attributes (required)
+            min_duration: Optional minimum trace duration (e.g., "5s")
+            max_duration: Optional maximum trace duration
+            limit: Optional max number of traces to return
+            start: Optional start time in Unix epoch seconds
+            end: Optional end time in Unix epoch seconds
+            spss: Optional spans per span set
+
+        Returns:
+            dict: Search results with trace metadata
+        """
+        search_params = {"tags": tags}
+
+        # minDuration and maxDuration are only supported with tag-based search
+        if min_duration is not None:
+            search_params["minDuration"] = min_duration
+        if max_duration is not None:
+            search_params["maxDuration"] = max_duration
+
+        return self._search_traces_common(
+            search_params=search_params,
+            limit=limit,
+            start=start,
+            end=end,
+            spss=spss,
+        )
+
+    def search_traces_by_query(
+        self,
+        q: str,
+        limit: Optional[int] = None,
+        start: Optional[int] = None,
+        end: Optional[int] = None,
+        spss: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Search for traces using TraceQL query.
+
+        API Endpoint: GET /api/search
+        HTTP Method: GET (or POST if use_post=True)
+
+        Note: minDuration and maxDuration are not supported with TraceQL queries.
+        Use the TraceQL query syntax to filter by duration instead.
+
+        Args:
+            q: TraceQL query (required)
+            limit: Optional max number of traces to return
+            start: Optional start time in Unix epoch seconds
+            end: Optional end time in Unix epoch seconds
+            spss: Optional spans per span set
+
+        Returns:
+            dict: Search results with trace metadata
+        """
+        return self._search_traces_common(
+            search_params={"q": q},
+            limit=limit,
+            start=start,
+            end=end,
+            spss=spss,
+        )
+
+    def search_tag_names_v2(
+        self,
+        scope: Optional[str] = None,
+        q: Optional[str] = None,
+        start: Optional[int] = None,
+        end: Optional[int] = None,
+        limit: Optional[int] = None,
+        max_stale_values: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Search for available tag names.
+
+        API Endpoint: GET /api/v2/search/tags
+        HTTP Method: GET (or POST if use_post=True)
+
+        Args:
+            scope: Optional scope filter ("resource", "span", or "intrinsic")
+            q: Optional TraceQL query to filter tags
+            start: Optional start time in Unix epoch seconds
+            end: Optional end time in Unix epoch seconds
+            limit: Optional max number of tag names
+            max_stale_values: Optional max stale values parameter
+
+        Returns:
+            dict: Available tag names organized by scope
+        """
+        params = {}
+        if scope is not None:
+            params["scope"] = scope
+        if q is not None:
+            params["q"] = q
+        if start is not None:
+            params["start"] = str(start)
+        if end is not None:
+            params["end"] = str(end)
+        if limit is not None:
+            params["limit"] = str(limit)
+        if max_stale_values is not None:
+            params["maxStaleValues"] = str(max_stale_values)
+
+        return self._make_request("/api/v2/search/tags", params=params)
+
+    def search_tag_values_v2(
+        self,
+        tag: str,
+        q: Optional[str] = None,
+        start: Optional[int] = None,
+        end: Optional[int] = None,
+        limit: Optional[int] = None,
+        max_stale_values: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Search for values of a specific tag with optional TraceQL filtering.
+
+        API Endpoint: GET /api/v2/search/tag/{tag}/values
+        HTTP Method: GET (or POST if use_post=True)
+
+        Args:
+            tag: The tag name to get values for (required)
+            q: Optional TraceQL query to filter tag values (e.g., '{resource.cluster="us-east-1"}')
+            start: Optional start time in Unix epoch seconds
+            end: Optional end time in Unix epoch seconds
+            limit: Optional max number of values
+            max_stale_values: Optional max stale values parameter
+
+        Returns:
+            dict: List of discovered values for the tag
+        """
+        params = {}
+        if q is not None:
+            params["q"] = q
+        if start is not None:
+            params["start"] = str(start)
+        if end is not None:
+            params["end"] = str(end)
+        if limit is not None:
+            params["limit"] = str(limit)
+        if max_stale_values is not None:
+            params["maxStaleValues"] = str(max_stale_values)
+
+        return self._make_request(
+            "/api/v2/search/tag/{tag}/values",
+            params=params,
+            path_params={"tag": tag},
+        )
+
+    def query_metrics_instant(
+        self,
+        q: str,
+        start: Optional[Union[int, str]] = None,
+        end: Optional[Union[int, str]] = None,
+        since: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """Query TraceQL metrics for an instant value.
+
+        Computes a single value across the entire time range.
+
+        API Endpoint: GET /api/metrics/query
+        HTTP Method: GET (or POST if use_post=True)
+
+        Args:
+            q: TraceQL metrics query (required)
+            start: Optional start time (Unix seconds/nanoseconds/RFC3339)
+            end: Optional end time (Unix seconds/nanoseconds/RFC3339)
+            since: Optional duration string (e.g., "1h")
+
+        Returns:
+            dict: Single computed metric value
+        """
+        params = {"q": q}
+        if start is not None:
+            params["start"] = str(start)
+        if end is not None:
+            params["end"] = str(end)
+        if since is not None:
+            params["since"] = since
+
+        return self._make_request("/api/metrics/query", params=params)
+
+    def query_metrics_range(
+        self,
+        q: str,
+        step: Optional[str] = None,
+        start: Optional[Union[int, str]] = None,
+        end: Optional[Union[int, str]] = None,
+        since: Optional[str] = None,
+        exemplars: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Query TraceQL metrics for a time series range.
+
+        Returns metrics computed at regular intervals over the time range.
+
+        API Endpoint: GET /api/metrics/query_range
+        HTTP Method: GET (or POST if use_post=True)
+
+        Args:
+            q: TraceQL metrics query (required)
+            step: Optional time series granularity (e.g., "1m", "5m")
+            start: Optional start time (Unix seconds/nanoseconds/RFC3339)
+            end: Optional end time (Unix seconds/nanoseconds/RFC3339)
+            since: Optional duration string (e.g., "3h")
+            exemplars: Optional maximum number of exemplars to return
+
+        Returns:
+            dict: Time series of metric values
+        """
+        params = {"q": q}
+        if step is not None:
+            params["step"] = step
+        if start is not None:
+            params["start"] = str(start)
+        if end is not None:
+            params["end"] = str(end)
+        if since is not None:
+            params["since"] = since
+        if exemplars is not None:
+            params["exemplars"] = str(exemplars)
+
+        return self._make_request("/api/metrics/query_range", params=params)
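For orientation, a minimal usage sketch of the GrafanaTempoAPI wrapper added above. It is illustrative only: GrafanaTempoConfig is defined in holmes.plugins.toolsets.grafana.common and only its api_key and headers fields are visible in this diff, so the url field name and all values below are assumptions, and the response shape ("traces"/"traceID") follows Tempo's documented search API rather than anything shown here.

# Illustrative sketch only -- not part of the diff above.
from holmes.plugins.toolsets.grafana.common import GrafanaTempoConfig
from holmes.plugins.toolsets.grafana.grafana_tempo_api import (
    GrafanaTempoAPI,
    TempoAPIError,
)

# Field names other than api_key/headers are assumed; check GrafanaTempoConfig.
config = GrafanaTempoConfig(
    url="https://tempo.example.com",
    api_key="<token>",
    headers={"X-Scope-OrgID": "1"},
)
api = GrafanaTempoAPI(config)  # use_post=True switches every call to POST

try:
    # TraceQL search, then fetch the first matching traces in full.
    results = api.search_traces_by_query(
        q='{resource.service.name="checkout" && duration > 2s}',
        limit=20,
    )
    for trace in results.get("traces", []):  # response shape per Tempo's search API
        full_trace = api.query_trace_by_id_v2(trace["traceID"])
except TempoAPIError as e:
    # Raised for non-2xx responses; 4xx errors are not retried, 5xx are (up to 3 tries).
    print(f"Tempo API error {e.status_code} at {e.url}: {e.response_text}")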
holmes/plugins/toolsets/grafana/toolset_grafana.py
@@ -4,7 +4,7 @@ from holmes.core.tools import (
     StructuredToolResult,
     Tool,
     ToolParameter,
-
+    StructuredToolResultStatus,
 )
 from holmes.plugins.toolsets.grafana.base_grafana_toolset import BaseGrafanaToolset
 import requests  # type: ignore
@@ -90,9 +90,9 @@ class ListAndBuildGrafanaDashboardURLs(Tool):
             )

         return StructuredToolResult(
-            status=
+            status=StructuredToolResultStatus.SUCCESS
             if formatted_dashboards
-            else
+            else StructuredToolResultStatus.NO_DATA,
             data="\n".join(formatted_dashboards)
             if formatted_dashboards
             else "No dashboards found.",
@@ -102,7 +102,7 @@ class ListAndBuildGrafanaDashboardURLs(Tool):
         except requests.RequestException as e:
             logging.error(f"Error fetching dashboards: {str(e)}")
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Error fetching dashboards: {str(e)}",
                 url=url,
                 params=params,
holmes/plugins/toolsets/grafana/toolset_grafana_loki.py
@@ -14,6 +14,7 @@ from holmes.plugins.toolsets.logging_utils.logging_api import (
     LoggingCapability,
     PodLoggingTool,
     DEFAULT_TIME_SPAN_SECONDS,
+    DEFAULT_LOG_LIMIT,
 )
 from holmes.plugins.toolsets.utils import (
     process_timestamps_to_rfc3339,
@@ -22,7 +23,7 @@ from holmes.plugins.toolsets.utils import (
 from holmes.plugins.toolsets.grafana.loki_api import (
     query_loki_logs_by_label,
 )
-from holmes.core.tools import StructuredToolResult,
+from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus


 class GrafanaLokiLabelsConfig(BaseModel):
@@ -94,17 +95,17 @@ class GrafanaLokiToolset(BasePodLoggingToolset):
             label_value=params.pod_name,
             start=start,
             end=end,
-            limit=params.limit or
+            limit=params.limit or DEFAULT_LOG_LIMIT,
         )
         if logs:
             logs.sort(key=lambda x: x["timestamp"])
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.SUCCESS,
                 data="\n".join([format_log(log) for log in logs]),
                 params=params.model_dump(),
             )
         else:
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.NO_DATA,
                 params=params.model_dump(),
             )
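The toolset hunks above share one pattern: results are now built with an explicit StructuredToolResultStatus member (SUCCESS, NO_DATA, ERROR) imported from holmes.core.tools. A minimal sketch of that pattern, assuming only the constructor fields visible in the hunks (status, data, error, url, params); anything else about these classes is not shown in this diff.

# Illustrative sketch only -- mirrors the pattern in the hunks above.
from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus


def logs_to_result(logs: list, params: dict) -> StructuredToolResult:
    # Pick an explicit status enum member instead of relying on an implicit default.
    if logs:
        return StructuredToolResult(
            status=StructuredToolResultStatus.SUCCESS,
            data="\n".join(str(log) for log in logs),
            params=params,
        )
    return StructuredToolResult(
        status=StructuredToolResultStatus.NO_DATA,
        params=params,
    )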