holmesgpt 0.13.1__py3-none-any.whl → 0.13.3a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of holmesgpt might be problematic. Click here for more details.
- holmes/__init__.py +1 -1
- holmes/common/env_vars.py +7 -0
- holmes/config.py +3 -1
- holmes/core/conversations.py +0 -11
- holmes/core/investigation.py +0 -6
- holmes/core/llm.py +60 -1
- holmes/core/prompt.py +0 -2
- holmes/core/supabase_dal.py +2 -2
- holmes/core/todo_tasks_formatter.py +51 -0
- holmes/core/tool_calling_llm.py +166 -91
- holmes/core/tools.py +20 -4
- holmes/interactive.py +63 -2
- holmes/main.py +0 -1
- holmes/plugins/prompts/_general_instructions.jinja2 +3 -1
- holmes/plugins/prompts/investigation_procedure.jinja2 +3 -13
- holmes/plugins/toolsets/__init__.py +5 -1
- holmes/plugins/toolsets/argocd.yaml +1 -1
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +18 -6
- holmes/plugins/toolsets/aws.yaml +9 -5
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +3 -1
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +3 -1
- holmes/plugins/toolsets/bash/bash_toolset.py +31 -20
- holmes/plugins/toolsets/confluence.yaml +1 -1
- holmes/plugins/toolsets/coralogix/api.py +3 -1
- holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +4 -4
- holmes/plugins/toolsets/coralogix/utils.py +41 -14
- holmes/plugins/toolsets/datadog/datadog_api.py +45 -2
- holmes/plugins/toolsets/datadog/datadog_general_instructions.jinja2 +208 -0
- holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +43 -0
- holmes/plugins/toolsets/datadog/datadog_metrics_instructions.jinja2 +12 -9
- holmes/plugins/toolsets/datadog/toolset_datadog_general.py +722 -0
- holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +17 -6
- holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +15 -7
- holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +6 -2
- holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +9 -3
- holmes/plugins/toolsets/docker.yaml +1 -1
- holmes/plugins/toolsets/git.py +15 -5
- holmes/plugins/toolsets/grafana/toolset_grafana.py +25 -4
- holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +4 -4
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +5 -3
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +299 -32
- holmes/plugins/toolsets/helm.yaml +1 -1
- holmes/plugins/toolsets/internet/internet.py +4 -2
- holmes/plugins/toolsets/internet/notion.py +4 -2
- holmes/plugins/toolsets/investigator/core_investigation.py +5 -17
- holmes/plugins/toolsets/investigator/investigator_instructions.jinja2 +1 -5
- holmes/plugins/toolsets/kafka.py +19 -7
- holmes/plugins/toolsets/kubernetes.yaml +5 -5
- holmes/plugins/toolsets/kubernetes_logs.py +4 -4
- holmes/plugins/toolsets/kubernetes_logs.yaml +1 -1
- holmes/plugins/toolsets/logging_utils/logging_api.py +15 -2
- holmes/plugins/toolsets/mcp/toolset_mcp.py +3 -1
- holmes/plugins/toolsets/newrelic.py +8 -4
- holmes/plugins/toolsets/opensearch/opensearch.py +13 -5
- holmes/plugins/toolsets/opensearch/opensearch_logs.py +4 -4
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +9 -6
- holmes/plugins/toolsets/prometheus/prometheus.py +193 -82
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +7 -3
- holmes/plugins/toolsets/robusta/robusta.py +10 -4
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +4 -2
- holmes/plugins/toolsets/servicenow/servicenow.py +9 -3
- holmes/plugins/toolsets/slab.yaml +1 -1
- {holmesgpt-0.13.1.dist-info → holmesgpt-0.13.3a0.dist-info}/METADATA +3 -2
- {holmesgpt-0.13.1.dist-info → holmesgpt-0.13.3a0.dist-info}/RECORD +75 -72
- holmes/core/todo_manager.py +0 -88
- {holmesgpt-0.13.1.dist-info → holmesgpt-0.13.3a0.dist-info}/LICENSE.txt +0 -0
- {holmesgpt-0.13.1.dist-info → holmesgpt-0.13.3a0.dist-info}/WHEEL +0 -0
- {holmesgpt-0.13.1.dist-info → holmesgpt-0.13.3a0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,722 @@
|
|
|
1
|
+
"""General-purpose Datadog API toolset for read-only operations."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
import os
|
|
6
|
+
import re
|
|
7
|
+
from typing import Any, Dict, Optional, Tuple
|
|
8
|
+
from urllib.parse import urlparse
|
|
9
|
+
|
|
10
|
+
from holmes.core.tools import (
|
|
11
|
+
CallablePrerequisite,
|
|
12
|
+
Tool,
|
|
13
|
+
ToolParameter,
|
|
14
|
+
Toolset,
|
|
15
|
+
StructuredToolResult,
|
|
16
|
+
ToolResultStatus,
|
|
17
|
+
ToolsetTag,
|
|
18
|
+
)
|
|
19
|
+
from holmes.plugins.toolsets.consts import TOOLSET_CONFIG_MISSING_ERROR
|
|
20
|
+
from holmes.plugins.toolsets.datadog.datadog_api import (
|
|
21
|
+
DatadogBaseConfig,
|
|
22
|
+
DataDogRequestError,
|
|
23
|
+
execute_datadog_http_request,
|
|
24
|
+
get_headers,
|
|
25
|
+
MAX_RETRY_COUNT_ON_RATE_LIMIT,
|
|
26
|
+
)
|
|
27
|
+
from holmes.plugins.toolsets.utils import toolset_name_for_one_liner
|
|
28
|
+
|
|
29
|
+
# Maximum response size in bytes (10MB)
MAX_RESPONSE_SIZE = 10 * 1024 * 1024

# Whitelisted API endpoint patterns - READ ONLY operations.
# Each entry is a regex applied with re.match() to the URL *path* inside
# is_endpoint_allowed(); only GET requests are checked against this list.
WHITELISTED_ENDPOINTS = [
    # Monitors
    r"^/api/v\d+/monitor(/search)?$",
    r"^/api/v\d+/monitor/\d+(/downtimes)?$",
    r"^/api/v\d+/monitor/groups/search$",
    # Dashboards
    r"^/api/v\d+/dashboard(/lists)?$",
    r"^/api/v\d+/dashboard/[^/]+$",
    r"^/api/v\d+/dashboard/public/[^/]+$",
    # SLOs (Service Level Objectives)
    r"^/api/v\d+/slo(/search)?$",
    r"^/api/v\d+/slo/[^/]+(/history)?$",
    r"^/api/v\d+/slo/[^/]+/corrections$",
    # Events
    r"^/api/v\d+/events$",
    r"^/api/v\d+/events/\d+$",
    # Incidents
    r"^/api/v\d+/incidents(/search)?$",
    r"^/api/v\d+/incidents/[^/]+$",
    r"^/api/v\d+/incidents/[^/]+/attachments$",
    r"^/api/v\d+/incidents/[^/]+/connected_integrations$",
    r"^/api/v\d+/incidents/[^/]+/relationships$",
    r"^/api/v\d+/incidents/[^/]+/timeline$",
    # Synthetics
    r"^/api/v\d+/synthetics/tests(/search)?$",
    r"^/api/v\d+/synthetics/tests/[^/]+$",
    r"^/api/v\d+/synthetics/tests/[^/]+/results$",
    r"^/api/v\d+/synthetics/tests/browser/[^/]+/results$",
    r"^/api/v\d+/synthetics/tests/api/[^/]+/results$",
    r"^/api/v\d+/synthetics/locations$",
    # Security Monitoring
    r"^/api/v\d+/security_monitoring/rules(/search)?$",
    r"^/api/v\d+/security_monitoring/rules/[^/]+$",
    r"^/api/v\d+/security_monitoring/signals(/search)?$",
    r"^/api/v\d+/security_monitoring/signals/[^/]+$",
    # Service Map / APM Services
    r"^/api/v\d+/services$",
    r"^/api/v\d+/services/[^/]+$",
    r"^/api/v\d+/services/[^/]+/dependencies$",
    # Hosts
    r"^/api/v\d+/hosts$",
    r"^/api/v\d+/hosts/totals$",
    r"^/api/v\d+/hosts/[^/]+$",
    # Usage & Cost
    r"^/api/v\d+/usage/[^/]+$",
    r"^/api/v\d+/usage/summary$",
    r"^/api/v\d+/usage/billable-summary$",
    r"^/api/v\d+/usage/cost_by_org$",
    r"^/api/v\d+/usage/estimated_cost$",
    # Processes
    r"^/api/v\d+/processes$",
    # Tags
    r"^/api/v\d+/tags/hosts(/[^/]+)?$",
    # Notebooks
    r"^/api/v\d+/notebooks$",
    r"^/api/v\d+/notebooks/\d+$",
    # Service Dependencies
    r"^/api/v\d+/service_dependencies$",
    # Organization
    r"^/api/v\d+/org$",
    r"^/api/v\d+/org/[^/]+$",
    # Users (read only)
    r"^/api/v\d+/users$",
    r"^/api/v\d+/users/[^/]+$",
    # Teams (read only)
    r"^/api/v\d+/teams$",
    r"^/api/v\d+/teams/[^/]+$",
    # Audit logs
    r"^/api/v\d+/audit/events$",
    # Service Accounts (read only)
    r"^/api/v\d+/service_accounts$",
    r"^/api/v\d+/service_accounts/[^/]+$",
]

# Blacklisted path segments that indicate write operations.
# Matched as plain substrings of the lowercased path (so e.g.
# "/monitor/123/mute" is caught). A match rejects the request before any
# whitelist check, regardless of HTTP method.
BLACKLISTED_SEGMENTS = [
    "/create",
    "/update",
    "/delete",
    "/patch",
    "/remove",
    "/add",
    "/revoke",
    "/cancel",
    "/mute",
    "/unmute",
    "/enable",
    "/disable",
    "/archive",
    "/unarchive",
    "/assign",
    "/unassign",
    "/invite",
    "/bulk",
    "/import",
    "/export",
    "/trigger",
    "/validate",
    "/execute",
    "/run",
    "/start",
    "/stop",
    "/restart",
]

# POST endpoints that are allowed (search/query operations only).
# POST is restricted to this explicit list — allow_custom_endpoints does not
# apply to POST in is_endpoint_allowed().
WHITELISTED_POST_ENDPOINTS = [
    r"^/api/v\d+/monitor/search$",
    r"^/api/v\d+/dashboard/lists$",
    r"^/api/v\d+/slo/search$",
    r"^/api/v\d+/events/search$",
    r"^/api/v\d+/incidents/search$",
    r"^/api/v\d+/synthetics/tests/search$",
    r"^/api/v\d+/security_monitoring/rules/search$",
    r"^/api/v\d+/security_monitoring/signals/search$",
    r"^/api/v\d+/logs/events/search$",
    r"^/api/v\d+/spans/events/search$",
    r"^/api/v\d+/rum/events/search$",
    r"^/api/v\d+/audit/events/search$",
]
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class DatadogGeneralConfig(DatadogBaseConfig):
    """Configuration for general-purpose Datadog toolset."""

    # Hard cap (bytes) on API responses returned to the caller; defaults to 10MB.
    max_response_size: int = MAX_RESPONSE_SIZE
    # When True, GET endpoints outside WHITELISTED_ENDPOINTS are permitted;
    # they are still filtered against BLACKLISTED_SEGMENTS for safety.
    allow_custom_endpoints: bool = (
        False  # If True, allows endpoints not in whitelist (still filtered for safety)
    )
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
class DatadogGeneralToolset(Toolset):
    """General-purpose Datadog API toolset for read-only operations not covered by specialized toolsets.

    Registers three tools (GET, POST-search, and an endpoint catalog) and
    validates the configured credentials against the Datadog API before use.
    """

    # Parsed toolset configuration; set by prerequisites_callable(), None until then.
    dd_config: Optional[DatadogGeneralConfig] = None

    def __init__(self):
        super().__init__(
            name="datadog/general",
            description="General-purpose Datadog API access for read-only operations including monitors, dashboards, SLOs, incidents, synthetics, and more",
            docs_url="https://docs.datadoghq.com/api/latest/",
            icon_url="https://imgix.datadoghq.com//img/about/presskit/DDlogo.jpg",
            prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],
            tools=[
                DatadogAPIGet(toolset=self),
                DatadogAPIPostSearch(toolset=self),
                ListDatadogAPIResources(toolset=self),
            ],
            experimental=True,
            tags=[ToolsetTag.CORE],
        )
        # Load LLM usage instructions from the jinja2 template that sits next
        # to this module on disk.
        template_file_path = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__), "datadog_general_instructions.jinja2"
            )
        )
        self._load_llm_instructions(jinja_template=f"file://{template_file_path}")

    def prerequisites_callable(self, config: dict[str, Any]) -> Tuple[bool, str]:
        """Parse the toolset config and run a credentials healthcheck.

        Returns:
            Tuple of (success, error_message); error_message is "" on success.
        """
        if not config:
            return False, TOOLSET_CONFIG_MISSING_ERROR

        try:
            dd_config = DatadogGeneralConfig(**config)
            # NOTE: dd_config is stored even if the healthcheck below fails.
            self.dd_config = dd_config
            success, error_msg = self._perform_healthcheck(dd_config)
            return success, error_msg
        except Exception as e:
            logging.exception("Failed to set up Datadog general toolset")
            return False, f"Failed to parse Datadog configuration: {str(e)}"

    def _perform_healthcheck(self, dd_config: DatadogGeneralConfig) -> Tuple[bool, str]:
        """Perform health check on Datadog API.

        Calls /api/v1/validate, which reports whether the API key is valid.
        """
        try:
            logging.info("Performing Datadog general API configuration healthcheck...")
            url = f"{dd_config.site_api_url}/api/v1/validate"
            headers = get_headers(dd_config)

            data = execute_datadog_http_request(
                url=url,
                headers=headers,
                payload_or_params={},
                timeout=dd_config.request_timeout,
                method="GET",
            )

            if data.get("valid", False):
                logging.info("Datadog general API healthcheck completed successfully")
                return True, ""
            else:
                error_msg = "Datadog API key validation failed"
                logging.error(f"Datadog general API healthcheck failed: {error_msg}")
                return False, f"Datadog general API healthcheck failed: {error_msg}"

        except Exception as e:
            logging.exception("Failed during Datadog general API healthcheck")
            return False, f"Healthcheck failed with exception: {str(e)}"

    def get_example_config(self) -> Dict[str, Any]:
        """Get example configuration for this toolset."""
        return {
            "dd_api_key": "your-datadog-api-key",
            "dd_app_key": "your-datadog-application-key",
            "site_api_url": "https://api.datadoghq.com",
            "max_response_size": MAX_RESPONSE_SIZE,
            "allow_custom_endpoints": False,
        }
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
def is_endpoint_allowed(
    endpoint: str, method: str = "GET", allow_custom: bool = False
) -> Tuple[bool, str]:
    """
    Check if an endpoint is allowed based on whitelist and safety rules.

    Args:
        endpoint: Endpoint path or full URL; only the path component is checked.
        method: HTTP method; compared case-insensitively ("get" == "GET").
        allow_custom: When True, GET endpoints that are not whitelisted are
            still allowed as long as they contain no blacklisted segment.
            POST is always restricted to the explicit POST whitelist.

    Returns:
        Tuple of (is_allowed, error_message); error_message is "" when allowed.
    """
    # Parse the endpoint so query strings and full URLs are handled uniformly.
    parsed = urlparse(endpoint)
    path = parsed.path

    # Reject anything that looks like a write operation, regardless of method.
    path_lower = path.lower()
    for segment in BLACKLISTED_SEGMENTS:
        if segment in path_lower:
            return False, f"Endpoint contains blacklisted operation '{segment}'"

    # Fix: the original compared the method case-sensitively, so "get"/"post"
    # always fell through to the rejection branch. Normalize before comparing.
    method = method.upper()

    # Check method-specific whitelists
    if method == "POST":
        for pattern in WHITELISTED_POST_ENDPOINTS:
            if re.match(pattern, path):
                return True, ""
        return False, f"POST method not allowed for endpoint: {path}"

    elif method == "GET":
        for pattern in WHITELISTED_ENDPOINTS:
            if re.match(pattern, path):
                return True, ""

        # If custom endpoints are allowed and no blacklisted segments found
        if allow_custom:
            return True, ""

        return False, f"Endpoint not in whitelist: {path}"

    else:
        return False, f"HTTP method {method} not allowed for {path}"
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
class BaseDatadogGeneralTool(Tool):
    """Base class for general Datadog API tools."""

    # Back-reference to the owning toolset; gives each tool access to
    # the shared dd_config and the toolset name used in one-liners.
    toolset: "DatadogGeneralToolset"
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
class DatadogAPIGet(BaseDatadogGeneralTool):
    """Tool for making GET requests to whitelisted, read-only Datadog API endpoints.

    The endpoint is validated via is_endpoint_allowed() before any network
    call, and responses larger than the configured max_response_size are
    rejected rather than returned.
    """

    def __init__(self, toolset: "DatadogGeneralToolset"):
        super().__init__(
            name="datadog_api_get",
            # Fix: the example previously said '/api/v1/monitors', which the
            # whitelist rejects (the Datadog monitors endpoint is singular).
            description="Make a GET request to a Datadog API endpoint for read-only operations",
            parameters={
                "endpoint": ToolParameter(
                    description="The API endpoint path (e.g., '/api/v1/monitor', '/api/v2/events')",
                    type="string",
                    required=True,
                ),
                "query_params": ToolParameter(
                    description="Query parameters as a dictionary (e.g., {'from': '2024-01-01', 'to': '2024-01-02'})",
                    type="object",
                    required=False,
                ),
                "description": ToolParameter(
                    description="Brief description of what this API call is retrieving",
                    type="string",
                    required=True,
                ),
            },
            toolset=toolset,
        )

    def get_parameterized_one_liner(self, params: dict) -> str:
        """Get a one-liner description of the tool invocation."""
        description = params.get("description", "API call")
        return f"{toolset_name_for_one_liner(self.toolset.name)}: {description}"

    def _invoke(
        self, params: dict, user_approved: bool = False
    ) -> StructuredToolResult:
        """Validate the endpoint, execute the GET request, and return the JSON result.

        Returns:
            StructuredToolResult with the serialized JSON response on success,
            or an ERROR result when validation fails, the response exceeds
            max_response_size, or the API call raises.
        """
        logging.info("=" * 60)
        logging.info("DatadogAPIGet Tool Invocation:")
        logging.info(f"  Description: {params.get('description', 'No description')}")
        logging.info(f"  Endpoint: {params.get('endpoint', '')}")
        logging.info(
            f"  Query Params: {json.dumps(params.get('query_params', {}), indent=2)}"
        )
        logging.info("=" * 60)

        if not self.toolset.dd_config:
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=TOOLSET_CONFIG_MISSING_ERROR,
                params=params,
            )

        endpoint = params.get("endpoint", "")
        query_params = params.get("query_params", {})

        # Validate against the read-only whitelist before any network call.
        is_allowed, error_msg = is_endpoint_allowed(
            endpoint,
            method="GET",
            allow_custom=self.toolset.dd_config.allow_custom_endpoints,
        )
        if not is_allowed:
            logging.error(f"Endpoint validation failed: {error_msg}")
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=f"Endpoint validation failed: {error_msg}",
                params=params,
            )

        url = None
        try:
            # Build full URL (ensure no double slashes)
            base_url = str(self.toolset.dd_config.site_api_url).rstrip("/")
            endpoint = endpoint.lstrip("/")
            url = f"{base_url}/{endpoint}"
            headers = get_headers(self.toolset.dd_config)

            logging.info(f"Full API URL: {url}")

            # Execute request
            response = execute_datadog_http_request(
                url=url,
                headers=headers,
                payload_or_params=query_params,
                timeout=self.toolset.dd_config.request_timeout,
                method="GET",
            )

            # Reject oversized responses instead of flooding the caller.
            response_str = json.dumps(response, indent=2)
            if (
                len(response_str.encode("utf-8"))
                > self.toolset.dd_config.max_response_size
            ):
                return StructuredToolResult(
                    status=ToolResultStatus.ERROR,
                    error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
                    params=params,
                )

            return StructuredToolResult(
                status=ToolResultStatus.SUCCESS,
                data=response_str,
                params=params,
            )

        except DataDogRequestError as e:
            # logging.exception() records the traceback itself; the redundant
            # exc_info=True from the original call is dropped.
            logging.exception(e)

            # Map common HTTP error codes to actionable messages.
            if e.status_code == 429:
                error_msg = f"Datadog API rate limit exceeded. Failed after {MAX_RETRY_COUNT_ON_RATE_LIMIT} retry attempts."
            elif e.status_code == 403:
                error_msg = (
                    f"Permission denied. Check API key permissions. Error: {str(e)}"
                )
            elif e.status_code == 404:
                error_msg = f"Endpoint not found: {endpoint}"
            else:
                error_msg = f"API error {e.status_code}: {str(e)}"

            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=error_msg,
                params=params,
                invocation=json.dumps({"url": url, "params": query_params})
                if url
                else None,
            )

        except Exception as e:
            logging.exception(f"Failed to query Datadog API: {params}")
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=f"Unexpected error: {str(e)}",
                params=params,
            )
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
class DatadogAPIPostSearch(BaseDatadogGeneralTool):
    """Tool for making POST requests to Datadog search/query endpoints.

    Only endpoints in WHITELISTED_POST_ENDPOINTS are accepted — POST is
    restricted to search/filter operations, never writes. Oversized responses
    are rejected per the configured max_response_size.
    """

    def __init__(self, toolset: "DatadogGeneralToolset"):
        super().__init__(
            name="datadog_api_post_search",
            description="Make a POST request to Datadog search/query endpoints for complex filtering",
            parameters={
                "endpoint": ToolParameter(
                    description="The search API endpoint (e.g., '/api/v2/monitor/search', '/api/v2/events/search')",
                    type="string",
                    required=True,
                ),
                "body": ToolParameter(
                    description="Request body for the search/filter operation",
                    type="object",
                    required=True,
                ),
                "description": ToolParameter(
                    description="Brief description of what this search is looking for",
                    type="string",
                    required=True,
                ),
            },
            toolset=toolset,
        )

    def get_parameterized_one_liner(self, params: dict) -> str:
        """Get a one-liner description of the tool invocation."""
        description = params.get("description", "Search")
        return f"{toolset_name_for_one_liner(self.toolset.name)}: {description}"

    def _invoke(
        self, params: dict, user_approved: bool = False
    ) -> StructuredToolResult:
        """Validate the endpoint, execute the POST search, and return the JSON result.

        Returns:
            StructuredToolResult with the serialized JSON response on success,
            or an ERROR result when validation fails, the response exceeds
            max_response_size, or the API call raises.
        """
        logging.info("=" * 60)
        logging.info("DatadogAPIPostSearch Tool Invocation:")
        logging.info(f"  Description: {params.get('description', 'No description')}")
        logging.info(f"  Endpoint: {params.get('endpoint', '')}")
        logging.info(f"  Body: {json.dumps(params.get('body', {}), indent=2)}")
        logging.info("=" * 60)

        if not self.toolset.dd_config:
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=TOOLSET_CONFIG_MISSING_ERROR,
                params=params,
            )

        endpoint = params.get("endpoint", "")
        body = params.get("body", {})

        # Validate against the POST search whitelist before any network call.
        is_allowed, error_msg = is_endpoint_allowed(
            endpoint,
            method="POST",
            allow_custom=self.toolset.dd_config.allow_custom_endpoints,
        )
        if not is_allowed:
            logging.error(f"Endpoint validation failed: {error_msg}")
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=f"Endpoint validation failed: {error_msg}",
                params=params,
            )

        url = None
        try:
            # Build full URL (ensure no double slashes)
            base_url = str(self.toolset.dd_config.site_api_url).rstrip("/")
            endpoint = endpoint.lstrip("/")
            url = f"{base_url}/{endpoint}"
            headers = get_headers(self.toolset.dd_config)

            logging.info(f"Full API URL: {url}")

            # Execute request
            response = execute_datadog_http_request(
                url=url,
                headers=headers,
                payload_or_params=body,
                timeout=self.toolset.dd_config.request_timeout,
                method="POST",
            )

            # Reject oversized responses instead of flooding the caller.
            response_str = json.dumps(response, indent=2)
            if (
                len(response_str.encode("utf-8"))
                > self.toolset.dd_config.max_response_size
            ):
                return StructuredToolResult(
                    status=ToolResultStatus.ERROR,
                    error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
                    params=params,
                )

            return StructuredToolResult(
                status=ToolResultStatus.SUCCESS,
                data=response_str,
                params=params,
            )

        except DataDogRequestError as e:
            # logging.exception() records the traceback itself; the redundant
            # exc_info=True from the original call is dropped.
            logging.exception(e)

            # Map common HTTP error codes to actionable messages.
            if e.status_code == 429:
                error_msg = f"Datadog API rate limit exceeded. Failed after {MAX_RETRY_COUNT_ON_RATE_LIMIT} retry attempts."
            elif e.status_code == 403:
                error_msg = (
                    f"Permission denied. Check API key permissions. Error: {str(e)}"
                )
            elif e.status_code == 404:
                error_msg = f"Endpoint not found: {endpoint}"
            else:
                error_msg = f"API error {e.status_code}: {str(e)}"

            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=error_msg,
                params=params,
                invocation=json.dumps({"url": url, "body": body}) if url else None,
            )

        except Exception as e:
            logging.exception(f"Failed to query Datadog API: {params}")
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                error=f"Unexpected error: {str(e)}",
                params=params,
            )
|
|
560
|
+
|
|
561
|
+
|
|
562
|
+
class ListDatadogAPIResources(BaseDatadogGeneralTool):
    """Tool for listing available Datadog API resources and endpoints.

    Purely informational: returns a static, human-readable catalog of the
    read-only endpoints the other tools in this toolset can call.
    """

    def __init__(self, toolset: "DatadogGeneralToolset"):
        super().__init__(
            name="list_datadog_api_resources",
            description="List available Datadog API resources and endpoints that can be accessed",
            parameters={
                "category": ToolParameter(
                    description="Filter by category (e.g., 'monitors', 'dashboards', 'slos', 'incidents', 'synthetics', 'security', 'hosts', 'all')",
                    type="string",
                    required=False,
                ),
            },
            toolset=toolset,
        )

    def get_parameterized_one_liner(self, params: dict) -> str:
        """Get a one-liner description of the tool invocation."""
        category = params.get("category", "all")
        return f"{toolset_name_for_one_liner(self.toolset.name)}: List API Resources ({category})"

    def _invoke(
        self, params: dict, user_approved: bool = False
    ) -> StructuredToolResult:
        """List available API resources, optionally filtered by category."""
        category = params.get("category", "all").lower()

        logging.info("=" * 60)
        logging.info("ListDatadogAPIResources Tool Invocation:")
        logging.info(f"  Category: {category}")
        logging.info("=" * 60)

        # Define categories and their endpoints
        resources = {
            "monitors": {
                "description": "Monitor management and alerting",
                "endpoints": [
                    "GET /api/v1/monitor - List all monitors",
                    "GET /api/v1/monitor/{id} - Get a monitor by ID",
                    "POST /api/v1/monitor/search - Search monitors",
                    "GET /api/v1/monitor/groups/search - Search monitor groups",
                ],
            },
            "dashboards": {
                "description": "Dashboard and visualization management",
                "endpoints": [
                    "GET /api/v1/dashboard - List all dashboards",
                    "GET /api/v1/dashboard/{id} - Get a dashboard by ID",
                    "POST /api/v1/dashboard/lists - List dashboard lists",
                    "GET /api/v1/dashboard/public/{token} - Get public dashboard",
                ],
            },
            "slos": {
                "description": "Service Level Objectives",
                "endpoints": [
                    "GET /api/v1/slo - List all SLOs",
                    "GET /api/v1/slo/{id} - Get an SLO by ID",
                    "GET /api/v1/slo/{id}/history - Get SLO history",
                    "POST /api/v1/slo/search - Search SLOs",
                    "GET /api/v1/slo/{id}/corrections - Get SLO corrections",
                ],
            },
            "incidents": {
                "description": "Incident management",
                "endpoints": [
                    "GET /api/v2/incidents - List incidents",
                    "GET /api/v2/incidents/{id} - Get incident details",
                    "POST /api/v2/incidents/search - Search incidents",
                    "GET /api/v2/incidents/{id}/timeline - Get incident timeline",
                    "GET /api/v2/incidents/{id}/attachments - Get incident attachments",
                ],
            },
            "synthetics": {
                "description": "Synthetic monitoring and testing",
                "endpoints": [
                    "GET /api/v1/synthetics/tests - List synthetic tests",
                    "GET /api/v1/synthetics/tests/{id} - Get test details",
                    "POST /api/v1/synthetics/tests/search - Search tests",
                    "GET /api/v1/synthetics/tests/{id}/results - Get test results",
                    "GET /api/v1/synthetics/locations - List test locations",
                ],
            },
            "security": {
                "description": "Security monitoring and detection",
                "endpoints": [
                    "GET /api/v2/security_monitoring/rules - List security rules",
                    "GET /api/v2/security_monitoring/rules/{id} - Get rule details",
                    "POST /api/v2/security_monitoring/rules/search - Search rules",
                    "POST /api/v2/security_monitoring/signals/search - Search security signals",
                ],
            },
            "hosts": {
                "description": "Host and infrastructure monitoring",
                "endpoints": [
                    "GET /api/v1/hosts - List all hosts",
                    "GET /api/v1/hosts/{name} - Get host details",
                    "GET /api/v1/hosts/totals - Get host totals",
                    "GET /api/v1/tags/hosts - Get host tags",
                ],
            },
            "events": {
                "description": "Event stream and management",
                "endpoints": [
                    "GET /api/v1/events - Query event stream",
                    "GET /api/v1/events/{id} - Get event details",
                    "POST /api/v2/events/search - Search events",
                ],
            },
            "usage": {
                "description": "Usage and billing information",
                "endpoints": [
                    "GET /api/v1/usage/summary - Get usage summary",
                    "GET /api/v1/usage/billable-summary - Get billable summary",
                    "GET /api/v1/usage/estimated_cost - Get estimated costs",
                    "GET /api/v2/usage/cost_by_org - Get costs by organization",
                ],
            },
            "services": {
                "description": "APM service information",
                "endpoints": [
                    "GET /api/v2/services - List services",
                    "GET /api/v2/services/{service} - Get service details",
                    "GET /api/v2/services/{service}/dependencies - Get service dependencies",
                ],
            },
        }

        # Filter by category if specified. Matching is by substring so e.g.
        # "monitor" also selects "monitors".
        if category != "all":
            matching_categories = {k: v for k, v in resources.items() if category in k}
            if not matching_categories:
                return StructuredToolResult(
                    status=ToolResultStatus.ERROR,
                    error=f"Unknown category: {category}. Available: {', '.join(resources.keys())}",
                    params=params,
                )
            resources = matching_categories

        # Format output
        output = ["Available Datadog API Resources", "=" * 40, ""]

        for cat_name, cat_info in resources.items():
            output.append(f"## {cat_name.upper()}")
            output.append(f"Description: {cat_info['description']}")
            output.append("")
            output.append("Endpoints:")
            for endpoint in cat_info["endpoints"]:
                output.append(f"  • {endpoint}")
            output.append("")

        output.append(
            "Note: All endpoints are read-only. Use the appropriate tool with the endpoint path."
        )
        # Fix: the example previously used '/api/v1/monitors', which the
        # endpoint whitelist rejects (the monitors endpoint is singular).
        output.append("Example: datadog_api_get with endpoint='/api/v1/monitor'")

        return StructuredToolResult(
            status=ToolResultStatus.SUCCESS,
            data="\n".join(output),
            params=params,
        )
|