holmesgpt 0.14.0a0__py3-none-any.whl → 0.14.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of holmesgpt has been flagged as possibly problematic.
- holmes/__init__.py +1 -1
- holmes/clients/robusta_client.py +15 -4
- holmes/common/env_vars.py +8 -1
- holmes/config.py +66 -139
- holmes/core/investigation.py +1 -2
- holmes/core/llm.py +295 -52
- holmes/core/models.py +2 -0
- holmes/core/safeguards.py +4 -4
- holmes/core/supabase_dal.py +14 -8
- holmes/core/tool_calling_llm.py +110 -102
- holmes/core/tools.py +260 -25
- holmes/core/tools_utils/data_types.py +81 -0
- holmes/core/tools_utils/tool_context_window_limiter.py +33 -0
- holmes/core/tools_utils/tool_executor.py +2 -2
- holmes/core/toolset_manager.py +150 -3
- holmes/core/transformers/__init__.py +23 -0
- holmes/core/transformers/base.py +62 -0
- holmes/core/transformers/llm_summarize.py +174 -0
- holmes/core/transformers/registry.py +122 -0
- holmes/core/transformers/transformer.py +31 -0
- holmes/main.py +5 -0
- holmes/plugins/prompts/_fetch_logs.jinja2 +10 -1
- holmes/plugins/toolsets/aks-node-health.yaml +46 -0
- holmes/plugins/toolsets/aks.yaml +64 -0
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +17 -15
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +8 -4
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +3 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +3 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +4 -4
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +7 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +7 -3
- holmes/plugins/toolsets/bash/bash_toolset.py +6 -6
- holmes/plugins/toolsets/bash/common/bash.py +7 -7
- holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
- holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
- holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_general.py +344 -205
- holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +189 -17
- holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +95 -30
- holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +10 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +20 -20
- holmes/plugins/toolsets/git.py +21 -21
- holmes/plugins/toolsets/grafana/common.py +2 -2
- holmes/plugins/toolsets/grafana/toolset_grafana.py +4 -4
- holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +5 -4
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +123 -23
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +165 -307
- holmes/plugins/toolsets/internet/internet.py +3 -3
- holmes/plugins/toolsets/internet/notion.py +3 -3
- holmes/plugins/toolsets/investigator/core_investigation.py +3 -3
- holmes/plugins/toolsets/kafka.py +18 -18
- holmes/plugins/toolsets/kubernetes.yaml +58 -0
- holmes/plugins/toolsets/kubernetes_logs.py +6 -6
- holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
- holmes/plugins/toolsets/logging_utils/logging_api.py +1 -1
- holmes/plugins/toolsets/mcp/toolset_mcp.py +4 -4
- holmes/plugins/toolsets/newrelic.py +5 -5
- holmes/plugins/toolsets/opensearch/opensearch.py +5 -5
- holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +10 -10
- holmes/plugins/toolsets/prometheus/prometheus.py +841 -351
- holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +39 -2
- holmes/plugins/toolsets/prometheus/utils.py +28 -0
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +6 -4
- holmes/plugins/toolsets/robusta/robusta.py +10 -10
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +4 -4
- holmes/plugins/toolsets/servicenow/servicenow.py +6 -6
- holmes/plugins/toolsets/utils.py +88 -0
- holmes/utils/config_utils.py +91 -0
- holmes/utils/env.py +7 -0
- holmes/utils/holmes_status.py +2 -1
- holmes/utils/sentry_helper.py +41 -0
- holmes/utils/stream.py +9 -0
- {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/METADATA +10 -14
- {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/RECORD +82 -72
- {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/LICENSE.txt +0 -0
- {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/WHEEL +0 -0
- {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/entry_points.txt +0 -0
holmes/plugins/toolsets/datadog/toolset_datadog_general.py

@@ -13,7 +13,7 @@ from holmes.core.tools import (
     ToolParameter,
     Toolset,
     StructuredToolResult,
-
+    StructuredToolResultStatus,
     ToolsetTag,
 )
 from holmes.plugins.toolsets.consts import TOOLSET_CONFIG_MISSING_ERROR

@@ -23,85 +23,120 @@ from holmes.plugins.toolsets.datadog.datadog_api import (
     execute_datadog_http_request,
     get_headers,
     MAX_RETRY_COUNT_ON_RATE_LIMIT,
+    preprocess_time_fields,
+    enhance_error_message,
+    fetch_openapi_spec,
 )
 from holmes.plugins.toolsets.utils import toolset_name_for_one_liner

 # Maximum response size in bytes (10MB)
 MAX_RESPONSE_SIZE = 10 * 1024 * 1024

-# Whitelisted API endpoint patterns
+# Whitelisted API endpoint patterns with optional hints
+# Format: (pattern, hint) - hint is empty string if no special instructions
 WHITELISTED_ENDPOINTS = [
     # Monitors
-    r"^/api/v\d+/monitor(/search)?$",
-    r"^/api/v\d+/monitor/\d+(/downtimes)?$",
-    r"^/api/v\d+/monitor/groups/search$",
+    (r"^/api/v\d+/monitor(/search)?$", ""),
+    (r"^/api/v\d+/monitor/\d+(/downtimes)?$", ""),
+    (r"^/api/v\d+/monitor/groups/search$", ""),
     # Dashboards
-    r"^/api/v\d+/dashboard(/lists)?$",
-    r"^/api/v\d+/dashboard/[^/]+$",
-    r"^/api/v\d+/dashboard/public/[^/]+$",
-    # SLOs
-    r"^/api/v\d+/slo(/search)?$",
-    r"^/api/v\d+/slo/[^/]+(/history)?$",
-    r"^/api/v\d+/slo/[^/]+/corrections$",
+    (r"^/api/v\d+/dashboard(/lists)?$", ""),
+    (r"^/api/v\d+/dashboard/[^/]+$", ""),
+    (r"^/api/v\d+/dashboard/public/[^/]+$", ""),
+    # SLOs
+    (r"^/api/v\d+/slo(/search)?$", ""),
+    (r"^/api/v\d+/slo/[^/]+(/history)?$", ""),
+    (r"^/api/v\d+/slo/[^/]+/corrections$", ""),
     # Events
-
-
+    (
+        r"^/api/v\d+/events$",
+        "Use time range parameters 'start' and 'end' as Unix timestamps",
+    ),
+    (r"^/api/v\d+/events/\d+$", ""),
     # Incidents
-    r"^/api/v\d+/incidents(/search)?$",
-    r"^/api/v\d+/incidents/[^/]+$",
-    r"^/api/v\d+/incidents/[^/]+/attachments$",
-    r"^/api/v\d+/incidents/[^/]+/connected_integrations$",
-    r"^/api/v\d+/incidents/[^/]+/relationships$",
-    r"^/api/v\d+/incidents/[^/]+/timeline$",
+    (r"^/api/v\d+/incidents(/search)?$", ""),
+    (r"^/api/v\d+/incidents/[^/]+$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/attachments$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/connected_integrations$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/relationships$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/timeline$", ""),
     # Synthetics
-    r"^/api/v\d+/synthetics/tests(/search)?$",
-    r"^/api/v\d+/synthetics/tests/[^/]+$",
-    r"^/api/v\d+/synthetics/tests/[^/]+/results$",
-    r"^/api/v\d+/synthetics/tests/browser/[^/]+/results$",
-    r"^/api/v\d+/synthetics/tests/api/[^/]+/results$",
-    r"^/api/v\d+/synthetics/locations$",
-    # Security
-    r"^/api/v\d+/security_monitoring/rules(/search)?$",
-    r"^/api/v\d+/security_monitoring/rules/[^/]+$",
-    r"^/api/v\d+/security_monitoring/signals(/search)?$",
-    r"^/api/v\d+/security_monitoring/signals/[^/]+$",
-    #
-    r"^/api/v\d+/services$",
-    r"^/api/v\d+/services/[^/]+$",
-    r"^/api/v\d+/services/[^/]+/dependencies$",
+    (r"^/api/v\d+/synthetics/tests(/search)?$", ""),
+    (r"^/api/v\d+/synthetics/tests/[^/]+$", ""),
+    (r"^/api/v\d+/synthetics/tests/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/tests/browser/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/tests/api/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/locations$", ""),
+    # Security
+    (r"^/api/v\d+/security_monitoring/rules(/search)?$", ""),
+    (r"^/api/v\d+/security_monitoring/rules/[^/]+$", ""),
+    (r"^/api/v\d+/security_monitoring/signals(/search)?$", ""),
+    (r"^/api/v\d+/security_monitoring/signals/[^/]+$", ""),
+    # Services
+    (r"^/api/v\d+/services$", ""),
+    (r"^/api/v\d+/services/[^/]+$", ""),
+    (r"^/api/v\d+/services/[^/]+/dependencies$", ""),
+    (r"^/api/v\d+/service_dependencies$", ""),
     # Hosts
-    r"^/api/v\d+/hosts$",
-    r"^/api/v\d+/hosts/totals$",
-    r"^/api/v\d+/hosts/[^/]+$",
-    # Usage
-    r"^/api/v\d+/usage/[^/]+$",
-    r"^/api/v\d+/usage/summary$",
-    r"^/api/v\d+/usage/billable-summary$",
-    r"^/api/v\d+/usage/cost_by_org$",
-    r"^/api/v\d+/usage/estimated_cost$",
+    (r"^/api/v\d+/hosts$", ""),
+    (r"^/api/v\d+/hosts/totals$", ""),
+    (r"^/api/v\d+/hosts/[^/]+$", ""),
+    # Usage
+    (r"^/api/v\d+/usage/[^/]+$", ""),
+    (r"^/api/v\d+/usage/summary$", ""),
+    (r"^/api/v\d+/usage/billable-summary$", ""),
+    (r"^/api/v\d+/usage/cost_by_org$", ""),
+    (r"^/api/v\d+/usage/estimated_cost$", ""),
     # Processes
-    r"^/api/v\d+/processes$",
+    (r"^/api/v\d+/processes$", ""),
     # Tags
-    r"^/api/v\d+/tags/hosts(/[^/]+)?$",
+    (r"^/api/v\d+/tags/hosts(/[^/]+)?$", ""),
     # Notebooks
-    r"^/api/v\d+/notebooks$",
-    r"^/api/v\d+/notebooks/\d+$",
-    # Service Dependencies
-    r"^/api/v\d+/service_dependencies$",
+    (r"^/api/v\d+/notebooks$", ""),
+    (r"^/api/v\d+/notebooks/\d+$", ""),
     # Organization
-    r"^/api/v\d+/org$",
-    r"^/api/v\d+/org/[^/]+$",
-    # Users
-    r"^/api/v\d+/users$",
-    r"^/api/v\d+/users/[^/]+$",
-    # Teams
-    r"^/api/v\d+/teams$",
-    r"^/api/v\d+/teams/[^/]+$",
-    #
-
-
-
-
+    (r"^/api/v\d+/org$", ""),
+    (r"^/api/v\d+/org/[^/]+$", ""),
+    # Users
+    (r"^/api/v\d+/users$", ""),
+    (r"^/api/v\d+/users/[^/]+$", ""),
+    # Teams
+    (r"^/api/v\d+/teams$", ""),
+    (r"^/api/v\d+/teams/[^/]+$", ""),
+    # Logs
+    (
+        r"^/api/v1/logs/config/indexes$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v2/logs/events$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. Use RFC3339 timestamps (e.g., '2024-01-01T00:00:00Z')",
+    ),
+    (
+        r"^/api/v2/logs/events/search$",
+        'When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. RFC3339 time format. Example: {"filter": {"from": "2024-01-01T00:00:00Z", "to": "2024-01-02T00:00:00Z", "query": "*"}}',
+    ),
+    (
+        r"^/api/v2/logs/analytics/aggregate$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. Do not include 'sort' parameter",
+    ),
+    # Metrics
+    (
+        r"^/api/v\d+/metrics$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v\d+/metrics/[^/]+$",
+        "When available, prefer using get_datadog_metric_metadata tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v\d+/query$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset. Use 'from' and 'to' as Unix timestamps",
+    ),
+    (
+        r"^/api/v\d+/search/query$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
 ]

 # Blacklisted path segments that indicate write operations

@@ -146,9 +181,13 @@ WHITELISTED_POST_ENDPOINTS = [
     r"^/api/v\d+/security_monitoring/rules/search$",
     r"^/api/v\d+/security_monitoring/signals/search$",
     r"^/api/v\d+/logs/events/search$",
+    r"^/api/v2/logs/events/search$",
+    r"^/api/v2/logs/analytics/aggregate$",
     r"^/api/v\d+/spans/events/search$",
     r"^/api/v\d+/rum/events/search$",
     r"^/api/v\d+/audit/events/search$",
+    r"^/api/v\d+/query$",
+    r"^/api/v\d+/search/query$",
 ]


@@ -165,11 +204,12 @@ class DatadogGeneralToolset(Toolset):
     """General-purpose Datadog API toolset for read-only operations not covered by specialized toolsets."""

     dd_config: Optional[DatadogGeneralConfig] = None
+    openapi_spec: Optional[Dict[str, Any]] = None

     def __init__(self):
         super().__init__(
             name="datadog/general",
-            description="General-purpose Datadog API access for read-only operations including monitors, dashboards, SLOs, incidents, synthetics, and more",
+            description="General-purpose Datadog API access for read-only operations including monitors, dashboards, SLOs, incidents, synthetics, logs, metrics, and more. Note: For logs and metrics, prefer using the specialized datadog/logs and datadog/metrics toolsets when available as they provide optimized functionality",
             docs_url="https://holmesgpt.dev/data-sources/builtin-toolsets/datadog/",
             icon_url="https://imgix.datadoghq.com//img/about/presskit/DDlogo.jpg",
             prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],

@@ -190,11 +230,27 @@ class DatadogGeneralToolset(Toolset):
     def prerequisites_callable(self, config: dict[str, Any]) -> Tuple[bool, str]:
         """Check prerequisites with configuration."""
         if not config:
-            return
+            return (
+                False,
+                "Datadog general toolset requires configuration. Please provide: dd_api_key, dd_app_key, and site_api_url in your Holmes config. For more details, see https://holmesgpt.dev/data-sources/builtin-toolsets/datadog/",
+            )

         try:
             dd_config = DatadogGeneralConfig(**config)
             self.dd_config = dd_config
+
+            # Fetch OpenAPI spec on startup for better error messages and documentation
+            logging.debug("Fetching Datadog OpenAPI specification...")
+            self.openapi_spec = fetch_openapi_spec(version="both")
+            if self.openapi_spec:
+                logging.info(
+                    f"Successfully loaded OpenAPI spec with {len(self.openapi_spec.get('paths', {}))} endpoints"
+                )
+            else:
+                logging.warning(
+                    "Could not fetch OpenAPI spec; enhanced error messages will be limited"
+                )
+
             success, error_msg = self._perform_healthcheck(dd_config)
             return success, error_msg
         except Exception as e:

@@ -205,7 +261,8 @@ class DatadogGeneralToolset(Toolset):
         """Perform health check on Datadog API."""
         try:
             logging.info("Performing Datadog general API configuration healthcheck...")
-
+            base_url = str(dd_config.site_api_url).rstrip("/")
+            url = f"{base_url}/api/v1/validate"
             headers = get_headers(dd_config)

             data = execute_datadog_http_request(

@@ -217,7 +274,7 @@ class DatadogGeneralToolset(Toolset):
             )

             if data.get("valid", False):
-                logging.
+                logging.debug("Datadog general API healthcheck completed successfully")
                 return True, ""
             else:
                 error_msg = "Datadog API key validation failed"

@@ -266,7 +323,7 @@ def is_endpoint_allowed(
         return False, f"POST method not allowed for endpoint: {path}"

     elif method == "GET":
-        for pattern in WHITELISTED_ENDPOINTS:
+        for pattern, _ in WHITELISTED_ENDPOINTS:
             if re.match(pattern, path):
                 return True, ""

@@ -280,6 +337,23 @@ def is_endpoint_allowed(
     return False, f"HTTP method {method} not allowed for {path}"


+def get_endpoint_hint(endpoint: str) -> str:
+    """
+    Get hint for an endpoint if available.
+
+    Returns:
+        Hint string or empty string if no hint
+    """
+    parsed = urlparse(endpoint)
+    path = parsed.path
+
+    for pattern, hint in WHITELISTED_ENDPOINTS:
+        if re.match(pattern, path):
+            return hint
+
+    return ""
+
+
 class BaseDatadogGeneralTool(Tool):
     """Base class for general Datadog API tools."""

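The new get_endpoint_hint helper simply walks the (pattern, hint) pairs in WHITELISTED_ENDPOINTS and returns the first matching hint. A minimal, self-contained sketch of that behaviour against a trimmed-down copy of the whitelist (illustrative only, not the packaged module):

import re
from urllib.parse import urlparse

# Trimmed copy of the new (pattern, hint) structure, for illustration only.
SAMPLE_WHITELIST = [
    (r"^/api/v\d+/monitor(/search)?$", ""),
    (
        r"^/api/v\d+/events$",
        "Use time range parameters 'start' and 'end' as Unix timestamps",
    ),
]

def get_endpoint_hint(endpoint: str) -> str:
    # Match the path portion of the endpoint against each whitelisted pattern.
    path = urlparse(endpoint).path
    for pattern, hint in SAMPLE_WHITELIST:
        if re.match(pattern, path):
            return hint
    return ""

print(get_endpoint_hint("/api/v1/events?start=1704067200"))
# -> "Use time range parameters 'start' and 'end' as Unix timestamps"
print(get_endpoint_hint("/api/v1/monitor"))
# -> "" (whitelisted, but no special instructions)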
@@ -292,7 +366,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="datadog_api_get",
-            description="Make a GET request to a Datadog API endpoint for read-only operations",
+            description="[datadog/general toolset] Make a GET request to a Datadog API endpoint for read-only operations",
             parameters={
                 "endpoint": ToolParameter(
                     description="The API endpoint path (e.g., '/api/v1/monitors', '/api/v2/events')",

@@ -300,7 +374,14 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                     required=True,
                 ),
                 "query_params": ToolParameter(
-                    description="Query parameters as a dictionary
+                    description="""Query parameters as a dictionary.
+Time format requirements:
+- v1 API: Unix timestamps in seconds (e.g., {'start': 1704067200, 'end': 1704153600})
+- v2 API: RFC3339 format (e.g., {'from': '2024-01-01T00:00:00Z', 'to': '2024-01-02T00:00:00Z'})
+- Relative times like '-24h', 'now', '-7d' will be auto-converted to proper format
+
+Example for events: {'start': 1704067200, 'end': 1704153600}
+Example for monitors: {'name': 'my-monitor', 'tags': 'env:prod'}""",
                     type="object",
                     required=False,
                 ),

@@ -333,7 +414,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):

         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=TOOLSET_CONFIG_MISSING_ERROR,
                 params=params,
             )

@@ -350,7 +431,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         if not is_allowed:
             logging.error(f"Endpoint validation failed: {error_msg}")
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Endpoint validation failed: {error_msg}",
                 params=params,
             )

@@ -365,11 +446,14 @@ class DatadogAPIGet(BaseDatadogGeneralTool):

         logging.info(f"Full API URL: {url}")

+        # Preprocess time fields if any
+        processed_params = preprocess_time_fields(query_params, endpoint)
+
         # Execute request
         response = execute_datadog_http_request(
             url=url,
             headers=headers,
-            payload_or_params=
+            payload_or_params=processed_params,
             timeout=self.toolset.dd_config.request_timeout,
             method="GET",
         )
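preprocess_time_fields itself lives in datadog_api.py and its body is not part of this excerpt. Based only on the parameter descriptions above (v1 endpoints take Unix seconds, v2 endpoints take RFC3339, and relative values such as '-24h' or 'now' are auto-converted), a rough sketch of the kind of normalization it performs might look like the following. This is a hypothetical helper (to_unix_seconds) for illustration, not the packaged implementation:

import re
import time
from datetime import datetime, timedelta, timezone

def to_unix_seconds(value):
    # Hypothetical sketch: normalize 'now' / '-24h' / '-7d' style values to Unix
    # seconds, the format v1 endpoints such as /api/v1/events expect.
    if isinstance(value, (int, float)):
        return int(value)
    if value == "now":
        return int(time.time())
    match = re.fullmatch(r"-(\d+)([hd])", value)
    if match:
        amount, unit = int(match.group(1)), match.group(2)
        delta = timedelta(hours=amount) if unit == "h" else timedelta(days=amount)
        return int((datetime.now(timezone.utc) - delta).timestamp())
    return int(value)  # assume it is already a Unix timestamp passed as a string

query_params = {"start": "-24h", "end": "now"}
print({k: to_unix_seconds(v) for k, v in query_params.items()})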
@@ -381,13 +465,13 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                 > self.toolset.dd_config.max_response_size
             ):
                 return StructuredToolResult(
-                    status=
+                    status=StructuredToolResultStatus.ERROR,
                     error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
                     params=params,
                 )

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.SUCCESS,
                 data=response_str,
                 params=params,
             )

@@ -403,11 +487,16 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                 )
             elif e.status_code == 404:
                 error_msg = f"Endpoint not found: {endpoint}"
+            elif e.status_code == 400:
+                # Use enhanced error message for 400 errors
+                error_msg = enhance_error_message(
+                    e, endpoint, "GET", str(self.toolset.dd_config.site_api_url)
+                )
             else:
                 error_msg = f"API error {e.status_code}: {str(e)}"

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
                 params=params,
                 invocation=json.dumps({"url": url, "params": query_params})

@@ -418,7 +507,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         except Exception as e:
             logging.exception(f"Failed to query Datadog API: {params}", exc_info=True)
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
                 params=params,
             )

@@ -430,7 +519,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="datadog_api_post_search",
-            description="Make a POST request to Datadog search/query endpoints for complex filtering",
+            description="[datadog/general toolset] Make a POST request to Datadog search/query endpoints for complex filtering",
             parameters={
                 "endpoint": ToolParameter(
                     description="The search API endpoint (e.g., '/api/v2/monitor/search', '/api/v2/events/search')",

@@ -438,7 +527,29 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                     required=True,
                 ),
                 "body": ToolParameter(
-                    description="Request body for the search/filter operation
+                    description="""Request body for the search/filter operation.
+Time format requirements:
+- v1 API: Unix timestamps (e.g., 1704067200)
+- v2 API: RFC3339 format (e.g., '2024-01-01T00:00:00Z')
+- Relative times like '-24h', 'now', '-7d' will be auto-converted
+
+Example for logs search:
+{
+    "filter": {
+        "from": "2024-01-01T00:00:00Z",
+        "to": "2024-01-02T00:00:00Z",
+        "query": "*"
+    },
+    "sort": "-timestamp",
+    "page": {"limit": 50}
+}
+
+Example for monitor search:
+{
+    "query": "env:production",
+    "page": 0,
+    "per_page": 20
+}""",
                     type="object",
                     required=True,
                 ),

@@ -469,7 +580,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):

         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=TOOLSET_CONFIG_MISSING_ERROR,
                 params=params,
             )

@@ -486,7 +597,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         if not is_allowed:
             logging.error(f"Endpoint validation failed: {error_msg}")
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Endpoint validation failed: {error_msg}",
                 params=params,
             )

@@ -501,11 +612,14 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):

         logging.info(f"Full API URL: {url}")

+        # Preprocess time fields if any
+        processed_body = preprocess_time_fields(body, endpoint)
+
         # Execute request
         response = execute_datadog_http_request(
             url=url,
             headers=headers,
-            payload_or_params=
+            payload_or_params=processed_body,
             timeout=self.toolset.dd_config.request_timeout,
             method="POST",
         )

@@ -517,13 +631,13 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                 > self.toolset.dd_config.max_response_size
             ):
                 return StructuredToolResult(
-                    status=
+                    status=StructuredToolResultStatus.ERROR,
                     error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
                     params=params,
                 )

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.SUCCESS,
                 data=response_str,
                 params=params,
             )

@@ -539,11 +653,16 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                 )
             elif e.status_code == 404:
                 error_msg = f"Endpoint not found: {endpoint}"
+            elif e.status_code == 400:
+                # Use enhanced error message for 400 errors
+                error_msg = enhance_error_message(
+                    e, endpoint, "POST", str(self.toolset.dd_config.site_api_url)
+                )
             else:
                 error_msg = f"API error {e.status_code}: {str(e)}"

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
                 params=params,
                 invocation=json.dumps({"url": url, "body": body}) if url else None,

@@ -552,7 +671,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         except Exception as e:
             logging.exception(f"Failed to query Datadog API: {params}", exc_info=True)
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
                 params=params,
             )

@@ -564,10 +683,10 @@ class ListDatadogAPIResources(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="list_datadog_api_resources",
-            description="List available Datadog API resources and endpoints that can be accessed",
+            description="[datadog/general toolset] List available Datadog API resources and endpoints that can be accessed",
             parameters={
-                "
-                    description="
+                "search_regex": ToolParameter(
+                    description="Optional regex pattern to filter endpoints (e.g., 'monitor', 'logs|metrics', 'security.*signals', 'v2/.*search$'). If not provided, shows all endpoints.",
                     type="string",
                     required=False,
                 ),

@@ -577,145 +696,165 @@ class ListDatadogAPIResources(BaseDatadogGeneralTool):

     def get_parameterized_one_liner(self, params: dict) -> str:
         """Get a one-liner description of the tool invocation."""
-
-        return f"{toolset_name_for_one_liner(self.toolset.name)}: List API Resources ({
+        search = params.get("search_regex", "all")
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: List API Resources (search: {search})"

     def _invoke(
         self, params: dict, user_approved: bool = False
     ) -> StructuredToolResult:
         """List available API resources."""
-
+        search_regex = params.get("search_regex", "")

         logging.info("=" * 60)
         logging.info("ListDatadogAPIResources Tool Invocation:")
-        logging.info(f"
+        logging.info(f" Search regex: {search_regex or 'None (showing all)'}")
+        logging.info(f" OpenAPI Spec Loaded: {self.toolset.openapi_spec is not None}")
         logging.info("=" * 60)

-        #
-
-            "monitors": {
-                "description": "Monitor management and alerting",
-                "endpoints": [
-                    "GET /api/v1/monitor - List all monitors",
-                    "GET /api/v1/monitor/{id} - Get a monitor by ID",
-                    "POST /api/v1/monitor/search - Search monitors",
-                    "GET /api/v1/monitor/groups/search - Search monitor groups",
-                ],
-            },
-            "dashboards": {
-                "description": "Dashboard and visualization management",
-                "endpoints": [
-                    "GET /api/v1/dashboard - List all dashboards",
-                    "GET /api/v1/dashboard/{id} - Get a dashboard by ID",
-                    "POST /api/v1/dashboard/lists - List dashboard lists",
-                    "GET /api/v1/dashboard/public/{token} - Get public dashboard",
-                ],
-            },
-            "slos": {
-                "description": "Service Level Objectives",
-                "endpoints": [
-                    "GET /api/v1/slo - List all SLOs",
-                    "GET /api/v1/slo/{id} - Get an SLO by ID",
-                    "GET /api/v1/slo/{id}/history - Get SLO history",
-                    "POST /api/v1/slo/search - Search SLOs",
-                    "GET /api/v1/slo/{id}/corrections - Get SLO corrections",
-                ],
-            },
-            "incidents": {
-                "description": "Incident management",
-                "endpoints": [
-                    "GET /api/v2/incidents - List incidents",
-                    "GET /api/v2/incidents/{id} - Get incident details",
-                    "POST /api/v2/incidents/search - Search incidents",
-                    "GET /api/v2/incidents/{id}/timeline - Get incident timeline",
-                    "GET /api/v2/incidents/{id}/attachments - Get incident attachments",
-                ],
-            },
-            "synthetics": {
-                "description": "Synthetic monitoring and testing",
-                "endpoints": [
-                    "GET /api/v1/synthetics/tests - List synthetic tests",
-                    "GET /api/v1/synthetics/tests/{id} - Get test details",
-                    "POST /api/v1/synthetics/tests/search - Search tests",
-                    "GET /api/v1/synthetics/tests/{id}/results - Get test results",
-                    "GET /api/v1/synthetics/locations - List test locations",
-                ],
-            },
-            "security": {
-                "description": "Security monitoring and detection",
-                "endpoints": [
-                    "GET /api/v2/security_monitoring/rules - List security rules",
-                    "GET /api/v2/security_monitoring/rules/{id} - Get rule details",
-                    "POST /api/v2/security_monitoring/rules/search - Search rules",
-                    "POST /api/v2/security_monitoring/signals/search - Search security signals",
-                ],
-            },
-            "hosts": {
-                "description": "Host and infrastructure monitoring",
-                "endpoints": [
-                    "GET /api/v1/hosts - List all hosts",
-                    "GET /api/v1/hosts/{name} - Get host details",
-                    "GET /api/v1/hosts/totals - Get host totals",
-                    "GET /api/v1/tags/hosts - Get host tags",
-                ],
-            },
-            "events": {
-                "description": "Event stream and management",
-                "endpoints": [
-                    "GET /api/v1/events - Query event stream",
-                    "GET /api/v1/events/{id} - Get event details",
-                    "POST /api/v2/events/search - Search events",
-                ],
-            },
-            "usage": {
-                "description": "Usage and billing information",
-                "endpoints": [
-                    "GET /api/v1/usage/summary - Get usage summary",
-                    "GET /api/v1/usage/billable-summary - Get billable summary",
-                    "GET /api/v1/usage/estimated_cost - Get estimated costs",
-                    "GET /api/v2/usage/cost_by_org - Get costs by organization",
-                ],
-            },
-            "services": {
-                "description": "APM service information",
-                "endpoints": [
-                    "GET /api/v2/services - List services",
-                    "GET /api/v2/services/{service} - Get service details",
-                    "GET /api/v2/services/{service}/dependencies - Get service dependencies",
-                ],
-            },
-        }
+        # Filter endpoints based on regex search
+        matching_endpoints = []

-
-
-
-
+        if search_regex:
+            try:
+                search_pattern = re.compile(search_regex, re.IGNORECASE)
+            except re.error as e:
                 return StructuredToolResult(
-                    status=
-                    error=f"
+                    status=StructuredToolResultStatus.ERROR,
+                    error=f"Invalid regex pattern: {e}",
                     params=params,
                 )
-
+        else:
+            search_pattern = None
+
+        # Build list of matching endpoints
+        for pattern, hint in WHITELISTED_ENDPOINTS:
+            # Create a readable endpoint example from the pattern
+            example_endpoint = pattern.replace(r"^/api/v\d+", "/api/v1")
+            example_endpoint = example_endpoint.replace(r"(/search)?$", "")
+            example_endpoint = example_endpoint.replace(r"(/[^/]+)?$", "/{id}")
+            example_endpoint = example_endpoint.replace(r"/[^/]+$", "/{id}")
+            example_endpoint = example_endpoint.replace(r"/\d+$", "/{id}")
+            example_endpoint = example_endpoint.replace("$", "")
+            example_endpoint = example_endpoint.replace("^", "")
+
+            # Apply search filter if provided
+            if search_pattern and not search_pattern.search(example_endpoint):
+                continue
+
+            # Determine HTTP methods
+            if "search" in pattern or "query" in pattern or "aggregate" in pattern:
+                methods = "POST"
+            elif "/search)?$" in pattern:
+                methods = "GET/POST"
+            else:
+                methods = "GET"

-
-
+            endpoint_info = {
+                "endpoint": example_endpoint,
+                "methods": methods,
+                "hint": hint,
+                "pattern": pattern,
+            }
+            matching_endpoints.append(endpoint_info)

-
-
-
-
-
-
-            output.append(f" • {endpoint}")
-        output.append("")
+        if not matching_endpoints:
+            return StructuredToolResult(
+                status=StructuredToolResultStatus.SUCCESS,
+                data=f"No endpoints found matching regex: {search_regex}",
+                params=params,
+            )

+        # Format output
+        output = ["Available Datadog API Endpoints", "=" * 40]
+
+        if search_regex:
+            output.append(f"Filter: {search_regex}")
+        output.append(f"Found: {len(matching_endpoints)} endpoints")
+        output.append("")
+
+        # List endpoints with spec info if available
+        for info in matching_endpoints:
+            line = f"{info['methods']:8} {info['endpoint']}"
+            if info["hint"]:
+                line += f"\n {info['hint']}"
+
+            # Add OpenAPI spec info for this specific endpoint if available
+            if self.toolset.openapi_spec and "paths" in self.toolset.openapi_spec:
+                # Try to find matching path in OpenAPI spec
+                spec_path = None
+                for path in self.toolset.openapi_spec["paths"].keys():
+                    if re.match(info["pattern"], path):
+                        spec_path = path
+                        break
+
+                if spec_path and spec_path in self.toolset.openapi_spec["paths"]:
+                    path_spec = self.toolset.openapi_spec["paths"][spec_path]
+                    # Add actual OpenAPI schema for the endpoint
+                    for method in ["get", "post", "put", "delete"]:
+                        if method in path_spec:
+                            method_spec = path_spec[method]
+                            line += f"\n\n OpenAPI Schema ({method.upper()}):"
+
+                            # Add summary if available
+                            if "summary" in method_spec:
+                                line += f"\n Summary: {method_spec['summary']}"
+
+                            # Add parameters if available
+                            if "parameters" in method_spec:
+                                line += "\n Parameters:"
+                                for param in method_spec["parameters"]:
+                                    param_info = f"\n - {param.get('name', 'unknown')} ({param.get('in', 'unknown')})"
+                                    if param.get("required", False):
+                                        param_info += " [required]"
+                                    if "description" in param:
+                                        param_info += f": {param['description'][:100]}"
+                                    line += param_info
+
+                            # Add request body schema if available
+                            if "requestBody" in method_spec:
+                                line += "\n Request Body:"
+                                if "content" in method_spec["requestBody"]:
+                                    for content_type, content_spec in method_spec[
+                                        "requestBody"
+                                    ]["content"].items():
+                                        if "schema" in content_spec:
+                                            # Show a compact version of the schema
+                                            schema_str = json.dumps(
+                                                content_spec["schema"], indent=10
+                                            )[:500]
+                                            if (
+                                                len(json.dumps(content_spec["schema"]))
+                                                > 500
+                                            ):
+                                                schema_str += "..."
+                                            line += f"\n Content-Type: {content_type}"
+                                            line += f"\n Schema: {schema_str}"
+
+                            # Add response schema sample if available
+                            if "responses" in method_spec:
+                                if "200" in method_spec["responses"]:
+                                    line += "\n Response (200):"
+                                    resp = method_spec["responses"]["200"]
+                                    if "description" in resp:
+                                        line += f"\n {resp['description']}"
+                            break
+
+            output.append(line)
+
+        output.append("")
         output.append(
             "Note: All endpoints are read-only. Use the appropriate tool with the endpoint path."
         )
         output.append("Example: datadog_api_get with endpoint='/api/v1/monitors'")
+        output.append("")
+        output.append("Search examples:")
+        output.append(" • 'monitor' - find all monitor endpoints")
+        output.append(" • 'logs|metrics' - find logs OR metrics endpoints")
+        output.append(" • 'v2.*search$' - find all v2 search endpoints")
+        output.append(" • 'security.*signals' - find security signals endpoints")

         return StructuredToolResult(
-            status=
+            status=StructuredToolResultStatus.SUCCESS,
             data="\n".join(output),
             params=params,
         )
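The readable endpoint strings in the listing above come from the chain of literal str.replace calls applied to each whitelist pattern, so the conversion is best-effort rather than exact. A standalone trace of that chain, wrapped in a hypothetical to_example helper for illustration (the replace calls are copied from the hunk; the expected outputs were traced by hand):

# Standalone trace of the pattern -> example-endpoint conversion used in _invoke above.
def to_example(pattern: str) -> str:
    example = pattern.replace(r"^/api/v\d+", "/api/v1")
    example = example.replace(r"(/search)?$", "")
    example = example.replace(r"(/[^/]+)?$", "/{id}")
    example = example.replace(r"/[^/]+$", "/{id}")
    example = example.replace(r"/\d+$", "/{id}")
    return example.replace("$", "").replace("^", "")

print(to_example(r"^/api/v\d+/monitor(/search)?$"))  # -> /api/v1/monitor
print(to_example(r"^/api/v\d+/hosts/[^/]+$"))        # -> /api/v1/hosts/{id}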