holmesgpt 0.13.2__py3-none-any.whl → 0.16.2a0__py3-none-any.whl
This diff compares two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- holmes/__init__.py +1 -1
- holmes/clients/robusta_client.py +17 -4
- holmes/common/env_vars.py +40 -1
- holmes/config.py +114 -144
- holmes/core/conversations.py +53 -14
- holmes/core/feedback.py +191 -0
- holmes/core/investigation.py +18 -22
- holmes/core/llm.py +489 -88
- holmes/core/models.py +103 -1
- holmes/core/openai_formatting.py +13 -0
- holmes/core/prompt.py +1 -1
- holmes/core/safeguards.py +4 -4
- holmes/core/supabase_dal.py +293 -100
- holmes/core/tool_calling_llm.py +423 -323
- holmes/core/tools.py +311 -33
- holmes/core/tools_utils/token_counting.py +14 -0
- holmes/core/tools_utils/tool_context_window_limiter.py +57 -0
- holmes/core/tools_utils/tool_executor.py +13 -8
- holmes/core/toolset_manager.py +155 -4
- holmes/core/tracing.py +6 -1
- holmes/core/transformers/__init__.py +23 -0
- holmes/core/transformers/base.py +62 -0
- holmes/core/transformers/llm_summarize.py +174 -0
- holmes/core/transformers/registry.py +122 -0
- holmes/core/transformers/transformer.py +31 -0
- holmes/core/truncation/compaction.py +59 -0
- holmes/core/truncation/dal_truncation_utils.py +23 -0
- holmes/core/truncation/input_context_window_limiter.py +218 -0
- holmes/interactive.py +177 -24
- holmes/main.py +7 -4
- holmes/plugins/prompts/_fetch_logs.jinja2 +26 -1
- holmes/plugins/prompts/_general_instructions.jinja2 +1 -2
- holmes/plugins/prompts/_runbook_instructions.jinja2 +23 -12
- holmes/plugins/prompts/conversation_history_compaction.jinja2 +88 -0
- holmes/plugins/prompts/generic_ask.jinja2 +2 -4
- holmes/plugins/prompts/generic_ask_conversation.jinja2 +2 -1
- holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +2 -1
- holmes/plugins/prompts/generic_investigation.jinja2 +2 -1
- holmes/plugins/prompts/investigation_procedure.jinja2 +48 -0
- holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +2 -1
- holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +2 -1
- holmes/plugins/runbooks/__init__.py +117 -18
- holmes/plugins/runbooks/catalog.json +2 -0
- holmes/plugins/toolsets/__init__.py +21 -8
- holmes/plugins/toolsets/aks-node-health.yaml +46 -0
- holmes/plugins/toolsets/aks.yaml +64 -0
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +26 -36
- holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +0 -1
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +10 -7
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +8 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +8 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +9 -7
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +9 -6
- holmes/plugins/toolsets/bash/bash_toolset.py +10 -13
- holmes/plugins/toolsets/bash/common/bash.py +7 -7
- holmes/plugins/toolsets/cilium.yaml +284 -0
- holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
- holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
- holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_general.py +349 -216
- holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +190 -19
- holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +101 -44
- holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +13 -16
- holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +25 -31
- holmes/plugins/toolsets/git.py +51 -46
- holmes/plugins/toolsets/grafana/common.py +15 -3
- holmes/plugins/toolsets/grafana/grafana_api.py +46 -24
- holmes/plugins/toolsets/grafana/grafana_tempo_api.py +454 -0
- holmes/plugins/toolsets/grafana/loki/instructions.jinja2 +9 -0
- holmes/plugins/toolsets/grafana/loki/toolset_grafana_loki.py +117 -0
- holmes/plugins/toolsets/grafana/toolset_grafana.py +211 -91
- holmes/plugins/toolsets/grafana/toolset_grafana_dashboard.jinja2 +27 -0
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +246 -11
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +653 -293
- holmes/plugins/toolsets/grafana/trace_parser.py +1 -1
- holmes/plugins/toolsets/internet/internet.py +6 -7
- holmes/plugins/toolsets/internet/notion.py +5 -6
- holmes/plugins/toolsets/investigator/core_investigation.py +42 -34
- holmes/plugins/toolsets/kafka.py +25 -36
- holmes/plugins/toolsets/kubernetes.yaml +58 -84
- holmes/plugins/toolsets/kubernetes_logs.py +6 -6
- holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
- holmes/plugins/toolsets/logging_utils/logging_api.py +80 -4
- holmes/plugins/toolsets/mcp/toolset_mcp.py +181 -55
- holmes/plugins/toolsets/newrelic/__init__.py +0 -0
- holmes/plugins/toolsets/newrelic/new_relic_api.py +125 -0
- holmes/plugins/toolsets/newrelic/newrelic.jinja2 +41 -0
- holmes/plugins/toolsets/newrelic/newrelic.py +163 -0
- holmes/plugins/toolsets/opensearch/opensearch.py +10 -17
- holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
- holmes/plugins/toolsets/opensearch/opensearch_ppl_query_docs.jinja2 +1616 -0
- holmes/plugins/toolsets/opensearch/opensearch_query_assist.py +78 -0
- holmes/plugins/toolsets/opensearch/opensearch_query_assist_instructions.jinja2 +223 -0
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +13 -16
- holmes/plugins/toolsets/openshift.yaml +283 -0
- holmes/plugins/toolsets/prometheus/prometheus.py +915 -390
- holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +43 -2
- holmes/plugins/toolsets/prometheus/utils.py +28 -0
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +9 -10
- holmes/plugins/toolsets/robusta/robusta.py +236 -65
- holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +26 -9
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +137 -26
- holmes/plugins/toolsets/service_discovery.py +1 -1
- holmes/plugins/toolsets/servicenow_tables/instructions.jinja2 +83 -0
- holmes/plugins/toolsets/servicenow_tables/servicenow_tables.py +426 -0
- holmes/plugins/toolsets/utils.py +88 -0
- holmes/utils/config_utils.py +91 -0
- holmes/utils/default_toolset_installation_guide.jinja2 +1 -22
- holmes/utils/env.py +7 -0
- holmes/utils/global_instructions.py +75 -10
- holmes/utils/holmes_status.py +2 -1
- holmes/utils/holmes_sync_toolsets.py +0 -2
- holmes/utils/krr_utils.py +188 -0
- holmes/utils/sentry_helper.py +41 -0
- holmes/utils/stream.py +61 -7
- holmes/version.py +34 -14
- holmesgpt-0.16.2a0.dist-info/LICENSE +178 -0
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/METADATA +29 -27
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/RECORD +126 -102
- holmes/core/performance_timing.py +0 -72
- holmes/plugins/toolsets/grafana/tempo_api.py +0 -124
- holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +0 -110
- holmes/plugins/toolsets/newrelic.py +0 -231
- holmes/plugins/toolsets/servicenow/install.md +0 -37
- holmes/plugins/toolsets/servicenow/instructions.jinja2 +0 -3
- holmes/plugins/toolsets/servicenow/servicenow.py +0 -219
- holmesgpt-0.13.2.dist-info/LICENSE.txt +0 -21
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/WHEEL +0 -0
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/entry_points.txt +0 -0
holmes/plugins/toolsets/datadog/toolset_datadog_general.py

```diff
@@ -10,10 +10,11 @@ from urllib.parse import urlparse
 from holmes.core.tools import (
     CallablePrerequisite,
     Tool,
+    ToolInvokeContext,
     ToolParameter,
     Toolset,
     StructuredToolResult,
-
+    StructuredToolResultStatus,
     ToolsetTag,
 )
 from holmes.plugins.toolsets.consts import TOOLSET_CONFIG_MISSING_ERROR
@@ -23,85 +24,120 @@ from holmes.plugins.toolsets.datadog.datadog_api import (
     execute_datadog_http_request,
     get_headers,
     MAX_RETRY_COUNT_ON_RATE_LIMIT,
+    preprocess_time_fields,
+    enhance_error_message,
+    fetch_openapi_spec,
 )
 from holmes.plugins.toolsets.utils import toolset_name_for_one_liner

 # Maximum response size in bytes (10MB)
 MAX_RESPONSE_SIZE = 10 * 1024 * 1024

-# Whitelisted API endpoint patterns
+# Whitelisted API endpoint patterns with optional hints
+# Format: (pattern, hint) - hint is empty string if no special instructions
 WHITELISTED_ENDPOINTS = [
     # Monitors
-    r"^/api/v\d+/monitor(/search)?$",
-    r"^/api/v\d+/monitor/\d+(/downtimes)?$",
-    r"^/api/v\d+/monitor/groups/search$",
+    (r"^/api/v\d+/monitor(/search)?$", ""),
+    (r"^/api/v\d+/monitor/\d+(/downtimes)?$", ""),
+    (r"^/api/v\d+/monitor/groups/search$", ""),
     # Dashboards
-    r"^/api/v\d+/dashboard(/lists)?$",
-    r"^/api/v\d+/dashboard/[^/]+$",
-    r"^/api/v\d+/dashboard/public/[^/]+$",
-    # SLOs
-    r"^/api/v\d+/slo(/search)?$",
-    r"^/api/v\d+/slo/[^/]+(/history)?$",
-    r"^/api/v\d+/slo/[^/]+/corrections$",
+    (r"^/api/v\d+/dashboard(/lists)?$", ""),
+    (r"^/api/v\d+/dashboard/[^/]+$", ""),
+    (r"^/api/v\d+/dashboard/public/[^/]+$", ""),
+    # SLOs
+    (r"^/api/v\d+/slo(/search)?$", ""),
+    (r"^/api/v\d+/slo/[^/]+(/history)?$", ""),
+    (r"^/api/v\d+/slo/[^/]+/corrections$", ""),
     # Events
-
-
+    (
+        r"^/api/v\d+/events$",
+        "Use time range parameters 'start' and 'end' as Unix timestamps",
+    ),
+    (r"^/api/v\d+/events/\d+$", ""),
     # Incidents
-    r"^/api/v\d+/incidents(/search)?$",
-    r"^/api/v\d+/incidents/[^/]+$",
-    r"^/api/v\d+/incidents/[^/]+/attachments$",
-    r"^/api/v\d+/incidents/[^/]+/connected_integrations$",
-    r"^/api/v\d+/incidents/[^/]+/relationships$",
-    r"^/api/v\d+/incidents/[^/]+/timeline$",
+    (r"^/api/v\d+/incidents(/search)?$", ""),
+    (r"^/api/v\d+/incidents/[^/]+$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/attachments$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/connected_integrations$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/relationships$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/timeline$", ""),
     # Synthetics
-    r"^/api/v\d+/synthetics/tests(/search)?$",
-    r"^/api/v\d+/synthetics/tests/[^/]+$",
-    r"^/api/v\d+/synthetics/tests/[^/]+/results$",
-    r"^/api/v\d+/synthetics/tests/browser/[^/]+/results$",
-    r"^/api/v\d+/synthetics/tests/api/[^/]+/results$",
-    r"^/api/v\d+/synthetics/locations$",
-    # Security
-    r"^/api/v\d+/security_monitoring/rules(/search)?$",
-    r"^/api/v\d+/security_monitoring/rules/[^/]+$",
-    r"^/api/v\d+/security_monitoring/signals(/search)?$",
-    r"^/api/v\d+/security_monitoring/signals/[^/]+$",
-    #
-    r"^/api/v\d+/services$",
-    r"^/api/v\d+/services/[^/]+$",
-    r"^/api/v\d+/services/[^/]+/dependencies$",
+    (r"^/api/v\d+/synthetics/tests(/search)?$", ""),
+    (r"^/api/v\d+/synthetics/tests/[^/]+$", ""),
+    (r"^/api/v\d+/synthetics/tests/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/tests/browser/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/tests/api/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/locations$", ""),
+    # Security
+    (r"^/api/v\d+/security_monitoring/rules(/search)?$", ""),
+    (r"^/api/v\d+/security_monitoring/rules/[^/]+$", ""),
+    (r"^/api/v\d+/security_monitoring/signals(/search)?$", ""),
+    (r"^/api/v\d+/security_monitoring/signals/[^/]+$", ""),
+    # Services
+    (r"^/api/v\d+/services$", ""),
+    (r"^/api/v\d+/services/[^/]+$", ""),
+    (r"^/api/v\d+/services/[^/]+/dependencies$", ""),
+    (r"^/api/v\d+/service_dependencies$", ""),
     # Hosts
-    r"^/api/v\d+/hosts$",
-    r"^/api/v\d+/hosts/totals$",
-    r"^/api/v\d+/hosts/[^/]+$",
-    # Usage
-    r"^/api/v\d+/usage/[^/]+$",
-    r"^/api/v\d+/usage/summary$",
-    r"^/api/v\d+/usage/billable-summary$",
-    r"^/api/v\d+/usage/cost_by_org$",
-    r"^/api/v\d+/usage/estimated_cost$",
+    (r"^/api/v\d+/hosts$", ""),
+    (r"^/api/v\d+/hosts/totals$", ""),
+    (r"^/api/v\d+/hosts/[^/]+$", ""),
+    # Usage
+    (r"^/api/v\d+/usage/[^/]+$", ""),
+    (r"^/api/v\d+/usage/summary$", ""),
+    (r"^/api/v\d+/usage/billable-summary$", ""),
+    (r"^/api/v\d+/usage/cost_by_org$", ""),
+    (r"^/api/v\d+/usage/estimated_cost$", ""),
     # Processes
-    r"^/api/v\d+/processes$",
+    (r"^/api/v\d+/processes$", ""),
     # Tags
-    r"^/api/v\d+/tags/hosts(/[^/]+)?$",
+    (r"^/api/v\d+/tags/hosts(/[^/]+)?$", ""),
     # Notebooks
-    r"^/api/v\d+/notebooks$",
-    r"^/api/v\d+/notebooks/\d+$",
-    # Service Dependencies
-    r"^/api/v\d+/service_dependencies$",
+    (r"^/api/v\d+/notebooks$", ""),
+    (r"^/api/v\d+/notebooks/\d+$", ""),
     # Organization
-    r"^/api/v\d+/org$",
-    r"^/api/v\d+/org/[^/]+$",
-    # Users
-    r"^/api/v\d+/users$",
-    r"^/api/v\d+/users/[^/]+$",
-    # Teams
-    r"^/api/v\d+/teams$",
-    r"^/api/v\d+/teams/[^/]+$",
-    #
-
-
-
-
+    (r"^/api/v\d+/org$", ""),
+    (r"^/api/v\d+/org/[^/]+$", ""),
+    # Users
+    (r"^/api/v\d+/users$", ""),
+    (r"^/api/v\d+/users/[^/]+$", ""),
+    # Teams
+    (r"^/api/v\d+/teams$", ""),
+    (r"^/api/v\d+/teams/[^/]+$", ""),
+    # Logs
+    (
+        r"^/api/v1/logs/config/indexes$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v2/logs/events$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. Use RFC3339 timestamps (e.g., '2024-01-01T00:00:00Z')",
+    ),
+    (
+        r"^/api/v2/logs/events/search$",
+        'When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. RFC3339 time format. Example: {"filter": {"from": "2024-01-01T00:00:00Z", "to": "2024-01-02T00:00:00Z", "query": "*"}}',
+    ),
+    (
+        r"^/api/v2/logs/analytics/aggregate$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. Do not include 'sort' parameter",
+    ),
+    # Metrics
+    (
+        r"^/api/v\d+/metrics$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v\d+/metrics/[^/]+$",
+        "When available, prefer using get_datadog_metric_metadata tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v\d+/query$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset. Use 'from' and 'to' as Unix timestamps",
+    ),
+    (
+        r"^/api/v\d+/search/query$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
 ]

 # Blacklisted path segments that indicate write operations
@@ -146,9 +182,13 @@ WHITELISTED_POST_ENDPOINTS = [
     r"^/api/v\d+/security_monitoring/rules/search$",
     r"^/api/v\d+/security_monitoring/signals/search$",
     r"^/api/v\d+/logs/events/search$",
+    r"^/api/v2/logs/events/search$",
+    r"^/api/v2/logs/analytics/aggregate$",
     r"^/api/v\d+/spans/events/search$",
     r"^/api/v\d+/rum/events/search$",
     r"^/api/v\d+/audit/events/search$",
+    r"^/api/v\d+/query$",
+    r"^/api/v\d+/search/query$",
 ]


@@ -165,12 +205,13 @@ class DatadogGeneralToolset(Toolset):
     """General-purpose Datadog API toolset for read-only operations not covered by specialized toolsets."""

     dd_config: Optional[DatadogGeneralConfig] = None
+    openapi_spec: Optional[Dict[str, Any]] = None

     def __init__(self):
         super().__init__(
             name="datadog/general",
-            description="General-purpose Datadog API access for read-only operations including monitors, dashboards, SLOs, incidents, synthetics, and more",
-            docs_url="https://
+            description="General-purpose Datadog API access for read-only operations including monitors, dashboards, SLOs, incidents, synthetics, logs, metrics, and more. Note: For logs and metrics, prefer using the specialized datadog/logs and datadog/metrics toolsets when available as they provide optimized functionality",
+            docs_url="https://holmesgpt.dev/data-sources/builtin-toolsets/datadog/",
             icon_url="https://imgix.datadoghq.com//img/about/presskit/DDlogo.jpg",
             prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],
             tools=[
@@ -178,7 +219,6 @@ class DatadogGeneralToolset(Toolset):
                 DatadogAPIPostSearch(toolset=self),
                 ListDatadogAPIResources(toolset=self),
             ],
-            experimental=True,
             tags=[ToolsetTag.CORE],
         )
         template_file_path = os.path.abspath(
@@ -191,11 +231,27 @@ class DatadogGeneralToolset(Toolset):
     def prerequisites_callable(self, config: dict[str, Any]) -> Tuple[bool, str]:
         """Check prerequisites with configuration."""
         if not config:
-            return
+            return (
+                False,
+                "Missing config for dd_api_key, dd_app_key, or site_api_url. For details: https://holmesgpt.dev/data-sources/builtin-toolsets/datadog/",
+            )

         try:
             dd_config = DatadogGeneralConfig(**config)
             self.dd_config = dd_config
+
+            # Fetch OpenAPI spec on startup for better error messages and documentation
+            logging.debug("Fetching Datadog OpenAPI specification...")
+            self.openapi_spec = fetch_openapi_spec(version="both")
+            if self.openapi_spec:
+                logging.info(
+                    f"Successfully loaded OpenAPI spec with {len(self.openapi_spec.get('paths', {}))} endpoints"
+                )
+            else:
+                logging.warning(
+                    "Could not fetch OpenAPI spec; enhanced error messages will be limited"
+                )
+
             success, error_msg = self._perform_healthcheck(dd_config)
             return success, error_msg
         except Exception as e:
@@ -206,7 +262,8 @@ class DatadogGeneralToolset(Toolset):
         """Perform health check on Datadog API."""
         try:
             logging.info("Performing Datadog general API configuration healthcheck...")
-
+            base_url = str(dd_config.site_api_url).rstrip("/")
+            url = f"{base_url}/api/v1/validate"
             headers = get_headers(dd_config)

             data = execute_datadog_http_request(
@@ -218,7 +275,7 @@ class DatadogGeneralToolset(Toolset):
             )

             if data.get("valid", False):
-                logging.
+                logging.debug("Datadog general API healthcheck completed successfully")
                 return True, ""
             else:
                 error_msg = "Datadog API key validation failed"
@@ -267,7 +324,7 @@ def is_endpoint_allowed(
         return False, f"POST method not allowed for endpoint: {path}"

     elif method == "GET":
-        for pattern in WHITELISTED_ENDPOINTS:
+        for pattern, _ in WHITELISTED_ENDPOINTS:
             if re.match(pattern, path):
                 return True, ""

@@ -281,6 +338,23 @@ def is_endpoint_allowed(
     return False, f"HTTP method {method} not allowed for {path}"


+def get_endpoint_hint(endpoint: str) -> str:
+    """
+    Get hint for an endpoint if available.
+
+    Returns:
+        Hint string or empty string if no hint
+    """
+    parsed = urlparse(endpoint)
+    path = parsed.path
+
+    for pattern, hint in WHITELISTED_ENDPOINTS:
+        if re.match(pattern, path):
+            return hint
+
+    return ""
+
+
 class BaseDatadogGeneralTool(Tool):
     """Base class for general Datadog API tools."""

@@ -293,7 +367,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="datadog_api_get",
-            description="Make a GET request to a Datadog API endpoint for read-only operations",
+            description="[datadog/general toolset] Make a GET request to a Datadog API endpoint for read-only operations",
             parameters={
                 "endpoint": ToolParameter(
                     description="The API endpoint path (e.g., '/api/v1/monitors', '/api/v2/events')",
@@ -301,7 +375,14 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                     required=True,
                 ),
                 "query_params": ToolParameter(
-                    description="Query parameters as a dictionary
+                    description="""Query parameters as a dictionary.
+Time format requirements:
+- v1 API: Unix timestamps in seconds (e.g., {'start': 1704067200, 'end': 1704153600})
+- v2 API: RFC3339 format (e.g., {'from': '2024-01-01T00:00:00Z', 'to': '2024-01-02T00:00:00Z'})
+- Relative times like '-24h', 'now', '-7d' will be auto-converted to proper format
+
+Example for events: {'start': 1704067200, 'end': 1704153600}
+Example for monitors: {'name': 'my-monitor', 'tags': 'env:prod'}""",
                     type="object",
                     required=False,
                 ),
@@ -319,9 +400,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         description = params.get("description", "API call")
         return f"{toolset_name_for_one_liner(self.toolset.name)}: {description}"

-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         """Execute the GET request."""
         logging.info("=" * 60)
         logging.info("DatadogAPIGet Tool Invocation:")
@@ -334,7 +413,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):

         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=TOOLSET_CONFIG_MISSING_ERROR,
                 params=params,
             )
@@ -351,7 +430,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         if not is_allowed:
             logging.error(f"Endpoint validation failed: {error_msg}")
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Endpoint validation failed: {error_msg}",
                 params=params,
             )
@@ -366,11 +445,14 @@ class DatadogAPIGet(BaseDatadogGeneralTool):

             logging.info(f"Full API URL: {url}")

+            # Preprocess time fields if any
+            processed_params = preprocess_time_fields(query_params, endpoint)
+
             # Execute request
             response = execute_datadog_http_request(
                 url=url,
                 headers=headers,
-                payload_or_params=
+                payload_or_params=processed_params,
                 timeout=self.toolset.dd_config.request_timeout,
                 method="GET",
             )
@@ -382,13 +464,13 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                 > self.toolset.dd_config.max_response_size
             ):
                 return StructuredToolResult(
-                    status=
+                    status=StructuredToolResultStatus.ERROR,
                     error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
                     params=params,
                 )

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.SUCCESS,
                 data=response_str,
                 params=params,
             )
@@ -404,11 +486,16 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                 )
             elif e.status_code == 404:
                 error_msg = f"Endpoint not found: {endpoint}"
+            elif e.status_code == 400:
+                # Use enhanced error message for 400 errors
+                error_msg = enhance_error_message(
+                    e, endpoint, "GET", str(self.toolset.dd_config.site_api_url)
+                )
             else:
                 error_msg = f"API error {e.status_code}: {str(e)}"

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
                 params=params,
                 invocation=json.dumps({"url": url, "params": query_params})
@@ -419,7 +506,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         except Exception as e:
             logging.exception(f"Failed to query Datadog API: {params}", exc_info=True)
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
                 params=params,
             )
@@ -431,7 +518,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="datadog_api_post_search",
-            description="Make a POST request to Datadog search/query endpoints for complex filtering",
+            description="[datadog/general toolset] Make a POST request to Datadog search/query endpoints for complex filtering",
             parameters={
                 "endpoint": ToolParameter(
                     description="The search API endpoint (e.g., '/api/v2/monitor/search', '/api/v2/events/search')",
@@ -439,7 +526,29 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                     required=True,
                 ),
                 "body": ToolParameter(
-                    description="Request body for the search/filter operation
+                    description="""Request body for the search/filter operation.
+Time format requirements:
+- v1 API: Unix timestamps (e.g., 1704067200)
+- v2 API: RFC3339 format (e.g., '2024-01-01T00:00:00Z')
+- Relative times like '-24h', 'now', '-7d' will be auto-converted
+
+Example for logs search:
+{
+"filter": {
+"from": "2024-01-01T00:00:00Z",
+"to": "2024-01-02T00:00:00Z",
+"query": "*"
+},
+"sort": "-timestamp",
+"page": {"limit": 50}
+}
+
+Example for monitor search:
+{
+"query": "env:production",
+"page": 0,
+"per_page": 20
+}""",
                     type="object",
                     required=True,
                 ),
@@ -457,9 +566,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         description = params.get("description", "Search")
         return f"{toolset_name_for_one_liner(self.toolset.name)}: {description}"

-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         """Execute the POST search request."""
         logging.info("=" * 60)
         logging.info("DatadogAPIPostSearch Tool Invocation:")
@@ -470,7 +577,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):

         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=TOOLSET_CONFIG_MISSING_ERROR,
                 params=params,
             )
@@ -487,7 +594,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         if not is_allowed:
             logging.error(f"Endpoint validation failed: {error_msg}")
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Endpoint validation failed: {error_msg}",
                 params=params,
             )
@@ -502,11 +609,14 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):

             logging.info(f"Full API URL: {url}")

+            # Preprocess time fields if any
+            processed_body = preprocess_time_fields(body, endpoint)
+
             # Execute request
             response = execute_datadog_http_request(
                 url=url,
                 headers=headers,
-                payload_or_params=
+                payload_or_params=processed_body,
                 timeout=self.toolset.dd_config.request_timeout,
                 method="POST",
             )
@@ -518,13 +628,13 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                 > self.toolset.dd_config.max_response_size
             ):
                 return StructuredToolResult(
-                    status=
+                    status=StructuredToolResultStatus.ERROR,
                     error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
                     params=params,
                 )

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.SUCCESS,
                 data=response_str,
                 params=params,
             )
@@ -540,11 +650,16 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                 )
             elif e.status_code == 404:
                 error_msg = f"Endpoint not found: {endpoint}"
+            elif e.status_code == 400:
+                # Use enhanced error message for 400 errors
+                error_msg = enhance_error_message(
+                    e, endpoint, "POST", str(self.toolset.dd_config.site_api_url)
+                )
             else:
                 error_msg = f"API error {e.status_code}: {str(e)}"

             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
                 params=params,
                 invocation=json.dumps({"url": url, "body": body}) if url else None,
@@ -553,7 +668,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         except Exception as e:
             logging.exception(f"Failed to query Datadog API: {params}", exc_info=True)
             return StructuredToolResult(
-                status=
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
                 params=params,
             )
@@ -565,10 +680,10 @@ class ListDatadogAPIResources(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="list_datadog_api_resources",
-            description="List available Datadog API resources and endpoints that can be accessed",
+            description="[datadog/general toolset] List available Datadog API resources and endpoints that can be accessed",
             parameters={
-                "
-                    description="
+                "search_regex": ToolParameter(
+                    description="Optional regex pattern to filter endpoints (e.g., 'monitor', 'logs|metrics', 'security.*signals', 'v2/.*search$'). If not provided, shows all endpoints.",
                     type="string",
                     required=False,
                 ),
@@ -578,145 +693,163 @@ class ListDatadogAPIResources(BaseDatadogGeneralTool):

     def get_parameterized_one_liner(self, params: dict) -> str:
         """Get a one-liner description of the tool invocation."""
-
-        return f"{toolset_name_for_one_liner(self.toolset.name)}: List API Resources ({
+        search = params.get("search_regex", "all")
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: List API Resources (search: {search})"

-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         """List available API resources."""
-
+        search_regex = params.get("search_regex", "")

         logging.info("=" * 60)
         logging.info("ListDatadogAPIResources Tool Invocation:")
-        logging.info(f"
+        logging.info(f" Search regex: {search_regex or 'None (showing all)'}")
+        logging.info(f" OpenAPI Spec Loaded: {self.toolset.openapi_spec is not None}")
         logging.info("=" * 60)

-        #
-
-            "monitors": {
-                "description": "Monitor management and alerting",
-                "endpoints": [
-                    "GET /api/v1/monitor - List all monitors",
-                    "GET /api/v1/monitor/{id} - Get a monitor by ID",
-                    "POST /api/v1/monitor/search - Search monitors",
-                    "GET /api/v1/monitor/groups/search - Search monitor groups",
-                ],
-            },
-            "dashboards": {
-                "description": "Dashboard and visualization management",
-                "endpoints": [
-                    "GET /api/v1/dashboard - List all dashboards",
-                    "GET /api/v1/dashboard/{id} - Get a dashboard by ID",
-                    "POST /api/v1/dashboard/lists - List dashboard lists",
-                    "GET /api/v1/dashboard/public/{token} - Get public dashboard",
-                ],
-            },
-            "slos": {
-                "description": "Service Level Objectives",
-                "endpoints": [
-                    "GET /api/v1/slo - List all SLOs",
-                    "GET /api/v1/slo/{id} - Get an SLO by ID",
-                    "GET /api/v1/slo/{id}/history - Get SLO history",
-                    "POST /api/v1/slo/search - Search SLOs",
-                    "GET /api/v1/slo/{id}/corrections - Get SLO corrections",
-                ],
-            },
-            "incidents": {
-                "description": "Incident management",
-                "endpoints": [
-                    "GET /api/v2/incidents - List incidents",
-                    "GET /api/v2/incidents/{id} - Get incident details",
-                    "POST /api/v2/incidents/search - Search incidents",
-                    "GET /api/v2/incidents/{id}/timeline - Get incident timeline",
-                    "GET /api/v2/incidents/{id}/attachments - Get incident attachments",
-                ],
-            },
-            "synthetics": {
-                "description": "Synthetic monitoring and testing",
-                "endpoints": [
-                    "GET /api/v1/synthetics/tests - List synthetic tests",
-                    "GET /api/v1/synthetics/tests/{id} - Get test details",
-                    "POST /api/v1/synthetics/tests/search - Search tests",
-                    "GET /api/v1/synthetics/tests/{id}/results - Get test results",
-                    "GET /api/v1/synthetics/locations - List test locations",
-                ],
-            },
-            "security": {
-                "description": "Security monitoring and detection",
-                "endpoints": [
-                    "GET /api/v2/security_monitoring/rules - List security rules",
-                    "GET /api/v2/security_monitoring/rules/{id} - Get rule details",
-                    "POST /api/v2/security_monitoring/rules/search - Search rules",
-                    "POST /api/v2/security_monitoring/signals/search - Search security signals",
-                ],
-            },
-            "hosts": {
-                "description": "Host and infrastructure monitoring",
-                "endpoints": [
-                    "GET /api/v1/hosts - List all hosts",
-                    "GET /api/v1/hosts/{name} - Get host details",
-                    "GET /api/v1/hosts/totals - Get host totals",
-                    "GET /api/v1/tags/hosts - Get host tags",
-                ],
-            },
-            "events": {
-                "description": "Event stream and management",
-                "endpoints": [
-                    "GET /api/v1/events - Query event stream",
-                    "GET /api/v1/events/{id} - Get event details",
-                    "POST /api/v2/events/search - Search events",
-                ],
-            },
-            "usage": {
-                "description": "Usage and billing information",
-                "endpoints": [
-                    "GET /api/v1/usage/summary - Get usage summary",
-                    "GET /api/v1/usage/billable-summary - Get billable summary",
-                    "GET /api/v1/usage/estimated_cost - Get estimated costs",
-                    "GET /api/v2/usage/cost_by_org - Get costs by organization",
-                ],
-            },
-            "services": {
-                "description": "APM service information",
-                "endpoints": [
-                    "GET /api/v2/services - List services",
-                    "GET /api/v2/services/{service} - Get service details",
-                    "GET /api/v2/services/{service}/dependencies - Get service dependencies",
-                ],
-            },
-        }
+        # Filter endpoints based on regex search
+        matching_endpoints = []

-
-
-
-
+        if search_regex:
+            try:
+                search_pattern = re.compile(search_regex, re.IGNORECASE)
+            except re.error as e:
                 return StructuredToolResult(
-                    status=
-                    error=f"
+                    status=StructuredToolResultStatus.ERROR,
+                    error=f"Invalid regex pattern: {e}",
                     params=params,
                 )
-
+        else:
+            search_pattern = None
+
+        # Build list of matching endpoints
+        for pattern, hint in WHITELISTED_ENDPOINTS:
+            # Create a readable endpoint example from the pattern
+            example_endpoint = pattern.replace(r"^/api/v\d+", "/api/v1")
+            example_endpoint = example_endpoint.replace(r"(/search)?$", "")
+            example_endpoint = example_endpoint.replace(r"(/[^/]+)?$", "/{id}")
+            example_endpoint = example_endpoint.replace(r"/[^/]+$", "/{id}")
+            example_endpoint = example_endpoint.replace(r"/\d+$", "/{id}")
+            example_endpoint = example_endpoint.replace("$", "")
+            example_endpoint = example_endpoint.replace("^", "")
+
+            # Apply search filter if provided
+            if search_pattern and not search_pattern.search(example_endpoint):
+                continue
+
+            # Determine HTTP methods
+            if "search" in pattern or "query" in pattern or "aggregate" in pattern:
+                methods = "POST"
+            elif "/search)?$" in pattern:
+                methods = "GET/POST"
+            else:
+                methods = "GET"

-
-
+            endpoint_info = {
+                "endpoint": example_endpoint,
+                "methods": methods,
+                "hint": hint,
+                "pattern": pattern,
+            }
+            matching_endpoints.append(endpoint_info)

-
-
-
-
-
-
-            output.append(f" • {endpoint}")
-            output.append("")
+        if not matching_endpoints:
+            return StructuredToolResult(
+                status=StructuredToolResultStatus.SUCCESS,
+                data=f"No endpoints found matching regex: {search_regex}",
+                params=params,
+            )

+        # Format output
+        output = ["Available Datadog API Endpoints", "=" * 40]
+
+        if search_regex:
+            output.append(f"Filter: {search_regex}")
+        output.append(f"Found: {len(matching_endpoints)} endpoints")
+        output.append("")
+
+        # List endpoints with spec info if available
+        for info in matching_endpoints:
+            line = f"{info['methods']:8} {info['endpoint']}"
+            if info["hint"]:
+                line += f"\n {info['hint']}"
+
+            # Add OpenAPI spec info for this specific endpoint if available
+            if self.toolset.openapi_spec and "paths" in self.toolset.openapi_spec:
+                # Try to find matching path in OpenAPI spec
+                spec_path = None
+                for path in self.toolset.openapi_spec["paths"].keys():
+                    if re.match(info["pattern"], path):
+                        spec_path = path
+                        break
+
+                if spec_path and spec_path in self.toolset.openapi_spec["paths"]:
+                    path_spec = self.toolset.openapi_spec["paths"][spec_path]
+                    # Add actual OpenAPI schema for the endpoint
+                    for method in ["get", "post", "put", "delete"]:
+                        if method in path_spec:
+                            method_spec = path_spec[method]
+                            line += f"\n\n OpenAPI Schema ({method.upper()}):"
+
+                            # Add summary if available
+                            if "summary" in method_spec:
+                                line += f"\n Summary: {method_spec['summary']}"
+
+                            # Add parameters if available
+                            if "parameters" in method_spec:
+                                line += "\n Parameters:"
+                                for param in method_spec["parameters"]:
+                                    param_info = f"\n - {param.get('name', 'unknown')} ({param.get('in', 'unknown')})"
+                                    if param.get("required", False):
+                                        param_info += " [required]"
+                                    if "description" in param:
+                                        param_info += f": {param['description'][:100]}"
+                                    line += param_info
+
+                            # Add request body schema if available
+                            if "requestBody" in method_spec:
+                                line += "\n Request Body:"
+                                if "content" in method_spec["requestBody"]:
+                                    for content_type, content_spec in method_spec[
+                                        "requestBody"
+                                    ]["content"].items():
+                                        if "schema" in content_spec:
+                                            # Show a compact version of the schema
+                                            schema_str = json.dumps(
+                                                content_spec["schema"], indent=10
+                                            )[:500]
+                                            if (
+                                                len(json.dumps(content_spec["schema"]))
+                                                > 500
+                                            ):
+                                                schema_str += "..."
+                                            line += f"\n Content-Type: {content_type}"
+                                            line += f"\n Schema: {schema_str}"
+
+                            # Add response schema sample if available
+                            if "responses" in method_spec:
+                                if "200" in method_spec["responses"]:
+                                    line += "\n Response (200):"
+                                    resp = method_spec["responses"]["200"]
+                                    if "description" in resp:
+                                        line += f"\n {resp['description']}"
+                            break
+
+            output.append(line)
+
+            output.append("")
         output.append(
             "Note: All endpoints are read-only. Use the appropriate tool with the endpoint path."
         )
         output.append("Example: datadog_api_get with endpoint='/api/v1/monitors'")
+        output.append("")
+        output.append("Search examples:")
+        output.append(" • 'monitor' - find all monitor endpoints")
+        output.append(" • 'logs|metrics' - find logs OR metrics endpoints")
+        output.append(" • 'v2.*search$' - find all v2 search endpoints")
+        output.append(" • 'security.*signals' - find security signals endpoints")

         return StructuredToolResult(
-            status=
+            status=StructuredToolResultStatus.SUCCESS,
             data="\n".join(output),
             params=params,
         )
```
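The central structural change in the diff above is that `WHITELISTED_ENDPOINTS` moved from a flat list of regex strings to `(pattern, hint)` tuples, which `is_endpoint_allowed` now unpacks and the new `get_endpoint_hint` helper searches. The sketch below is not code from the package: it trims the whitelist to two entries and abbreviates the hint text purely to illustrate how that lookup resolves a hint for a whitelisted path.

```python
import re
from urllib.parse import urlparse

# Hypothetical, trimmed-down whitelist in the new (pattern, hint) tuple shape.
WHITELISTED_ENDPOINTS = [
    (r"^/api/v\d+/monitor(/search)?$", ""),
    (
        r"^/api/v2/logs/events/search$",
        "When available, prefer the fetch_pod_logs tool from the datadog/logs toolset",
    ),
]


def get_endpoint_hint(endpoint: str) -> str:
    """Return the hint attached to the first whitelisted pattern matching the endpoint path."""
    path = urlparse(endpoint).path
    for pattern, hint in WHITELISTED_ENDPOINTS:
        if re.match(pattern, path):
            return hint
    return ""


# The path component is matched regardless of whether a full URL or a bare path is passed.
print(get_endpoint_hint("https://api.datadoghq.com/api/v2/logs/events/search"))  # prints the logs hint
print(get_endpoint_hint("/api/v1/monitor"))  # prints "" (whitelisted, but no hint)
```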