holmesgpt 0.13.2__py3-none-any.whl → 0.18.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (188)
  1. holmes/__init__.py +3 -5
  2. holmes/clients/robusta_client.py +20 -6
  3. holmes/common/env_vars.py +58 -3
  4. holmes/common/openshift.py +1 -1
  5. holmes/config.py +123 -148
  6. holmes/core/conversations.py +71 -15
  7. holmes/core/feedback.py +191 -0
  8. holmes/core/investigation.py +31 -39
  9. holmes/core/investigation_structured_output.py +3 -3
  10. holmes/core/issue.py +1 -1
  11. holmes/core/llm.py +508 -88
  12. holmes/core/models.py +108 -4
  13. holmes/core/openai_formatting.py +14 -1
  14. holmes/core/prompt.py +48 -3
  15. holmes/core/runbooks.py +1 -0
  16. holmes/core/safeguards.py +8 -6
  17. holmes/core/supabase_dal.py +295 -100
  18. holmes/core/tool_calling_llm.py +489 -428
  19. holmes/core/tools.py +325 -56
  20. holmes/core/tools_utils/token_counting.py +21 -0
  21. holmes/core/tools_utils/tool_context_window_limiter.py +40 -0
  22. holmes/core/tools_utils/tool_executor.py +0 -13
  23. holmes/core/tools_utils/toolset_utils.py +1 -0
  24. holmes/core/toolset_manager.py +191 -5
  25. holmes/core/tracing.py +19 -3
  26. holmes/core/transformers/__init__.py +23 -0
  27. holmes/core/transformers/base.py +63 -0
  28. holmes/core/transformers/llm_summarize.py +175 -0
  29. holmes/core/transformers/registry.py +123 -0
  30. holmes/core/transformers/transformer.py +32 -0
  31. holmes/core/truncation/compaction.py +94 -0
  32. holmes/core/truncation/dal_truncation_utils.py +23 -0
  33. holmes/core/truncation/input_context_window_limiter.py +219 -0
  34. holmes/interactive.py +228 -31
  35. holmes/main.py +23 -40
  36. holmes/plugins/interfaces.py +2 -1
  37. holmes/plugins/prompts/__init__.py +2 -1
  38. holmes/plugins/prompts/_fetch_logs.jinja2 +31 -6
  39. holmes/plugins/prompts/_general_instructions.jinja2 +1 -2
  40. holmes/plugins/prompts/_runbook_instructions.jinja2 +24 -12
  41. holmes/plugins/prompts/base_user_prompt.jinja2 +7 -0
  42. holmes/plugins/prompts/conversation_history_compaction.jinja2 +89 -0
  43. holmes/plugins/prompts/generic_ask.jinja2 +0 -4
  44. holmes/plugins/prompts/generic_ask_conversation.jinja2 +0 -1
  45. holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +0 -1
  46. holmes/plugins/prompts/generic_investigation.jinja2 +0 -1
  47. holmes/plugins/prompts/investigation_procedure.jinja2 +50 -1
  48. holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +0 -1
  49. holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +0 -1
  50. holmes/plugins/runbooks/__init__.py +145 -17
  51. holmes/plugins/runbooks/catalog.json +2 -0
  52. holmes/plugins/sources/github/__init__.py +4 -2
  53. holmes/plugins/sources/prometheus/models.py +1 -0
  54. holmes/plugins/toolsets/__init__.py +44 -27
  55. holmes/plugins/toolsets/aks-node-health.yaml +46 -0
  56. holmes/plugins/toolsets/aks.yaml +64 -0
  57. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +38 -47
  58. holmes/plugins/toolsets/azure_sql/apis/alert_monitoring_api.py +3 -2
  59. holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +2 -1
  60. holmes/plugins/toolsets/azure_sql/apis/connection_failure_api.py +3 -2
  61. holmes/plugins/toolsets/azure_sql/apis/connection_monitoring_api.py +3 -1
  62. holmes/plugins/toolsets/azure_sql/apis/storage_analysis_api.py +3 -1
  63. holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +12 -13
  64. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +15 -12
  65. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +15 -12
  66. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +11 -11
  67. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +11 -9
  68. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +15 -12
  69. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +15 -15
  70. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +11 -8
  71. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +11 -8
  72. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +11 -8
  73. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +11 -8
  74. holmes/plugins/toolsets/azure_sql/utils.py +0 -32
  75. holmes/plugins/toolsets/bash/argocd/__init__.py +3 -3
  76. holmes/plugins/toolsets/bash/aws/__init__.py +4 -4
  77. holmes/plugins/toolsets/bash/azure/__init__.py +4 -4
  78. holmes/plugins/toolsets/bash/bash_toolset.py +11 -15
  79. holmes/plugins/toolsets/bash/common/bash.py +23 -13
  80. holmes/plugins/toolsets/bash/common/bash_command.py +1 -1
  81. holmes/plugins/toolsets/bash/common/stringify.py +1 -1
  82. holmes/plugins/toolsets/bash/kubectl/__init__.py +2 -1
  83. holmes/plugins/toolsets/bash/kubectl/constants.py +0 -1
  84. holmes/plugins/toolsets/bash/kubectl/kubectl_get.py +3 -4
  85. holmes/plugins/toolsets/bash/parse_command.py +12 -13
  86. holmes/plugins/toolsets/cilium.yaml +284 -0
  87. holmes/plugins/toolsets/connectivity_check.py +124 -0
  88. holmes/plugins/toolsets/coralogix/api.py +132 -119
  89. holmes/plugins/toolsets/coralogix/coralogix.jinja2 +14 -0
  90. holmes/plugins/toolsets/coralogix/toolset_coralogix.py +219 -0
  91. holmes/plugins/toolsets/coralogix/utils.py +15 -79
  92. holmes/plugins/toolsets/datadog/datadog_api.py +525 -26
  93. holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +55 -11
  94. holmes/plugins/toolsets/datadog/datadog_metrics_instructions.jinja2 +3 -3
  95. holmes/plugins/toolsets/datadog/datadog_models.py +59 -0
  96. holmes/plugins/toolsets/datadog/datadog_url_utils.py +213 -0
  97. holmes/plugins/toolsets/datadog/instructions_datadog_traces.jinja2 +165 -28
  98. holmes/plugins/toolsets/datadog/toolset_datadog_general.py +417 -241
  99. holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +234 -214
  100. holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +167 -79
  101. holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +374 -363
  102. holmes/plugins/toolsets/elasticsearch/__init__.py +6 -0
  103. holmes/plugins/toolsets/elasticsearch/elasticsearch.py +834 -0
  104. holmes/plugins/toolsets/elasticsearch/opensearch_ppl_query_docs.jinja2 +1616 -0
  105. holmes/plugins/toolsets/elasticsearch/opensearch_query_assist.py +78 -0
  106. holmes/plugins/toolsets/elasticsearch/opensearch_query_assist_instructions.jinja2 +223 -0
  107. holmes/plugins/toolsets/git.py +54 -50
  108. holmes/plugins/toolsets/grafana/base_grafana_toolset.py +16 -4
  109. holmes/plugins/toolsets/grafana/common.py +13 -29
  110. holmes/plugins/toolsets/grafana/grafana_tempo_api.py +455 -0
  111. holmes/plugins/toolsets/grafana/loki/instructions.jinja2 +25 -0
  112. holmes/plugins/toolsets/grafana/loki/toolset_grafana_loki.py +191 -0
  113. holmes/plugins/toolsets/grafana/loki_api.py +4 -0
  114. holmes/plugins/toolsets/grafana/toolset_grafana.py +293 -89
  115. holmes/plugins/toolsets/grafana/toolset_grafana_dashboard.jinja2 +49 -0
  116. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +246 -11
  117. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +820 -292
  118. holmes/plugins/toolsets/grafana/trace_parser.py +4 -3
  119. holmes/plugins/toolsets/internet/internet.py +15 -16
  120. holmes/plugins/toolsets/internet/notion.py +9 -11
  121. holmes/plugins/toolsets/investigator/core_investigation.py +44 -36
  122. holmes/plugins/toolsets/investigator/model.py +3 -1
  123. holmes/plugins/toolsets/json_filter_mixin.py +134 -0
  124. holmes/plugins/toolsets/kafka.py +36 -42
  125. holmes/plugins/toolsets/kubernetes.yaml +317 -113
  126. holmes/plugins/toolsets/kubernetes_logs.py +9 -9
  127. holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
  128. holmes/plugins/toolsets/logging_utils/logging_api.py +94 -8
  129. holmes/plugins/toolsets/mcp/toolset_mcp.py +218 -64
  130. holmes/plugins/toolsets/newrelic/new_relic_api.py +165 -0
  131. holmes/plugins/toolsets/newrelic/newrelic.jinja2 +65 -0
  132. holmes/plugins/toolsets/newrelic/newrelic.py +320 -0
  133. holmes/plugins/toolsets/openshift.yaml +283 -0
  134. holmes/plugins/toolsets/prometheus/prometheus.py +1202 -421
  135. holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +54 -5
  136. holmes/plugins/toolsets/prometheus/utils.py +28 -0
  137. holmes/plugins/toolsets/rabbitmq/api.py +23 -4
  138. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +13 -14
  139. holmes/plugins/toolsets/robusta/robusta.py +239 -68
  140. holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +26 -9
  141. holmes/plugins/toolsets/runbook/runbook_fetcher.py +157 -27
  142. holmes/plugins/toolsets/service_discovery.py +1 -1
  143. holmes/plugins/toolsets/servicenow_tables/instructions.jinja2 +83 -0
  144. holmes/plugins/toolsets/servicenow_tables/servicenow_tables.py +426 -0
  145. holmes/plugins/toolsets/utils.py +88 -0
  146. holmes/utils/config_utils.py +91 -0
  147. holmes/utils/connection_utils.py +31 -0
  148. holmes/utils/console/result.py +10 -0
  149. holmes/utils/default_toolset_installation_guide.jinja2 +1 -22
  150. holmes/utils/env.py +7 -0
  151. holmes/utils/file_utils.py +2 -1
  152. holmes/utils/global_instructions.py +60 -11
  153. holmes/utils/holmes_status.py +6 -4
  154. holmes/utils/holmes_sync_toolsets.py +0 -2
  155. holmes/utils/krr_utils.py +188 -0
  156. holmes/utils/log.py +15 -0
  157. holmes/utils/markdown_utils.py +2 -3
  158. holmes/utils/memory_limit.py +58 -0
  159. holmes/utils/sentry_helper.py +64 -0
  160. holmes/utils/stream.py +69 -8
  161. holmes/utils/tags.py +4 -3
  162. holmes/version.py +37 -15
  163. holmesgpt-0.18.4.dist-info/LICENSE +178 -0
  164. {holmesgpt-0.13.2.dist-info → holmesgpt-0.18.4.dist-info}/METADATA +35 -31
  165. holmesgpt-0.18.4.dist-info/RECORD +258 -0
  166. holmes/core/performance_timing.py +0 -72
  167. holmes/plugins/toolsets/aws.yaml +0 -80
  168. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +0 -112
  169. holmes/plugins/toolsets/datadog/datadog_traces_formatter.py +0 -310
  170. holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +0 -739
  171. holmes/plugins/toolsets/grafana/grafana_api.py +0 -42
  172. holmes/plugins/toolsets/grafana/tempo_api.py +0 -124
  173. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +0 -110
  174. holmes/plugins/toolsets/newrelic.py +0 -231
  175. holmes/plugins/toolsets/opensearch/opensearch.py +0 -257
  176. holmes/plugins/toolsets/opensearch/opensearch_logs.py +0 -161
  177. holmes/plugins/toolsets/opensearch/opensearch_traces.py +0 -218
  178. holmes/plugins/toolsets/opensearch/opensearch_traces_instructions.jinja2 +0 -12
  179. holmes/plugins/toolsets/opensearch/opensearch_utils.py +0 -166
  180. holmes/plugins/toolsets/servicenow/install.md +0 -37
  181. holmes/plugins/toolsets/servicenow/instructions.jinja2 +0 -3
  182. holmes/plugins/toolsets/servicenow/servicenow.py +0 -219
  183. holmes/utils/keygen_utils.py +0 -6
  184. holmesgpt-0.13.2.dist-info/LICENSE.txt +0 -21
  185. holmesgpt-0.13.2.dist-info/RECORD +0 -234
  186. /holmes/plugins/toolsets/{opensearch → newrelic}/__init__.py +0 -0
  187. {holmesgpt-0.13.2.dist-info → holmesgpt-0.18.4.dist-info}/WHEEL +0 -0
  188. {holmesgpt-0.13.2.dist-info → holmesgpt-0.18.4.dist-info}/entry_points.txt +0 -0
@@ -5,103 +5,142 @@ import logging
 import os
 import re
 from typing import Any, Dict, Optional, Tuple
-from urllib.parse import urlparse
+from urllib.parse import urlencode, urlparse

 from holmes.core.tools import (
     CallablePrerequisite,
+    StructuredToolResult,
+    StructuredToolResultStatus,
     Tool,
+    ToolInvokeContext,
     ToolParameter,
     Toolset,
-    StructuredToolResult,
-    ToolResultStatus,
     ToolsetTag,
 )
 from holmes.plugins.toolsets.consts import TOOLSET_CONFIG_MISSING_ERROR
 from holmes.plugins.toolsets.datadog.datadog_api import (
-    DatadogBaseConfig,
+    MAX_RETRY_COUNT_ON_RATE_LIMIT,
     DataDogRequestError,
+    enhance_error_message,
     execute_datadog_http_request,
+    fetch_openapi_spec,
     get_headers,
-    MAX_RETRY_COUNT_ON_RATE_LIMIT,
+    preprocess_time_fields,
+)
+from holmes.plugins.toolsets.datadog.datadog_models import (
+    MAX_RESPONSE_SIZE,
+    DatadogGeneralConfig,
+)
+from holmes.plugins.toolsets.datadog.datadog_url_utils import (
+    generate_datadog_general_url,
 )
 from holmes.plugins.toolsets.utils import toolset_name_for_one_liner

-# Maximum response size in bytes (10MB)
-MAX_RESPONSE_SIZE = 10 * 1024 * 1024
-
-# Whitelisted API endpoint patterns - READ ONLY operations
+# Whitelisted API endpoint patterns with optional hints
+# Format: (pattern, hint) - hint is empty string if no special instructions
 WHITELISTED_ENDPOINTS = [
     # Monitors
-    r"^/api/v\d+/monitor(/search)?$",
-    r"^/api/v\d+/monitor/\d+(/downtimes)?$",
-    r"^/api/v\d+/monitor/groups/search$",
+    (r"^/api/v\d+/monitor(/search)?$", ""),
+    (r"^/api/v\d+/monitor/\d+(/downtimes)?$", ""),
+    (r"^/api/v\d+/monitor/groups/search$", ""),
     # Dashboards
-    r"^/api/v\d+/dashboard(/lists)?$",
-    r"^/api/v\d+/dashboard/[^/]+$",
-    r"^/api/v\d+/dashboard/public/[^/]+$",
-    # SLOs (Service Level Objectives)
-    r"^/api/v\d+/slo(/search)?$",
-    r"^/api/v\d+/slo/[^/]+(/history)?$",
-    r"^/api/v\d+/slo/[^/]+/corrections$",
+    (r"^/api/v\d+/dashboard(/lists)?$", ""),
+    (r"^/api/v\d+/dashboard/[^/]+$", ""),
+    (r"^/api/v\d+/dashboard/public/[^/]+$", ""),
+    # SLOs
+    (r"^/api/v\d+/slo(/search)?$", ""),
+    (r"^/api/v\d+/slo/[^/]+(/history)?$", ""),
+    (r"^/api/v\d+/slo/[^/]+/corrections$", ""),
     # Events
-    r"^/api/v\d+/events$",
-    r"^/api/v\d+/events/\d+$",
+    (
+        r"^/api/v\d+/events$",
+        "Use time range parameters 'start' and 'end' as Unix timestamps",
+    ),
+    (r"^/api/v\d+/events/\d+$", ""),
     # Incidents
-    r"^/api/v\d+/incidents(/search)?$",
-    r"^/api/v\d+/incidents/[^/]+$",
-    r"^/api/v\d+/incidents/[^/]+/attachments$",
-    r"^/api/v\d+/incidents/[^/]+/connected_integrations$",
-    r"^/api/v\d+/incidents/[^/]+/relationships$",
-    r"^/api/v\d+/incidents/[^/]+/timeline$",
+    (r"^/api/v\d+/incidents(/search)?$", ""),
+    (r"^/api/v\d+/incidents/[^/]+$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/attachments$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/connected_integrations$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/relationships$", ""),
+    (r"^/api/v\d+/incidents/[^/]+/timeline$", ""),
     # Synthetics
-    r"^/api/v\d+/synthetics/tests(/search)?$",
-    r"^/api/v\d+/synthetics/tests/[^/]+$",
-    r"^/api/v\d+/synthetics/tests/[^/]+/results$",
-    r"^/api/v\d+/synthetics/tests/browser/[^/]+/results$",
-    r"^/api/v\d+/synthetics/tests/api/[^/]+/results$",
-    r"^/api/v\d+/synthetics/locations$",
-    # Security Monitoring
-    r"^/api/v\d+/security_monitoring/rules(/search)?$",
-    r"^/api/v\d+/security_monitoring/rules/[^/]+$",
-    r"^/api/v\d+/security_monitoring/signals(/search)?$",
-    r"^/api/v\d+/security_monitoring/signals/[^/]+$",
-    # Service Map / APM Services
-    r"^/api/v\d+/services$",
-    r"^/api/v\d+/services/[^/]+$",
-    r"^/api/v\d+/services/[^/]+/dependencies$",
+    (r"^/api/v\d+/synthetics/tests(/search)?$", ""),
+    (r"^/api/v\d+/synthetics/tests/[^/]+$", ""),
+    (r"^/api/v\d+/synthetics/tests/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/tests/browser/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/tests/api/[^/]+/results$", ""),
+    (r"^/api/v\d+/synthetics/locations$", ""),
+    # Security
+    (r"^/api/v\d+/security_monitoring/rules(/search)?$", ""),
+    (r"^/api/v\d+/security_monitoring/rules/[^/]+$", ""),
+    (r"^/api/v\d+/security_monitoring/signals(/search)?$", ""),
+    (r"^/api/v\d+/security_monitoring/signals/[^/]+$", ""),
+    # Services
+    (r"^/api/v\d+/services$", ""),
+    (r"^/api/v\d+/services/[^/]+$", ""),
+    (r"^/api/v\d+/services/[^/]+/dependencies$", ""),
+    (r"^/api/v\d+/service_dependencies$", ""),
     # Hosts
-    r"^/api/v\d+/hosts$",
-    r"^/api/v\d+/hosts/totals$",
-    r"^/api/v\d+/hosts/[^/]+$",
-    # Usage & Cost
-    r"^/api/v\d+/usage/[^/]+$",
-    r"^/api/v\d+/usage/summary$",
-    r"^/api/v\d+/usage/billable-summary$",
-    r"^/api/v\d+/usage/cost_by_org$",
-    r"^/api/v\d+/usage/estimated_cost$",
+    (r"^/api/v\d+/hosts$", ""),
+    (r"^/api/v\d+/hosts/totals$", ""),
+    (r"^/api/v\d+/hosts/[^/]+$", ""),
+    # Usage
+    (r"^/api/v\d+/usage/[^/]+$", ""),
+    (r"^/api/v\d+/usage/summary$", ""),
+    (r"^/api/v\d+/usage/billable-summary$", ""),
+    (r"^/api/v\d+/usage/cost_by_org$", ""),
+    (r"^/api/v\d+/usage/estimated_cost$", ""),
     # Processes
-    r"^/api/v\d+/processes$",
+    (r"^/api/v\d+/processes$", ""),
     # Tags
-    r"^/api/v\d+/tags/hosts(/[^/]+)?$",
+    (r"^/api/v\d+/tags/hosts(/[^/]+)?$", ""),
     # Notebooks
-    r"^/api/v\d+/notebooks$",
-    r"^/api/v\d+/notebooks/\d+$",
-    # Service Dependencies
-    r"^/api/v\d+/service_dependencies$",
+    (r"^/api/v\d+/notebooks$", ""),
+    (r"^/api/v\d+/notebooks/\d+$", ""),
     # Organization
-    r"^/api/v\d+/org$",
-    r"^/api/v\d+/org/[^/]+$",
-    # Users (read only)
-    r"^/api/v\d+/users$",
-    r"^/api/v\d+/users/[^/]+$",
-    # Teams (read only)
-    r"^/api/v\d+/teams$",
-    r"^/api/v\d+/teams/[^/]+$",
-    # Audit logs
-    r"^/api/v\d+/audit/events$",
-    # Service Accounts (read only)
-    r"^/api/v\d+/service_accounts$",
-    r"^/api/v\d+/service_accounts/[^/]+$",
+    (r"^/api/v\d+/org$", ""),
+    (r"^/api/v\d+/org/[^/]+$", ""),
+    # Users
+    (r"^/api/v\d+/users$", ""),
+    (r"^/api/v\d+/users/[^/]+$", ""),
+    # Teams
+    (r"^/api/v\d+/teams$", ""),
+    (r"^/api/v\d+/teams/[^/]+$", ""),
+    # Logs
+    (
+        r"^/api/v1/logs/config/indexes$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v2/logs/events$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. Use RFC3339 timestamps (e.g., '2024-01-01T00:00:00Z')",
+    ),
+    (
+        r"^/api/v2/logs/events/search$",
+        'When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. RFC3339 time format. Example: {"filter": {"from": "2024-01-01T00:00:00Z", "to": "2024-01-02T00:00:00Z", "query": "*"}}',
+    ),
+    (
+        r"^/api/v2/logs/analytics/aggregate$",
+        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset instead of calling this API directly with the datadog/general toolset. Do not include 'sort' parameter",
+    ),
+    # Metrics
+    (
+        r"^/api/v\d+/metrics$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v\d+/metrics/[^/]+$",
+        "When available, prefer using get_datadog_metric_metadata tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
+    (
+        r"^/api/v\d+/query$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset. Use 'from' and 'to' as Unix timestamps",
+    ),
+    (
+        r"^/api/v\d+/search/query$",
+        "When available, prefer using query_datadog_metrics tool from datadog/metrics toolset instead of calling this API directly with the datadog/general toolset",
+    ),
 ]

 # Blacklisted path segments that indicate write operations
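To make the new tuple format concrete, here is a minimal, self-contained sketch (not taken verbatim from the package) of resolving a requested path to its optional hint; the package's own `get_endpoint_hint()` helper, added further down in this diff, follows the same pattern.

```python
import re
from urllib.parse import urlparse

# Small subset of the whitelist above, in the new (pattern, hint) form.
WHITELISTED_ENDPOINTS = [
    (r"^/api/v\d+/monitor(/search)?$", ""),
    (
        r"^/api/v2/logs/events/search$",
        "When available, prefer using fetch_pod_logs tool from datadog/logs toolset",
    ),
]


def hint_for(endpoint: str) -> str:
    """Return the hint of the first whitelist pattern matching the endpoint path."""
    path = urlparse(endpoint).path
    for pattern, hint in WHITELISTED_ENDPOINTS:
        if re.match(pattern, path):
            return hint
    return ""


print(hint_for("/api/v2/logs/events/search"))  # -> the fetch_pod_logs hint
print(hint_for("/api/v1/monitor"))             # -> "" (no special instructions)
```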
@@ -146,31 +185,27 @@ WHITELISTED_POST_ENDPOINTS = [
     r"^/api/v\d+/security_monitoring/rules/search$",
     r"^/api/v\d+/security_monitoring/signals/search$",
     r"^/api/v\d+/logs/events/search$",
+    r"^/api/v2/logs/events/search$",
+    r"^/api/v2/logs/analytics/aggregate$",
     r"^/api/v\d+/spans/events/search$",
     r"^/api/v\d+/rum/events/search$",
     r"^/api/v\d+/audit/events/search$",
+    r"^/api/v\d+/query$",
+    r"^/api/v\d+/search/query$",
 ]


-class DatadogGeneralConfig(DatadogBaseConfig):
-    """Configuration for general-purpose Datadog toolset."""
-
-    max_response_size: int = MAX_RESPONSE_SIZE
-    allow_custom_endpoints: bool = (
-        False  # If True, allows endpoints not in whitelist (still filtered for safety)
-    )
-
-
 class DatadogGeneralToolset(Toolset):
     """General-purpose Datadog API toolset for read-only operations not covered by specialized toolsets."""

     dd_config: Optional[DatadogGeneralConfig] = None
+    openapi_spec: Optional[Dict[str, Any]] = None

     def __init__(self):
         super().__init__(
             name="datadog/general",
-            description="General-purpose Datadog API access for read-only operations including monitors, dashboards, SLOs, incidents, synthetics, and more",
-            docs_url="https://docs.datadoghq.com/api/latest/",
+            description="General-purpose Datadog API access for read-only operations including monitors, dashboards, SLOs, incidents, synthetics, logs, metrics, and more. Note: For logs and metrics, prefer using the specialized datadog/logs and datadog/metrics toolsets when available as they provide optimized functionality",
+            docs_url="https://holmesgpt.dev/data-sources/builtin-toolsets/datadog/",
             icon_url="https://imgix.datadoghq.com//img/about/presskit/DDlogo.jpg",
             prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],
             tools=[
@@ -178,7 +213,6 @@ class DatadogGeneralToolset(Toolset):
                 DatadogAPIPostSearch(toolset=self),
                 ListDatadogAPIResources(toolset=self),
             ],
-            experimental=True,
             tags=[ToolsetTag.CORE],
         )
         template_file_path = os.path.abspath(
@@ -191,11 +225,27 @@ class DatadogGeneralToolset(Toolset):
     def prerequisites_callable(self, config: dict[str, Any]) -> Tuple[bool, str]:
         """Check prerequisites with configuration."""
         if not config:
-            return False, TOOLSET_CONFIG_MISSING_ERROR
+            return (
+                False,
+                "Missing config for dd_api_key, dd_app_key, or site_api_url. For details: https://holmesgpt.dev/data-sources/builtin-toolsets/datadog/",
+            )

         try:
             dd_config = DatadogGeneralConfig(**config)
             self.dd_config = dd_config
+
+            # Fetch OpenAPI spec on startup for better error messages and documentation
+            logging.debug("Fetching Datadog OpenAPI specification...")
+            self.openapi_spec = fetch_openapi_spec(version="both")
+            if self.openapi_spec:
+                logging.info(
+                    f"Successfully loaded OpenAPI spec with {len(self.openapi_spec.get('paths', {}))} endpoints"
+                )
+            else:
+                logging.warning(
+                    "Could not fetch OpenAPI spec; enhanced error messages will be limited"
+                )
+
             success, error_msg = self._perform_healthcheck(dd_config)
             return success, error_msg
         except Exception as e:
@@ -206,7 +256,8 @@ class DatadogGeneralToolset(Toolset):
        """Perform health check on Datadog API."""
        try:
            logging.info("Performing Datadog general API configuration healthcheck...")
-            url = f"{dd_config.site_api_url}/api/v1/validate"
+            base_url = str(dd_config.site_api_url).rstrip("/")
+            url = f"{base_url}/api/v1/validate"
            headers = get_headers(dd_config)

            data = execute_datadog_http_request(
@@ -218,7 +269,7 @@ )
            )

            if data.get("valid", False):
-                logging.info("Datadog general API healthcheck completed successfully")
+                logging.debug("Datadog general API healthcheck completed successfully")
                return True, ""
            else:
                error_msg = "Datadog API key validation failed"
@@ -267,7 +318,7 @@ def is_endpoint_allowed(
            return False, f"POST method not allowed for endpoint: {path}"

    elif method == "GET":
-        for pattern in WHITELISTED_ENDPOINTS:
+        for pattern, _ in WHITELISTED_ENDPOINTS:
            if re.match(pattern, path):
                return True, ""

@@ -281,6 +332,23 @@
    return False, f"HTTP method {method} not allowed for {path}"


+def get_endpoint_hint(endpoint: str) -> str:
+    """
+    Get hint for an endpoint if available.
+
+    Returns:
+        Hint string or empty string if no hint
+    """
+    parsed = urlparse(endpoint)
+    path = parsed.path
+
+    for pattern, hint in WHITELISTED_ENDPOINTS:
+        if re.match(pattern, path):
+            return hint
+
+    return ""
+
+
 class BaseDatadogGeneralTool(Tool):
     """Base class for general Datadog API tools."""

@@ -293,7 +361,7 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
    def __init__(self, toolset: "DatadogGeneralToolset"):
        super().__init__(
            name="datadog_api_get",
-            description="Make a GET request to a Datadog API endpoint for read-only operations",
+            description="[datadog/general toolset] Make a GET request to a Datadog API endpoint for read-only operations",
            parameters={
                "endpoint": ToolParameter(
                    description="The API endpoint path (e.g., '/api/v1/monitors', '/api/v2/events')",
@@ -301,7 +369,14 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                    required=True,
                ),
                "query_params": ToolParameter(
-                    description="Query parameters as a dictionary (e.g., {'from': '2024-01-01', 'to': '2024-01-02'})",
+                    description="""Query parameters as a dictionary.
+                    Time format requirements:
+                    - v1 API: Unix timestamps in seconds (e.g., {'start': 1704067200, 'end': 1704153600})
+                    - v2 API: RFC3339 format (e.g., {'from': '2024-01-01T00:00:00Z', 'to': '2024-01-02T00:00:00Z'})
+                    - Relative times like '-24h', 'now', '-7d' will be auto-converted to proper format
+
+                    Example for events: {'start': 1704067200, 'end': 1704153600}
+                    Example for monitors: {'name': 'my-monitor', 'tags': 'env:prod'}""",
                    type="object",
                    required=False,
                ),
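The `preprocess_time_fields` helper imported at the top of this file lives in datadog_api.py, whose body is not part of this diff. The sketch below is therefore only an illustration, under the assumption that it follows the convention documented in the parameter description above: v1 endpoints take Unix seconds, v2 endpoints take RFC3339 strings, and relative offsets such as '-24h' or 'now' are expanded against the current time. The real implementation may differ in details.

```python
import re
from datetime import datetime, timedelta, timezone

# Hypothetical illustration of the documented time handling; not the package's code.
_RELATIVE = re.compile(r"^-(\d+)([smhd])$")
_UNITS = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days"}


def resolve_relative(value: str, endpoint: str):
    """Convert 'now' / '-24h' style values to the format the endpoint expects."""
    now = datetime.now(timezone.utc)
    if value == "now":
        ts = now
    else:
        match = _RELATIVE.match(value)
        if not match:
            return value  # already absolute; pass through unchanged
        amount, unit = int(match.group(1)), match.group(2)
        ts = now - timedelta(**{_UNITS[unit]: amount})
    if endpoint.startswith("/api/v2/"):
        return ts.strftime("%Y-%m-%dT%H:%M:%SZ")  # RFC3339 string for v2 APIs
    return int(ts.timestamp())  # Unix seconds for v1 APIs


# resolve_relative("-24h", "/api/v1/events")      -> Unix seconds (int)
# resolve_relative("-24h", "/api/v2/logs/events") -> RFC3339 string
```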
@@ -319,24 +394,24 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         description = params.get("description", "API call")
         return f"{toolset_name_for_one_liner(self.toolset.name)}: {description}"

-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         """Execute the GET request."""
+        params_return = params.copy()
+        params_return["query_params"] = json.dumps(
+            params.get("query_params", {}), indent=2
+        )
         logging.info("=" * 60)
         logging.info("DatadogAPIGet Tool Invocation:")
         logging.info(f" Description: {params.get('description', 'No description')}")
         logging.info(f" Endpoint: {params.get('endpoint', '')}")
-        logging.info(
-            f" Query Params: {json.dumps(params.get('query_params', {}), indent=2)}"
-        )
+        logging.info(f" Query Params: {params_return['query_params']}")
         logging.info("=" * 60)

         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=TOOLSET_CONFIG_MISSING_ERROR,
-                params=params,
+                params=params_return,
             )

         endpoint = params.get("endpoint", "")
@@ -351,9 +426,9 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         if not is_allowed:
             logging.error(f"Endpoint validation failed: {error_msg}")
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Endpoint validation failed: {error_msg}",
-                params=params,
+                params=params_return,
             )

         url = None
@@ -366,11 +441,14 @@

             logging.info(f"Full API URL: {url}")

+            # Preprocess time fields if any
+            processed_params = preprocess_time_fields(query_params, endpoint)
+
             # Execute request
             response = execute_datadog_http_request(
                 url=url,
                 headers=headers,
-                payload_or_params=query_params,
+                payload_or_params=processed_params,
                 timeout=self.toolset.dd_config.request_timeout,
                 method="GET",
             )
@@ -382,15 +460,22 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                 > self.toolset.dd_config.max_response_size
             ):
                 return StructuredToolResult(
-                    status=ToolResultStatus.ERROR,
+                    status=StructuredToolResultStatus.ERROR,
                     error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
-                    params=params,
+                    params=params_return,
                 )

+            web_url = generate_datadog_general_url(
+                self.toolset.dd_config,
+                endpoint,
+                query_params,
+            )
+
             return StructuredToolResult(
-                status=ToolResultStatus.SUCCESS,
+                status=StructuredToolResultStatus.SUCCESS,
                 data=response_str,
-                params=params,
+                params=params_return,
+                url=web_url,
             )

         except DataDogRequestError as e:
@@ -404,13 +489,18 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
                 )
             elif e.status_code == 404:
                 error_msg = f"Endpoint not found: {endpoint}"
+            elif e.status_code == 400:
+                # Use enhanced error message for 400 errors
+                error_msg = enhance_error_message(
+                    e, endpoint, "GET", str(self.toolset.dd_config.site_api_url)
+                )
             else:
                 error_msg = f"API error {e.status_code}: {str(e)}"

             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
-                params=params,
+                params=params_return,
                 invocation=json.dumps({"url": url, "params": query_params})
                 if url
                 else None,
@@ -419,9 +509,9 @@ class DatadogAPIGet(BaseDatadogGeneralTool):
         except Exception as e:
             logging.exception(f"Failed to query Datadog API: {params}", exc_info=True)
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
-                params=params,
+                params=params_return,
             )


@@ -431,7 +521,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="datadog_api_post_search",
-            description="Make a POST request to Datadog search/query endpoints for complex filtering",
+            description="[datadog/general toolset] Make a POST request to Datadog search/query endpoints for complex filtering",
             parameters={
                 "endpoint": ToolParameter(
                     description="The search API endpoint (e.g., '/api/v2/monitor/search', '/api/v2/events/search')",
@@ -439,7 +529,29 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                     required=True,
                 ),
                 "body": ToolParameter(
-                    description="Request body for the search/filter operation",
+                    description="""Request body for the search/filter operation.
+                    Time format requirements:
+                    - v1 API: Unix timestamps (e.g., 1704067200)
+                    - v2 API: RFC3339 format (e.g., '2024-01-01T00:00:00Z')
+                    - Relative times like '-24h', 'now', '-7d' will be auto-converted
+
+                    Example for logs search:
+                    {
+                        "filter": {
+                            "from": "2024-01-01T00:00:00Z",
+                            "to": "2024-01-02T00:00:00Z",
+                            "query": "*"
+                        },
+                        "sort": "-timestamp",
+                        "page": {"limit": 50}
+                    }
+
+                    Example for monitor search:
+                    {
+                        "query": "env:production",
+                        "page": 0,
+                        "per_page": 20
+                    }""",
                     type="object",
                     required=True,
                 ),
@@ -457,9 +569,32 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         description = params.get("description", "Search")
         return f"{toolset_name_for_one_liner(self.toolset.name)}: {description}"

-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _body_to_query_params(self, body: dict) -> Optional[Dict[str, Any]]:
+        body_query_params = {}
+        if not isinstance(body, dict):
+            return None
+        if "filter" not in body:
+            return None
+        filter_data = body["filter"]
+        if "from" in filter_data:
+            try:
+                body_query_params["from"] = int(filter_data["from"]) // 1000
+            except (ValueError, TypeError):
+                pass
+        if "to" in filter_data:
+            try:
+                body_query_params["to"] = int(filter_data["to"]) // 1000
+            except (ValueError, TypeError):
+                pass
+        if "query" in filter_data:
+            body_query_params["query"] = filter_data["query"]
+
+        if not body_query_params:
+            return None
+
+        return body_query_params
+
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         """Execute the POST search request."""
         logging.info("=" * 60)
         logging.info("DatadogAPIPostSearch Tool Invocation:")
@@ -470,7 +605,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):

         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=TOOLSET_CONFIG_MISSING_ERROR,
                 params=params,
             )
@@ -487,7 +622,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         if not is_allowed:
             logging.error(f"Endpoint validation failed: {error_msg}")
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Endpoint validation failed: {error_msg}",
                 params=params,
             )
@@ -502,11 +637,14 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):

             logging.info(f"Full API URL: {url}")

+            # Preprocess time fields if any
+            processed_body = preprocess_time_fields(body, endpoint)
+
             # Execute request
             response = execute_datadog_http_request(
                 url=url,
                 headers=headers,
-                payload_or_params=body,
+                payload_or_params=processed_body,
                 timeout=self.toolset.dd_config.request_timeout,
                 method="POST",
             )
@@ -518,15 +656,23 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                 > self.toolset.dd_config.max_response_size
             ):
                 return StructuredToolResult(
-                    status=ToolResultStatus.ERROR,
+                    status=StructuredToolResultStatus.ERROR,
                     error=f"Response too large (>{self.toolset.dd_config.max_response_size} bytes)",
                     params=params,
                 )

+            body_query_params = self._body_to_query_params(body)
+            web_url = generate_datadog_general_url(
+                self.toolset.dd_config,
+                endpoint,
+                body_query_params,
+            )
+
             return StructuredToolResult(
-                status=ToolResultStatus.SUCCESS,
+                status=StructuredToolResultStatus.SUCCESS,
                 data=response_str,
                 params=params,
+                url=web_url,
             )

         except DataDogRequestError as e:
@@ -540,11 +686,16 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
                 )
             elif e.status_code == 404:
                 error_msg = f"Endpoint not found: {endpoint}"
+            elif e.status_code == 400:
+                # Use enhanced error message for 400 errors
+                error_msg = enhance_error_message(
+                    e, endpoint, "POST", str(self.toolset.dd_config.site_api_url)
+                )
             else:
                 error_msg = f"API error {e.status_code}: {str(e)}"

             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
                 params=params,
                 invocation=json.dumps({"url": url, "body": body}) if url else None,
@@ -553,7 +704,7 @@ class DatadogAPIPostSearch(BaseDatadogGeneralTool):
         except Exception as e:
             logging.exception(f"Failed to query Datadog API: {params}", exc_info=True)
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
                 params=params,
             )
@@ -565,10 +716,10 @@ class ListDatadogAPIResources(BaseDatadogGeneralTool):
     def __init__(self, toolset: "DatadogGeneralToolset"):
         super().__init__(
             name="list_datadog_api_resources",
-            description="List available Datadog API resources and endpoints that can be accessed",
+            description="[datadog/general toolset] List available Datadog API resources and endpoints that can be accessed",
             parameters={
-                "category": ToolParameter(
-                    description="Filter by category (e.g., 'monitors', 'dashboards', 'slos', 'incidents', 'synthetics', 'security', 'hosts', 'all')",
+                "search_regex": ToolParameter(
+                    description="Optional regex pattern to filter endpoints (e.g., 'monitor', 'logs|metrics', 'security.*signals', 'v2/.*search$'). If not provided, shows all endpoints.",
                     type="string",
                     required=False,
                 ),
@@ -578,145 +729,170 @@ class ListDatadogAPIResources(BaseDatadogGeneralTool):

     def get_parameterized_one_liner(self, params: dict) -> str:
         """Get a one-liner description of the tool invocation."""
-        category = params.get("category", "all")
-        return f"{toolset_name_for_one_liner(self.toolset.name)}: List API Resources ({category})"
+        search = params.get("search_regex", "all")
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: List API Resources (search: {search})"

-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         """List available API resources."""
-        category = params.get("category", "all").lower()
+        search_regex = params.get("search_regex", "")

         logging.info("=" * 60)
         logging.info("ListDatadogAPIResources Tool Invocation:")
-        logging.info(f" Category: {category}")
+        logging.info(f" Search regex: {search_regex or 'None (showing all)'}")
+        logging.info(f" OpenAPI Spec Loaded: {self.toolset.openapi_spec is not None}")
         logging.info("=" * 60)

-        # Define categories and their endpoints
-        resources = {
-            "monitors": {
-                "description": "Monitor management and alerting",
-                "endpoints": [
-                    "GET /api/v1/monitor - List all monitors",
-                    "GET /api/v1/monitor/{id} - Get a monitor by ID",
-                    "POST /api/v1/monitor/search - Search monitors",
-                    "GET /api/v1/monitor/groups/search - Search monitor groups",
-                ],
-            },
-            "dashboards": {
-                "description": "Dashboard and visualization management",
-                "endpoints": [
-                    "GET /api/v1/dashboard - List all dashboards",
-                    "GET /api/v1/dashboard/{id} - Get a dashboard by ID",
-                    "POST /api/v1/dashboard/lists - List dashboard lists",
-                    "GET /api/v1/dashboard/public/{token} - Get public dashboard",
-                ],
-            },
-            "slos": {
-                "description": "Service Level Objectives",
-                "endpoints": [
-                    "GET /api/v1/slo - List all SLOs",
-                    "GET /api/v1/slo/{id} - Get an SLO by ID",
-                    "GET /api/v1/slo/{id}/history - Get SLO history",
-                    "POST /api/v1/slo/search - Search SLOs",
-                    "GET /api/v1/slo/{id}/corrections - Get SLO corrections",
-                ],
-            },
-            "incidents": {
-                "description": "Incident management",
-                "endpoints": [
-                    "GET /api/v2/incidents - List incidents",
-                    "GET /api/v2/incidents/{id} - Get incident details",
-                    "POST /api/v2/incidents/search - Search incidents",
-                    "GET /api/v2/incidents/{id}/timeline - Get incident timeline",
-                    "GET /api/v2/incidents/{id}/attachments - Get incident attachments",
-                ],
-            },
-            "synthetics": {
-                "description": "Synthetic monitoring and testing",
-                "endpoints": [
-                    "GET /api/v1/synthetics/tests - List synthetic tests",
-                    "GET /api/v1/synthetics/tests/{id} - Get test details",
-                    "POST /api/v1/synthetics/tests/search - Search tests",
-                    "GET /api/v1/synthetics/tests/{id}/results - Get test results",
-                    "GET /api/v1/synthetics/locations - List test locations",
-                ],
-            },
-            "security": {
-                "description": "Security monitoring and detection",
-                "endpoints": [
-                    "GET /api/v2/security_monitoring/rules - List security rules",
-                    "GET /api/v2/security_monitoring/rules/{id} - Get rule details",
-                    "POST /api/v2/security_monitoring/rules/search - Search rules",
-                    "POST /api/v2/security_monitoring/signals/search - Search security signals",
-                ],
-            },
-            "hosts": {
-                "description": "Host and infrastructure monitoring",
-                "endpoints": [
-                    "GET /api/v1/hosts - List all hosts",
-                    "GET /api/v1/hosts/{name} - Get host details",
-                    "GET /api/v1/hosts/totals - Get host totals",
-                    "GET /api/v1/tags/hosts - Get host tags",
-                ],
-            },
-            "events": {
-                "description": "Event stream and management",
-                "endpoints": [
-                    "GET /api/v1/events - Query event stream",
-                    "GET /api/v1/events/{id} - Get event details",
-                    "POST /api/v2/events/search - Search events",
-                ],
-            },
-            "usage": {
-                "description": "Usage and billing information",
-                "endpoints": [
-                    "GET /api/v1/usage/summary - Get usage summary",
-                    "GET /api/v1/usage/billable-summary - Get billable summary",
-                    "GET /api/v1/usage/estimated_cost - Get estimated costs",
-                    "GET /api/v2/usage/cost_by_org - Get costs by organization",
-                ],
-            },
-            "services": {
-                "description": "APM service information",
-                "endpoints": [
-                    "GET /api/v2/services - List services",
-                    "GET /api/v2/services/{service} - Get service details",
-                    "GET /api/v2/services/{service}/dependencies - Get service dependencies",
-                ],
-            },
-        }
+        # Filter endpoints based on regex search
+        matching_endpoints = []

-        # Filter by category if specified
-        if category != "all":
-            matching_categories = {k: v for k, v in resources.items() if category in k}
-            if not matching_categories:
+        if search_regex:
+            try:
+                search_pattern = re.compile(search_regex, re.IGNORECASE)
+            except re.error as e:
                 return StructuredToolResult(
-                    status=ToolResultStatus.ERROR,
-                    error=f"Unknown category: {category}. Available: {', '.join(resources.keys())}",
+                    status=StructuredToolResultStatus.ERROR,
+                    error=f"Invalid regex pattern: {e}",
                     params=params,
+                    url="",
                 )
-            resources = matching_categories
+        else:
+            search_pattern = None
+
+        # Build list of matching endpoints
+        for pattern, hint in WHITELISTED_ENDPOINTS:
+            # Create a readable endpoint example from the pattern
+            example_endpoint = pattern.replace(r"^/api/v\d+", "/api/v1")
+            example_endpoint = example_endpoint.replace(r"(/search)?$", "")
+            example_endpoint = example_endpoint.replace(r"(/[^/]+)?$", "/{id}")
+            example_endpoint = example_endpoint.replace(r"/[^/]+$", "/{id}")
+            example_endpoint = example_endpoint.replace(r"/\d+$", "/{id}")
+            example_endpoint = example_endpoint.replace("$", "")
+            example_endpoint = example_endpoint.replace("^", "")
+
+            # Apply search filter if provided
+            if search_pattern and not search_pattern.search(example_endpoint):
+                continue
+
+            # Determine HTTP methods
+            if "search" in pattern or "query" in pattern or "aggregate" in pattern:
+                methods = "POST"
+            elif "/search)?$" in pattern:
+                methods = "GET/POST"
+            else:
+                methods = "GET"

-        # Format output
-        output = ["Available Datadog API Resources", "=" * 40, ""]
+            endpoint_info = {
+                "endpoint": example_endpoint,
+                "methods": methods,
+                "hint": hint,
+                "pattern": pattern,
+            }
+            matching_endpoints.append(endpoint_info)

-        for cat_name, cat_info in resources.items():
-            output.append(f"## {cat_name.upper()}")
-            output.append(f"Description: {cat_info['description']}")
-            output.append("")
-            output.append("Endpoints:")
-            for endpoint in cat_info["endpoints"]:
-                output.append(f" • {endpoint}")
-            output.append("")
+        if not matching_endpoints:
+            return StructuredToolResult(
+                status=StructuredToolResultStatus.SUCCESS,
+                data=f"No endpoints found matching regex: {search_regex}",
+                params=params,
+            )

+        # Format output
+        output = ["Available Datadog API Endpoints", "=" * 40]
+
+        if search_regex:
+            output.append(f"Filter: {search_regex}")
+        output.append(f"Found: {len(matching_endpoints)} endpoints")
+        output.append("")
+
+        # List endpoints with spec info if available
+        for info in matching_endpoints:
+            line = f"{info['methods']:8} {info['endpoint']}"
+            if info["hint"]:
+                line += f"\n {info['hint']}"
+
+            # Add OpenAPI spec info for this specific endpoint if available
+            if self.toolset.openapi_spec and "paths" in self.toolset.openapi_spec:
+                # Try to find matching path in OpenAPI spec
+                spec_path = None
+                for path in self.toolset.openapi_spec["paths"].keys():
+                    if re.match(info["pattern"], path):
+                        spec_path = path
+                        break
+
+                if spec_path and spec_path in self.toolset.openapi_spec["paths"]:
+                    path_spec = self.toolset.openapi_spec["paths"][spec_path]
+                    # Add actual OpenAPI schema for the endpoint
+                    for method in ["get", "post", "put", "delete"]:
+                        if method in path_spec:
+                            method_spec = path_spec[method]
+                            line += f"\n\n OpenAPI Schema ({method.upper()}):"
+
+                            # Add summary if available
+                            if "summary" in method_spec:
+                                line += f"\n Summary: {method_spec['summary']}"
+
+                            # Add parameters if available
+                            if "parameters" in method_spec:
+                                line += "\n Parameters:"
+                                for param in method_spec["parameters"]:
+                                    param_info = f"\n - {param.get('name', 'unknown')} ({param.get('in', 'unknown')})"
+                                    if param.get("required", False):
+                                        param_info += " [required]"
+                                    if "description" in param:
+                                        param_info += f": {param['description'][:100]}"
+                                    line += param_info
+
+                            # Add request body schema if available
+                            if "requestBody" in method_spec:
+                                line += "\n Request Body:"
+                                if "content" in method_spec["requestBody"]:
+                                    for content_type, content_spec in method_spec[
+                                        "requestBody"
+                                    ]["content"].items():
+                                        if "schema" in content_spec:
+                                            # Show a compact version of the schema
+                                            schema_str = json.dumps(
+                                                content_spec["schema"], indent=10
+                                            )[:500]
+                                            if (
+                                                len(json.dumps(content_spec["schema"]))
+                                                > 500
+                                            ):
+                                                schema_str += "..."
+                                            line += f"\n Content-Type: {content_type}"
+                                            line += f"\n Schema: {schema_str}"
+
+                            # Add response schema sample if available
+                            if "responses" in method_spec:
+                                if "200" in method_spec["responses"]:
+                                    line += "\n Response (200):"
+                                    resp = method_spec["responses"]["200"]
+                                    if "description" in resp:
+                                        line += f"\n {resp['description']}"
+                            break
+
+            output.append(line)
+
+        output.append("")
         output.append(
             "Note: All endpoints are read-only. Use the appropriate tool with the endpoint path."
         )
         output.append("Example: datadog_api_get with endpoint='/api/v1/monitors'")
+        output.append("")
+        output.append("Search examples:")
+        output.append(" • 'monitor' - find all monitor endpoints")
+        output.append(" • 'logs|metrics' - find logs OR metrics endpoints")
+        output.append(" • 'v2.*search$' - find all v2 search endpoints")
+        output.append(" • 'security.*signals' - find security signals endpoints")
+        doc_url = "https://docs.datadoghq.com/api/latest/"
+        if search_regex:
+            # URL encode the search parameter - spaces become + in query strings
+            search_params = urlencode({"s": search_regex})
+            doc_url = f"{doc_url}?{search_params}"

         return StructuredToolResult(
-            status=ToolResultStatus.SUCCESS,
+            status=StructuredToolResultStatus.SUCCESS,
             data="\n".join(output),
             params=params,
+            url=doc_url,