holmesgpt 0.13.2__py3-none-any.whl → 0.18.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in their public registry.
Files changed (188)
  1. holmes/__init__.py +3 -5
  2. holmes/clients/robusta_client.py +20 -6
  3. holmes/common/env_vars.py +58 -3
  4. holmes/common/openshift.py +1 -1
  5. holmes/config.py +123 -148
  6. holmes/core/conversations.py +71 -15
  7. holmes/core/feedback.py +191 -0
  8. holmes/core/investigation.py +31 -39
  9. holmes/core/investigation_structured_output.py +3 -3
  10. holmes/core/issue.py +1 -1
  11. holmes/core/llm.py +508 -88
  12. holmes/core/models.py +108 -4
  13. holmes/core/openai_formatting.py +14 -1
  14. holmes/core/prompt.py +48 -3
  15. holmes/core/runbooks.py +1 -0
  16. holmes/core/safeguards.py +8 -6
  17. holmes/core/supabase_dal.py +295 -100
  18. holmes/core/tool_calling_llm.py +489 -428
  19. holmes/core/tools.py +325 -56
  20. holmes/core/tools_utils/token_counting.py +21 -0
  21. holmes/core/tools_utils/tool_context_window_limiter.py +40 -0
  22. holmes/core/tools_utils/tool_executor.py +0 -13
  23. holmes/core/tools_utils/toolset_utils.py +1 -0
  24. holmes/core/toolset_manager.py +191 -5
  25. holmes/core/tracing.py +19 -3
  26. holmes/core/transformers/__init__.py +23 -0
  27. holmes/core/transformers/base.py +63 -0
  28. holmes/core/transformers/llm_summarize.py +175 -0
  29. holmes/core/transformers/registry.py +123 -0
  30. holmes/core/transformers/transformer.py +32 -0
  31. holmes/core/truncation/compaction.py +94 -0
  32. holmes/core/truncation/dal_truncation_utils.py +23 -0
  33. holmes/core/truncation/input_context_window_limiter.py +219 -0
  34. holmes/interactive.py +228 -31
  35. holmes/main.py +23 -40
  36. holmes/plugins/interfaces.py +2 -1
  37. holmes/plugins/prompts/__init__.py +2 -1
  38. holmes/plugins/prompts/_fetch_logs.jinja2 +31 -6
  39. holmes/plugins/prompts/_general_instructions.jinja2 +1 -2
  40. holmes/plugins/prompts/_runbook_instructions.jinja2 +24 -12
  41. holmes/plugins/prompts/base_user_prompt.jinja2 +7 -0
  42. holmes/plugins/prompts/conversation_history_compaction.jinja2 +89 -0
  43. holmes/plugins/prompts/generic_ask.jinja2 +0 -4
  44. holmes/plugins/prompts/generic_ask_conversation.jinja2 +0 -1
  45. holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +0 -1
  46. holmes/plugins/prompts/generic_investigation.jinja2 +0 -1
  47. holmes/plugins/prompts/investigation_procedure.jinja2 +50 -1
  48. holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +0 -1
  49. holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +0 -1
  50. holmes/plugins/runbooks/__init__.py +145 -17
  51. holmes/plugins/runbooks/catalog.json +2 -0
  52. holmes/plugins/sources/github/__init__.py +4 -2
  53. holmes/plugins/sources/prometheus/models.py +1 -0
  54. holmes/plugins/toolsets/__init__.py +44 -27
  55. holmes/plugins/toolsets/aks-node-health.yaml +46 -0
  56. holmes/plugins/toolsets/aks.yaml +64 -0
  57. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +38 -47
  58. holmes/plugins/toolsets/azure_sql/apis/alert_monitoring_api.py +3 -2
  59. holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +2 -1
  60. holmes/plugins/toolsets/azure_sql/apis/connection_failure_api.py +3 -2
  61. holmes/plugins/toolsets/azure_sql/apis/connection_monitoring_api.py +3 -1
  62. holmes/plugins/toolsets/azure_sql/apis/storage_analysis_api.py +3 -1
  63. holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +12 -13
  64. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +15 -12
  65. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +15 -12
  66. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +11 -11
  67. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +11 -9
  68. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +15 -12
  69. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +15 -15
  70. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +11 -8
  71. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +11 -8
  72. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +11 -8
  73. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +11 -8
  74. holmes/plugins/toolsets/azure_sql/utils.py +0 -32
  75. holmes/plugins/toolsets/bash/argocd/__init__.py +3 -3
  76. holmes/plugins/toolsets/bash/aws/__init__.py +4 -4
  77. holmes/plugins/toolsets/bash/azure/__init__.py +4 -4
  78. holmes/plugins/toolsets/bash/bash_toolset.py +11 -15
  79. holmes/plugins/toolsets/bash/common/bash.py +23 -13
  80. holmes/plugins/toolsets/bash/common/bash_command.py +1 -1
  81. holmes/plugins/toolsets/bash/common/stringify.py +1 -1
  82. holmes/plugins/toolsets/bash/kubectl/__init__.py +2 -1
  83. holmes/plugins/toolsets/bash/kubectl/constants.py +0 -1
  84. holmes/plugins/toolsets/bash/kubectl/kubectl_get.py +3 -4
  85. holmes/plugins/toolsets/bash/parse_command.py +12 -13
  86. holmes/plugins/toolsets/cilium.yaml +284 -0
  87. holmes/plugins/toolsets/connectivity_check.py +124 -0
  88. holmes/plugins/toolsets/coralogix/api.py +132 -119
  89. holmes/plugins/toolsets/coralogix/coralogix.jinja2 +14 -0
  90. holmes/plugins/toolsets/coralogix/toolset_coralogix.py +219 -0
  91. holmes/plugins/toolsets/coralogix/utils.py +15 -79
  92. holmes/plugins/toolsets/datadog/datadog_api.py +525 -26
  93. holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +55 -11
  94. holmes/plugins/toolsets/datadog/datadog_metrics_instructions.jinja2 +3 -3
  95. holmes/plugins/toolsets/datadog/datadog_models.py +59 -0
  96. holmes/plugins/toolsets/datadog/datadog_url_utils.py +213 -0
  97. holmes/plugins/toolsets/datadog/instructions_datadog_traces.jinja2 +165 -28
  98. holmes/plugins/toolsets/datadog/toolset_datadog_general.py +417 -241
  99. holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +234 -214
  100. holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +167 -79
  101. holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +374 -363
  102. holmes/plugins/toolsets/elasticsearch/__init__.py +6 -0
  103. holmes/plugins/toolsets/elasticsearch/elasticsearch.py +834 -0
  104. holmes/plugins/toolsets/elasticsearch/opensearch_ppl_query_docs.jinja2 +1616 -0
  105. holmes/plugins/toolsets/elasticsearch/opensearch_query_assist.py +78 -0
  106. holmes/plugins/toolsets/elasticsearch/opensearch_query_assist_instructions.jinja2 +223 -0
  107. holmes/plugins/toolsets/git.py +54 -50
  108. holmes/plugins/toolsets/grafana/base_grafana_toolset.py +16 -4
  109. holmes/plugins/toolsets/grafana/common.py +13 -29
  110. holmes/plugins/toolsets/grafana/grafana_tempo_api.py +455 -0
  111. holmes/plugins/toolsets/grafana/loki/instructions.jinja2 +25 -0
  112. holmes/plugins/toolsets/grafana/loki/toolset_grafana_loki.py +191 -0
  113. holmes/plugins/toolsets/grafana/loki_api.py +4 -0
  114. holmes/plugins/toolsets/grafana/toolset_grafana.py +293 -89
  115. holmes/plugins/toolsets/grafana/toolset_grafana_dashboard.jinja2 +49 -0
  116. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +246 -11
  117. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +820 -292
  118. holmes/plugins/toolsets/grafana/trace_parser.py +4 -3
  119. holmes/plugins/toolsets/internet/internet.py +15 -16
  120. holmes/plugins/toolsets/internet/notion.py +9 -11
  121. holmes/plugins/toolsets/investigator/core_investigation.py +44 -36
  122. holmes/plugins/toolsets/investigator/model.py +3 -1
  123. holmes/plugins/toolsets/json_filter_mixin.py +134 -0
  124. holmes/plugins/toolsets/kafka.py +36 -42
  125. holmes/plugins/toolsets/kubernetes.yaml +317 -113
  126. holmes/plugins/toolsets/kubernetes_logs.py +9 -9
  127. holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
  128. holmes/plugins/toolsets/logging_utils/logging_api.py +94 -8
  129. holmes/plugins/toolsets/mcp/toolset_mcp.py +218 -64
  130. holmes/plugins/toolsets/newrelic/new_relic_api.py +165 -0
  131. holmes/plugins/toolsets/newrelic/newrelic.jinja2 +65 -0
  132. holmes/plugins/toolsets/newrelic/newrelic.py +320 -0
  133. holmes/plugins/toolsets/openshift.yaml +283 -0
  134. holmes/plugins/toolsets/prometheus/prometheus.py +1202 -421
  135. holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +54 -5
  136. holmes/plugins/toolsets/prometheus/utils.py +28 -0
  137. holmes/plugins/toolsets/rabbitmq/api.py +23 -4
  138. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +13 -14
  139. holmes/plugins/toolsets/robusta/robusta.py +239 -68
  140. holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +26 -9
  141. holmes/plugins/toolsets/runbook/runbook_fetcher.py +157 -27
  142. holmes/plugins/toolsets/service_discovery.py +1 -1
  143. holmes/plugins/toolsets/servicenow_tables/instructions.jinja2 +83 -0
  144. holmes/plugins/toolsets/servicenow_tables/servicenow_tables.py +426 -0
  145. holmes/plugins/toolsets/utils.py +88 -0
  146. holmes/utils/config_utils.py +91 -0
  147. holmes/utils/connection_utils.py +31 -0
  148. holmes/utils/console/result.py +10 -0
  149. holmes/utils/default_toolset_installation_guide.jinja2 +1 -22
  150. holmes/utils/env.py +7 -0
  151. holmes/utils/file_utils.py +2 -1
  152. holmes/utils/global_instructions.py +60 -11
  153. holmes/utils/holmes_status.py +6 -4
  154. holmes/utils/holmes_sync_toolsets.py +0 -2
  155. holmes/utils/krr_utils.py +188 -0
  156. holmes/utils/log.py +15 -0
  157. holmes/utils/markdown_utils.py +2 -3
  158. holmes/utils/memory_limit.py +58 -0
  159. holmes/utils/sentry_helper.py +64 -0
  160. holmes/utils/stream.py +69 -8
  161. holmes/utils/tags.py +4 -3
  162. holmes/version.py +37 -15
  163. holmesgpt-0.18.4.dist-info/LICENSE +178 -0
  164. {holmesgpt-0.13.2.dist-info → holmesgpt-0.18.4.dist-info}/METADATA +35 -31
  165. holmesgpt-0.18.4.dist-info/RECORD +258 -0
  166. holmes/core/performance_timing.py +0 -72
  167. holmes/plugins/toolsets/aws.yaml +0 -80
  168. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +0 -112
  169. holmes/plugins/toolsets/datadog/datadog_traces_formatter.py +0 -310
  170. holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +0 -739
  171. holmes/plugins/toolsets/grafana/grafana_api.py +0 -42
  172. holmes/plugins/toolsets/grafana/tempo_api.py +0 -124
  173. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +0 -110
  174. holmes/plugins/toolsets/newrelic.py +0 -231
  175. holmes/plugins/toolsets/opensearch/opensearch.py +0 -257
  176. holmes/plugins/toolsets/opensearch/opensearch_logs.py +0 -161
  177. holmes/plugins/toolsets/opensearch/opensearch_traces.py +0 -218
  178. holmes/plugins/toolsets/opensearch/opensearch_traces_instructions.jinja2 +0 -12
  179. holmes/plugins/toolsets/opensearch/opensearch_utils.py +0 -166
  180. holmes/plugins/toolsets/servicenow/install.md +0 -37
  181. holmes/plugins/toolsets/servicenow/instructions.jinja2 +0 -3
  182. holmes/plugins/toolsets/servicenow/servicenow.py +0 -219
  183. holmes/utils/keygen_utils.py +0 -6
  184. holmesgpt-0.13.2.dist-info/LICENSE.txt +0 -21
  185. holmesgpt-0.13.2.dist-info/RECORD +0 -234
  186. /holmes/plugins/toolsets/{opensearch → newrelic}/__init__.py +0 -0
  187. {holmesgpt-0.13.2.dist-info → holmesgpt-0.18.4.dist-info}/WHEEL +0 -0
  188. {holmesgpt-0.13.2.dist-info → holmesgpt-0.18.4.dist-info}/entry_points.txt +0 -0
holmes/plugins/toolsets/datadog/toolset_datadog_traces.py
@@ -1,43 +1,47 @@
 """Datadog Traces toolset for HolmesGPT."""
 
+import copy
 import json
 import logging
 import os
-import time
+import re
 from typing import Any, Dict, Optional, Tuple
 
+from pydantic import AnyUrl
+
 from holmes.core.tools import (
     CallablePrerequisite,
+    StructuredToolResult,
+    StructuredToolResultStatus,
     Tool,
+    ToolInvokeContext,
     ToolParameter,
     Toolset,
-    StructuredToolResult,
-    ToolResultStatus,
     ToolsetTag,
 )
+from holmes.plugins.toolsets.consts import STANDARD_END_DATETIME_TOOL_PARAM_DESCRIPTION
 from holmes.plugins.toolsets.datadog.datadog_api import (
+    MAX_RETRY_COUNT_ON_RATE_LIMIT,
    DataDogRequestError,
-    DatadogBaseConfig,
    execute_datadog_http_request,
    get_headers,
-    MAX_RETRY_COUNT_ON_RATE_LIMIT,
-)
-from holmes.plugins.toolsets.utils import (
-    process_timestamps_to_int,
-    toolset_name_for_one_liner,
 )
-from holmes.plugins.toolsets.datadog.datadog_traces_formatter import (
-    format_traces_list,
-    format_trace_hierarchy,
-    format_spans_search,
+from holmes.plugins.toolsets.datadog.datadog_models import DatadogTracesConfig
+from holmes.plugins.toolsets.datadog.datadog_url_utils import (
+    generate_datadog_spans_analytics_url,
+    generate_datadog_spans_url,
 )
 from holmes.plugins.toolsets.logging_utils.logging_api import (
     DEFAULT_TIME_SPAN_SECONDS,
 )
+from holmes.plugins.toolsets.utils import (
+    process_timestamps_to_int,
+    standard_start_datetime_tool_param_description,
+    toolset_name_for_one_liner,
+)
 
-
-class DatadogTracesConfig(DatadogBaseConfig):
-    indexes: list[str] = ["*"]
+# Valid percentile aggregations supported by Datadog
+PERCENTILE_AGGREGATIONS = ["pc75", "pc90", "pc95", "pc98", "pc99"]
 
 
 class DatadogTracesToolset(Toolset):
@@ -49,27 +53,18 @@ class DatadogTracesToolset(Toolset):
         super().__init__(
             name="datadog/traces",
             description="Toolset for interacting with Datadog APM to fetch and analyze traces",
-            docs_url="https://docs.datadoghq.com/api/latest/spans/",
+            docs_url="https://holmesgpt.dev/data-sources/builtin-toolsets/datadog/",
             icon_url="https://imgix.datadoghq.com//img/about/presskit/DDlogo.jpg",
             prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],
             tools=[
-                FetchDatadogTracesList(toolset=self),
-                FetchDatadogTraceById(toolset=self),
-                FetchDatadogSpansByFilter(toolset=self),
+                GetSpans(toolset=self),
+                AggregateSpans(toolset=self),
             ],
-            experimental=True,
             tags=[ToolsetTag.CORE],
         )
-        self._reload_instructions()
-
-    def _reload_instructions(self):
-        """Load Datadog traces specific troubleshooting instructions."""
-        template_file_path = os.path.abspath(
-            os.path.join(
-                os.path.dirname(__file__), "instructions_datadog_traces.jinja2"
-            )
+        self._load_llm_instructions_from_file(
+            os.path.dirname(__file__), "instructions_datadog_traces.jinja2"
         )
-        self._load_llm_instructions(jinja_template=f"file://{template_file_path}")
 
     def prerequisites_callable(self, config: dict[str, Any]) -> Tuple[bool, str]:
         """Check prerequisites with configuration."""
@@ -136,13 +131,12 @@ class DatadogTracesToolset(Toolset):
             return False, f"Healthcheck failed with exception: {str(e)}"
 
     def get_example_config(self) -> Dict[str, Any]:
-        """Get example configuration for this toolset."""
-        return {
-            "dd_api_key": "<your_datadog_api_key>",
-            "dd_app_key": "<your_datadog_app_key>",
-            "site_api_url": "https://api.datadoghq.com",  # or https://api.datadoghq.eu for EU
-            "request_timeout": 60,
-        }
+        example_config = DatadogTracesConfig(
+            dd_api_key="<your_datadog_api_key>",
+            dd_app_key="<your_datadog_app_key>",
+            site_api_url=AnyUrl("https://api.datadoghq.com"),
+        )
+        return example_config.model_dump(mode="json")
 
 
 class BaseDatadogTracesTool(Tool):
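
Note: the rewritten get_example_config above derives the sample configuration from the Pydantic model rather than a hand-written dict, so the example can never drift from the model's schema, and model_dump(mode="json") keeps the output JSON-safe. A minimal sketch of the same round-trip, using a hypothetical stand-in model (the real DatadogTracesConfig lives in datadog_models.py and has more fields):

    from typing import Any, Dict

    from pydantic import AnyUrl, BaseModel


    class ExampleConfig(BaseModel):  # hypothetical stand-in for DatadogTracesConfig
        dd_api_key: str
        dd_app_key: str
        site_api_url: AnyUrl


    def get_example_config() -> Dict[str, Any]:
        example = ExampleConfig(
            dd_api_key="<your_datadog_api_key>",
            dd_app_key="<your_datadog_app_key>",
            site_api_url=AnyUrl("https://api.datadoghq.com"),
        )
        # mode="json" serializes AnyUrl to a plain string
        return example.model_dump(mode="json")
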
@@ -151,79 +145,97 @@ class BaseDatadogTracesTool(Tool):
     toolset: "DatadogTracesToolset"
 
 
-class FetchDatadogTracesList(BaseDatadogTracesTool):
-    """Tool to fetch a list of traces from Datadog."""
+# Schema defines what fields to keep in compact mode
+COMPACT_SCHEMA = {
+    "custom": {
+        "duration": True,
+        "http": {"status_code": True, "host": True, "method": True, "url": True},
+    },
+    "status": True,
+    "start_timestamp": True,
+    "end_timestamp": True,
+    "error": True,
+    "single_span": True,
+    "span_id": True,
+    "trace_id": True,
+    "parent_id": True,
+    "service": True,
+    "resource_name": True,
+    "tags": {"_filter": "startswith", "_values": ["pod_name:"]},  # Generic array filter
+}
+
+
+class GetSpans(BaseDatadogTracesTool):
+    """Tool to search for spans with specific filters."""
 
     def __init__(self, toolset: "DatadogTracesToolset"):
         super().__init__(
-            name="fetch_datadog_traces",
-            description="Fetch a list of traces from Datadog with optional filters",
+            name="fetch_datadog_spans",
+            description="Search for spans in Datadog using span syntax. "
+            "Supports wildcards (*) for pattern matching: @http.route:*payment*, resource_name:*user*, service:*api*. "
+            "Uses the DataDog api endpoint: POST /api/v2/spans/events/search with 'query' parameter.",
             parameters={
-                "service": ToolParameter(
-                    description="Filter by service name",
-                    type="string",
-                    required=False,
-                ),
-                "operation": ToolParameter(
-                    description="Filter by operation name",
+                "query": ToolParameter(
+                    description="The search query following span syntax. Supports wildcards (*) for pattern matching. Examples: @http.route:*payment*, resource_name:*user*, service:*api*. Default: *",
                     type="string",
                     required=False,
                 ),
-                "resource": ToolParameter(
-                    description="Filter by resource name",
+                "start_datetime": ToolParameter(
+                    description=standard_start_datetime_tool_param_description(
+                        DEFAULT_TIME_SPAN_SECONDS
+                    ),
                     type="string",
                     required=False,
                 ),
-                "min_duration": ToolParameter(
-                    description="Minimum duration (e.g., '5s', '500ms', '1m')",
+                "end_datetime": ToolParameter(
+                    description=STANDARD_END_DATETIME_TOOL_PARAM_DESCRIPTION,
                     type="string",
                     required=False,
                 ),
-                "start_datetime": ToolParameter(
-                    description="Start time in RFC3339 format or relative time in seconds (negative for past)",
+                "timezone": ToolParameter(
+                    description="The timezone can be specified as GMT, UTC, an offset from UTC (like UTC+1), or as a Timezone Database identifier (like America/New_York). default: UTC",
                     type="string",
                     required=False,
                 ),
-                "end_datetime": ToolParameter(
-                    description="End time in RFC3339 format or relative time in seconds (negative for past)",
+                "cursor": ToolParameter(
+                    description="The returned paging point to use to get the next results. IMPORTANT: Cursors are single-use and stateful - never reuse the same cursor value multiple times or parallelize cursor-based calls. Each response provides a new cursor for the subsequent request.",
                     type="string",
                     required=False,
                 ),
                 "limit": ToolParameter(
-                    description="Maximum number of traces to return",
+                    description="Maximum number of spans to return. Default: 10. Warning: Using values higher than 10 may result in too much data and cause the tool call to fail.",
                     type="integer",
                     required=False,
                 ),
+                "sort_desc": ToolParameter(
+                    description="Get the results in descending order. default: true",
+                    type="boolean",
+                    required=False,
+                ),
+                "compact": ToolParameter(
+                    description="Return only essential fields to reduce output size. Use with higher limits (50-100) for initial exploration, then use compact=false with lower limits (5-10) for detailed investigation. Default: True",
+                    type="boolean",
+                    required=True,
+                ),
             },
             toolset=toolset,
         )
 
     def get_parameterized_one_liner(self, params: dict) -> str:
         """Get a one-liner description of the tool invocation."""
-        filters = []
-        if "service" in params:
-            filters.append(f"service={params['service']}")
-        if "operation" in params:
-            filters.append(f"operation={params['operation']}")
-        if "min_duration" in params:
-            filters.append(f"duration>{params['min_duration']}")
-
-        filter_str = ", ".join(filters) if filters else "all"
-        return f"{toolset_name_for_one_liner(self.toolset.name)}: Fetch Traces ({filter_str})"
-
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
-        """Execute the tool to fetch traces."""
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Search Spans ({params['query'] if 'query' in params else ''})"
+
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
+        """Execute the tool to search spans."""
         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error="Datadog configuration not initialized",
                 params=params,
             )
 
         url = None
-        payload = None
+        payload: Optional[Dict[str, Any]] = None
 
         try:
             # Process timestamps
@@ -237,36 +249,14 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
             from_time_ms = from_time_int * 1000
             to_time_ms = to_time_int * 1000
 
-            # Build search query
-            query_parts = []
-
-            if params.get("service"):
-                query_parts.append(f"service:{params['service']}")
-
-            if params.get("operation"):
-                query_parts.append(f"operation_name:{params['operation']}")
-
-            if params.get("resource"):
-                query_parts.append(f"resource_name:{params['resource']}")
-
-            if params.get("min_duration"):
-                # Parse duration string (e.g., "5s", "500ms", "1m")
-                duration_str = params["min_duration"].lower()
-                if duration_str.endswith("ms"):
-                    duration_ns = int(float(duration_str[:-2]) * 1_000_000)
-                elif duration_str.endswith("s"):
-                    duration_ns = int(float(duration_str[:-1]) * 1_000_000_000)
-                elif duration_str.endswith("m"):
-                    duration_ns = int(float(duration_str[:-1]) * 60 * 1_000_000_000)
-                else:
-                    # Assume milliseconds if no unit
-                    duration_ns = int(float(duration_str) * 1_000_000)
-
-                query_parts.append(f"@duration:>{duration_ns}")
-
-            query = " ".join(query_parts) if query_parts else "*"
+            query: str = params.get("query") if params.get("query") else "*"  # type: ignore
+            limit = params.get("limit") if params.get("limit") else 10
+            if params.get("sort") is not None:
+                sort = "-timestamp" if params.get("sort") else True
+            else:
+                sort = "-timestamp"
 
-            # Prepare API request - use POST search endpoint
+            # Use POST endpoint for more complex searches
             url = f"{self.toolset.dd_config.site_api_url}/api/v2/spans/events/search"
             headers = get_headers(self.toolset.dd_config)
 
@@ -280,12 +270,17 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
                         "to": str(to_time_ms),
                         "indexes": self.toolset.dd_config.indexes,
                     },
-                    "page": {"limit": params.get("limit", 50)},
-                    "sort": "-timestamp",
+                    "page": {
+                        "limit": limit,
+                    },
+                    "sort": sort,
                 },
             }
         }
 
+        if params.get("cursor"):
+            payload["data"]["attributes"]["page"]["cursor"] = params["cursor"]
+
         response = execute_datadog_http_request(
             url=url,
             headers=headers,
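
Note: for orientation, the request body that fetch_datadog_spans now posts to /api/v2/spans/events/search looks like the following (values are illustrative; "from"/"to" are epoch milliseconds as strings, and a "cursor" key is added under "page" only on follow-up pages):

    payload = {
        "data": {
            "type": "search_request",
            "attributes": {
                "filter": {
                    "query": "service:web-app @http.status_code:500",
                    "from": "1700000000000",
                    "to": "1700003600000",
                    "indexes": ["*"],
                },
                "page": {"limit": 10},
                "sort": "-timestamp",
            },
        }
    }
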
@@ -294,32 +289,28 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
                 method="POST",
             )
 
-            # Handle tuple response from POST requests
-            if isinstance(response, tuple):
-                spans, _ = response
-            elif response:
-                spans = response.get("data", [])
-            else:
-                spans = []
-
-            # Format the traces using the formatter
-            formatted_output = format_traces_list(spans, limit=params.get("limit", 50))
-            if not formatted_output:
-                return StructuredToolResult(
-                    status=ToolResultStatus.NO_DATA,
-                    params=params,
-                    data="No matching traces found.",
-                )
+            # Apply compact filtering if requested
+            if params.get("compact", False) and "data" in response:
+                response["data"] = [
+                    self._filter_span_attributes(span) for span in response["data"]
+                ]
+
+            web_url = generate_datadog_spans_url(
+                self.toolset.dd_config,
+                query,
+                from_time_ms,
+                to_time_ms,
+            )
 
             return StructuredToolResult(
-                status=ToolResultStatus.SUCCESS,
-                data=formatted_output,
+                status=StructuredToolResultStatus.SUCCESS,
+                data=response,
                 params=params,
+                url=web_url,
             )
 
         except DataDogRequestError as e:
             logging.exception(e, exc_info=True)
-
             if e.status_code == 429:
                 error_msg = f"Datadog API rate limit exceeded. Failed after {MAX_RETRY_COUNT_ON_RATE_LIMIT} retry attempts."
             elif e.status_code == 403:
@@ -331,7 +322,7 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
                 error_msg = f"Exception while querying Datadog: {str(e)}"
 
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
                 params=params,
                 invocation=(
@@ -344,7 +335,7 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
         except Exception as e:
             logging.exception(e, exc_info=True)
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
                 params=params,
                 invocation=(
@@ -354,219 +345,257 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
                 ),
             )
 
+    def _apply_compact_schema(self, source: dict, schema: dict) -> dict:
+        """Apply schema to filter fields from source dict."""
+        result: Dict[str, Any] = {}
+
+        for key, value in schema.items():
+            if key not in source:
+                continue
+
+            source_value = source[key]
+
+            if isinstance(value, dict):
+                # Check if it's a filter directive for arrays
+                if "_filter" in value and isinstance(source_value, list):
+                    filter_type = value["_filter"]
+                    filter_values = value.get("_values", [])
+
+                    if filter_type == "startswith":
+                        # Filter array items that start with any of the specified values
+                        filtered = [
+                            item
+                            for item in source_value
+                            if isinstance(item, str)
+                            and any(item.startswith(prefix) for prefix in filter_values)
+                        ]
+                        if filtered:
+                            result[key] = filtered
+
+                elif isinstance(source_value, dict):
+                    # Regular nested object - recurse
+                    nested_result = self._apply_compact_schema(source_value, value)
+                    if nested_result:
+                        result[key] = nested_result
+
+            elif value is True:
+                # Copy the field as-is
+                result[key] = source_value
+
+        return result
+
+    def _filter_span_attributes(self, span: dict) -> dict:
+        """Filter span to include only essential fields."""
+        filtered_span = {
+            "id": span.get("id"),
+            "type": span.get("type"),
+        }
 
-class FetchDatadogTraceById(BaseDatadogTracesTool):
-    """Tool to fetch detailed information about a specific trace."""
-
-    def __init__(self, toolset: "DatadogTracesToolset"):
-        super().__init__(
-            name="fetch_datadog_trace_by_id",
-            description="Fetch detailed information about a specific trace by its ID",
-            parameters={
-                "trace_id": ToolParameter(
-                    description="The trace ID to fetch details for",
-                    type="string",
-                    required=True,
-                ),
-            },
-            toolset=toolset,
-        )
-
-    def get_parameterized_one_liner(self, params: dict) -> str:
-        """Get a one-liner description of the tool invocation."""
-        trace_id = params.get("trace_id", "unknown")
-        return f"{toolset_name_for_one_liner(self.toolset.name)}: Fetch Trace Details ({trace_id})"
-
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
-        """Execute the tool to fetch trace details."""
-        if not self.toolset.dd_config:
-            return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
-                error="Datadog configuration not initialized",
-                params=params,
-            )
-
-        trace_id = params.get("trace_id")
-        if not trace_id:
-            return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
-                error="trace_id parameter is required",
-                params=params,
-            )
-
-        url = None
-        payload = None
-
-        try:
-            # For Datadog, we need to search for all spans with the given trace_id
-            # Using a reasonable time window (last 7 days by default)
-            current_time = int(time.time())
-            from_time_ms = (current_time - 604800) * 1000  # 7 days ago
-            to_time_ms = current_time * 1000
-
-            url = f"{self.toolset.dd_config.site_api_url}/api/v2/spans/events/search"
-            headers = get_headers(self.toolset.dd_config)
-
-            payload = {
-                "data": {
-                    "type": "search_request",
-                    "attributes": {
-                        "filter": {
-                            "query": f"trace_id:{trace_id}",
-                            "from": str(from_time_ms),
-                            "to": str(to_time_ms),
-                            "indexes": self.toolset.dd_config.indexes,
-                        },
-                        "page": {"limit": 1000},  # Get all spans for the trace
-                        "sort": "timestamp",
-                    },
-                }
-            }
-
-            response = execute_datadog_http_request(
-                url=url,
-                headers=headers,
-                payload_or_params=payload,
-                timeout=self.toolset.dd_config.request_timeout,
-                method="POST",
-            )
-
-            # Handle tuple response from POST requests
-            if isinstance(response, tuple):
-                spans, _ = response
-            elif response:
-                spans = response.get("data", [])
-            else:
-                spans = []
-
-            # Format the trace hierarchy using the formatter
-            formatted_output = format_trace_hierarchy(trace_id, spans)
-            if not formatted_output:
-                return StructuredToolResult(
-                    status=ToolResultStatus.NO_DATA,
-                    params=params,
-                    data=f"No trace found for trace_id: {trace_id}",
-                )
-
-            return StructuredToolResult(
-                status=ToolResultStatus.SUCCESS,
-                data=formatted_output,
-                params=params,
+        if "attributes" in span:
+            filtered_span["attributes"] = self._apply_compact_schema(
+                span["attributes"], COMPACT_SCHEMA
             )
 
-        except DataDogRequestError as e:
-            logging.exception(e, exc_info=True)
+        return filtered_span
 
-            if e.status_code == 429:
-                error_msg = f"Datadog API rate limit exceeded. Failed after {MAX_RETRY_COUNT_ON_RATE_LIMIT} retry attempts."
-            elif e.status_code == 403:
-                error_msg = (
-                    f"Permission denied. Ensure your Datadog Application Key has the 'apm_read' "
-                    f"permission. Error: {str(e)}"
-                )
-            else:
-                error_msg = f"Exception while querying Datadog: {str(e)}"
 
-            return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
-                error=error_msg,
-                params=params,
-                invocation=(
-                    json.dumps({"url": url, "payload": payload})
-                    if url and payload
-                    else None
-                ),
-            )
-
-        except Exception as e:
-            logging.exception(e, exc_info=True)
-            return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
-                error=f"Unexpected error: {str(e)}",
-                params=params,
-                invocation=(
-                    json.dumps({"url": url, "payload": payload})
-                    if url and payload
-                    else None
-                ),
-            )
-
-
-class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
-    """Tool to search for spans with specific filters."""
+class AggregateSpans(BaseDatadogTracesTool):
+    """Tool to aggregate span data into buckets and compute metrics and timeseries."""
 
     def __init__(self, toolset: "DatadogTracesToolset"):
         super().__init__(
-            name="fetch_datadog_spans",
-            description="Search for spans in Datadog with detailed filters",
+            name="aggregate_datadog_spans",
+            description="Aggregate spans into buckets and compute metrics and timeseries. "
+            "Uses the DataDog api endpoint: POST /api/v2/spans/analytics/aggregate",
             parameters={
                 "query": ToolParameter(
-                    description="Datadog search query (e.g., 'service:web-app @http.status_code:500')",
-                    type="string",
-                    required=False,
-                ),
-                "service": ToolParameter(
-                    description="Filter by service name",
+                    description="Search query following span syntax. Default: '*'",
                     type="string",
                     required=False,
                 ),
-                "operation": ToolParameter(
-                    description="Filter by operation name",
+                "start_datetime": ToolParameter(
+                    description=standard_start_datetime_tool_param_description(
+                        DEFAULT_TIME_SPAN_SECONDS
+                    ),
                     type="string",
                     required=False,
                 ),
-                "resource": ToolParameter(
-                    description="Filter by resource name",
+                "end_datetime": ToolParameter(
+                    description=STANDARD_END_DATETIME_TOOL_PARAM_DESCRIPTION,
                     type="string",
                     required=False,
                 ),
-                "tags": ToolParameter(
-                    description="Filter by tags (e.g., {'env': 'production', 'version': '1.2.3'})",
-                    type="object",
-                    required=False,
+                "compute": ToolParameter(
+                    description="List of metrics to compute from the matching spans. Supports up to 10 computes at the same time.",
+                    type="array",
+                    required=True,
+                    items=ToolParameter(
+                        type="object",
+                        properties={
+                            "aggregation": ToolParameter(
+                                type="string",
+                                required=True,
+                                enum=[
+                                    "count",
+                                    "cardinality",
+                                    "sum",
+                                    "min",
+                                    "max",
+                                    "avg",
+                                    "median",
+                                ]
+                                + PERCENTILE_AGGREGATIONS,
+                                description="The aggregation method.",
+                            ),
+                            "metric": ToolParameter(
+                                type="string",
+                                required=False,
+                                description="The span attribute to aggregate. Required for all non-count aggregations",
+                            ),
+                            "type": ToolParameter(
+                                type="string",
+                                required=False,
+                                enum=["total", "timeseries"],
+                                description="Compute type for the aggregation. Default: 'total'",
+                            ),
+                            "interval": ToolParameter(
+                                type="string",
+                                required=False,
+                                description="The time buckets for timeseries results (e.g., '5m', '1h'). The time buckets' size (only used for type=timeseries) Defaults to a resolution of 150 points.",
+                            ),
+                        },
+                    ),
                 ),
-                "start_datetime": ToolParameter(
-                    description="Start time in RFC3339 format or relative time in seconds (negative for past)",
-                    type="string",
+                "group_by": ToolParameter(
+                    description="List of facets to split the aggregate data by",
+                    type="array",
                     required=False,
+                    items=ToolParameter(
+                        type="object",
+                        properties={
+                            "facet": ToolParameter(
+                                type="string",
+                                required=True,
+                                description="The span attribute to split by",
+                            ),
+                            "limit": ToolParameter(
+                                type="integer",
+                                required=False,
+                                description="Maximum number of facet groups to return. Default: 10",
+                            ),
+                            "missing": ToolParameter(
+                                type="string",
+                                required=False,
+                                description="The value to use for spans that don't have the facet",
+                            ),
+                            "sort": ToolParameter(
+                                type="object",
+                                required=False,
+                                description="Sort configuration for the groups",
+                                properties={
+                                    # Not working correctly
+                                    # "aggregation": ToolParameter(
+                                    #     type="string",
+                                    #     required=True,
+                                    #     description="The aggregation method to sort by",
+                                    # ),
+                                    "metric": ToolParameter(
+                                        type="string",
+                                        required=False,
+                                        description="The metric to sort by when using a metric aggregation. (only used for type=measure).",
+                                    ),
+                                    "type": ToolParameter(
+                                        type="string",
+                                        required=False,
+                                        enum=["alphabetical", "measure"],
+                                        description="The type of sorting to use",
+                                    ),
+                                    "order": ToolParameter(
+                                        type="string",
+                                        required=False,
+                                        enum=["asc", "desc"],
+                                        description="The sort order. Default: 'desc'",
+                                    ),
+                                },
+                            ),
+                            "total": ToolParameter(
+                                type="boolean",
+                                required=False,
+                                description="Whether to include a 'total' group with all non-faceted results",
+                            ),
+                            "histogram": ToolParameter(
+                                type="object",
+                                required=False,
+                                description="Histogram configuration for numeric facets",
+                                properties={
+                                    "interval": ToolParameter(
+                                        type="number",
+                                        required=True,
+                                        description="The bin size for the histogram",
+                                    ),
+                                    "min": ToolParameter(
+                                        type="number",
+                                        required=False,
+                                        description="The minimum value for the histogram",
+                                    ),
+                                    "max": ToolParameter(
+                                        type="number",
+                                        required=False,
+                                        description="The maximum value for the histogram",
+                                    ),
+                                },
+                            ),
+                        },
+                    ),
                 ),
-                "end_datetime": ToolParameter(
-                    description="End time in RFC3339 format or relative time in seconds (negative for past)",
+                "timezone": ToolParameter(
+                    description="The timezone for time-based results (e.g., 'GMT', 'UTC', 'America/New_York'). Default: 'UTC'",
                     type="string",
                     required=False,
                 ),
-                "limit": ToolParameter(
-                    description="Maximum number of spans to return",
-                    type="integer",
-                    required=False,
-                ),
             },
             toolset=toolset,
         )
 
     def get_parameterized_one_liner(self, params: dict) -> str:
         """Get a one-liner description of the tool invocation."""
-        if "query" in params:
-            return f"{toolset_name_for_one_liner(self.toolset.name)}: Search Spans ({params['query']})"
-
-        filters = []
-        if "service" in params:
-            filters.append(f"service={params['service']}")
-        if "operation" in params:
-            filters.append(f"operation={params['operation']}")
-
-        filter_str = ", ".join(filters) if filters else "all"
-        return f"{toolset_name_for_one_liner(self.toolset.name)}: Search Spans ({filter_str})"
-
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
-        """Execute the tool to search spans."""
+        query = params.get("query", "*")
+        compute_info = ""
+        if params.get("compute"):
+            aggregations = [c.get("aggregation", "") for c in params["compute"]]
+            compute_info = f" (computing: {', '.join(aggregations)})"
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Aggregate Spans ({query}){compute_info}"
+
+    def _fix_percentile_aggregations(self, compute_params: list) -> list:
+        """Fix common percentile format mistakes that the LLM makes when choosing from the enum (e.g., p95 -> pc95).
+
+        Args:
+            compute_params: List of compute parameter dictionaries
+
+        Returns:
+            List of compute parameters with corrected aggregation values
+        """
+        # Deep copy the entire compute params to avoid modifying the original
+        processed_compute = copy.deepcopy(compute_params)
+
+        # Simple replacement for each known percentile
+        for compute_item in processed_compute:
+            if isinstance(compute_item, dict) and "aggregation" in compute_item:
+                agg_value = compute_item["aggregation"]
+                # Check if it matches p\d\d pattern (e.g., p95)
+                if re.match(r"^p\d{2}$", agg_value):
+                    # Convert to pc format and check if it's valid
+                    pc_version = "pc" + agg_value[1:]
+                    if pc_version in PERCENTILE_AGGREGATIONS:
+                        compute_item["aggregation"] = pc_version
+
+        return processed_compute
+
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
+        """Execute the tool to aggregate spans."""
         if not self.toolset.dd_config:
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error="Datadog configuration not initialized",
                 params=params,
             )
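
Note: to make compact mode concrete, here is a standalone restatement of the _apply_compact_schema logic added above, run against a fabricated span attribute dict and a trimmed schema (both inputs are made up for illustration):

    def apply_compact_schema(source: dict, schema: dict) -> dict:
        # Keep a key when the schema marks it True; recurse into nested dicts;
        # for list values, keep only items matching the "_filter" directive.
        result: dict = {}
        for key, value in schema.items():
            if key not in source:
                continue
            source_value = source[key]
            if isinstance(value, dict):
                if "_filter" in value and isinstance(source_value, list):
                    if value["_filter"] == "startswith":
                        filtered = [
                            item
                            for item in source_value
                            if isinstance(item, str)
                            and any(item.startswith(p) for p in value.get("_values", []))
                        ]
                        if filtered:
                            result[key] = filtered
                elif isinstance(source_value, dict):
                    nested = apply_compact_schema(source_value, value)
                    if nested:
                        result[key] = nested
            elif value is True:
                result[key] = source_value
        return result


    attributes = {
        "service": "checkout",
        "resource_name": "POST /pay",
        "tags": ["pod_name:checkout-7d9f", "env:prod"],
        "custom": {"duration": 120000000, "http": {"status_code": 500}, "internal": "x"},
    }
    schema = {
        "service": True,
        "resource_name": True,
        "tags": {"_filter": "startswith", "_values": ["pod_name:"]},
        "custom": {"duration": True, "http": {"status_code": True}},
    }
    print(apply_compact_schema(attributes, schema))
    # {'service': 'checkout', 'resource_name': 'POST /pay',
    #  'tags': ['pod_name:checkout-7d9f'],
    #  'custom': {'duration': 120000000, 'http': {'status_code': 500}}}
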
@@ -586,51 +615,42 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
             from_time_ms = from_time_int * 1000
             to_time_ms = to_time_int * 1000
 
-            # Build search query
-            query_parts = []
-
-            # If a custom query is provided, use it as the base
-            if params.get("query"):
-                query_parts.append(params["query"])
+            query = params.get("query", "*")
 
-            # Add additional filters
-            if params.get("service"):
-                query_parts.append(f"service:{params['service']}")
-
-            if params.get("operation"):
-                query_parts.append(f"operation_name:{params['operation']}")
+            # Build the request payload
+            url = f"{self.toolset.dd_config.site_api_url}/api/v2/spans/analytics/aggregate"
+            headers = get_headers(self.toolset.dd_config)
 
-            if params.get("resource"):
-                query_parts.append(f"resource_name:{params['resource']}")
+            # Build payload attributes first
+            # Process compute parameter to fix common p95->pc95 style mistakes
+            compute_params = params.get("compute", [])
+            processed_compute = self._fix_percentile_aggregations(compute_params)
+
+            attributes: Dict[str, Any] = {
+                "filter": {
+                    "query": query,
+                    "from": str(from_time_ms),
+                    "to": str(to_time_ms),
+                },
+                "compute": processed_compute,
+            }
 
-            # Add tag filters
-            if params.get("tags"):
-                tags = params["tags"]
-                if isinstance(tags, dict):
-                    for key, value in tags.items():
-                        query_parts.append(f"@{key}:{value}")
+            # Add optional fields
+            if params.get("group_by"):
+                attributes["group_by"] = params["group_by"]
 
-            query = " ".join(query_parts) if query_parts else "*"
+            # Add options if timezone is specified
+            options: Dict[str, Any] = {}
+            if params.get("timezone"):
+                options["timezone"] = params["timezone"]
 
-            # Use POST endpoint for more complex searches
-            url = f"{self.toolset.dd_config.site_api_url}/api/v2/spans/events/search"
-            headers = get_headers(self.toolset.dd_config)
+            if options:
+                attributes["options"] = options
 
             payload = {
                 "data": {
-                    "type": "search_request",
-                    "attributes": {
-                        "filter": {
-                            "query": query,
-                            "from": str(from_time_ms),
-                            "to": str(to_time_ms),
-                            "indexes": self.toolset.dd_config.indexes,
-                        },
-                        "page": {
-                            "limit": params.get("limit", 100),
-                        },
-                        "sort": "-timestamp",
-                    },
+                    "type": "aggregate_request",
+                    "attributes": attributes,
                 }
             }
 
@@ -642,27 +662,18 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
                 method="POST",
             )
 
-            # Handle tuple response from POST requests
-            if isinstance(response, tuple):
-                spans, _ = response
-            elif response:
-                spans = response.get("data", [])
-            else:
-                spans = []
-
-            # Format the spans search results using the formatter
-            formatted_output = format_spans_search(spans)
-            if not formatted_output:
-                return StructuredToolResult(
-                    status=ToolResultStatus.NO_DATA,
-                    params=params,
-                    data="No matching spans found.",
-                )
+            web_url = generate_datadog_spans_analytics_url(
+                self.toolset.dd_config,
+                query,
+                from_time_ms,
+                to_time_ms,
+            )
 
             return StructuredToolResult(
-                status=ToolResultStatus.SUCCESS,
-                data=formatted_output,
+                status=StructuredToolResultStatus.SUCCESS,
+                data=response,
                 params=params,
+                url=web_url,
             )
 
         except DataDogRequestError as e:
@@ -678,7 +689,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
                 error_msg = f"Exception while querying Datadog: {str(e)}"
 
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=error_msg,
                 params=params,
                 invocation=(
@@ -691,7 +702,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
         except Exception as e:
             logging.exception(e, exc_info=True)
             return StructuredToolResult(
-                status=ToolResultStatus.ERROR,
+                status=StructuredToolResultStatus.ERROR,
                 error=f"Unexpected error: {str(e)}",
                 params=params,
                 invocation=(