holmesgpt 0.14.0a0__py3-none-any.whl → 0.14.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of holmesgpt might be problematic; see the package registry advisory for more details.

Files changed (82)
  1. holmes/__init__.py +1 -1
  2. holmes/clients/robusta_client.py +15 -4
  3. holmes/common/env_vars.py +8 -1
  4. holmes/config.py +66 -139
  5. holmes/core/investigation.py +1 -2
  6. holmes/core/llm.py +295 -52
  7. holmes/core/models.py +2 -0
  8. holmes/core/safeguards.py +4 -4
  9. holmes/core/supabase_dal.py +14 -8
  10. holmes/core/tool_calling_llm.py +110 -102
  11. holmes/core/tools.py +260 -25
  12. holmes/core/tools_utils/data_types.py +81 -0
  13. holmes/core/tools_utils/tool_context_window_limiter.py +33 -0
  14. holmes/core/tools_utils/tool_executor.py +2 -2
  15. holmes/core/toolset_manager.py +150 -3
  16. holmes/core/transformers/__init__.py +23 -0
  17. holmes/core/transformers/base.py +62 -0
  18. holmes/core/transformers/llm_summarize.py +174 -0
  19. holmes/core/transformers/registry.py +122 -0
  20. holmes/core/transformers/transformer.py +31 -0
  21. holmes/main.py +5 -0
  22. holmes/plugins/prompts/_fetch_logs.jinja2 +10 -1
  23. holmes/plugins/toolsets/aks-node-health.yaml +46 -0
  24. holmes/plugins/toolsets/aks.yaml +64 -0
  25. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +17 -15
  26. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +8 -4
  27. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +7 -3
  28. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +3 -3
  29. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +3 -3
  30. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +7 -3
  31. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +4 -4
  32. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +7 -3
  33. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +7 -3
  34. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +7 -3
  35. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +7 -3
  36. holmes/plugins/toolsets/bash/bash_toolset.py +6 -6
  37. holmes/plugins/toolsets/bash/common/bash.py +7 -7
  38. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
  39. holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
  40. holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
  41. holmes/plugins/toolsets/datadog/toolset_datadog_general.py +344 -205
  42. holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +189 -17
  43. holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +95 -30
  44. holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +10 -10
  45. holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +20 -20
  46. holmes/plugins/toolsets/git.py +21 -21
  47. holmes/plugins/toolsets/grafana/common.py +2 -2
  48. holmes/plugins/toolsets/grafana/toolset_grafana.py +4 -4
  49. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +5 -4
  50. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +123 -23
  51. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +165 -307
  52. holmes/plugins/toolsets/internet/internet.py +3 -3
  53. holmes/plugins/toolsets/internet/notion.py +3 -3
  54. holmes/plugins/toolsets/investigator/core_investigation.py +3 -3
  55. holmes/plugins/toolsets/kafka.py +18 -18
  56. holmes/plugins/toolsets/kubernetes.yaml +58 -0
  57. holmes/plugins/toolsets/kubernetes_logs.py +6 -6
  58. holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
  59. holmes/plugins/toolsets/logging_utils/logging_api.py +1 -1
  60. holmes/plugins/toolsets/mcp/toolset_mcp.py +4 -4
  61. holmes/plugins/toolsets/newrelic.py +5 -5
  62. holmes/plugins/toolsets/opensearch/opensearch.py +5 -5
  63. holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
  64. holmes/plugins/toolsets/opensearch/opensearch_traces.py +10 -10
  65. holmes/plugins/toolsets/prometheus/prometheus.py +841 -351
  66. holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +39 -2
  67. holmes/plugins/toolsets/prometheus/utils.py +28 -0
  68. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +6 -4
  69. holmes/plugins/toolsets/robusta/robusta.py +10 -10
  70. holmes/plugins/toolsets/runbook/runbook_fetcher.py +4 -4
  71. holmes/plugins/toolsets/servicenow/servicenow.py +6 -6
  72. holmes/plugins/toolsets/utils.py +88 -0
  73. holmes/utils/config_utils.py +91 -0
  74. holmes/utils/env.py +7 -0
  75. holmes/utils/holmes_status.py +2 -1
  76. holmes/utils/sentry_helper.py +41 -0
  77. holmes/utils/stream.py +9 -0
  78. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/METADATA +10 -14
  79. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/RECORD +82 -72
  80. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/LICENSE.txt +0 -0
  81. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/WHEEL +0 -0
  82. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/entry_points.txt +0 -0
holmes/core/tools.py CHANGED
@@ -8,40 +8,68 @@ import tempfile
8
8
  from abc import ABC, abstractmethod
9
9
  from datetime import datetime
10
10
  from enum import Enum
11
- from typing import Any, Callable, Dict, List, Optional, OrderedDict, Tuple, Union
11
+ from typing import (
12
+ TYPE_CHECKING,
13
+ Any,
14
+ Callable,
15
+ Dict,
16
+ List,
17
+ Optional,
18
+ OrderedDict,
19
+ Tuple,
20
+ Union,
21
+ )
12
22
 
13
23
  from jinja2 import Template
14
- from pydantic import BaseModel, ConfigDict, Field, FilePath, model_validator
24
+ from pydantic import (
25
+ BaseModel,
26
+ ConfigDict,
27
+ Field,
28
+ FilePath,
29
+ model_validator,
30
+ PrivateAttr,
31
+ )
15
32
  from rich.console import Console
16
33
 
17
34
  from holmes.core.openai_formatting import format_tool_to_open_ai_standard
18
35
  from holmes.plugins.prompts import load_and_render_prompt
36
+ from holmes.core.transformers import (
37
+ registry,
38
+ TransformerError,
39
+ Transformer,
40
+ )
41
+
42
+ if TYPE_CHECKING:
43
+ from holmes.core.transformers import BaseTransformer
44
+ from holmes.utils.config_utils import merge_transformers
19
45
  import time
20
46
  from rich.table import Table
21
47
 
48
+ logger = logging.getLogger(__name__)
22
49
 
23
- class ToolResultStatus(str, Enum):
50
+
51
+ class StructuredToolResultStatus(str, Enum):
24
52
  SUCCESS = "success"
25
53
  ERROR = "error"
26
54
  NO_DATA = "no_data"
27
55
  APPROVAL_REQUIRED = "approval_required"
28
56
 
29
57
  def to_color(self) -> str:
30
- if self == ToolResultStatus.SUCCESS:
58
+ if self == StructuredToolResultStatus.SUCCESS:
31
59
  return "green"
32
- elif self == ToolResultStatus.ERROR:
60
+ elif self == StructuredToolResultStatus.ERROR:
33
61
  return "red"
34
- elif self == ToolResultStatus.APPROVAL_REQUIRED:
62
+ elif self == StructuredToolResultStatus.APPROVAL_REQUIRED:
35
63
  return "yellow"
36
64
  else:
37
65
  return "white"
38
66
 
39
67
  def to_emoji(self) -> str:
40
- if self == ToolResultStatus.SUCCESS:
68
+ if self == StructuredToolResultStatus.SUCCESS:
41
69
  return "✔"
42
- elif self == ToolResultStatus.ERROR:
70
+ elif self == StructuredToolResultStatus.ERROR:
43
71
  return "❌"
44
- elif self == ToolResultStatus.APPROVAL_REQUIRED:
72
+ elif self == StructuredToolResultStatus.APPROVAL_REQUIRED:
45
73
  return "⚠️"
46
74
  else:
47
75
  return "⚪️"
@@ -49,7 +77,7 @@ class ToolResultStatus(str, Enum):
49
77
 
50
78
  class StructuredToolResult(BaseModel):
51
79
  schema_version: str = "robusta:v1.0.0"
52
- status: ToolResultStatus
80
+ status: StructuredToolResultStatus
53
81
  error: Optional[str] = None
54
82
  return_code: Optional[int] = None
55
83
  data: Optional[Any] = None
@@ -143,6 +171,48 @@ class Tool(ABC, BaseModel):
143
171
  default=None,
144
172
  description="The URL of the icon for the tool, if None will get toolset icon",
145
173
  )
174
+ transformers: Optional[List[Transformer]] = None
175
+
176
+ # Private attribute to store initialized transformer instances for performance
177
+ _transformer_instances: Optional[List["BaseTransformer"]] = PrivateAttr(
178
+ default=None
179
+ )
180
+
181
+ def model_post_init(self, __context) -> None:
182
+ """Initialize transformer instances once during tool creation for better performance."""
183
+ logger.debug(
184
+ f"Tool '{self.name}' model_post_init: creating transformer instances"
185
+ )
186
+
187
+ if self.transformers:
188
+ logger.debug(
189
+ f"Tool '{self.name}' has {len(self.transformers)} transformers to initialize"
190
+ )
191
+ self._transformer_instances = []
192
+ for transformer in self.transformers:
193
+ if not transformer:
194
+ continue
195
+ logger.debug(
196
+ f" Initializing transformer '{transformer.name}' with config: {transformer.config}"
197
+ )
198
+ try:
199
+ # Create transformer instance once and cache it
200
+ transformer_instance = registry.create_transformer(
201
+ transformer.name, transformer.config
202
+ )
203
+ self._transformer_instances.append(transformer_instance)
204
+ logger.debug(
205
+ f"Initialized transformer '{transformer.name}' for tool '{self.name}'"
206
+ )
207
+ except Exception as e:
208
+ logger.warning(
209
+ f"Failed to initialize transformer '{transformer.name}' for tool '{self.name}': {e}"
210
+ )
211
+ # Continue with other transformers, don't fail the entire initialization
212
+ continue
213
+ else:
214
+ logger.debug(f"Tool '{self.name}' has no transformers")
215
+ self._transformer_instances = None
146
216
 
147
217
  def get_openai_format(self, target_model: str):
148
218
  return format_tool_to_open_ai_standard(
@@ -159,23 +229,113 @@ class Tool(ABC, BaseModel):
159
229
  user_approved: bool = False,
160
230
  ) -> StructuredToolResult:
161
231
  tool_number_str = f"#{tool_number} " if tool_number else ""
162
- logging.info(
232
+ logger.info(
163
233
  f"Running tool {tool_number_str}[bold]{self.name}[/bold]: {self.get_parameterized_one_liner(params)}"
164
234
  )
165
235
  start_time = time.time()
166
236
  result = self._invoke(params=params, user_approved=user_approved)
167
237
  result.icon_url = self.icon_url
238
+
239
+ # Apply transformers to the result
240
+ transformed_result = self._apply_transformers(result)
168
241
  elapsed = time.time() - start_time
169
242
  output_str = (
170
- result.get_stringified_data()
171
- if hasattr(result, "get_stringified_data")
172
- else str(result)
243
+ transformed_result.get_stringified_data()
244
+ if hasattr(transformed_result, "get_stringified_data")
245
+ else str(transformed_result)
173
246
  )
174
247
  show_hint = f"/show {tool_number}" if tool_number else "/show"
175
248
  line_count = output_str.count("\n") + 1 if output_str else 0
176
- logging.info(
249
+ logger.info(
177
250
  f" [dim]Finished {tool_number_str}in {elapsed:.2f}s, output length: {len(output_str):,} characters ({line_count:,} lines) - {show_hint} to view contents[/dim]"
178
251
  )
252
+ return transformed_result
253
+
254
+ def _apply_transformers(self, result: StructuredToolResult) -> StructuredToolResult:
255
+ """
256
+ Apply configured transformers to the tool result.
257
+
258
+ Args:
259
+ result: The original tool result
260
+
261
+ Returns:
262
+ The tool result with transformed data, or original result if transformation fails
263
+ """
264
+ if (
265
+ not self._transformer_instances
266
+ or result.status != StructuredToolResultStatus.SUCCESS
267
+ ):
268
+ return result
269
+
270
+ # Get the output string to transform
271
+ original_data = result.get_stringified_data()
272
+ if not original_data:
273
+ return result
274
+
275
+ transformed_data = original_data
276
+ transformers_applied = []
277
+
278
+ # Use cached transformer instances instead of creating new ones
279
+ for transformer_instance in self._transformer_instances:
280
+ try:
281
+ # Check if transformer should be applied
282
+ if not transformer_instance.should_apply(transformed_data):
283
+ logger.debug(
284
+ f"Transformer '{transformer_instance.name}' skipped for tool '{self.name}' (conditions not met)"
285
+ )
286
+ continue
287
+
288
+ # Apply transformation
289
+ pre_transform_size = len(transformed_data)
290
+ transform_start_time = time.time()
291
+ original_data = transformed_data # Keep a copy for potential reversion
292
+ transformed_data = transformer_instance.transform(transformed_data)
293
+ transform_elapsed = time.time() - transform_start_time
294
+
295
+ # Check if this is llm_summarize and revert if summary is not smaller
296
+ post_transform_size = len(transformed_data)
297
+ if (
298
+ transformer_instance.name == "llm_summarize"
299
+ and post_transform_size >= pre_transform_size
300
+ ):
301
+ # Revert to original data if summary is not smaller
302
+ transformed_data = original_data
303
+ logger.debug(
304
+ f"Transformer '{transformer_instance.name}' reverted for tool '{self.name}' "
305
+ f"(output size {post_transform_size:,} >= input size {pre_transform_size:,})"
306
+ )
307
+ continue # Don't mark as applied
308
+
309
+ transformers_applied.append(transformer_instance.name)
310
+
311
+ # Generic logging - transformers can override this with their own specific metrics
312
+ size_change = post_transform_size - pre_transform_size
313
+ logger.info(
314
+ f"Applied transformer '{transformer_instance.name}' to tool '{self.name}' output "
315
+ f"in {transform_elapsed:.2f}s (size: {pre_transform_size:,} → {post_transform_size:,} chars, "
316
+ f"change: {size_change:+,})"
317
+ )
318
+
319
+ except TransformerError as e:
320
+ logger.warning(
321
+ f"Transformer '{transformer_instance.name}' failed for tool '{self.name}': {e}"
322
+ )
323
+ # Continue with other transformers, don't fail the entire chain
324
+ continue
325
+ except Exception as e:
326
+ logger.error(
327
+ f"Unexpected error applying transformer '{transformer_instance.name}' to tool '{self.name}': {e}"
328
+ )
329
+ # Continue with other transformers
330
+ continue
331
+
332
+ # If any transformers were applied, update the result
333
+ if transformers_applied:
334
+ # Create a copy of the result with transformed data
335
+ result_dict = result.model_dump(exclude={"data"})
336
+ result_dict["data"] = transformed_data
337
+ return StructuredToolResult(**result_dict)
338
+
179
339
  return result
180
340
 
181
341
  @abstractmethod
@@ -230,12 +390,14 @@ class YAMLTool(Tool, BaseModel):
230
390
  context = {**params}
231
391
  return context
232
392
 
233
- def _get_status(self, return_code: int, raw_output: str) -> ToolResultStatus:
393
+ def _get_status(
394
+ self, return_code: int, raw_output: str
395
+ ) -> StructuredToolResultStatus:
234
396
  if return_code != 0:
235
- return ToolResultStatus.ERROR
397
+ return StructuredToolResultStatus.ERROR
236
398
  if raw_output == "":
237
- return ToolResultStatus.NO_DATA
238
- return ToolResultStatus.SUCCESS
399
+ return StructuredToolResultStatus.NO_DATA
400
+ return StructuredToolResultStatus.SUCCESS
239
401
 
240
402
  def _invoke(
241
403
  self, params: dict, user_approved: bool = False
@@ -246,7 +408,7 @@ class YAMLTool(Tool, BaseModel):
246
408
  raw_output, return_code, invocation = self.__invoke_script(params) # type: ignore
247
409
 
248
410
  if self.additional_instructions and return_code == 0:
249
- logging.info(
411
+ logger.info(
250
412
  f"Applying additional instructions: {self.additional_instructions}"
251
413
  )
252
414
  output_with_instructions = self.__apply_additional_instructions(raw_output)
@@ -281,7 +443,7 @@ class YAMLTool(Tool, BaseModel):
281
443
  )
282
444
  return result.stdout.strip()
283
445
  except subprocess.CalledProcessError as e:
284
- logging.error(
446
+ logger.error(
285
447
  f"Failed to apply additional instructions: {self.additional_instructions}. "
286
448
  f"Error: {e.stderr}"
287
449
  )
@@ -316,7 +478,7 @@ class YAMLTool(Tool, BaseModel):
316
478
 
317
479
  def __execute_subprocess(self, cmd) -> Tuple[str, int]:
318
480
  try:
319
- logging.debug(f"Running `{cmd}`")
481
+ logger.debug(f"Running `{cmd}`")
320
482
  result = subprocess.run(
321
483
  cmd,
322
484
  shell=True,
@@ -329,7 +491,7 @@ class YAMLTool(Tool, BaseModel):
329
491
 
330
492
  return result.stdout.strip(), result.returncode
331
493
  except Exception as e:
332
- logging.error(
494
+ logger.error(
333
495
  f"An unexpected error occurred while running '{cmd}': {e}",
334
496
  exc_info=True,
335
497
  )
@@ -381,6 +543,7 @@ class Toolset(BaseModel):
381
543
  config: Optional[Any] = None
382
544
  is_default: bool = False
383
545
  llm_instructions: Optional[str] = None
546
+ transformers: Optional[List[Transformer]] = None
384
547
 
385
548
  # warning! private attributes are not copied, which can lead to subtle bugs.
386
549
  # e.g. l.extend([some_tool]) will reset these private attribute to None
@@ -406,13 +569,85 @@ class Toolset(BaseModel):
406
569
  @model_validator(mode="before")
407
570
  def preprocess_tools(cls, values):
408
571
  additional_instructions = values.get("additional_instructions", "")
572
+ transformers = values.get("transformers", None)
409
573
  tools_data = values.get("tools", [])
574
+
575
+ # Convert raw dict transformers to Transformer objects BEFORE merging
576
+ if transformers:
577
+ converted_transformers = []
578
+ for t in transformers:
579
+ if isinstance(t, dict):
580
+ try:
581
+ transformer_obj = Transformer(**t)
582
+ # Check if transformer is registered
583
+ from holmes.core.transformers import registry
584
+
585
+ if not registry.is_registered(transformer_obj.name):
586
+ logger.warning(
587
+ f"Invalid toolset transformer configuration: Transformer '{transformer_obj.name}' is not registered"
588
+ )
589
+ continue # Skip invalid transformer
590
+ converted_transformers.append(transformer_obj)
591
+ except Exception as e:
592
+ # Log warning and skip invalid transformer
593
+ logger.warning(
594
+ f"Invalid toolset transformer configuration: {e}"
595
+ )
596
+ continue
597
+ else:
598
+ # Already a Transformer object
599
+ converted_transformers.append(t)
600
+ transformers = converted_transformers if converted_transformers else None
601
+
410
602
  tools = []
411
603
  for tool in tools_data:
412
604
  if isinstance(tool, dict):
413
605
  tool["additional_instructions"] = additional_instructions
606
+
607
+ # Convert tool-level transformers to Transformer objects
608
+ tool_transformers = tool.get("transformers")
609
+ if tool_transformers:
610
+ converted_tool_transformers = []
611
+ for t in tool_transformers:
612
+ if isinstance(t, dict):
613
+ try:
614
+ transformer_obj = Transformer(**t)
615
+ # Check if transformer is registered
616
+ from holmes.core.transformers import registry
617
+
618
+ if not registry.is_registered(transformer_obj.name):
619
+ logger.warning(
620
+ f"Invalid tool transformer configuration: Transformer '{transformer_obj.name}' is not registered"
621
+ )
622
+ continue # Skip invalid transformer
623
+ converted_tool_transformers.append(transformer_obj)
624
+ except Exception as e:
625
+ # Log warning and skip invalid transformer
626
+ logger.warning(
627
+ f"Invalid tool transformer configuration: {e}"
628
+ )
629
+ continue
630
+ else:
631
+ # Already a Transformer object
632
+ converted_tool_transformers.append(t)
633
+ tool_transformers = (
634
+ converted_tool_transformers
635
+ if converted_tool_transformers
636
+ else None
637
+ )
638
+
639
+ # Merge toolset-level transformers with tool-level configs
640
+ tool["transformers"] = merge_transformers(
641
+ base_transformers=transformers,
642
+ override_transformers=tool_transformers,
643
+ )
414
644
  if isinstance(tool, Tool):
415
645
  tool.additional_instructions = additional_instructions
646
+ # Merge toolset-level transformers with tool-level configs
647
+ tool.transformers = merge_transformers( # type: ignore
648
+ base_transformers=transformers,
649
+ override_transformers=tool.transformers,
650
+ )
416
651
  tools.append(tool)
417
652
  values["tools"] = tools
418
653
 
@@ -482,11 +717,11 @@ class Toolset(BaseModel):
482
717
  self.status == ToolsetStatusEnum.DISABLED
483
718
  or self.status == ToolsetStatusEnum.FAILED
484
719
  ):
485
- logging.info(f"❌ Toolset {self.name}: {self.error}")
720
+ logger.info(f"❌ Toolset {self.name}: {self.error}")
486
721
  # no point checking further prerequisites if one failed
487
722
  return
488
723
 
489
- logging.info(f"✅ Toolset {self.name}")
724
+ logger.info(f"✅ Toolset {self.name}")
490
725
 
491
726
  @abstractmethod
492
727
  def get_example_config(self) -> Dict[str, Any]:
@@ -0,0 +1,81 @@
1
+ import json
2
+ from typing import Optional
3
+ from pydantic import BaseModel
4
+
5
+ from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus
6
+
7
+
8
+ class TruncationMetadata(BaseModel):
9
+ tool_call_id: str
10
+ start_index: int
11
+ end_index: int
12
+ tool_name: str
13
+ original_token_count: int
14
+
15
+
16
+ class TruncationResult(BaseModel):
17
+ truncated_messages: list[dict]
18
+ truncations: list[TruncationMetadata]
19
+
20
+
21
+ def format_tool_result_data(tool_result: StructuredToolResult) -> str:
22
+ tool_response = tool_result.data
23
+ if isinstance(tool_result.data, str):
24
+ tool_response = tool_result.data
25
+ else:
26
+ try:
27
+ if isinstance(tool_result.data, BaseModel):
28
+ tool_response = tool_result.data.model_dump_json(indent=2)
29
+ else:
30
+ tool_response = json.dumps(tool_result.data, indent=2)
31
+ except Exception:
32
+ tool_response = str(tool_result.data)
33
+ if tool_result.status == StructuredToolResultStatus.ERROR:
34
+ tool_response = f"{tool_result.error or 'Tool execution failed'}:\n\n{tool_result.data or ''}".strip()
35
+ return tool_response
36
+
37
+
38
+ class ToolCallResult(BaseModel):
39
+ tool_call_id: str
40
+ tool_name: str
41
+ description: str
42
+ result: StructuredToolResult
43
+ size: Optional[int] = None
44
+
45
+ def as_tool_call_message(self):
46
+ content = format_tool_result_data(self.result)
47
+ if self.result.params:
48
+ content = (
49
+ f"Params used for the tool call: {json.dumps(self.result.params)}. The tool call output follows on the next line.\n"
50
+ + content
51
+ )
52
+ return {
53
+ "tool_call_id": self.tool_call_id,
54
+ "role": "tool",
55
+ "name": self.tool_name,
56
+ "content": content,
57
+ }
58
+
59
+ def as_tool_result_response(self):
60
+ result_dump = self.result.model_dump()
61
+ result_dump["data"] = self.result.get_stringified_data()
62
+
63
+ return {
64
+ "tool_call_id": self.tool_call_id,
65
+ "tool_name": self.tool_name,
66
+ "description": self.description,
67
+ "role": "tool",
68
+ "result": result_dump,
69
+ }
70
+
71
+ def as_streaming_tool_result_response(self):
72
+ result_dump = self.result.model_dump()
73
+ result_dump["data"] = self.result.get_stringified_data()
74
+
75
+ return {
76
+ "tool_call_id": self.tool_call_id,
77
+ "role": "tool",
78
+ "description": self.description,
79
+ "name": self.tool_name,
80
+ "result": result_dump,
81
+ }
@@ -0,0 +1,33 @@
1
+ from holmes.common.env_vars import TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT
2
+ from holmes.core.llm import LLM
3
+ from holmes.core.tools import StructuredToolResultStatus
4
+ from holmes.core.tools_utils.data_types import ToolCallResult
5
+ from holmes.utils import sentry_helper
6
+
7
+
8
+ def prevent_overly_big_tool_response(tool_call_result: ToolCallResult, llm: LLM):
9
+ if (
10
+ tool_call_result.result.status == StructuredToolResultStatus.SUCCESS
11
+ and 0 < TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT
12
+ and TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT <= 100
13
+ ):
14
+ message = tool_call_result.as_tool_call_message()
15
+
16
+ messages_token = llm.count_tokens_for_message(messages=[message])
17
+ context_window_size = llm.get_context_window_size()
18
+ max_tokens_allowed: int = int(
19
+ context_window_size * TOOL_MAX_ALLOCATED_CONTEXT_WINDOW_PCT // 100
20
+ )
21
+
22
+ if messages_token > max_tokens_allowed:
23
+ relative_pct = (
24
+ (messages_token - max_tokens_allowed) / messages_token
25
+ ) * 100
26
+ error_message = f"The tool call result is too large to return: {messages_token} tokens.\nThe maximum allowed tokens is {max_tokens_allowed} which is {format(relative_pct, '.1f')}% smaller.\nInstructions for the LLM: try to repeat the query but proactively narrow down the result so that the tool answer fits within the allowed number of tokens."
27
+ tool_call_result.result.status = StructuredToolResultStatus.ERROR
28
+ tool_call_result.result.data = None
29
+ tool_call_result.result.error = error_message
30
+
31
+ sentry_helper.capture_toolcall_contains_too_many_tokens(
32
+ tool_call_result, messages_token, max_tokens_allowed
33
+ )
@@ -6,7 +6,7 @@ import sentry_sdk
6
6
  from holmes.core.tools import (
7
7
  StructuredToolResult,
8
8
  Tool,
9
- ToolResultStatus,
9
+ StructuredToolResultStatus,
10
10
  Toolset,
11
11
  ToolsetStatusEnum,
12
12
  )
@@ -52,7 +52,7 @@ class ToolExecutor:
52
52
  tool.invoke(params)
53
53
  if tool
54
54
  else StructuredToolResult(
55
- status=ToolResultStatus.ERROR,
55
+ status=StructuredToolResultStatus.ERROR,
56
56
  error=f"Could not find tool named {tool_name}",
57
57
  )
58
58
  )