holmesgpt 0.14.0a0__py3-none-any.whl → 0.14.1__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.


This version of holmesgpt might be problematic.

Files changed (82)
  1. holmes/__init__.py +1 -1
  2. holmes/clients/robusta_client.py +15 -4
  3. holmes/common/env_vars.py +8 -1
  4. holmes/config.py +66 -139
  5. holmes/core/investigation.py +1 -2
  6. holmes/core/llm.py +295 -52
  7. holmes/core/models.py +2 -0
  8. holmes/core/safeguards.py +4 -4
  9. holmes/core/supabase_dal.py +14 -8
  10. holmes/core/tool_calling_llm.py +110 -102
  11. holmes/core/tools.py +260 -25
  12. holmes/core/tools_utils/data_types.py +81 -0
  13. holmes/core/tools_utils/tool_context_window_limiter.py +33 -0
  14. holmes/core/tools_utils/tool_executor.py +2 -2
  15. holmes/core/toolset_manager.py +150 -3
  16. holmes/core/transformers/__init__.py +23 -0
  17. holmes/core/transformers/base.py +62 -0
  18. holmes/core/transformers/llm_summarize.py +174 -0
  19. holmes/core/transformers/registry.py +122 -0
  20. holmes/core/transformers/transformer.py +31 -0
  21. holmes/main.py +5 -0
  22. holmes/plugins/prompts/_fetch_logs.jinja2 +10 -1
  23. holmes/plugins/toolsets/aks-node-health.yaml +46 -0
  24. holmes/plugins/toolsets/aks.yaml +64 -0
  25. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +17 -15
  26. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +8 -4
  27. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +7 -3
  28. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +3 -3
  29. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +3 -3
  30. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +7 -3
  31. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +4 -4
  32. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +7 -3
  33. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +7 -3
  34. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +7 -3
  35. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +7 -3
  36. holmes/plugins/toolsets/bash/bash_toolset.py +6 -6
  37. holmes/plugins/toolsets/bash/common/bash.py +7 -7
  38. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
  39. holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
  40. holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
  41. holmes/plugins/toolsets/datadog/toolset_datadog_general.py +344 -205
  42. holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +189 -17
  43. holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +95 -30
  44. holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +10 -10
  45. holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +20 -20
  46. holmes/plugins/toolsets/git.py +21 -21
  47. holmes/plugins/toolsets/grafana/common.py +2 -2
  48. holmes/plugins/toolsets/grafana/toolset_grafana.py +4 -4
  49. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +5 -4
  50. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +123 -23
  51. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +165 -307
  52. holmes/plugins/toolsets/internet/internet.py +3 -3
  53. holmes/plugins/toolsets/internet/notion.py +3 -3
  54. holmes/plugins/toolsets/investigator/core_investigation.py +3 -3
  55. holmes/plugins/toolsets/kafka.py +18 -18
  56. holmes/plugins/toolsets/kubernetes.yaml +58 -0
  57. holmes/plugins/toolsets/kubernetes_logs.py +6 -6
  58. holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
  59. holmes/plugins/toolsets/logging_utils/logging_api.py +1 -1
  60. holmes/plugins/toolsets/mcp/toolset_mcp.py +4 -4
  61. holmes/plugins/toolsets/newrelic.py +5 -5
  62. holmes/plugins/toolsets/opensearch/opensearch.py +5 -5
  63. holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
  64. holmes/plugins/toolsets/opensearch/opensearch_traces.py +10 -10
  65. holmes/plugins/toolsets/prometheus/prometheus.py +841 -351
  66. holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +39 -2
  67. holmes/plugins/toolsets/prometheus/utils.py +28 -0
  68. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +6 -4
  69. holmes/plugins/toolsets/robusta/robusta.py +10 -10
  70. holmes/plugins/toolsets/runbook/runbook_fetcher.py +4 -4
  71. holmes/plugins/toolsets/servicenow/servicenow.py +6 -6
  72. holmes/plugins/toolsets/utils.py +88 -0
  73. holmes/utils/config_utils.py +91 -0
  74. holmes/utils/env.py +7 -0
  75. holmes/utils/holmes_status.py +2 -1
  76. holmes/utils/sentry_helper.py +41 -0
  77. holmes/utils/stream.py +9 -0
  78. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/METADATA +10 -14
  79. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/RECORD +82 -72
  80. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/LICENSE.txt +0 -0
  81. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/WHEEL +0 -0
  82. {holmesgpt-0.14.0a0.dist-info → holmesgpt-0.14.1.dist-info}/entry_points.txt +0 -0
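
Most of the hunks below apply the same mechanical change across toolsets: the result-status enum imported from holmes.core.tools is renamed from ToolResultStatus to StructuredToolResultStatus, and tool descriptions gain a "[<toolset>]" prefix. A minimal sketch of the updated call pattern follows; the helper function and its arguments are hypothetical and only illustrative, while the import path, the StructuredToolResult fields, and the enum members are taken from the hunks themselves:

    from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus

    def build_result(data, params):
        # Hypothetical helper: wraps tool output using the renamed status enum.
        if data:
            return StructuredToolResult(
                status=StructuredToolResultStatus.SUCCESS,
                data=data,
                params=params,
            )
        # No data returned by the tool: report NO_DATA rather than an error.
        return StructuredToolResult(
            status=StructuredToolResultStatus.NO_DATA,
            params=params,
        )
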
holmes/plugins/toolsets/datadog/toolset_datadog_rds.py

@@ -10,7 +10,7 @@ from holmes.core.tools import (
  StructuredToolResult,
  Tool,
  ToolParameter,
- ToolResultStatus,
+ StructuredToolResultStatus,
  Toolset,
  ToolsetTag,
  )
@@ -69,7 +69,7 @@ class GenerateRDSPerformanceReport(BaseDatadogRDSTool):
  def __init__(self, toolset: "DatadogRDSToolset"):
  super().__init__(
  name="datadog_rds_performance_report",
- description="Generate a comprehensive performance report for a specific RDS instance including latency, resource utilization, and storage metrics with analysis",
+ description="[datadog/rds toolset] Generate a comprehensive performance report for a specific RDS instance including latency, resource utilization, and storage metrics with analysis",
  parameters={
  "db_instance_identifier": ToolParameter(
  description="The RDS database instance identifier",
@@ -97,7 +97,7 @@ class GenerateRDSPerformanceReport(BaseDatadogRDSTool):
  ) -> StructuredToolResult:
  if not self.toolset.dd_config:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=TOOLSET_CONFIG_MISSING_ERROR,
  params=params,
  )
@@ -150,7 +150,7 @@ class GenerateRDSPerformanceReport(BaseDatadogRDSTool):
  formatted_report = self._format_report(report)

  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=formatted_report,
  params=params,
  )
@@ -158,7 +158,7 @@ class GenerateRDSPerformanceReport(BaseDatadogRDSTool):
  except Exception as e:
  logging.error(f"Error generating RDS performance report: {str(e)}")
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Failed to generate RDS performance report: {str(e)}",
  params=params,
  )
@@ -364,7 +364,7 @@ class GetTopWorstPerformingRDSInstances(BaseDatadogRDSTool):
  def __init__(self, toolset: "DatadogRDSToolset"):
  super().__init__(
  name="datadog_rds_top_worst_performing",
- description="Get a summarized report of the top worst performing RDS instances based on latency, CPU utilization, and error rates",
+ description="[datadog/rds toolset] Get a summarized report of the top worst performing RDS instances based on latency, CPU utilization, and error rates",
  parameters={
  "top_n": ToolParameter(
  description=f"Number of worst performing instances to return (default: {DEFAULT_TOP_INSTANCES})",
@@ -397,7 +397,7 @@ class GetTopWorstPerformingRDSInstances(BaseDatadogRDSTool):
  ) -> StructuredToolResult:
  if not self.toolset.dd_config:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=TOOLSET_CONFIG_MISSING_ERROR,
  params=params,
  )
@@ -416,7 +416,7 @@ class GetTopWorstPerformingRDSInstances(BaseDatadogRDSTool):

  if not instances:
  return StructuredToolResult(
- status=ToolResultStatus.NO_DATA,
+ status=StructuredToolResultStatus.NO_DATA,
  data="No RDS instances found with metrics in the specified time range",
  params=params,
  )
@@ -443,7 +443,7 @@ class GetTopWorstPerformingRDSInstances(BaseDatadogRDSTool):
  report += f"\n\nInstances:\n{json.dumps(worst_performers, indent=2)}"

  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=report,
  params=params,
  )
@@ -451,7 +451,7 @@ class GetTopWorstPerformingRDSInstances(BaseDatadogRDSTool):
  except Exception as e:
  logging.error(f"Error getting top worst performing RDS instances: {str(e)}")
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Failed to get top worst performing RDS instances: {str(e)}",
  params=params,
  )
holmes/plugins/toolsets/datadog/toolset_datadog_traces.py

@@ -12,7 +12,7 @@ from holmes.core.tools import (
  ToolParameter,
  Toolset,
  StructuredToolResult,
- ToolResultStatus,
+ StructuredToolResultStatus,
  ToolsetTag,
  )
  from holmes.plugins.toolsets.datadog.datadog_api import (
@@ -156,7 +156,7 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
  def __init__(self, toolset: "DatadogTracesToolset"):
  super().__init__(
  name="fetch_datadog_traces",
- description="Fetch a list of traces from Datadog with optional filters",
+ description="[datadog/traces toolset] Fetch a list of traces from Datadog with optional filters",
  parameters={
  "service": ToolParameter(
  description="Filter by service name",
@@ -216,7 +216,7 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
  """Execute the tool to fetch traces."""
  if not self.toolset.dd_config:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="Datadog configuration not initialized",
  params=params,
  )
@@ -305,13 +305,13 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
  formatted_output = format_traces_list(spans, limit=params.get("limit", 50))
  if not formatted_output:
  return StructuredToolResult(
- status=ToolResultStatus.NO_DATA,
+ status=StructuredToolResultStatus.NO_DATA,
  params=params,
  data="No matching traces found.",
  )

  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=formatted_output,
  params=params,
  )
@@ -330,7 +330,7 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
  error_msg = f"Exception while querying Datadog: {str(e)}"

  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=error_msg,
  params=params,
  invocation=(
@@ -343,7 +343,7 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
  except Exception as e:
  logging.exception(e, exc_info=True)
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Unexpected error: {str(e)}",
  params=params,
  invocation=(
@@ -360,7 +360,7 @@ class FetchDatadogTraceById(BaseDatadogTracesTool):
  def __init__(self, toolset: "DatadogTracesToolset"):
  super().__init__(
  name="fetch_datadog_trace_by_id",
- description="Fetch detailed information about a specific trace by its ID",
+ description="[datadog/traces toolset] Fetch detailed information about a specific trace by its ID",
  parameters={
  "trace_id": ToolParameter(
  description="The trace ID to fetch details for",
@@ -382,7 +382,7 @@ class FetchDatadogTraceById(BaseDatadogTracesTool):
  """Execute the tool to fetch trace details."""
  if not self.toolset.dd_config:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="Datadog configuration not initialized",
  params=params,
  )
@@ -390,7 +390,7 @@ class FetchDatadogTraceById(BaseDatadogTracesTool):
  trace_id = params.get("trace_id")
  if not trace_id:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="trace_id parameter is required",
  params=params,
  )
@@ -444,13 +444,13 @@ class FetchDatadogTraceById(BaseDatadogTracesTool):
  formatted_output = format_trace_hierarchy(trace_id, spans)
  if not formatted_output:
  return StructuredToolResult(
- status=ToolResultStatus.NO_DATA,
+ status=StructuredToolResultStatus.NO_DATA,
  params=params,
  data=f"No trace found for trace_id: {trace_id}",
  )

  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=formatted_output,
  params=params,
  )
@@ -469,7 +469,7 @@ class FetchDatadogTraceById(BaseDatadogTracesTool):
  error_msg = f"Exception while querying Datadog: {str(e)}"

  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=error_msg,
  params=params,
  invocation=(
@@ -482,7 +482,7 @@ class FetchDatadogTraceById(BaseDatadogTracesTool):
  except Exception as e:
  logging.exception(e, exc_info=True)
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Unexpected error: {str(e)}",
  params=params,
  invocation=(
@@ -499,7 +499,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
  def __init__(self, toolset: "DatadogTracesToolset"):
  super().__init__(
  name="fetch_datadog_spans",
- description="Search for spans in Datadog with detailed filters",
+ description="[datadog/traces toolset] Search for spans in Datadog with detailed filters",
  parameters={
  "query": ToolParameter(
  description="Datadog search query (e.g., 'service:web-app @http.status_code:500')",
@@ -565,7 +565,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
  """Execute the tool to search spans."""
  if not self.toolset.dd_config:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="Datadog configuration not initialized",
  params=params,
  )
@@ -653,13 +653,13 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
  formatted_output = format_spans_search(spans)
  if not formatted_output:
  return StructuredToolResult(
- status=ToolResultStatus.NO_DATA,
+ status=StructuredToolResultStatus.NO_DATA,
  params=params,
  data="No matching spans found.",
  )

  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=formatted_output,
  params=params,
  )
@@ -677,7 +677,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
  error_msg = f"Exception while querying Datadog: {str(e)}"

  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=error_msg,
  params=params,
  invocation=(
@@ -690,7 +690,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
  except Exception as e:
  logging.exception(e, exc_info=True)
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Unexpected error: {str(e)}",
  params=params,
  invocation=(
holmes/plugins/toolsets/git.py

@@ -4,7 +4,7 @@ import requests # type: ignore
  import os
  from typing import Any, Optional, Dict, List, Tuple
  from pydantic import BaseModel
- from holmes.core.tools import StructuredToolResult, ToolResultStatus
+ from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus

  from holmes.core.tools import (
  Toolset,
@@ -259,7 +259,7 @@ class GitReadFileWithLineNumbers(Tool):
  resp = requests.get(url, headers=headers)
  if resp.status_code != 200:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  data=self.toolset._sanitize_error(
  f"Error fetching file: {resp.text}"
  ),
@@ -268,13 +268,13 @@ class GitReadFileWithLineNumbers(Tool):
  content = base64.b64decode(resp.json()["content"]).decode().splitlines()
  numbered = "\n".join(f"{i+1}: {line}" for i, line in enumerate(content))
  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=numbered,
  params=params,
  )
  except Exception as e:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  data=self.toolset._sanitize_error(str(e)),
  params=params,
  )
@@ -304,7 +304,7 @@ class GitListFiles(Tool):
  resp = requests.get(url, headers=headers)
  if resp.status_code != 200:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  data=self.toolset._sanitize_error(
  f"Error listing files: {resp.text}"
  ),
@@ -312,13 +312,13 @@ class GitListFiles(Tool):
  )
  paths = [entry["path"] for entry in resp.json()["tree"]]
  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=paths,
  params=params,
  )
  except Exception as e:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  data=self.toolset._sanitize_error(str(e)),
  params=params,
  )
@@ -353,13 +353,13 @@ class GitListOpenPRs(Tool):
  for pr in prs
  ]
  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=formatted,
  params=params,
  )
  except Exception as e:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  data=self.toolset._sanitize_error(str(e)),
  params=params,
  )
@@ -413,14 +413,14 @@ class GitExecuteChanges(Tool):
  ) -> StructuredToolResult:
  def error(msg: str) -> StructuredToolResult:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  data=self.toolset._sanitize_error(msg),
  params=params,
  )

  def success(msg: Any) -> StructuredToolResult:
  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS, data=msg, params=params
+ status=StructuredToolResultStatus.SUCCESS, data=msg, params=params
  )

  def modify_lines(lines: List[str]) -> List[str]:
@@ -643,24 +643,24 @@ class GitUpdatePR(Tool):
  # Validate inputs
  if not commit_message.strip():
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="Tool call failed to run: Commit message cannot be empty",
  )
  if not filename.strip():
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="Tool call failed to run: Filename cannot be empty",
  )
  if line < 1:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="Tool call failed to run: Line number must be positive",
  )

  # Verify this is a PR created by our tool
  if not self.toolset.is_created_pr(pr_number):
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Tool call failed to run: PR #{pr_number} was not created by this tool. Only PRs created using git_execute_changes can be updated.",
  )

@@ -714,7 +714,7 @@ class GitUpdatePR(Tool):
  del content_lines[line - 1]
  else:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Tool call failed to run: Invalid command: {command}",
  )

@@ -722,7 +722,7 @@ class GitUpdatePR(Tool):

  if dry_run:
  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=f"DRY RUN: Updated content for PR #{pr_number}:\n\n{updated_content}",
  )

@@ -731,13 +731,13 @@ class GitUpdatePR(Tool):
  pr_number, filename, updated_content, commit_message
  )
  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data=f"Added commit to PR #{pr_number} successfully",
  )

  except Exception as e:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=self.toolset._sanitize_error(
  f"Tool call failed to run: Error updating PR: {str(e)}"
  ),
@@ -745,14 +745,14 @@ class GitUpdatePR(Tool):

  except requests.exceptions.RequestException as e:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=self.toolset._sanitize_error(
  f"Tool call failed to run: Network error: {str(e)}"
  ),
  )
  except Exception as e:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=self.toolset._sanitize_error(
  f"Tool call failed to run: Unexpected error: {str(e)}"
  ),
holmes/plugins/toolsets/grafana/common.py

@@ -3,7 +3,7 @@ from typing import Dict, Optional
  from pydantic import BaseModel
  import datetime

- from holmes.core.tools import StructuredToolResult, ToolResultStatus
+ from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus


  class GrafanaConfig(BaseModel):
@@ -61,7 +61,7 @@ def ensure_grafana_uid_or_return_error_result(
  ) -> Optional[StructuredToolResult]:
  if not config.grafana_datasource_uid:
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error="This tool only works when the toolset is configued ",
  )
  else:
holmes/plugins/toolsets/grafana/toolset_grafana.py

@@ -4,7 +4,7 @@ from holmes.core.tools import (
  StructuredToolResult,
  Tool,
  ToolParameter,
- ToolResultStatus,
+ StructuredToolResultStatus,
  )
  from holmes.plugins.toolsets.grafana.base_grafana_toolset import BaseGrafanaToolset
  import requests # type: ignore
@@ -90,9 +90,9 @@ class ListAndBuildGrafanaDashboardURLs(Tool):
  )

  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS
+ status=StructuredToolResultStatus.SUCCESS
  if formatted_dashboards
- else ToolResultStatus.NO_DATA,
+ else StructuredToolResultStatus.NO_DATA,
  data="\n".join(formatted_dashboards)
  if formatted_dashboards
  else "No dashboards found.",
@@ -102,7 +102,7 @@ class ListAndBuildGrafanaDashboardURLs(Tool):
  except requests.RequestException as e:
  logging.error(f"Error fetching dashboards: {str(e)}")
  return StructuredToolResult(
- status=ToolResultStatus.ERROR,
+ status=StructuredToolResultStatus.ERROR,
  error=f"Error fetching dashboards: {str(e)}",
  url=url,
  params=params,
holmes/plugins/toolsets/grafana/toolset_grafana_loki.py

@@ -14,6 +14,7 @@ from holmes.plugins.toolsets.logging_utils.logging_api import (
  LoggingCapability,
  PodLoggingTool,
  DEFAULT_TIME_SPAN_SECONDS,
+ DEFAULT_LOG_LIMIT,
  )
  from holmes.plugins.toolsets.utils import (
  process_timestamps_to_rfc3339,
@@ -22,7 +23,7 @@ from holmes.plugins.toolsets.utils import (
  from holmes.plugins.toolsets.grafana.loki_api import (
  query_loki_logs_by_label,
  )
- from holmes.core.tools import StructuredToolResult, ToolResultStatus
+ from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus


  class GrafanaLokiToolset(BasePodLoggingToolset):
@@ -94,17 +95,17 @@ class GrafanaLokiToolset(BasePodLoggingToolset):
  label_value=params.pod_name,
  start=start,
  end=end,
- limit=params.limit or 2000,
+ limit=params.limit or DEFAULT_LOG_LIMIT,
  )
  if logs:
  logs.sort(key=lambda x: x["timestamp"])
  return StructuredToolResult(
- status=ToolResultStatus.SUCCESS,
+ status=StructuredToolResultStatus.SUCCESS,
  data="\n".join([format_log(log) for log in logs]),
  params=params.model_dump(),
  )
  else:
  return StructuredToolResult(
- status=ToolResultStatus.NO_DATA,
+ status=StructuredToolResultStatus.NO_DATA,
  params=params.model_dump(),
  )