holmesgpt 0.12.5__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of holmesgpt might be problematic.

Files changed (84)
  1. holmes/__init__.py +1 -1
  2. holmes/clients/robusta_client.py +19 -1
  3. holmes/common/env_vars.py +13 -0
  4. holmes/config.py +69 -9
  5. holmes/core/conversations.py +11 -0
  6. holmes/core/investigation.py +16 -3
  7. holmes/core/investigation_structured_output.py +12 -0
  8. holmes/core/llm.py +10 -0
  9. holmes/core/models.py +9 -1
  10. holmes/core/openai_formatting.py +72 -12
  11. holmes/core/prompt.py +13 -0
  12. holmes/core/supabase_dal.py +3 -0
  13. holmes/core/todo_manager.py +88 -0
  14. holmes/core/tool_calling_llm.py +121 -149
  15. holmes/core/tools.py +10 -1
  16. holmes/core/tools_utils/tool_executor.py +7 -2
  17. holmes/core/tools_utils/toolset_utils.py +7 -2
  18. holmes/core/tracing.py +3 -2
  19. holmes/interactive.py +1 -0
  20. holmes/main.py +2 -1
  21. holmes/plugins/prompts/__init__.py +7 -1
  22. holmes/plugins/prompts/_current_date_time.jinja2 +1 -0
  23. holmes/plugins/prompts/_default_log_prompt.jinja2 +4 -2
  24. holmes/plugins/prompts/_fetch_logs.jinja2 +6 -1
  25. holmes/plugins/prompts/_general_instructions.jinja2 +14 -0
  26. holmes/plugins/prompts/_permission_errors.jinja2 +1 -1
  27. holmes/plugins/prompts/_toolsets_instructions.jinja2 +4 -4
  28. holmes/plugins/prompts/generic_ask.jinja2 +4 -3
  29. holmes/plugins/prompts/investigation_procedure.jinja2 +210 -0
  30. holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +2 -0
  31. holmes/plugins/toolsets/__init__.py +19 -6
  32. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +27 -0
  33. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +2 -2
  34. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +2 -1
  35. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +3 -1
  36. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +2 -1
  37. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +2 -1
  38. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +3 -1
  39. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +2 -1
  40. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +2 -1
  41. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +2 -1
  42. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +2 -1
  43. holmes/plugins/toolsets/coralogix/api.py +6 -6
  44. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +7 -1
  45. holmes/plugins/toolsets/datadog/datadog_api.py +20 -8
  46. holmes/plugins/toolsets/datadog/datadog_metrics_instructions.jinja2 +8 -1
  47. holmes/plugins/toolsets/datadog/datadog_rds_instructions.jinja2 +82 -0
  48. holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +12 -5
  49. holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +20 -11
  50. holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +735 -0
  51. holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +18 -11
  52. holmes/plugins/toolsets/git.py +15 -15
  53. holmes/plugins/toolsets/grafana/grafana_api.py +12 -1
  54. holmes/plugins/toolsets/grafana/toolset_grafana.py +5 -1
  55. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +9 -4
  56. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +12 -5
  57. holmes/plugins/toolsets/internet/internet.py +2 -1
  58. holmes/plugins/toolsets/internet/notion.py +2 -1
  59. holmes/plugins/toolsets/investigator/__init__.py +0 -0
  60. holmes/plugins/toolsets/investigator/core_investigation.py +157 -0
  61. holmes/plugins/toolsets/investigator/investigator_instructions.jinja2 +253 -0
  62. holmes/plugins/toolsets/investigator/model.py +15 -0
  63. holmes/plugins/toolsets/kafka.py +14 -7
  64. holmes/plugins/toolsets/kubernetes_logs.py +454 -25
  65. holmes/plugins/toolsets/logging_utils/logging_api.py +115 -55
  66. holmes/plugins/toolsets/mcp/toolset_mcp.py +1 -1
  67. holmes/plugins/toolsets/newrelic.py +8 -3
  68. holmes/plugins/toolsets/opensearch/opensearch.py +8 -4
  69. holmes/plugins/toolsets/opensearch/opensearch_logs.py +9 -2
  70. holmes/plugins/toolsets/opensearch/opensearch_traces.py +6 -2
  71. holmes/plugins/toolsets/prometheus/prometheus.py +149 -44
  72. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +8 -2
  73. holmes/plugins/toolsets/robusta/robusta.py +4 -4
  74. holmes/plugins/toolsets/runbook/runbook_fetcher.py +6 -5
  75. holmes/plugins/toolsets/servicenow/servicenow.py +18 -3
  76. holmes/plugins/toolsets/utils.py +8 -1
  77. holmes/utils/llms.py +20 -0
  78. holmes/utils/stream.py +90 -0
  79. {holmesgpt-0.12.5.dist-info → holmesgpt-0.13.0.dist-info}/METADATA +48 -35
  80. {holmesgpt-0.12.5.dist-info → holmesgpt-0.13.0.dist-info}/RECORD +83 -74
  81. holmes/utils/robusta.py +0 -9
  82. {holmesgpt-0.12.5.dist-info → holmesgpt-0.13.0.dist-info}/LICENSE.txt +0 -0
  83. {holmesgpt-0.12.5.dist-info → holmesgpt-0.13.0.dist-info}/WHEEL +0 -0
  84. {holmesgpt-0.12.5.dist-info → holmesgpt-0.13.0.dist-info}/entry_points.txt +0 -0
holmes/plugins/toolsets/datadog/toolset_datadog_traces.py
@@ -1,3 +1,5 @@
+"""Datadog Traces toolset for HolmesGPT."""
+
 import json
 import logging
 import os
@@ -20,12 +22,18 @@ from holmes.plugins.toolsets.datadog.datadog_api import (
     get_headers,
     MAX_RETRY_COUNT_ON_RATE_LIMIT,
 )
-from holmes.plugins.toolsets.utils import process_timestamps_to_int
+from holmes.plugins.toolsets.utils import (
+    process_timestamps_to_int,
+    toolset_name_for_one_liner,
+)
 from holmes.plugins.toolsets.datadog.datadog_traces_formatter import (
     format_traces_list,
     format_trace_hierarchy,
     format_spans_search,
 )
+from holmes.plugins.toolsets.logging_utils.logging_api import (
+    DEFAULT_TIME_SPAN_SECONDS,
+)


 class DatadogTracesConfig(DatadogBaseConfig):
@@ -200,8 +208,8 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
         if "min_duration" in params:
             filters.append(f"duration>{params['min_duration']}")

-        filter_str = " AND ".join(filters) if filters else "all traces"
-        return f"DataDog: fetch traces matching {filter_str}"
+        filter_str = ", ".join(filters) if filters else "all"
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Fetch Traces ({filter_str})"

     def _invoke(self, params: Any) -> StructuredToolResult:
         """Execute the tool to fetch traces."""
@@ -220,7 +228,7 @@ class FetchDatadogTracesList(BaseDatadogTracesTool):
         from_time_int, to_time_int = process_timestamps_to_int(
             start=params.get("start_datetime"),
             end=params.get("end_datetime"),
-            default_time_span_seconds=3600,  # Default to 1 hour
+            default_time_span_seconds=DEFAULT_TIME_SPAN_SECONDS,
         )

         # Convert to milliseconds for Datadog API
@@ -364,9 +372,8 @@ class FetchDatadogTraceById(BaseDatadogTracesTool):

     def get_parameterized_one_liner(self, params: dict) -> str:
         """Get a one-liner description of the tool invocation."""
-        return (
-            f"DataDog: fetch trace details for ID {params.get('trace_id', 'unknown')}"
-        )
+        trace_id = params.get("trace_id", "unknown")
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Fetch Trace Details ({trace_id})"

     def _invoke(self, params: Any) -> StructuredToolResult:
         """Execute the tool to fetch trace details."""
@@ -538,7 +545,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
     def get_parameterized_one_liner(self, params: dict) -> str:
         """Get a one-liner description of the tool invocation."""
         if "query" in params:
-            return f"DataDog: search spans with query: {params['query']}"
+            return f"{toolset_name_for_one_liner(self.toolset.name)}: Search Spans ({params['query']})"

         filters = []
         if "service" in params:
@@ -546,8 +553,8 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
         if "operation" in params:
             filters.append(f"operation={params['operation']}")

-        filter_str = " AND ".join(filters) if filters else "all spans"
-        return f"DataDog: search spans matching {filter_str}"
+        filter_str = ", ".join(filters) if filters else "all"
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Search Spans ({filter_str})"

     def _invoke(self, params: Any) -> StructuredToolResult:
         """Execute the tool to search spans."""
@@ -566,7 +573,7 @@ class FetchDatadogSpansByFilter(BaseDatadogTracesTool):
         from_time_int, to_time_int = process_timestamps_to_int(
             start=params.get("start_datetime"),
             end=params.get("end_datetime"),
-            default_time_span_seconds=3600,  # Default to 1 hour
+            default_time_span_seconds=DEFAULT_TIME_SPAN_SECONDS,
        )

         # Convert to milliseconds for Datadog API
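Both Datadog trace tools above now pull their default lookback from the shared logging constant rather than a hardcoded 3600 seconds. A minimal sketch of the call pattern under that assumption (the exact fallback behaviour of process_timestamps_to_int is not shown in this diff):

from holmes.plugins.toolsets.logging_utils.logging_api import DEFAULT_TIME_SPAN_SECONDS
from holmes.plugins.toolsets.utils import process_timestamps_to_int

# Assumption: with no explicit start/end, the helper falls back to a window of
# DEFAULT_TIME_SPAN_SECONDS ending "now", matching the 1-hour literal it replaces.
from_time_int, to_time_int = process_timestamps_to_int(
    start=None,
    end=None,
    default_time_span_seconds=DEFAULT_TIME_SPAN_SECONDS,
)
# Per the hunk's own comment, these integers are then converted to milliseconds
# for the Datadog API (e.g. from_time_int * 1000).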
holmes/plugins/toolsets/git.py
@@ -13,6 +13,7 @@ from holmes.core.tools import (
     ToolsetTag,
     CallablePrerequisite,
 )
+from holmes.plugins.toolsets.utils import toolset_name_for_one_liner


 class GitHubConfig(BaseModel):
@@ -277,7 +278,8 @@ class GitReadFileWithLineNumbers(Tool):
         )

     def get_parameterized_one_liner(self, params) -> str:
-        return "Reading git files"
+        filepath = params.get("filepath", "")
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Read Git File ({filepath})"


 class GitListFiles(Tool):
@@ -318,7 +320,7 @@ class GitListFiles(Tool):
         )

     def get_parameterized_one_liner(self, params) -> str:
-        return "listing git files"
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: List Git Files"


 class GitListOpenPRs(Tool):
@@ -357,7 +359,7 @@ class GitListOpenPRs(Tool):
         )

     def get_parameterized_one_liner(self, params) -> str:
-        return "Listing PR's"
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: List Open PRs"


 class GitExecuteChanges(Tool):
@@ -569,12 +571,11 @@ class GitExecuteChanges(Tool):
             return error(f"Unexpected error: {e}")

     def get_parameterized_one_liner(self, params) -> str:
-        return (
-            f"git execute_changes(line={params['line']}, filename='{params['filename']}', "
-            f"command='{params['command']}', code='{params.get('code', '')}', "
-            f"open_pr={params['open_pr']}, commit_pr='{params['commit_pr']}', "
-            f"dry_run={params['dry_run']}, commit_message='{params['commit_message']}')"
-        )
+        command = params.get("command", "")
+        filename = params.get("filename", "")
+        dry_run = params.get("dry_run", False)
+        mode = "(dry run)" if dry_run else ""
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Execute Git Changes ({command} in {filename}) {mode}".strip()


 class GitUpdatePR(Tool):
@@ -748,9 +749,8 @@ class GitUpdatePR(Tool):
         )

     def get_parameterized_one_liner(self, params) -> str:
-        return (
-            f"git update_pr(line={params['line']}, filename='{params['filename']}', "
-            f"command='{params['command']}', code='{params.get('code', '')}', "
-            f"pr_number={params['pr_number']}, dry_run={params['dry_run']}, "
-            f"commit_message='{params['commit_message']}')"
-        )
+        pr_number = params.get("pr_number", "")
+        command = params.get("command", "")
+        dry_run = params.get("dry_run", False)
+        mode = "(dry run)" if dry_run else ""
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Update PR #{pr_number} ({command}) {mode}".strip()
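All of the one-liner rewrites above follow the same convention: a title from toolset_name_for_one_liner plus a short action summary built only from values read defensively with params.get. A minimal sketch of that pattern, using a hypothetical tool class and a hypothetical example_param parameter (not code from the package), assuming the tool keeps a reference to its toolset in self.toolset as the git tools do:

from holmes.core.tools import Tool
from holmes.plugins.toolsets.utils import toolset_name_for_one_liner

class ExampleTool(Tool):  # hypothetical subclass, for illustration only
    def get_parameterized_one_liner(self, params: dict) -> str:
        target = params.get("example_param", "")  # hypothetical parameter name
        return f"{toolset_name_for_one_liner(self.toolset.name)}: Do Something ({target})"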
holmes/plugins/toolsets/grafana/grafana_api.py
@@ -28,4 +28,15 @@ def grafana_health_check(config: GrafanaConfig) -> Tuple[bool, str]:
         return True, ""
     except Exception as e:
         logging.error(f"Failed to fetch grafana health status at {url}", exc_info=True)
-        return False, f"Failed to fetch grafana health status at {url}. {str(e)}"
+        error_msg = f"Failed to fetch grafana health status at {url}. {str(e)}"
+
+        # Add helpful hint if this looks like a common misconfiguration
+        if config.grafana_datasource_uid and ":3100" in config.url:
+            error_msg += (
+                "\n\nPossible configuration issue: grafana_datasource_uid is set but URL contains port 3100 "
+                "(typically used for direct Loki connections). Please verify:\n"
+                "- If connecting directly to Loki: remove grafana_datasource_uid from config\n"
+                "- If connecting via Grafana proxy: ensure URL points to Grafana (usually port 3000)"
+            )
+
+        return False, error_msg
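The function keeps its (ok, error_message) return shape; only the message gains the hint. A minimal caller sketch, assuming config is an already-built GrafanaConfig exposing the url and grafana_datasource_uid fields referenced above:

import logging

from holmes.plugins.toolsets.grafana.grafana_api import grafana_health_check

ok, error_msg = grafana_health_check(config)  # config: GrafanaConfig, assumed constructed elsewhere
if not ok:
    # error_msg may now end with the grafana_datasource_uid / port-3100 hint
    logging.error(error_msg)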
holmes/plugins/toolsets/grafana/toolset_grafana.py
@@ -5,6 +5,8 @@ from holmes.plugins.toolsets.grafana.base_grafana_toolset import BaseGrafanaTool
 import requests  # type: ignore
 import logging

+from holmes.plugins.toolsets.utils import toolset_name_for_one_liner
+

 class ListAndBuildGrafanaDashboardURLs(Tool):
     def __init__(self, toolset: BaseGrafanaToolset):
@@ -86,7 +88,9 @@ class ListAndBuildGrafanaDashboardURLs(Tool):
             return f"Error fetching dashboards: {str(e)}"

     def get_parameterized_one_liner(self, params: Dict) -> str:
-        return f"Lists Grafana dashboards and builds URLs with parameters: {params}"
+        return (
+            f"{toolset_name_for_one_liner(self._toolset.name)}: List Grafana Dashboards"
+        )


 class GrafanaToolset(BaseGrafanaToolset):
holmes/plugins/toolsets/grafana/toolset_grafana_loki.py
@@ -1,4 +1,4 @@
-from typing import Any, cast
+from typing import Any, cast, Set
 from pydantic import BaseModel

 from holmes.core.tools import CallablePrerequisite
@@ -11,7 +11,9 @@ from holmes.plugins.toolsets.grafana.grafana_api import grafana_health_check
 from holmes.plugins.toolsets.logging_utils.logging_api import (
     BasePodLoggingToolset,
     FetchPodLogsParams,
+    LoggingCapability,
     PodLoggingTool,
+    DEFAULT_TIME_SPAN_SECONDS,
 )
 from holmes.plugins.toolsets.utils import (
     process_timestamps_to_rfc3339,
@@ -22,8 +24,6 @@ from holmes.plugins.toolsets.grafana.loki_api import (
 )
 from holmes.core.tools import StructuredToolResult, ToolResultStatus

-DEFAULT_TIME_SPAN_SECONDS = 3600
-

 class GrafanaLokiLabelsConfig(BaseModel):
     pod: str = "pod"
@@ -35,6 +35,11 @@ class GrafanaLokiConfig(GrafanaConfig):


 class GrafanaLokiToolset(BasePodLoggingToolset):
+    @property
+    def supported_capabilities(self) -> Set[LoggingCapability]:
+        """Loki only supports substring matching, not regex or exclude filters"""
+        return set()  # No regex support, no exclude filter
+
     def __init__(self):
         super().__init__(
             name="grafana/loki",
@@ -49,7 +54,7 @@

     def prerequisites_callable(self, config: dict[str, Any]) -> tuple[bool, str]:
         if not config:
-            return False, "Missing Grafana Loki configuration. Check your config."
+            return False, "Missing Loki configuration. Check your config."

         self.config = GrafanaLokiConfig(**config)

holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py
@@ -23,7 +23,14 @@ from holmes.plugins.toolsets.grafana.tempo_api import (
     query_tempo_traces,
 )
 from holmes.plugins.toolsets.grafana.trace_parser import format_traces_list
-from holmes.plugins.toolsets.utils import get_param_or_raise, process_timestamps_to_int
+from holmes.plugins.toolsets.logging_utils.logging_api import (
+    DEFAULT_TIME_SPAN_SECONDS,
+)
+from holmes.plugins.toolsets.utils import (
+    get_param_or_raise,
+    process_timestamps_to_int,
+    toolset_name_for_one_liner,
+)

 TEMPO_LABELS_ADD_PREFIX = load_bool("TEMPO_LABELS_ADD_PREFIX", True)

@@ -144,7 +151,7 @@ class GetTempoTraces(Tool):
         start, end = process_timestamps_to_int(
             params.get("start_datetime"),
             params.get("end_datetime"),
-            default_time_span_seconds=3600,
+            default_time_span_seconds=DEFAULT_TIME_SPAN_SECONDS,
         )

         prefix = ""
@@ -190,7 +197,7 @@ class GetTempoTraces(Tool):
         )

     def get_parameterized_one_liner(self, params: Dict) -> str:
-        return f"Fetched Tempo traces with min_duration={params.get('min_duration')} ({str(params)})"
+        return f"{toolset_name_for_one_liner(self._toolset.name)}: Fetched Tempo Traces (min_duration={params.get('min_duration')})"


 class GetTempoTags(Tool):
@@ -244,7 +251,7 @@ class GetTempoTags(Tool):
         )

     def get_parameterized_one_liner(self, params: Dict) -> str:
-        return f"Fetched Tempo tags ({str(params)})"
+        return f"{toolset_name_for_one_liner(self._toolset.name)}: Fetched Tempo tags"


 class GetTempoTraceById(Tool):
@@ -281,7 +288,7 @@ class GetTempoTraceById(Tool):
         )

     def get_parameterized_one_liner(self, params: Dict) -> str:
-        return f"Fetched Tempo trace with trace_id={params.get('trace_id')} ({str(params)})"
+        return f"{toolset_name_for_one_liner(self._toolset.name)}: Fetched Tempo Trace (trace_id={params.get('trace_id')})"


 class GrafanaTempoToolset(BaseGrafanaTempoToolset):
holmes/plugins/toolsets/internet/internet.py
@@ -16,6 +16,7 @@ from bs4 import BeautifulSoup

 import requests  # type: ignore
 from holmes.core.tools import StructuredToolResult, ToolResultStatus
+from holmes.plugins.toolsets.utils import toolset_name_for_one_liner


 # TODO: change and make it holmes
@@ -215,7 +216,7 @@ class FetchWebpage(Tool):

     def get_parameterized_one_liner(self, params) -> str:
         url: str = params.get("url", "<missing url>")
-        return f"fetched webpage {url}"
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Fetch Webpage {url}"


 class InternetBaseToolset(Toolset):
holmes/plugins/toolsets/internet/notion.py
@@ -15,6 +15,7 @@ from holmes.core.tools import (
     StructuredToolResult,
     ToolResultStatus,
 )
+from holmes.plugins.toolsets.utils import toolset_name_for_one_liner


 class FetchNotion(Tool):
@@ -108,7 +109,7 @@ class FetchNotion(Tool):

     def get_parameterized_one_liner(self, params) -> str:
         url: str = params["url"]
-        return f"fetched notion webpage {url}"
+        return f"{toolset_name_for_one_liner(self.toolset.name)}: Fetch Webpage {url}"


 class NotionToolset(InternetBaseToolset):
holmes/plugins/toolsets/investigator/__init__.py
File without changes
holmes/plugins/toolsets/investigator/core_investigation.py
@@ -0,0 +1,157 @@
+import logging
+import os
+from typing import Any, Dict
+
+from uuid import uuid4
+from holmes.core.todo_manager import (
+    get_todo_manager,
+)
+
+from holmes.core.tools import (
+    Toolset,
+    ToolsetTag,
+    ToolParameter,
+    Tool,
+    StructuredToolResult,
+    ToolResultStatus,
+)
+from holmes.plugins.toolsets.investigator.model import Task, TaskStatus
+
+
+class TodoWriteTool(Tool):
+    name: str = "TodoWrite"
+    description: str = "Save investigation tasks to break down complex problems into manageable sub-tasks. ALWAYS provide the COMPLETE list of all tasks, not just the ones being updated."
+    parameters: Dict[str, ToolParameter] = {
+        "todos": ToolParameter(
+            description="COMPLETE list of ALL tasks on the task list. Each task should have: id (string), content (string), status (pending/in_progress/completed)",
+            type="array",
+            required=True,
+            items=ToolParameter(
+                type="object",
+                properties={
+                    "id": ToolParameter(type="string", required=True),
+                    "content": ToolParameter(type="string", required=True),
+                    "status": ToolParameter(type="string", required=True),
+                },
+            ),
+        ),
+        "investigation_id": ToolParameter(
+            description="This investigation identifier. This is a uuid that represents the investigation session id.",
+            type="string",
+            required=True,
+        ),
+    }
+
+    # Print a nice table to console/log
+    def print_tasks_table(self, tasks):
+        if not tasks:
+            logging.info("No tasks in the investigation plan.")
+            return
+
+        status_icons = {
+            "pending": "[ ]",
+            "in_progress": "[~]",
+            "completed": "[✓]",
+        }
+
+        max_id_width = max(len(str(task.id)) for task in tasks)
+        max_content_width = max(len(task.content) for task in tasks)
+        max_status_display_width = max(
+            len(f"{status_icons[task.status.value]} {task.status.value}")
+            for task in tasks
+        )
+
+        id_width = max(max_id_width, len("ID"))
+        content_width = max(max_content_width, len("Content"))
+        status_width = max(max_status_display_width, len("Status"))
+
+        # Build table
+        separator = f"+{'-' * (id_width + 2)}+{'-' * (content_width + 2)}+{'-' * (status_width + 2)}+"
+        header = f"| {'ID':<{id_width}} | {'Content':<{content_width}} | {'Status':<{status_width}} |"
+
+        # Log the table
+        logging.info("Updated Investigation Tasks:")
+        logging.info(separator)
+        logging.info(header)
+        logging.info(separator)
+
+        for task in tasks:
+            status_display = f"{status_icons[task.status.value]} {task.status.value}"
+            row = f"| {task.id:<{id_width}} | {task.content:<{content_width}} | {status_display:<{status_width}} |"
+            logging.info(row)
+
+        logging.info(separator)
+
+    def _invoke(self, params: Dict) -> StructuredToolResult:
+        try:
+            todos_data = params.get("todos", [])
+
+            tasks = []
+
+            for todo_item in todos_data:
+                if isinstance(todo_item, dict):
+                    task = Task(
+                        id=todo_item.get("id", str(uuid4())),
+                        content=todo_item.get("content", ""),
+                        status=TaskStatus(todo_item.get("status", "pending")),
+                    )
+                    tasks.append(task)
+
+            logging.info(f"Tasks: {len(tasks)}")
+
+            # Store tasks in session storage
+            todo_manager = get_todo_manager()
+            session_id = params.get("investigation_id", "")
+            todo_manager.update_session_tasks(session_id, tasks)
+
+            self.print_tasks_table(tasks)
+
+            formatted_tasks = todo_manager.format_tasks_for_prompt(session_id)
+
+            response_data = f"✅ Investigation plan updated with {len(tasks)} tasks. Tasks are now stored in session and will appear in subsequent prompts.\n\n"
+            if formatted_tasks:
+                response_data += formatted_tasks
+            else:
+                response_data += "No tasks currently in the investigation plan."
+
+            return StructuredToolResult(
+                status=ToolResultStatus.SUCCESS,
+                data=response_data,
+                params=params,
+            )
+
+        except Exception as e:
+            logging.exception("error using todowrite tool")
+            return StructuredToolResult(
+                status=ToolResultStatus.ERROR,
+                error=f"Failed to process tasks: {str(e)}",
+                params=params,
+            )
+
+    def get_parameterized_one_liner(self, params: Dict) -> str:
+        todos = params.get("todos", [])
+        return f"Write {todos} investigation tasks"
+
+
+class CoreInvestigationToolset(Toolset):
+    """Core toolset for investigation management and task planning."""
+
+    def __init__(self):
+        super().__init__(
+            name="core_investigation",
+            description="Core investigation tools for task management and planning",
+            enabled=True,
+            tools=[TodoWriteTool()],
+            tags=[ToolsetTag.CORE],
+            is_default=True,
+        )
+        logging.info("Core investigation toolset loaded")
+
+    def get_example_config(self) -> Dict[str, Any]:
+        return {}
+
+    def _reload_instructions(self):
+        template_file_path = os.path.abspath(
+            os.path.join(os.path.dirname(__file__), "investigator_instructions.jinja2")
+        )
+        self._load_llm_instructions(jinja_template=f"file://{template_file_path}")
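For orientation, a minimal usage sketch of the new tool, calling _invoke directly the way the toolset runner would; the task contents and the investigation id below are hypothetical:

from holmes.plugins.toolsets.investigator.core_investigation import TodoWriteTool

tool = TodoWriteTool()
result = tool._invoke(
    {
        "investigation_id": "hypothetical-session-uuid",
        "todos": [
            {"id": "1", "content": "Check pod restart count", "status": "pending"},
            {"id": "2", "content": "Fetch recent error logs", "status": "in_progress"},
        ],
    }
)
print(result.status, result.data)  # SUCCESS plus the formatted task list stored for later prompts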