holmesgpt 0.14.1a0__py3-none-any.whl → 0.14.3a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of holmesgpt might be problematic. Click here for more details.

Files changed (73) hide show
  1. holmes/__init__.py +1 -1
  2. holmes/clients/robusta_client.py +5 -2
  3. holmes/common/env_vars.py +8 -2
  4. holmes/config.py +4 -7
  5. holmes/core/conversations.py +12 -2
  6. holmes/core/feedback.py +191 -0
  7. holmes/core/llm.py +52 -10
  8. holmes/core/models.py +101 -1
  9. holmes/core/supabase_dal.py +23 -9
  10. holmes/core/tool_calling_llm.py +206 -16
  11. holmes/core/tools.py +20 -7
  12. holmes/core/tools_utils/token_counting.py +13 -0
  13. holmes/core/tools_utils/tool_context_window_limiter.py +45 -23
  14. holmes/core/tools_utils/tool_executor.py +11 -6
  15. holmes/core/toolset_manager.py +7 -3
  16. holmes/core/truncation/dal_truncation_utils.py +23 -0
  17. holmes/interactive.py +146 -14
  18. holmes/plugins/prompts/_fetch_logs.jinja2 +13 -1
  19. holmes/plugins/runbooks/__init__.py +6 -1
  20. holmes/plugins/toolsets/__init__.py +11 -4
  21. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +9 -20
  22. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +2 -3
  23. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +2 -3
  24. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +6 -4
  25. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +6 -4
  26. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +2 -3
  27. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +6 -4
  28. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +2 -3
  29. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +2 -3
  30. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +2 -3
  31. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +2 -3
  32. holmes/plugins/toolsets/bash/bash_toolset.py +4 -7
  33. holmes/plugins/toolsets/cilium.yaml +284 -0
  34. holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
  35. holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
  36. holmes/plugins/toolsets/datadog/toolset_datadog_general.py +333 -199
  37. holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +181 -9
  38. holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +80 -22
  39. holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +5 -8
  40. holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +7 -12
  41. holmes/plugins/toolsets/git.py +14 -12
  42. holmes/plugins/toolsets/grafana/grafana_tempo_api.py +23 -42
  43. holmes/plugins/toolsets/grafana/toolset_grafana.py +2 -3
  44. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +2 -1
  45. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +21 -39
  46. holmes/plugins/toolsets/internet/internet.py +2 -3
  47. holmes/plugins/toolsets/internet/notion.py +2 -3
  48. holmes/plugins/toolsets/investigator/core_investigation.py +7 -9
  49. holmes/plugins/toolsets/kafka.py +7 -18
  50. holmes/plugins/toolsets/logging_utils/logging_api.py +80 -4
  51. holmes/plugins/toolsets/mcp/toolset_mcp.py +2 -3
  52. holmes/plugins/toolsets/newrelic/__init__.py +0 -0
  53. holmes/plugins/toolsets/newrelic/new_relic_api.py +125 -0
  54. holmes/plugins/toolsets/newrelic/newrelic.jinja2 +41 -0
  55. holmes/plugins/toolsets/newrelic/newrelic.py +211 -0
  56. holmes/plugins/toolsets/opensearch/opensearch.py +5 -12
  57. holmes/plugins/toolsets/opensearch/opensearch_traces.py +3 -6
  58. holmes/plugins/toolsets/prometheus/prometheus.py +808 -419
  59. holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +27 -11
  60. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +3 -6
  61. holmes/plugins/toolsets/robusta/robusta.py +4 -9
  62. holmes/plugins/toolsets/runbook/runbook_fetcher.py +93 -13
  63. holmes/plugins/toolsets/servicenow/servicenow.py +5 -10
  64. holmes/utils/sentry_helper.py +1 -1
  65. holmes/utils/stream.py +22 -7
  66. holmes/version.py +34 -14
  67. {holmesgpt-0.14.1a0.dist-info → holmesgpt-0.14.3a0.dist-info}/METADATA +7 -9
  68. {holmesgpt-0.14.1a0.dist-info → holmesgpt-0.14.3a0.dist-info}/RECORD +71 -65
  69. holmes/core/tools_utils/data_types.py +0 -81
  70. holmes/plugins/toolsets/newrelic.py +0 -231
  71. {holmesgpt-0.14.1a0.dist-info → holmesgpt-0.14.3a0.dist-info}/LICENSE.txt +0 -0
  72. {holmesgpt-0.14.1a0.dist-info → holmesgpt-0.14.3a0.dist-info}/WHEEL +0 -0
  73. {holmesgpt-0.14.1a0.dist-info → holmesgpt-0.14.3a0.dist-info}/entry_points.txt +0 -0
@@ -1,6 +1,27 @@
1
1
 
2
2
  # Prometheus/PromQL queries
3
- * ALWAYS call list_prometheus_rules to get the alert definition
3
+
4
+ ## Efficient Metric Discovery (when needed)
5
+ * When you need to discover metrics, use `get_metric_names` with filters - it's the fastest method
6
+ * Combine multiple patterns with regex OR (|) to reduce API calls:
7
+ - `{__name__=~"node_cpu.*|node_memory.*|node_disk.*"}` - get all node resource metrics in one call
8
+ - `{__name__=~"container.*|pod.*|kube.*"}` - get all Kubernetes-related metrics
9
+ - `{namespace=~"example1|example2|example3"}` - metrics from multiple namespaces
10
+ * Use `get_metric_metadata` after discovering names to get types/descriptions if needed
11
+ * Use `get_label_values` to discover pods, namespaces, jobs: e.g., get_label_values(label="pod")
12
+ * Only use `get_series` when you need full label sets (slower than other methods)
13
+
14
+ ## Retrying queries that return too much data
15
+ * When a Prometheus query returns too much data (e.g., truncation error), you MUST retry with a more specific query or less data points or topk/bottomk
16
+ * NEVER EVER EVER answer a question based on Prometheus data that was truncated as you might be missing important information and give the totally wrong answer
17
+ * Prefer telling the user you can't answer the question because of too much data rather than answering based on incomplete data
18
+ * You are also able to show graphs to the user (using the promql embed functionality mentioned below) so you can show users graphs and THEY can interpret the data themselves, even if you can't answer.
19
+ * Do NOT hesitate to try alternative queries and try to reduce the amount of data returned until you get a successful query
20
+ * Be extremely, extremely cautious when answering based on get_label_values because the existence of a label value says NOTHING about the metric value itself (is it high, low, or perhaps the label exists in Prometheus but it's an older series not present right now)
21
+ * DO NOT give answers about metrics based on what 'is typically the case' or 'common knowledge' - if you can't see the actual metric value, you MUST NEVER EVER answer about it - just tell the user your limitations due to the size of the data
22
+
23
+ ## Alert Investigation & Query Execution
24
+ * When investigating a Prometheus alert, ALWAYS call list_prometheus_rules to get the alert definition
4
25
  * Use Prometheus to query metrics from the alert promql
5
26
  * Use prometheus to execute promql queries with the tools `execute_prometheus_instant_query` and `execute_prometheus_range_query`
6
27
  * To create queries, use 'start_timestamp' and 'end_timestamp' as graphs start and end times
@@ -16,7 +37,7 @@
16
37
  ** Avoid global averages like `sum(rate(<metric>_sum)) / sum(rate(<metric>_count))` because it hides data and is not generally informative
17
38
  * Timestamps MUST be in string date format. For example: '2025-03-15 10:10:08.610862+00:00'
18
39
  * Post processing will parse your response, re-run the query from the tool output and create a chart visible to the user
19
- * Only generate and execute a prometheus query after checking what metrics are available with the `list_available_metrics` tool
40
+ * When unsure about available metrics, use `get_metric_names` with appropriate filters (combine multiple patterns with | for efficiency). Then use `get_metric_metadata` if you need descriptions/types
20
41
  * Check that any node, service, pod, container, app, namespace, etc. mentioned in the query exist in the kubernetes cluster before making a query. Use any appropriate kubectl tool(s) for this
21
42
  * The toolcall will return no data to you. That is expected. You MUST however ensure that the query is successful.
22
43
 
@@ -25,24 +46,19 @@
25
46
  * ALWAYS use `topk()` or `bottomk()` to limit the number of series returned
26
47
  * Standard pattern for high-cardinality queries:
27
48
  - Use `topk(5, <your_query>)` to get the top 5 series
28
- - Example: `topk(5, rate(container_cpu_usage_seconds_total{namespace="default"}[5m]))`
49
+ - Example: `topk(5, rate(container_cpu_usage_seconds_total{namespace="example"}[5m]))`
29
50
  - This prevents context overflow and focuses on the most relevant data
30
51
  * To also capture the aggregate of remaining series as "other":
31
52
  ```
32
- topk(5, rate(container_cpu_usage_seconds_total{namespace="default"}[5m]))
33
- or
34
- label_replace(
35
- (sum(rate(container_cpu_usage_seconds_total{namespace="default"}[5m])) - sum(topk(5, rate(container_cpu_usage_seconds_total{namespace="default"}[5m])))),
36
- "pod", "other", "", ""
37
- )
53
+ topk(5, rate(container_cpu_usage_seconds_total{namespace="example"}[5m])) or label_replace((sum(rate(container_cpu_usage_seconds_total{namespace="example"}[5m])) - sum(topk(5, rate(container_cpu_usage_seconds_total{namespace="example"}[5m])))), "pod", "other", "", "")
38
54
  ```
39
55
  * Common high-cardinality scenarios requiring topk():
40
56
  - Pod-level metrics in namespaces with many pods
41
57
  - Container-level CPU/memory metrics
42
58
  - HTTP metrics with many endpoints or status codes
43
59
  - Any query returning more than 10 time series
44
- * For initial exploration, use instant queries with `count()` to check cardinality:
45
- - Example: `count(count by (pod) (container_cpu_usage_seconds_total{namespace="default"}))`
60
+ * For initial exploration, you may use instant queries with `count()` to check cardinality:
61
+ - Example: `count(count by (pod) (container_cpu_usage_seconds_total{namespace="example"}))`
46
62
  - If count > 10, use topk() in your range query
47
63
  * When doing queries, always extend the time range, to 15 min before and after the alert start time
48
64
  * ALWAYS embed the execution results into your answer
@@ -7,6 +7,7 @@ from holmes.core.tools import (
7
7
  CallablePrerequisite,
8
8
  StructuredToolResult,
9
9
  Tool,
10
+ ToolInvokeContext,
10
11
  ToolParameter,
11
12
  StructuredToolResultStatus,
12
13
  Toolset,
@@ -63,9 +64,7 @@ class ListConfiguredClusters(BaseRabbitMQTool):
63
64
  toolset=toolset,
64
65
  )
65
66
 
66
- def _invoke(
67
- self, params: dict, user_approved: bool = False
68
- ) -> StructuredToolResult:
67
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
69
68
  if not self.toolset.config:
70
69
  raise ValueError("RabbitMQ is not configured.")
71
70
 
@@ -103,9 +102,7 @@ class GetRabbitMQClusterStatus(BaseRabbitMQTool):
103
102
  toolset=toolset,
104
103
  )
105
104
 
106
- def _invoke(
107
- self, params: dict, user_approved: bool = False
108
- ) -> StructuredToolResult:
105
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
109
106
  try:
110
107
  # Fetch node details which include partition info
111
108
  cluster_config = self._get_cluster_config(
@@ -7,6 +7,7 @@ from holmes.core.supabase_dal import SupabaseDal
7
7
  from holmes.core.tools import (
8
8
  StaticPrerequisite,
9
9
  Tool,
10
+ ToolInvokeContext,
10
11
  ToolParameter,
11
12
  Toolset,
12
13
  ToolsetTag,
@@ -45,9 +46,7 @@ class FetchRobustaFinding(Tool):
45
46
  logging.error(error)
46
47
  return {"error": error}
47
48
 
48
- def _invoke(
49
- self, params: dict, user_approved: bool = False
50
- ) -> StructuredToolResult:
49
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
51
50
  finding_id = params[PARAM_FINDING_ID]
52
51
  try:
53
52
  finding = self._fetch_finding(finding_id)
@@ -115,9 +114,7 @@ class FetchResourceRecommendation(Tool):
115
114
  )
116
115
  return None
117
116
 
118
- def _invoke(
119
- self, params: dict, user_approved: bool = False
120
- ) -> StructuredToolResult:
117
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
121
118
  try:
122
119
  recommendations = self._resource_recommendation(params)
123
120
  if recommendations:
@@ -175,9 +172,7 @@ class FetchConfigurationChanges(Tool):
175
172
  )
176
173
  return None
177
174
 
178
- def _invoke(
179
- self, params: dict, user_approved: bool = False
180
- ) -> StructuredToolResult:
175
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
181
176
  try:
182
177
  changes = self._fetch_change_history(params)
183
178
  if changes:
@@ -1,17 +1,23 @@
1
1
  import logging
2
+ import os
2
3
  import textwrap
3
4
  from typing import Any, Dict, List, Optional
4
5
 
5
6
  from holmes.core.tools import (
6
7
  StructuredToolResult,
7
8
  Tool,
9
+ ToolInvokeContext,
8
10
  ToolParameter,
9
11
  StructuredToolResultStatus,
10
12
  Toolset,
11
13
  ToolsetTag,
12
14
  )
13
15
 
14
- from holmes.plugins.runbooks import get_runbook_by_path, DEFAULT_RUNBOOK_SEARCH_PATH
16
+ from holmes.plugins.runbooks import (
17
+ get_runbook_by_path,
18
+ load_runbook_catalog,
19
+ DEFAULT_RUNBOOK_SEARCH_PATH,
20
+ )
15
21
  from holmes.plugins.toolsets.utils import toolset_name_for_one_liner
16
22
 
17
23
 
@@ -19,30 +25,104 @@ from holmes.plugins.toolsets.utils import toolset_name_for_one_liner
19
25
  # runbooks from external sources as well.
20
26
  class RunbookFetcher(Tool):
21
27
  toolset: "RunbookToolset"
28
+ available_runbooks: List[str] = []
29
+ additional_search_paths: Optional[List[str]] = None
30
+
31
+ def __init__(
32
+ self,
33
+ toolset: "RunbookToolset",
34
+ additional_search_paths: Optional[List[str]] = None,
35
+ ):
36
+ catalog = load_runbook_catalog()
37
+ available_runbooks = []
38
+ if catalog:
39
+ available_runbooks = [entry.link for entry in catalog.catalog]
40
+
41
+ # If additional search paths are configured (e.g., for testing), also scan those for .md files
42
+ if additional_search_paths:
43
+ for search_path in additional_search_paths:
44
+ if not os.path.isdir(search_path):
45
+ continue
46
+
47
+ for file in os.listdir(search_path):
48
+ if file.endswith(".md") and file not in available_runbooks:
49
+ available_runbooks.append(file)
50
+
51
+ # Build description with available runbooks
52
+ runbook_list = ", ".join([f'"{rb}"' for rb in available_runbooks])
22
53
 
23
- def __init__(self, toolset: "RunbookToolset"):
24
54
  super().__init__(
25
55
  name="fetch_runbook",
26
56
  description="Get runbook content by runbook link. Use this to get troubleshooting steps for incidents",
27
57
  parameters={
28
- # use link as a more generic term for runbook path, considering we may have external links in the future
29
58
  "link": ToolParameter(
30
- description="The link to the runbook",
59
+ description=f"The link to the runbook (non-empty string required). Must be one of: {runbook_list}",
31
60
  type="string",
32
61
  required=True,
33
62
  ),
34
63
  },
35
- toolset=toolset, # type: ignore
64
+ toolset=toolset, # type: ignore[call-arg]
65
+ available_runbooks=available_runbooks, # type: ignore[call-arg]
66
+ additional_search_paths=additional_search_paths, # type: ignore[call-arg]
36
67
  )
37
68
 
38
- def _invoke(
39
- self, params: dict, user_approved: bool = False
40
- ) -> StructuredToolResult:
41
- link: str = params["link"]
69
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
70
+ link: str = params.get("link", "")
71
+ # Validate link is not empty
72
+ if not link or not link.strip():
73
+ err_msg = (
74
+ "Runbook link cannot be empty. Please provide a valid runbook path."
75
+ )
76
+ logging.error(err_msg)
77
+ return StructuredToolResult(
78
+ status=StructuredToolResultStatus.ERROR,
79
+ error=err_msg,
80
+ params=params,
81
+ )
42
82
 
83
+ # Build list of allowed search paths
43
84
  search_paths = [DEFAULT_RUNBOOK_SEARCH_PATH]
44
- if self.toolset.config and "additional_search_paths" in self.toolset.config:
45
- search_paths.extend(self.toolset.config["additional_search_paths"])
85
+ if self.additional_search_paths:
86
+ search_paths.extend(self.additional_search_paths)
87
+
88
+ # Validate link is in the available runbooks list OR is a valid path within allowed directories
89
+ if link not in self.available_runbooks:
90
+ # For links not in the catalog, perform strict path validation
91
+ if not link.endswith(".md"):
92
+ err_msg = f"Invalid runbook link '{link}'. Must end with .md extension."
93
+ logging.error(err_msg)
94
+ return StructuredToolResult(
95
+ status=StructuredToolResultStatus.ERROR,
96
+ error=err_msg,
97
+ params=params,
98
+ )
99
+
100
+ # Check if the link would resolve to a valid path within allowed directories
101
+ # This prevents path traversal attacks like ../../secret.md
102
+ is_valid_path = False
103
+ for search_path in search_paths:
104
+ candidate_path = os.path.join(search_path, link)
105
+ # Canonicalize both paths to resolve any .. or . components
106
+ real_search_path = os.path.realpath(search_path)
107
+ real_candidate_path = os.path.realpath(candidate_path)
108
+
109
+ # Check if the resolved path is within the allowed directory
110
+ if (
111
+ real_candidate_path.startswith(real_search_path + os.sep)
112
+ or real_candidate_path == real_search_path
113
+ ):
114
+ if os.path.isfile(real_candidate_path):
115
+ is_valid_path = True
116
+ break
117
+
118
+ if not is_valid_path:
119
+ err_msg = f"Invalid runbook link '{link}'. Must be one of: {', '.join(self.available_runbooks) if self.available_runbooks else 'No runbooks available'}"
120
+ logging.error(err_msg)
121
+ return StructuredToolResult(
122
+ status=StructuredToolResultStatus.ERROR,
123
+ error=err_msg,
124
+ params=params,
125
+ )
46
126
 
47
127
  runbook_path = get_runbook_by_path(link, search_paths)
48
128
 
@@ -116,7 +196,7 @@ class RunbookFetcher(Tool):
116
196
 
117
197
  class RunbookToolset(Toolset):
118
198
  def __init__(self, additional_search_paths: Optional[List[str]] = None):
119
- # Store additional search paths in config
199
+ # Store additional search paths in config for RunbookFetcher to access
120
200
  config = {}
121
201
  if additional_search_paths:
122
202
  config["additional_search_paths"] = additional_search_paths
@@ -126,7 +206,7 @@ class RunbookToolset(Toolset):
126
206
  description="Fetch runbooks",
127
207
  icon_url="https://platform.robusta.dev/demos/runbook.svg",
128
208
  tools=[
129
- RunbookFetcher(self),
209
+ RunbookFetcher(self, additional_search_paths),
130
210
  ],
131
211
  docs_url="https://holmesgpt.dev/data-sources/",
132
212
  tags=[
@@ -5,6 +5,7 @@ from typing import Any, Dict, Tuple, List
5
5
  from holmes.core.tools import (
6
6
  CallablePrerequisite,
7
7
  Tool,
8
+ ToolInvokeContext,
8
9
  ToolParameter,
9
10
  Toolset,
10
11
  ToolsetTag,
@@ -56,7 +57,7 @@ class ServiceNowToolset(Toolset):
56
57
  self.config: Dict = ServiceNowConfig(**config).model_dump()
57
58
  self._session.headers.update(
58
59
  {
59
- "x-sn-apikey": self.config.get("api_key"),
60
+ "x-sn-apikey": self.config.get("api_key"), # type: ignore
60
61
  }
61
62
  )
62
63
 
@@ -115,9 +116,7 @@ class ReturnChangesInTimerange(ServiceNowBaseTool):
115
116
  start = params.get("start", "last hour")
116
117
  return f"{toolset_name_for_one_liner(self.toolset.name)}: Get Change Requests ({start})"
117
118
 
118
- def _invoke(
119
- self, params: dict, user_approved: bool = False
120
- ) -> StructuredToolResult:
119
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
121
120
  parsed_params = {}
122
121
  try:
123
122
  (start, _) = process_timestamps_to_rfc3339(
@@ -160,9 +159,7 @@ class ReturnChange(ServiceNowBaseTool):
160
159
  sys_id = params.get("sys_id", "")
161
160
  return f"{toolset_name_for_one_liner(self.toolset.name)}: Get Change Details ({sys_id})"
162
161
 
163
- def _invoke(
164
- self, params: dict, user_approved: bool = False
165
- ) -> StructuredToolResult:
162
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
166
163
  try:
167
164
  url = "https://{instance}.service-now.com/api/now/v2/table/change_request/{sys_id}".format(
168
165
  instance=self.toolset.config.get("instance"),
@@ -194,9 +191,7 @@ class ReturnChangesWithKeyword(ServiceNowBaseTool):
194
191
  keyword = params.get("keyword", "")
195
192
  return f"{toolset_name_for_one_liner(self.toolset.name)}: Search Changes ({keyword})"
196
193
 
197
- def _invoke(
198
- self, params: dict, user_approved: bool = False
199
- ) -> StructuredToolResult:
194
+ def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
200
195
  parsed_params = {}
201
196
  try:
202
197
  url = f"https://{self.toolset.config.get('instance')}.service-now.com/api/now/v2/table/change_request"
@@ -1,5 +1,5 @@
1
1
  import sentry_sdk
2
- from holmes.core.tools_utils.data_types import ToolCallResult, TruncationMetadata
2
+ from holmes.core.models import ToolCallResult, TruncationMetadata
3
3
 
4
4
 
5
5
  def capture_tool_truncations(truncations: list[TruncationMetadata]):
holmes/utils/stream.py CHANGED
@@ -14,6 +14,7 @@ class StreamEvents(str, Enum):
14
14
  TOOL_RESULT = "tool_calling_result"
15
15
  ERROR = "error"
16
16
  AI_MESSAGE = "ai_message"
17
+ APPROVAL_REQUIRED = "approval_required"
17
18
 
18
19
 
19
20
  class StreamMessage(BaseModel):
@@ -78,14 +79,28 @@ def stream_chat_formatter(
78
79
  try:
79
80
  for message in call_stream:
80
81
  if message.event == StreamEvents.ANSWER_END:
82
+ response_data = {
83
+ "analysis": message.data.get("content"),
84
+ "conversation_history": message.data.get("messages"),
85
+ "follow_up_actions": followups,
86
+ "metadata": message.data.get("metadata") or {},
87
+ }
88
+
89
+ yield create_sse_message(StreamEvents.ANSWER_END.value, response_data)
90
+ elif message.event == StreamEvents.APPROVAL_REQUIRED:
91
+ response_data = {
92
+ "analysis": message.data.get("content"),
93
+ "conversation_history": message.data.get("messages"),
94
+ "follow_up_actions": followups,
95
+ }
96
+
97
+ response_data["requires_approval"] = True
98
+ response_data["pending_approvals"] = message.data.get(
99
+ "pending_approvals", []
100
+ )
101
+
81
102
  yield create_sse_message(
82
- StreamEvents.ANSWER_END.value,
83
- {
84
- "analysis": message.data.get("content"),
85
- "conversation_history": message.data.get("messages"),
86
- "follow_up_actions": followups,
87
- "metadata": message.data.get("metadata") or {},
88
- },
103
+ StreamEvents.APPROVAL_REQUIRED.value, response_data
89
104
  )
90
105
  else:
91
106
  yield create_sse_message(message.event.value, message.data)
holmes/version.py CHANGED
@@ -57,11 +57,41 @@ def get_version() -> str:
57
57
  return __version__
58
58
 
59
59
  # we are running from an unreleased dev version
60
+ archival_file_path = os.path.join(this_path, ".git_archival.json")
61
+ if os.path.exists(archival_file_path):
62
+ try:
63
+ with open(archival_file_path, "r") as f:
64
+ archival_data = json.load(f)
65
+ refs = archival_data.get("refs", "")
66
+ hash_short = archival_data.get("hash-short", "")
67
+
68
+ # Check if Git substitution didn't happen (placeholders are still present)
69
+ if "$Format:" in refs or "$Format:" in hash_short:
70
+ # Placeholders not substituted, skip to next method
71
+ pass
72
+ else:
73
+ # Valid archival data found
74
+ return f"dev-{refs}-{hash_short}"
75
+ except Exception:
76
+ pass
77
+
78
+ # Now try git commands for development environments
60
79
  try:
80
+ env = os.environ.copy()
81
+ # Set ceiling to prevent walking up beyond the project root
82
+ # We want to allow access to holmes/.git but not beyond holmes
83
+ project_root = os.path.dirname(this_path) # holmes
84
+ env["GIT_CEILING_DIRECTORIES"] = os.path.dirname(
85
+ project_root
86
+ ) # holmes's parent
87
+
61
88
  # Get the latest git tag
62
89
  tag = (
63
90
  subprocess.check_output(
64
- ["git", "describe", "--tags"], stderr=subprocess.STDOUT, cwd=this_path
91
+ ["git", "describe", "--tags"],
92
+ stderr=subprocess.STDOUT,
93
+ cwd=this_path,
94
+ env=env,
65
95
  )
66
96
  .decode()
67
97
  .strip()
@@ -73,6 +103,7 @@ def get_version() -> str:
73
103
  ["git", "rev-parse", "--abbrev-ref", "HEAD"],
74
104
  stderr=subprocess.STDOUT,
75
105
  cwd=this_path,
106
+ env=env,
76
107
  )
77
108
  .decode()
78
109
  .strip()
@@ -84,6 +115,7 @@ def get_version() -> str:
84
115
  ["git", "status", "--porcelain"],
85
116
  stderr=subprocess.STDOUT,
86
117
  cwd=this_path,
118
+ env=env,
87
119
  )
88
120
  .decode()
89
121
  .strip()
@@ -95,19 +127,7 @@ def get_version() -> str:
95
127
  except Exception:
96
128
  pass
97
129
 
98
- # we are running without git history, but we still might have git archival data (e.g. if we were pip installed)
99
- archival_file_path = os.path.join(this_path, ".git_archival.json")
100
- if os.path.exists(archival_file_path):
101
- try:
102
- with open(archival_file_path, "r") as f:
103
- archival_data = json.load(f)
104
- return f"dev-{archival_data['refs']}-{archival_data['hash-short']}"
105
- except Exception:
106
- pass
107
-
108
- return "dev-version"
109
-
110
- return "unknown-version"
130
+ return "dev-unknown"
111
131
 
112
132
 
113
133
  @cache
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: holmesgpt
3
- Version: 0.14.1a0
3
+ Version: 0.14.3a0
4
4
  Summary:
5
5
  Author: Natan Yellin
6
6
  Author-email: natan@robusta.dev
@@ -23,10 +23,11 @@ Requires-Dist: certifi (>=2024.7.4,<2025.0.0)
23
23
  Requires-Dist: colorlog (>=6.8.2,<7.0.0)
24
24
  Requires-Dist: confluent-kafka (>=2.6.1,<3.0.0)
25
25
  Requires-Dist: fastapi (>=0.116,<0.117)
26
+ Requires-Dist: httpx[socks] (<0.28)
26
27
  Requires-Dist: humanize (>=4.9.0,<5.0.0)
27
28
  Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
28
29
  Requires-Dist: kubernetes (>=32.0.1,<33.0.0)
29
- Requires-Dist: litellm (>=1.75.4,<2.0.0)
30
+ Requires-Dist: litellm (==1.77.1)
30
31
  Requires-Dist: markdown (>=3.6,<4.0)
31
32
  Requires-Dist: markdownify (>=1.1.0,<2.0.0)
32
33
  Requires-Dist: mcp (==v1.12.2)
@@ -245,14 +246,11 @@ Distributed under the MIT License. See [LICENSE.txt](https://github.com/robusta-
245
246
 
246
247
  ## Community
247
248
 
248
- Join our community meetings to discuss the HolmesGPT roadmap and share feedback:
249
+ Join our community to discuss the HolmesGPT roadmap and share feedback:
249
250
 
250
- 📅 **First Community Meeting:** Thursday, August 21, 2025
251
- - **Time:** 8:00-9:00 AM PT / 11:00 AM-12:00 PM ET / 8:30-9:30 PM IST
252
- - **Where:** [Google Meet](https://meet.google.com/jxc-ujyf-xwy)
253
- - **Agenda:** [Roadmap discussion](https://github.com/orgs/robusta-dev/projects/2), community feedback, and Q&A
254
-
255
- [📝 Meeting Notes](https://docs.google.com/document/d/1sIHCcTivyzrF5XNvos7ZT_UcxEOqgwfawsTbb9wMJe4/edit?tab=t.0) | [📋 Full Details](https://holmesgpt.dev/community/)
251
+ 📹 **First Community Meetup Recording:** [Watch on YouTube](https://youtu.be/slQRc6nlFQU)
252
+ - **Topics:** Roadmap discussion, community feedback, and Q&A
253
+ - **Resources:** [📝 Meeting Notes](https://docs.google.com/document/d/1sIHCcTivyzrF5XNvos7ZT_UcxEOqgwfawsTbb9wMJe4/edit?tab=t.0) | [📋 Community Page](https://holmesgpt.dev/community/)
256
254
 
257
255
  ## Support
258
256