holmesgpt-0.11.5-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release: this version of holmesgpt might be problematic.

Files changed (183)
  1. holmes/.git_archival.json +7 -0
  2. holmes/__init__.py +76 -0
  3. holmes/__init__.py.bak +76 -0
  4. holmes/clients/robusta_client.py +24 -0
  5. holmes/common/env_vars.py +47 -0
  6. holmes/config.py +526 -0
  7. holmes/core/__init__.py +0 -0
  8. holmes/core/conversations.py +578 -0
  9. holmes/core/investigation.py +152 -0
  10. holmes/core/investigation_structured_output.py +264 -0
  11. holmes/core/issue.py +54 -0
  12. holmes/core/llm.py +250 -0
  13. holmes/core/models.py +157 -0
  14. holmes/core/openai_formatting.py +51 -0
  15. holmes/core/performance_timing.py +72 -0
  16. holmes/core/prompt.py +42 -0
  17. holmes/core/resource_instruction.py +17 -0
  18. holmes/core/runbooks.py +26 -0
  19. holmes/core/safeguards.py +120 -0
  20. holmes/core/supabase_dal.py +540 -0
  21. holmes/core/tool_calling_llm.py +798 -0
  22. holmes/core/tools.py +566 -0
  23. holmes/core/tools_utils/__init__.py +0 -0
  24. holmes/core/tools_utils/tool_executor.py +65 -0
  25. holmes/core/tools_utils/toolset_utils.py +52 -0
  26. holmes/core/toolset_manager.py +418 -0
  27. holmes/interactive.py +229 -0
  28. holmes/main.py +1041 -0
  29. holmes/plugins/__init__.py +0 -0
  30. holmes/plugins/destinations/__init__.py +6 -0
  31. holmes/plugins/destinations/slack/__init__.py +2 -0
  32. holmes/plugins/destinations/slack/plugin.py +163 -0
  33. holmes/plugins/interfaces.py +32 -0
  34. holmes/plugins/prompts/__init__.py +48 -0
  35. holmes/plugins/prompts/_current_date_time.jinja2 +1 -0
  36. holmes/plugins/prompts/_default_log_prompt.jinja2 +11 -0
  37. holmes/plugins/prompts/_fetch_logs.jinja2 +36 -0
  38. holmes/plugins/prompts/_general_instructions.jinja2 +86 -0
  39. holmes/plugins/prompts/_global_instructions.jinja2 +12 -0
  40. holmes/plugins/prompts/_runbook_instructions.jinja2 +13 -0
  41. holmes/plugins/prompts/_toolsets_instructions.jinja2 +56 -0
  42. holmes/plugins/prompts/generic_ask.jinja2 +36 -0
  43. holmes/plugins/prompts/generic_ask_conversation.jinja2 +32 -0
  44. holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +50 -0
  45. holmes/plugins/prompts/generic_investigation.jinja2 +42 -0
  46. holmes/plugins/prompts/generic_post_processing.jinja2 +13 -0
  47. holmes/plugins/prompts/generic_ticket.jinja2 +12 -0
  48. holmes/plugins/prompts/investigation_output_format.jinja2 +32 -0
  49. holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +84 -0
  50. holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +39 -0
  51. holmes/plugins/runbooks/README.md +22 -0
  52. holmes/plugins/runbooks/__init__.py +100 -0
  53. holmes/plugins/runbooks/catalog.json +14 -0
  54. holmes/plugins/runbooks/jira.yaml +12 -0
  55. holmes/plugins/runbooks/kube-prometheus-stack.yaml +10 -0
  56. holmes/plugins/runbooks/networking/dns_troubleshooting_instructions.md +66 -0
  57. holmes/plugins/runbooks/upgrade/upgrade_troubleshooting_instructions.md +44 -0
  58. holmes/plugins/sources/github/__init__.py +77 -0
  59. holmes/plugins/sources/jira/__init__.py +123 -0
  60. holmes/plugins/sources/opsgenie/__init__.py +93 -0
  61. holmes/plugins/sources/pagerduty/__init__.py +147 -0
  62. holmes/plugins/sources/prometheus/__init__.py +0 -0
  63. holmes/plugins/sources/prometheus/models.py +104 -0
  64. holmes/plugins/sources/prometheus/plugin.py +154 -0
  65. holmes/plugins/toolsets/__init__.py +171 -0
  66. holmes/plugins/toolsets/aks-node-health.yaml +65 -0
  67. holmes/plugins/toolsets/aks.yaml +86 -0
  68. holmes/plugins/toolsets/argocd.yaml +70 -0
  69. holmes/plugins/toolsets/atlas_mongodb/instructions.jinja2 +8 -0
  70. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +307 -0
  71. holmes/plugins/toolsets/aws.yaml +76 -0
  72. holmes/plugins/toolsets/azure_sql/__init__.py +0 -0
  73. holmes/plugins/toolsets/azure_sql/apis/alert_monitoring_api.py +600 -0
  74. holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +309 -0
  75. holmes/plugins/toolsets/azure_sql/apis/connection_failure_api.py +445 -0
  76. holmes/plugins/toolsets/azure_sql/apis/connection_monitoring_api.py +251 -0
  77. holmes/plugins/toolsets/azure_sql/apis/storage_analysis_api.py +317 -0
  78. holmes/plugins/toolsets/azure_sql/azure_base_toolset.py +55 -0
  79. holmes/plugins/toolsets/azure_sql/azure_sql_instructions.jinja2 +137 -0
  80. holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +183 -0
  81. holmes/plugins/toolsets/azure_sql/install.md +66 -0
  82. holmes/plugins/toolsets/azure_sql/tools/__init__.py +1 -0
  83. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +324 -0
  84. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +243 -0
  85. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +205 -0
  86. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +249 -0
  87. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +373 -0
  88. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +237 -0
  89. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +172 -0
  90. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +170 -0
  91. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +188 -0
  92. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +180 -0
  93. holmes/plugins/toolsets/azure_sql/utils.py +83 -0
  94. holmes/plugins/toolsets/bash/__init__.py +0 -0
  95. holmes/plugins/toolsets/bash/bash_instructions.jinja2 +14 -0
  96. holmes/plugins/toolsets/bash/bash_toolset.py +208 -0
  97. holmes/plugins/toolsets/bash/common/bash.py +52 -0
  98. holmes/plugins/toolsets/bash/common/config.py +14 -0
  99. holmes/plugins/toolsets/bash/common/stringify.py +25 -0
  100. holmes/plugins/toolsets/bash/common/validators.py +24 -0
  101. holmes/plugins/toolsets/bash/grep/__init__.py +52 -0
  102. holmes/plugins/toolsets/bash/kubectl/__init__.py +100 -0
  103. holmes/plugins/toolsets/bash/kubectl/constants.py +96 -0
  104. holmes/plugins/toolsets/bash/kubectl/kubectl_describe.py +66 -0
  105. holmes/plugins/toolsets/bash/kubectl/kubectl_events.py +88 -0
  106. holmes/plugins/toolsets/bash/kubectl/kubectl_get.py +108 -0
  107. holmes/plugins/toolsets/bash/kubectl/kubectl_logs.py +20 -0
  108. holmes/plugins/toolsets/bash/kubectl/kubectl_run.py +46 -0
  109. holmes/plugins/toolsets/bash/kubectl/kubectl_top.py +81 -0
  110. holmes/plugins/toolsets/bash/parse_command.py +103 -0
  111. holmes/plugins/toolsets/confluence.yaml +19 -0
  112. holmes/plugins/toolsets/consts.py +5 -0
  113. holmes/plugins/toolsets/coralogix/api.py +158 -0
  114. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +103 -0
  115. holmes/plugins/toolsets/coralogix/utils.py +181 -0
  116. holmes/plugins/toolsets/datadog.py +153 -0
  117. holmes/plugins/toolsets/docker.yaml +46 -0
  118. holmes/plugins/toolsets/git.py +756 -0
  119. holmes/plugins/toolsets/grafana/__init__.py +0 -0
  120. holmes/plugins/toolsets/grafana/base_grafana_toolset.py +54 -0
  121. holmes/plugins/toolsets/grafana/common.py +68 -0
  122. holmes/plugins/toolsets/grafana/grafana_api.py +31 -0
  123. holmes/plugins/toolsets/grafana/loki_api.py +89 -0
  124. holmes/plugins/toolsets/grafana/tempo_api.py +124 -0
  125. holmes/plugins/toolsets/grafana/toolset_grafana.py +102 -0
  126. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +102 -0
  127. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +10 -0
  128. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +299 -0
  129. holmes/plugins/toolsets/grafana/trace_parser.py +195 -0
  130. holmes/plugins/toolsets/helm.yaml +42 -0
  131. holmes/plugins/toolsets/internet/internet.py +275 -0
  132. holmes/plugins/toolsets/internet/notion.py +137 -0
  133. holmes/plugins/toolsets/kafka.py +638 -0
  134. holmes/plugins/toolsets/kubernetes.yaml +255 -0
  135. holmes/plugins/toolsets/kubernetes_logs.py +426 -0
  136. holmes/plugins/toolsets/kubernetes_logs.yaml +42 -0
  137. holmes/plugins/toolsets/logging_utils/__init__.py +0 -0
  138. holmes/plugins/toolsets/logging_utils/logging_api.py +217 -0
  139. holmes/plugins/toolsets/logging_utils/types.py +0 -0
  140. holmes/plugins/toolsets/mcp/toolset_mcp.py +135 -0
  141. holmes/plugins/toolsets/newrelic.py +222 -0
  142. holmes/plugins/toolsets/opensearch/__init__.py +0 -0
  143. holmes/plugins/toolsets/opensearch/opensearch.py +245 -0
  144. holmes/plugins/toolsets/opensearch/opensearch_logs.py +151 -0
  145. holmes/plugins/toolsets/opensearch/opensearch_traces.py +211 -0
  146. holmes/plugins/toolsets/opensearch/opensearch_traces_instructions.jinja2 +12 -0
  147. holmes/plugins/toolsets/opensearch/opensearch_utils.py +166 -0
  148. holmes/plugins/toolsets/prometheus/prometheus.py +818 -0
  149. holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +38 -0
  150. holmes/plugins/toolsets/rabbitmq/api.py +398 -0
  151. holmes/plugins/toolsets/rabbitmq/rabbitmq_instructions.jinja2 +37 -0
  152. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +222 -0
  153. holmes/plugins/toolsets/robusta/__init__.py +0 -0
  154. holmes/plugins/toolsets/robusta/robusta.py +235 -0
  155. holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +24 -0
  156. holmes/plugins/toolsets/runbook/__init__.py +0 -0
  157. holmes/plugins/toolsets/runbook/runbook_fetcher.py +78 -0
  158. holmes/plugins/toolsets/service_discovery.py +92 -0
  159. holmes/plugins/toolsets/servicenow/install.md +37 -0
  160. holmes/plugins/toolsets/servicenow/instructions.jinja2 +3 -0
  161. holmes/plugins/toolsets/servicenow/servicenow.py +198 -0
  162. holmes/plugins/toolsets/slab.yaml +20 -0
  163. holmes/plugins/toolsets/utils.py +137 -0
  164. holmes/plugins/utils.py +14 -0
  165. holmes/utils/__init__.py +0 -0
  166. holmes/utils/cache.py +84 -0
  167. holmes/utils/cert_utils.py +40 -0
  168. holmes/utils/default_toolset_installation_guide.jinja2 +44 -0
  169. holmes/utils/definitions.py +13 -0
  170. holmes/utils/env.py +53 -0
  171. holmes/utils/file_utils.py +56 -0
  172. holmes/utils/global_instructions.py +20 -0
  173. holmes/utils/holmes_status.py +22 -0
  174. holmes/utils/holmes_sync_toolsets.py +80 -0
  175. holmes/utils/markdown_utils.py +55 -0
  176. holmes/utils/pydantic_utils.py +54 -0
  177. holmes/utils/robusta.py +10 -0
  178. holmes/utils/tags.py +97 -0
  179. holmesgpt-0.11.5.dist-info/LICENSE.txt +21 -0
  180. holmesgpt-0.11.5.dist-info/METADATA +400 -0
  181. holmesgpt-0.11.5.dist-info/RECORD +183 -0
  182. holmesgpt-0.11.5.dist-info/WHEEL +4 -0
  183. holmesgpt-0.11.5.dist-info/entry_points.txt +3 -0
holmes/plugins/toolsets/kubernetes_logs.yaml
@@ -0,0 +1,42 @@
+ toolsets:
+   kubernetes/logs:
+     description: "Read pod logs"
+     docs_url: "https://docs.robusta.dev/master/configuration/holmesgpt/toolsets/kubernetes.html#logs"
+     icon_url: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRPKA-U9m5BxYQDF1O7atMfj9EMMXEoGu4t0Q&s"
+     tags:
+       - core
+     prerequisites:
+       - command: "kubectl version --client"
+
+     tools:
+       - name: "kubectl_previous_logs"
+         description: "Run `kubectl logs --previous` on a single Kubernetes pod. Used to fetch logs for a pod that crashed and see logs from before the crash. Never give a deployment name or a resource that is not a pod."
+         command: "kubectl logs {{pod_name}} -n {{ namespace }} --previous"
+
+       - name: "kubectl_previous_logs_all_containers"
+         description: "Run `kubectl logs --previous` on a single Kubernetes pod. Used to fetch logs for a pod that crashed and see logs from before the crash."
+         command: "kubectl logs {{pod_name}} -n {{ namespace }} --previous --all-containers"
+
+       - name: "kubectl_container_previous_logs"
+         description: "Run `kubectl logs --previous` on a single container of a Kubernetes pod. Used to fetch logs for a pod that crashed and see logs from before the crash."
+         command: "kubectl logs {{pod_name}} -c {{container_name}} -n {{ namespace }} --previous"
+
+       - name: "kubectl_logs"
+         description: "Run `kubectl logs` on a single Kubernetes pod. Never give a deployment name or a resource that is not a pod."
+         command: "kubectl logs {{pod_name}} -n {{ namespace }}"
+
+       - name: "kubectl_logs_all_containers"
+         description: "Run `kubectl logs` on all containers within a single Kubernetes pod."
+         command: "kubectl logs {{pod_name}} -n {{ namespace }} --all-containers"
+
+       - name: "kubectl_container_logs"
+         description: "Run `kubectl logs` on a single container within a Kubernetes pod. This is to get the logs of a specific container in a multi-container pod."
+         command: "kubectl logs {{pod_name}} -c {{container_name}} -n {{ namespace }} "
+
+       - name: "kubectl_logs_grep"
+         description: "Search for a specific term in the logs of a single Kubernetes pod. Only provide a pod name, not a deployment or other resource."
+         command: "kubectl logs {{ pod_name }} -n {{ namespace }} | grep {{ search_term }}"
+
+       - name: "kubectl_logs_all_containers_grep"
+         description: "Search for a specific term in the logs of a single Kubernetes pod across all of its containers. Only provide a pod name, not a deployment or other resource."
+         command: "kubectl logs {{pod_name}} -n {{ namespace }} --all-containers | grep {{ search_term }}"
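The command fields above are Jinja2-style templates ({{pod_name}}, {{ namespace }}, {{container_name}}, {{ search_term }}). As a minimal illustrative sketch only (not HolmesGPT's own tool runner), this is how such a template renders once parameters are supplied; the jinja2 call and the pod/namespace values are assumptions for the example:

from jinja2 import Template

# Hypothetical parameter values for illustration.
command = Template("kubectl logs {{pod_name}} -n {{ namespace }} --previous").render(
    pod_name="checkout-7f9c6d-abcde",
    namespace="prod",
)
print(command)  # kubectl logs checkout-7f9c6d-abcde -n prod --previous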
holmes/plugins/toolsets/logging_utils/logging_api.py
@@ -0,0 +1,217 @@
+ from abc import ABC, abstractmethod
+ from datetime import datetime, timedelta
+ import logging
+ from typing import Optional
+
+ from pydantic import BaseModel
+ from datetime import timezone
+ from holmes.core.tools import (
+     StructuredToolResult,
+     Tool,
+     ToolParameter,
+     Toolset,
+ )
+ from holmes.plugins.toolsets.utils import get_param_or_raise
+
+ # Default values for log fetching
+ DEFAULT_LOG_LIMIT = 2000
+ DEFAULT_TIME_SPAN_SECONDS = 3600
+
+ POD_LOGGING_TOOL_NAME = "fetch_pod_logs"
+
+
+ class LoggingConfig(BaseModel):
+     """Base configuration for all logging backends"""
+
+     pass
+
+
+ class FetchPodLogsParams(BaseModel):
+     namespace: str
+     pod_name: str
+     start_time: Optional[str] = None
+     end_time: Optional[str] = None
+     filter: Optional[str] = None
+     limit: Optional[int] = None
+
+
+ class BasePodLoggingToolset(Toolset, ABC):
+     """Base class for all logging toolsets"""
+
+     @abstractmethod
+     def fetch_pod_logs(self, params: FetchPodLogsParams) -> StructuredToolResult:
+         pass
+
+
+ class PodLoggingTool(Tool):
+     """Common tool for fetching pod logs across different logging backends"""
+
+     def __init__(self, toolset: BasePodLoggingToolset):
+         super().__init__(
+             name=POD_LOGGING_TOOL_NAME,
+             description="Fetch logs for a Kubernetes pod",
+             parameters={
+                 "pod_name": ToolParameter(
+                     description="The exact kubernetes pod name",
+                     type="string",
+                     required=True,
+                 ),
+                 "namespace": ToolParameter(
+                     description="Kubernetes namespace", type="string", required=True
+                 ),
+                 "start_time": ToolParameter(
+                     description="Start time for logs. Can be an RFC3339 formatted timestamp (e.g. '2023-03-01T10:30:00Z') for absolute time or a negative integer (e.g. -3600) for relative seconds before end_time.",
+                     type="string",
+                     required=False,
+                 ),
+                 "end_time": ToolParameter(
+                     description="End time for logs. Must be an RFC3339 formatted timestamp (e.g. '2023-03-01T12:30:00Z'). If not specified, defaults to current time.",
+                     type="string",
+                     required=False,
+                 ),
+                 "limit": ToolParameter(
+                     description="Maximum number of logs to return",
+                     type="integer",
+                     required=False,
+                 ),
+                 "filter": ToolParameter(
+                     description="An optional keyword or sentence to filter the logs",
+                     type="string",
+                     required=False,
+                 ),
+             },
+         )
+         self._toolset = toolset
+
+     def _invoke(self, params: dict) -> StructuredToolResult:
+         structured_params = FetchPodLogsParams(
+             namespace=get_param_or_raise(params, "namespace"),
+             pod_name=get_param_or_raise(params, "pod_name"),
+             start_time=params.get("start_time"),
+             end_time=params.get("end_time"),
+             filter=params.get("filter"),
+             limit=params.get("limit"),
+         )
+
+         result = self._toolset.fetch_pod_logs(
+             params=structured_params,
+         )
+
+         return result
+
+     def get_parameterized_one_liner(self, params: dict) -> str:
+         """Generate a one-line description of this tool invocation"""
+         namespace = params.get("namespace", "unknown-namespace")
+         pod_name = params.get("pod_name", "unknown-pod")
+
+         start_time = params.get("start_time")
+         end_time = params.get("end_time")
+         filter = params.get("filter")
+         limit = params.get("limit")
+
+         extra_params_str = ""
+
+         if start_time and not end_time:
+             extra_params_str += f" start_time={start_time}"
+         elif not start_time and end_time:
+             extra_params_str += f" end_time={end_time}"
+         elif start_time and end_time:
+             extra_params_str += f" time range={start_time}/{end_time}"
+
+         if filter:
+             extra_params_str += f" filter={filter}"
+         if limit:
+             extra_params_str += f" limit={limit}"
+
+         return f"Fetching logs for pod {pod_name} in namespace {namespace}.{extra_params_str}"
+
+
+ def process_time_parameters(
+     start_time: Optional[str],
+     end_time: Optional[str],
+     default_span_seconds: int = DEFAULT_TIME_SPAN_SECONDS,
+ ) -> tuple[Optional[str], Optional[str]]:
+     """
+     Convert time parameters to standard RFC3339 format
+
+     Args:
+         start_time: Either RFC3339 timestamp or negative integer (seconds before end)
+         end_time: RFC3339 timestamp or None (defaults to now)
+         default_span_seconds: Default time span if start_time not provided
+
+     Returns:
+         Tuple of (start_time, end_time) both in RFC3339 format or None
+     """
+     # Process end time first (as start might depend on it)
+     now = datetime.now(timezone.utc)
+
+     # Handle end_time
+     processed_end_time = None
+     if end_time:
+         try:
+             # Check if it's already in RFC3339 format
+             processed_end_time = end_time
+             datetime.fromisoformat(end_time.replace("Z", "+00:00"))
+         except (ValueError, TypeError):
+             # If not a valid RFC3339, log the error and use current time
+             logging.warning(f"Invalid end_time format: {end_time}, using current time")
+             processed_end_time = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+     else:
+         # Default to current time
+         processed_end_time = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+     # Handle start_time
+     processed_start_time = None
+     if start_time:
+         try:
+             # Check if it's a negative integer (relative time)
+             if isinstance(start_time, int) or (
+                 isinstance(start_time, str)
+                 and start_time.startswith("-")
+                 and start_time[1:].isdigit()
+             ):
+                 # Convert to seconds before end_time
+                 seconds_before = abs(int(start_time))
+
+                 # Parse end_time
+                 if processed_end_time:
+                     end_datetime = datetime.fromisoformat(
+                         processed_end_time.replace("Z", "+00:00")
+                     )
+                 else:
+                     end_datetime = now
+
+                 # Calculate start_time
+                 start_datetime = end_datetime - timedelta(seconds=seconds_before)
+                 processed_start_time = start_datetime.strftime("%Y-%m-%dT%H:%M:%SZ")
+             else:
+                 # Assume it's RFC3339
+                 processed_start_time = start_time
+                 datetime.fromisoformat(start_time.replace("Z", "+00:00"))
+         except (ValueError, TypeError):
+             # If not a valid format, use default
+             logging.warning(
+                 f"Invalid start_time format: {start_time}, using default time span"
+             )
+             if processed_end_time:
+                 end_datetime = datetime.fromisoformat(
+                     processed_end_time.replace("Z", "+00:00")
+                 )
+             else:
+                 end_datetime = now
+
+             start_datetime = end_datetime - timedelta(seconds=default_span_seconds)
+             processed_start_time = start_datetime.strftime("%Y-%m-%dT%H:%M:%SZ")
+     else:
+         # Default to default_span_seconds before end_time
+         if processed_end_time:
+             end_datetime = datetime.fromisoformat(
+                 processed_end_time.replace("Z", "+00:00")
+             )
+         else:
+             end_datetime = now
+
+         start_datetime = end_datetime - timedelta(seconds=default_span_seconds)
+         processed_start_time = start_datetime.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+     return processed_start_time, processed_end_time
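As a reference for the time handling above, a minimal usage sketch of process_time_parameters (assuming the wheel is installed so the module path resolves; the timestamps are example values):

from holmes.plugins.toolsets.logging_utils.logging_api import process_time_parameters

# Relative start: 1800 seconds before the given end time.
start, end = process_time_parameters("-1800", "2023-03-01T12:30:00Z")
print(start, end)  # 2023-03-01T12:00:00Z 2023-03-01T12:30:00Z

# No inputs: defaults to the last DEFAULT_TIME_SPAN_SECONDS (3600s), ending now.
start, end = process_time_parameters(None, None)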
holmes/plugins/toolsets/mcp/toolset_mcp.py
@@ -0,0 +1,135 @@
+ from holmes.core.tools import (
+     Toolset,
+     Tool,
+     ToolParameter,
+     StructuredToolResult,
+     ToolResultStatus,
+     CallablePrerequisite,
+ )
+
+ from typing import Dict, Any, List, Optional
+ from mcp.client.session import ClientSession
+ from mcp.client.sse import sse_client
+
+ from mcp.types import Tool as MCP_Tool
+ from mcp.types import CallToolResult
+
+ import asyncio
+ from pydantic import Field, AnyUrl, field_validator
+ from typing import Tuple
+ import logging
+
+
+ class RemoteMCPTool(Tool):
+     url: str
+     headers: Optional[Dict[str, str]] = None
+
+     def _invoke(self, params: Dict) -> StructuredToolResult:
+         try:
+             return asyncio.run(self._invoke_async(params))
+         except Exception as e:
+             return StructuredToolResult(
+                 status=ToolResultStatus.ERROR,
+                 error=str(e.args),
+                 params=params,
+                 invocation=f"MCPtool {self.name} with params {params}",
+             )
+
+     async def _invoke_async(self, params: Dict) -> StructuredToolResult:
+         async with sse_client(self.url, self.headers) as (read_stream, write_stream):
+             async with ClientSession(read_stream, write_stream) as session:
+                 _ = await session.initialize()
+                 tool_result: CallToolResult = await session.call_tool(self.name, params)
+
+                 merged_text = " ".join(
+                     c.text for c in tool_result.content if c.type == "text"
+                 )
+                 return StructuredToolResult(
+                     status=(
+                         ToolResultStatus.ERROR
+                         if tool_result.isError
+                         else ToolResultStatus.SUCCESS
+                     ),
+                     data=merged_text,
+                     params=params,
+                     invocation=f"MCPtool {self.name} with params {params}",
+                 )
+
+     @classmethod
+     def create(cls, url: str, tool: MCP_Tool, headers: Optional[Dict[str, str]] = None):
+         parameters = cls.parse_input_schema(tool.inputSchema)
+         return cls(
+             url=url,
+             name=tool.name,
+             description=tool.description or "",
+             parameters=parameters,
+             headers=headers,
+         )
+
+     @classmethod
+     def parse_input_schema(
+         cls, input_schema: dict[str, Any]
+     ) -> Dict[str, ToolParameter]:
+         required_list = input_schema.get("required", [])
+         schema_params = input_schema.get("properties", {})
+         parameters = {}
+         for key, val in schema_params.items():
+             parameters[key] = ToolParameter(
+                 description=val.get("description"),
+                 type=val.get("type", "string"),
+                 required=key in required_list,
+             )
+
+         return parameters
+
+     def get_parameterized_one_liner(self, params: Dict) -> str:
+         return f"Call mcp server {self.url} tool {self.name} with params {str(params)}"
+
+
+ class RemoteMCPToolset(Toolset):
+     url: AnyUrl
+     tools: List[RemoteMCPTool] = Field(default_factory=list)  # type: ignore
+     icon_url: str = "https://registry.npmmirror.com/@lobehub/icons-static-png/1.46.0/files/light/mcp.png"
+
+     def model_post_init(self, __context: Any) -> None:
+         self.prerequisites = [CallablePrerequisite(callable=self.init_server_tools)]
+
+     def get_headers(self) -> Optional[Dict[str, str]]:
+         return self.config and self.config.get("headers")
+
+     @field_validator("url", mode="before")
+     def append_sse_if_missing(cls, v):
+         if isinstance(v, str) and not v.rstrip("/").endswith("/sse"):
+             v = v.rstrip("/") + "/sse"
+         return v
+
+     # used as a CallablePrerequisite, config added for that case.
+     def init_server_tools(self, config: dict[str, Any]) -> Tuple[bool, str]:
+         try:
+             tools_result = asyncio.run(self._get_server_tools())
+             self.tools = [
+                 RemoteMCPTool.create(str(self.url), tool, self.get_headers())
+                 for tool in tools_result.tools
+             ]
+
+             if not self.tools:
+                 logging.warning(f"mcp server {self.name} loaded 0 tools.")
+             return (True, "")
+         except Exception as e:
+             # using e.args, the asyncio wrapper could stack another exception this helps printing them all.
+             return (
+                 False,
+                 f"Failed to load mcp server {self.name} {self.url} {str(e.args)}",
+             )
+
+     async def _get_server_tools(self):
+         async with sse_client(str(self.url), headers=self.get_headers()) as (
+             read_stream,
+             write_stream,
+         ):
+             async with ClientSession(read_stream, write_stream) as session:
+                 _ = await session.initialize()
+                 return await session.list_tools()
+
+     def get_example_config(self) -> Dict[str, Any]:
+         return {}
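A minimal sketch of the schema-to-parameter conversion done by RemoteMCPTool.parse_input_schema above (assuming the wheel is installed; the JSON schema shown is hypothetical, not from a real MCP server):

from holmes.plugins.toolsets.mcp.toolset_mcp import RemoteMCPTool

schema = {  # hypothetical MCP tool input schema
    "required": ["query"],
    "properties": {
        "query": {"type": "string", "description": "Search query"},
        "limit": {"type": "integer", "description": "Maximum number of results"},
    },
}
params = RemoteMCPTool.parse_input_schema(schema)
print(params["query"].required, params["limit"].required)  # True False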
holmes/plugins/toolsets/newrelic.py
@@ -0,0 +1,222 @@
+ import requests  # type: ignore
+ import logging
+ from typing import Any, Optional, Dict
+ from holmes.core.tools import (
+     CallablePrerequisite,
+     Tool,
+     ToolParameter,
+     Toolset,
+     ToolsetTag,
+ )
+ from pydantic import BaseModel
+ from holmes.core.tools import StructuredToolResult, ToolResultStatus
+
+
+ class BaseNewRelicTool(Tool):
+     toolset: "NewRelicToolset"
+
+
+ class GetLogs(BaseNewRelicTool):
+     def __init__(self, toolset: "NewRelicToolset"):
+         super().__init__(
+             name="newrelic_get_logs",
+             description="Retrieve logs from New Relic",
+             parameters={
+                 "app": ToolParameter(
+                     description="The application name to filter logs",
+                     type="string",
+                     required=True,
+                 ),
+                 "since": ToolParameter(
+                     description="Time range to fetch logs (e.g., '1 hour ago')",
+                     type="string",
+                     required=True,
+                 ),
+             },
+             toolset=toolset,
+         )
+
+     def _invoke(self, params: Any) -> StructuredToolResult:
+         def success(msg: Any) -> StructuredToolResult:
+             return StructuredToolResult(
+                 status=ToolResultStatus.SUCCESS,
+                 data=msg,
+                 params=params,
+             )
+
+         def error(msg: str) -> StructuredToolResult:
+             return StructuredToolResult(
+                 status=ToolResultStatus.ERROR,
+                 data=msg,
+                 params=params,
+             )
+
+         app = params.get("app")
+         since = params.get("since")
+
+         query = {
+             "query": f"""
+             {{
+               actor {{
+                 account(id: {self.toolset.nr_account_id}) {{
+                   nrql(query: \"SELECT * FROM Log WHERE app = '{app}' SINCE {since}\") {{
+                     results
+                   }}
+                 }}
+               }}
+             }}
+             """
+         }
+
+         url = "https://api.newrelic.com/graphql"
+         headers = {
+             "Content-Type": "application/json",
+             "Api-Key": self.toolset.nr_api_key,
+         }
+
+         try:
+             logging.info(f"Getting New Relic logs for app {app} since {since}")
+             response = requests.post(url, headers=headers, json=query)
+
+             if response.status_code == 200:
+                 return success(response.json())
+             else:
+                 return error(
+                     f"Failed to fetch logs. Status code: {response.status_code}\n{response.text}"
+                 )
+         except Exception as e:
+             logging.exception("Exception while fetching logs")
+             return error(f"Error while fetching logs: {str(e)}")
+
+     def get_parameterized_one_liner(self, params) -> str:
+         return f"newrelic GetLogs(app='{params.get('app')}', since='{params.get('since')}')"
+
+
+ class GetTraces(BaseNewRelicTool):
+     def __init__(self, toolset: "NewRelicToolset"):
+         super().__init__(
+             name="newrelic_get_traces",
+             description="Retrieve traces from New Relic",
+             parameters={
+                 "duration": ToolParameter(
+                     description="Minimum trace duration in seconds",
+                     type="number",
+                     required=True,
+                 ),
+                 "trace_id": ToolParameter(
+                     description="Specific trace ID to fetch details (optional)",
+                     type="string",
+                     required=False,
+                 ),
+             },
+             toolset=toolset,
+         )
+
+     def _invoke(self, params: Any) -> StructuredToolResult:
+         def success(msg: Any) -> StructuredToolResult:
+             return StructuredToolResult(
+                 status=ToolResultStatus.SUCCESS,
+                 data=msg,
+                 params=params,
+             )
+
+         def error(msg: str) -> StructuredToolResult:
+             return StructuredToolResult(
+                 status=ToolResultStatus.ERROR,
+                 data=msg,
+                 params=params,
+             )
+
+         duration = params.get("duration")
+         trace_id = params.get("trace_id")
+
+         if trace_id:
+             query_string = f"SELECT * FROM Span WHERE trace.id = '{trace_id}' and duration.ms > {duration * 1000} and span.kind != 'internal'"
+         else:
+             query_string = f"SELECT * FROM Span WHERE duration.ms > {duration * 1000} and span.kind != 'internal'"
+
+         query = {
+             "query": f"""
+             {{
+               actor {{
+                 account(id: {self.toolset.nr_account_id}) {{
+                   nrql(query: \"{query_string}\") {{
+                     results
+                   }}
+                 }}
+               }}
+             }}
+             """
+         }
+
+         url = "https://api.newrelic.com/graphql"
+         headers = {
+             "Content-Type": "application/json",
+             "Api-Key": self.toolset.nr_api_key,
+         }
+
+         try:
+             logging.info(f"Getting New Relic traces with duration > {duration}s")
+             response = requests.post(url, headers=headers, json=query)
+
+             if response.status_code == 200:
+                 return success(response.json())
+             else:
+                 return error(
+                     f"Failed to fetch traces. Status code: {response.status_code}\n{response.text}"
+                 )
+         except Exception as e:
+             logging.exception("Exception while fetching traces")
+             return error(f"Error while fetching traces: {str(e)}")
+
+     def get_parameterized_one_liner(self, params) -> str:
+         if "trace_id" in params and params["trace_id"]:
+             return f"newrelic GetTraces(trace_id='{params.get('trace_id')}')"
+         return f"newrelic GetTraces(duration={params.get('duration')})"
+
+
+ class NewrelicConfig(BaseModel):
+     nr_api_key: Optional[str] = None
+     nr_account_id: Optional[str] = None
+
+
+ class NewRelicToolset(Toolset):
+     nr_api_key: Optional[str] = None
+     nr_account_id: Optional[str] = None
+
+     def __init__(self):
+         super().__init__(
+             name="newrelic",
+             description="Toolset for interacting with New Relic to fetch logs and traces",
+             docs_url="https://docs.newrelic.com/docs/apis/nerdgraph-api/",
+             icon_url="https://companieslogo.com/img/orig/NEWR-de5fcb2e.png?t=1720244493",
+             prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],
+             tools=[
+                 GetLogs(self),
+                 GetTraces(self),
+             ],
+             experimental=True,
+             tags=[ToolsetTag.CORE],
+         )
+
+     def prerequisites_callable(
+         self, config: dict[str, Any]
+     ) -> tuple[bool, Optional[str]]:
+         if not config:
+             return False, "No configuration provided"
+
+         try:
+             nr_config = NewrelicConfig(**config)
+             self.nr_account_id = nr_config.nr_account_id
+             self.nr_api_key = nr_config.nr_api_key
+
+             if not self.nr_account_id or not self.nr_api_key:
+                 return False, "New Relic account ID or API key is missing"
+
+             return True, None
+         except Exception as e:
+             logging.exception("Failed to set up New Relic toolset")
+             return False, str(e)
+
+     def get_example_config(self) -> Dict[str, Any]:
+         return {}
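A minimal sketch of the config shape NewRelicToolset.prerequisites_callable above accepts (assuming the wheel is installed; the credentials are placeholders, not real values):

from holmes.plugins.toolsets.newrelic import NewRelicToolset

toolset = NewRelicToolset()
ok, err = toolset.prerequisites_callable(
    {"nr_api_key": "NRAK-PLACEHOLDER", "nr_account_id": "1234567"}  # placeholder credentials
)
print(ok, err)  # (True, None) when both values are present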