holmesgpt 0.11.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of holmesgpt might be problematic. Click here for more details.
- holmes/.git_archival.json +7 -0
- holmes/__init__.py +76 -0
- holmes/__init__.py.bak +76 -0
- holmes/clients/robusta_client.py +24 -0
- holmes/common/env_vars.py +47 -0
- holmes/config.py +526 -0
- holmes/core/__init__.py +0 -0
- holmes/core/conversations.py +578 -0
- holmes/core/investigation.py +152 -0
- holmes/core/investigation_structured_output.py +264 -0
- holmes/core/issue.py +54 -0
- holmes/core/llm.py +250 -0
- holmes/core/models.py +157 -0
- holmes/core/openai_formatting.py +51 -0
- holmes/core/performance_timing.py +72 -0
- holmes/core/prompt.py +42 -0
- holmes/core/resource_instruction.py +17 -0
- holmes/core/runbooks.py +26 -0
- holmes/core/safeguards.py +120 -0
- holmes/core/supabase_dal.py +540 -0
- holmes/core/tool_calling_llm.py +798 -0
- holmes/core/tools.py +566 -0
- holmes/core/tools_utils/__init__.py +0 -0
- holmes/core/tools_utils/tool_executor.py +65 -0
- holmes/core/tools_utils/toolset_utils.py +52 -0
- holmes/core/toolset_manager.py +418 -0
- holmes/interactive.py +229 -0
- holmes/main.py +1041 -0
- holmes/plugins/__init__.py +0 -0
- holmes/plugins/destinations/__init__.py +6 -0
- holmes/plugins/destinations/slack/__init__.py +2 -0
- holmes/plugins/destinations/slack/plugin.py +163 -0
- holmes/plugins/interfaces.py +32 -0
- holmes/plugins/prompts/__init__.py +48 -0
- holmes/plugins/prompts/_current_date_time.jinja2 +1 -0
- holmes/plugins/prompts/_default_log_prompt.jinja2 +11 -0
- holmes/plugins/prompts/_fetch_logs.jinja2 +36 -0
- holmes/plugins/prompts/_general_instructions.jinja2 +86 -0
- holmes/plugins/prompts/_global_instructions.jinja2 +12 -0
- holmes/plugins/prompts/_runbook_instructions.jinja2 +13 -0
- holmes/plugins/prompts/_toolsets_instructions.jinja2 +56 -0
- holmes/plugins/prompts/generic_ask.jinja2 +36 -0
- holmes/plugins/prompts/generic_ask_conversation.jinja2 +32 -0
- holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +50 -0
- holmes/plugins/prompts/generic_investigation.jinja2 +42 -0
- holmes/plugins/prompts/generic_post_processing.jinja2 +13 -0
- holmes/plugins/prompts/generic_ticket.jinja2 +12 -0
- holmes/plugins/prompts/investigation_output_format.jinja2 +32 -0
- holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +84 -0
- holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +39 -0
- holmes/plugins/runbooks/README.md +22 -0
- holmes/plugins/runbooks/__init__.py +100 -0
- holmes/plugins/runbooks/catalog.json +14 -0
- holmes/plugins/runbooks/jira.yaml +12 -0
- holmes/plugins/runbooks/kube-prometheus-stack.yaml +10 -0
- holmes/plugins/runbooks/networking/dns_troubleshooting_instructions.md +66 -0
- holmes/plugins/runbooks/upgrade/upgrade_troubleshooting_instructions.md +44 -0
- holmes/plugins/sources/github/__init__.py +77 -0
- holmes/plugins/sources/jira/__init__.py +123 -0
- holmes/plugins/sources/opsgenie/__init__.py +93 -0
- holmes/plugins/sources/pagerduty/__init__.py +147 -0
- holmes/plugins/sources/prometheus/__init__.py +0 -0
- holmes/plugins/sources/prometheus/models.py +104 -0
- holmes/plugins/sources/prometheus/plugin.py +154 -0
- holmes/plugins/toolsets/__init__.py +171 -0
- holmes/plugins/toolsets/aks-node-health.yaml +65 -0
- holmes/plugins/toolsets/aks.yaml +86 -0
- holmes/plugins/toolsets/argocd.yaml +70 -0
- holmes/plugins/toolsets/atlas_mongodb/instructions.jinja2 +8 -0
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +307 -0
- holmes/plugins/toolsets/aws.yaml +76 -0
- holmes/plugins/toolsets/azure_sql/__init__.py +0 -0
- holmes/plugins/toolsets/azure_sql/apis/alert_monitoring_api.py +600 -0
- holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +309 -0
- holmes/plugins/toolsets/azure_sql/apis/connection_failure_api.py +445 -0
- holmes/plugins/toolsets/azure_sql/apis/connection_monitoring_api.py +251 -0
- holmes/plugins/toolsets/azure_sql/apis/storage_analysis_api.py +317 -0
- holmes/plugins/toolsets/azure_sql/azure_base_toolset.py +55 -0
- holmes/plugins/toolsets/azure_sql/azure_sql_instructions.jinja2 +137 -0
- holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +183 -0
- holmes/plugins/toolsets/azure_sql/install.md +66 -0
- holmes/plugins/toolsets/azure_sql/tools/__init__.py +1 -0
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +324 -0
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +243 -0
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +205 -0
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +249 -0
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +373 -0
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +237 -0
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +172 -0
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +170 -0
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +188 -0
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +180 -0
- holmes/plugins/toolsets/azure_sql/utils.py +83 -0
- holmes/plugins/toolsets/bash/__init__.py +0 -0
- holmes/plugins/toolsets/bash/bash_instructions.jinja2 +14 -0
- holmes/plugins/toolsets/bash/bash_toolset.py +208 -0
- holmes/plugins/toolsets/bash/common/bash.py +52 -0
- holmes/plugins/toolsets/bash/common/config.py +14 -0
- holmes/plugins/toolsets/bash/common/stringify.py +25 -0
- holmes/plugins/toolsets/bash/common/validators.py +24 -0
- holmes/plugins/toolsets/bash/grep/__init__.py +52 -0
- holmes/plugins/toolsets/bash/kubectl/__init__.py +100 -0
- holmes/plugins/toolsets/bash/kubectl/constants.py +96 -0
- holmes/plugins/toolsets/bash/kubectl/kubectl_describe.py +66 -0
- holmes/plugins/toolsets/bash/kubectl/kubectl_events.py +88 -0
- holmes/plugins/toolsets/bash/kubectl/kubectl_get.py +108 -0
- holmes/plugins/toolsets/bash/kubectl/kubectl_logs.py +20 -0
- holmes/plugins/toolsets/bash/kubectl/kubectl_run.py +46 -0
- holmes/plugins/toolsets/bash/kubectl/kubectl_top.py +81 -0
- holmes/plugins/toolsets/bash/parse_command.py +103 -0
- holmes/plugins/toolsets/confluence.yaml +19 -0
- holmes/plugins/toolsets/consts.py +5 -0
- holmes/plugins/toolsets/coralogix/api.py +158 -0
- holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +103 -0
- holmes/plugins/toolsets/coralogix/utils.py +181 -0
- holmes/plugins/toolsets/datadog.py +153 -0
- holmes/plugins/toolsets/docker.yaml +46 -0
- holmes/plugins/toolsets/git.py +756 -0
- holmes/plugins/toolsets/grafana/__init__.py +0 -0
- holmes/plugins/toolsets/grafana/base_grafana_toolset.py +54 -0
- holmes/plugins/toolsets/grafana/common.py +68 -0
- holmes/plugins/toolsets/grafana/grafana_api.py +31 -0
- holmes/plugins/toolsets/grafana/loki_api.py +89 -0
- holmes/plugins/toolsets/grafana/tempo_api.py +124 -0
- holmes/plugins/toolsets/grafana/toolset_grafana.py +102 -0
- holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +102 -0
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +10 -0
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +299 -0
- holmes/plugins/toolsets/grafana/trace_parser.py +195 -0
- holmes/plugins/toolsets/helm.yaml +42 -0
- holmes/plugins/toolsets/internet/internet.py +275 -0
- holmes/plugins/toolsets/internet/notion.py +137 -0
- holmes/plugins/toolsets/kafka.py +638 -0
- holmes/plugins/toolsets/kubernetes.yaml +255 -0
- holmes/plugins/toolsets/kubernetes_logs.py +426 -0
- holmes/plugins/toolsets/kubernetes_logs.yaml +42 -0
- holmes/plugins/toolsets/logging_utils/__init__.py +0 -0
- holmes/plugins/toolsets/logging_utils/logging_api.py +217 -0
- holmes/plugins/toolsets/logging_utils/types.py +0 -0
- holmes/plugins/toolsets/mcp/toolset_mcp.py +135 -0
- holmes/plugins/toolsets/newrelic.py +222 -0
- holmes/plugins/toolsets/opensearch/__init__.py +0 -0
- holmes/plugins/toolsets/opensearch/opensearch.py +245 -0
- holmes/plugins/toolsets/opensearch/opensearch_logs.py +151 -0
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +211 -0
- holmes/plugins/toolsets/opensearch/opensearch_traces_instructions.jinja2 +12 -0
- holmes/plugins/toolsets/opensearch/opensearch_utils.py +166 -0
- holmes/plugins/toolsets/prometheus/prometheus.py +818 -0
- holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +38 -0
- holmes/plugins/toolsets/rabbitmq/api.py +398 -0
- holmes/plugins/toolsets/rabbitmq/rabbitmq_instructions.jinja2 +37 -0
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +222 -0
- holmes/plugins/toolsets/robusta/__init__.py +0 -0
- holmes/plugins/toolsets/robusta/robusta.py +235 -0
- holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +24 -0
- holmes/plugins/toolsets/runbook/__init__.py +0 -0
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +78 -0
- holmes/plugins/toolsets/service_discovery.py +92 -0
- holmes/plugins/toolsets/servicenow/install.md +37 -0
- holmes/plugins/toolsets/servicenow/instructions.jinja2 +3 -0
- holmes/plugins/toolsets/servicenow/servicenow.py +198 -0
- holmes/plugins/toolsets/slab.yaml +20 -0
- holmes/plugins/toolsets/utils.py +137 -0
- holmes/plugins/utils.py +14 -0
- holmes/utils/__init__.py +0 -0
- holmes/utils/cache.py +84 -0
- holmes/utils/cert_utils.py +40 -0
- holmes/utils/default_toolset_installation_guide.jinja2 +44 -0
- holmes/utils/definitions.py +13 -0
- holmes/utils/env.py +53 -0
- holmes/utils/file_utils.py +56 -0
- holmes/utils/global_instructions.py +20 -0
- holmes/utils/holmes_status.py +22 -0
- holmes/utils/holmes_sync_toolsets.py +80 -0
- holmes/utils/markdown_utils.py +55 -0
- holmes/utils/pydantic_utils.py +54 -0
- holmes/utils/robusta.py +10 -0
- holmes/utils/tags.py +97 -0
- holmesgpt-0.11.5.dist-info/LICENSE.txt +21 -0
- holmesgpt-0.11.5.dist-info/METADATA +400 -0
- holmesgpt-0.11.5.dist-info/RECORD +183 -0
- holmesgpt-0.11.5.dist-info/WHEEL +4 -0
- holmesgpt-0.11.5.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
## Configuration
|
|
2
|
+
|
|
3
|
+
[Full guide for reference](https://www.servicenow.com/docs/bundle/yokohama-platform-security/page/integrate/authentication/task/configure-api-key.html)
|
|
4
|
+
|
|
5
|
+
### Create an inbound authentication profile.
|
|
6
|
+
|
|
7
|
+
1. Navigate to All > System Web Services > API Access Policies > Inbound Authentication Profiles.
|
|
8
|
+
2. Select New.
|
|
9
|
+
3. Select Create API Key authentication profiles
|
|
10
|
+
4. Auth Parameter > add x-sn-apikey: Auth Header
|
|
11
|
+
5. Submit the form.
|
|
12
|
+
|
|
13
|
+
### Create a REST API key
|
|
14
|
+
|
|
15
|
+
1. Navigate to All > System Web Services > API Access Policies > REST API Key.
|
|
16
|
+
2. Select New.
|
|
17
|
+
3. Set name, description and user. Set expiry date if desired. > Submit.
|
|
18
|
+
4. Open the record that was created to view the token generated by the ServiceNow AI Platform for the user.
|
|
19
|
+
|
|
20
|
+
### Create a REST API Access policy
|
|
21
|
+
|
|
22
|
+
1. Navigate to All > System Web Services > REST API Access Policies.
|
|
23
|
+
2. Select New.
|
|
24
|
+
3. REST API = Table API
|
|
25
|
+
4. Uncheck Apply to all tables > Select table > change_request
|
|
26
|
+
5. In the Authentication profile field, select the inbound authentication profile created in step 1 (API Key).
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
Use your `instance name` and `api_key` to set up Service Now configuration.
|
|
30
|
+
```yaml
|
|
31
|
+
toolsets:
|
|
32
|
+
ServiceNow:
|
|
33
|
+
enabled: true
|
|
34
|
+
config:
|
|
35
|
+
api_key: <api-token>
|
|
36
|
+
instance: <dev1234..>
|
|
37
|
+
```
|
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
* ALWAYS fetch changes from servicenow, USE servicenow_return_changes_in_timerange to see changes in the relevant time range.
|
|
2
|
+
* If you are investigating an issue on some subject, USE servicenow_return_changes_with_keyword with the object name to find related changes.
|
|
3
|
+
* If you find a ServiceNow change that seems relevant to your investigation or the user question, USE servicenow_return_change_details with the change sys_id to get further information and improve your answer if possible.
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
import requests # type: ignore
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
from typing import Any, Dict, Tuple, List
|
|
5
|
+
from holmes.core.tools import (
|
|
6
|
+
CallablePrerequisite,
|
|
7
|
+
Tool,
|
|
8
|
+
ToolParameter,
|
|
9
|
+
Toolset,
|
|
10
|
+
ToolsetTag,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
from pydantic import BaseModel, PrivateAttr
|
|
14
|
+
from holmes.core.tools import StructuredToolResult, ToolResultStatus
|
|
15
|
+
from holmes.plugins.toolsets.utils import (
|
|
16
|
+
process_timestamps_to_rfc3339,
|
|
17
|
+
standard_start_datetime_tool_param_description,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
# Default lookback window (one hour, in seconds) for time-ranged change queries.
DEFAULT_TIME_SPAN_SECONDS = 3600
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ServiceNowConfig(BaseModel):
    """Validated configuration for the ServiceNow toolset."""

    # API key from a ServiceNow "REST API Key" record; sent as the
    # "x-sn-apikey" request header by ServiceNowToolset.
    api_key: str
    # Instance subdomain, i.e. the "<instance>" part of
    # https://<instance>.service-now.com (e.g. "dev12345").
    instance: str
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class ServiceNowToolset(Toolset):
    """Toolset that surfaces ServiceNow change requests to the LLM.

    Authenticates against a ServiceNow instance with an API key (sent via
    the "x-sn-apikey" header) and exposes three tools: list changes in a
    time range, fetch one change by sys_id, and search changes by keyword.
    """

    name: str = "ServiceNow"
    description: str = "Database containing changes information related to keys, workloads or any service."
    tags: List[ToolsetTag] = [ToolsetTag.CORE]
    # default_factory gives each toolset instance its own HTTP session.
    # The previous `default=requests.Session()` created ONE session at class
    # definition time, shared (headers included) by every instance.
    _session: requests.Session = PrivateAttr(default_factory=requests.Session)

    def __init__(self):
        super().__init__(
            prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],
            experimental=True,
            tools=[
                ReturnChangesInTimerange(toolset=self),
                ReturnChange(toolset=self),
                ReturnChangesWithKeyword(toolset=self),
            ],
        )
        # LLM usage hints live next to this module in instructions.jinja2.
        instructions_filepath = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "instructions.jinja2")
        )
        self._load_llm_instructions(jinja_template=f"file://{instructions_filepath}")

    def prerequisites_callable(self, config: dict[str, Any]) -> Tuple[bool, str]:
        """Validate the config and probe the API with a 1-row query.

        Returns (True, "") on success, (False, <reason>) otherwise.
        """
        if not config:
            return False, "Missing config credentials."

        try:
            self.config: Dict = ServiceNowConfig(**config).model_dump()
            self._session.headers.update(
                {
                    "x-sn-apikey": self.config.get("api_key"),
                }
            )

            url = f"https://{self.config.get('instance')}.service-now.com/api/now/v2/table/change_request"
            response = self._session.get(url=url, params={"sysparm_limit": 1})

            if response.ok:
                return True, ""
            # Previously a failed probe returned an empty reason string,
            # which made setup failures undiagnosable; report the status.
            return False, f"ServiceNow API probe failed with HTTP {response.status_code}"
        except Exception as e:
            logging.exception(
                "Invalid ServiceNow config. Failed to set up ServiceNow toolset"
            )
            return False, f"Invalid ServiceNow config {e}"

    def get_example_config(self) -> Dict[str, Any]:
        """Return a sample configuration dict for docs/UI purposes."""
        example_config = ServiceNowConfig(
            api_key="now_xxxxxxxxxxxxxxxx", instance="dev12345"
        )
        return example_config.model_dump()
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class ServiceNowBaseTool(Tool):
    """Shared base for ServiceNow tools: response conversion and logging text."""

    toolset: ServiceNowToolset

    def return_result(
        self, response: requests.Response, params: Any, field: str = "result"
    ) -> StructuredToolResult:
        """Convert an HTTP response into a StructuredToolResult.

        Raises for non-2xx responses; reports NO_DATA when the payload's
        `field` key is absent or empty, SUCCESS otherwise.
        """
        response.raise_for_status()
        payload = response.json()
        has_rows = bool(payload.get(field, []))
        status = ToolResultStatus.SUCCESS if has_rows else ToolResultStatus.NO_DATA
        return StructuredToolResult(status=status, data=payload, params=params)

    def get_parameterized_one_liner(self, params) -> str:
        """One-line human-readable description of a tool invocation."""
        return f"ServiceNow {self.name} {params}"
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class ReturnChangesInTimerange(ServiceNowBaseTool):
    """List change requests updated within a time range (default: last hour)."""

    name: str = "servicenow_return_changes_in_timerange"
    description: str = "Returns all changes requests from a specific time range. These changes tickets can apply to all components. default to changes from the last 1 hour."
    parameters: Dict[str, ToolParameter] = {
        "start": ToolParameter(
            description=standard_start_datetime_tool_param_description(
                DEFAULT_TIME_SPAN_SECONDS
            ),
            type="string",
            required=False,
        )
    }

    def _invoke(self, params: Any) -> StructuredToolResult:
        query_params = {}
        try:
            (start, _) = process_timestamps_to_rfc3339(
                start_timestamp=params.get("start"),
                end_timestamp=None,
                default_time_span_seconds=DEFAULT_TIME_SPAN_SECONDS,
            )

            instance = self.toolset.config.get("instance")
            url = f"https://{instance}.service-now.com/api/now/v2/table/change_request"
            # Only summary columns; full details come from the per-change tool.
            query_params["sysparm_fields"] = (
                "sys_id,number,short_description,type,active,sys_updated_on"
            )
            query_params["sysparm_query"] = f"sys_updated_on>={start}"

            response = self.toolset._session.get(url=url, params=query_params)
            return self.return_result(response, query_params)
        except Exception as e:
            logging.exception(self.get_parameterized_one_liner(params))
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                data=f"Exception {self.name}: {str(e)}",
                params=params,
            )
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
class ReturnChange(ServiceNowBaseTool):
    """Fetch the full record of a single change request by its sys_id."""

    name: str = "servicenow_return_change_details"
    description: str = "Returns detailed information for one specific ServiceNow change"
    parameters: Dict[str, ToolParameter] = {
        "sys_id": ToolParameter(
            description="The unique identifier of the change. Use servicenow_return_changes_in_timerange tool to fetch list of changes and use 'sys_id' for further information",
            type="string",
            required=True,
        )
    }

    def _invoke(self, params: Any) -> StructuredToolResult:
        try:
            instance = self.toolset.config.get("instance")
            sys_id = params.get("sys_id")
            url = (
                f"https://{instance}.service-now.com"
                f"/api/now/v2/table/change_request/{sys_id}"
            )
            response = self.toolset._session.get(url=url)
            return self.return_result(response, params)
        except Exception as e:
            logging.exception(self.get_parameterized_one_liner(params))
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                data=f"Exception {self.name}: {str(e)}",
                params=params,
            )
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
class ReturnChangesWithKeyword(ServiceNowBaseTool):
    """Search change requests whose short description contains a keyword."""

    name: str = "servicenow_return_changes_with_keyword"
    description: str = "Returns all changes requests where a keyword is contained in the description. good for finding changes related to a key, workload or any object."
    parameters: Dict[str, ToolParameter] = {
        "keyword": ToolParameter(
            description="key, workload or object name. Keyword that will filter service now changes that are related to this keyword or object.",
            type="string",
            required=True,
        )
    }

    def _invoke(self, params: Any) -> StructuredToolResult:
        query_params = {}
        try:
            instance = self.toolset.config.get("instance")
            url = f"https://{instance}.service-now.com/api/now/v2/table/change_request"
            query_params["sysparm_fields"] = (
                "sys_id,number,short_description,type,active,sys_updated_on"
            )
            # NOTE(review): the keyword is interpolated into sysparm_query
            # unescaped; a keyword containing query operators (e.g. "^")
            # would alter the query. Consider sanitizing untrusted keywords.
            query_params["sysparm_query"] = (
                f"short_descriptionLIKE{params.get('keyword')}"
            )
            response = self.toolset._session.get(url=url, params=query_params)
            return self.return_result(response, query_params)
        except Exception as e:
            logging.exception(self.get_parameterized_one_liner(params))
            return StructuredToolResult(
                status=ToolResultStatus.ERROR,
                data=f"Exception {self.name}: {str(e)}",
                params=params,
            )
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
# Slab toolset: exposes a single curl-based tool for fetching Slab documents
# through the Slab GraphQL API.
toolsets:
  slab:
    description: "Fetches slab pages"
    docs_url: "https://docs.robusta.dev/master/configuration/holmesgpt/toolsets/slab.html"
    icon_url: "https://platform.robusta.dev/demos/slab-mark.svg"
    tags:
      - core
    # Requires curl on the PATH and a SLAB_API_KEY environment variable.
    prerequisites:
      - command: "curl --version"
      - env:
          - SLAB_API_KEY
    tools:
      - name: "fetch_slab_document"
        description: "Fetch a document from slab. Use this to fetch runbooks if they are present before starting your investigation."
        # {{ post_id }} is substituted into the GraphQL query at invocation time.
        command: |
          curl -X POST \
            -H "Authorization: ${SLAB_API_KEY}" \
            -H "Content-Type: application/json" \
            -d '{"query":"query { post(id: \"{{ post_id }}\") { id title content } }"}' \
            https://api.slab.com/v1/graphql
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import datetime
|
|
2
|
+
import time
|
|
3
|
+
from typing import Dict, Optional, Tuple, Union
|
|
4
|
+
|
|
5
|
+
from dateutil import parser # type: ignore
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def standard_start_datetime_tool_param_description(time_span_seconds: int) -> str:
    """Build the shared description text for a tool's `start` datetime parameter."""
    return (
        "Start datetime, inclusive. Should be formatted in rfc3339. "
        "If negative integer, the number of seconds relative to end. "
        f"Defaults to -{time_span_seconds}"
    )
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def is_int(val) -> bool:
    """Return True if int(val) succeeds.

    Accepts ints, numeric strings, and floats (floats truncate, so they
    count as int-like). Non-numeric strings and values int() cannot handle
    at all (e.g. None) return False.
    """
    try:
        int(val)
    except (TypeError, ValueError):
        # TypeError covers non-numeric, non-string inputs such as None,
        # which previously escaped the ValueError-only handler and crashed.
        return False
    return True
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def is_rfc3339(timestamp_str: str) -> bool:
    """Return True when dateutil can parse *timestamp_str* as a datetime.

    NOTE: dateutil is lenient, so this accepts many formats beyond strict
    RFC3339 — it is a "looks like a datetime" check, not a validator.
    """
    try:
        parser.parse(timestamp_str)
    except (ValueError, TypeError):
        return False
    return True
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def to_unix(timestamp_str: str) -> int:
    """Parse a datetime string and return its Unix timestamp in whole seconds."""
    return int(parser.parse(timestamp_str).timestamp())
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def to_unix_ms(timestamp_str: str) -> int:
    """Parse a datetime string and return its Unix timestamp in milliseconds."""
    return int(parser.parse(timestamp_str).timestamp() * 1000)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def unix_nano_to_rfc3339(unix_nano: int) -> str:
    """Convert a Unix timestamp in nanoseconds to an RFC3339 string in UTC.

    Output carries millisecond precision, e.g. "2023-11-14T22:13:20.123Z".
    Uses integer arithmetic throughout: dividing nanoseconds by 1e9 as a
    float loses precision for contemporary timestamps (~1.7e18 ns exceeds
    the 53-bit float mantissa), which could shift the millisecond digits.
    """
    seconds_part, remainder_ns = divmod(unix_nano, 1_000_000_000)
    milliseconds_part = remainder_ns // 1_000_000

    dt = datetime.datetime.fromtimestamp(seconds_part, datetime.timezone.utc)
    return f"{dt.strftime('%Y-%m-%dT%H:%M:%S')}.{milliseconds_part:03d}Z"
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def datetime_to_unix(timestamp_or_datetime_str):
    """Normalize a timestamp to Unix seconds.

    Integer-like values (including negative relative offsets) pass through
    as ints; anything else is parsed as a datetime string.
    """
    if timestamp_or_datetime_str and is_int(timestamp_or_datetime_str):
        return int(timestamp_or_datetime_str)
    return to_unix(timestamp_or_datetime_str)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def unix_to_rfc3339(timestamp: int) -> str:
    """Convert a Unix timestamp in seconds to an RFC3339 string in UTC."""
    moment = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
    return moment.strftime("%Y-%m-%dT%H:%M:%S") + "Z"
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def datetime_to_rfc3339(timestamp):
    """Convert an int Unix timestamp to RFC3339; pass any other value through."""
    return unix_to_rfc3339(timestamp) if isinstance(timestamp, int) else timestamp
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def process_timestamps_to_rfc3339(
    start_timestamp: Optional[Union[int, str]],
    end_timestamp: Optional[Union[int, str]],
    default_time_span_seconds: int,
) -> Tuple[str, str]:
    """Normalize a (start, end) pair and return both bounds as RFC3339 strings.

    Range resolution (defaults, relative offsets, auto-inversion) is
    delegated to process_timestamps_to_int; each bound is then formatted.
    """
    start_unix, end_unix = process_timestamps_to_int(
        start_timestamp,
        end_timestamp,
        default_time_span_seconds=default_time_span_seconds,
    )
    return (datetime_to_rfc3339(start_unix), datetime_to_rfc3339(end_unix))
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def process_timestamps_to_int(
    start: Optional[Union[int, str]],
    end: Optional[Union[int, str]],
    default_time_span_seconds: int,
) -> Tuple[int, int]:
    """
    Process and normalize start and end timestamps.

    Supports:
    - Integer timestamps (Unix time)
    - RFC3339 formatted timestamps
    - Negative integers as relative time from the other timestamp
    - Auto-inversion if start is after end

    Args:
        start: Unix seconds, a datetime string, or a negative offset (seconds).
        end: Unix seconds, a datetime string, or a negative offset (seconds).
        default_time_span_seconds: span used for start when it is omitted.

    Returns:
        Tuple of (start_timestamp, end_timestamp)
    """
    # If no end timestamp provided (None, 0 or "0"), use the current time.
    if not end or end == "0" or end == 0:
        end = int(time.time())

    # If no start provided, default to `default_time_span_seconds` before end,
    # expressed as a negative offset and resolved below.
    if not start:
        start = -1 * abs(default_time_span_seconds)

    # Convert datetime strings to Unix seconds; int-like values pass through.
    start = datetime_to_unix(start)
    end = datetime_to_unix(end)

    # Handle negative timestamps (relative to the other timestamp)
    if isinstance(start, int) and isinstance(end, int):
        if start < 0 and end < 0:
            # Both relative: end is relative to now(), start relative to end.
            end = int(time.time()) + end
            start = end + start
        elif start < 0:
            # start is an offset backwards from the absolute end.
            start = end + start
        elif end < 0:
            # start/end are inverted. end should be after start_timestamp:
            # treat the negative end as an offset and swap the roles.
            delta = end
            end = start
            start = start + delta

    # Invert timestamps if start is after end
    if isinstance(start, int) and isinstance(end, int) and start > end:
        start, end = end, start

    return (start, end)  # type: ignore
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def get_param_or_raise(dict: Dict, param: str) -> str:
    """Fetch *param* from the mapping, raising when it is missing or falsy.

    Note: empty/zero values are treated the same as a missing key.
    """
    value = dict.get(param)
    if value:
        return value
    raise Exception(f'Missing param "{param}"')
|
holmes/plugins/utils.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# this file contains utilities that plugin writers are likely to use - not utilities that are only relevant for core
|
|
2
|
+
from typing import Dict
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def dict_to_markdown(items: Dict[str, str]) -> str:
    """Render a dict as Slack-style markdown bullets: "• *key*: value" per line."""
    if not items:
        return ""

    # TODO: if v is a url, linkify it
    return "".join(f"• *{key}*: {value}\n" for key, value in items.items())
|
holmes/utils/__init__.py
ADDED
|
File without changes
|
holmes/utils/cache.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
import time
|
|
2
|
+
from threading import Timer
|
|
3
|
+
from typing import Any, Dict, Optional
|
|
4
|
+
import json
|
|
5
|
+
import bz2
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class SetEncoder(json.JSONEncoder):
    """JSON encoder that serializes sets as lists (JSON has no set type)."""

    def default(self, o):
        return list(o) if isinstance(o, set) else super().default(o)


def compress(data):
    """Serialize *data* to JSON (sets become lists) and bz2-compress the bytes.

    compresslevel=1 favors speed over ratio, which suits cache writes.
    """
    serialized = json.dumps(data, cls=SetEncoder).encode("utf-8")
    return bz2.compress(serialized, compresslevel=1)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def decompress(compressed_data):
    """Reverse compress(): bz2-decompress the bytes and parse the JSON payload.

    Raises:
        Exception: wrapping any decompression/decoding/parsing failure, with
        the original exception chained so the root cause is preserved.
    """
    try:
        raw = bz2.decompress(compressed_data)
        return json.loads(raw.decode("utf-8"))
    except Exception as e:
        # `from e` keeps the original traceback attached for debuggability;
        # the bare re-raise previously discarded it.
        raise Exception(f"Decompression failed: {str(e)}") from e
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class TTLCache:
    """In-memory key/value cache with a global TTL and periodic eviction.

    Values are stored bz2-compressed (via the module-level compress/decompress
    helpers) to bound memory use. A daemon Timer thread sweeps expired entries
    every `_evict_interval` seconds, and `get` also evicts lazily on access.

    NOTE(review): reads/writes are not guarded by a lock; the eviction timer
    runs on a separate thread and relies on the atomicity of individual dict
    operations under the GIL — confirm this is acceptable for callers.
    """

    def __init__(self, ttl_seconds: int):
        # key -> {"value": <compressed bytes>, "expiry": <unix time float>}
        self._cache: Dict[str, Dict[str, Any]] = {}
        self._ttl = ttl_seconds
        # Sweep roughly ten times per TTL, but never more often than once a minute.
        self._evict_interval = max(self._ttl / 10, 60)
        self._evict_timer = None
        self._start_evict_timer()

    def _start_evict_timer(self):
        # Timer is one-shot, so each sweep re-arms the next one.
        self._evict_timer = Timer(self._evict_interval, self._evict)
        self._evict_timer.daemon = (
            True  # Allow the program to exit even if timer is alive
        )
        self._evict_timer.start()

    def _evict(self):
        # Collect expired keys first, then delete, to avoid mutating the dict
        # while iterating it.
        current_time = time.time()
        expired_keys = [
            key for key, item in self._cache.items() if item["expiry"] <= current_time
        ]

        for key in expired_keys:
            del self._cache[key]

        self._start_evict_timer()

    def set(self, key: str, value: Any) -> None:
        """Store *value* under *key*, compressed, expiring after the cache TTL."""
        expiry = time.time() + self._ttl

        self._cache[key] = {"value": compress(value), "expiry": expiry}

    def get(self, key: str) -> Optional[Any]:
        """Return the cached value for *key*, or None when absent or expired."""
        item = self._cache.get(key)

        if item is None:
            return None

        # Lazy eviction: an expired entry is removed on access.
        if item["expiry"] <= time.time():
            del self._cache[key]
            return None

        return decompress(item["value"])

    def delete(self, key: str) -> None:
        """Remove *key* if present; no-op otherwise."""
        self._cache.pop(key, None)

    def clear(self) -> None:
        """Drop all cached entries (does not stop the eviction timer)."""
        self._cache.clear()

    def __del__(self):
        # Cancel the pending timer so the thread is not left running.
        if self._evict_timer:
            self._evict_timer.cancel()
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import os
|
|
3
|
+
|
|
4
|
+
import certifi
|
|
5
|
+
|
|
6
|
+
# Writable location for a copied CA bundle when the installed certifi bundle
# is read-only (see create_temporary_certificate).
CUSTOM_CERTIFICATE_PATH = "/tmp/custom_ca.pem"
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def append_custom_certificate(custom_ca: str) -> None:
    """Append a base64-encoded CA certificate to the active certifi bundle.

    Mutates the certifi CA file in place so every library that consults
    certifi trusts the custom CA, and points the websocket client at the
    same bundle. Raises PermissionError when the bundle is not writable.
    """
    with open(certifi.where(), "ab") as outfile:
        outfile.write(base64.b64decode(custom_ca))

    os.environ["WEBSOCKET_CLIENT_CA_BUNDLE"] = certifi.where()
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def create_temporary_certificate(custom_ca: str) -> None:
    """Build a writable copy of the certifi bundle with the custom CA appended.

    Used when the installed certifi bundle is read-only (e.g. OpenShift):
    copies the base bundle to CUSTOM_CERTIFICATE_PATH, appends the decoded
    CA, and redirects requests/websocket (via env vars) to the new bundle.
    """
    with open(certifi.where(), "rb") as base_cert:
        base_cert_content = base_cert.read()

    with open(CUSTOM_CERTIFICATE_PATH, "wb") as outfile:
        outfile.write(base_cert_content)
        outfile.write(base64.b64decode(custom_ca))

    os.environ["REQUESTS_CA_BUNDLE"] = CUSTOM_CERTIFICATE_PATH
    os.environ["WEBSOCKET_CLIENT_CA_BUNDLE"] = CUSTOM_CERTIFICATE_PATH
    # Monkey-patch certifi.where() so later in-process callers also resolve
    # to the temporary bundle.
    certifi.where = lambda: CUSTOM_CERTIFICATE_PATH
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def add_custom_certificate(custom_ca: str) -> bool:
    """Install a base64-encoded custom CA certificate, if one is provided.

    Returns True when a certificate was installed, False for empty input.
    """
    if not custom_ca:
        return False

    # NOTE: Sometimes (Openshift) the certifi.where() is not writable, so we need to
    # use a temporary file in case of PermissionError.
    try:
        append_custom_certificate(custom_ca)
    except PermissionError:
        create_temporary_certificate(custom_ca)

    return True
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
{% if enabled %}
|
|
2
|
+
{% if is_default %}
|
|
3
|
+
This integration is enabled by default.
|
|
4
|
+
|
|
5
|
+
If you would like to disable this toolset (not recommended), you need to update the `generated_values.yaml` configuration.
|
|
6
|
+
{% else %}
|
|
7
|
+
To disable this integration, you need to update the `generated_values.yaml` configuration.
|
|
8
|
+
{% endif %}
|
|
9
|
+
|
|
10
|
+
```yaml
|
|
11
|
+
holmes:
|
|
12
|
+
toolsets:
|
|
13
|
+
{{toolset_name}}:
|
|
14
|
+
enabled: false
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
{% else %}
|
|
18
|
+
To enable this integration, update the Helm values for Robusta (`generated_values.yaml`):
|
|
19
|
+
|
|
20
|
+
```yaml
|
|
21
|
+
holmes:
|
|
22
|
+
{% if env_vars %}
|
|
23
|
+
additionalEnvVars:
|
|
24
|
+
{% for env in env_vars %}
|
|
25
|
+
- name: {{ env }}
|
|
26
|
+
value: <{{ env }}>
|
|
27
|
+
{% endfor %}
|
|
28
|
+
{% endif %}
|
|
29
|
+
toolsets:
|
|
30
|
+
{{toolset_name}}:
|
|
31
|
+
enabled: true
|
|
32
|
+
{% if example_config %}
|
|
33
|
+
config:
|
|
34
|
+
{{ example_config | indent(8) }}
|
|
35
|
+
{% endif %}
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
{% endif %}
|
|
39
|
+
|
|
40
|
+
And deploy the updated configuration using Helm:
|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
helm upgrade robusta robusta/robusta --values=generated_values.yaml --set clusterName=<YOUR_CLUSTER_NAME>
|
|
44
|
+
```
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from typing import Dict, List
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel
|
|
5
|
+
|
|
6
|
+
# Path of the user-supplied custom toolset definition file; overridable via
# the CUSTOM_TOOLSET_LOCATION environment variable.
CUSTOM_TOOLSET_LOCATION = os.environ.get(
    "CUSTOM_TOOLSET_LOCATION", "/etc/holmes/config/custom_toolset.yaml"
)


class RobustaConfig(BaseModel):
    """Subset of the Robusta runner configuration that Holmes reads."""

    # One mapping per configured sink; inner structure is sink-specific and
    # not constrained here.
    sinks_config: List[Dict[str, Dict]]
    # The raw `global_config` section; contents are not constrained here.
    global_config: dict
|
holmes/utils/env.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import os
|
|
3
|
+
import re
|
|
4
|
+
from typing import Any, Optional
|
|
5
|
+
|
|
6
|
+
from pydantic import SecretStr
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def get_env_replacement(value: str) -> Optional[str]:
    """Expand `{{ env.VAR }}` placeholders in *value* using os.environ.

    Returns the string with every placeholder replaced (unchanged when it
    contains no placeholders).

    Raises:
        ValueError: when a referenced environment variable is not set.
    """
    env_patterns = re.findall(r"{{\s*env\.([^}]*)\s*}}", value)

    result = value

    # Replace env patterns with their values or raise exception
    for env_var_key in env_patterns:
        env_var_key = env_var_key.strip()
        pattern_regex = r"{{\s*env\." + re.escape(env_var_key) + r"\s*}}"
        if env_var_key in os.environ:
            replacement = os.environ[env_var_key]
        else:
            msg = f"ENV var replacement {env_var_key} does not exist"
            logging.error(msg)
            raise ValueError(msg)
        # Use a callable replacement so the env value is inserted literally;
        # passing it as a plain replacement string makes re.sub interpret
        # backslashes (e.g. Windows paths like C:\temp) as escape sequences,
        # corrupting the value or raising "bad escape".
        result = re.sub(pattern_regex, lambda _m, _rep=replacement: _rep, result)

    return result
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def replace_env_vars_values(values: dict[str, Any]) -> dict[str, Any]:
    """Recursively expand `{{ env.VAR }}` placeholders inside a config mapping.

    Mutates *values* in place (and returns it): str and SecretStr values are
    expanded, nested dicts are processed recursively, and lists are rebuilt
    with each element expanded according to its type.
    """
    for key, value in values.items():
        if isinstance(value, str):
            expanded = get_env_replacement(value)
            if expanded:
                values[key] = expanded
        elif isinstance(value, SecretStr):
            expanded = get_env_replacement(value.get_secret_value())
            if expanded:
                values[key] = SecretStr(expanded)
        elif isinstance(value, dict):
            replace_env_vars_values(value)
        elif isinstance(value, list):
            # A list may mix dicts, strings and other scalar types.
            rebuilt = []
            for item in value:
                if isinstance(item, dict):
                    rebuilt.append(replace_env_vars_values(item))
                elif isinstance(item, str):
                    rebuilt.append(get_env_replacement(item))
                else:
                    rebuilt.append(item)
            values[key] = rebuilt
    return values
|