holmesgpt 0.13.2__py3-none-any.whl → 0.16.2a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- holmes/__init__.py +1 -1
- holmes/clients/robusta_client.py +17 -4
- holmes/common/env_vars.py +40 -1
- holmes/config.py +114 -144
- holmes/core/conversations.py +53 -14
- holmes/core/feedback.py +191 -0
- holmes/core/investigation.py +18 -22
- holmes/core/llm.py +489 -88
- holmes/core/models.py +103 -1
- holmes/core/openai_formatting.py +13 -0
- holmes/core/prompt.py +1 -1
- holmes/core/safeguards.py +4 -4
- holmes/core/supabase_dal.py +293 -100
- holmes/core/tool_calling_llm.py +423 -323
- holmes/core/tools.py +311 -33
- holmes/core/tools_utils/token_counting.py +14 -0
- holmes/core/tools_utils/tool_context_window_limiter.py +57 -0
- holmes/core/tools_utils/tool_executor.py +13 -8
- holmes/core/toolset_manager.py +155 -4
- holmes/core/tracing.py +6 -1
- holmes/core/transformers/__init__.py +23 -0
- holmes/core/transformers/base.py +62 -0
- holmes/core/transformers/llm_summarize.py +174 -0
- holmes/core/transformers/registry.py +122 -0
- holmes/core/transformers/transformer.py +31 -0
- holmes/core/truncation/compaction.py +59 -0
- holmes/core/truncation/dal_truncation_utils.py +23 -0
- holmes/core/truncation/input_context_window_limiter.py +218 -0
- holmes/interactive.py +177 -24
- holmes/main.py +7 -4
- holmes/plugins/prompts/_fetch_logs.jinja2 +26 -1
- holmes/plugins/prompts/_general_instructions.jinja2 +1 -2
- holmes/plugins/prompts/_runbook_instructions.jinja2 +23 -12
- holmes/plugins/prompts/conversation_history_compaction.jinja2 +88 -0
- holmes/plugins/prompts/generic_ask.jinja2 +2 -4
- holmes/plugins/prompts/generic_ask_conversation.jinja2 +2 -1
- holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +2 -1
- holmes/plugins/prompts/generic_investigation.jinja2 +2 -1
- holmes/plugins/prompts/investigation_procedure.jinja2 +48 -0
- holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +2 -1
- holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +2 -1
- holmes/plugins/runbooks/__init__.py +117 -18
- holmes/plugins/runbooks/catalog.json +2 -0
- holmes/plugins/toolsets/__init__.py +21 -8
- holmes/plugins/toolsets/aks-node-health.yaml +46 -0
- holmes/plugins/toolsets/aks.yaml +64 -0
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +26 -36
- holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +0 -1
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +10 -7
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +8 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +8 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +9 -7
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +9 -6
- holmes/plugins/toolsets/bash/bash_toolset.py +10 -13
- holmes/plugins/toolsets/bash/common/bash.py +7 -7
- holmes/plugins/toolsets/cilium.yaml +284 -0
- holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
- holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
- holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_general.py +349 -216
- holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +190 -19
- holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +101 -44
- holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +13 -16
- holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +25 -31
- holmes/plugins/toolsets/git.py +51 -46
- holmes/plugins/toolsets/grafana/common.py +15 -3
- holmes/plugins/toolsets/grafana/grafana_api.py +46 -24
- holmes/plugins/toolsets/grafana/grafana_tempo_api.py +454 -0
- holmes/plugins/toolsets/grafana/loki/instructions.jinja2 +9 -0
- holmes/plugins/toolsets/grafana/loki/toolset_grafana_loki.py +117 -0
- holmes/plugins/toolsets/grafana/toolset_grafana.py +211 -91
- holmes/plugins/toolsets/grafana/toolset_grafana_dashboard.jinja2 +27 -0
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +246 -11
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +653 -293
- holmes/plugins/toolsets/grafana/trace_parser.py +1 -1
- holmes/plugins/toolsets/internet/internet.py +6 -7
- holmes/plugins/toolsets/internet/notion.py +5 -6
- holmes/plugins/toolsets/investigator/core_investigation.py +42 -34
- holmes/plugins/toolsets/kafka.py +25 -36
- holmes/plugins/toolsets/kubernetes.yaml +58 -84
- holmes/plugins/toolsets/kubernetes_logs.py +6 -6
- holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
- holmes/plugins/toolsets/logging_utils/logging_api.py +80 -4
- holmes/plugins/toolsets/mcp/toolset_mcp.py +181 -55
- holmes/plugins/toolsets/newrelic/__init__.py +0 -0
- holmes/plugins/toolsets/newrelic/new_relic_api.py +125 -0
- holmes/plugins/toolsets/newrelic/newrelic.jinja2 +41 -0
- holmes/plugins/toolsets/newrelic/newrelic.py +163 -0
- holmes/plugins/toolsets/opensearch/opensearch.py +10 -17
- holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
- holmes/plugins/toolsets/opensearch/opensearch_ppl_query_docs.jinja2 +1616 -0
- holmes/plugins/toolsets/opensearch/opensearch_query_assist.py +78 -0
- holmes/plugins/toolsets/opensearch/opensearch_query_assist_instructions.jinja2 +223 -0
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +13 -16
- holmes/plugins/toolsets/openshift.yaml +283 -0
- holmes/plugins/toolsets/prometheus/prometheus.py +915 -390
- holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +43 -2
- holmes/plugins/toolsets/prometheus/utils.py +28 -0
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +9 -10
- holmes/plugins/toolsets/robusta/robusta.py +236 -65
- holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +26 -9
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +137 -26
- holmes/plugins/toolsets/service_discovery.py +1 -1
- holmes/plugins/toolsets/servicenow_tables/instructions.jinja2 +83 -0
- holmes/plugins/toolsets/servicenow_tables/servicenow_tables.py +426 -0
- holmes/plugins/toolsets/utils.py +88 -0
- holmes/utils/config_utils.py +91 -0
- holmes/utils/default_toolset_installation_guide.jinja2 +1 -22
- holmes/utils/env.py +7 -0
- holmes/utils/global_instructions.py +75 -10
- holmes/utils/holmes_status.py +2 -1
- holmes/utils/holmes_sync_toolsets.py +0 -2
- holmes/utils/krr_utils.py +188 -0
- holmes/utils/sentry_helper.py +41 -0
- holmes/utils/stream.py +61 -7
- holmes/version.py +34 -14
- holmesgpt-0.16.2a0.dist-info/LICENSE +178 -0
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/METADATA +29 -27
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/RECORD +126 -102
- holmes/core/performance_timing.py +0 -72
- holmes/plugins/toolsets/grafana/tempo_api.py +0 -124
- holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +0 -110
- holmes/plugins/toolsets/newrelic.py +0 -231
- holmes/plugins/toolsets/servicenow/install.md +0 -37
- holmes/plugins/toolsets/servicenow/instructions.jinja2 +0 -3
- holmes/plugins/toolsets/servicenow/servicenow.py +0 -219
- holmesgpt-0.13.2.dist-info/LICENSE.txt +0 -21
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/WHEEL +0 -0
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
New Relic provides distributed tracing data along with logs and metrics.
|
|
2
|
+
|
|
3
|
+
Assume every application has New Relic tracing data.
|
|
4
|
+
|
|
5
|
+
Use `nrql_query` to run a NRQL query.
|
|
6
|
+
|
|
7
|
+
**NRQL (New Relic Query Language)** is used to query all telemetry data in New Relic. The main event types are:
|
|
8
|
+
|
|
9
|
+
- **Transaction**: High-level APM data (requests, API calls)
|
|
10
|
+
- **Span**: Distributed tracing data (individual operations)
|
|
11
|
+
- **Log**: Centralized log data
|
|
12
|
+
- **Metric**: Time-series metrics data.
|
|
13
|
+
|
|
14
|
+
### Usage Workflow
|
|
15
|
+
|
|
16
|
+
#### 1. Discovering Available Data
|
|
17
|
+
|
|
18
|
+
Start by understanding what's available. Here are some examples:
|
|
19
|
+
- **ALWAYS** Start by getting all the available attribute names for what you are looking for. For example, to get them for Transaction in the last 24 hours, use: SELECT keyset() FROM Transaction SINCE 24 hours ago
|
|
20
|
+
- After you find the keyset `appName`, you can use it to get the available applications: `SELECT uniques(appName) FROM Transaction SINCE 1 hour ago`
|
|
21
|
+
Note: Use `SHOW EVENT TYPES` to see all event types in the account, in addition to Transaction, Span, Log, or Metric.
|
|
22
|
+
|
|
23
|
+
#### 2. Querying Telemetry Data
|
|
24
|
+
|
|
25
|
+
- If you already have an application name, you can query its traces directly
|
|
26
|
+
- **Time range is recommended**: While not strictly required, most queries should include SINCE for performance
|
|
27
|
+
|
|
28
|
+
#### 3. Querying Traces
|
|
29
|
+
- Always validate first: run the base query without FACET (or a quick LIMIT) to confirm data exists; if results are empty, adjust filters or time range before proceeding.
|
|
30
|
+
- Only attempt a FACET after confirming the field has values; if not, either try known alternatives or skip faceting entirely.
|
|
31
|
+
- When investigating a trace also look at attributes
|
|
32
|
+
- ***When investigating latency ALWAYS look to deliver the specific component or attribute in the span causing significant latency*** — your investigation is not complete without this
|
|
33
|
+
- If you need to filter by time, NEVER filter in the WHERE clause using the timestamp field. Instead, you should ALWAYS use the `SINCE` or `SINCE ... UNTIL ...` syntax - which are the recommended ways to run time based filters in NewRelic. Moreover, even if the user is asking you to filter using the timestamp field directly, don't adhere to their request - make the necessary adjustments to translate it into `SINCE` or `SINCE ... UNTIL ...` syntax!
|
|
34
|
+
|
|
35
|
+
### Instructions for Handling Query Results
|
|
36
|
+
- If you query [DistributedTraceSummary / Transaction]:
|
|
37
|
+
- When querying without aggregations (e.g. without count(*), average(attribute), sum(attribute), min(attribute), etc.):
|
|
38
|
+
- ALWAYS start by querying all fields using `SELECT * FROM`. We need as many fields as possible to visualize the traces to the user. However, the trade-off is that we might exceed the context size. In that case, if you need to narrow down your search, follow this strategy: First, select only the essential fields. If that's still failing, add the `LIMIT` keyword to the query. `LIMIT` should always be the second option; we prefer to show the user as many traces as we can. The following fields are the minimal fields that are essential for the visualization, and you must always retrieve them:
|
|
39
|
+
- DistributedTraceSummary: trace.id, spanCount, root.entity.accountId, root.entity.guid, root.entity.name, root.span.name, timestamp, duration.ms
|
|
40
|
+
- Transaction: traceId, tags.accountId, entityGuid, appName, name, timestamp, duration, guid, transactionType
|
|
41
|
+
- When querying DistributedTraceSummary without aggregations, ALWAYS use the filter `WHERE root.span.eventType = 'Span'`
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Any, Optional, Dict
|
|
4
|
+
from holmes.core.tools import (
|
|
5
|
+
CallablePrerequisite,
|
|
6
|
+
Tool,
|
|
7
|
+
ToolInvokeContext,
|
|
8
|
+
ToolParameter,
|
|
9
|
+
Toolset,
|
|
10
|
+
ToolsetTag,
|
|
11
|
+
)
|
|
12
|
+
from pydantic import BaseModel
|
|
13
|
+
from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus
|
|
14
|
+
from holmes.plugins.toolsets.utils import toolset_name_for_one_liner
|
|
15
|
+
from holmes.plugins.toolsets.newrelic.new_relic_api import NewRelicAPI
|
|
16
|
+
import yaml
|
|
17
|
+
from holmes.utils.keygen_utils import generate_random_key
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ExecuteNRQLQuery(Tool):
    """Tool that executes a freeform NRQL query against the configured New Relic account.

    The heavy lifting is delegated to NewRelicAPI; this class only wires the
    tool schema (name, description, parameters) and packages the response.
    """

    def __init__(self, toolset: "NewRelicToolset"):
        super().__init__(
            name="newrelic_execute_nrql_query",
            description="Get Traces, APM, Spans, Logs and more by executing a NRQL query in New Relic. "
            "Returns the result of the NRQL function. "
            "⚠️ CRITICAL: NRQL silently returns empty results for invalid queries instead of errors. "
            "If you get empty results, your query likely has issues such as: "
            "1) Wrong attribute names (use SELECT keyset() first to verify), "
            "2) Type mismatches (string vs numeric fields), "
            "3) Wrong event type. "
            "Always verify attribute names and types before querying.",
            parameters={
                "query": ToolParameter(
                    description="""The NRQL query string to execute.

MANDATORY: Before querying any event type, ALWAYS run `SELECT keyset() FROM <EventType> SINCE <timeframe>` to discover available attributes. Never use attributes without confirming they exist first. Make sure to remember which fields are stringKeys, numericKeys or booleanKeys as this will be important in subsequent queries.

Example: Before querying Transactions, run: `SELECT keyset() FROM Transaction SINCE 24 hours ago`

### ⚠️ Critical Rule: NRQL `FACET` Usage

When using **FACET** in NRQL:
- Any **non-constant value** in the `SELECT` clause **must be aggregated**.
- The attribute you **FACET** on must **not appear in `SELECT`** unless it's wrapped in an aggregation.

#### ✅ Correct
```nrql
-- Aggregated metric + facet
SELECT count(*) FROM Transaction FACET transactionType

-- Multiple aggregations with facet
SELECT count(*), average(duration) FROM Transaction FACET transactionType
```

#### ❌ Incorrect
```nrql
-- Not allowed: raw attribute in SELECT
SELECT count(*), transactionType FROM Transaction FACET transactionType
```
""",
                    type="string",
                    required=True,
                ),
                "description": ToolParameter(
                    # NOTE: typo fixed ("breif" -> "brief"); this text is shown to the LLM.
                    description="A brief 6 word human understandable description of the query you are running.",
                    type="string",
                    required=True,
                ),
                "query_type": ToolParameter(
                    description="Either 'Metrics', 'Logs', 'Traces', 'Discover Attributes' or 'Other'.",
                    type="string",
                    required=True,
                ),
            },
        )
        self._toolset = toolset

    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
        """Run the NRQL query and return its result as YAML-serialized data.

        Raises:
            ValueError: if the toolset has no API key or account ID configured.
        """
        if not self._toolset.nr_api_key or not self._toolset.nr_account_id:
            raise ValueError("NewRelic API key or account ID is not configured")

        api = NewRelicAPI(
            api_key=self._toolset.nr_api_key,
            account_id=self._toolset.nr_account_id,
            is_eu_datacenter=self._toolset.is_eu_datacenter,
        )

        query = params["query"]
        result = api.execute_nrql_query(query)

        # random_key lets downstream consumers correlate/deduplicate this result blob.
        result_with_key = {
            "random_key": generate_random_key(),
            "tool_name": self.name,
            "query": query,
            "data": result,
            "is_eu": self._toolset.is_eu_datacenter,
        }
        final_result = yaml.dump(result_with_key, default_flow_style=False)
        return StructuredToolResult(
            status=StructuredToolResultStatus.SUCCESS,
            data=final_result,
            params=params,
        )

    def get_parameterized_one_liner(self, params) -> str:
        # Short human-readable summary shown while the tool is running.
        description = params.get("description", "")
        return f"{toolset_name_for_one_liner(self._toolset.name)}: Execute NRQL ({description})"
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
class NewrelicConfig(BaseModel):
    """User-supplied configuration for the New Relic toolset.

    All fields are optional at parse time; prerequisites_callable rejects the
    toolset later if the key or account ID is missing.
    """

    # NRAK-... API key used to authenticate against NerdGraph.
    nr_api_key: Optional[str] = None
    # Numeric New Relic account ID (as a string).
    nr_account_id: Optional[str] = None
    # True when the account lives in the EU datacenter (different API host).
    is_eu_datacenter: Optional[bool] = False
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
class NewRelicToolset(Toolset):
    """Toolset exposing New Relic telemetry (logs, traces, NRQL) to HolmesGPT."""

    # Populated from NewrelicConfig during prerequisite validation.
    nr_api_key: Optional[str] = None
    nr_account_id: Optional[str] = None
    is_eu_datacenter: bool = False

    def __init__(self):
        super().__init__(
            name="newrelic",
            description="Toolset for interacting with New Relic to fetch logs, traces, and execute freeform NRQL queries",
            docs_url="https://holmesgpt.dev/data-sources/builtin-toolsets/newrelic/",
            icon_url="https://companieslogo.com/img/orig/NEWR-de5fcb2e.png?t=1720244493",
            prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],  # type: ignore
            tools=[ExecuteNRQLQuery(self)],
            tags=[ToolsetTag.CORE],
        )
        # LLM usage instructions live in a jinja template next to this module.
        template_dir = os.path.dirname(__file__)
        template_file_path = os.path.abspath(os.path.join(template_dir, "newrelic.jinja2"))
        self._load_llm_instructions(jinja_template=f"file://{template_file_path}")

    def prerequisites_callable(
        self, config: dict[str, Any]
    ) -> tuple[bool, Optional[str]]:
        """Validate config and cache credentials; returns (ok, error_message)."""
        if not config:
            return False, "No configuration provided"

        try:
            nr_config = NewrelicConfig(**config)
            self.nr_account_id = nr_config.nr_account_id
            self.nr_api_key = nr_config.nr_api_key
            self.is_eu_datacenter = nr_config.is_eu_datacenter or False

            if not self.nr_account_id or not self.nr_api_key:
                return False, "New Relic account ID or API key is missing"
        except Exception as e:
            logging.exception("Failed to set up New Relic toolset")
            return False, str(e)

        return True, None

    def get_example_config(self) -> Dict[str, Any]:
        # Placeholder values shown to users configuring the toolset.
        example: Dict[str, Any] = {
            "nr_api_key": "NRAK-XXXXXXXXXXXXXXXXXXXXXXXXXX",
            "nr_account_id": "1234567",
            "is_eu_datacenter": False,
        }
        return example
|
|
@@ -8,8 +8,9 @@ from holmes.core.tools import (
|
|
|
8
8
|
CallablePrerequisite,
|
|
9
9
|
StructuredToolResult,
|
|
10
10
|
Tool,
|
|
11
|
+
ToolInvokeContext,
|
|
11
12
|
ToolParameter,
|
|
12
|
-
|
|
13
|
+
StructuredToolResultStatus,
|
|
13
14
|
Toolset,
|
|
14
15
|
ToolsetTag,
|
|
15
16
|
)
|
|
@@ -93,13 +94,11 @@ class ListShards(BaseOpenSearchTool):
|
|
|
93
94
|
toolset=toolset,
|
|
94
95
|
)
|
|
95
96
|
|
|
96
|
-
def _invoke(
|
|
97
|
-
self, params: dict, user_approved: bool = False
|
|
98
|
-
) -> StructuredToolResult:
|
|
97
|
+
def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
|
|
99
98
|
client = get_client(self.toolset.clients, host=params.get("host", ""))
|
|
100
99
|
shards = client.client.cat.shards()
|
|
101
100
|
return StructuredToolResult(
|
|
102
|
-
status=
|
|
101
|
+
status=StructuredToolResultStatus.SUCCESS,
|
|
103
102
|
data=str(shards),
|
|
104
103
|
params=params,
|
|
105
104
|
)
|
|
@@ -124,15 +123,13 @@ class GetClusterSettings(BaseOpenSearchTool):
|
|
|
124
123
|
toolset=toolset,
|
|
125
124
|
)
|
|
126
125
|
|
|
127
|
-
def _invoke(
|
|
128
|
-
self, params: dict, user_approved: bool = False
|
|
129
|
-
) -> StructuredToolResult:
|
|
126
|
+
def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
|
|
130
127
|
client = get_client(self.toolset.clients, host=params.get("host"))
|
|
131
128
|
response = client.client.cluster.get_settings(
|
|
132
129
|
include_defaults=True, flat_settings=True
|
|
133
130
|
)
|
|
134
131
|
return StructuredToolResult(
|
|
135
|
-
status=
|
|
132
|
+
status=StructuredToolResultStatus.SUCCESS,
|
|
136
133
|
data=str(response),
|
|
137
134
|
params=params,
|
|
138
135
|
)
|
|
@@ -157,13 +154,11 @@ class GetClusterHealth(BaseOpenSearchTool):
|
|
|
157
154
|
toolset=toolset,
|
|
158
155
|
)
|
|
159
156
|
|
|
160
|
-
def _invoke(
|
|
161
|
-
self, params: dict, user_approved: bool = False
|
|
162
|
-
) -> StructuredToolResult:
|
|
157
|
+
def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
|
|
163
158
|
client = get_client(self.toolset.clients, host=params.get("host", ""))
|
|
164
159
|
health = client.client.cluster.health()
|
|
165
160
|
return StructuredToolResult(
|
|
166
|
-
status=
|
|
161
|
+
status=StructuredToolResultStatus.SUCCESS,
|
|
167
162
|
data=str(health),
|
|
168
163
|
params=params,
|
|
169
164
|
)
|
|
@@ -182,12 +177,10 @@ class ListOpenSearchHosts(BaseOpenSearchTool):
|
|
|
182
177
|
toolset=toolset,
|
|
183
178
|
)
|
|
184
179
|
|
|
185
|
-
def _invoke(
|
|
186
|
-
self, params: dict, user_approved: bool = False
|
|
187
|
-
) -> StructuredToolResult:
|
|
180
|
+
def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
|
|
188
181
|
hosts = [host for client in self.toolset.clients for host in client.hosts]
|
|
189
182
|
return StructuredToolResult(
|
|
190
|
-
status=
|
|
183
|
+
status=StructuredToolResultStatus.SUCCESS,
|
|
191
184
|
data=str(hosts),
|
|
192
185
|
params=params,
|
|
193
186
|
)
|
|
@@ -8,7 +8,7 @@ from urllib.parse import urljoin
|
|
|
8
8
|
from holmes.core.tools import (
|
|
9
9
|
CallablePrerequisite,
|
|
10
10
|
StructuredToolResult,
|
|
11
|
-
|
|
11
|
+
StructuredToolResultStatus,
|
|
12
12
|
ToolsetTag,
|
|
13
13
|
)
|
|
14
14
|
from holmes.plugins.toolsets.logging_utils.logging_api import (
|
|
@@ -79,7 +79,7 @@ class OpenSearchLogsToolset(BasePodLoggingToolset):
|
|
|
79
79
|
def fetch_pod_logs(self, params: FetchPodLogsParams) -> StructuredToolResult:
|
|
80
80
|
if not self.opensearch_config:
|
|
81
81
|
return StructuredToolResult(
|
|
82
|
-
status=
|
|
82
|
+
status=StructuredToolResultStatus.ERROR,
|
|
83
83
|
error="Missing OpenSearch configuration",
|
|
84
84
|
params=params.model_dump(),
|
|
85
85
|
)
|
|
@@ -126,13 +126,13 @@ class OpenSearchLogsToolset(BasePodLoggingToolset):
|
|
|
126
126
|
config=self.opensearch_config,
|
|
127
127
|
)
|
|
128
128
|
return StructuredToolResult(
|
|
129
|
-
status=
|
|
129
|
+
status=StructuredToolResultStatus.SUCCESS,
|
|
130
130
|
data=logs,
|
|
131
131
|
params=params.model_dump(),
|
|
132
132
|
)
|
|
133
133
|
else:
|
|
134
134
|
return StructuredToolResult(
|
|
135
|
-
status=
|
|
135
|
+
status=StructuredToolResultStatus.ERROR,
|
|
136
136
|
return_code=logs_response.status_code,
|
|
137
137
|
error=logs_response.text,
|
|
138
138
|
params=params.model_dump(),
|
|
@@ -141,21 +141,21 @@ class OpenSearchLogsToolset(BasePodLoggingToolset):
|
|
|
141
141
|
except requests.Timeout:
|
|
142
142
|
logging.warning("Timeout while fetching OpenSearch logs", exc_info=True)
|
|
143
143
|
return StructuredToolResult(
|
|
144
|
-
status=
|
|
144
|
+
status=StructuredToolResultStatus.ERROR,
|
|
145
145
|
error="Request timed out while fetching OpenSearch logs",
|
|
146
146
|
params=params.model_dump(),
|
|
147
147
|
)
|
|
148
148
|
except RequestException as e:
|
|
149
149
|
logging.warning("Failed to fetch OpenSearch logs", exc_info=True)
|
|
150
150
|
return StructuredToolResult(
|
|
151
|
-
status=
|
|
151
|
+
status=StructuredToolResultStatus.ERROR,
|
|
152
152
|
error=f"Network error while fetching OpenSearch logs: {str(e)}",
|
|
153
153
|
params=params.model_dump(),
|
|
154
154
|
)
|
|
155
155
|
except Exception as e:
|
|
156
156
|
logging.warning("Failed to process OpenSearch logs", exc_info=True)
|
|
157
157
|
return StructuredToolResult(
|
|
158
|
-
status=
|
|
158
|
+
status=StructuredToolResultStatus.ERROR,
|
|
159
159
|
error=f"Unexpected error: {str(e)}",
|
|
160
160
|
params=params.model_dump(),
|
|
161
161
|
)
|