holmesgpt 0.14.2__py3-none-any.whl → 0.14.4a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of holmesgpt has been flagged as potentially problematic.
- holmes/__init__.py +1 -1
- holmes/common/env_vars.py +6 -0
- holmes/config.py +3 -6
- holmes/core/conversations.py +12 -2
- holmes/core/feedback.py +191 -0
- holmes/core/llm.py +16 -12
- holmes/core/models.py +101 -1
- holmes/core/supabase_dal.py +23 -9
- holmes/core/tool_calling_llm.py +197 -15
- holmes/core/tools.py +20 -7
- holmes/core/tools_utils/token_counting.py +13 -0
- holmes/core/tools_utils/tool_context_window_limiter.py +45 -23
- holmes/core/tools_utils/tool_executor.py +11 -6
- holmes/core/toolset_manager.py +5 -1
- holmes/core/truncation/dal_truncation_utils.py +23 -0
- holmes/interactive.py +146 -14
- holmes/plugins/prompts/_fetch_logs.jinja2 +3 -0
- holmes/plugins/runbooks/__init__.py +6 -1
- holmes/plugins/toolsets/__init__.py +11 -4
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +9 -20
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +2 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +2 -3
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +6 -4
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +6 -4
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +2 -3
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +6 -4
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +2 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +2 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +2 -3
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +2 -3
- holmes/plugins/toolsets/bash/bash_toolset.py +4 -7
- holmes/plugins/toolsets/cilium.yaml +284 -0
- holmes/plugins/toolsets/datadog/toolset_datadog_general.py +5 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +1 -1
- holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +6 -13
- holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +3 -6
- holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +4 -9
- holmes/plugins/toolsets/git.py +14 -12
- holmes/plugins/toolsets/grafana/grafana_tempo_api.py +23 -42
- holmes/plugins/toolsets/grafana/toolset_grafana.py +2 -3
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +18 -36
- holmes/plugins/toolsets/internet/internet.py +2 -3
- holmes/plugins/toolsets/internet/notion.py +2 -3
- holmes/plugins/toolsets/investigator/core_investigation.py +7 -9
- holmes/plugins/toolsets/kafka.py +7 -18
- holmes/plugins/toolsets/logging_utils/logging_api.py +79 -3
- holmes/plugins/toolsets/mcp/toolset_mcp.py +2 -3
- holmes/plugins/toolsets/newrelic/__init__.py +0 -0
- holmes/plugins/toolsets/newrelic/new_relic_api.py +125 -0
- holmes/plugins/toolsets/newrelic/newrelic.jinja2 +41 -0
- holmes/plugins/toolsets/newrelic/newrelic.py +211 -0
- holmes/plugins/toolsets/opensearch/opensearch.py +5 -12
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +3 -6
- holmes/plugins/toolsets/prometheus/prometheus.py +135 -98
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +3 -6
- holmes/plugins/toolsets/robusta/robusta.py +4 -9
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +93 -13
- holmes/plugins/toolsets/servicenow/servicenow.py +5 -10
- holmes/utils/sentry_helper.py +1 -1
- holmes/utils/stream.py +22 -7
- holmes/version.py +34 -14
- {holmesgpt-0.14.2.dist-info → holmesgpt-0.14.4a0.dist-info}/METADATA +6 -8
- {holmesgpt-0.14.2.dist-info → holmesgpt-0.14.4a0.dist-info}/RECORD +66 -60
- holmes/core/tools_utils/data_types.py +0 -81
- holmes/plugins/toolsets/newrelic.py +0 -231
- {holmesgpt-0.14.2.dist-info → holmesgpt-0.14.4a0.dist-info}/LICENSE.txt +0 -0
- {holmesgpt-0.14.2.dist-info → holmesgpt-0.14.4a0.dist-info}/WHEEL +0 -0
- {holmesgpt-0.14.2.dist-info → holmesgpt-0.14.4a0.dist-info}/entry_points.txt +0 -0
holmes/plugins/toolsets/newrelic/new_relic_api.py (new file):

@@ -0,0 +1,125 @@
+"""NewRelic API wrapper for executing NRQL queries via GraphQL."""
+
+import logging
+from typing import Any, Dict
+
+import requests  # type: ignore
+
+
+logger = logging.getLogger(__name__)
+
+
+class NewRelicAPI:
+    """Python wrapper for NewRelic GraphQL API.
+
+    This class provides a clean interface to execute NRQL queries via the NewRelic GraphQL API,
+    supporting both US and EU datacenters.
+    """
+
+    def __init__(self, api_key: str, account_id: str, is_eu_datacenter: bool = False):
+        """Initialize the NewRelic API wrapper.
+
+        Args:
+            api_key: NewRelic API key
+            account_id: NewRelic account ID
+            is_eu_datacenter: If True, use EU datacenter URL. Defaults to False (US).
+        """
+        self.api_key = api_key
+        # Validate account_id is numeric to prevent injection
+        try:
+            self.account_id = int(account_id)
+        except ValueError:
+            raise ValueError(f"Invalid account_id: must be numeric, got '{account_id}'")
+        self.is_eu_datacenter = is_eu_datacenter
+
+    def _get_api_url(self) -> str:
+        """Get the appropriate API URL based on datacenter location.
+
+        Returns:
+            str: The GraphQL API endpoint URL
+        """
+        if self.is_eu_datacenter:
+            return "https://api.eu.newrelic.com/graphql"
+        return "https://api.newrelic.com/graphql"
+
+    def _make_request(
+        self, graphql_query: Dict[str, Any], timeout: int = 30
+    ) -> Dict[str, Any]:
+        """Make HTTP POST request to NewRelic GraphQL API.
+
+        Args:
+            graphql_query: The GraphQL query as a dictionary
+            timeout: Request timeout in seconds
+
+        Returns:
+            JSON response from the API
+
+        Raises:
+            requests.exceptions.HTTPError: If the request fails
+            Exception: If GraphQL returns errors
+        """
+        url = self._get_api_url()
+        headers = {
+            "Content-Type": "application/json",
+            "Api-Key": self.api_key,
+        }
+
+        response = requests.post(
+            url,
+            headers=headers,
+            json=graphql_query,
+            timeout=timeout,
+        )
+        response.raise_for_status()
+
+        # Parse JSON response
+        data = response.json()
+
+        # Check for GraphQL errors even on 200 responses
+        if "errors" in data and data["errors"]:
+            error_msg = data["errors"][0].get("message", "Unknown GraphQL error")
+            raise Exception(f"NewRelic GraphQL error: {error_msg}")
+
+        return data
+
+    def execute_nrql_query(self, nrql_query: str) -> list:
+        """Execute an NRQL query via the NewRelic GraphQL API.
+
+        Args:
+            nrql_query: The NRQL query string to execute
+
+        Returns:
+            list: The query results from NewRelic (extracted from the nested response)
+
+        Raises:
+            requests.exceptions.HTTPError: If the API request fails
+            Exception: If GraphQL returns errors
+        """
+        # Build the GraphQL query using variables to prevent injection
+        # Note: New Relic's GraphQL API requires the account ID to be inline, but we can use variables for the NRQL query
+        graphql_query = {
+            "query": f"""
+            query ExecuteNRQL($nrqlQuery: Nrql!) {{
+                actor {{
+                    account(id: {self.account_id}) {{
+                        nrql(query: $nrqlQuery) {{
+                            results
+                        }}
+                    }}
+                }}
+            }}
+            """,
+            "variables": {"nrqlQuery": nrql_query},
+        }
+
+        logger.info(f"Executing NRQL query: {nrql_query}")
+        response = self._make_request(graphql_query)
+
+        # Extract just the results array from the nested response
+        try:
+            results = response["data"]["actor"]["account"]["nrql"]["results"]
+            return results
+        except (KeyError, TypeError) as e:
+            raise Exception(
+                f"Failed to extract results from NewRelic response: {e}"
+            ) from e
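
For orientation, here is a minimal sketch of how this new wrapper could be exercised on its own, outside the toolset plumbing. The environment variable names are placeholders, not part of the package:

```python
# Hypothetical standalone usage of the NewRelicAPI wrapper added above.
# NEW_RELIC_API_KEY and NEW_RELIC_ACCOUNT_ID are assumed environment variables.
import os

from holmes.plugins.toolsets.newrelic.new_relic_api import NewRelicAPI

api = NewRelicAPI(
    api_key=os.environ["NEW_RELIC_API_KEY"],
    account_id=os.environ["NEW_RELIC_ACCOUNT_ID"],  # must be numeric (validated in __init__)
    is_eu_datacenter=False,  # set True for accounts hosted in the EU datacenter
)

# execute_nrql_query returns the bare `results` list from the GraphQL response.
results = api.execute_nrql_query(
    "SELECT count(*) FROM Transaction FACET appName SINCE 1 hour ago"
)
for row in results:
    print(row)
```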
holmes/plugins/toolsets/newrelic/newrelic.jinja2 (new file):

@@ -0,0 +1,41 @@
+New Relic provides distributed tracing data along with logs and metrics.
+
+Assume every application has New Relic tracing data.
+
+Use `nrql_query` to run a NRQL query.
+
+**NRQL (New Relic Query Language)** is used to query all telemetry data in New Relic. The main event types are:
+
+- **Transaction**: High-level APM data (requests, API calls)
+- **Span**: Distributed tracing data (individual operations)
+- **Log**: Centralized log data
+- **Metric**: Time-series metrics data
+
+### Usage Workflow
+
+#### 1. Discovering Available Data
+
+Start by understanding what's available. Here are some examples:
+- **ALWAYS** start by getting all the available attribute names for what you are looking for. For example, to get them for Transaction in the last 24 hours, use: `SELECT keyset() FROM Transaction SINCE 24 hours ago`
+- After you find the keyset `appName`, you can use it to get the available applications: `SELECT uniques(appName) FROM Transaction SINCE 1 hour ago`
+Note: Use `SHOW EVENT TYPES` to see all event types in the account, in addition to Transaction, Span, Log, or Metric.
+
+#### 2. Querying Telemetry Data
+
+- If you already have an application name, you can query its traces directly
+- **Time range is recommended**: While not strictly required, most queries should include SINCE for performance
+
+#### 3. Querying Traces
+- Always validate first: run the base query without FACET (or a quick LIMIT) to confirm data exists; if results are empty, adjust filters or time range before proceeding.
+- Only attempt a FACET after confirming the field has values; if not, either try known alternatives or skip faceting entirely.
+- When investigating a trace, also look at its attributes
+- ***When investigating latency, ALWAYS identify the specific component or attribute in the span causing significant latency***; your investigation is not complete without this
+- If you need to filter by time, NEVER filter in the WHERE clause using the timestamp field. Instead, you should ALWAYS use the `SINCE` or `SINCE ... UNTIL ...` syntax, which is the recommended way to run time-based filters in New Relic. Moreover, even if the user asks you to filter using the timestamp field directly, don't adhere to their request - make the necessary adjustments to translate it into `SINCE` or `SINCE ... UNTIL ...` syntax!
+
+### Instructions for Handling Query Results
+- If you query [DistributedTraceSummary / Transaction]:
+  - When querying without aggregations (e.g. without count(*), average(attribute), sum(attribute), min(attribute), etc.):
+    - ALWAYS start by querying all fields using `SELECT * FROM`. We need as many fields as possible to visualize the traces to the user. However, the trade-off is that we might exceed the context size. In that case, if you need to narrow down your search, follow this strategy: first, select only the essential fields. If that still fails, add the `LIMIT` keyword to the query. `LIMIT` should always be the second option; we prefer to show the user as many traces as we can. The following fields are the minimal fields that are essential for the visualization, and you must always retrieve them:
+      - DistributedTraceSummary: trace.id, spanCount, root.entity.accountId, root.entity.guid, root.entity.name, root.span.name, timestamp, duration.ms
+      - Transaction: traceId, tags.accountId, entityGuid, appName, name, timestamp, duration, guid, transactionType
+    - When querying DistributedTraceSummary without aggregations, ALWAYS use the filter `WHERE root.span.eventType = 'Span'`
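
The workflow this prompt describes maps directly onto the `NewRelicAPI` wrapper from the previous hunk. A sketch of the discover-then-query sequence, where the credentials and the 'checkout' app name are placeholders:

```python
# Illustrative discovery sequence following the prompt's workflow above.
from holmes.plugins.toolsets.newrelic.new_relic_api import NewRelicAPI

api = NewRelicAPI(api_key="NRAK-XXXXXXXX", account_id="1234567")  # placeholder credentials

# 1. Discover available attribute names before anything else.
keys = api.execute_nrql_query("SELECT keyset() FROM Transaction SINCE 24 hours ago")

# 2. Use a discovered attribute to enumerate applications.
apps = api.execute_nrql_query("SELECT uniques(appName) FROM Transaction SINCE 1 hour ago")

# 3. Filter by time with SINCE ... UNTIL ..., never with WHERE on the timestamp field.
traces = api.execute_nrql_query(
    "SELECT * FROM Transaction WHERE appName = 'checkout' SINCE 3 hours ago UNTIL 1 hour ago"
)
```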
holmes/plugins/toolsets/newrelic/newrelic.py (new file):

@@ -0,0 +1,211 @@
+import os
+import logging
+from typing import Any, Optional, Dict, List
+from holmes.core.tools import (
+    CallablePrerequisite,
+    Tool,
+    ToolInvokeContext,
+    ToolParameter,
+    Toolset,
+    ToolsetTag,
+)
+from pydantic import BaseModel
+from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus
+from holmes.plugins.toolsets.utils import toolset_name_for_one_liner
+from holmes.plugins.toolsets.newrelic.new_relic_api import NewRelicAPI
+import yaml
+from holmes.utils.keygen_utils import generate_random_key
+
+
+class ExecuteNRQLQuery(Tool):
+    def __init__(self, toolset: "NewRelicToolset"):
+        super().__init__(
+            name="newrelic_execute_nrql_query",
+            description="Get Traces, APM, Spans, Logs and more by executing a NRQL query in New Relic. "
+            "Returns the result of the NRQL function. "
+            "⚠️ CRITICAL: NRQL silently returns empty results for invalid queries instead of errors. "
+            "If you get empty results, your query likely has issues such as: "
+            "1) Wrong attribute names (use SELECT keyset() first to verify), "
+            "2) Type mismatches (string vs numeric fields), "
+            "3) Wrong event type. "
+            "Always verify attribute names and types before querying.",
+            parameters={
+                "query": ToolParameter(
+                    description="""The NRQL query string to execute.
+
+MANDATORY: Before querying any event type, ALWAYS run `SELECT keyset() FROM <EventType> SINCE <timeframe>` to discover available attributes. Never use attributes without confirming they exist first. Make sure to remember which fields are stringKeys, numericKeys or booleanKeys as this will be important in subsequent queries.
+
+Example: Before querying Transactions, run: `SELECT keyset() FROM Transaction SINCE 24 hours ago`
+
+### ⚠️ Critical Rule: NRQL `FACET` Usage
+
+When using **FACET** in NRQL:
+- Any **non-constant value** in the `SELECT` clause **must be aggregated**.
+- The attribute you **FACET** on must **not appear in `SELECT`** unless it's wrapped in an aggregation.
+
+#### ✅ Correct
+```nrql
+-- Aggregated metric + facet
+SELECT count(*) FROM Transaction FACET transactionType
+
+-- Multiple aggregations with facet
+SELECT count(*), average(duration) FROM Transaction FACET transactionType
+```
+
+#### ❌ Incorrect
+```nrql
+-- Not allowed: raw attribute in SELECT
+SELECT count(*), transactionType FROM Transaction FACET transactionType
+```
+""",
+                    type="string",
+                    required=True,
+                ),
+                "description": ToolParameter(
+                    description="A brief 6-word human-understandable description of the query you are running.",
+                    type="string",
+                    required=True,
+                ),
+                "query_type": ToolParameter(
+                    description="Either 'Metrics', 'Logs', 'Traces', 'Discover Attributes' or 'Other'.",
+                    type="string",
+                    required=True,
+                ),
+            },
+        )
+        self._toolset = toolset
+
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
+        if not self._toolset.nr_api_key or not self._toolset.nr_account_id:
+            raise ValueError("NewRelic API key or account ID is not configured")
+
+        api = NewRelicAPI(
+            api_key=self._toolset.nr_api_key,
+            account_id=self._toolset.nr_account_id,
+            is_eu_datacenter=self._toolset.is_eu_datacenter,
+        )
+
+        query = params["query"]
+        result = api.execute_nrql_query(query)
+        qtype = params.get("query_type", "").lower()
+
+        if self._toolset.format_results and qtype == "logs":
+            formatted = self._format_logs(result)
+            final_result = yaml.dump(formatted, default_flow_style=False)
+        else:
+            result_with_key = {
+                "random_key": generate_random_key(),
+                "tool_name": self.name,
+                "query": query,
+                "data": result,
+                "is_eu": self._toolset.is_eu_datacenter,
+            }
+            final_result = yaml.dump(result_with_key, default_flow_style=False)
+        return StructuredToolResult(
+            status=StructuredToolResultStatus.SUCCESS,
+            data=final_result,
+            params=params,
+        )
+
+    def _format_logs(self, records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """
+        Build a single grouped object from a list of log records.
+        """
+        if not records:
+            return []
+
+        try:
+            # Defensive, shallow copy of each record and type validation
+            copied: List[Dict[str, Any]] = []
+            for i, rec in enumerate(records):
+                if not isinstance(rec, dict):
+                    raise TypeError(
+                        f"`records[{i}]` must be a dict, got {type(rec).__name__}"
+                    )
+                copied.append(dict(rec))
+
+            # Determine common fields by walking keys
+            common_fields: Dict[str, Any] = {}
+            first = copied[0]
+            for key in first.keys():
+                value = first.get(key)
+                # The key + value must be the same in every record
+                if all(key in r and r.get(key) == value for r in copied[1:]):
+                    common_fields[key] = value
+
+            # Build per-record entries excluding any common fields
+            data_entries: List[Dict[str, Any]] = []
+            for rec in copied:
+                # Keep only fields that aren't common (don't mutate the original record)
+                entry = {k: v for k, v in rec.items() if k not in common_fields}
+                data_entries.append(entry)
+
+            group: Dict[str, Any] = dict(common_fields)
+            if "data" in group:
+                group["_common.data"] = group.pop("data")
+
+            group["data"] = data_entries
+
+            return [group]
+        except Exception:
+            logging.exception(f"Failed to reformat newrelic logs {records}")
+            return records
+
+    def get_parameterized_one_liner(self, params) -> str:
+        description = params.get("description", "")
+        return f"{toolset_name_for_one_liner(self._toolset.name)}: Execute NRQL ({description})"
+
+
+class NewrelicConfig(BaseModel):
+    nr_api_key: Optional[str] = None
+    nr_account_id: Optional[str] = None
+    is_eu_datacenter: Optional[bool] = False
+    format_results: Optional[bool] = False
+
+
+class NewRelicToolset(Toolset):
+    nr_api_key: Optional[str] = None
+    nr_account_id: Optional[str] = None
+    is_eu_datacenter: bool = False
+    format_results: bool = False
+
+    def __init__(self):
+        super().__init__(
+            name="newrelic",
+            description="Toolset for interacting with New Relic to fetch logs, traces, and execute freeform NRQL queries",
+            docs_url="https://holmesgpt.dev/data-sources/builtin-toolsets/newrelic/",
+            icon_url="https://companieslogo.com/img/orig/NEWR-de5fcb2e.png?t=1720244493",
+            prerequisites=[CallablePrerequisite(callable=self.prerequisites_callable)],  # type: ignore
+            tools=[
+                ExecuteNRQLQuery(self),
+            ],
+            tags=[ToolsetTag.CORE],
+        )
+        template_file_path = os.path.abspath(
+            os.path.join(os.path.dirname(__file__), "newrelic.jinja2")
+        )
+        self._load_llm_instructions(jinja_template=f"file://{template_file_path}")
+
+    def prerequisites_callable(
+        self, config: dict[str, Any]
+    ) -> tuple[bool, Optional[str]]:
+        if not config:
+            return False, "No configuration provided"
+
+        try:
+            nr_config = NewrelicConfig(**config)
+            self.nr_account_id = nr_config.nr_account_id
+            self.nr_api_key = nr_config.nr_api_key
+            self.is_eu_datacenter = nr_config.is_eu_datacenter or False
+            self.format_results = nr_config.format_results or False
+
+            if not self.nr_account_id or not self.nr_api_key:
+                return False, "New Relic account ID or API key is missing"
+
+            return True, None
+        except Exception as e:
+            logging.exception("Failed to set up New Relic toolset")
+            return False, str(e)
+
+    def get_example_config(self) -> Dict[str, Any]:
+        return {}
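
The fields accepted by `NewrelicConfig` above define the toolset's configuration surface. A hypothetical direct call to the new prerequisite check (in a real deployment HolmesGPT passes this dict from its toolset configuration; the key and account ID are placeholders):

```python
# Sketch of configuring the new toolset via its prerequisite check.
from holmes.plugins.toolsets.newrelic.newrelic import NewRelicToolset

toolset = NewRelicToolset()
ok, err = toolset.prerequisites_callable(
    {
        "nr_api_key": "NRAK-XXXXXXXX",  # placeholder New Relic API key
        "nr_account_id": "1234567",     # must parse as an integer (see NewRelicAPI.__init__)
        "is_eu_datacenter": False,
        "format_results": True,         # opt in to the grouped log output above
    }
)
print(ok, err)  # (True, None) when both credentials are present
```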
holmes/plugins/toolsets/opensearch/opensearch.py:

@@ -8,6 +8,7 @@ from holmes.core.tools import (
     CallablePrerequisite,
     StructuredToolResult,
     Tool,
+    ToolInvokeContext,
     ToolParameter,
     StructuredToolResultStatus,
     Toolset,
@@ -93,9 +94,7 @@ class ListShards(BaseOpenSearchTool):
             toolset=toolset,
         )
 
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         client = get_client(self.toolset.clients, host=params.get("host", ""))
         shards = client.client.cat.shards()
         return StructuredToolResult(
@@ -124,9 +123,7 @@ class GetClusterSettings(BaseOpenSearchTool):
             toolset=toolset,
         )
 
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         client = get_client(self.toolset.clients, host=params.get("host"))
         response = client.client.cluster.get_settings(
             include_defaults=True, flat_settings=True
@@ -157,9 +154,7 @@ class GetClusterHealth(BaseOpenSearchTool):
             toolset=toolset,
         )
 
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         client = get_client(self.toolset.clients, host=params.get("host", ""))
         health = client.client.cluster.health()
         return StructuredToolResult(
@@ -182,9 +177,7 @@ class ListOpenSearchHosts(BaseOpenSearchTool):
             toolset=toolset,
         )
 
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         hosts = [host for client in self.toolset.clients for host in client.hosts]
         return StructuredToolResult(
             status=StructuredToolResultStatus.SUCCESS,
holmes/plugins/toolsets/opensearch/opensearch_traces.py:

@@ -7,6 +7,7 @@ from cachetools import TTLCache  # type: ignore
 from holmes.core.tools import (
     CallablePrerequisite,
     Tool,
+    ToolInvokeContext,
     ToolParameter,
     ToolsetTag,
 )
@@ -34,9 +35,7 @@ class GetTracesFields(Tool):
         self._toolset = toolset
         self._cache = None
 
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         try:
             if not self._cache and self._toolset.opensearch_config.fields_ttl_seconds:
                 self._cache = TTLCache(
@@ -129,9 +128,7 @@ class TracesSearchQuery(Tool):
         self._toolset = toolset
         self._cache = None
 
-    def _invoke(
-        self, params: dict, user_approved: bool = False
-    ) -> StructuredToolResult:
+    def _invoke(self, params: dict, context: ToolInvokeContext) -> StructuredToolResult:
         err_msg = ""
         try:
             body = json.loads(get_param_or_raise(params, "query"))