holmesgpt 0.13.2__py3-none-any.whl → 0.16.2a0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- holmes/__init__.py +1 -1
- holmes/clients/robusta_client.py +17 -4
- holmes/common/env_vars.py +40 -1
- holmes/config.py +114 -144
- holmes/core/conversations.py +53 -14
- holmes/core/feedback.py +191 -0
- holmes/core/investigation.py +18 -22
- holmes/core/llm.py +489 -88
- holmes/core/models.py +103 -1
- holmes/core/openai_formatting.py +13 -0
- holmes/core/prompt.py +1 -1
- holmes/core/safeguards.py +4 -4
- holmes/core/supabase_dal.py +293 -100
- holmes/core/tool_calling_llm.py +423 -323
- holmes/core/tools.py +311 -33
- holmes/core/tools_utils/token_counting.py +14 -0
- holmes/core/tools_utils/tool_context_window_limiter.py +57 -0
- holmes/core/tools_utils/tool_executor.py +13 -8
- holmes/core/toolset_manager.py +155 -4
- holmes/core/tracing.py +6 -1
- holmes/core/transformers/__init__.py +23 -0
- holmes/core/transformers/base.py +62 -0
- holmes/core/transformers/llm_summarize.py +174 -0
- holmes/core/transformers/registry.py +122 -0
- holmes/core/transformers/transformer.py +31 -0
- holmes/core/truncation/compaction.py +59 -0
- holmes/core/truncation/dal_truncation_utils.py +23 -0
- holmes/core/truncation/input_context_window_limiter.py +218 -0
- holmes/interactive.py +177 -24
- holmes/main.py +7 -4
- holmes/plugins/prompts/_fetch_logs.jinja2 +26 -1
- holmes/plugins/prompts/_general_instructions.jinja2 +1 -2
- holmes/plugins/prompts/_runbook_instructions.jinja2 +23 -12
- holmes/plugins/prompts/conversation_history_compaction.jinja2 +88 -0
- holmes/plugins/prompts/generic_ask.jinja2 +2 -4
- holmes/plugins/prompts/generic_ask_conversation.jinja2 +2 -1
- holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +2 -1
- holmes/plugins/prompts/generic_investigation.jinja2 +2 -1
- holmes/plugins/prompts/investigation_procedure.jinja2 +48 -0
- holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +2 -1
- holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +2 -1
- holmes/plugins/runbooks/__init__.py +117 -18
- holmes/plugins/runbooks/catalog.json +2 -0
- holmes/plugins/toolsets/__init__.py +21 -8
- holmes/plugins/toolsets/aks-node-health.yaml +46 -0
- holmes/plugins/toolsets/aks.yaml +64 -0
- holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +26 -36
- holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +0 -1
- holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +10 -7
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +8 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +8 -6
- holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +9 -7
- holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +9 -6
- holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +9 -6
- holmes/plugins/toolsets/bash/bash_toolset.py +10 -13
- holmes/plugins/toolsets/bash/common/bash.py +7 -7
- holmes/plugins/toolsets/cilium.yaml +284 -0
- holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +5 -3
- holmes/plugins/toolsets/datadog/datadog_api.py +490 -24
- holmes/plugins/toolsets/datadog/datadog_logs_instructions.jinja2 +21 -10
- holmes/plugins/toolsets/datadog/toolset_datadog_general.py +349 -216
- holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +190 -19
- holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +101 -44
- holmes/plugins/toolsets/datadog/toolset_datadog_rds.py +13 -16
- holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +25 -31
- holmes/plugins/toolsets/git.py +51 -46
- holmes/plugins/toolsets/grafana/common.py +15 -3
- holmes/plugins/toolsets/grafana/grafana_api.py +46 -24
- holmes/plugins/toolsets/grafana/grafana_tempo_api.py +454 -0
- holmes/plugins/toolsets/grafana/loki/instructions.jinja2 +9 -0
- holmes/plugins/toolsets/grafana/loki/toolset_grafana_loki.py +117 -0
- holmes/plugins/toolsets/grafana/toolset_grafana.py +211 -91
- holmes/plugins/toolsets/grafana/toolset_grafana_dashboard.jinja2 +27 -0
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +246 -11
- holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +653 -293
- holmes/plugins/toolsets/grafana/trace_parser.py +1 -1
- holmes/plugins/toolsets/internet/internet.py +6 -7
- holmes/plugins/toolsets/internet/notion.py +5 -6
- holmes/plugins/toolsets/investigator/core_investigation.py +42 -34
- holmes/plugins/toolsets/kafka.py +25 -36
- holmes/plugins/toolsets/kubernetes.yaml +58 -84
- holmes/plugins/toolsets/kubernetes_logs.py +6 -6
- holmes/plugins/toolsets/kubernetes_logs.yaml +32 -0
- holmes/plugins/toolsets/logging_utils/logging_api.py +80 -4
- holmes/plugins/toolsets/mcp/toolset_mcp.py +181 -55
- holmes/plugins/toolsets/newrelic/__init__.py +0 -0
- holmes/plugins/toolsets/newrelic/new_relic_api.py +125 -0
- holmes/plugins/toolsets/newrelic/newrelic.jinja2 +41 -0
- holmes/plugins/toolsets/newrelic/newrelic.py +163 -0
- holmes/plugins/toolsets/opensearch/opensearch.py +10 -17
- holmes/plugins/toolsets/opensearch/opensearch_logs.py +7 -7
- holmes/plugins/toolsets/opensearch/opensearch_ppl_query_docs.jinja2 +1616 -0
- holmes/plugins/toolsets/opensearch/opensearch_query_assist.py +78 -0
- holmes/plugins/toolsets/opensearch/opensearch_query_assist_instructions.jinja2 +223 -0
- holmes/plugins/toolsets/opensearch/opensearch_traces.py +13 -16
- holmes/plugins/toolsets/openshift.yaml +283 -0
- holmes/plugins/toolsets/prometheus/prometheus.py +915 -390
- holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +43 -2
- holmes/plugins/toolsets/prometheus/utils.py +28 -0
- holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +9 -10
- holmes/plugins/toolsets/robusta/robusta.py +236 -65
- holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +26 -9
- holmes/plugins/toolsets/runbook/runbook_fetcher.py +137 -26
- holmes/plugins/toolsets/service_discovery.py +1 -1
- holmes/plugins/toolsets/servicenow_tables/instructions.jinja2 +83 -0
- holmes/plugins/toolsets/servicenow_tables/servicenow_tables.py +426 -0
- holmes/plugins/toolsets/utils.py +88 -0
- holmes/utils/config_utils.py +91 -0
- holmes/utils/default_toolset_installation_guide.jinja2 +1 -22
- holmes/utils/env.py +7 -0
- holmes/utils/global_instructions.py +75 -10
- holmes/utils/holmes_status.py +2 -1
- holmes/utils/holmes_sync_toolsets.py +0 -2
- holmes/utils/krr_utils.py +188 -0
- holmes/utils/sentry_helper.py +41 -0
- holmes/utils/stream.py +61 -7
- holmes/version.py +34 -14
- holmesgpt-0.16.2a0.dist-info/LICENSE +178 -0
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/METADATA +29 -27
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/RECORD +126 -102
- holmes/core/performance_timing.py +0 -72
- holmes/plugins/toolsets/grafana/tempo_api.py +0 -124
- holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +0 -110
- holmes/plugins/toolsets/newrelic.py +0 -231
- holmes/plugins/toolsets/servicenow/install.md +0 -37
- holmes/plugins/toolsets/servicenow/instructions.jinja2 +0 -3
- holmes/plugins/toolsets/servicenow/servicenow.py +0 -219
- holmesgpt-0.13.2.dist-info/LICENSE.txt +0 -21
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/WHEEL +0 -0
- {holmesgpt-0.13.2.dist-info → holmesgpt-0.16.2a0.dist-info}/entry_points.txt +0 -0
holmes/core/conversations.py
CHANGED

```diff
@@ -1,7 +1,6 @@
 from typing import Dict, List, Optional
 
 import sentry_sdk
-
 from holmes.config import Config
 from holmes.core.models import (
     ToolCallConversationResult,
@@ -10,9 +9,10 @@ from holmes.core.models import (
 )
 from holmes.plugins.prompts import load_and_render_prompt
 from holmes.core.tool_calling_llm import ToolCallingLLM
+from holmes.plugins.runbooks import RunbookCatalog
 from holmes.utils.global_instructions import (
     Instructions,
-
+    add_runbooks_to_user_prompt,
 )
 
 DEFAULT_TOOL_SIZE = 10000
@@ -26,7 +26,8 @@ def calculate_tool_size(
         return DEFAULT_TOOL_SIZE
 
     context_window = ai.llm.get_context_window_size()
-
+    tokens = ai.llm.count_tokens(messages_without_tools)
+    message_size_without_tools = tokens.total_tokens
     maximum_output_token = ai.llm.get_maximum_output_token()
 
     tool_size = min(
@@ -63,6 +64,7 @@ def build_issue_chat_messages(
     ai: ToolCallingLLM,
     config: Config,
     global_instructions: Optional[Instructions] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ):
     """
     This function generates a list of messages for issue conversation and ensures that the message sequence adheres to the model's context window limitations
@@ -119,8 +121,10 @@ def build_issue_chat_messages(
     tools_for_investigation = issue_chat_request.investigation_result.tools
 
     if not conversation_history or len(conversation_history) == 0:
-        user_prompt =
-            user_prompt,
+        user_prompt = add_runbooks_to_user_prompt(
+            user_prompt=user_prompt,
+            runbook_catalog=runbooks,
+            global_instructions=global_instructions,
         )
 
         number_of_tools_for_investigation = len(tools_for_investigation)  # type: ignore
@@ -133,6 +137,7 @@ def build_issue_chat_messages(
                 "issue": issue_chat_request.issue_type,
                 "toolsets": ai.tool_executor.toolsets,
                 "cluster_name": config.cluster_name,
+                "runbooks_enabled": True if runbooks else False,
             },
         )
         messages = [
@@ -153,6 +158,7 @@ def build_issue_chat_messages(
            "issue": issue_chat_request.issue_type,
            "toolsets": ai.tool_executor.toolsets,
            "cluster_name": config.cluster_name,
+           "runbooks_enabled": True if runbooks else False,
        }
        system_prompt_without_tools = load_and_render_prompt(
            template_path, template_context_without_tools
@@ -186,6 +192,7 @@ def build_issue_chat_messages(
            "issue": issue_chat_request.issue_type,
            "toolsets": ai.tool_executor.toolsets,
            "cluster_name": config.cluster_name,
+           "runbooks_enabled": True if runbooks else False,
        }
        system_prompt_with_truncated_tools = load_and_render_prompt(
            template_path, truncated_template_context
@@ -201,8 +208,10 @@ def build_issue_chat_messages(
         },
     ]
 
-    user_prompt =
-        user_prompt,
+    user_prompt = add_runbooks_to_user_prompt(
+        user_prompt=user_prompt,
+        runbook_catalog=runbooks,
+        global_instructions=global_instructions,
     )
 
     conversation_history.append(
@@ -227,6 +236,7 @@ def build_issue_chat_messages(
        "issue": issue_chat_request.issue_type,
        "toolsets": ai.tool_executor.toolsets,
        "cluster_name": config.cluster_name,
+       "runbooks_enabled": True if runbooks else False,
    }
    system_prompt_without_tools = load_and_render_prompt(
        template_path, template_context_without_tools
@@ -250,6 +260,7 @@ def build_issue_chat_messages(
        "issue": issue_chat_request.issue_type,
        "toolsets": ai.tool_executor.toolsets,
        "cluster_name": config.cluster_name,
+       "runbooks_enabled": True if runbooks else False,
    }
    system_prompt_with_truncated_tools = load_and_render_prompt(
        template_path, template_context
@@ -262,7 +273,11 @@
 
 
 def add_or_update_system_prompt(
-    conversation_history: List[Dict[str, str]],
+    conversation_history: List[Dict[str, str]],
+    ai: ToolCallingLLM,
+    config: Config,
+    additional_system_prompt: Optional[str] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ):
     """Either add the system prompt or replace an existing system prompt.
     As a 'defensive' measure, this code will only replace an existing system prompt if it is the
@@ -274,9 +289,12 @@ def add_or_update_system_prompt(
     context = {
         "toolsets": ai.tool_executor.toolsets,
         "cluster_name": config.cluster_name,
+        "runbooks_enabled": True if runbooks else False,
     }
 
     system_prompt = load_and_render_prompt(template_path, context)
+    if additional_system_prompt:
+        system_prompt = system_prompt + "\n" + additional_system_prompt
 
     if not conversation_history or len(conversation_history) == 0:
         conversation_history.append({"role": "system", "content": system_prompt})
@@ -303,6 +321,8 @@ def build_chat_messages(
     ai: ToolCallingLLM,
     config: Config,
     global_instructions: Optional[Instructions] = None,
+    additional_system_prompt: Optional[str] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ) -> List[dict]:
     """
     This function generates a list of messages for general chat conversation and ensures that the message sequence adheres to the model's context window limitations
@@ -358,10 +378,18 @@ def build_chat_messages(
     conversation_history = conversation_history.copy()
 
     conversation_history = add_or_update_system_prompt(
-        conversation_history=conversation_history,
+        conversation_history=conversation_history,
+        ai=ai,
+        config=config,
+        additional_system_prompt=additional_system_prompt,
+        runbooks=runbooks,
     )
 
-    ask =
+    ask = add_runbooks_to_user_prompt(
+        user_prompt=ask,
+        runbook_catalog=runbooks,
+        global_instructions=global_instructions,
+    )
 
     conversation_history.append(  # type: ignore
         {
@@ -369,6 +397,7 @@ def build_chat_messages(
             "content": ask,
         },
     )
+
     number_of_tools = len(
         [message for message in conversation_history if message.get("role") == "tool"]  # type: ignore
     )
@@ -393,6 +422,7 @@ def build_workload_health_chat_messages(
     ai: ToolCallingLLM,
     config: Config,
     global_instructions: Optional[Instructions] = None,
+    runbooks: Optional[RunbookCatalog] = None,
 ):
     """
     This function generates a list of messages for workload health conversation and ensures that the message sequence adheres to the model's context window limitations
@@ -451,8 +481,10 @@ def build_workload_health_chat_messages(
     resource = workload_health_chat_request.resource
 
     if not conversation_history or len(conversation_history) == 0:
-        user_prompt =
-            user_prompt,
+        user_prompt = add_runbooks_to_user_prompt(
+            user_prompt=user_prompt,
+            runbook_catalog=runbooks,
+            global_instructions=global_instructions,
         )
 
         number_of_tools_for_workload = len(tools_for_workload)  # type: ignore
@@ -465,6 +497,7 @@ def build_workload_health_chat_messages(
                 "resource": resource,
                 "toolsets": ai.tool_executor.toolsets,
                 "cluster_name": config.cluster_name,
+                "runbooks_enabled": True if runbooks else False,
             },
         )
         messages = [
@@ -485,6 +518,7 @@ def build_workload_health_chat_messages(
            "resource": resource,
            "toolsets": ai.tool_executor.toolsets,
            "cluster_name": config.cluster_name,
+           "runbooks_enabled": True if runbooks else False,
        }
        system_prompt_without_tools = load_and_render_prompt(
            template_path, template_context_without_tools
@@ -518,6 +552,7 @@ def build_workload_health_chat_messages(
            "resource": resource,
            "toolsets": ai.tool_executor.toolsets,
            "cluster_name": config.cluster_name,
+           "runbooks_enabled": True if runbooks else False,
        }
        system_prompt_with_truncated_tools = load_and_render_prompt(
            template_path, truncated_template_context
@@ -533,8 +568,10 @@ def build_workload_health_chat_messages(
         },
     ]
 
-    user_prompt =
-        user_prompt,
+    user_prompt = add_runbooks_to_user_prompt(
+        user_prompt=user_prompt,
+        runbook_catalog=runbooks,
+        global_instructions=global_instructions,
     )
 
     conversation_history.append(
@@ -559,6 +596,7 @@ def build_workload_health_chat_messages(
        "resource": resource,
        "toolsets": ai.tool_executor.toolsets,
        "cluster_name": config.cluster_name,
+       "runbooks_enabled": True if runbooks else False,
    }
    system_prompt_without_tools = load_and_render_prompt(
        template_path, template_context_without_tools
@@ -582,6 +620,7 @@ def build_workload_health_chat_messages(
        "resource": resource,
        "toolsets": ai.tool_executor.toolsets,
        "cluster_name": config.cluster_name,
+       "runbooks_enabled": True if runbooks else False,
    }
    system_prompt_with_truncated_tools = load_and_render_prompt(
        template_path, template_context
```
holmes/core/feedback.py
ADDED (new file, all 191 lines added)

```python
import os
from abc import ABC, abstractmethod
from typing import Callable, Optional

from .llm import LLM

DEFAULT_PRIVACY_NOTICE_BANNER = "Your feedback will be used to improve Holmesgpt's performance. Please avoid sharing sensitive personal information. By continuing, you consent to this data usage."
PRIVACY_NOTICE_BANNER = os.environ.get(
    "PRIVACY_NOTICE_BANNER", DEFAULT_PRIVACY_NOTICE_BANNER
)


class FeedbackInfoBase(ABC):
    """Abstract base class for all feedback-related classes that must implement to_dict()."""

    @abstractmethod
    def to_dict(self) -> dict:
        """Convert to dictionary representation. Must be implemented by all subclasses."""
        pass


class FeedbackLLM(FeedbackInfoBase):
    """Class to represent an LLM in the feedback."""

    def __init__(self, model: str, max_context_size: int):
        self.model = model
        self.max_context_size = max_context_size

    def update_from_llm(self, llm: LLM):
        self.model = llm.model
        self.max_context_size = llm.get_context_window_size()

    def to_dict(self) -> dict:
        """Convert to dictionary representation."""
        return self.__dict__


# TODO: extend the FeedbackLLMResponse to include each tool call's result details used to evaluate the overall response.
# Currently planned tool call details:
# - toolcall parameter and success/failure, toolcall truncation size
# - Holmes plan (todo list)
# - Holmes intermediate output
class FeedbackLLMResponse(FeedbackInfoBase):
    """Class to represent an LLM response in the feedback"""

    def __init__(self, user_ask: str, response: str):
        self.user_ask = user_ask
        self.response = response

    def to_dict(self) -> dict:
        """Convert to dictionary representation."""
        return self.__dict__


class FeedbackMetadata(FeedbackInfoBase):
    """Class to store feedback metadata."""

    def __init__(self):
        # In iteration mode, there can be multiple ask and response pairs.
        self.llm_responses = []
        self.llm = FeedbackLLM("", 0)

    def add_llm_response(self, user_ask: str, response: str) -> None:
        """Add an LLM response to the metadata."""
        llm_response = FeedbackLLMResponse(user_ask, response)
        self.llm_responses.append(llm_response)

    def update_llm(self, llm: LLM) -> None:
        """Update the LLM information in the metadata."""
        self.llm.update_from_llm(llm)

    def to_dict(self) -> dict:
        """Convert to dictionary representation."""
        return {
            "llm_responses": [resp.to_dict() for resp in self.llm_responses],
            "llm": self.llm.to_dict(),
        }


class UserFeedback(FeedbackInfoBase):
    """Class to store the user's rating and comment on the AI response."""

    def __init__(self, is_positive: bool, comment: Optional[str]):
        self.is_positive = is_positive
        self.comment = comment

    @property
    def rating_text(self) -> str:
        """Return human-readable rating text."""
        return "useful" if self.is_positive else "not useful"

    @property
    def rating_emoji(self) -> str:
        """Return emoji representation of the rating."""
        return "👍" if self.is_positive else "👎"

    def __str__(self) -> str:
        """Return string representation of the feedback."""
        if self.comment:
            return f"Rating: {self.rating_text}. Comment: {self.comment}"
        else:
            return f"Rating: {self.rating_text}. No additional comment."

    def to_dict(self) -> dict:
        """Convert to dictionary representation."""
        return {
            "is_positive": self.is_positive,
            "comment": self.comment,
        }


class Feedback(FeedbackInfoBase):
    """Class to store overall feedback data used to evaluate the AI response."""

    def __init__(self):
        self.metadata = FeedbackMetadata()
        self.user_feedback: Optional[UserFeedback] = None

    def set_user_feedback(self, user_feedback: UserFeedback) -> None:
        """Set the user feedback."""
        self.user_feedback = user_feedback

    def to_dict(self) -> dict:
        """Convert to dictionary representation."""
        return {
            "metadata": self.metadata.to_dict(),
            "user_feedback": self.user_feedback.to_dict()
            if self.user_feedback
            else None,
        }


FeedbackCallback = Callable[[Feedback], None]


def feedback_callback_example(feedback: Feedback) -> None:
    """
    Example implementation of a feedback callback function.

    This function demonstrates how to process feedback data using to_dict() methods
    and could be used for:
    - Logging feedback to files or databases
    - Sending feedback to analytics services
    - Training data collection
    - User satisfaction monitoring

    Args:
        feedback: Feedback object containing user feedback and metadata
    """
    print("\n=== Feedback Received ===")

    # Convert entire feedback to dict first - this is the main data structure
    feedback_dict = feedback.to_dict()
    print(f"Complete feedback dictionary keys: {list(feedback_dict.keys())}")

    # How to check user feedback using to_dict()
    print("\n1. Checking User Feedback:")
    user_feedback_dict = (
        feedback.user_feedback.to_dict() if feedback.user_feedback else None
    )
    if user_feedback_dict:
        print(f" User feedback dict: {user_feedback_dict}")
        print(f" Is positive: {user_feedback_dict['is_positive']}")
        print(f" Comment: {user_feedback_dict['comment'] or 'None'}")
        # You can also access properties through the object:
        print(f" Rating emoji: {feedback.user_feedback.rating_emoji}")  # type: ignore
        print(f" Rating text: {feedback.user_feedback.rating_text}")  # type: ignore
    else:
        print(" No user feedback provided (user_feedback is None)")

    # How to check LLM information using to_dict()
    print("\n2. Checking LLM Information:")
    metadata_dict = feedback.metadata.to_dict()
    llm_dict = metadata_dict["llm"]
    print(f" LLM dict: {llm_dict}")
    print(f" Model: {llm_dict['model']}")
    print(f" Max context size: {llm_dict['max_context_size']}")

    # How to check ask and response pairs using to_dict()
    print("\n3. Checking Ask and Response History:")
    llm_responses_dict = metadata_dict["llm_responses"]
    print(f" Number of exchanges: {len(llm_responses_dict)}")

    for i, response_dict in enumerate(llm_responses_dict, 1):
        print(f" Exchange {i} dict: {list(response_dict.keys())}")
        user_ask = response_dict["user_ask"]
        ai_response = response_dict["response"]
        print(f" User ask: {user_ask}")
        print(f" AI response: {ai_response}")

    print("=== End Feedback ===\n")
```
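
`feedback_callback_example` above documents the consumer side of this new module. For the producer side, here is a hedged sketch of assembling a `Feedback` record and handing it to a `FeedbackCallback` sink; how `holmes/interactive.py` actually wires this up is not part of this excerpt, so the calling pattern below is illustrative only.

```python
from holmes.core.feedback import Feedback, FeedbackCallback, UserFeedback


def collect_feedback(sink: FeedbackCallback) -> None:
    """Assemble a feedback record and deliver it to any Callable[[Feedback], None]."""
    feedback = Feedback()
    # Record one ask/response exchange (iteration mode allows several).
    feedback.metadata.add_llm_response(
        user_ask="Why is my pod crashing?",
        response="The container is OOMKilled; raise its memory limit.",
    )
    # Attach the user's thumbs-up/down rating and optional comment.
    feedback.set_user_feedback(UserFeedback(is_positive=True, comment="Spot on"))
    sink(feedback)  # e.g. feedback_callback_example, or a custom analytics logger


# Any callable matching the alias works as a sink:
collect_feedback(lambda fb: print(fb.to_dict()))
```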
holmes/core/investigation.py
CHANGED

```diff
@@ -1,6 +1,7 @@
 import logging
 from typing import Optional
 
+
 from holmes.common.env_vars import HOLMES_POST_PROCESSING_PROMPT
 from holmes.config import Config
 from holmes.core.investigation_structured_output import process_response_into_sections
@@ -8,7 +9,8 @@ from holmes.core.issue import Issue
 from holmes.core.models import InvestigateRequest, InvestigationResult
 from holmes.core.supabase_dal import SupabaseDal
 from holmes.core.tracing import DummySpan, SpanType
-from holmes.
+from holmes.plugins.runbooks import RunbookCatalog
+from holmes.utils.global_instructions import add_runbooks_to_user_prompt
 
 from holmes.core.investigation_structured_output import (
     DEFAULT_SECTIONS,
@@ -25,8 +27,8 @@ def investigate_issues(
     config: Config,
     model: Optional[str] = None,
     trace_span=DummySpan(),
+    runbooks: Optional[RunbookCatalog] = None,
 ) -> InvestigationResult:
-    config.load_robusta_api_key(dal=dal)
     context = dal.get_issue_data(investigate_request.context.get("robusta_issue_id"))
 
     resource_instructions = dal.get_resource_instructions(
@@ -61,6 +63,7 @@ def investigate_issues(
         global_instructions=global_instructions,
         sections=investigate_request.sections,
         trace_span=trace_span,
+        runbooks=runbooks,
     )
 
     (text_response, sections) = process_response_into_sections(investigation.result)
@@ -71,6 +74,7 @@ def investigate_issues(
         sections=sections,
         tool_calls=investigation.tool_calls or [],
         instructions=investigation.instructions,
+        metadata=investigation.metadata,
     )
 
 
@@ -80,7 +84,6 @@ def get_investigation_context(
     config: Config,
     request_structured_output_from_llm: Optional[bool] = None,
 ):
-    config.load_robusta_api_key(dal=dal)
     ai = config.create_issue_investigator(dal=dal, model=investigate_request.model)
 
     raw_data = investigate_request.model_dump()
@@ -96,18 +99,11 @@ def get_investigation_context(
         raw=raw_data,
     )
 
-
+    issue_instructions = ai.runbook_manager.get_instructions_for_issue(issue)
 
-
+    resource_instructions = dal.get_resource_instructions(
         "alert", investigate_request.context.get("issue_type")
     )
-    if instructions is not None and instructions.instructions:
-        runbooks.extend(instructions.instructions)
-    if instructions is not None and len(instructions.documents) > 0:
-        docPrompts = []
-        for document in instructions.documents:
-            docPrompts.append(f"* fetch information from this URL: {document.url}\n")
-        runbooks.extend(docPrompts)
 
     # This section is about setting vars to request the LLM to return structured output.
     # It does not mean that Holmes will not return structured sections for investigation as it is
@@ -132,6 +128,7 @@ def get_investigation_context(
     else:
         logging.info("Structured output is disabled for this request")
 
+    runbook_catalog = config.get_runbook_catalog()
     system_prompt = load_and_render_prompt(
         investigate_request.prompt_template,
         {
@@ -140,21 +137,20 @@ def get_investigation_context(
             "structured_output": request_structured_output_from_llm,
             "toolsets": ai.tool_executor.toolsets,
             "cluster_name": config.cluster_name,
+            "runbooks_enabled": True if runbook_catalog else False,
         },
     )
-
     user_prompt = ""
-    if runbooks:
-        for runbook_str in runbooks:
-            user_prompt += f"* {runbook_str}\n"
-
-        user_prompt = f'My instructions to check \n"""{user_prompt}"""'
 
     global_instructions = dal.get_global_instructions_for_account()
-    user_prompt =
-        user_prompt,
+    user_prompt = add_runbooks_to_user_prompt(
+        user_prompt=user_prompt,
+        runbook_catalog=runbook_catalog,
+        global_instructions=global_instructions,
+        issue_instructions=issue_instructions,
+        resource_instructions=resource_instructions,
     )
 
-    user_prompt = f"{user_prompt}\n This is context from the issue
+    user_prompt = f"{user_prompt}\n #This is context from the issue:\n{issue.raw}"
 
-    return ai, system_prompt, user_prompt, response_format, sections,
+    return ai, system_prompt, user_prompt, response_format, sections, issue_instructions
```
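
These changes mirror conversations.py: `investigate_issues` now accepts the runbook catalog, `get_investigation_context` builds the user prompt through `add_runbooks_to_user_prompt` (folding in issue and resource instructions) and returns `issue_instructions`, and both functions stop calling `config.load_robusta_api_key(dal=dal)` themselves. A hedged sketch of the new caller-side shape follows; the leading parameter names `investigate_request` and `dal` are assumed from the variable names visible in the hunks.

```python
from holmes.config import Config
from holmes.core.investigation import investigate_issues
from holmes.core.models import InvestigateRequest, InvestigationResult
from holmes.core.supabase_dal import SupabaseDal


def run_investigation(
    investigate_request: InvestigateRequest, dal: SupabaseDal, config: Config
) -> InvestigationResult:
    """Hedged sketch of a caller after this change."""
    # config.load_robusta_api_key(dal=dal) was removed from investigate_issues(),
    # so any API-key loading presumably happens earlier in the request path now.
    result = investigate_issues(
        investigate_request=investigate_request,  # assumed leading parameters
        dal=dal,
        config=config,
        runbooks=config.get_runbook_catalog(),    # new optional parameter
    )
    print(result.metadata)  # InvestigationResult now carries the investigation metadata
    return result
```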