holmesgpt 0.11.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of holmesgpt might be problematic. Click here for more details.

Files changed (183)
  1. holmes/.git_archival.json +7 -0
  2. holmes/__init__.py +76 -0
  3. holmes/__init__.py.bak +76 -0
  4. holmes/clients/robusta_client.py +24 -0
  5. holmes/common/env_vars.py +47 -0
  6. holmes/config.py +526 -0
  7. holmes/core/__init__.py +0 -0
  8. holmes/core/conversations.py +578 -0
  9. holmes/core/investigation.py +152 -0
  10. holmes/core/investigation_structured_output.py +264 -0
  11. holmes/core/issue.py +54 -0
  12. holmes/core/llm.py +250 -0
  13. holmes/core/models.py +157 -0
  14. holmes/core/openai_formatting.py +51 -0
  15. holmes/core/performance_timing.py +72 -0
  16. holmes/core/prompt.py +42 -0
  17. holmes/core/resource_instruction.py +17 -0
  18. holmes/core/runbooks.py +26 -0
  19. holmes/core/safeguards.py +120 -0
  20. holmes/core/supabase_dal.py +540 -0
  21. holmes/core/tool_calling_llm.py +798 -0
  22. holmes/core/tools.py +566 -0
  23. holmes/core/tools_utils/__init__.py +0 -0
  24. holmes/core/tools_utils/tool_executor.py +65 -0
  25. holmes/core/tools_utils/toolset_utils.py +52 -0
  26. holmes/core/toolset_manager.py +418 -0
  27. holmes/interactive.py +229 -0
  28. holmes/main.py +1041 -0
  29. holmes/plugins/__init__.py +0 -0
  30. holmes/plugins/destinations/__init__.py +6 -0
  31. holmes/plugins/destinations/slack/__init__.py +2 -0
  32. holmes/plugins/destinations/slack/plugin.py +163 -0
  33. holmes/plugins/interfaces.py +32 -0
  34. holmes/plugins/prompts/__init__.py +48 -0
  35. holmes/plugins/prompts/_current_date_time.jinja2 +1 -0
  36. holmes/plugins/prompts/_default_log_prompt.jinja2 +11 -0
  37. holmes/plugins/prompts/_fetch_logs.jinja2 +36 -0
  38. holmes/plugins/prompts/_general_instructions.jinja2 +86 -0
  39. holmes/plugins/prompts/_global_instructions.jinja2 +12 -0
  40. holmes/plugins/prompts/_runbook_instructions.jinja2 +13 -0
  41. holmes/plugins/prompts/_toolsets_instructions.jinja2 +56 -0
  42. holmes/plugins/prompts/generic_ask.jinja2 +36 -0
  43. holmes/plugins/prompts/generic_ask_conversation.jinja2 +32 -0
  44. holmes/plugins/prompts/generic_ask_for_issue_conversation.jinja2 +50 -0
  45. holmes/plugins/prompts/generic_investigation.jinja2 +42 -0
  46. holmes/plugins/prompts/generic_post_processing.jinja2 +13 -0
  47. holmes/plugins/prompts/generic_ticket.jinja2 +12 -0
  48. holmes/plugins/prompts/investigation_output_format.jinja2 +32 -0
  49. holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +84 -0
  50. holmes/plugins/prompts/kubernetes_workload_chat.jinja2 +39 -0
  51. holmes/plugins/runbooks/README.md +22 -0
  52. holmes/plugins/runbooks/__init__.py +100 -0
  53. holmes/plugins/runbooks/catalog.json +14 -0
  54. holmes/plugins/runbooks/jira.yaml +12 -0
  55. holmes/plugins/runbooks/kube-prometheus-stack.yaml +10 -0
  56. holmes/plugins/runbooks/networking/dns_troubleshooting_instructions.md +66 -0
  57. holmes/plugins/runbooks/upgrade/upgrade_troubleshooting_instructions.md +44 -0
  58. holmes/plugins/sources/github/__init__.py +77 -0
  59. holmes/plugins/sources/jira/__init__.py +123 -0
  60. holmes/plugins/sources/opsgenie/__init__.py +93 -0
  61. holmes/plugins/sources/pagerduty/__init__.py +147 -0
  62. holmes/plugins/sources/prometheus/__init__.py +0 -0
  63. holmes/plugins/sources/prometheus/models.py +104 -0
  64. holmes/plugins/sources/prometheus/plugin.py +154 -0
  65. holmes/plugins/toolsets/__init__.py +171 -0
  66. holmes/plugins/toolsets/aks-node-health.yaml +65 -0
  67. holmes/plugins/toolsets/aks.yaml +86 -0
  68. holmes/plugins/toolsets/argocd.yaml +70 -0
  69. holmes/plugins/toolsets/atlas_mongodb/instructions.jinja2 +8 -0
  70. holmes/plugins/toolsets/atlas_mongodb/mongodb_atlas.py +307 -0
  71. holmes/plugins/toolsets/aws.yaml +76 -0
  72. holmes/plugins/toolsets/azure_sql/__init__.py +0 -0
  73. holmes/plugins/toolsets/azure_sql/apis/alert_monitoring_api.py +600 -0
  74. holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +309 -0
  75. holmes/plugins/toolsets/azure_sql/apis/connection_failure_api.py +445 -0
  76. holmes/plugins/toolsets/azure_sql/apis/connection_monitoring_api.py +251 -0
  77. holmes/plugins/toolsets/azure_sql/apis/storage_analysis_api.py +317 -0
  78. holmes/plugins/toolsets/azure_sql/azure_base_toolset.py +55 -0
  79. holmes/plugins/toolsets/azure_sql/azure_sql_instructions.jinja2 +137 -0
  80. holmes/plugins/toolsets/azure_sql/azure_sql_toolset.py +183 -0
  81. holmes/plugins/toolsets/azure_sql/install.md +66 -0
  82. holmes/plugins/toolsets/azure_sql/tools/__init__.py +1 -0
  83. holmes/plugins/toolsets/azure_sql/tools/analyze_connection_failures.py +324 -0
  84. holmes/plugins/toolsets/azure_sql/tools/analyze_database_connections.py +243 -0
  85. holmes/plugins/toolsets/azure_sql/tools/analyze_database_health_status.py +205 -0
  86. holmes/plugins/toolsets/azure_sql/tools/analyze_database_performance.py +249 -0
  87. holmes/plugins/toolsets/azure_sql/tools/analyze_database_storage.py +373 -0
  88. holmes/plugins/toolsets/azure_sql/tools/get_active_alerts.py +237 -0
  89. holmes/plugins/toolsets/azure_sql/tools/get_slow_queries.py +172 -0
  90. holmes/plugins/toolsets/azure_sql/tools/get_top_cpu_queries.py +170 -0
  91. holmes/plugins/toolsets/azure_sql/tools/get_top_data_io_queries.py +188 -0
  92. holmes/plugins/toolsets/azure_sql/tools/get_top_log_io_queries.py +180 -0
  93. holmes/plugins/toolsets/azure_sql/utils.py +83 -0
  94. holmes/plugins/toolsets/bash/__init__.py +0 -0
  95. holmes/plugins/toolsets/bash/bash_instructions.jinja2 +14 -0
  96. holmes/plugins/toolsets/bash/bash_toolset.py +208 -0
  97. holmes/plugins/toolsets/bash/common/bash.py +52 -0
  98. holmes/plugins/toolsets/bash/common/config.py +14 -0
  99. holmes/plugins/toolsets/bash/common/stringify.py +25 -0
  100. holmes/plugins/toolsets/bash/common/validators.py +24 -0
  101. holmes/plugins/toolsets/bash/grep/__init__.py +52 -0
  102. holmes/plugins/toolsets/bash/kubectl/__init__.py +100 -0
  103. holmes/plugins/toolsets/bash/kubectl/constants.py +96 -0
  104. holmes/plugins/toolsets/bash/kubectl/kubectl_describe.py +66 -0
  105. holmes/plugins/toolsets/bash/kubectl/kubectl_events.py +88 -0
  106. holmes/plugins/toolsets/bash/kubectl/kubectl_get.py +108 -0
  107. holmes/plugins/toolsets/bash/kubectl/kubectl_logs.py +20 -0
  108. holmes/plugins/toolsets/bash/kubectl/kubectl_run.py +46 -0
  109. holmes/plugins/toolsets/bash/kubectl/kubectl_top.py +81 -0
  110. holmes/plugins/toolsets/bash/parse_command.py +103 -0
  111. holmes/plugins/toolsets/confluence.yaml +19 -0
  112. holmes/plugins/toolsets/consts.py +5 -0
  113. holmes/plugins/toolsets/coralogix/api.py +158 -0
  114. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +103 -0
  115. holmes/plugins/toolsets/coralogix/utils.py +181 -0
  116. holmes/plugins/toolsets/datadog.py +153 -0
  117. holmes/plugins/toolsets/docker.yaml +46 -0
  118. holmes/plugins/toolsets/git.py +756 -0
  119. holmes/plugins/toolsets/grafana/__init__.py +0 -0
  120. holmes/plugins/toolsets/grafana/base_grafana_toolset.py +54 -0
  121. holmes/plugins/toolsets/grafana/common.py +68 -0
  122. holmes/plugins/toolsets/grafana/grafana_api.py +31 -0
  123. holmes/plugins/toolsets/grafana/loki_api.py +89 -0
  124. holmes/plugins/toolsets/grafana/tempo_api.py +124 -0
  125. holmes/plugins/toolsets/grafana/toolset_grafana.py +102 -0
  126. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +102 -0
  127. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.jinja2 +10 -0
  128. holmes/plugins/toolsets/grafana/toolset_grafana_tempo.py +299 -0
  129. holmes/plugins/toolsets/grafana/trace_parser.py +195 -0
  130. holmes/plugins/toolsets/helm.yaml +42 -0
  131. holmes/plugins/toolsets/internet/internet.py +275 -0
  132. holmes/plugins/toolsets/internet/notion.py +137 -0
  133. holmes/plugins/toolsets/kafka.py +638 -0
  134. holmes/plugins/toolsets/kubernetes.yaml +255 -0
  135. holmes/plugins/toolsets/kubernetes_logs.py +426 -0
  136. holmes/plugins/toolsets/kubernetes_logs.yaml +42 -0
  137. holmes/plugins/toolsets/logging_utils/__init__.py +0 -0
  138. holmes/plugins/toolsets/logging_utils/logging_api.py +217 -0
  139. holmes/plugins/toolsets/logging_utils/types.py +0 -0
  140. holmes/plugins/toolsets/mcp/toolset_mcp.py +135 -0
  141. holmes/plugins/toolsets/newrelic.py +222 -0
  142. holmes/plugins/toolsets/opensearch/__init__.py +0 -0
  143. holmes/plugins/toolsets/opensearch/opensearch.py +245 -0
  144. holmes/plugins/toolsets/opensearch/opensearch_logs.py +151 -0
  145. holmes/plugins/toolsets/opensearch/opensearch_traces.py +211 -0
  146. holmes/plugins/toolsets/opensearch/opensearch_traces_instructions.jinja2 +12 -0
  147. holmes/plugins/toolsets/opensearch/opensearch_utils.py +166 -0
  148. holmes/plugins/toolsets/prometheus/prometheus.py +818 -0
  149. holmes/plugins/toolsets/prometheus/prometheus_instructions.jinja2 +38 -0
  150. holmes/plugins/toolsets/rabbitmq/api.py +398 -0
  151. holmes/plugins/toolsets/rabbitmq/rabbitmq_instructions.jinja2 +37 -0
  152. holmes/plugins/toolsets/rabbitmq/toolset_rabbitmq.py +222 -0
  153. holmes/plugins/toolsets/robusta/__init__.py +0 -0
  154. holmes/plugins/toolsets/robusta/robusta.py +235 -0
  155. holmes/plugins/toolsets/robusta/robusta_instructions.jinja2 +24 -0
  156. holmes/plugins/toolsets/runbook/__init__.py +0 -0
  157. holmes/plugins/toolsets/runbook/runbook_fetcher.py +78 -0
  158. holmes/plugins/toolsets/service_discovery.py +92 -0
  159. holmes/plugins/toolsets/servicenow/install.md +37 -0
  160. holmes/plugins/toolsets/servicenow/instructions.jinja2 +3 -0
  161. holmes/plugins/toolsets/servicenow/servicenow.py +198 -0
  162. holmes/plugins/toolsets/slab.yaml +20 -0
  163. holmes/plugins/toolsets/utils.py +137 -0
  164. holmes/plugins/utils.py +14 -0
  165. holmes/utils/__init__.py +0 -0
  166. holmes/utils/cache.py +84 -0
  167. holmes/utils/cert_utils.py +40 -0
  168. holmes/utils/default_toolset_installation_guide.jinja2 +44 -0
  169. holmes/utils/definitions.py +13 -0
  170. holmes/utils/env.py +53 -0
  171. holmes/utils/file_utils.py +56 -0
  172. holmes/utils/global_instructions.py +20 -0
  173. holmes/utils/holmes_status.py +22 -0
  174. holmes/utils/holmes_sync_toolsets.py +80 -0
  175. holmes/utils/markdown_utils.py +55 -0
  176. holmes/utils/pydantic_utils.py +54 -0
  177. holmes/utils/robusta.py +10 -0
  178. holmes/utils/tags.py +97 -0
  179. holmesgpt-0.11.5.dist-info/LICENSE.txt +21 -0
  180. holmesgpt-0.11.5.dist-info/METADATA +400 -0
  181. holmesgpt-0.11.5.dist-info/RECORD +183 -0
  182. holmesgpt-0.11.5.dist-info/WHEEL +4 -0
  183. holmesgpt-0.11.5.dist-info/entry_points.txt +3 -0
holmes/main.py ADDED
@@ -0,0 +1,1041 @@
1
+ # ruff: noqa: E402
2
+ import os
3
+
4
+ from holmes.utils.cert_utils import add_custom_certificate
5
+
6
+ ADDITIONAL_CERTIFICATE: str = os.environ.get("CERTIFICATE", "")
7
+ if add_custom_certificate(ADDITIONAL_CERTIFICATE):
8
+ print("added custom certificate")
9
+
10
+ # DO NOT ADD ANY IMPORTS OR CODE ABOVE THIS LINE
11
+ # IMPORTING ABOVE MIGHT INITIALIZE AN HTTPS CLIENT THAT DOESN'T TRUST THE CUSTOM CERTIFICATE
12
+
13
+
14
+ import json
15
+ import logging
16
+ import socket
17
+ import uuid
18
+ import warnings
19
+ from enum import Enum
20
+ from pathlib import Path
21
+ from typing import List, Optional
22
+
23
+ import typer
24
+ from rich.console import Console
25
+ from rich.logging import RichHandler
26
+ from rich.markdown import Markdown
27
+ from rich.rule import Rule
28
+
29
+ from holmes import get_version # type: ignore
30
+ from holmes.config import (
31
+ DEFAULT_CONFIG_LOCATION,
32
+ Config,
33
+ SourceFactory,
34
+ SupportedTicketSources,
35
+ )
36
+ from holmes.core.prompt import build_initial_ask_messages
37
+ from holmes.core.resource_instruction import ResourceInstructionDocument
38
+ from holmes.core.tool_calling_llm import LLMResult
39
+ from holmes.core.tools import pretty_print_toolset_status
40
+ from holmes.interactive import run_interactive_loop
41
+ from holmes.plugins.destinations import DestinationType
42
+ from holmes.plugins.interfaces import Issue
43
+ from holmes.plugins.prompts import load_and_render_prompt
44
+ from holmes.plugins.sources.opsgenie import OPSGENIE_TEAM_INTEGRATION_KEY_HELP
45
+ from holmes.utils.file_utils import write_json_file
46
+
47
+ app = typer.Typer(add_completion=False, pretty_exceptions_show_locals=False)
48
+ investigate_app = typer.Typer(
49
+ add_completion=False,
50
+ name="investigate",
51
+ no_args_is_help=True,
52
+ help="Investigate firing alerts or tickets",
53
+ )
54
+ app.add_typer(investigate_app, name="investigate")
55
+ generate_app = typer.Typer(
56
+ add_completion=False,
57
+ name="generate",
58
+ no_args_is_help=True,
59
+ help="Generate new integrations or test data",
60
+ )
61
+ app.add_typer(generate_app, name="generate")
62
+ toolset_app = typer.Typer(
63
+ add_completion=False,
64
+ name="toolset",
65
+ no_args_is_help=True,
66
+ help="Toolset management commands",
67
+ )
68
+ app.add_typer(toolset_app, name="toolset")
69
+
70
+
71
+ class Verbosity(Enum):
72
+ NORMAL = 0
73
+ LOG_QUERIES = 1 # TODO: currently unused
74
+ VERBOSE = 2
75
+ VERY_VERBOSE = 3
76
+
77
+
78
+ def cli_flags_to_verbosity(verbose_flags: List[bool]) -> Verbosity:
79
+ if verbose_flags is None or len(verbose_flags) == 0:
80
+ return Verbosity.NORMAL
81
+ elif len(verbose_flags) == 1:
82
+ return Verbosity.LOG_QUERIES
83
+ elif len(verbose_flags) == 2:
84
+ return Verbosity.VERBOSE
85
+ else:
86
+ return Verbosity.VERY_VERBOSE
87
+
88
+
89
+ def suppress_noisy_logs():
90
+ # disable INFO logs from OpenAI
91
+ logging.getLogger("httpx").setLevel(logging.WARNING)
92
+ # disable INFO logs from LiteLLM
93
+ logging.getLogger("LiteLLM").setLevel(logging.WARNING)
94
+ # disable INFO logs from AWS (relevant when using bedrock)
95
+ logging.getLogger("boto3").setLevel(logging.WARNING)
96
+ logging.getLogger("botocore").setLevel(logging.WARNING)
97
+ # when running in --verbose mode we don't want to see DEBUG logs from these libraries
98
+ logging.getLogger("openai._base_client").setLevel(logging.INFO)
99
+ logging.getLogger("httpcore").setLevel(logging.INFO)
100
+ logging.getLogger("markdown_it").setLevel(logging.INFO)
101
+ # suppress UserWarnings from the slack_sdk module
102
+ warnings.filterwarnings("ignore", category=UserWarning, module="slack_sdk.*")
103
+
104
+
105
+ def init_logging(verbose_flags: Optional[List[bool]] = None):
106
+ verbosity = cli_flags_to_verbosity(verbose_flags) # type: ignore
107
+
108
+ if verbosity == Verbosity.VERY_VERBOSE:
109
+ logging.basicConfig(
110
+ level=logging.DEBUG,
111
+ format="%(message)s",
112
+ handlers=[
113
+ RichHandler(
114
+ show_level=False,
115
+ markup=True,
116
+ show_time=False,
117
+ show_path=False,
118
+ console=Console(width=None),
119
+ )
120
+ ],
121
+ )
122
+ elif verbosity == Verbosity.VERBOSE:
123
+ logging.basicConfig(
124
+ level=logging.INFO,
125
+ format="%(message)s",
126
+ handlers=[
127
+ RichHandler(
128
+ show_level=False,
129
+ markup=True,
130
+ show_time=False,
131
+ show_path=False,
132
+ console=Console(width=None),
133
+ )
134
+ ],
135
+ )
136
+ logging.getLogger().setLevel(logging.DEBUG)
137
+ suppress_noisy_logs()
138
+ else:
139
+ logging.basicConfig(
140
+ level=logging.INFO,
141
+ format="%(message)s",
142
+ handlers=[
143
+ RichHandler(
144
+ show_level=False,
145
+ markup=True,
146
+ show_time=False,
147
+ show_path=False,
148
+ console=Console(width=None),
149
+ )
150
+ ],
151
+ )
152
+ suppress_noisy_logs()
153
+
154
+ logging.debug(f"verbosity is {verbosity}")
155
+
156
+ return Console()
157
+
158
+
159
+ # Common cli options
160
+ # The defaults for options that are also in the config file MUST be None or else the cli defaults will override settings in the config file
161
+ opt_api_key: Optional[str] = typer.Option(
162
+ None,
163
+ help="API key to use for the LLM (if not given, uses environment variables OPENAI_API_KEY or AZURE_API_KEY)",
164
+ )
165
+ opt_model: Optional[str] = typer.Option(None, help="Model to use for the LLM")
166
+ opt_config_file: Optional[Path] = typer.Option(
167
+ DEFAULT_CONFIG_LOCATION, # type: ignore
168
+ "--config",
169
+ help="Path to the config file. Defaults to ~/.holmes/config.yaml when it exists. Command line arguments take precedence over config file settings",
170
+ )
171
+ opt_custom_toolsets: Optional[List[Path]] = typer.Option(
172
+ [],
173
+ "--custom-toolsets",
174
+ "-t",
175
+ help="Path to a custom toolsets. The status of the custom toolsets specified here won't be cached (can specify -t multiple times to add multiple toolsets)",
176
+ )
177
+ opt_custom_runbooks: Optional[List[Path]] = typer.Option(
178
+ [],
179
+ "--custom-runbooks",
180
+ "-r",
181
+ help="Path to a custom runbooks (can specify -r multiple times to add multiple runbooks)",
182
+ )
183
+ opt_max_steps: Optional[int] = typer.Option(
184
+ 10,
185
+ "--max-steps",
186
+ help="Advanced. Maximum number of steps the LLM can take to investigate the issue",
187
+ )
188
+ opt_verbose: Optional[List[bool]] = typer.Option(
189
+ [],
190
+ "--verbose",
191
+ "-v",
192
+ help="Verbose output. You can pass multiple times to increase the verbosity. e.g. -v or -vv or -vvv",
193
+ )
194
+ opt_echo_request: bool = typer.Option(
195
+ True,
196
+ "--echo/--no-echo",
197
+ help="Echo back the question provided to HolmesGPT in the output",
198
+ )
199
+ opt_destination: Optional[DestinationType] = typer.Option(
200
+ DestinationType.CLI,
201
+ "--destination",
202
+ help="Destination for the results of the investigation (defaults to STDOUT)",
203
+ )
204
+ opt_slack_token: Optional[str] = typer.Option(
205
+ None,
206
+ "--slack-token",
207
+ help="Slack API key if --destination=slack (experimental). Can generate with `pip install robusta-cli && robusta integrations slack`",
208
+ )
209
+ opt_slack_channel: Optional[str] = typer.Option(
210
+ None,
211
+ "--slack-channel",
212
+ help="Slack channel if --destination=slack (experimental). E.g. #devops",
213
+ )
214
+ opt_json_output_file: Optional[str] = typer.Option(
215
+ None,
216
+ "--json-output-file",
217
+ help="Save the complete output in json format in to a file",
218
+ envvar="HOLMES_JSON_OUTPUT_FILE",
219
+ )
220
+
221
+ opt_post_processing_prompt: Optional[str] = typer.Option(
222
+ None,
223
+ "--post-processing-prompt",
224
+ help="Adds a prompt for post processing. (Preferable for chatty ai models)",
225
+ envvar="HOLMES_POST_PROCESSING_PROMPT",
226
+ )
227
+
228
+ opt_documents: Optional[str] = typer.Option(
229
+ None,
230
+ "--documents",
231
+ help="Additional documents to provide the LLM (typically URLs to runbooks)",
232
+ )
233
+
234
+ # Common help texts
235
+ system_prompt_help = "Advanced. System prompt for LLM. Values starting with builtin:// are loaded from holmes/plugins/prompts, values starting with file:// are loaded from the given path, other values are interpreted as a prompt string"
236
+
237
+
238
+ def parse_documents(documents: Optional[str]) -> List[ResourceInstructionDocument]:
239
+ resource_documents = []
240
+
241
+ if documents is not None:
242
+ data = json.loads(documents)
243
+ for item in data:
244
+ resource_document = ResourceInstructionDocument(**item)
245
+ resource_documents.append(resource_document)
246
+
247
+ return resource_documents
248
+
249
+
250
+ def handle_result(
251
+ result: LLMResult,
252
+ console: Console,
253
+ destination: DestinationType,
254
+ config: Config,
255
+ issue: Issue,
256
+ show_tool_output: bool,
257
+ add_separator: bool,
258
+ ):
259
+ if destination == DestinationType.CLI:
260
+ if show_tool_output and result.tool_calls:
261
+ for tool_call in result.tool_calls:
262
+ console.print("[bold magenta]Used Tool:[/bold magenta]", end="")
263
+ # we need to print this separately with markup=False because it contains arbitrary text and we don't want console.print to interpret it
264
+ console.print(
265
+ f"{tool_call.description}. Output=\n{tool_call.result}",
266
+ markup=False,
267
+ )
268
+
269
+ console.print("[bold green]AI:[/bold green]", end=" ")
270
+ console.print(Markdown(result.result)) # type: ignore
271
+ if add_separator:
272
+ console.print(Rule())
273
+
274
+ elif destination == DestinationType.SLACK:
275
+ slack = config.create_slack_destination()
276
+ slack.send_issue(issue, result)
277
+
278
+
279
+ # TODO: add streaming output
280
+ @app.command()
281
+ def ask(
282
+ prompt: Optional[str] = typer.Argument(
283
+ None, help="What to ask the LLM (user prompt)"
284
+ ),
285
+ prompt_file: Optional[Path] = typer.Option(
286
+ None,
287
+ "--prompt-file",
288
+ "-pf",
289
+ help="File containing the prompt to ask the LLM (overrides the prompt argument if provided)",
290
+ ),
291
+ # common options
292
+ api_key: Optional[str] = opt_api_key,
293
+ model: Optional[str] = opt_model,
294
+ config_file: Optional[Path] = opt_config_file,
295
+ custom_toolsets: Optional[List[Path]] = opt_custom_toolsets,
296
+ max_steps: Optional[int] = opt_max_steps,
297
+ verbose: Optional[List[bool]] = opt_verbose,
298
+ # semi-common options
299
+ destination: Optional[DestinationType] = opt_destination,
300
+ slack_token: Optional[str] = opt_slack_token,
301
+ slack_channel: Optional[str] = opt_slack_channel,
302
+ # advanced options for this command
303
+ system_prompt: Optional[str] = typer.Option(
304
+ "builtin://generic_ask.jinja2", help=system_prompt_help
305
+ ),
306
+ show_tool_output: bool = typer.Option(
307
+ False,
308
+ "--show-tool-output",
309
+ help="Advanced. Show the output of each tool that was called",
310
+ ),
311
+ include_file: Optional[List[Path]] = typer.Option(
312
+ [],
313
+ "--file",
314
+ "-f",
315
+ help="File to append to prompt (can specify -f multiple times to add multiple files)",
316
+ ),
317
+ json_output_file: Optional[str] = opt_json_output_file,
318
+ echo_request: bool = opt_echo_request,
319
+ post_processing_prompt: Optional[str] = opt_post_processing_prompt,
320
+ interactive: bool = typer.Option(
321
+ True,
322
+ "--interactive/--no-interactive",
323
+ "-i/-n",
324
+ help="Enter interactive mode after the initial question? For scripting, disable this with --no-interactive",
325
+ ),
326
+ ):
327
+ """
328
+ Ask any question and answer using available tools
329
+ """
330
+ console = init_logging(verbose) # type: ignore
331
+ config = Config.load_from_file(
332
+ config_file,
333
+ api_key=api_key,
334
+ model=model,
335
+ max_steps=max_steps,
336
+ custom_toolsets_from_cli=custom_toolsets,
337
+ slack_token=slack_token,
338
+ slack_channel=slack_channel,
339
+ )
340
+
341
+ ai = config.create_console_toolcalling_llm(
342
+ dal=None, # type: ignore
343
+ )
344
+ template_context = {
345
+ "toolsets": ai.tool_executor.toolsets,
346
+ "runbooks": config.get_runbook_catalog(),
347
+ }
348
+
349
+ system_prompt_rendered = load_and_render_prompt(system_prompt, template_context) # type: ignore
350
+
351
+ if prompt_file and prompt:
352
+ raise typer.BadParameter(
353
+ "You cannot provide both a prompt argument and a prompt file. Please use one or the other."
354
+ )
355
+ elif prompt_file:
356
+ if not prompt_file.is_file():
357
+ raise typer.BadParameter(f"Prompt file not found: {prompt_file}")
358
+ with prompt_file.open("r") as f:
359
+ prompt = f.read()
360
+ console.print(
361
+ f"[bold yellow]Loaded prompt from file {prompt_file}[/bold yellow]"
362
+ )
363
+ elif not prompt and not interactive:
364
+ raise typer.BadParameter(
365
+ "Either the 'prompt' argument or the --prompt-file option must be provided (unless using --interactive mode)."
366
+ )
367
+
368
+ if echo_request and not interactive and prompt:
369
+ console.print("[bold yellow]User:[/bold yellow] " + prompt)
370
+
371
+ if interactive:
372
+ run_interactive_loop(
373
+ ai,
374
+ console,
375
+ system_prompt_rendered,
376
+ prompt,
377
+ include_file,
378
+ post_processing_prompt,
379
+ show_tool_output,
380
+ )
381
+ return
382
+
383
+ messages = build_initial_ask_messages(
384
+ console,
385
+ system_prompt_rendered,
386
+ prompt, # type: ignore
387
+ include_file,
388
+ )
389
+
390
+ response = ai.call(messages, post_processing_prompt)
391
+ messages = response.messages # type: ignore # Update messages with the full history
392
+
393
+ if json_output_file:
394
+ write_json_file(json_output_file, response.model_dump())
395
+
396
+ issue = Issue(
397
+ id=str(uuid.uuid4()),
398
+ name=prompt, # type: ignore
399
+ source_type="holmes-ask",
400
+ raw={"prompt": prompt, "full_conversation": messages},
401
+ source_instance_id=socket.gethostname(),
402
+ )
403
+ handle_result(
404
+ response,
405
+ console,
406
+ destination, # type: ignore
407
+ config,
408
+ issue,
409
+ show_tool_output,
410
+ False, # type: ignore
411
+ )
412
+
413
+
414
+ @investigate_app.command()
415
+ def alertmanager(
416
+ alertmanager_url: Optional[str] = typer.Option(None, help="AlertManager url"),
417
+ alertmanager_alertname: Optional[str] = typer.Option(
418
+ None,
419
+ help="Investigate all alerts with this name (can be regex that matches multiple alerts). If not given, defaults to all firing alerts",
420
+ ),
421
+ alertmanager_label: Optional[List[str]] = typer.Option(
422
+ [],
423
+ help="For filtering alerts with a specific label. Must be of format key=value. If --alertmanager-label is passed multiple times, alerts must match ALL labels",
424
+ ),
425
+ alertmanager_username: Optional[str] = typer.Option(
426
+ None, help="Username to use for basic auth"
427
+ ),
428
+ alertmanager_password: Optional[str] = typer.Option(
429
+ None, help="Password to use for basic auth"
430
+ ),
431
+ alertmanager_file: Optional[Path] = typer.Option(
432
+ None, help="Load alertmanager alerts from a file (used by the test framework)"
433
+ ),
434
+ alertmanager_limit: Optional[int] = typer.Option(
435
+ None, "-n", help="Limit the number of alerts to process"
436
+ ),
437
+ # common options
438
+ api_key: Optional[str] = opt_api_key,
439
+ model: Optional[str] = opt_model,
440
+ config_file: Optional[Path] = opt_config_file, # type: ignore
441
+ custom_toolsets: Optional[List[Path]] = opt_custom_toolsets,
442
+ custom_runbooks: Optional[List[Path]] = opt_custom_runbooks,
443
+ max_steps: Optional[int] = opt_max_steps,
444
+ verbose: Optional[List[bool]] = opt_verbose,
445
+ # advanced options for this command
446
+ destination: Optional[DestinationType] = opt_destination,
447
+ slack_token: Optional[str] = opt_slack_token,
448
+ slack_channel: Optional[str] = opt_slack_channel,
449
+ json_output_file: Optional[str] = opt_json_output_file,
450
+ system_prompt: Optional[str] = typer.Option(
451
+ "builtin://generic_investigation.jinja2", help=system_prompt_help
452
+ ),
453
+ post_processing_prompt: Optional[str] = opt_post_processing_prompt,
454
+ ):
455
+ """
456
+ Investigate a Prometheus/Alertmanager alert
457
+ """
458
+ console = init_logging(verbose)
459
+ config = Config.load_from_file(
460
+ config_file,
461
+ api_key=api_key,
462
+ model=model,
463
+ max_steps=max_steps,
464
+ alertmanager_url=alertmanager_url,
465
+ alertmanager_username=alertmanager_username,
466
+ alertmanager_password=alertmanager_password,
467
+ alertmanager_alertname=alertmanager_alertname,
468
+ alertmanager_label=alertmanager_label,
469
+ alertmanager_file=alertmanager_file,
470
+ slack_token=slack_token,
471
+ slack_channel=slack_channel,
472
+ custom_toolsets_from_cli=custom_toolsets,
473
+ custom_runbooks=custom_runbooks,
474
+ )
475
+
476
+ ai = config.create_console_issue_investigator() # type: ignore
477
+
478
+ source = config.create_alertmanager_source()
479
+
480
+ try:
481
+ issues = source.fetch_issues()
482
+ except Exception as e:
483
+ logging.error("Failed to fetch issues from alertmanager", exc_info=e)
484
+ return
485
+
486
+ if alertmanager_limit is not None:
487
+ console.print(
488
+ f"[bold yellow]Limiting to {alertmanager_limit}/{len(issues)} issues.[/bold yellow]"
489
+ )
490
+ issues = issues[:alertmanager_limit]
491
+
492
+ if alertmanager_alertname is not None:
493
+ console.print(
494
+ f"[bold yellow]Analyzing {len(issues)} issues matching filter.[/bold yellow] [red]Press Ctrl+C to stop.[/red]"
495
+ )
496
+ else:
497
+ console.print(
498
+ f"[bold yellow]Analyzing all {len(issues)} issues. (Use --alertmanager-alertname to filter.)[/bold yellow] [red]Press Ctrl+C to stop.[/red]"
499
+ )
500
+ results = []
501
+ for i, issue in enumerate(issues):
502
+ console.print(
503
+ f"[bold yellow]Analyzing issue {i+1}/{len(issues)}: {issue.name}...[/bold yellow]"
504
+ )
505
+ result = ai.investigate(
506
+ issue=issue,
507
+ prompt=system_prompt, # type: ignore
508
+ console=console,
509
+ instructions=None,
510
+ post_processing_prompt=post_processing_prompt,
511
+ )
512
+ results.append({"issue": issue.model_dump(), "result": result.model_dump()})
513
+ handle_result(result, console, destination, config, issue, False, True) # type: ignore
514
+
515
+ if json_output_file:
516
+ write_json_file(json_output_file, results)
517
+
518
+
519
+ @generate_app.command("alertmanager-tests")
520
+ def generate_alertmanager_tests(
521
+ alertmanager_url: Optional[str] = typer.Option(None, help="AlertManager url"),
522
+ alertmanager_username: Optional[str] = typer.Option(
523
+ None, help="Username to use for basic auth"
524
+ ),
525
+ alertmanager_password: Optional[str] = typer.Option(
526
+ None, help="Password to use for basic auth"
527
+ ),
528
+ output: Optional[Path] = typer.Option(
529
+ None,
530
+ help="Path to dump alertmanager alerts as json (if not given, output curl commands instead)",
531
+ ),
532
+ config_file: Optional[Path] = opt_config_file, # type: ignore
533
+ verbose: Optional[List[bool]] = opt_verbose,
534
+ ):
535
+ """
536
+ Connect to alertmanager and dump all alerts as either a json file or curl commands to simulate the alert (depending on --output flag)
537
+ """
538
+ console = init_logging(verbose) # type: ignore
539
+ config = Config.load_from_file(
540
+ config_file,
541
+ alertmanager_url=alertmanager_url,
542
+ alertmanager_username=alertmanager_username,
543
+ alertmanager_password=alertmanager_password,
544
+ )
545
+
546
+ source = config.create_alertmanager_source()
547
+ if output is None:
548
+ source.output_curl_commands(console)
549
+ else:
550
+ source.dump_raw_alerts_to_file(output)
551
+
552
+
553
@investigate_app.command()
def jira(
    jira_url: Optional[str] = typer.Option(
        None,
        help="Jira url - e.g. https://your-company.atlassian.net",
        envvar="JIRA_URL",
    ),
    jira_username: Optional[str] = typer.Option(
        None,
        help="The email address with which you log into Jira",
        envvar="JIRA_USERNAME",
    ),
    # Optional[str] (was `str`): the default is None, matching the sibling
    # `ticket`/`github`/`pagerduty` command option annotations.
    jira_api_key: Optional[str] = typer.Option(
        None,
        envvar="JIRA_API_KEY",
    ),
    jira_query: Optional[str] = typer.Option(
        None,
        help="Investigate tickets matching a JQL query (e.g. 'project=DEFAULT_PROJECT')",
    ),
    update: Optional[bool] = typer.Option(False, help="Update Jira with AI results"),
    # common options
    api_key: Optional[str] = opt_api_key,
    model: Optional[str] = opt_model,
    config_file: Optional[Path] = opt_config_file,  # type: ignore
    custom_toolsets: Optional[List[Path]] = opt_custom_toolsets,
    custom_runbooks: Optional[List[Path]] = opt_custom_runbooks,
    max_steps: Optional[int] = opt_max_steps,
    verbose: Optional[List[bool]] = opt_verbose,
    json_output_file: Optional[str] = opt_json_output_file,
    # advanced options for this command
    system_prompt: Optional[str] = typer.Option(
        "builtin://generic_investigation.jinja2", help=system_prompt_help
    ),
    post_processing_prompt: Optional[str] = opt_post_processing_prompt,
):
    """
    Investigate a Jira ticket
    """
    console = init_logging(verbose)
    config = Config.load_from_file(
        config_file,
        api_key=api_key,
        model=model,
        max_steps=max_steps,
        jira_url=jira_url,
        jira_username=jira_username,
        jira_api_key=jira_api_key,
        jira_query=jira_query,
        custom_toolsets_from_cli=custom_toolsets,
        custom_runbooks=custom_runbooks,
    )
    ai = config.create_console_issue_investigator()  # type: ignore
    source = config.create_jira_source()
    # Fetch failures abort the command with a log entry instead of a traceback.
    try:
        issues = source.fetch_issues()
    except Exception as e:
        logging.error("Failed to fetch issues from Jira", exc_info=e)
        return

    console.print(
        f"[bold yellow]Analyzing {len(issues)} Jira tickets.[/bold yellow] [red]Press Ctrl+C to stop.[/red]"
    )

    results = []
    for i, issue in enumerate(issues):
        console.print(
            f"[bold yellow]Analyzing Jira ticket {i+1}/{len(issues)}: {issue.name}...[/bold yellow]"
        )
        result = ai.investigate(
            issue=issue,
            prompt=system_prompt,  # type: ignore
            console=console,
            instructions=None,
            post_processing_prompt=post_processing_prompt,
        )

        console.print(Rule())
        console.print(f"[bold green]AI analysis of {issue.url}[/bold green]")
        # Duplicate newlines so single line breaks render as Markdown paragraphs.
        console.print(Markdown(result.result.replace("\n", "\n\n")), style="bold green")  # type: ignore
        console.print(Rule())
        if update:
            source.write_back_result(issue.id, result)
            console.print(f"[bold]Updated ticket {issue.url}.[/bold]")
        else:
            console.print(
                f"[bold]Not updating ticket {issue.url}. Use the --update option to do so.[/bold]"
            )

        results.append({"issue": issue.model_dump(), "result": result.model_dump()})

    if json_output_file:
        write_json_file(json_output_file, results)
646
+
647
+
648
+ # Supported ticket sources are enumerated by SupportedTicketSources (see the `ticket` command below)
649
+
650
+
651
@investigate_app.command()
def ticket(
    prompt: str = typer.Argument(help="What to ask the LLM (user prompt)"),
    source: SupportedTicketSources = typer.Option(
        ...,
        help=f"Source system to investigate the ticket from. Supported sources: {', '.join(s.value for s in SupportedTicketSources)}",
    ),
    ticket_url: Optional[str] = typer.Option(
        None,
        help="URL - e.g. https://your-company.atlassian.net",
        envvar="TICKET_URL",
    ),
    ticket_username: Optional[str] = typer.Option(
        None,
        help="The email address with which you log into your Source",
        envvar="TICKET_USERNAME",
    ),
    ticket_api_key: Optional[str] = typer.Option(
        None,
        envvar="TICKET_API_KEY",
    ),
    ticket_id: Optional[str] = typer.Option(
        None,
        help="ticket ID to investigate (e.g., 'KAN-1')",
    ),
    config_file: Optional[Path] = opt_config_file,  # type: ignore
    system_prompt: Optional[str] = typer.Option(
        "builtin://generic_ticket.jinja2", help=system_prompt_help
    ),
    post_processing_prompt: Optional[str] = opt_post_processing_prompt,
):
    """
    Investigate a ticket from the specified source and write the analysis back to it.
    """

    console = init_logging([])

    # Validate the source and build the matching (config, source) pair.
    try:
        ticket_source = SourceFactory.create_source(
            source=source,
            config_file=config_file,
            ticket_url=ticket_url,
            ticket_username=ticket_username,
            ticket_api_key=ticket_api_key,
            ticket_id=ticket_id,
        )
    except Exception as e:
        console.print(f"[bold red]Error: {str(e)}[/bold red]")
        return

    try:
        issue_to_investigate = ticket_source.source.fetch_issue(id=ticket_id)  # type: ignore
        if issue_to_investigate is None:
            raise Exception(f"Issue {ticket_id} Not found")
    except Exception as e:
        logging.error(f"Failed to fetch issue from {source}", exc_info=e)
        console.print(
            f"[bold red]Error: Failed to fetch issue {ticket_id} from {source}.[/bold red]"
        )
        return

    # Render into a new local rather than rebinding the `system_prompt`
    # parameter, so the CLI option value and the rendered prompt stay distinct.
    rendered_system_prompt = load_and_render_prompt(
        prompt=system_prompt,  # type: ignore
        context={
            "source": source,
            "output_instructions": ticket_source.output_instructions,
        },
    )

    ai = ticket_source.config.create_console_issue_investigator()
    console.print(
        f"[bold yellow]Analyzing ticket: {issue_to_investigate.name}...[/bold yellow]"
    )
    # The user prompt is augmented with the issue's name and description.
    prompt = (
        prompt
        + f" for issue '{issue_to_investigate.name}' with description:'{issue_to_investigate.description}'"
    )

    result = ai.prompt_call(rendered_system_prompt, prompt, post_processing_prompt)

    console.print(Rule())
    console.print(
        f"[bold green]AI analysis of {issue_to_investigate.url} {prompt}[/bold green]"
    )
    console.print(result.result.replace("\n", "\n\n"), style="bold green")  # type: ignore
    console.print(Rule())

    # NOTE(review): unlike the other investigate commands there is no --update
    # flag here; the result is always written back to the source.
    ticket_source.source.write_back_result(issue_to_investigate.id, result)
    console.print(f"[bold]Updated ticket {issue_to_investigate.url}.[/bold]")
741
+
742
+
743
@investigate_app.command()
def github(
    github_url: str = typer.Option(
        "https://api.github.com",
        help="The GitHub api base url (e.g: https://api.github.com)",
    ),
    github_owner: Optional[str] = typer.Option(
        None,
        help="The GitHub repository Owner, eg: if the repository url is https://github.com/robusta-dev/holmesgpt, the owner is robusta-dev",
    ),
    # Optional[str] (was `str`): the default is None, matching the other
    # option annotations in this command.
    github_pat: Optional[str] = typer.Option(
        None,
    ),
    github_repository: Optional[str] = typer.Option(
        None,
        help="The GitHub repository name, eg: if the repository url is https://github.com/robusta-dev/holmesgpt, the repository name is holmesgpt",
    ),
    update: Optional[bool] = typer.Option(False, help="Update GitHub with AI results"),
    github_query: Optional[str] = typer.Option(
        "is:issue is:open",
        help="Investigate tickets matching a GitHub query (e.g. 'is:issue is:open')",
    ),
    # common options
    api_key: Optional[str] = opt_api_key,
    model: Optional[str] = opt_model,
    config_file: Optional[Path] = opt_config_file,  # type: ignore
    custom_toolsets: Optional[List[Path]] = opt_custom_toolsets,
    custom_runbooks: Optional[List[Path]] = opt_custom_runbooks,
    max_steps: Optional[int] = opt_max_steps,
    verbose: Optional[List[bool]] = opt_verbose,
    # Added for consistency with the jira/pagerduty commands; defaults to None
    # so existing invocations are unaffected.
    json_output_file: Optional[str] = opt_json_output_file,
    # advanced options for this command
    system_prompt: Optional[str] = typer.Option(
        "builtin://generic_investigation.jinja2", help=system_prompt_help
    ),
    post_processing_prompt: Optional[str] = opt_post_processing_prompt,
):
    """
    Investigate a GitHub issue
    """
    console = init_logging(verbose)  # type: ignore
    config = Config.load_from_file(
        config_file,
        api_key=api_key,
        model=model,
        max_steps=max_steps,
        github_url=github_url,
        github_owner=github_owner,
        github_pat=github_pat,
        github_repository=github_repository,
        github_query=github_query,
        custom_toolsets_from_cli=custom_toolsets,
        custom_runbooks=custom_runbooks,
    )
    ai = config.create_console_issue_investigator()
    source = config.create_github_source()
    # Fetch failures abort the command with a log entry instead of a traceback.
    try:
        issues = source.fetch_issues()
    except Exception as e:
        logging.error("Failed to fetch issues from GitHub", exc_info=e)
        return

    console.print(
        f"[bold yellow]Analyzing {len(issues)} GitHub Issues.[/bold yellow] [red]Press Ctrl+C to stop.[/red]"
    )

    results = []
    for i, issue in enumerate(issues):
        console.print(
            f"[bold yellow]Analyzing GitHub issue {i+1}/{len(issues)}: {issue.name}...[/bold yellow]"
        )

        result = ai.investigate(
            issue=issue,
            prompt=system_prompt,  # type: ignore
            console=console,
            instructions=None,
            post_processing_prompt=post_processing_prompt,
        )

        console.print(Rule())
        console.print(f"[bold green]AI analysis of {issue.url}[/bold green]")
        # Duplicate newlines so single line breaks render as Markdown paragraphs.
        console.print(Markdown(result.result.replace("\n", "\n\n")), style="bold green")  # type: ignore
        console.print(Rule())
        if update:
            source.write_back_result(issue.id, result)
            console.print(f"[bold]Updated ticket {issue.url}.[/bold]")
        else:
            console.print(
                f"[bold]Not updating issue {issue.url}. Use the --update option to do so.[/bold]"
            )

        results.append({"issue": issue.model_dump(), "result": result.model_dump()})

    if json_output_file:
        write_json_file(json_output_file, results)
831
+
832
+
833
@investigate_app.command()
def pagerduty(
    # Optional[str] (was `str`): the default is None, matching the other
    # option annotations in this command.
    pagerduty_api_key: Optional[str] = typer.Option(
        None,
        help="The PagerDuty API key. This can be found in the PagerDuty UI under Integrations > API Access Keys.",
    ),
    pagerduty_user_email: Optional[str] = typer.Option(
        None,
        help="When --update is set, which user will be listed as the user who updated the ticket. (Must be the email of a valid user in your PagerDuty account.)",
    ),
    pagerduty_incident_key: Optional[str] = typer.Option(
        None,
        help="If provided, only analyze a single PagerDuty incident matching this key",
    ),
    update: Optional[bool] = typer.Option(
        False, help="Update PagerDuty with AI results"
    ),
    # common options
    api_key: Optional[str] = opt_api_key,
    model: Optional[str] = opt_model,
    config_file: Optional[Path] = opt_config_file,  # type: ignore
    custom_toolsets: Optional[List[Path]] = opt_custom_toolsets,
    custom_runbooks: Optional[List[Path]] = opt_custom_runbooks,
    max_steps: Optional[int] = opt_max_steps,
    verbose: Optional[List[bool]] = opt_verbose,
    json_output_file: Optional[str] = opt_json_output_file,
    # advanced options for this command
    system_prompt: Optional[str] = typer.Option(
        "builtin://generic_investigation.jinja2", help=system_prompt_help
    ),
    post_processing_prompt: Optional[str] = opt_post_processing_prompt,
):
    """
    Investigate a PagerDuty incident
    """
    console = init_logging(verbose)
    config = Config.load_from_file(
        config_file,
        api_key=api_key,
        model=model,
        max_steps=max_steps,
        pagerduty_api_key=pagerduty_api_key,
        pagerduty_user_email=pagerduty_user_email,
        pagerduty_incident_key=pagerduty_incident_key,
        custom_toolsets_from_cli=custom_toolsets,
        custom_runbooks=custom_runbooks,
    )
    ai = config.create_console_issue_investigator()
    source = config.create_pagerduty_source()
    # Fetch failures abort the command with a log entry instead of a traceback.
    try:
        issues = source.fetch_issues()
    except Exception as e:
        logging.error("Failed to fetch issues from PagerDuty", exc_info=e)
        return

    console.print(
        f"[bold yellow]Analyzing {len(issues)} PagerDuty incidents.[/bold yellow] [red]Press Ctrl+C to stop.[/red]"
    )

    results = []
    for i, issue in enumerate(issues):
        console.print(
            f"[bold yellow]Analyzing PagerDuty incident {i+1}/{len(issues)}: {issue.name}...[/bold yellow]"
        )

        result = ai.investigate(
            issue=issue,
            prompt=system_prompt,  # type: ignore
            console=console,
            instructions=None,
            post_processing_prompt=post_processing_prompt,
        )

        console.print(Rule())
        console.print(f"[bold green]AI analysis of {issue.url}[/bold green]")
        # Duplicate newlines so single line breaks render as Markdown paragraphs.
        console.print(Markdown(result.result.replace("\n", "\n\n")), style="bold green")  # type: ignore
        console.print(Rule())
        if update:
            source.write_back_result(issue.id, result)
            console.print(f"[bold]Updated alert {issue.url}.[/bold]")
        else:
            console.print(
                f"[bold]Not updating alert {issue.url}. Use the --update option to do so.[/bold]"
            )
        results.append({"issue": issue.model_dump(), "result": result.model_dump()})

    if json_output_file:
        write_json_file(json_output_file, results)
921
+
922
+
923
@investigate_app.command()
def opsgenie(
    # Optional[str] (was `str`): the defaults are None, matching the other
    # option annotations in this command.
    opsgenie_api_key: Optional[str] = typer.Option(None, help="The OpsGenie API key"),
    opsgenie_team_integration_key: Optional[str] = typer.Option(
        None, help=OPSGENIE_TEAM_INTEGRATION_KEY_HELP
    ),
    opsgenie_query: Optional[str] = typer.Option(
        None,
        help="E.g. 'message: Foo' (see https://support.atlassian.com/opsgenie/docs/search-queries-for-alerts/)",
    ),
    update: Optional[bool] = typer.Option(
        False, help="Update OpsGenie with AI results"
    ),
    # common options
    api_key: Optional[str] = opt_api_key,
    model: Optional[str] = opt_model,
    config_file: Optional[Path] = opt_config_file,  # type: ignore
    custom_toolsets: Optional[List[Path]] = opt_custom_toolsets,
    custom_runbooks: Optional[List[Path]] = opt_custom_runbooks,
    max_steps: Optional[int] = opt_max_steps,
    verbose: Optional[List[bool]] = opt_verbose,
    # advanced options for this command
    system_prompt: Optional[str] = typer.Option(
        "builtin://generic_investigation.jinja2", help=system_prompt_help
    ),
    post_processing_prompt: Optional[str] = opt_post_processing_prompt,
    # NOTE(review): accepted but never used in this function body — confirm
    # whether it should be forwarded (kept for CLI backward compatibility).
    documents: Optional[str] = opt_documents,
):
    """
    Investigate an OpsGenie alert
    """
    console = init_logging(verbose)  # type: ignore
    config = Config.load_from_file(
        config_file,
        api_key=api_key,
        model=model,
        max_steps=max_steps,
        opsgenie_api_key=opsgenie_api_key,
        opsgenie_team_integration_key=opsgenie_team_integration_key,
        opsgenie_query=opsgenie_query,
        custom_toolsets_from_cli=custom_toolsets,
        custom_runbooks=custom_runbooks,
    )
    ai = config.create_console_issue_investigator()
    source = config.create_opsgenie_source()
    # Fetch failures abort the command with a log entry instead of a traceback.
    try:
        issues = source.fetch_issues()
    except Exception as e:
        logging.error("Failed to fetch issues from OpsGenie", exc_info=e)
        return

    console.print(
        f"[bold yellow]Analyzing {len(issues)} OpsGenie alerts.[/bold yellow] [red]Press Ctrl+C to stop.[/red]"
    )
    for i, issue in enumerate(issues):
        console.print(
            f"[bold yellow]Analyzing OpsGenie alert {i+1}/{len(issues)}: {issue.name}...[/bold yellow]"
        )
        result = ai.investigate(
            issue=issue,
            prompt=system_prompt,  # type: ignore
            console=console,
            instructions=None,
            post_processing_prompt=post_processing_prompt,
        )

        console.print(Rule())
        console.print(f"[bold green]AI analysis of {issue.url}[/bold green]")
        # Duplicate newlines so single line breaks render as Markdown paragraphs.
        console.print(Markdown(result.result.replace("\n", "\n\n")), style="bold green")  # type: ignore
        console.print(Rule())
        if update:
            source.write_back_result(issue.id, result)
            console.print(f"[bold]Updated alert {issue.url}.[/bold]")
        else:
            console.print(
                f"[bold]Not updating alert {issue.url}. Use the --update option to do so.[/bold]"
            )
1000
+
1001
+
1002
@toolset_app.command("list")
def list_toolsets(
    verbose: Optional[List[bool]] = opt_verbose,
    config_file: Optional[Path] = opt_config_file,  # type: ignore
):
    """
    List built-in and custom toolsets status of CLI
    """
    console = init_logging(verbose)
    config = Config.load_from_file(config_file)
    # Presumably returns cached status — `refresh` passes refresh_status=True
    # for a re-check; confirm against ToolsetManager.
    cli_toolsets = config.toolset_manager.list_console_toolsets()

    pretty_print_toolset_status(cli_toolsets, console)
1015
+
1016
+
1017
@toolset_app.command("refresh")
def refresh_toolsets(
    verbose: Optional[List[bool]] = opt_verbose,
    config_file: Optional[Path] = opt_config_file,  # type: ignore
):
    """
    Refresh built-in and custom toolsets status of CLI
    """
    console = init_logging(verbose)
    config = Config.load_from_file(config_file)
    # refresh_status=True forces a status re-check instead of using cached state.
    cli_toolsets = config.toolset_manager.list_console_toolsets(refresh_status=True)
    pretty_print_toolset_status(cli_toolsets, console)
1029
+
1030
+
1031
@app.command()
def version() -> None:
    # Print the version string reported by get_version() to stdout.
    typer.echo(get_version())
1034
+
1035
+
1036
def run():
    # Console-script entry point: dispatch to the Typer application.
    app()


if __name__ == "__main__":
    run()