holmesgpt 0.11.5__py3-none-any.whl → 0.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (40)
  1. holmes/__init__.py +1 -1
  2. holmes/common/env_vars.py +8 -4
  3. holmes/config.py +52 -13
  4. holmes/core/investigation_structured_output.py +7 -0
  5. holmes/core/llm.py +14 -4
  6. holmes/core/models.py +24 -0
  7. holmes/core/tool_calling_llm.py +48 -6
  8. holmes/core/tools.py +7 -4
  9. holmes/core/toolset_manager.py +24 -5
  10. holmes/core/tracing.py +224 -0
  11. holmes/interactive.py +761 -44
  12. holmes/main.py +59 -127
  13. holmes/plugins/prompts/_fetch_logs.jinja2 +4 -0
  14. holmes/plugins/prompts/kubernetes_workload_ask.jinja2 +2 -10
  15. holmes/plugins/toolsets/__init__.py +10 -2
  16. holmes/plugins/toolsets/azure_sql/apis/azure_sql_api.py +2 -1
  17. holmes/plugins/toolsets/coralogix/toolset_coralogix_logs.py +3 -0
  18. holmes/plugins/toolsets/datadog/datadog_api.py +161 -0
  19. holmes/plugins/toolsets/datadog/datadog_metrics_instructions.jinja2 +26 -0
  20. holmes/plugins/toolsets/datadog/datadog_traces_formatter.py +310 -0
  21. holmes/plugins/toolsets/datadog/instructions_datadog_traces.jinja2 +51 -0
  22. holmes/plugins/toolsets/datadog/toolset_datadog_logs.py +267 -0
  23. holmes/plugins/toolsets/datadog/toolset_datadog_metrics.py +488 -0
  24. holmes/plugins/toolsets/datadog/toolset_datadog_traces.py +689 -0
  25. holmes/plugins/toolsets/grafana/toolset_grafana_loki.py +3 -0
  26. holmes/plugins/toolsets/internet/internet.py +1 -1
  27. holmes/plugins/toolsets/logging_utils/logging_api.py +9 -3
  28. holmes/plugins/toolsets/opensearch/opensearch_logs.py +3 -0
  29. holmes/plugins/toolsets/utils.py +6 -2
  30. holmes/utils/cache.py +4 -4
  31. holmes/utils/console/consts.py +2 -0
  32. holmes/utils/console/logging.py +95 -0
  33. holmes/utils/console/result.py +37 -0
  34. {holmesgpt-0.11.5.dist-info → holmesgpt-0.12.0.dist-info}/METADATA +3 -4
  35. {holmesgpt-0.11.5.dist-info → holmesgpt-0.12.0.dist-info}/RECORD +38 -29
  36. {holmesgpt-0.11.5.dist-info → holmesgpt-0.12.0.dist-info}/WHEEL +1 -1
  37. holmes/__init__.py.bak +0 -76
  38. holmes/plugins/toolsets/datadog.py +0 -153
  39. {holmesgpt-0.11.5.dist-info → holmesgpt-0.12.0.dist-info}/LICENSE.txt +0 -0
  40. {holmesgpt-0.11.5.dist-info → holmesgpt-0.12.0.dist-info}/entry_points.txt +0 -0
holmes/__init__.py CHANGED
@@ -9,7 +9,7 @@ this_path = os.path.dirname(os.path.realpath(__file__))
  sys.path.append(this_path)
 
  # This is patched by github actions during release
- __version__ = "0.11.5"
+ __version__ = "0.12.0"
 
 
  def is_official_release() -> bool:
holmes/common/env_vars.py CHANGED
@@ -1,10 +1,14 @@
  import os
  import json
+ from typing import Optional
 
 
- def load_bool(env_var, default: bool):
-     s = os.environ.get(env_var, str(default))
-     return json.loads(s.lower())
+ def load_bool(env_var, default: Optional[bool]) -> Optional[bool]:
+     env_value = os.environ.get(env_var)
+     if env_value is None:
+         return default
+
+     return json.loads(env_value.lower())
 
 
  ENABLED_BY_DEFAULT_TOOLSETS = os.environ.get(
@@ -22,7 +26,7 @@ STORE_API_KEY = os.environ.get("STORE_API_KEY", "")
  STORE_EMAIL = os.environ.get("STORE_EMAIL", "")
  STORE_PASSWORD = os.environ.get("STORE_PASSWORD", "")
  HOLMES_POST_PROCESSING_PROMPT = os.environ.get("HOLMES_POST_PROCESSING_PROMPT", "")
- ROBUSTA_AI = load_bool("ROBUSTA_AI", False)
+ ROBUSTA_AI = load_bool("ROBUSTA_AI", None)
  ROBUSTA_API_ENDPOINT = os.environ.get("ROBUSTA_API_ENDPOINT", "https://api.robusta.dev")
 
  LOG_PERFORMANCE = os.environ.get("LOG_PERFORMANCE", None)
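
load_bool is now tri-state: an unset variable returns the default unchanged (which may be None), while any set value is lowercased and parsed as JSON. A minimal sketch of the resulting behavior (illustrative values only; malformed input such as "yes" would raise json.JSONDecodeError):

import os

os.environ.pop("ROBUSTA_AI", None)
assert load_bool("ROBUSTA_AI", None) is None   # unset -> default passes through, including None

os.environ["ROBUSTA_AI"] = "True"
assert load_bool("ROBUSTA_AI", None) is True   # "True".lower() -> "true" -> json.loads -> True

os.environ["ROBUSTA_AI"] = "false"
assert load_bool("ROBUSTA_AI", True) is False  # an explicit value always wins over the default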
holmes/config.py CHANGED
@@ -58,7 +58,7 @@ def is_old_toolset_config(
  def parse_models_file(path: str):
      models = load_yaml_file(path, raise_error=False, warn_not_found=False)
 
-     for model, params in models.items():
+     for _, params in models.items():
          params = replace_env_vars_values(params)
 
      return models
@@ -109,6 +109,7 @@ class Config(RobustaBaseConfig):
      # custom_toolsets_from_cli is passed from CLI option `--custom-toolsets` as 'experimental' custom toolsets.
      # The status of toolset here won't be cached, so the toolset from cli will always be loaded when specified in the CLI.
      custom_toolsets_from_cli: Optional[List[FilePath]] = None
+     should_try_robusta_ai: bool = False  # if True, we will try to load the Robusta AI model, in cli we aren't trying to load it.
 
      toolsets: Optional[dict[str, dict[str, Any]]] = None
 
@@ -148,11 +149,31 @@ class Config(RobustaBaseConfig):
          self._version = get_version()
          self._holmes_info = fetch_holmes_info()
          self._model_list = parse_models_file(MODEL_LIST_FILE_LOCATION)
-         if ROBUSTA_AI:
+         if self._should_load_robusta_ai():
+             logging.info("Loading Robusta AI model")
              self._model_list["Robusta"] = {
                  "base_url": ROBUSTA_API_ENDPOINT,
              }
 
+     def _should_load_robusta_ai(self) -> bool:
+         if not self.should_try_robusta_ai:
+             return False
+
+         # ROBUSTA_AI were set in the env vars, so we can use it directly
+         if ROBUSTA_AI is not None:
+             return ROBUSTA_AI
+
+         # MODEL is set in the env vars, e.g. the user is using a custom model
+         # so we don't need to load the robusta AI model and keep the behavior backward compatible
+         if "MODEL" in os.environ:
+             return False
+
+         # if the user has provided a model list, we don't need to load the robusta AI model
+         if self._model_list:
+             return False
+
+         return True
+
      def log_useful_info(self):
          if self._model_list:
              logging.info(f"loaded models: {list(self._model_list.keys())}")
@@ -220,6 +241,7 @@ class Config(RobustaBaseConfig):
          if val is not None:
              kwargs[field_name] = val
          kwargs["cluster_name"] = Config.__get_cluster_name()
+         kwargs["should_try_robusta_ai"] = True
          result = cls(**kwargs)
          result.log_useful_info()
          return result
@@ -249,7 +271,9 @@ class Config(RobustaBaseConfig):
          runbook_catalog = load_runbook_catalog()
          return runbook_catalog
 
-     def create_console_tool_executor(self, dal: Optional[SupabaseDal]) -> ToolExecutor:
+     def create_console_tool_executor(
+         self, dal: Optional[SupabaseDal], refresh_status: bool = False
+     ) -> ToolExecutor:
          """
          Creates a ToolExecutor instance configured for CLI usage. This executor manages the available tools
          and their execution in the command-line interface.
@@ -259,7 +283,9 @@ class Config(RobustaBaseConfig):
          2. toolsets from config file will override and be merged into built-in toolsets with the same name.
          3. Custom toolsets from config files which can not override built-in toolsets
          """
-         cli_toolsets = self.toolset_manager.list_console_toolsets(dal=dal)
+         cli_toolsets = self.toolset_manager.list_console_toolsets(
+             dal=dal, refresh_status=refresh_status
+         )
          return ToolExecutor(cli_toolsets)
 
      def create_tool_executor(self, dal: Optional[SupabaseDal]) -> ToolExecutor:
@@ -281,19 +307,32 @@ class Config(RobustaBaseConfig):
          return self._server_tool_executor
 
      def create_console_toolcalling_llm(
-         self, dal: Optional[SupabaseDal] = None
+         self,
+         dal: Optional[SupabaseDal] = None,
+         refresh_toolsets: bool = False,
+         tracer=None,
      ) -> ToolCallingLLM:
-         tool_executor = self.create_console_tool_executor(dal)
-         return ToolCallingLLM(tool_executor, self.max_steps, self._get_llm())
+         tool_executor = self.create_console_tool_executor(dal, refresh_toolsets)
+         return ToolCallingLLM(
+             tool_executor, self.max_steps, self._get_llm(tracer=tracer)
+         )
 
      def create_toolcalling_llm(
-         self, dal: Optional[SupabaseDal] = None, model: Optional[str] = None
+         self,
+         dal: Optional[SupabaseDal] = None,
+         model: Optional[str] = None,
+         tracer=None,
      ) -> ToolCallingLLM:
          tool_executor = self.create_tool_executor(dal)
-         return ToolCallingLLM(tool_executor, self.max_steps, self._get_llm(model))
+         return ToolCallingLLM(
+             tool_executor, self.max_steps, self._get_llm(model, tracer)
+         )
 
      def create_issue_investigator(
-         self, dal: Optional[SupabaseDal] = None, model: Optional[str] = None
+         self,
+         dal: Optional[SupabaseDal] = None,
+         model: Optional[str] = None,
+         tracer=None,
      ) -> IssueInvestigator:
          all_runbooks = load_builtin_runbooks()
          for runbook_path in self.custom_runbooks:
@@ -302,7 +341,7 @@ class Config(RobustaBaseConfig):
          runbook_manager = RunbookManager(all_runbooks)
          tool_executor = self.create_tool_executor(dal)
          return IssueInvestigator(
-             tool_executor, runbook_manager, self.max_steps, self._get_llm(model)
+             tool_executor, runbook_manager, self.max_steps, self._get_llm(model, tracer)
          )
 
      def create_console_issue_investigator(
@@ -411,7 +450,7 @@ class Config(RobustaBaseConfig):
              raise ValueError("--slack-channel must be specified")
          return SlackDestination(self.slack_token.get_secret_value(), self.slack_channel)
 
-     def _get_llm(self, model_key: Optional[str] = None) -> LLM:
+     def _get_llm(self, model_key: Optional[str] = None, tracer=None) -> LLM:
          api_key = self.api_key.get_secret_value() if self.api_key else None
          model = self.model
          model_params = {}
@@ -425,7 +464,7 @@ class Config(RobustaBaseConfig):
          api_key = model_params.pop("api_key", api_key)
          model = model_params.pop("model", model)
 
-         return DefaultLLM(model, api_key, model_params)  # type: ignore
+         return DefaultLLM(model, api_key, model_params, tracer)  # type: ignore
 
      def get_models_list(self) -> List[str]:
          if self._model_list:
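
Taken together with the env_vars.py change, the Robusta AI decision now follows a fixed precedence; a comment sketch of _should_load_robusta_ai (server construction sets should_try_robusta_ai=True, the CLI leaves it False):

# 1. should_try_robusta_ai is False (e.g. CLI)  -> never load the Robusta AI model
# 2. ROBUSTA_AI env var is set                  -> its explicit true/false value wins
# 3. MODEL env var is set (custom model)        -> skip Robusta AI, backward compatible
# 4. a models file was loaded (_model_list)     -> skip Robusta AI
# 5. otherwise                                  -> add the "Robusta" entry to the model list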
holmes/core/investigation_structured_output.py CHANGED
@@ -262,3 +262,10 @@ def is_response_an_incorrect_tool_call(
              return False
          return True
      return False
+
+
+ def clear_json_markdown(text: str):
+     if text and text.startswith("```json") and text.endswith("```"):
+         return text[8:-3]
+
+     return text
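
A usage sketch for clear_json_markdown; the text[8:-3] slice assumes exactly one character (typically a newline) separates the opening ```json fence from the payload:

wrapped = '```json\n{"workload_healthy": true}```'
assert clear_json_markdown(wrapped) == '{"workload_healthy": true}'
assert clear_json_markdown("plain text") == "plain text"  # non-fenced input is returned unchanged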
holmes/core/llm.py CHANGED
@@ -64,12 +64,19 @@ class DefaultLLM(LLM):
      base_url: Optional[str]
      args: Dict
 
-     def __init__(self, model: str, api_key: Optional[str] = None, args: Dict = {}):
+     def __init__(
+         self,
+         model: str,
+         api_key: Optional[str] = None,
+         args: Optional[Dict] = None,
+         tracer=None,
+     ):
          self.model = model
          self.api_key = api_key
-         self.args = args
+         self.args = args or {}
+         self.tracer = tracer
 
-         if not args:
+         if not self.args:
              self.check_llm(self.model, self.api_key)
 
      def check_llm(self, model: str, api_key: Optional[str]):
@@ -214,7 +221,10 @@ class DefaultLLM(LLM):
          if self.args.get("thinking", None):
              litellm.modify_params = True
 
-         result = litellm.completion(
+         # Get the litellm module to use (wrapped or unwrapped)
+         litellm_to_use = self.tracer.wrap_llm(litellm) if self.tracer else litellm
+
+         result = litellm_to_use.completion(
              model=self.model,
              api_key=self.api_key,
              messages=messages,
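
Two things are worth noting here: replacing the mutable default args: Dict = {} with None removes the shared-default pitfall, and the tracer contract is minimal - anything exposing wrap_llm(litellm) that returns an object with a compatible completion(...) works. A hypothetical no-op tracer satisfying that contract (the real implementations live in the new holmes/core/tracing.py, which this diff does not display):

class NoOpTracer:
    def wrap_llm(self, litellm_module):
        # Return the module unchanged; a real tracer would return a wrapper
        # whose completion() records inputs/outputs before delegating.
        return litellm_module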
holmes/core/models.py CHANGED
@@ -155,3 +155,27 @@ class WorkloadHealthChatRequest(ChatRequestBaseModel):
      ask: str
      workload_health_result: WorkloadHealthInvestigationResult
      resource: dict
+
+
+ workload_health_structured_output = {
+     "type": "json_schema",
+     "json_schema": {
+         "name": "WorkloadHealthResult",
+         "strict": False,
+         "schema": {
+             "type": "object",
+             "properties": {
+                 "workload_healthy": {
+                     "type": "boolean",
+                     "description": "is the workload in healthy state or in error state",
+                 },
+                 "root_cause_summary": {
+                     "type": "string",
+                     "description": "concise short explaination leading to the workload_healthy result, pinpoint reason and root cause for the workload issues if any.",
+                 },
+             },
+             "required": ["root_cause_summary", "workload_healthy"],
+             "additionalProperties": False,
+         },
+     },
+ }
holmes/core/tool_calling_llm.py CHANGED
@@ -39,6 +39,7 @@ from holmes.utils.global_instructions import (
  )
  from holmes.utils.tags import format_tags_in_string, parse_messages_tags
  from holmes.core.tools_utils.tool_executor import ToolExecutor
+ from holmes.core.tracing import DummySpan, SpanType
 
 
  def format_tool_result_data(tool_result: StructuredToolResult) -> str:
@@ -200,9 +201,12 @@ class LLMResult(BaseModel):
  class ToolCallingLLM:
      llm: LLM
 
-     def __init__(self, tool_executor: ToolExecutor, max_steps: int, llm: LLM):
+     def __init__(
+         self, tool_executor: ToolExecutor, max_steps: int, llm: LLM, tracer=None
+     ):
          self.tool_executor = tool_executor
          self.max_steps = max_steps
+         self.tracer = tracer
          self.llm = llm
 
      def prompt_call(
@@ -230,8 +234,11 @@
          messages: List[Dict[str, str]],
          post_process_prompt: Optional[str] = None,
          response_format: Optional[Union[dict, Type[BaseModel]]] = None,
+         trace_span=DummySpan(),
      ) -> LLMResult:
-         return self.call(messages, post_process_prompt, response_format)
+         return self.call(
+             messages, post_process_prompt, response_format, trace_span=trace_span
+         )
 
      @sentry_sdk.trace
      def call(  # type: ignore
@@ -241,6 +248,7 @@
          response_format: Optional[Union[dict, Type[BaseModel]]] = None,
          user_prompt: Optional[str] = None,
          sections: Optional[InputSectionsDataType] = None,
+         trace_span=DummySpan(),
      ) -> LLMResult:
          perf_timing = PerformanceTiming("tool_calling_llm.call")
          tool_calls = []  # type: ignore
@@ -270,6 +278,7 @@
          perf_timing.measure("truncate_messages_to_fit_context")
 
          logging.debug(f"sending messages={messages}\n\ntools={tools}")
+
          try:
              full_response = self.llm.completion(
                  messages=parse_messages_tags(messages),
@@ -291,6 +300,7 @@
                  )
              else:
                  raise
+
          response = full_response.choices[0]  # type: ignore
          response_message = response.message  # type: ignore
@@ -347,15 +357,17 @@
              )
 
          perf_timing.measure("pre-tool-calls")
-         with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
+         with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
              futures = []
-             for t in tools_to_call:
+             for tool_index, t in enumerate(tools_to_call, 1):
                  logging.debug(f"Tool to call: {t}")
                  futures.append(
                      executor.submit(
                          self._invoke_tool,
                          tool_to_call=t,
                          previous_tool_calls=tool_calls,
+                         trace_span=trace_span,
+                         tool_number=tool_index,
                      )
                  )
 
@@ -367,10 +379,16 @@
 
              perf_timing.measure(f"tool completed {tool_call_result.tool_name}")
 
+         # Add a blank line after all tools in this batch complete
+         if tools_to_call:
+             logging.info("")
+
      def _invoke_tool(
          self,
          tool_to_call: ChatCompletionMessageToolCall,
          previous_tool_calls: list[dict],
+         trace_span=DummySpan(),
+         tool_number=None,
      ) -> ToolCallResult:
          tool_name = tool_to_call.function.name
          tool_params = None
@@ -399,6 +417,10 @@
          )
 
          tool_response = None
+
+         # Create tool span if tracing is enabled
+         tool_span = trace_span.start_span(name=tool_name, type=SpanType.TOOL)
+
          try:
              tool_response = prevent_overly_repeated_tool_call(
                  tool_name=tool.name,
@@ -406,7 +428,7 @@
                  tool_calls=previous_tool_calls,
              )
              if not tool_response:
-                 tool_response = tool.invoke(tool_params)
+                 tool_response = tool.invoke(tool_params, tool_number=tool_number)
 
              if not isinstance(tool_response, StructuredToolResult):
                  # Should never be needed but ensure Holmes does not crash if one of the tools does not return the right type
@@ -419,6 +441,16 @@
                      params=tool_params,
                  )
 
+             # Log tool execution to trace span
+             tool_span.log(
+                 input=tool_params,
+                 output=tool_response.data,
+                 metadata={
+                     "status": tool_response.status.value,
+                     "error": tool_response.error,
+                 },
+             )
+
          except Exception as e:
              logging.error(
                  f"Tool call to {tool_name} failed with an Exception", exc_info=True
@@ -428,6 +460,14 @@
                  error=f"Tool call failed: {e}",
                  params=tool_params,
              )
+
+             # Log error to trace span
+             tool_span.log(
+                 input=tool_params, output=str(e), metadata={"status": "ERROR"}
+             )
+         finally:
+             # End tool span
+             tool_span.end()
          return ToolCallResult(
              tool_call_id=tool_call_id,
              tool_name=tool_name,
@@ -650,12 +690,14 @@
          perf_timing.measure("pre-tool-calls")
          with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
              futures = []
-             for t in tools_to_call:  # type: ignore
+             for tool_index, t in enumerate(tools_to_call, 1):  # type: ignore
                  futures.append(
                      executor.submit(
                          self._invoke_tool,
                          tool_to_call=t,  # type: ignore
                          previous_tool_calls=tool_calls,
+                         trace_span=DummySpan(),  # Streaming mode doesn't support tracing yet
+                         tool_number=tool_index,
                      )
                  )
          yield create_sse_message(
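
DummySpan and SpanType come from the new holmes/core/tracing.py, which this diff does not display. The pattern is that tracing is opt-in: every trace_span parameter defaults to a no-op span, so call sites never need None checks. From the call sites above (start_span, log, end), a hypothetical minimal shape would be:

class DummySpan:
    def start_span(self, name=None, type=None):
        return self  # child spans are no-ops too

    def log(self, input=None, output=None, metadata=None):
        pass

    def end(self):
        pass

Using a shared DummySpan() as a default argument value is safe here only because the span is stateless; with a stateful default this would be the classic mutable-default pitfall.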
holmes/core/tools.py CHANGED
@@ -139,9 +139,12 @@ class Tool(ABC, BaseModel):
              tool_parameters=self.parameters,
          )
 
-     def invoke(self, params: Dict) -> StructuredToolResult:
+     def invoke(
+         self, params: Dict, tool_number: Optional[int] = None
+     ) -> StructuredToolResult:
+         tool_number_str = f"#{tool_number} " if tool_number else ""
          logging.info(
-             f"Running tool [bold]{self.name}[/bold]: {self.get_parameterized_one_liner(params)}"
+             f"Running tool {tool_number_str}[bold]{self.name}[/bold]: {self.get_parameterized_one_liner(params)}"
          )
          start_time = time.time()
          result = self._invoke(params)
@@ -152,7 +155,7 @@
              else str(result)
          )
          logging.info(
-             f" [dim]Finished in {elapsed:.2f}s, output length: {len(output_str):,} characters[/dim]\n"
+             f" [dim]Finished {tool_number_str}in {elapsed:.2f}s, output length: {len(output_str):,} characters - /show to view contents[/dim]"
          )
          return result
 
@@ -370,7 +373,7 @@
              exclude_unset=True,
              exclude=("name"),  # type: ignore
          ).items():
-             if field in self.model_fields and value not in (None, [], {}, ""):
+             if field in self.__class__.model_fields and value not in (None, [], {}, ""):
                  setattr(self, field, value)
 
      @model_validator(mode="before")
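
The change from self.model_fields to self.__class__.model_fields tracks Pydantic v2's deprecation of accessing model_fields on instances in favor of class-level access; a minimal illustration, with the model name being hypothetical:

from pydantic import BaseModel

class ExampleToolset(BaseModel):
    name: str = ""

ts = ExampleToolset()
print(type(ts).model_fields.keys())  # dict_keys(['name']) - class access, no warning
# ts.model_fields still works but emits a DeprecationWarning on recent Pydantic v2 releases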
holmes/core/toolset_manager.py CHANGED
@@ -1,3 +1,4 @@
+ import concurrent.futures
  import json
  import logging
  import os
@@ -113,14 +114,27 @@ class ToolsetManager:
          # check_prerequisites against each enabled toolset
          if not check_prerequisites:
              return list(toolsets_by_name.values())
+
+         enabled_toolsets: List[Toolset] = []
          for _, toolset in toolsets_by_name.items():
              if toolset.enabled:
-                 toolset.check_prerequisites()
+                 enabled_toolsets.append(toolset)
              else:
                  toolset.status = ToolsetStatusEnum.DISABLED
+         self.check_toolset_prerequisites(enabled_toolsets)
 
          return list(toolsets_by_name.values())
 
+     @classmethod
+     def check_toolset_prerequisites(cls, toolsets: list[Toolset]):
+         with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
+             futures = []
+             for toolset in toolsets:
+                 futures.append(executor.submit(toolset.check_prerequisites))
+
+             for _ in concurrent.futures.as_completed(futures):
+                 pass
+
      def _load_toolsets_from_config(
          self,
          toolsets: dict[str, dict[str, Any]],
@@ -231,6 +245,7 @@ class ToolsetManager:
              dal=dal, check_prerequisites=False, toolset_tags=toolset_tags
          )
 
+         enabled_toolsets_from_cache: List[Toolset] = []
          for toolset in all_toolsets_with_status:
              if toolset.name in toolsets_status_by_name:
                  # Update the status and error from the cached status
@@ -242,13 +257,15 @@ class ToolsetManager:
                      cached_status.get("type", ToolsetType.BUILTIN)
                  )
                  toolset.path = cached_status.get("path", None)
-                 # check prerequisites for only enabled toolset when the toolset is loaded from cache
+                 # check prerequisites for only enabled toolset when the toolset is loaded from cache. When the toolset is
+                 # not loaded from cache, the prerequisites are checked in the refresh_toolset_status method.
                  if (
                      toolset.enabled
                      and toolset.status == ToolsetStatusEnum.ENABLED
                      and using_cached
                  ):
-                     toolset.check_prerequisites()  # type: ignore
+                     enabled_toolsets_from_cache.append(toolset)
+         self.check_toolset_prerequisites(enabled_toolsets_from_cache)
 
          # CLI custom toolsets status are not cached, and their prerequisites are always checked whenever the CLI runs.
          custom_toolsets_from_cli = self._load_toolsets_from_paths(
@@ -257,13 +274,15 @@ class ToolsetManager:
              check_conflict_default=True,
          )
          # custom toolsets from cli as experimental toolset should not override custom toolsets from config
+         enabled_toolsets_from_cli: List[Toolset] = []
          for custom_toolset_from_cli in custom_toolsets_from_cli:
              if custom_toolset_from_cli.name in toolsets_status_by_name:
                  raise ValueError(
                      f"Toolset {custom_toolset_from_cli.name} from cli is already defined in existing toolset"
                  )
-             # status of custom toolsets from cli is not cached, and we need to check prerequisites every time the cli runs.
-             custom_toolset_from_cli.check_prerequisites()
+             enabled_toolsets_from_cli.append(custom_toolset_from_cli)
+         # status of custom toolsets from cli is not cached, and we need to check prerequisites every time the cli runs.
+         self.check_toolset_prerequisites(enabled_toolsets_from_cli)
 
          all_toolsets_with_status.extend(custom_toolsets_from_cli)
          if using_cached:
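
One subtlety in check_toolset_prerequisites: draining as_completed(futures) without calling .result() waits for every check to finish, but any exception raised inside check_prerequisites is silently discarded (each toolset presumably records its own status/error instead). For comparison, a variant that would re-raise failures:

import concurrent.futures

def check_all_strict(toolsets):
    # Hypothetical stricter variant, not what this release ships
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(t.check_prerequisites) for t in toolsets]
        for future in concurrent.futures.as_completed(futures):
            future.result()  # re-raises here if a check threw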