strix-agent 0.1.16__tar.gz → 0.1.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of strix-agent has been flagged as potentially problematic.

Files changed (98)
  1. {strix_agent-0.1.16 → strix_agent-0.1.18}/PKG-INFO +4 -4
  2. {strix_agent-0.1.16 → strix_agent-0.1.18}/README.md +3 -3
  3. {strix_agent-0.1.16 → strix_agent-0.1.18}/pyproject.toml +1 -1
  4. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/agents/base_agent.py +13 -2
  5. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/app.py +37 -6
  6. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/main.py +79 -23
  7. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/agents_graph_renderer.py +1 -1
  8. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/base_renderer.py +2 -1
  9. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/browser_renderer.py +3 -3
  10. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/proxy_renderer.py +2 -2
  11. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/registry.py +1 -1
  12. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/reporting_renderer.py +1 -1
  13. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/scan_info_renderer.py +8 -6
  14. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/terminal_renderer.py +1 -1
  15. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tracer.py +5 -1
  16. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/llm.py +57 -3
  17. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/request_queue.py +16 -1
  18. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/runtime/docker_runtime.py +2 -2
  19. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/executor.py +1 -1
  20. {strix_agent-0.1.16 → strix_agent-0.1.18}/LICENSE +0 -0
  21. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/__init__.py +0 -0
  22. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/agents/StrixAgent/__init__.py +0 -0
  23. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/agents/StrixAgent/strix_agent.py +0 -0
  24. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/agents/StrixAgent/system_prompt.jinja +0 -0
  25. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/agents/__init__.py +0 -0
  26. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/agents/state.py +0 -0
  27. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/__init__.py +0 -0
  28. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/assets/cli.tcss +0 -0
  29. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/__init__.py +0 -0
  30. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/file_edit_renderer.py +0 -0
  31. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/finish_renderer.py +0 -0
  32. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/notes_renderer.py +0 -0
  33. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/python_renderer.py +0 -0
  34. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/thinking_renderer.py +0 -0
  35. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/user_message_renderer.py +0 -0
  36. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/web_search_renderer.py +0 -0
  37. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/__init__.py +0 -0
  38. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/config.py +0 -0
  39. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/memory_compressor.py +0 -0
  40. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/utils.py +0 -0
  41. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/__init__.py +0 -0
  42. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/coordination/root_agent.jinja +0 -0
  43. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/authentication_jwt.jinja +0 -0
  44. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/business_logic.jinja +0 -0
  45. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/csrf.jinja +0 -0
  46. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/idor.jinja +0 -0
  47. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/race_conditions.jinja +0 -0
  48. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/rce.jinja +0 -0
  49. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/sql_injection.jinja +0 -0
  50. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/ssrf.jinja +0 -0
  51. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/xss.jinja +0 -0
  52. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/prompts/vulnerabilities/xxe.jinja +0 -0
  53. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/runtime/__init__.py +0 -0
  54. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/runtime/runtime.py +0 -0
  55. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/runtime/tool_server.py +0 -0
  56. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/__init__.py +0 -0
  57. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/agents_graph/__init__.py +0 -0
  58. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/agents_graph/agents_graph_actions.py +0 -0
  59. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/agents_graph/agents_graph_actions_schema.xml +0 -0
  60. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/argument_parser.py +0 -0
  61. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/browser/__init__.py +0 -0
  62. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/browser/browser_actions.py +0 -0
  63. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/browser/browser_actions_schema.xml +0 -0
  64. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/browser/browser_instance.py +0 -0
  65. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/browser/tab_manager.py +0 -0
  66. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/file_edit/__init__.py +0 -0
  67. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/file_edit/file_edit_actions.py +0 -0
  68. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/file_edit/file_edit_actions_schema.xml +0 -0
  69. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/finish/__init__.py +0 -0
  70. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/finish/finish_actions.py +0 -0
  71. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/finish/finish_actions_schema.xml +0 -0
  72. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/notes/__init__.py +0 -0
  73. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/notes/notes_actions.py +0 -0
  74. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/notes/notes_actions_schema.xml +0 -0
  75. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/proxy/__init__.py +0 -0
  76. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/proxy/proxy_actions.py +0 -0
  77. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/proxy/proxy_actions_schema.xml +0 -0
  78. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/proxy/proxy_manager.py +0 -0
  79. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/python/__init__.py +0 -0
  80. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/python/python_actions.py +0 -0
  81. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/python/python_actions_schema.xml +0 -0
  82. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/python/python_instance.py +0 -0
  83. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/python/python_manager.py +0 -0
  84. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/registry.py +0 -0
  85. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/reporting/__init__.py +0 -0
  86. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/reporting/reporting_actions.py +0 -0
  87. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/reporting/reporting_actions_schema.xml +0 -0
  88. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/terminal/__init__.py +0 -0
  89. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/terminal/terminal_actions.py +0 -0
  90. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/terminal/terminal_actions_schema.xml +0 -0
  91. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/terminal/terminal_manager.py +0 -0
  92. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/terminal/terminal_session.py +0 -0
  93. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/thinking/__init__.py +0 -0
  94. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/thinking/thinking_actions.py +0 -0
  95. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/thinking/thinking_actions_schema.xml +0 -0
  96. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/web_search/__init__.py +0 -0
  97. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/web_search/web_search_actions.py +0 -0
  98. {strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/web_search/web_search_actions_schema.xml +0 -0

{strix_agent-0.1.16 → strix_agent-0.1.18}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: strix-agent
- Version: 0.1.16
+ Version: 0.1.18
  Summary: Open-source AI Hackers for your apps
  License: Apache-2.0
  Keywords: cybersecurity,security,vulnerability,scanner,pentest,agent,ai,cli
@@ -129,12 +129,12 @@ strix --target api.your-app.com --instruction "Prioritize authentication and aut
  ### ⚙️ Configuration

  ```bash
- # Required
  export STRIX_LLM="openai/gpt-5"
  export LLM_API_KEY="your-api-key"

- # Recommended
- export PERPLEXITY_API_KEY="your-api-key"
+ # Optional
+ export LLM_API_BASE="your-api-base-url" # if using a local model, e.g. Ollama, LMStudio
+ export PERPLEXITY_API_KEY="your-api-key" # for search capabilities
  ```

  [📚 View supported AI models](https://docs.litellm.ai/docs/providers)

{strix_agent-0.1.16 → strix_agent-0.1.18}/README.md
@@ -89,12 +89,12 @@ strix --target api.your-app.com --instruction "Prioritize authentication and aut
  ### ⚙️ Configuration

  ```bash
- # Required
  export STRIX_LLM="openai/gpt-5"
  export LLM_API_KEY="your-api-key"

- # Recommended
- export PERPLEXITY_API_KEY="your-api-key"
+ # Optional
+ export LLM_API_BASE="your-api-base-url" # if using a local model, e.g. Ollama, LMStudio
+ export PERPLEXITY_API_KEY="your-api-key" # for search capabilities
  ```

  [📚 View supported AI models](https://docs.litellm.ai/docs/providers)

{strix_agent-0.1.16 → strix_agent-0.1.18}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "strix-agent"
- version = "0.1.16"
+ version = "0.1.18"
  description = "Open-source AI Hackers for your apps"
  authors = ["Strix <hi@usestrix.com>"]
  readme = "README.md"

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/agents/base_agent.py
@@ -181,10 +181,21 @@ class BaseAgent(metaclass=AgentMeta):
  continue

  except LLMRequestFailedError as e:
- self.state.add_error(f"LLM request failed: {e}")
+ error_msg = str(e)
+ error_details = getattr(e, "details", None)
+ self.state.add_error(error_msg)
  self.state.enter_waiting_state(llm_failed=True)
  if tracer:
- tracer.update_agent_status(self.state.agent_id, "llm_failed")
+ tracer.update_agent_status(self.state.agent_id, "llm_failed", error_msg)
+ if error_details:
+ tracer.log_tool_execution_start(
+ self.state.agent_id,
+ "llm_error_details",
+ {"error": error_msg, "details": error_details},
+ )
+ tracer.update_tool_execution(
+ tracer._next_execution_id - 1, "failed", error_details
+ )
  continue

  except (RuntimeError, ValueError, TypeError) as e:

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/app.py
@@ -9,6 +9,8 @@ import threading
  from collections.abc import Callable
  from typing import Any, ClassVar

+ from rich.markup import escape as rich_escape
+ from rich.text import Text
  from textual import events, on
  from textual.app import App, ComposeResult
  from textual.binding import Binding
@@ -24,7 +26,7 @@ from strix.llm.config import LLMConfig


  def escape_markup(text: str) -> str:
- return text.replace("[", "\\[").replace("]", "\\]")
+ return rich_escape(text)


  class ChatTextArea(TextArea): # type: ignore[misc]
@@ -483,7 +485,7 @@ class StrixCLIApp(App): # type: ignore[misc]
  self._displayed_events = current_event_ids

  chat_display = self.query_one("#chat_display", Static)
- chat_display.update(content)
+ self._update_static_content_safe(chat_display, content)

  chat_display.set_classes(css_class)

@@ -546,11 +548,18 @@ class StrixCLIApp(App): # type: ignore[misc]
  self._safe_widget_operation(keymap_indicator.update, "")
  self._safe_widget_operation(status_display.remove_class, "hidden")
  elif status == "llm_failed":
- self._safe_widget_operation(status_text.update, "[red]LLM request failed[/red]")
+ error_msg = agent_data.get("error_message", "")
+ display_msg = (
+ f"[red]{escape_markup(error_msg)}[/red]"
+ if error_msg
+ else "[red]LLM request failed[/red]"
+ )
+ self._safe_widget_operation(status_text.update, display_msg)
  self._safe_widget_operation(
  keymap_indicator.update, "[dim]Send message to retry[/dim]"
  )
  self._safe_widget_operation(status_display.remove_class, "hidden")
+ self._stop_dot_animation()
  elif status == "waiting":
  animated_text = self._get_animated_waiting_text(self.selected_agent_id)
  self._safe_widget_operation(status_text.update, animated_text)
@@ -633,7 +642,7 @@ class StrixCLIApp(App): # type: ignore[misc]

  for agent_id, agent_data in self.tracer.agents.items():
  status = agent_data.get("status", "running")
- if status in ["running", "waiting", "llm_failed"]:
+ if status in ["running", "waiting"]:
  has_active_agents = True
  current_dots = self._agent_dot_states.get(agent_id, 0)
  self._agent_dot_states[agent_id] = (current_dots + 1) % 4
@@ -644,7 +653,7 @@ class StrixCLIApp(App): # type: ignore[misc]
  and self.selected_agent_id in self.tracer.agents
  ):
  selected_status = self.tracer.agents[self.selected_agent_id].get("status", "running")
- if selected_status in ["running", "waiting", "llm_failed"]:
+ if selected_status in ["running", "waiting"]:
  self._update_agent_status_display()

  if not has_active_agents:
@@ -652,7 +661,7 @@ class StrixCLIApp(App): # type: ignore[misc]
  for agent_id in list(self._agent_dot_states.keys()):
  if agent_id not in self.tracer.agents or self.tracer.agents[agent_id].get(
  "status"
- ) not in ["running", "waiting", "llm_failed"]:
+ ) not in ["running", "waiting"]:
  del self._agent_dot_states[agent_id]

  def _gather_agent_events(self, agent_id: str) -> list[dict[str, Any]]:
@@ -900,6 +909,7 @@ class StrixCLIApp(App): # type: ignore[misc]
  "reporting_action": "#ea580c",
  "scan_start_info": "#22c55e",
  "subagent_start_info": "#22c55e",
+ "llm_error_details": "#dc2626",
  }

  color = tool_colors.get(tool_name, "#737373")
@@ -911,6 +921,14 @@ class StrixCLIApp(App): # type: ignore[misc]
  if renderer:
  widget = renderer.render(tool_data)
  content = str(widget.renderable)
+ elif tool_name == "llm_error_details":
+ lines = ["[red]✗ LLM Request Failed[/red]"]
+ if args.get("details"):
+ details = args["details"]
+ if len(details) > 300:
+ details = details[:297] + "..."
+ lines.append(f"[dim]Details:[/dim] {escape_markup(details)}")
+ content = "\n".join(lines)
  else:
  status_icons = {
  "running": "[yellow]●[/yellow]",
@@ -1127,6 +1145,19 @@ class StrixCLIApp(App): # type: ignore[misc]
  else:
  return True

+ def _update_static_content_safe(self, widget: Static, content: str) -> None:
+ try:
+ widget.update(content)
+ except Exception: # noqa: BLE001
+ try:
+ safe_text = Text.from_markup(content)
+ widget.update(safe_text)
+ except Exception: # noqa: BLE001
+ import re
+
+ plain_text = re.sub(r"\[.*?\]", "", content)
+ widget.update(plain_text)
+

  async def run_strix_cli(args: argparse.Namespace) -> None:
  app = StrixCLIApp(args)
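
The markup changes in strix/cli/app.py and in the renderer files further down all follow one pattern: user-controlled strings pass through rich.markup.escape before being interpolated into Rich markup. A minimal, self-contained sketch of why that matters (the example string is hypothetical, not taken from the package):

```python
# Sketch: Rich parses [tag]...[/tag] sequences in printed strings, so untrusted
# text can inject styling or, with a malformed tag, raise MarkupError and break
# the widget update. escape() makes the brackets render literally instead.
from rich.console import Console
from rich.markup import escape

console = Console()

tool_name = "[red]payload[/red]"  # hypothetical attacker-controlled value
console.print(f"→ Using tool {escape(tool_name)}")  # brackets shown literally
```

The old helper also escaped "]", which Rich's parser does not treat as an escape sequence, so it could leave stray backslashes in rendered output; the rich_escape-based helper avoids that.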

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/main.py
@@ -48,8 +48,23 @@ def validate_environment() -> None:
  if not os.getenv("STRIX_LLM"):
  missing_required_vars.append("STRIX_LLM")

+ has_base_url = any(
+ [
+ os.getenv("LLM_API_BASE"),
+ os.getenv("OPENAI_API_BASE"),
+ os.getenv("LITELLM_BASE_URL"),
+ os.getenv("OLLAMA_API_BASE"),
+ ]
+ )
+
  if not os.getenv("LLM_API_KEY"):
- missing_required_vars.append("LLM_API_KEY")
+ if not has_base_url:
+ missing_required_vars.append("LLM_API_KEY")
+ else:
+ missing_optional_vars.append("LLM_API_KEY")
+
+ if not has_base_url:
+ missing_optional_vars.append("LLM_API_BASE")

  if not os.getenv("PERPLEXITY_API_KEY"):
  missing_optional_vars.append("PERPLEXITY_API_KEY")
@@ -65,40 +80,72 @@ def validate_environment() -> None:
  error_text.append(" is not set\n", style="white")

  if missing_optional_vars:
- error_text.append(
- "\nOptional (but recommended) environment variables:\n", style="dim white"
- )
+ error_text.append("\nOptional environment variables:\n", style="dim white")
  for var in missing_optional_vars:
  error_text.append(f"• {var}", style="dim yellow")
  error_text.append(" is not set\n", style="dim white")

  error_text.append("\nRequired environment variables:\n", style="white")
- error_text.append("• ", style="white")
- error_text.append("STRIX_LLM", style="bold cyan")
- error_text.append(
- " - Model name to use with litellm (e.g., 'openai/gpt-5')\n",
- style="white",
- )
- error_text.append("• ", style="white")
- error_text.append("LLM_API_KEY", style="bold cyan")
- error_text.append(" - API key for the LLM provider\n", style="white")
+ for var in missing_required_vars:
+ if var == "STRIX_LLM":
+ error_text.append("• ", style="white")
+ error_text.append("STRIX_LLM", style="bold cyan")
+ error_text.append(
+ " - Model name to use with litellm (e.g., 'openai/gpt-5')\n",
+ style="white",
+ )
+ elif var == "LLM_API_KEY":
+ error_text.append("• ", style="white")
+ error_text.append("LLM_API_KEY", style="bold cyan")
+ error_text.append(
+ " - API key for the LLM provider (required for cloud providers)\n",
+ style="white",
+ )

  if missing_optional_vars:
  error_text.append("\nOptional environment variables:\n", style="white")
- error_text.append("• ", style="white")
- error_text.append("PERPLEXITY_API_KEY", style="bold cyan")
- error_text.append(
- " - API key for Perplexity AI web search (enables real-time research)\n",
- style="white",
- )
+ for var in missing_optional_vars:
+ if var == "LLM_API_KEY":
+ error_text.append("• ", style="white")
+ error_text.append("LLM_API_KEY", style="bold cyan")
+ error_text.append(" - API key for the LLM provider\n", style="white")
+ elif var == "LLM_API_BASE":
+ error_text.append("• ", style="white")
+ error_text.append("LLM_API_BASE", style="bold cyan")
+ error_text.append(
+ " - Custom API base URL if using local models (e.g., Ollama, LMStudio)\n",
+ style="white",
+ )
+ elif var == "PERPLEXITY_API_KEY":
+ error_text.append("• ", style="white")
+ error_text.append("PERPLEXITY_API_KEY", style="bold cyan")
+ error_text.append(
+ " - API key for Perplexity AI web search (enables real-time research)\n",
+ style="white",
+ )

  error_text.append("\nExample setup:\n", style="white")
  error_text.append("export STRIX_LLM='openai/gpt-5'\n", style="dim white")
- error_text.append("export LLM_API_KEY='your-api-key-here'\n", style="dim white")
+
+ if "LLM_API_KEY" in missing_required_vars:
+ error_text.append("export LLM_API_KEY='your-api-key-here'\n", style="dim white")
+
  if missing_optional_vars:
- error_text.append(
- "export PERPLEXITY_API_KEY='your-perplexity-key-here'", style="dim white"
- )
+ for var in missing_optional_vars:
+ if var == "LLM_API_KEY":
+ error_text.append(
+ "export LLM_API_KEY='your-api-key-here' # optional with local models\n",
+ style="dim white",
+ )
+ elif var == "LLM_API_BASE":
+ error_text.append(
+ "export LLM_API_BASE='http://localhost:11434' # needed for local models only\n",
+ style="dim white",
+ )
+ elif var == "PERPLEXITY_API_KEY":
+ error_text.append(
+ "export PERPLEXITY_API_KEY='your-perplexity-key-here'\n", style="dim white"
+ )

  panel = Panel(
  error_text,
@@ -152,6 +199,15 @@ async def warm_up_llm() -> None:
  if api_key:
  litellm.api_key = api_key

+ api_base = (
+ os.getenv("LLM_API_BASE")
+ or os.getenv("OPENAI_API_BASE")
+ or os.getenv("LITELLM_BASE_URL")
+ or os.getenv("OLLAMA_API_BASE")
+ )
+ if api_base:
+ litellm.api_base = api_base
+
  test_messages = [
  {"role": "system", "content": "You are a helpful assistant."},
  {"role": "user", "content": "Reply with just 'OK'."},

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/agents_graph_renderer.py
@@ -31,7 +31,7 @@ class CreateAgentRenderer(BaseToolRenderer):
  task = args.get("task", "")
  name = args.get("name", "Agent")

- header = f"🤖 [bold #fbbf24]Creating {name}[/]"
+ header = f"🤖 [bold #fbbf24]Creating {cls.escape_markup(name)}[/]"

  if task:
  task_display = task[:400] + "..." if len(task) > 400 else task

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/base_renderer.py
@@ -1,6 +1,7 @@
  from abc import ABC, abstractmethod
  from typing import Any, ClassVar

+ from rich.markup import escape as rich_escape
  from textual.widgets import Static


@@ -16,7 +17,7 @@ class BaseToolRenderer(ABC):

  @classmethod
  def escape_markup(cls, text: str) -> str:
- return text.replace("[", "\\[").replace("]", "\\]")
+ return rich_escape(text)

  @classmethod
  def format_args(cls, args: dict[str, Any], max_length: int = 500) -> str:

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/browser_renderer.py
@@ -76,7 +76,7 @@ class BrowserRenderer(BaseToolRenderer):
  "double_click": "double clicking",
  "hover": "hovering",
  }
- message = action_words[action]
+ message = cls.escape_markup(action_words[action])

  return f"{browser_icon} [#06b6d4]{message}[/]"

@@ -97,9 +97,9 @@ class BrowserRenderer(BaseToolRenderer):
  }

  if action in simple_actions:
- return f"{browser_icon} [#06b6d4]{simple_actions[action]}[/]"
+ return f"{browser_icon} [#06b6d4]{cls.escape_markup(simple_actions[action])}[/]"

- return f"{browser_icon} [#06b6d4]{action}[/]"
+ return f"{browser_icon} [#06b6d4]{cls.escape_markup(action)}[/]"

  @classmethod
  def _format_url(cls, url: str) -> str:

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/proxy_renderer.py
@@ -64,7 +64,7 @@ class ViewRequestRenderer(BaseToolRenderer):

  part = args.get("part", "request")

- header = f"👀 [bold #06b6d4]Viewing {part}[/]"
+ header = f"👀 [bold #06b6d4]Viewing {cls.escape_markup(part)}[/]"

  if result and isinstance(result, dict):
  if "content" in result:
@@ -107,7 +107,7 @@ class SendRequestRenderer(BaseToolRenderer):
  method = args.get("method", "GET")
  url = args.get("url", "")

- header = f"📤 [bold #06b6d4]Sending {method}[/]"
+ header = f"📤 [bold #06b6d4]Sending {cls.escape_markup(method)}[/]"

  if result and isinstance(result, dict):
  status_code = result.get("status_code")

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/registry.py
@@ -54,7 +54,7 @@ def _render_default_tool_widget(tool_data: dict[str, Any]) -> Static:

  status_text = BaseToolRenderer.get_status_icon(status)

- header = f"→ Using tool [bold blue]{tool_name}[/]"
+ header = f"→ Using tool [bold blue]{BaseToolRenderer.escape_markup(tool_name)}[/]"
  content_parts = [header]

  args_str = BaseToolRenderer.format_args(args)

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/reporting_renderer.py
@@ -27,7 +27,7 @@ class CreateVulnerabilityReportRenderer(BaseToolRenderer):
  if severity:
  severity_color = cls._get_severity_color(severity.lower())
  content_parts.append(
- f" [dim]Severity: [{severity_color}]{severity.upper()}[/{severity_color}][/]"
+ f" [dim]Severity: [{severity_color}]{cls.escape_markup(severity.upper())}[/{severity_color}][/]"
  )

  if content:

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/scan_info_renderer.py
@@ -28,12 +28,12 @@ class ScanStartInfoRenderer(BaseToolRenderer):
  @classmethod
  def _build_target_display(cls, target: dict[str, Any]) -> str:
  if target_url := target.get("target_url"):
- return f"[bold #22c55e]{target_url}[/]"
+ return cls.escape_markup(str(target_url))
  if target_repo := target.get("target_repo"):
- return f"[bold #22c55e]{target_repo}[/]"
+ return cls.escape_markup(str(target_repo))
  if target_path := target.get("target_path"):
- return f"[bold #22c55e]{target_path}[/]"
- return "[dim]unknown target[/dim]"
+ return cls.escape_markup(str(target_path))
+ return "unknown target"


  @register_tool_renderer
@@ -49,9 +49,11 @@ class SubagentStartInfoRenderer(BaseToolRenderer):
  name = args.get("name", "Unknown Agent")
  task = args.get("task", "")

- content = f"🤖 Spawned subagent [bold #22c55e]{name}[/]"
+ name = cls.escape_markup(str(name))
+ content = f"🤖 Spawned subagent {name}"
  if task:
- content += f"\n Task: [dim]{task}[/dim]"
+ task = cls.escape_markup(str(task))
+ content += f"\n Task: {task}"

  css_classes = cls.get_css_classes(status)
  return Static(content, classes=css_classes)

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tool_components/terminal_renderer.py
@@ -111,7 +111,7 @@ class TerminalRenderer(BaseToolRenderer):
  )

  if is_special:
- return f"{terminal_icon} [#ef4444]{command}[/]"
+ return f"{terminal_icon} [#ef4444]{cls.escape_markup(command)}[/]"

  if is_input:
  formatted_command = cls._format_command_display(command)

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/cli/tracer.py
@@ -168,10 +168,14 @@ class Tracer:
  self.tool_executions[execution_id]["result"] = result
  self.tool_executions[execution_id]["completed_at"] = datetime.now(UTC).isoformat()

- def update_agent_status(self, agent_id: str, status: str) -> None:
+ def update_agent_status(
+ self, agent_id: str, status: str, error_message: str | None = None
+ ) -> None:
  if agent_id in self.agents:
  self.agents[agent_id]["status"] = status
  self.agents[agent_id]["updated_at"] = datetime.now(UTC).isoformat()
+ if error_message:
+ self.agents[agent_id]["error_message"] = error_message

  def set_scan_config(self, config: dict[str, Any]) -> None:
  self.scan_config = config

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/llm.py
@@ -28,9 +28,21 @@ api_key = os.getenv("LLM_API_KEY")
  if api_key:
  litellm.api_key = api_key

+ api_base = (
+ os.getenv("LLM_API_BASE")
+ or os.getenv("OPENAI_API_BASE")
+ or os.getenv("LITELLM_BASE_URL")
+ or os.getenv("OLLAMA_API_BASE")
+ )
+ if api_base:
+ litellm.api_base = api_base
+

  class LLMRequestFailedError(Exception):
- """Raised when LLM request fails after all retry attempts."""
+ def __init__(self, message: str, details: str | None = None):
+ super().__init__(message)
+ self.message = message
+ self.details = details


  MODELS_WITHOUT_STOP_WORDS = [
@@ -211,7 +223,7 @@ class LLM:

  return cached_messages

- async def generate(
+ async def generate( # noqa: PLR0912, PLR0915
  self,
  conversation_history: list[dict[str, Any]],
  scan_id: str | None = None,
@@ -255,8 +267,50 @@ class LLM:
  tool_invocations=tool_invocations if tool_invocations else None,
  )

+ except litellm.RateLimitError as e:
+ raise LLMRequestFailedError("LLM request failed: Rate limit exceeded", str(e)) from e
+ except litellm.AuthenticationError as e:
+ raise LLMRequestFailedError("LLM request failed: Invalid API key", str(e)) from e
+ except litellm.NotFoundError as e:
+ raise LLMRequestFailedError("LLM request failed: Model not found", str(e)) from e
+ except litellm.ContextWindowExceededError as e:
+ raise LLMRequestFailedError("LLM request failed: Context too long", str(e)) from e
+ except litellm.ContentPolicyViolationError as e:
+ raise LLMRequestFailedError(
+ "LLM request failed: Content policy violation", str(e)
+ ) from e
+ except litellm.ServiceUnavailableError as e:
+ raise LLMRequestFailedError("LLM request failed: Service unavailable", str(e)) from e
+ except litellm.Timeout as e:
+ raise LLMRequestFailedError("LLM request failed: Request timed out", str(e)) from e
+ except litellm.UnprocessableEntityError as e:
+ raise LLMRequestFailedError("LLM request failed: Unprocessable entity", str(e)) from e
+ except litellm.InternalServerError as e:
+ raise LLMRequestFailedError("LLM request failed: Internal server error", str(e)) from e
+ except litellm.APIConnectionError as e:
+ raise LLMRequestFailedError("LLM request failed: Connection error", str(e)) from e
+ except litellm.UnsupportedParamsError as e:
+ raise LLMRequestFailedError("LLM request failed: Unsupported parameters", str(e)) from e
+ except litellm.BudgetExceededError as e:
+ raise LLMRequestFailedError("LLM request failed: Budget exceeded", str(e)) from e
+ except litellm.APIResponseValidationError as e:
+ raise LLMRequestFailedError(
+ "LLM request failed: Response validation error", str(e)
+ ) from e
+ except litellm.JSONSchemaValidationError as e:
+ raise LLMRequestFailedError(
+ "LLM request failed: JSON schema validation error", str(e)
+ ) from e
+ except litellm.InvalidRequestError as e:
+ raise LLMRequestFailedError("LLM request failed: Invalid request", str(e)) from e
+ except litellm.BadRequestError as e:
+ raise LLMRequestFailedError("LLM request failed: Bad request", str(e)) from e
+ except litellm.APIError as e:
+ raise LLMRequestFailedError("LLM request failed: API error", str(e)) from e
+ except litellm.OpenAIError as e:
+ raise LLMRequestFailedError("LLM request failed: OpenAI error", str(e)) from e
  except Exception as e:
- raise LLMRequestFailedError("LLM request failed after all retry attempts") from e
+ raise LLMRequestFailedError(f"LLM request failed: {type(e).__name__}", str(e)) from e

  @property
  def usage_stats(self) -> dict[str, dict[str, int | float]]:
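
The exception class itself is small; the point of the change is that callers (BaseAgent, the tracer, the CLI status line) now get both a short category-style summary and the raw provider error. A self-contained sketch of the intended consumption, with the class body copied from the hunk above and an illustrative raise:

```python
class LLMRequestFailedError(Exception):
    def __init__(self, message: str, details: str | None = None):
        super().__init__(message)
        self.message = message
        self.details = details

def call_llm() -> None:
    # Stand-in for LLM.generate(): short summary plus the raw provider error text.
    raise LLMRequestFailedError("LLM request failed: Rate limit exceeded", "429 Too Many Requests")

try:
    call_llm()
except LLMRequestFailedError as exc:
    print(str(exc))                       # short summary, shown in the agent status line
    print(getattr(exc, "details", None))  # full details, surfaced as an llm_error_details event
```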

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/llm/request_queue.py
@@ -4,13 +4,27 @@ import threading
  import time
  from typing import Any

+ import litellm
  from litellm import ModelResponse, completion
- from tenacity import retry, stop_after_attempt, wait_exponential
+ from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential


  logger = logging.getLogger(__name__)


+ def should_retry_exception(exception: Exception) -> bool:
+ status_code = None
+
+ if hasattr(exception, "status_code"):
+ status_code = exception.status_code
+ elif hasattr(exception, "response") and hasattr(exception.response, "status_code"):
+ status_code = exception.response.status_code
+
+ if status_code is not None:
+ return bool(litellm._should_retry(status_code))
+ return True
+
+
  class LLMRequestQueue:
  def __init__(self, max_concurrent: int = 6, delay_between_requests: float = 1.0):
  self.max_concurrent = max_concurrent
@@ -40,6 +54,7 @@ class LLMRequestQueue:
  @retry( # type: ignore[misc]
  stop=stop_after_attempt(5),
  wait=wait_exponential(multiplier=2, min=1, max=30),
+ retry=retry_if_exception(should_retry_exception),
  reraise=True,
  )
  async def _reliable_request(self, completion_args: dict[str, Any]) -> ModelResponse:
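
For context, tenacity's retry_if_exception takes a predicate over the raised exception and retries only when it returns True; the new should_retry_exception defers to litellm._should_retry when a status code is available and retries otherwise. A standalone sketch of the same pattern, with a made-up status-code set standing in for litellm's check:

```python
from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential

RETRYABLE_STATUSES = {408, 429, 500, 502, 503, 504}  # assumption, not litellm's exact set

class ProviderError(Exception):
    def __init__(self, status_code: int) -> None:
        super().__init__(f"HTTP {status_code}")
        self.status_code = status_code

def _is_retryable(exc: Exception) -> bool:
    # Retry only when the exception carries a status code we consider transient.
    return getattr(exc, "status_code", None) in RETRYABLE_STATUSES

@retry(
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=2, min=1, max=30),
    retry=retry_if_exception(_is_retryable),
    reraise=True,
)
def flaky_request() -> str:
    raise ProviderError(503)  # retried with exponential backoff, then re-raised
```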

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/runtime/docker_runtime.py
@@ -320,7 +320,7 @@ class DockerRuntime(AbstractRuntime):
  import httpx

  try:
- async with httpx.AsyncClient() as client:
+ async with httpx.AsyncClient(trust_env=False) as client:
  response = await client.post(
  f"{api_url}/register_agent",
  params={"agent_id": agent_id},
@@ -337,7 +337,7 @@ class DockerRuntime(AbstractRuntime):
  container = self.client.containers.get(container_id)
  container.reload()

- host = "localhost"
+ host = "127.0.0.1"
  if "DOCKER_HOST" in os.environ:
  docker_host = os.environ["DOCKER_HOST"]
  if "://" in docker_host:

{strix_agent-0.1.16 → strix_agent-0.1.18}/strix/tools/executor.py
@@ -62,7 +62,7 @@ async def _execute_tool_in_sandbox(tool_name: str, agent_state: Any, **kwargs: A
  "Content-Type": "application/json",
  }

- async with httpx.AsyncClient() as client:
+ async with httpx.AsyncClient(trust_env=False) as client:
  try:
  response = await client.post(
  request_url, json=request_data, headers=headers, timeout=None
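
The trust_env=False change in both docker_runtime.py and executor.py keeps httpx from reading proxy-related environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY and friends) when talking to the sandbox tool server. A minimal sketch of the call pattern (the port is a placeholder):

```python
import asyncio
import httpx

async def register_agent(api_url: str, agent_id: str) -> int:
    # trust_env=False: ignore proxy and other environment-derived client settings,
    # so requests to the local tool server are not routed through a proxy by accident.
    async with httpx.AsyncClient(trust_env=False) as client:
        response = await client.post(f"{api_url}/register_agent", params={"agent_id": agent_id})
        return response.status_code

# asyncio.run(register_agent("http://127.0.0.1:8000", "demo-agent"))  # placeholder port
```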