llmcode-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212) hide show
  1. llm_code/__init__.py +2 -0
  2. llm_code/analysis/__init__.py +6 -0
  3. llm_code/analysis/cache.py +33 -0
  4. llm_code/analysis/engine.py +256 -0
  5. llm_code/analysis/go_rules.py +114 -0
  6. llm_code/analysis/js_rules.py +84 -0
  7. llm_code/analysis/python_rules.py +311 -0
  8. llm_code/analysis/rules.py +140 -0
  9. llm_code/analysis/rust_rules.py +108 -0
  10. llm_code/analysis/universal_rules.py +111 -0
  11. llm_code/api/__init__.py +0 -0
  12. llm_code/api/client.py +90 -0
  13. llm_code/api/errors.py +73 -0
  14. llm_code/api/openai_compat.py +390 -0
  15. llm_code/api/provider.py +35 -0
  16. llm_code/api/sse.py +52 -0
  17. llm_code/api/types.py +140 -0
  18. llm_code/cli/__init__.py +0 -0
  19. llm_code/cli/commands.py +70 -0
  20. llm_code/cli/image.py +122 -0
  21. llm_code/cli/render.py +214 -0
  22. llm_code/cli/status_line.py +79 -0
  23. llm_code/cli/streaming.py +92 -0
  24. llm_code/cli/tui_main.py +220 -0
  25. llm_code/computer_use/__init__.py +11 -0
  26. llm_code/computer_use/app_detect.py +49 -0
  27. llm_code/computer_use/app_tier.py +57 -0
  28. llm_code/computer_use/coordinator.py +99 -0
  29. llm_code/computer_use/input_control.py +71 -0
  30. llm_code/computer_use/screenshot.py +93 -0
  31. llm_code/cron/__init__.py +13 -0
  32. llm_code/cron/parser.py +145 -0
  33. llm_code/cron/scheduler.py +135 -0
  34. llm_code/cron/storage.py +126 -0
  35. llm_code/enterprise/__init__.py +1 -0
  36. llm_code/enterprise/audit.py +59 -0
  37. llm_code/enterprise/auth.py +26 -0
  38. llm_code/enterprise/oidc.py +95 -0
  39. llm_code/enterprise/rbac.py +65 -0
  40. llm_code/harness/__init__.py +5 -0
  41. llm_code/harness/config.py +33 -0
  42. llm_code/harness/engine.py +129 -0
  43. llm_code/harness/guides.py +41 -0
  44. llm_code/harness/sensors.py +68 -0
  45. llm_code/harness/templates.py +84 -0
  46. llm_code/hida/__init__.py +1 -0
  47. llm_code/hida/classifier.py +187 -0
  48. llm_code/hida/engine.py +49 -0
  49. llm_code/hida/profiles.py +95 -0
  50. llm_code/hida/types.py +28 -0
  51. llm_code/ide/__init__.py +1 -0
  52. llm_code/ide/bridge.py +80 -0
  53. llm_code/ide/detector.py +76 -0
  54. llm_code/ide/server.py +169 -0
  55. llm_code/logging.py +29 -0
  56. llm_code/lsp/__init__.py +0 -0
  57. llm_code/lsp/client.py +298 -0
  58. llm_code/lsp/detector.py +42 -0
  59. llm_code/lsp/manager.py +56 -0
  60. llm_code/lsp/tools.py +288 -0
  61. llm_code/marketplace/__init__.py +0 -0
  62. llm_code/marketplace/builtin_registry.py +102 -0
  63. llm_code/marketplace/installer.py +162 -0
  64. llm_code/marketplace/plugin.py +78 -0
  65. llm_code/marketplace/registry.py +360 -0
  66. llm_code/mcp/__init__.py +0 -0
  67. llm_code/mcp/bridge.py +87 -0
  68. llm_code/mcp/client.py +117 -0
  69. llm_code/mcp/health.py +120 -0
  70. llm_code/mcp/manager.py +214 -0
  71. llm_code/mcp/oauth.py +219 -0
  72. llm_code/mcp/transport.py +254 -0
  73. llm_code/mcp/types.py +53 -0
  74. llm_code/remote/__init__.py +0 -0
  75. llm_code/remote/client.py +136 -0
  76. llm_code/remote/protocol.py +22 -0
  77. llm_code/remote/server.py +275 -0
  78. llm_code/remote/ssh_proxy.py +56 -0
  79. llm_code/runtime/__init__.py +0 -0
  80. llm_code/runtime/auto_commit.py +56 -0
  81. llm_code/runtime/auto_diagnose.py +62 -0
  82. llm_code/runtime/checkpoint.py +70 -0
  83. llm_code/runtime/checkpoint_recovery.py +142 -0
  84. llm_code/runtime/compaction.py +35 -0
  85. llm_code/runtime/compressor.py +415 -0
  86. llm_code/runtime/config.py +533 -0
  87. llm_code/runtime/context.py +49 -0
  88. llm_code/runtime/conversation.py +921 -0
  89. llm_code/runtime/cost_tracker.py +126 -0
  90. llm_code/runtime/dream.py +127 -0
  91. llm_code/runtime/file_protection.py +150 -0
  92. llm_code/runtime/hardware.py +85 -0
  93. llm_code/runtime/hooks.py +223 -0
  94. llm_code/runtime/indexer.py +230 -0
  95. llm_code/runtime/knowledge_compiler.py +232 -0
  96. llm_code/runtime/memory.py +132 -0
  97. llm_code/runtime/memory_layers.py +467 -0
  98. llm_code/runtime/memory_lint.py +252 -0
  99. llm_code/runtime/model_aliases.py +37 -0
  100. llm_code/runtime/ollama.py +93 -0
  101. llm_code/runtime/overlay.py +124 -0
  102. llm_code/runtime/permissions.py +200 -0
  103. llm_code/runtime/plan.py +45 -0
  104. llm_code/runtime/prompt.py +238 -0
  105. llm_code/runtime/repo_map.py +174 -0
  106. llm_code/runtime/sandbox.py +116 -0
  107. llm_code/runtime/session.py +268 -0
  108. llm_code/runtime/skill_resolver.py +61 -0
  109. llm_code/runtime/skills.py +133 -0
  110. llm_code/runtime/speculative.py +75 -0
  111. llm_code/runtime/streaming_executor.py +216 -0
  112. llm_code/runtime/telemetry.py +196 -0
  113. llm_code/runtime/token_budget.py +26 -0
  114. llm_code/runtime/vcr.py +142 -0
  115. llm_code/runtime/vision.py +102 -0
  116. llm_code/swarm/__init__.py +1 -0
  117. llm_code/swarm/backend_subprocess.py +108 -0
  118. llm_code/swarm/backend_tmux.py +103 -0
  119. llm_code/swarm/backend_worktree.py +306 -0
  120. llm_code/swarm/checkpoint.py +74 -0
  121. llm_code/swarm/coordinator.py +236 -0
  122. llm_code/swarm/mailbox.py +88 -0
  123. llm_code/swarm/manager.py +202 -0
  124. llm_code/swarm/memory_sync.py +80 -0
  125. llm_code/swarm/recovery.py +21 -0
  126. llm_code/swarm/team.py +67 -0
  127. llm_code/swarm/types.py +31 -0
  128. llm_code/task/__init__.py +16 -0
  129. llm_code/task/diagnostics.py +93 -0
  130. llm_code/task/manager.py +162 -0
  131. llm_code/task/types.py +112 -0
  132. llm_code/task/verifier.py +104 -0
  133. llm_code/tools/__init__.py +0 -0
  134. llm_code/tools/agent.py +145 -0
  135. llm_code/tools/agent_roles.py +82 -0
  136. llm_code/tools/base.py +94 -0
  137. llm_code/tools/bash.py +565 -0
  138. llm_code/tools/computer_use_tools.py +278 -0
  139. llm_code/tools/coordinator_tool.py +75 -0
  140. llm_code/tools/cron_create.py +90 -0
  141. llm_code/tools/cron_delete.py +49 -0
  142. llm_code/tools/cron_list.py +51 -0
  143. llm_code/tools/deferred.py +92 -0
  144. llm_code/tools/dump.py +116 -0
  145. llm_code/tools/edit_file.py +282 -0
  146. llm_code/tools/git_tools.py +531 -0
  147. llm_code/tools/glob_search.py +112 -0
  148. llm_code/tools/grep_search.py +144 -0
  149. llm_code/tools/ide_diagnostics.py +59 -0
  150. llm_code/tools/ide_open.py +58 -0
  151. llm_code/tools/ide_selection.py +52 -0
  152. llm_code/tools/memory_tools.py +138 -0
  153. llm_code/tools/multi_edit.py +143 -0
  154. llm_code/tools/notebook_edit.py +107 -0
  155. llm_code/tools/notebook_read.py +81 -0
  156. llm_code/tools/parsing.py +63 -0
  157. llm_code/tools/read_file.py +154 -0
  158. llm_code/tools/registry.py +58 -0
  159. llm_code/tools/search_backends/__init__.py +56 -0
  160. llm_code/tools/search_backends/brave.py +56 -0
  161. llm_code/tools/search_backends/duckduckgo.py +129 -0
  162. llm_code/tools/search_backends/searxng.py +71 -0
  163. llm_code/tools/search_backends/tavily.py +73 -0
  164. llm_code/tools/swarm_create.py +109 -0
  165. llm_code/tools/swarm_delete.py +95 -0
  166. llm_code/tools/swarm_list.py +44 -0
  167. llm_code/tools/swarm_message.py +109 -0
  168. llm_code/tools/task_close.py +79 -0
  169. llm_code/tools/task_plan.py +79 -0
  170. llm_code/tools/task_verify.py +90 -0
  171. llm_code/tools/tool_search.py +65 -0
  172. llm_code/tools/web_common.py +258 -0
  173. llm_code/tools/web_fetch.py +223 -0
  174. llm_code/tools/web_search.py +280 -0
  175. llm_code/tools/write_file.py +118 -0
  176. llm_code/tui/__init__.py +1 -0
  177. llm_code/tui/app.py +2432 -0
  178. llm_code/tui/chat_view.py +82 -0
  179. llm_code/tui/chat_widgets.py +309 -0
  180. llm_code/tui/header_bar.py +46 -0
  181. llm_code/tui/input_bar.py +349 -0
  182. llm_code/tui/keybindings.py +142 -0
  183. llm_code/tui/marketplace.py +210 -0
  184. llm_code/tui/status_bar.py +72 -0
  185. llm_code/tui/theme.py +96 -0
  186. llm_code/utils/__init__.py +0 -0
  187. llm_code/utils/diff.py +111 -0
  188. llm_code/utils/errors.py +70 -0
  189. llm_code/utils/hyperlink.py +73 -0
  190. llm_code/utils/notebook.py +179 -0
  191. llm_code/utils/search.py +69 -0
  192. llm_code/utils/text_normalize.py +28 -0
  193. llm_code/utils/version_check.py +62 -0
  194. llm_code/vim/__init__.py +4 -0
  195. llm_code/vim/engine.py +51 -0
  196. llm_code/vim/motions.py +172 -0
  197. llm_code/vim/operators.py +183 -0
  198. llm_code/vim/text_objects.py +139 -0
  199. llm_code/vim/transitions.py +279 -0
  200. llm_code/vim/types.py +68 -0
  201. llm_code/voice/__init__.py +1 -0
  202. llm_code/voice/languages.py +43 -0
  203. llm_code/voice/recorder.py +136 -0
  204. llm_code/voice/stt.py +36 -0
  205. llm_code/voice/stt_anthropic.py +66 -0
  206. llm_code/voice/stt_google.py +32 -0
  207. llm_code/voice/stt_whisper.py +52 -0
  208. llmcode_cli-1.0.0.dist-info/METADATA +524 -0
  209. llmcode_cli-1.0.0.dist-info/RECORD +212 -0
  210. llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
  211. llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
  212. llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,223 @@
1
+ """WebFetch tool — fetch and extract content from URLs."""
2
+ from __future__ import annotations
3
+
4
+ from typing import Any, Literal
5
+
6
+ import httpx
7
+ from pydantic import BaseModel, field_validator
8
+
9
+ from llm_code.tools.base import PermissionLevel, Tool, ToolResult
10
+ from llm_code.tools.web_common import UrlCache, classify_url, extract_content
11
+
12
+
13
class WebFetchInput(BaseModel):
    """Validated arguments for :class:`WebFetchTool`."""

    url: str
    prompt: str = ""
    max_length: int = 50_000
    raw: bool = False
    renderer: Literal["auto", "default", "browser"] = "auto"

    @field_validator("url")
    @classmethod
    def url_must_not_be_empty(cls, value: str) -> str:
        """Reject blank URLs and normalize surrounding whitespace."""
        stripped = value.strip()
        if not stripped:
            raise ValueError("url must not be empty")
        return stripped
28
+
29
+
30
# Module-level cache shared across tool instances.
# max_entries=50 bounds memory; ttl=900.0 seconds matches the
# "cached for 15 minutes" claim in WebFetchTool.description.
_cache: UrlCache = UrlCache(max_entries=50, ttl=900.0)
32
+
33
+
34
class WebFetchTool(Tool):
    """Tool that fetches and extracts content from a URL.

    Content extraction is delegated to :func:`extract_content` (HTML,
    JSON, and plain text handling). Successful results are cached in a
    :class:`UrlCache` (shared module-level instance by default) and URL
    safety is checked via :func:`classify_url` before any network call.
    """

    def __init__(self, cache: UrlCache | None = None) -> None:
        """Initialize the tool.

        Args:
            cache: Optional private cache (useful for tests). When omitted,
                the shared module-level cache is used.
        """
        self._cache = cache if cache is not None else _cache

    @property
    def name(self) -> str:
        return "web_fetch"

    @property
    def description(self) -> str:
        return (
            "Fetch content from a URL and return it in a readable format. "
            "Supports HTML (converted to markdown), JSON (pretty-printed), "
            "and plain text. Results are cached for 15 minutes."
        )

    @property
    def required_permission(self) -> PermissionLevel:
        # Arbitrary network access requires the broadest permission level.
        return PermissionLevel.FULL_ACCESS

    @property
    def input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The URL to fetch.",
                },
                "prompt": {
                    "type": "string",
                    "description": "Optional description of what to extract from the page.",
                    "default": "",
                },
                "max_length": {
                    "type": "integer",
                    "description": "Maximum content length in characters.",
                    "default": 50_000,
                },
                "raw": {
                    "type": "boolean",
                    "description": "If true, skip readability processing for HTML.",
                    "default": False,
                },
                "renderer": {
                    "type": "string",
                    "enum": ["auto", "default", "browser"],
                    "description": (
                        "Renderer to use: 'auto' (detect playwright availability), "
                        "'default' (httpx), 'browser' (playwright)."
                    ),
                    "default": "auto",
                },
            },
            "required": ["url"],
        }

    @property
    def input_model(self) -> type[BaseModel]:
        return WebFetchInput

    def is_read_only(self, args: dict) -> bool:  # noqa: ARG002
        # Conservative: a GET can still trigger remote side effects.
        return False

    def is_concurrency_safe(self, args: dict) -> bool:  # noqa: ARG002
        return True

    def _resolve_renderer(self, renderer: str) -> str:
        """Resolve 'auto' to a concrete renderer based on availability.

        Args:
            renderer: One of 'auto', 'default', 'browser'.

        Returns:
            'browser' when playwright is importable (for 'auto'), otherwise
            'default'; explicit choices pass through unchanged.
        """
        if renderer == "auto":
            try:
                import playwright  # noqa: F401

                return "browser"
            except ImportError:
                return "default"
        return renderer

    def _fetch_with_httpx(self, url: str) -> tuple[str, str, int]:
        """Fetch URL with httpx.

        Returns:
            Tuple of (body, content_type, status_code).

        Raises:
            httpx.HTTPStatusError: For 4xx/5xx responses.
            httpx.RequestError: For network-level failures.
        """
        response = httpx.get(url, follow_redirects=True, timeout=30.0)
        response.raise_for_status()
        content_type = response.headers.get("content-type", "text/plain")
        return response.text, content_type, response.status_code

    def _fetch_with_browser(self, url: str) -> tuple[str, str, int]:
        """Fetch URL with playwright. Falls back to httpx if playwright fails.

        Any playwright failure (missing package, launch error, navigation
        error) silently degrades to the plain httpx fetch.
        """
        try:
            from playwright.sync_api import sync_playwright

            with sync_playwright() as p:
                browser = p.chromium.launch(headless=True)
                try:
                    page = browser.new_page()
                    response = page.goto(url, timeout=30_000)
                    # page.goto may return None (e.g. same-document
                    # navigation); assume success in that case.
                    status_code = response.status if response else 200
                    content = page.content()
                    return content, "text/html", status_code
                finally:
                    browser.close()
        except Exception:
            # Fall back to httpx
            return self._fetch_with_httpx(url)

    def execute(self, args: dict) -> ToolResult:
        """Execute the web fetch tool.

        Validates input, checks URL safety, serves from cache when possible,
        otherwise fetches with the resolved renderer, extracts readable
        content, caches it, and returns it.

        Args:
            args: Raw tool arguments matching :class:`WebFetchInput`.

        Returns:
            ToolResult with extracted content, or an error result.
        """
        from pydantic import ValidationError

        # BUGFIX: previously caught `(KeyError, TypeError, Exception)` — the
        # bare Exception made the tuple redundant and swallowed everything,
        # while the imported ValidationError was never used. Catch the two
        # failure modes model construction actually has: pydantic validation
        # errors and bad argument shapes (e.g. non-string keys).
        try:
            parsed = WebFetchInput(**args)
        except (ValidationError, TypeError) as exc:
            return ToolResult(
                output=f"Invalid input: {exc}",
                is_error=True,
            )

        url = parsed.url
        max_length = parsed.max_length
        raw = parsed.raw
        renderer = parsed.renderer
        # NOTE(review): parsed.prompt is accepted but never used here —
        # presumably consumed by a downstream summarizer; confirm.

        # Check URL safety before touching the network.
        safety = classify_url(url)
        if safety.is_blocked:
            reasons = ", ".join(safety.reasons) if safety.reasons else "unknown"
            return ToolResult(
                output=f"URL blocked: {url} (reasons: {reasons})",
                is_error=True,
                metadata={"url": url, "blocked": True, "reasons": list(safety.reasons)},
            )

        # Serve from cache when a fresh entry exists.
        cached_content = self._cache.get(url)
        if cached_content is not None:
            return ToolResult(
                output=cached_content,
                is_error=False,
                metadata={
                    "url": url,
                    "cached": True,
                    "status_code": None,
                    "content_type": None,
                },
            )

        # Resolve 'auto' to a concrete renderer.
        resolved_renderer = self._resolve_renderer(renderer)

        # Fetch content, mapping transport failures to error results.
        try:
            if resolved_renderer == "browser":
                body, content_type, status_code = self._fetch_with_browser(url)
            else:
                body, content_type, status_code = self._fetch_with_httpx(url)
        except httpx.HTTPStatusError as exc:
            return ToolResult(
                output=f"HTTP error {exc.response.status_code}: {exc}",
                is_error=True,
                metadata={"url": url, "status_code": exc.response.status_code, "cached": False},
            )
        except httpx.RequestError as exc:
            return ToolResult(
                output=f"Network error fetching {url}: {exc}",
                is_error=True,
                metadata={"url": url, "cached": False},
            )
        except Exception as exc:
            # Last-resort guard so an unexpected renderer failure surfaces
            # as a tool error rather than crashing the agent loop.
            return ToolResult(
                output=f"Error fetching {url}: {exc}",
                is_error=True,
                metadata={"url": url, "cached": False},
            )

        # Extract readable content (markdown for HTML, pretty JSON, etc.).
        content = extract_content(body, content_type, raw=raw, max_length=max_length)

        # Cache only successful fetches.
        self._cache.put(url, content)

        return ToolResult(
            output=content,
            is_error=False,
            metadata={
                "url": url,
                "status_code": status_code,
                "content_type": content_type,
                "cached": False,
            },
        )
@@ -0,0 +1,280 @@
1
+ """WebSearchTool — web search using configurable backends."""
2
+ from __future__ import annotations
3
+
4
+ import fnmatch
5
+ import os
6
+ from urllib.parse import urlparse
7
+
8
+ from pydantic import BaseModel
9
+
10
+ from llm_code.tools.base import PermissionLevel, Tool, ToolResult
11
+ from llm_code.tools.search_backends import SearchResult, create_backend
12
+
13
# Backend names accepted by the "backend" argument; "auto" defers to config.
_VALID_BACKENDS = ("auto", "duckduckgo", "brave", "tavily", "searxng")
14
+
15
+
16
class WebSearchInput(BaseModel):
    """Validated arguments for WebSearchTool."""

    # The search query; must be non-empty (enforced in execute()).
    query: str
    # Maximum number of results to return.
    max_results: int = 10
    # One of _VALID_BACKENDS; "auto" selects from config (default: duckduckgo).
    backend: str = "auto"
20
+
21
+
22
class WebSearchTool(Tool):
    """Tool for performing web searches via configurable backends.

    Backends are created through :func:`create_backend`. With
    ``backend="auto"`` a fallback chain is tried (duckduckgo → brave →
    searxng → tavily, skipping unconfigured ones); otherwise the named
    backend is used directly. Results are filtered against config-supplied
    domain allow/deny lists and formatted as markdown.
    """

    @property
    def name(self) -> str:
        return "web_search"

    @property
    def description(self) -> str:
        return (
            "Search the web for information. "
            "Supports DuckDuckGo (default), Brave, Tavily, and SearXNG backends. "
            "Returns ranked results with titles, URLs, and snippets."
        )

    @property
    def input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query.",
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results to return (default: 10).",
                    "default": 10,
                },
                "backend": {
                    "type": "string",
                    "enum": list(_VALID_BACKENDS),
                    "description": (
                        "Search backend to use. 'auto' selects based on config "
                        "(default: duckduckgo)."
                    ),
                    "default": "auto",
                },
            },
            "required": ["query"],
        }

    @property
    def required_permission(self) -> PermissionLevel:
        # Network access implies the broadest permission level.
        return PermissionLevel.FULL_ACCESS

    @property
    def input_model(self) -> type[WebSearchInput]:
        return WebSearchInput

    def is_read_only(self, args: dict) -> bool:
        # Conservative: a search hits external services.
        return False

    def is_concurrency_safe(self, args: dict) -> bool:
        return True

    def _get_web_search_config(self) -> object | None:
        """Attempt to load WebSearchConfig from runtime config.

        Returns:
            A WebSearchConfig instance, or None when the runtime config
            module is unavailable.
        """
        try:
            from llm_code.runtime.config import WebSearchConfig

            return WebSearchConfig()
        except ImportError:
            return None

    def _resolve_backend(self, backend_arg: str) -> tuple[object, str]:
        """Resolve backend name and instantiate it.

        Args:
            backend_arg: Requested backend name, possibly 'auto'.

        Returns:
            Tuple of (backend_instance, backend_name).
        """
        cfg = self._get_web_search_config()

        if backend_arg == "auto":
            backend_name = "duckduckgo"
            if cfg is not None:
                backend_name = getattr(cfg, "default_backend", "duckduckgo")
        else:
            backend_name = backend_arg

        # Build kwargs for backends that need configuration (API keys are
        # read from the environment variable named in the config).
        kwargs: dict = {}
        if backend_name == "brave" and cfg is not None:
            api_key_env = getattr(cfg, "brave_api_key_env", "BRAVE_API_KEY")
            kwargs["api_key"] = os.environ.get(api_key_env, "")
        elif backend_name == "tavily" and cfg is not None:
            api_key_env = getattr(cfg, "tavily_api_key_env", "TAVILY_API_KEY")
            kwargs["api_key"] = os.environ.get(api_key_env, "")
        elif backend_name == "searxng" and cfg is not None:
            kwargs["base_url"] = getattr(cfg, "searxng_base_url", "")

        backend = create_backend(backend_name, **kwargs)
        return backend, backend_name

    def _filter_results(
        self,
        results: tuple[SearchResult, ...],
        *,
        domain_allowlist: tuple[str, ...],
        domain_denylist: tuple[str, ...],
    ) -> tuple[SearchResult, ...]:
        """Apply domain denylist then allowlist filtering.

        Denylist is applied first. If allowlist is non-empty, only results
        matching an allowlist pattern are kept. Empty lists pass everything.

        Args:
            results: Results to filter.
            domain_allowlist: Glob patterns for allowed domains.
            domain_denylist: Glob patterns for denied domains.

        Returns:
            Filtered tuple of SearchResult.
        """
        # BUGFIX: DNS hostnames are case-insensitive, but fnmatch.fnmatch is
        # case-sensitive on POSIX (it normalizes via os.path.normcase, a
        # no-op there). Lowercase both domain and patterns so filtering
        # behaves identically on every platform.
        def _get_domain(url: str) -> str:
            try:
                return urlparse(url).netloc.lower()
            except Exception:
                return url.lower()

        allow = tuple(p.lower() for p in domain_allowlist)
        deny = tuple(p.lower() for p in domain_denylist)

        filtered: list[SearchResult] = []
        for result in results:
            domain = _get_domain(result.url)

            # Denylist wins over allowlist.
            if deny and any(fnmatch.fnmatch(domain, pattern) for pattern in deny):
                continue

            # A non-empty allowlist must match.
            if allow and not any(fnmatch.fnmatch(domain, pattern) for pattern in allow):
                continue

            filtered.append(result)

        return tuple(filtered)

    def _format_results(self, query: str, results: tuple[SearchResult, ...]) -> str:
        """Format search results as markdown.

        Args:
            query: The original search query.
            results: Search results to format.

        Returns:
            Formatted markdown string.
        """
        lines: list[str] = [f'## Search Results for "{query}"', ""]

        if not results:
            lines.append("(0 results)")
            return "\n".join(lines)

        for i, result in enumerate(results, start=1):
            lines.append(f"{i}. **[{result.title}]({result.url})**")
            lines.append(f"   {result.snippet}")
            lines.append("")

        lines.append(f"({len(results)} results)")
        return "\n".join(lines)

    def execute(self, args: dict) -> ToolResult:
        """Execute a web search.

        Args:
            args: Dictionary with keys: query (required), max_results (int),
                backend (str enum).

        Returns:
            ToolResult with formatted search results, or error.
        """
        query = args.get("query", "")
        if not query or not str(query).strip():
            return ToolResult(
                output="Error: 'query' is required and must not be empty.",
                is_error=True,
            )

        max_results = int(args.get("max_results", 10))
        backend_arg = str(args.get("backend", "auto"))

        # Domain filtering patterns come from config (empty tuples = no-op).
        cfg = self._get_web_search_config()
        allowlist: tuple[str, ...] = ()
        denylist: tuple[str, ...] = ()
        if cfg is not None:
            allowlist = getattr(cfg, "domain_allowlist", ())
            denylist = getattr(cfg, "domain_denylist", ())

        if backend_arg == "auto":
            # Fallback chain: try each configured backend until one returns results
            results = self._search_with_fallback(query, max_results, cfg)
        else:
            # BUGFIX: previously caught `(ValueError, Exception)` — the bare
            # Exception made ValueError redundant. A single Exception clause
            # preserves the original behavior without the misleading tuple.
            try:
                backend, _name = self._resolve_backend(backend_arg)
            except Exception as exc:
                return ToolResult(
                    output=f"Error: Failed to initialize search backend: {exc}",
                    is_error=True,
                )
            try:
                results = backend.search(query, max_results=max_results)
            except Exception as exc:
                return ToolResult(
                    output=f"Error: Search failed: {exc}",
                    is_error=True,
                )

        results = self._filter_results(results, domain_allowlist=allowlist, domain_denylist=denylist)
        output = self._format_results(query, results)
        return ToolResult(output=output, is_error=False)

    def _search_with_fallback(
        self, query: str, max_results: int, cfg: object | None,
    ) -> tuple[SearchResult, ...]:
        """Try backends in order until one returns results.

        Fallback order: duckduckgo -> brave -> searxng -> tavily.
        Only backends that are configured (have API keys / base_url set) are
        tried; any backend error moves on to the next one.

        Returns:
            The first non-empty result tuple, or () when every backend fails
            or returns nothing.
        """
        # DuckDuckGo needs no configuration and always anchors the chain.
        chain: list[tuple[str, dict]] = [("duckduckgo", {})]

        if cfg is not None:
            brave_key = os.environ.get(
                getattr(cfg, "brave_api_key_env", "BRAVE_API_KEY"), ""
            )
            if brave_key:
                chain.append(("brave", {"api_key": brave_key}))

            searxng_url = getattr(cfg, "searxng_base_url", "")
            if searxng_url:
                chain.append(("searxng", {"base_url": searxng_url}))

            tavily_key = os.environ.get(
                getattr(cfg, "tavily_api_key_env", "TAVILY_API_KEY"), ""
            )
            if tavily_key:
                chain.append(("tavily", {"api_key": tavily_key}))

        for backend_name, kwargs in chain:
            try:
                backend = create_backend(backend_name, **kwargs)
                results = backend.search(query, max_results=max_results)
                if results:
                    return results
            except Exception:
                # Best-effort fallback: a failing backend must not abort the chain.
                continue

        return ()
@@ -0,0 +1,118 @@
1
+ """WriteFileTool — writes content to a file, auto-creating parent directories."""
2
+ from __future__ import annotations
3
+
4
+ import pathlib
5
+ from typing import TYPE_CHECKING
6
+
7
+ from pydantic import BaseModel
8
+
9
+ from llm_code.runtime.file_protection import check_write
10
+ from llm_code.tools.base import PermissionLevel, Tool, ToolResult
11
+ from llm_code.utils.errors import friendly_error
12
+
13
+ if TYPE_CHECKING:
14
+ from llm_code.runtime.overlay import OverlayFS
15
+
16
+
17
class WriteFileInput(BaseModel):
    """Validated arguments for WriteFileTool."""

    # Path of the file to write (schema describes it as absolute).
    path: str
    # Full file content; replaces any existing content entirely.
    content: str
20
+
21
+
22
class WriteFileTool(Tool):
    """Write content to a file, creating parent directories as needed.

    Supports two modes: a normal mode that writes to the real filesystem,
    and a speculative mode (``overlay`` argument) that writes to an
    :class:`OverlayFS` instead. Both modes attach a unified-diff metadata
    payload when an existing file is overwritten with different content.
    """

    @property
    def name(self) -> str:
        return "write_file"

    @property
    def description(self) -> str:
        return "Write content to a file, creating parent directories as needed."

    @property
    def input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "Absolute path to write to"},
                "content": {"type": "string", "description": "Content to write"},
            },
            "required": ["path", "content"],
        }

    @property
    def required_permission(self) -> PermissionLevel:
        return PermissionLevel.WORKSPACE_WRITE

    @property
    def input_model(self) -> type[WriteFileInput]:
        return WriteFileInput

    @staticmethod
    def _diff_metadata(
        old_content: str | None, new_content: str, filename: str
    ) -> dict | None:
        """Build diff metadata for an overwrite, or None when nothing changed.

        Extracted helper: the overlay and normal paths previously duplicated
        this block verbatim.
        """
        if old_content is None or old_content == new_content:
            return None
        from llm_code.utils.diff import generate_diff, count_changes

        hunks = generate_diff(old_content, new_content, filename)
        adds, dels = count_changes(hunks)
        return {
            "diff": [h.to_dict() for h in hunks],
            "additions": adds,
            "deletions": dels,
        }

    def execute(self, args: dict, overlay: "OverlayFS | None" = None) -> ToolResult:
        """Write ``args['content']`` to ``args['path']``.

        Args:
            args: Dictionary with 'path' and 'content' keys.
            overlay: When given, write speculatively to this overlay instead
                of the real filesystem.

        Returns:
            ToolResult summarizing the write, with diff metadata on overwrite;
            an error result when protection or the OS rejects the write.
        """
        path = pathlib.Path(args["path"])
        content: str = args["content"]

        # File-protection gate: hard blocks abort, warnings are surfaced
        # in the output but execution proceeds.
        protection = check_write(str(path))
        if not protection.allowed:
            return ToolResult(output=protection.reason, is_error=True)
        if protection.severity == "warn":
            warning_prefix = f"[WARNING] {protection.reason}\n"
        else:
            warning_prefix = ""

        if overlay is not None:
            # Speculative mode: write to overlay, read old content from overlay/real FS
            old_content: str | None = None
            try:
                old_content = overlay.read(path)
            except FileNotFoundError:
                pass

            overlay.write(path, content)
        else:
            # Normal mode: capture old content (for the diff) if overwriting.
            old_content = None
            if path.exists():
                # ROBUSTNESS FIX: previously a binary or unreadable existing
                # file made read_text() raise (UnicodeDecodeError/OSError)
                # before the write even happened. Treat an unreadable old
                # file as "no old content" and skip the diff instead.
                try:
                    old_content = path.read_text()
                except (OSError, UnicodeDecodeError):
                    old_content = None

            try:
                path.parent.mkdir(parents=True, exist_ok=True)
                path.write_text(content)
            except (PermissionError, OSError) as exc:
                return ToolResult(output=friendly_error(exc, str(path)), is_error=True)

        line_count = len(content.splitlines())
        output = warning_prefix + f"Wrote {line_count} lines to {path}"
        return ToolResult(
            output=output,
            metadata=self._diff_metadata(old_content, content, path.name),
        )
@@ -0,0 +1 @@
1
+ """Fullscreen TUI package using Textual."""