llmcode-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212) hide show
  1. llm_code/__init__.py +2 -0
  2. llm_code/analysis/__init__.py +6 -0
  3. llm_code/analysis/cache.py +33 -0
  4. llm_code/analysis/engine.py +256 -0
  5. llm_code/analysis/go_rules.py +114 -0
  6. llm_code/analysis/js_rules.py +84 -0
  7. llm_code/analysis/python_rules.py +311 -0
  8. llm_code/analysis/rules.py +140 -0
  9. llm_code/analysis/rust_rules.py +108 -0
  10. llm_code/analysis/universal_rules.py +111 -0
  11. llm_code/api/__init__.py +0 -0
  12. llm_code/api/client.py +90 -0
  13. llm_code/api/errors.py +73 -0
  14. llm_code/api/openai_compat.py +390 -0
  15. llm_code/api/provider.py +35 -0
  16. llm_code/api/sse.py +52 -0
  17. llm_code/api/types.py +140 -0
  18. llm_code/cli/__init__.py +0 -0
  19. llm_code/cli/commands.py +70 -0
  20. llm_code/cli/image.py +122 -0
  21. llm_code/cli/render.py +214 -0
  22. llm_code/cli/status_line.py +79 -0
  23. llm_code/cli/streaming.py +92 -0
  24. llm_code/cli/tui_main.py +220 -0
  25. llm_code/computer_use/__init__.py +11 -0
  26. llm_code/computer_use/app_detect.py +49 -0
  27. llm_code/computer_use/app_tier.py +57 -0
  28. llm_code/computer_use/coordinator.py +99 -0
  29. llm_code/computer_use/input_control.py +71 -0
  30. llm_code/computer_use/screenshot.py +93 -0
  31. llm_code/cron/__init__.py +13 -0
  32. llm_code/cron/parser.py +145 -0
  33. llm_code/cron/scheduler.py +135 -0
  34. llm_code/cron/storage.py +126 -0
  35. llm_code/enterprise/__init__.py +1 -0
  36. llm_code/enterprise/audit.py +59 -0
  37. llm_code/enterprise/auth.py +26 -0
  38. llm_code/enterprise/oidc.py +95 -0
  39. llm_code/enterprise/rbac.py +65 -0
  40. llm_code/harness/__init__.py +5 -0
  41. llm_code/harness/config.py +33 -0
  42. llm_code/harness/engine.py +129 -0
  43. llm_code/harness/guides.py +41 -0
  44. llm_code/harness/sensors.py +68 -0
  45. llm_code/harness/templates.py +84 -0
  46. llm_code/hida/__init__.py +1 -0
  47. llm_code/hida/classifier.py +187 -0
  48. llm_code/hida/engine.py +49 -0
  49. llm_code/hida/profiles.py +95 -0
  50. llm_code/hida/types.py +28 -0
  51. llm_code/ide/__init__.py +1 -0
  52. llm_code/ide/bridge.py +80 -0
  53. llm_code/ide/detector.py +76 -0
  54. llm_code/ide/server.py +169 -0
  55. llm_code/logging.py +29 -0
  56. llm_code/lsp/__init__.py +0 -0
  57. llm_code/lsp/client.py +298 -0
  58. llm_code/lsp/detector.py +42 -0
  59. llm_code/lsp/manager.py +56 -0
  60. llm_code/lsp/tools.py +288 -0
  61. llm_code/marketplace/__init__.py +0 -0
  62. llm_code/marketplace/builtin_registry.py +102 -0
  63. llm_code/marketplace/installer.py +162 -0
  64. llm_code/marketplace/plugin.py +78 -0
  65. llm_code/marketplace/registry.py +360 -0
  66. llm_code/mcp/__init__.py +0 -0
  67. llm_code/mcp/bridge.py +87 -0
  68. llm_code/mcp/client.py +117 -0
  69. llm_code/mcp/health.py +120 -0
  70. llm_code/mcp/manager.py +214 -0
  71. llm_code/mcp/oauth.py +219 -0
  72. llm_code/mcp/transport.py +254 -0
  73. llm_code/mcp/types.py +53 -0
  74. llm_code/remote/__init__.py +0 -0
  75. llm_code/remote/client.py +136 -0
  76. llm_code/remote/protocol.py +22 -0
  77. llm_code/remote/server.py +275 -0
  78. llm_code/remote/ssh_proxy.py +56 -0
  79. llm_code/runtime/__init__.py +0 -0
  80. llm_code/runtime/auto_commit.py +56 -0
  81. llm_code/runtime/auto_diagnose.py +62 -0
  82. llm_code/runtime/checkpoint.py +70 -0
  83. llm_code/runtime/checkpoint_recovery.py +142 -0
  84. llm_code/runtime/compaction.py +35 -0
  85. llm_code/runtime/compressor.py +415 -0
  86. llm_code/runtime/config.py +533 -0
  87. llm_code/runtime/context.py +49 -0
  88. llm_code/runtime/conversation.py +921 -0
  89. llm_code/runtime/cost_tracker.py +126 -0
  90. llm_code/runtime/dream.py +127 -0
  91. llm_code/runtime/file_protection.py +150 -0
  92. llm_code/runtime/hardware.py +85 -0
  93. llm_code/runtime/hooks.py +223 -0
  94. llm_code/runtime/indexer.py +230 -0
  95. llm_code/runtime/knowledge_compiler.py +232 -0
  96. llm_code/runtime/memory.py +132 -0
  97. llm_code/runtime/memory_layers.py +467 -0
  98. llm_code/runtime/memory_lint.py +252 -0
  99. llm_code/runtime/model_aliases.py +37 -0
  100. llm_code/runtime/ollama.py +93 -0
  101. llm_code/runtime/overlay.py +124 -0
  102. llm_code/runtime/permissions.py +200 -0
  103. llm_code/runtime/plan.py +45 -0
  104. llm_code/runtime/prompt.py +238 -0
  105. llm_code/runtime/repo_map.py +174 -0
  106. llm_code/runtime/sandbox.py +116 -0
  107. llm_code/runtime/session.py +268 -0
  108. llm_code/runtime/skill_resolver.py +61 -0
  109. llm_code/runtime/skills.py +133 -0
  110. llm_code/runtime/speculative.py +75 -0
  111. llm_code/runtime/streaming_executor.py +216 -0
  112. llm_code/runtime/telemetry.py +196 -0
  113. llm_code/runtime/token_budget.py +26 -0
  114. llm_code/runtime/vcr.py +142 -0
  115. llm_code/runtime/vision.py +102 -0
  116. llm_code/swarm/__init__.py +1 -0
  117. llm_code/swarm/backend_subprocess.py +108 -0
  118. llm_code/swarm/backend_tmux.py +103 -0
  119. llm_code/swarm/backend_worktree.py +306 -0
  120. llm_code/swarm/checkpoint.py +74 -0
  121. llm_code/swarm/coordinator.py +236 -0
  122. llm_code/swarm/mailbox.py +88 -0
  123. llm_code/swarm/manager.py +202 -0
  124. llm_code/swarm/memory_sync.py +80 -0
  125. llm_code/swarm/recovery.py +21 -0
  126. llm_code/swarm/team.py +67 -0
  127. llm_code/swarm/types.py +31 -0
  128. llm_code/task/__init__.py +16 -0
  129. llm_code/task/diagnostics.py +93 -0
  130. llm_code/task/manager.py +162 -0
  131. llm_code/task/types.py +112 -0
  132. llm_code/task/verifier.py +104 -0
  133. llm_code/tools/__init__.py +0 -0
  134. llm_code/tools/agent.py +145 -0
  135. llm_code/tools/agent_roles.py +82 -0
  136. llm_code/tools/base.py +94 -0
  137. llm_code/tools/bash.py +565 -0
  138. llm_code/tools/computer_use_tools.py +278 -0
  139. llm_code/tools/coordinator_tool.py +75 -0
  140. llm_code/tools/cron_create.py +90 -0
  141. llm_code/tools/cron_delete.py +49 -0
  142. llm_code/tools/cron_list.py +51 -0
  143. llm_code/tools/deferred.py +92 -0
  144. llm_code/tools/dump.py +116 -0
  145. llm_code/tools/edit_file.py +282 -0
  146. llm_code/tools/git_tools.py +531 -0
  147. llm_code/tools/glob_search.py +112 -0
  148. llm_code/tools/grep_search.py +144 -0
  149. llm_code/tools/ide_diagnostics.py +59 -0
  150. llm_code/tools/ide_open.py +58 -0
  151. llm_code/tools/ide_selection.py +52 -0
  152. llm_code/tools/memory_tools.py +138 -0
  153. llm_code/tools/multi_edit.py +143 -0
  154. llm_code/tools/notebook_edit.py +107 -0
  155. llm_code/tools/notebook_read.py +81 -0
  156. llm_code/tools/parsing.py +63 -0
  157. llm_code/tools/read_file.py +154 -0
  158. llm_code/tools/registry.py +58 -0
  159. llm_code/tools/search_backends/__init__.py +56 -0
  160. llm_code/tools/search_backends/brave.py +56 -0
  161. llm_code/tools/search_backends/duckduckgo.py +129 -0
  162. llm_code/tools/search_backends/searxng.py +71 -0
  163. llm_code/tools/search_backends/tavily.py +73 -0
  164. llm_code/tools/swarm_create.py +109 -0
  165. llm_code/tools/swarm_delete.py +95 -0
  166. llm_code/tools/swarm_list.py +44 -0
  167. llm_code/tools/swarm_message.py +109 -0
  168. llm_code/tools/task_close.py +79 -0
  169. llm_code/tools/task_plan.py +79 -0
  170. llm_code/tools/task_verify.py +90 -0
  171. llm_code/tools/tool_search.py +65 -0
  172. llm_code/tools/web_common.py +258 -0
  173. llm_code/tools/web_fetch.py +223 -0
  174. llm_code/tools/web_search.py +280 -0
  175. llm_code/tools/write_file.py +118 -0
  176. llm_code/tui/__init__.py +1 -0
  177. llm_code/tui/app.py +2432 -0
  178. llm_code/tui/chat_view.py +82 -0
  179. llm_code/tui/chat_widgets.py +309 -0
  180. llm_code/tui/header_bar.py +46 -0
  181. llm_code/tui/input_bar.py +349 -0
  182. llm_code/tui/keybindings.py +142 -0
  183. llm_code/tui/marketplace.py +210 -0
  184. llm_code/tui/status_bar.py +72 -0
  185. llm_code/tui/theme.py +96 -0
  186. llm_code/utils/__init__.py +0 -0
  187. llm_code/utils/diff.py +111 -0
  188. llm_code/utils/errors.py +70 -0
  189. llm_code/utils/hyperlink.py +73 -0
  190. llm_code/utils/notebook.py +179 -0
  191. llm_code/utils/search.py +69 -0
  192. llm_code/utils/text_normalize.py +28 -0
  193. llm_code/utils/version_check.py +62 -0
  194. llm_code/vim/__init__.py +4 -0
  195. llm_code/vim/engine.py +51 -0
  196. llm_code/vim/motions.py +172 -0
  197. llm_code/vim/operators.py +183 -0
  198. llm_code/vim/text_objects.py +139 -0
  199. llm_code/vim/transitions.py +279 -0
  200. llm_code/vim/types.py +68 -0
  201. llm_code/voice/__init__.py +1 -0
  202. llm_code/voice/languages.py +43 -0
  203. llm_code/voice/recorder.py +136 -0
  204. llm_code/voice/stt.py +36 -0
  205. llm_code/voice/stt_anthropic.py +66 -0
  206. llm_code/voice/stt_google.py +32 -0
  207. llm_code/voice/stt_whisper.py +52 -0
  208. llmcode_cli-1.0.0.dist-info/METADATA +524 -0
  209. llmcode_cli-1.0.0.dist-info/RECORD +212 -0
  210. llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
  211. llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
  212. llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,107 @@
1
+ """NotebookEditTool — replace, insert, or delete cells in a Jupyter notebook."""
2
+ from __future__ import annotations
3
+
4
+ import json
5
+ import pathlib
6
+
7
+ from pydantic import BaseModel
8
+
9
+ from llm_code.tools.base import PermissionLevel, Tool, ToolResult
10
+ from llm_code.utils.notebook import edit_notebook, validate_notebook
11
+
12
+
13
class NotebookEditInput(BaseModel):
    """Validated input for NotebookEditTool."""

    path: str  # path to the .ipynb file
    command: str  # one of "replace", "insert", "delete" (re-checked in execute)
    cell_index: int  # 0-based cell index to operate on
    source: str | None = None  # new cell source (used by replace/insert)
    cell_type: str | None = None  # "code" | "markdown" | "raw" (optional)
20
+
21
class NotebookEditTool(Tool):
    """Tool that mutates one cell of a Jupyter notebook file on disk.

    Supports three commands: replace a cell's source, insert a new cell
    before an index, or delete a cell. The notebook is re-serialized with
    indent=1, matching Jupyter's own on-disk formatting.
    """

    @property
    def name(self) -> str:
        return "notebook_edit"

    @property
    def description(self) -> str:
        return (
            "Edit a Jupyter notebook (.ipynb) file by replacing, inserting, or deleting a cell. "
            "Use 'replace' to change a cell's source, 'insert' to add a new cell before a given index, "
            "or 'delete' to remove a cell."
        )

    @property
    def input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "Absolute path to the .ipynb file"},
                "command": {
                    "type": "string",
                    "enum": ["replace", "insert", "delete"],
                    "description": "Edit command: replace, insert, or delete",
                },
                "cell_index": {
                    "type": "integer",
                    "description": "0-based cell index to operate on",
                },
                "source": {
                    "type": "string",
                    "description": "New cell source (required for replace and insert)",
                },
                "cell_type": {
                    "type": "string",
                    "enum": ["code", "markdown", "raw"],
                    "description": "Cell type for insert or replace (default: code)",
                },
            },
            "required": ["path", "command", "cell_index"],
        }

    @property
    def required_permission(self) -> PermissionLevel:
        return PermissionLevel.WORKSPACE_WRITE

    @property
    def input_model(self) -> type[NotebookEditInput]:
        return NotebookEditInput

    def execute(self, args: dict) -> ToolResult:
        """Apply one edit command to the notebook; returns is_error on any failure."""
        nb_path = pathlib.Path(args["path"])
        cmd: str = args["command"]
        index: int = int(args["cell_index"])
        new_source: str | None = args.get("source")
        new_type: str | None = args.get("cell_type")

        # Re-validate the command even though the schema has an enum:
        # schema enforcement depends on the caller honoring it.
        if cmd not in ("replace", "insert", "delete"):
            return ToolResult(
                output=f"Invalid command {cmd!r}. Use replace, insert, or delete.",
                is_error=True,
            )

        if not nb_path.exists():
            return ToolResult(output=f"Notebook not found: {nb_path}", is_error=True)

        try:
            notebook = json.loads(nb_path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, UnicodeDecodeError) as exc:
            return ToolResult(output=f"Failed to parse notebook JSON: {exc}", is_error=True)

        if not validate_notebook(notebook):
            return ToolResult(
                output="Invalid notebook: requires nbformat >= 4 and a cells list.",
                is_error=True,
            )

        # edit_notebook raises IndexError/ValueError for bad indices or
        # missing required arguments; surface those messages verbatim.
        try:
            edited = edit_notebook(notebook, cmd, index, source=new_source, cell_type=new_type)
        except (IndexError, ValueError) as exc:
            return ToolResult(output=str(exc), is_error=True)

        nb_path.write_text(json.dumps(edited, indent=1, ensure_ascii=False), encoding="utf-8")

        cell_count = len(edited.get("cells", []))
        return ToolResult(
            output=f"Notebook updated: {cmd} at cell {index}. Notebook now has {cell_count} cell(s)."
        )
@@ -0,0 +1,81 @@
1
+ """NotebookReadTool — reads Jupyter notebook cells with outputs."""
2
+ from __future__ import annotations
3
+
4
+ import json
5
+ import pathlib
6
+
7
+ from pydantic import BaseModel
8
+
9
+ from llm_code.tools.base import PermissionLevel, Tool, ToolResult
10
+ from llm_code.utils.notebook import format_cells, parse_notebook, validate_notebook
11
+
12
+
13
+ class NotebookReadInput(BaseModel):
14
+ path: str
15
+
16
+
17
class NotebookReadTool(Tool):
    """Read-only tool that renders a Jupyter notebook as text.

    Cell sources/outputs are formatted by ``format_cells``; any images
    found in cell outputs are surfaced via the result's metadata dict
    under the "images" key.
    """

    @property
    def name(self) -> str:
        return "notebook_read"

    @property
    def description(self) -> str:
        return (
            "Read a Jupyter notebook (.ipynb) file. "
            "Returns all cells with their source code, outputs, and execution counts. "
            "Images from outputs are returned as base64 in metadata."
        )

    @property
    def input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "Absolute path to the .ipynb file"},
            },
            "required": ["path"],
        }

    @property
    def required_permission(self) -> PermissionLevel:
        return PermissionLevel.READ_ONLY

    @property
    def input_model(self) -> type[NotebookReadInput]:
        return NotebookReadInput

    def is_read_only(self, args: dict) -> bool:
        return True

    def is_concurrency_safe(self, args: dict) -> bool:
        return True

    def execute(self, args: dict) -> ToolResult:
        """Parse and format the notebook; returns is_error on any failure."""
        nb_path = pathlib.Path(args["path"])

        if not nb_path.exists():
            return ToolResult(output=f"Notebook not found: {nb_path}", is_error=True)

        try:
            notebook = json.loads(nb_path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, UnicodeDecodeError) as exc:
            return ToolResult(output=f"Failed to parse notebook JSON: {exc}", is_error=True)

        if not validate_notebook(notebook):
            return ToolResult(
                output="Invalid notebook: requires nbformat >= 4 and a cells list.",
                is_error=True,
            )

        cells = parse_notebook(notebook)
        rendered = format_cells(cells)

        # Flatten every cell's images into one list for the metadata payload.
        images = [img for cell in cells for img in cell.images]
        meta: dict | None = {"images": images} if images else None

        return ToolResult(output=rendered, metadata=meta)
@@ -0,0 +1,63 @@
1
+ """Dual-track tool call parsing: native API format and XML tag format."""
2
+ from __future__ import annotations
3
+
4
+ import dataclasses
5
+ import json
6
+ import re
7
+ import uuid
8
+
9
+ _XML_TOOL_CALL_RE = re.compile(
10
+ r"<tool_call>(.*?)</tool_call>",
11
+ re.DOTALL,
12
+ )
13
+
14
+
15
@dataclasses.dataclass(frozen=True)
class ParsedToolCall:
    """One tool invocation extracted from a model response."""

    id: str  # call id from the native payload, or a generated UUID
    name: str  # tool name to dispatch on
    args: dict  # arguments passed to the tool
    source: str  # "native" | "xml_tag"
21
+
22
+
23
def parse_tool_calls(
    response_text: str,
    native_tool_calls: list[dict] | None,
) -> list[ParsedToolCall]:
    """Parse tool calls from either native API format or XML tags in text.

    The native track wins whenever native_tool_calls is a non-empty list;
    otherwise response_text is scanned for <tool_call>...</tool_call> tags.
    """
    return (
        _parse_native(native_tool_calls)
        if native_tool_calls
        else _parse_xml(response_text)
    )
35
+
36
+
37
def _parse_native(native: list[dict]) -> list[ParsedToolCall]:
    """Convert native API tool-call dicts into ParsedToolCall records.

    Entries without a name are skipped. A fresh UUID is used when the
    call carries no usable id, and args falls back to an empty dict when
    "input" is missing or not a dict — this keeps ParsedToolCall's
    str/dict fields from ever holding None.
    """
    result: list[ParsedToolCall] = []
    for call in native:
        name = call.get("name", "")
        if not name:
            continue
        # BUG FIX: the original used call.get("id", str(uuid.uuid4())),
        # which both generates a UUID on every iteration even when an id
        # is present and still yields None for an explicit {"id": None}.
        call_id = call.get("id") or str(uuid.uuid4())
        args = call.get("input")
        if not isinstance(args, dict):
            args = {}
        result.append(ParsedToolCall(id=call_id, name=name, args=args, source="native"))
    return result
47
+
48
+
49
def _parse_xml(text: str) -> list[ParsedToolCall]:
    """Extract tool calls from <tool_call>...</tool_call> JSON payloads.

    Malformed JSON, non-object payloads, and payloads without a "tool"
    name are skipped rather than raising, so one bad tag cannot abort
    parsing of the rest of the response.
    """
    result: list[ParsedToolCall] = []
    for match in _XML_TOOL_CALL_RE.finditer(text):
        raw = match.group(1).strip()
        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            continue
        # BUG FIX: json.loads happily returns lists/strings/numbers
        # (e.g. "[1, 2]"); the original crashed with AttributeError on
        # .get for those. Skip any payload that is not a JSON object.
        if not isinstance(data, dict):
            continue
        name = data.get("tool")
        if not name:
            continue
        args = data.get("args", {})
        if not isinstance(args, dict):
            # Keep the args field a dict even for {"args": [...]} payloads.
            args = {}
        call_id = str(uuid.uuid4())
        result.append(ParsedToolCall(id=call_id, name=name, args=args, source="xml_tag"))
    return result
@@ -0,0 +1,154 @@
1
+ """ReadFileTool — reads text files with line numbers, or images as base64."""
2
+ from __future__ import annotations
3
+
4
+ import base64
5
+ import json
6
+ import pathlib
7
+
8
+ from pydantic import BaseModel
9
+
10
+ from llm_code.runtime.file_protection import check_read
11
+ from llm_code.tools.base import PermissionLevel, Tool, ToolResult
12
+ from llm_code.utils.errors import friendly_error
13
+
14
+
15
class ReadFileInput(BaseModel):
    """Validated input for ReadFileTool."""

    path: str  # path to the file to read
    offset: int = 1  # 1-based line number to start reading from
    limit: int = 2000  # maximum number of lines returned
19
+
20
# Files with this suffix are routed through the notebook reader.
_NOTEBOOK_EXTENSION = ".ipynb"

# Suffixes treated as images, mapped to the media type reported in metadata.
_IMAGE_EXTENSIONS = {
    ".png": "image/png",
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".gif": "image/gif",
    ".webp": "image/webp",
    ".bmp": "image/bmp",
    ".svg": "image/svg+xml",
}
31
+
32
+
33
class ReadFileTool(Tool):
    """Read a file from disk.

    Dispatches on extension: .ipynb files are rendered cell-by-cell,
    known image types are returned as base64 in metadata, and everything
    else is read as text with 1-based line numbers.
    """

    @property
    def name(self) -> str:
        return "read_file"

    @property
    def description(self) -> str:
        return (
            "Read a file from the filesystem. "
            "Text files are returned with line numbers. "
            "Images are returned as base64 in metadata."
        )

    @property
    def input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "Absolute path to the file"},
                "offset": {
                    "type": "integer",
                    "description": "1-based line number to start reading from (default 1)",
                    "default": 1,
                },
                "limit": {
                    "type": "integer",
                    "description": "Maximum number of lines to read (default 2000)",
                    "default": 2000,
                },
            },
            "required": ["path"],
        }

    @property
    def required_permission(self) -> PermissionLevel:
        return PermissionLevel.READ_ONLY

    @property
    def input_model(self) -> type[ReadFileInput]:
        return ReadFileInput

    def is_read_only(self, args: dict) -> bool:
        return True

    def is_concurrency_safe(self, args: dict) -> bool:
        return True

    def execute(self, args: dict) -> ToolResult:
        """Validate the path, dispatch by suffix, and prepend any read warning."""
        path = pathlib.Path(args["path"])
        offset: int = int(args.get("offset", 1))
        limit: int = int(args.get("limit", 2000))

        if not path.exists():
            return ToolResult(output=f"File not found: {path}", is_error=True)

        if path.is_dir():
            return ToolResult(
                output=f"Path is a directory, not a file: {path}\nUse glob_search or bash 'ls' to list directory contents.",
                is_error=True,
            )

        # File-protection check can downgrade to a warning ("warn") that is
        # prepended to successful output rather than blocking the read.
        read_check = check_read(str(path))
        read_warning = read_check.reason if read_check.severity == "warn" else ""

        suffix = path.suffix.lower()

        if suffix == _NOTEBOOK_EXTENSION:
            result = self._read_notebook(path)
        elif suffix in _IMAGE_EXTENSIONS:
            result = self._read_image(path, _IMAGE_EXTENSIONS[suffix])
        else:
            result = self._read_text(path, offset, limit)

        if read_warning and not result.is_error:
            # ToolResult is treated as immutable: build a new one with the
            # warning prefixed instead of mutating in place.
            result = ToolResult(
                output=f"[WARNING] {read_warning}\n{result.output}",
                metadata=result.metadata,
                is_error=result.is_error,
            )
        return result

    def _read_notebook(self, path: pathlib.Path) -> ToolResult:
        """Render an .ipynb file; images from outputs go into metadata."""
        # Local import avoids paying the notebook-utils import cost on
        # the common (plain text) path.
        from llm_code.utils.notebook import format_cells, parse_notebook, validate_notebook

        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, UnicodeDecodeError) as exc:
            return ToolResult(output=f"Failed to parse notebook JSON: {exc}", is_error=True)

        if not validate_notebook(data):
            return ToolResult(
                output="Invalid notebook: requires nbformat >= 4 and a cells list.",
                is_error=True,
            )

        cells = parse_notebook(data)
        output_text = format_cells(cells)

        all_images: list[dict] = []
        for cell in cells:
            all_images.extend(cell.images)

        metadata: dict | None = {"images": all_images} if all_images else None
        return ToolResult(output=output_text, metadata=metadata)

    def _read_image(self, path: pathlib.Path, media_type: str) -> ToolResult:
        """Return the image as base64 metadata with a short text placeholder."""
        data = base64.b64encode(path.read_bytes()).decode()
        return ToolResult(
            output=f"[image: {path.name}]",
            metadata={"type": "image", "media_type": media_type, "data": data},
        )

    def _read_text(self, path: pathlib.Path, offset: int, limit: int) -> ToolResult:
        """Read a text slice [offset, offset+limit) with 1-based line numbers."""
        try:
            # BUG FIX: read with explicit UTF-8 (matching _read_notebook)
            # instead of the platform default encoding; on Windows the
            # default is a legacy codepage and silently mangles UTF-8
            # sources. errors="replace" still keeps odd bytes readable.
            lines = path.read_text(encoding="utf-8", errors="replace").splitlines()
        except (PermissionError, OSError) as exc:
            return ToolResult(output=friendly_error(exc, str(path)), is_error=True)
        # offset is 1-based; clamp so offset=0/negative behaves like 1.
        start = max(offset - 1, 0)
        selected = lines[start : start + limit]
        numbered = "\n".join(f"{start + i + 1}\t{line}" for i, line in enumerate(selected))
        return ToolResult(output=numbered)
@@ -0,0 +1,58 @@
1
+ """Tool registry for managing and dispatching tools."""
2
+ from __future__ import annotations
3
+
4
+ from llm_code.api.types import ToolDefinition
5
+ from llm_code.tools.base import Tool, ToolResult
6
+
7
+
8
class ToolRegistry:
    """Central registry for tools with lookup and execution."""

    def __init__(self) -> None:
        # Tool name -> Tool instance; insertion order is preserved.
        self._tools: dict[str, Tool] = {}

    def register(self, tool: Tool) -> None:
        """Register a tool; raises ValueError if name already registered."""
        key = tool.name
        if key in self._tools:
            raise ValueError(f"Tool '{key}' is already registered")
        self._tools[key] = tool

    def get(self, name: str) -> Tool | None:
        """Return the tool with the given name, or None if not found."""
        return self._tools.get(name)

    def all_tools(self) -> tuple[Tool, ...]:
        """Return all registered tools as a tuple."""
        return tuple(self._tools.values())

    def definitions(self, allowed: set[str] | None = None) -> tuple[ToolDefinition, ...]:
        """Return ToolDefinitions, optionally filtered to allowed names."""
        if allowed is None:
            selected = list(self._tools.values())
        else:
            selected = [t for t in self._tools.values() if t.name in allowed]
        return tuple(t.to_definition() for t in selected)

    def definitions_with_deferred(
        self,
        allowed: set[str] | None = None,
        max_visible: int = 20,
    ) -> tuple[tuple[ToolDefinition, ...], int]:
        """Return (visible_definitions, deferred_count) using DeferredToolManager.

        Core tools are always visible; remaining tools fill slots up to
        max_visible; the rest are deferred. Returns the visible definitions
        as a tuple and the count of deferred tools as an integer.
        """
        # Local import keeps the registry importable without the deferred
        # subsystem.
        from llm_code.tools.deferred import DeferredToolManager

        candidates = list(self.definitions(allowed=allowed))
        visible, deferred = DeferredToolManager().select_tools(
            candidates, max_visible=max_visible
        )
        return tuple(visible), len(deferred)

    def execute(self, name: str, args: dict) -> ToolResult:
        """Execute a tool by name; returns is_error=True if tool not found."""
        target = self._tools.get(name)
        if target is None:
            return ToolResult(output=f"Tool '{name}' not found", is_error=True)
        return target.execute(args)
@@ -0,0 +1,56 @@
1
+ """Search backend protocol and factory."""
2
+ from __future__ import annotations
3
+
4
+ import dataclasses
5
+ from typing import Protocol, runtime_checkable
6
+
7
+
8
@dataclasses.dataclass(frozen=True)
class SearchResult:
    """A single search result."""

    title: str  # result page title
    url: str  # result URL
    snippet: str  # short description/extract from the backend
15
+
16
+
17
@runtime_checkable
class SearchBackend(Protocol):
    """Structural protocol for search backends.

    runtime_checkable allows isinstance() checks against implementations
    that simply provide a matching `name` property and `search` method.
    """

    @property
    def name(self) -> str:
        """Backend identifier."""
        ...

    def search(self, query: str, *, max_results: int = 10) -> tuple[SearchResult, ...]:
        """Execute search and return results.

        Returns empty tuple on error.
        """
        ...
32
+
33
+
34
def create_backend(backend_name: str, **kwargs: object) -> SearchBackend:
    """Factory function to create a search backend by name.

    Args:
        backend_name: One of "duckduckgo", "brave", "tavily", "searxng".
        **kwargs: Backend-specific keyword arguments (e.g. api_key, base_url).

    Raises:
        ValueError: If backend_name is not recognized.
    """
    # Backend modules are imported lazily so that an unused backend's
    # dependencies are never loaded.
    if backend_name == "duckduckgo":
        from llm_code.tools.search_backends.duckduckgo import DuckDuckGoBackend as backend_cls
    elif backend_name == "brave":
        from llm_code.tools.search_backends.brave import BraveBackend as backend_cls
    elif backend_name == "tavily":
        from llm_code.tools.search_backends.tavily import TavilyBackend as backend_cls
    elif backend_name == "searxng":
        from llm_code.tools.search_backends.searxng import SearXNGBackend as backend_cls
    else:
        raise ValueError(f"Unknown search backend: {backend_name!r}")
    return backend_cls(**kwargs)
@@ -0,0 +1,56 @@
1
+ """Brave Search backend."""
2
+ from __future__ import annotations
3
+
4
+ import httpx
5
+
6
+ from llm_code.tools.search_backends import SearchResult
7
+
8
+ _BRAVE_SEARCH_URL = "https://api.search.brave.com/res/v1/web/search"
9
+
10
+
11
class BraveBackend:
    """Search backend using Brave Search API (free tier: 2000 queries/month)."""

    def __init__(self, api_key: str) -> None:
        # Fail fast on a missing/blank key rather than on the first query.
        if not api_key or not api_key.strip():
            raise ValueError("api_key must not be empty")
        self._api_key = api_key

    @property
    def name(self) -> str:
        return "brave"

    def search(self, query: str, *, max_results: int = 10) -> tuple[SearchResult, ...]:
        """Query Brave web search; returns () on any network/API failure."""
        headers = {
            "Accept": "application/json",
            "Accept-Encoding": "gzip",
            "X-Subscription-Token": self._api_key,
        }
        try:
            response = httpx.get(
                _BRAVE_SEARCH_URL,
                params={"q": query, "count": max_results},
                headers=headers,
                timeout=15.0,
            )
        except httpx.RequestError:
            return ()

        if response.status_code != 200:
            return ()

        try:
            payload = response.json()
        except Exception:
            return ()

        hits = payload.get("web", {}).get("results", [])
        collected: list[SearchResult] = []
        for hit in hits[:max_results]:
            if not hit.get("url"):
                continue
            collected.append(
                SearchResult(
                    title=hit.get("title", ""),
                    url=hit.get("url", ""),
                    snippet=hit.get("description", ""),
                )
            )
        return tuple(collected)