llmcode-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212) hide show
  1. llm_code/__init__.py +2 -0
  2. llm_code/analysis/__init__.py +6 -0
  3. llm_code/analysis/cache.py +33 -0
  4. llm_code/analysis/engine.py +256 -0
  5. llm_code/analysis/go_rules.py +114 -0
  6. llm_code/analysis/js_rules.py +84 -0
  7. llm_code/analysis/python_rules.py +311 -0
  8. llm_code/analysis/rules.py +140 -0
  9. llm_code/analysis/rust_rules.py +108 -0
  10. llm_code/analysis/universal_rules.py +111 -0
  11. llm_code/api/__init__.py +0 -0
  12. llm_code/api/client.py +90 -0
  13. llm_code/api/errors.py +73 -0
  14. llm_code/api/openai_compat.py +390 -0
  15. llm_code/api/provider.py +35 -0
  16. llm_code/api/sse.py +52 -0
  17. llm_code/api/types.py +140 -0
  18. llm_code/cli/__init__.py +0 -0
  19. llm_code/cli/commands.py +70 -0
  20. llm_code/cli/image.py +122 -0
  21. llm_code/cli/render.py +214 -0
  22. llm_code/cli/status_line.py +79 -0
  23. llm_code/cli/streaming.py +92 -0
  24. llm_code/cli/tui_main.py +220 -0
  25. llm_code/computer_use/__init__.py +11 -0
  26. llm_code/computer_use/app_detect.py +49 -0
  27. llm_code/computer_use/app_tier.py +57 -0
  28. llm_code/computer_use/coordinator.py +99 -0
  29. llm_code/computer_use/input_control.py +71 -0
  30. llm_code/computer_use/screenshot.py +93 -0
  31. llm_code/cron/__init__.py +13 -0
  32. llm_code/cron/parser.py +145 -0
  33. llm_code/cron/scheduler.py +135 -0
  34. llm_code/cron/storage.py +126 -0
  35. llm_code/enterprise/__init__.py +1 -0
  36. llm_code/enterprise/audit.py +59 -0
  37. llm_code/enterprise/auth.py +26 -0
  38. llm_code/enterprise/oidc.py +95 -0
  39. llm_code/enterprise/rbac.py +65 -0
  40. llm_code/harness/__init__.py +5 -0
  41. llm_code/harness/config.py +33 -0
  42. llm_code/harness/engine.py +129 -0
  43. llm_code/harness/guides.py +41 -0
  44. llm_code/harness/sensors.py +68 -0
  45. llm_code/harness/templates.py +84 -0
  46. llm_code/hida/__init__.py +1 -0
  47. llm_code/hida/classifier.py +187 -0
  48. llm_code/hida/engine.py +49 -0
  49. llm_code/hida/profiles.py +95 -0
  50. llm_code/hida/types.py +28 -0
  51. llm_code/ide/__init__.py +1 -0
  52. llm_code/ide/bridge.py +80 -0
  53. llm_code/ide/detector.py +76 -0
  54. llm_code/ide/server.py +169 -0
  55. llm_code/logging.py +29 -0
  56. llm_code/lsp/__init__.py +0 -0
  57. llm_code/lsp/client.py +298 -0
  58. llm_code/lsp/detector.py +42 -0
  59. llm_code/lsp/manager.py +56 -0
  60. llm_code/lsp/tools.py +288 -0
  61. llm_code/marketplace/__init__.py +0 -0
  62. llm_code/marketplace/builtin_registry.py +102 -0
  63. llm_code/marketplace/installer.py +162 -0
  64. llm_code/marketplace/plugin.py +78 -0
  65. llm_code/marketplace/registry.py +360 -0
  66. llm_code/mcp/__init__.py +0 -0
  67. llm_code/mcp/bridge.py +87 -0
  68. llm_code/mcp/client.py +117 -0
  69. llm_code/mcp/health.py +120 -0
  70. llm_code/mcp/manager.py +214 -0
  71. llm_code/mcp/oauth.py +219 -0
  72. llm_code/mcp/transport.py +254 -0
  73. llm_code/mcp/types.py +53 -0
  74. llm_code/remote/__init__.py +0 -0
  75. llm_code/remote/client.py +136 -0
  76. llm_code/remote/protocol.py +22 -0
  77. llm_code/remote/server.py +275 -0
  78. llm_code/remote/ssh_proxy.py +56 -0
  79. llm_code/runtime/__init__.py +0 -0
  80. llm_code/runtime/auto_commit.py +56 -0
  81. llm_code/runtime/auto_diagnose.py +62 -0
  82. llm_code/runtime/checkpoint.py +70 -0
  83. llm_code/runtime/checkpoint_recovery.py +142 -0
  84. llm_code/runtime/compaction.py +35 -0
  85. llm_code/runtime/compressor.py +415 -0
  86. llm_code/runtime/config.py +533 -0
  87. llm_code/runtime/context.py +49 -0
  88. llm_code/runtime/conversation.py +921 -0
  89. llm_code/runtime/cost_tracker.py +126 -0
  90. llm_code/runtime/dream.py +127 -0
  91. llm_code/runtime/file_protection.py +150 -0
  92. llm_code/runtime/hardware.py +85 -0
  93. llm_code/runtime/hooks.py +223 -0
  94. llm_code/runtime/indexer.py +230 -0
  95. llm_code/runtime/knowledge_compiler.py +232 -0
  96. llm_code/runtime/memory.py +132 -0
  97. llm_code/runtime/memory_layers.py +467 -0
  98. llm_code/runtime/memory_lint.py +252 -0
  99. llm_code/runtime/model_aliases.py +37 -0
  100. llm_code/runtime/ollama.py +93 -0
  101. llm_code/runtime/overlay.py +124 -0
  102. llm_code/runtime/permissions.py +200 -0
  103. llm_code/runtime/plan.py +45 -0
  104. llm_code/runtime/prompt.py +238 -0
  105. llm_code/runtime/repo_map.py +174 -0
  106. llm_code/runtime/sandbox.py +116 -0
  107. llm_code/runtime/session.py +268 -0
  108. llm_code/runtime/skill_resolver.py +61 -0
  109. llm_code/runtime/skills.py +133 -0
  110. llm_code/runtime/speculative.py +75 -0
  111. llm_code/runtime/streaming_executor.py +216 -0
  112. llm_code/runtime/telemetry.py +196 -0
  113. llm_code/runtime/token_budget.py +26 -0
  114. llm_code/runtime/vcr.py +142 -0
  115. llm_code/runtime/vision.py +102 -0
  116. llm_code/swarm/__init__.py +1 -0
  117. llm_code/swarm/backend_subprocess.py +108 -0
  118. llm_code/swarm/backend_tmux.py +103 -0
  119. llm_code/swarm/backend_worktree.py +306 -0
  120. llm_code/swarm/checkpoint.py +74 -0
  121. llm_code/swarm/coordinator.py +236 -0
  122. llm_code/swarm/mailbox.py +88 -0
  123. llm_code/swarm/manager.py +202 -0
  124. llm_code/swarm/memory_sync.py +80 -0
  125. llm_code/swarm/recovery.py +21 -0
  126. llm_code/swarm/team.py +67 -0
  127. llm_code/swarm/types.py +31 -0
  128. llm_code/task/__init__.py +16 -0
  129. llm_code/task/diagnostics.py +93 -0
  130. llm_code/task/manager.py +162 -0
  131. llm_code/task/types.py +112 -0
  132. llm_code/task/verifier.py +104 -0
  133. llm_code/tools/__init__.py +0 -0
  134. llm_code/tools/agent.py +145 -0
  135. llm_code/tools/agent_roles.py +82 -0
  136. llm_code/tools/base.py +94 -0
  137. llm_code/tools/bash.py +565 -0
  138. llm_code/tools/computer_use_tools.py +278 -0
  139. llm_code/tools/coordinator_tool.py +75 -0
  140. llm_code/tools/cron_create.py +90 -0
  141. llm_code/tools/cron_delete.py +49 -0
  142. llm_code/tools/cron_list.py +51 -0
  143. llm_code/tools/deferred.py +92 -0
  144. llm_code/tools/dump.py +116 -0
  145. llm_code/tools/edit_file.py +282 -0
  146. llm_code/tools/git_tools.py +531 -0
  147. llm_code/tools/glob_search.py +112 -0
  148. llm_code/tools/grep_search.py +144 -0
  149. llm_code/tools/ide_diagnostics.py +59 -0
  150. llm_code/tools/ide_open.py +58 -0
  151. llm_code/tools/ide_selection.py +52 -0
  152. llm_code/tools/memory_tools.py +138 -0
  153. llm_code/tools/multi_edit.py +143 -0
  154. llm_code/tools/notebook_edit.py +107 -0
  155. llm_code/tools/notebook_read.py +81 -0
  156. llm_code/tools/parsing.py +63 -0
  157. llm_code/tools/read_file.py +154 -0
  158. llm_code/tools/registry.py +58 -0
  159. llm_code/tools/search_backends/__init__.py +56 -0
  160. llm_code/tools/search_backends/brave.py +56 -0
  161. llm_code/tools/search_backends/duckduckgo.py +129 -0
  162. llm_code/tools/search_backends/searxng.py +71 -0
  163. llm_code/tools/search_backends/tavily.py +73 -0
  164. llm_code/tools/swarm_create.py +109 -0
  165. llm_code/tools/swarm_delete.py +95 -0
  166. llm_code/tools/swarm_list.py +44 -0
  167. llm_code/tools/swarm_message.py +109 -0
  168. llm_code/tools/task_close.py +79 -0
  169. llm_code/tools/task_plan.py +79 -0
  170. llm_code/tools/task_verify.py +90 -0
  171. llm_code/tools/tool_search.py +65 -0
  172. llm_code/tools/web_common.py +258 -0
  173. llm_code/tools/web_fetch.py +223 -0
  174. llm_code/tools/web_search.py +280 -0
  175. llm_code/tools/write_file.py +118 -0
  176. llm_code/tui/__init__.py +1 -0
  177. llm_code/tui/app.py +2432 -0
  178. llm_code/tui/chat_view.py +82 -0
  179. llm_code/tui/chat_widgets.py +309 -0
  180. llm_code/tui/header_bar.py +46 -0
  181. llm_code/tui/input_bar.py +349 -0
  182. llm_code/tui/keybindings.py +142 -0
  183. llm_code/tui/marketplace.py +210 -0
  184. llm_code/tui/status_bar.py +72 -0
  185. llm_code/tui/theme.py +96 -0
  186. llm_code/utils/__init__.py +0 -0
  187. llm_code/utils/diff.py +111 -0
  188. llm_code/utils/errors.py +70 -0
  189. llm_code/utils/hyperlink.py +73 -0
  190. llm_code/utils/notebook.py +179 -0
  191. llm_code/utils/search.py +69 -0
  192. llm_code/utils/text_normalize.py +28 -0
  193. llm_code/utils/version_check.py +62 -0
  194. llm_code/vim/__init__.py +4 -0
  195. llm_code/vim/engine.py +51 -0
  196. llm_code/vim/motions.py +172 -0
  197. llm_code/vim/operators.py +183 -0
  198. llm_code/vim/text_objects.py +139 -0
  199. llm_code/vim/transitions.py +279 -0
  200. llm_code/vim/types.py +68 -0
  201. llm_code/voice/__init__.py +1 -0
  202. llm_code/voice/languages.py +43 -0
  203. llm_code/voice/recorder.py +136 -0
  204. llm_code/voice/stt.py +36 -0
  205. llm_code/voice/stt_anthropic.py +66 -0
  206. llm_code/voice/stt_google.py +32 -0
  207. llm_code/voice/stt_whisper.py +52 -0
  208. llmcode_cli-1.0.0.dist-info/METADATA +524 -0
  209. llmcode_cli-1.0.0.dist-info/RECORD +212 -0
  210. llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
  211. llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
  212. llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
llm_code/mcp/types.py ADDED
@@ -0,0 +1,53 @@
1
+ """MCP protocol types as frozen dataclasses."""
2
+ from __future__ import annotations
3
+
4
+ from dataclasses import dataclass
5
+
6
+
7
@dataclass(frozen=True)
class McpServerConfig:
    """Configuration for connecting to an MCP server.

    Defaults describe a stdio transport (spawn ``command`` with ``args``);
    URL-based transports use ``url`` and optionally ``headers`` instead.
    """

    command: str | None = None  # executable to spawn for the stdio transport
    args: tuple[str, ...] = ()  # argv for the spawned process (immutable on purpose)
    env: dict[str, str] | None = None  # extra environment variables, if any
    transport_type: str = "stdio"  # presumably also e.g. HTTP/SSE values — see llm_code/mcp/transport.py
    url: str | None = None  # server endpoint for non-stdio transports
    headers: dict[str, str] | None = None  # HTTP headers for URL-based transports
17
+
18
+
19
@dataclass(frozen=True)
class McpToolDefinition:
    """Definition of a tool exposed by an MCP server."""

    name: str  # tool identifier as advertised by the server
    description: str  # human/model-readable summary of what the tool does
    input_schema: dict  # JSON-Schema-style description of the tool's arguments
    annotations: dict | None = None  # optional extra metadata from the server
27
+
28
+
29
@dataclass(frozen=True)
class McpToolResult:
    """Result returned from calling an MCP tool."""

    content: str  # textual output of the tool invocation
    is_error: bool = False  # True when the server reported the call as failed
35
+
36
+
37
@dataclass(frozen=True)
class McpResource:
    """A resource exposed by an MCP server."""

    uri: str  # unique resource identifier used to fetch the content
    name: str  # display name of the resource
    description: str | None = None  # optional human-readable description
    mime_type: str | None = None  # optional content type hint
45
+
46
+
47
@dataclass(frozen=True)
class McpServerInfo:
    """Information about an MCP server returned during initialization."""

    name: str  # server-reported name
    version: str  # server-reported version string
    capabilities: dict  # raw capabilities object from the initialize handshake
File without changes
@@ -0,0 +1,136 @@
1
+ """Remote client — connects to a remote llm-code server, renders UI locally."""
2
+ from __future__ import annotations
3
+
4
+ import asyncio
5
+ import json
6
+
7
+ import websockets
8
+
9
+ from rich.console import Console
10
+ from rich.markdown import Markdown
11
+
12
+ console = Console()
13
+
14
+
15
class RemoteClient:
    """Thin terminal client for a remote llm-code server.

    Connects over WebSocket, forwards user input lines to the server,
    and renders the server's event stream locally with Rich.
    """

    def __init__(self, url: str):
        # Accept bare "host:port" by defaulting to the ws:// scheme.
        self._url = url if url.startswith("ws") else f"ws://{url}"
        # Active connection while inside connect(); NOTE(review): never reset
        # to None after disconnect, so it can hold a stale closed socket.
        self._ws = None

    async def connect(self) -> None:
        """Connect to remote server and start UI."""
        console.print(f"[dim]Connecting to {self._url}...[/]")

        try:
            async with websockets.connect(self._url) as ws:
                self._ws = ws
                console.print("[green]✓ Connected[/]")

                # Start reading server events in background
                recv_task = asyncio.create_task(self._recv_loop(ws))

                # Input loop — prompt_toolkit imported lazily so the module
                # can be imported without it when only the server side is used.
                from prompt_toolkit import PromptSession
                from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
                session = PromptSession(auto_suggest=AutoSuggestFromHistory())

                while True:
                    try:
                        user_input = await session.prompt_async("❯ ")
                    except (EOFError, KeyboardInterrupt):
                        # Ctrl-D / Ctrl-C ends the session cleanly.
                        console.print("\n[dim]Disconnecting...[/]")
                        break

                    user_input = user_input.strip()
                    if not user_input:
                        continue

                    if user_input in ("/exit", "/quit"):
                        break

                    # Everything else (including other slash commands) is
                    # forwarded verbatim for the server to interpret.
                    await ws.send(json.dumps({"type": "user_input", "text": user_input}))

                # NOTE(review): task is cancelled but not awaited — its
                # CancelledError is swallowed in _recv_loop, so no warning,
                # but awaiting here would be tidier.
                recv_task.cancel()

        except ConnectionRefusedError:
            console.print(f"[red]Cannot connect to {self._url}[/]")
        except Exception as exc:
            # Broad catch: this is the top-level UI boundary; report and return.
            console.print(f"[red]Connection error: {exc}[/]")

    async def _recv_loop(self, ws) -> None:
        """Receive and render server events."""
        try:
            async for raw in ws:
                msg = json.loads(raw)
                self._render_event(msg)
        except asyncio.CancelledError:
            # Normal shutdown path when connect() cancels us.
            pass
        except websockets.ConnectionClosed:
            console.print("[dim]Server disconnected.[/]")

    def _render_event(self, msg: dict) -> None:
        """Render a server event — same format as Ink IPC protocol."""
        msg_type = msg.get("type", "")

        if msg_type == "welcome":
            # Session banner with model/cwd info sent once after connecting.
            console.print()
            console.print(" [bold cyan]╭──────────────╮[/]")
            console.print(" [bold cyan]│ llm-code │[/] [dim](remote)[/]")
            console.print(" [bold cyan]╰──────────────╯[/]")
            console.print(f" [yellow]Model [/] {msg.get('model', '')}")
            console.print(f" [yellow]Directory [/] {msg.get('cwd', '')}")
            console.print(f" [yellow]Server [/] {self._url}")
            console.print()

        elif msg_type == "user_echo":
            console.print(f"\n[bold]❯[/] {msg.get('text', '')}")

        elif msg_type == "thinking_start":
            # Carriage return keeps the spinner on one line until overwritten.
            console.print("[blue]⠋ Thinking…[/]", end="\r")

        elif msg_type == "thinking_stop":
            elapsed = msg.get("elapsed", 0)
            console.print(f"[dim]({elapsed:.1f}s)[/] ")

        elif msg_type == "text_delta":
            # Server batches deltas into renderable chunks; skip pure whitespace.
            text = msg.get("text", "")
            if text.strip():
                console.print(Markdown(text, code_theme="monokai"))

        elif msg_type == "text_done":
            text = msg.get("text", "")
            if text.strip():
                console.print(Markdown(text, code_theme="monokai"))

        elif msg_type == "tool_start":
            name = msg.get("name", "")
            detail = msg.get("detail", "")
            console.print(f"\n [grey62]╭─[/] [bold cyan]{name}[/] [grey62]─╮[/]")
            console.print(f" [grey62]│[/] {detail}")
            console.print(f" [grey62]╰{'─' * (len(name) + 4)}╯[/]")

        elif msg_type == "tool_result":
            output = msg.get("output", "")
            is_error = msg.get("isError", False)
            if is_error:
                console.print(f" [bold red]✗[/] {output[:150]}")
            else:
                # Show only the first three lines, truncated, to keep the UI tight.
                lines = output.strip().splitlines()[:3]
                for line in lines:
                    console.print(f" [green]✓[/] [dim]{line[:150]}[/]")

        elif msg_type == "turn_done":
            elapsed = msg.get("elapsed", 0)
            tokens = msg.get("tokens", 0)
            console.print(f"[green]✓ Done ({elapsed:.1f}s)[/] [dim]↓{tokens} tok[/]")
            console.print()

        elif msg_type == "message":
            console.print(f"[dim]{msg.get('text', '')}[/]")

        elif msg_type == "error":
            console.print(f"[bold red]Error: {msg.get('message', '')}[/]")

        elif msg_type == "help":
            for c in msg.get("commands", []):
                console.print(f" [dim]{c['cmd']:<20} {c['desc']}[/]")
@@ -0,0 +1,22 @@
1
+ """Remote execution protocol — JSON-RPC over WebSocket."""
2
+ from __future__ import annotations
3
+ from dataclasses import dataclass
4
+ import json
5
+
6
+ # Server → Client events (same as Ink IPC protocol)
7
+ # Client → Server commands
8
+
9
@dataclass
class RemoteMessage:
    """Base message format for client-server communication.

    Wire format is a flat JSON object whose ``type`` key tags the message;
    all remaining keys form the payload ``data``.
    """

    type: str
    data: dict

    def to_json(self) -> str:
        """Serialize to a flat JSON object; a ``type`` key inside ``data`` wins."""
        payload = {"type": self.type}
        payload.update(self.data)
        return json.dumps(payload)

    @classmethod
    def from_json(cls, text: str) -> RemoteMessage:
        """Parse JSON text, splitting the ``type`` tag from the payload."""
        fields = json.loads(text)
        return cls(type=fields.pop("type", "unknown"), data=fields)
@@ -0,0 +1,275 @@
1
+ """Remote server — runs on the remote machine, executes tools locally."""
2
+ from __future__ import annotations
3
+
4
+ import asyncio
5
+ import json
6
+ import os
7
+ import time
8
+ from pathlib import Path
9
+
10
+ import websockets
11
+ from websockets.asyncio.server import ServerConnection
12
+
13
+ from llm_code.runtime.config import RuntimeConfig, load_config
14
+ from llm_code.runtime.model_aliases import resolve_model
15
+
16
+
17
class RemoteServer:
    """WebSocket server that runs conversation turns (and tool execution)
    on this machine and streams UI events to a thin remote client.

    Events use the same wire format as the Ink IPC protocol, so any client
    speaking that protocol can render the session.
    """

    def __init__(self, host: str = "0.0.0.0", port: int = 8765, config: RuntimeConfig | None = None):
        self._host = host
        self._port = port
        self._config = config  # loaded lazily on first client when None
        self._runtime = None  # ConversationRuntime, built by _init_session()
        self._skills = None  # reserved; never populated in this module
        self._memory = None  # reserved; never populated in this module

    async def start(self) -> None:
        """Start WebSocket server."""
        print(f"llm-code server listening on ws://{self._host}:{self._port}")
        async with websockets.serve(self._handle_client, self._host, self._port):
            await asyncio.Future()  # run forever

    async def _handle_client(self, ws: ServerConnection) -> None:
        """Handle a connected client: send welcome, then loop over messages.

        NOTE(review): every new connection calls _init_session(), which
        replaces the shared runtime — concurrent clients would stomp on
        each other's conversation state.
        """
        print(f"Client connected: {ws.remote_address}")

        # Load config if not provided
        if not self._config:
            cwd = Path.cwd()
            self._config = load_config(
                user_dir=Path.home() / ".llm-code",
                project_dir=cwd,
                local_path=cwd / ".llm-code" / "config.json",
                cli_overrides={},
            )

        # Initialize session
        self._init_session()

        # Send welcome — include the current git branch (best-effort).
        cwd = Path.cwd()
        import subprocess
        try:
            branch = subprocess.run(
                ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                cwd=cwd, capture_output=True, text=True, timeout=3
            ).stdout.strip()
        except Exception:
            # Not a repo / git missing / timeout — banner just omits the branch.
            branch = ""

        await ws.send(json.dumps({
            "type": "welcome",
            "model": self._config.model,
            "workspace": cwd.name,
            "cwd": str(cwd),
            "permissions": self._config.permission_mode,
            "branch": branch,
        }))

        # Main message loop
        try:
            async for raw in ws:
                msg = json.loads(raw)
                await self._handle_message(ws, msg)
        except websockets.ConnectionClosed:
            print(f"Client disconnected: {ws.remote_address}")

    async def _handle_message(self, ws: ServerConnection, msg: dict) -> None:
        """Dispatch one client message: slash commands vs. conversation turns."""
        msg_type = msg.get("type", "")

        if msg_type == "user_input":
            text = msg.get("text", "").strip()
            if text.startswith("/"):
                await self._handle_command(ws, text)
            else:
                await self._run_turn(ws, text)

    async def _run_turn(self, ws: ServerConnection, user_input: str) -> None:
        """Run a conversation turn, streaming events to client.

        Streams model text deltas through a small character state machine
        that strips <tool_call>…</tool_call> and <think>…</think> spans
        before forwarding the remaining text.
        """
        if not self._runtime:
            self._init_session()

        from llm_code.api.types import (
            StreamTextDelta, StreamToolExecStart, StreamToolExecResult,
            StreamToolProgress, StreamMessageStop,
        )

        await ws.send(json.dumps({"type": "user_echo", "text": user_input}))
        await ws.send(json.dumps({"type": "thinking_start"}))

        start = time.monotonic()
        text_buffer = ""  # visible text awaiting flush to the client
        output_tokens = 0  # estimate; replaced by real usage if reported

        # Tag filtering state: while inside a tag span, characters accumulate
        # in tag_buffer instead of text_buffer.
        in_tool_call = False
        in_think = False
        tag_buffer = ""

        try:
            async for event in self._runtime.run_turn(user_input):
                if isinstance(event, StreamTextDelta):
                    # Rough ~4-chars-per-token estimate until real usage arrives.
                    output_tokens += len(event.text) // 4

                    # Filter tags, one character at a time.
                    for char in event.text:
                        if in_tool_call:
                            # Swallow until the closing tag. The opening tag is
                            # still at the front of tag_buffer, but endswith()
                            # on the closing tag fires regardless.
                            tag_buffer += char
                            if tag_buffer.endswith("</tool_call>"):
                                in_tool_call = False
                                tag_buffer = ""
                        elif in_think:
                            tag_buffer += char
                            if tag_buffer.endswith("</think>"):
                                in_think = False
                                tag_buffer = ""
                        elif tag_buffer:
                            # Partial "<..." seen: decide tag vs. plain text.
                            tag_buffer += char
                            if tag_buffer == "<tool_call>":
                                in_tool_call = True
                            elif tag_buffer == "<think>":
                                in_think = True
                            elif not "<tool_call>".startswith(tag_buffer) and not "<think>".startswith(tag_buffer):
                                # No longer a prefix of either tag — it was
                                # ordinary text; release it.
                                text_buffer += tag_buffer
                                tag_buffer = ""
                        elif char == "<":
                            tag_buffer = "<"
                        else:
                            text_buffer += char

                    # Flush periodically, but never mid code-fence (odd number
                    # of ``` means we are inside one).
                    in_code = text_buffer.count("```") % 2 == 1
                    if not in_code and len(text_buffer) > 100:
                        await ws.send(json.dumps({"type": "text_delta", "text": text_buffer}))
                        text_buffer = ""

                elif isinstance(event, StreamToolExecStart):
                    # Flush pending text so tool output renders in order.
                    if text_buffer:
                        await ws.send(json.dumps({"type": "text_delta", "text": text_buffer}))
                        text_buffer = ""
                    await ws.send(json.dumps({"type": "tool_start", "name": event.tool_name, "detail": event.args_summary}))

                elif isinstance(event, StreamToolExecResult):
                    # Cap forwarded output at 500 chars to keep frames small.
                    await ws.send(json.dumps({"type": "tool_result", "name": event.tool_name, "output": event.output[:500], "isError": event.is_error}))

                elif isinstance(event, StreamToolProgress):
                    await ws.send(json.dumps({"type": "tool_progress", "name": event.tool_name, "message": event.message}))

                elif isinstance(event, StreamMessageStop):
                    # Prefer the provider's reported usage over the estimate.
                    if event.usage and event.usage.output_tokens > 0:
                        output_tokens = event.usage.output_tokens

        except Exception as exc:
            # Turn-level boundary: report the failure to the client and stop.
            await ws.send(json.dumps({"type": "error", "message": str(exc)}))
            return

        # Flush remaining text; an unconsumed partial tag prefix is plain text.
        if tag_buffer and not in_tool_call and not in_think:
            text_buffer += tag_buffer
        if text_buffer:
            await ws.send(json.dumps({"type": "text_done", "text": text_buffer}))
        else:
            await ws.send(json.dumps({"type": "text_done", "text": ""}))

        elapsed = time.monotonic() - start
        await ws.send(json.dumps({"type": "thinking_stop", "elapsed": elapsed, "tokens": output_tokens}))
        await ws.send(json.dumps({"type": "turn_done", "elapsed": elapsed, "tokens": output_tokens}))

    async def _handle_command(self, ws: ServerConnection, text: str) -> None:
        """Handle the small subset of slash commands supported remotely."""
        from llm_code.cli.commands import parse_slash_command
        cmd = parse_slash_command(text)
        if not cmd:
            return

        if cmd.name == "help":
            await ws.send(json.dumps({
                "type": "help",
                "commands": [
                    {"cmd": "/help", "desc": "Show commands"},
                    {"cmd": "/clear", "desc": "Clear conversation"},
                    {"cmd": "/cost", "desc": "Token usage"},
                    {"cmd": "/exit", "desc": "Disconnect"},
                ],
            }))
        elif cmd.name == "clear":
            # Re-initializing the session drops all conversation history.
            self._init_session()
            await ws.send(json.dumps({"type": "message", "text": "Conversation cleared."}))
        elif cmd.name == "cost":
            if self._runtime:
                u = self._runtime.session.total_usage
                await ws.send(json.dumps({"type": "message", "text": f"Tokens — in: {u.input_tokens:,} out: {u.output_tokens:,}"}))
        elif cmd.name == "exit":
            await ws.close()
        else:
            await ws.send(json.dumps({"type": "message", "text": f"Command /{cmd.name} not available in remote mode."}))

    def _init_session(self) -> None:
        """Initialize ConversationRuntime — same pattern as tui.py."""
        from llm_code.api.client import ProviderClient
        from llm_code.runtime.context import ProjectContext
        from llm_code.runtime.conversation import ConversationRuntime
        from llm_code.runtime.hooks import HookRunner
        from llm_code.runtime.permissions import PermissionMode, PermissionPolicy
        from llm_code.runtime.prompt import SystemPromptBuilder
        from llm_code.runtime.session import Session
        from llm_code.tools.registry import ToolRegistry

        model = resolve_model(self._config.model, self._config.model_aliases)
        api_key = os.environ.get(self._config.provider_api_key_env, "")

        provider = ProviderClient.from_model(
            model=model,
            base_url=self._config.provider_base_url or "",
            api_key=api_key,
            timeout=self._config.timeout,
            max_retries=self._config.max_retries,
            native_tools=self._config.native_tools,
        )

        # Core file/search/shell tools are always registered.
        from llm_code.tools.read_file import ReadFileTool
        from llm_code.tools.write_file import WriteFileTool
        from llm_code.tools.edit_file import EditFileTool
        from llm_code.tools.bash import BashTool
        from llm_code.tools.glob_search import GlobSearchTool
        from llm_code.tools.grep_search import GrepSearchTool

        registry = ToolRegistry()
        for cls in (ReadFileTool, WriteFileTool, EditFileTool, BashTool, GlobSearchTool, GrepSearchTool):
            registry.register(cls())

        # Git tools are best-effort: skip on import failure or duplicates.
        try:
            from llm_code.tools.git_tools import (
                GitStatusTool, GitDiffTool, GitLogTool, GitCommitTool,
                GitPushTool, GitStashTool, GitBranchTool,
            )
            for cls in (GitStatusTool, GitDiffTool, GitLogTool, GitCommitTool, GitPushTool, GitStashTool, GitBranchTool):
                try:
                    registry.register(cls())
                except ValueError:
                    pass
        except ImportError:
            pass

        # Config string → PermissionMode; unknown values fall back to PROMPT.
        mode_map = {
            "read_only": PermissionMode.READ_ONLY,
            "workspace_write": PermissionMode.WORKSPACE_WRITE,
            "full_access": PermissionMode.FULL_ACCESS,
            "auto_accept": PermissionMode.AUTO_ACCEPT,
            "prompt": PermissionMode.PROMPT,
        }

        cwd = Path.cwd()
        self._runtime = ConversationRuntime(
            provider=provider,
            tool_registry=registry,
            permission_policy=PermissionPolicy(
                mode=mode_map.get(self._config.permission_mode, PermissionMode.PROMPT),
                allow_tools=self._config.allowed_tools,
                deny_tools=self._config.denied_tools,
            ),
            hook_runner=HookRunner(self._config.hooks),
            prompt_builder=SystemPromptBuilder(),
            config=self._config,
            session=Session.create(cwd),
            context=ProjectContext.discover(cwd),
        )
@@ -0,0 +1,56 @@
1
+ """SSH proxy — SSH to remote host, auto-start server, connect."""
2
+ from __future__ import annotations
3
+
4
+ import asyncio
5
+ import subprocess
6
+
7
+
8
async def ssh_connect(target: str, port: int = 8765) -> None:
    """SSH to target, start llm-code server, connect locally.

    Opens an SSH session that (a) forwards local ``port`` to the remote
    host and (b) launches ``llm-code --serve`` there, then attaches a
    RemoteClient to the forwarded port. The tunnel is torn down on exit.

    target: user@host or just host
    port: local and remote port used for the WebSocket tunnel
    """
    from rich.console import Console
    console = Console()

    console.print(f"[dim]Setting up SSH tunnel to {target}...[/]")

    # Start SSH tunnel: forward local port to remote
    # Also start llm-code --serve on remote
    ssh_cmd = [
        "ssh", "-tt",
        "-L", f"{port}:localhost:{port}",
        target,
        f"cd ~ && llm-code --serve --port {port}",
    ]

    console.print(f"[dim]$ {' '.join(ssh_cmd)}[/]")

    # Start SSH in background
    ssh_proc = subprocess.Popen(
        ssh_cmd,
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    # Wait a moment for the remote server to start.
    # NOTE(review): fixed 3s delay is racy on slow links — polling the
    # forwarded port would be more robust; kept for behavior parity.
    await asyncio.sleep(3)

    if ssh_proc.poll() is not None:
        # SSH already exited — surface a snippet of stderr for diagnosis.
        stderr = ssh_proc.stderr.read().decode() if ssh_proc.stderr else ""
        console.print(f"[red]SSH failed: {stderr[:200]}[/]")
        return

    console.print("[green]✓ SSH tunnel established[/]")

    # Connect to local forwarded port
    from llm_code.remote.client import RemoteClient
    client = RemoteClient(f"ws://localhost:{port}")

    try:
        await client.connect()
    finally:
        # Tear down the tunnel. Escalate to SIGKILL if ssh ignores SIGTERM,
        # so a hung ssh cannot raise TimeoutExpired out of this finally
        # block and mask whatever error ended the session.
        ssh_proc.terminate()
        try:
            ssh_proc.wait(timeout=5)
        except subprocess.TimeoutExpired:
            ssh_proc.kill()
            ssh_proc.wait()
        console.print("[dim]SSH tunnel closed.[/]")
+ console.print("[dim]SSH tunnel closed.[/]")
File without changes
@@ -0,0 +1,56 @@
1
+ """Auto-commit checkpoint -- git commit individual file changes after tool edits."""
2
+ from __future__ import annotations
3
+
4
+ import logging
5
+ import subprocess
6
+ from pathlib import Path
7
+
8
logger = logging.getLogger(__name__)

# Per-invocation timeout for each git subprocess call, in seconds.
_TIMEOUT_S = 5


def auto_commit_file(path: Path, tool_name: str) -> bool:
    """Stage and commit a single file as a checkpoint.

    Runs ``git add`` for exactly this file, then ``git commit --no-verify``
    with a "checkpoint: <tool> (<filename>)" message, using the file's
    parent directory as the git working directory.

    Args:
        path: File that was just modified by a tool.
        tool_name: Name of the tool that made the edit (used in the message).

    Returns:
        True on successful commit, False on any failure (silently).
    """
    if not path.exists():
        return False

    try:
        # Stage the specific file only ("--" guards against option-like names).
        add_result = subprocess.run(
            ["git", "add", "--", str(path)],
            capture_output=True,
            text=True,
            timeout=_TIMEOUT_S,
            cwd=path.parent,
        )
        if add_result.returncode != 0:
            logger.debug("git add failed (rc=%d): %s", add_result.returncode, add_result.stderr)
            return False

        # Commit with a checkpoint message that names the edited file.
        # (Previously the filename was computed but the message hard-coded
        # "(unknown)" — include the actual name so checkpoints are traceable.)
        message = f"checkpoint: {tool_name} ({path.name})"
        commit_result = subprocess.run(
            ["git", "commit", "-m", message, "--no-verify"],
            capture_output=True,
            text=True,
            timeout=_TIMEOUT_S,
            cwd=path.parent,
        )
        if commit_result.returncode != 0:
            # Commonly "nothing to commit" when the edit was a no-op.
            logger.debug("git commit failed (rc=%d): %s", commit_result.returncode, commit_result.stderr)
            return False

        logger.info("Auto-committed checkpoint: %s", message)
        return True

    except subprocess.TimeoutExpired:
        logger.warning("Auto-commit timed out for %s", path)
        return False
    except OSError:
        # Covers FileNotFoundError (git binary missing) and similar.
        logger.debug("Auto-commit skipped -- git not available or not a repo")
        return False
@@ -0,0 +1,62 @@
1
+ """Auto-diagnose -- run LSP diagnostics after file edits and report errors."""
2
+ from __future__ import annotations
3
+
4
+ import logging
5
+ from pathlib import Path
6
+ from typing import Any
7
+
8
logger = logging.getLogger(__name__)

# Extension to language mapping (mirrors llm_code/lsp/tools.py)
_EXT_LANGUAGE: dict[str, str] = {
    ".py": "python",
    ".pyi": "python",
    ".ts": "typescript",
    ".tsx": "typescript",
    ".js": "typescript",
    ".jsx": "typescript",
    ".go": "go",
    ".rs": "rust",
}


def format_diagnostics(diagnostics: list[Any]) -> list[str]:
    """Format diagnostic objects into human-readable strings."""

    def _render(diag: Any) -> str:
        return f"{diag.file}:{diag.line}:{diag.column} [{diag.severity}] {diag.message} ({diag.source})"

    return [_render(diag) for diag in diagnostics]


async def auto_diagnose(lsp_manager: Any, file_path: str) -> list[str]:
    """Run LSP diagnostics on a file and return error-level issues only.

    Returns a list of formatted error strings. Empty list if no errors
    or LSP is unavailable. Never raises — all exceptions are caught.
    """
    try:
        # Map the file extension to a language; bail out for unknown types.
        target = Path(file_path)
        language = _EXT_LANGUAGE.get(target.suffix.lower(), "")
        if not language:
            return []

        # No running language server for this language → nothing to report.
        client = lsp_manager.get_client(language)
        if client is None:
            return []

        diagnostics = await client.get_diagnostics(target.resolve().as_uri())

        # Keep only error-level findings; warnings and hints are ignored.
        errors = [diag for diag in diagnostics or [] if diag.severity == "error"]
        return format_diagnostics(errors) if errors else []

    except Exception:
        logger.debug("Auto-diagnose failed for %s", file_path, exc_info=True)
        return []