llmcode-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_code/__init__.py +2 -0
- llm_code/analysis/__init__.py +6 -0
- llm_code/analysis/cache.py +33 -0
- llm_code/analysis/engine.py +256 -0
- llm_code/analysis/go_rules.py +114 -0
- llm_code/analysis/js_rules.py +84 -0
- llm_code/analysis/python_rules.py +311 -0
- llm_code/analysis/rules.py +140 -0
- llm_code/analysis/rust_rules.py +108 -0
- llm_code/analysis/universal_rules.py +111 -0
- llm_code/api/__init__.py +0 -0
- llm_code/api/client.py +90 -0
- llm_code/api/errors.py +73 -0
- llm_code/api/openai_compat.py +390 -0
- llm_code/api/provider.py +35 -0
- llm_code/api/sse.py +52 -0
- llm_code/api/types.py +140 -0
- llm_code/cli/__init__.py +0 -0
- llm_code/cli/commands.py +70 -0
- llm_code/cli/image.py +122 -0
- llm_code/cli/render.py +214 -0
- llm_code/cli/status_line.py +79 -0
- llm_code/cli/streaming.py +92 -0
- llm_code/cli/tui_main.py +220 -0
- llm_code/computer_use/__init__.py +11 -0
- llm_code/computer_use/app_detect.py +49 -0
- llm_code/computer_use/app_tier.py +57 -0
- llm_code/computer_use/coordinator.py +99 -0
- llm_code/computer_use/input_control.py +71 -0
- llm_code/computer_use/screenshot.py +93 -0
- llm_code/cron/__init__.py +13 -0
- llm_code/cron/parser.py +145 -0
- llm_code/cron/scheduler.py +135 -0
- llm_code/cron/storage.py +126 -0
- llm_code/enterprise/__init__.py +1 -0
- llm_code/enterprise/audit.py +59 -0
- llm_code/enterprise/auth.py +26 -0
- llm_code/enterprise/oidc.py +95 -0
- llm_code/enterprise/rbac.py +65 -0
- llm_code/harness/__init__.py +5 -0
- llm_code/harness/config.py +33 -0
- llm_code/harness/engine.py +129 -0
- llm_code/harness/guides.py +41 -0
- llm_code/harness/sensors.py +68 -0
- llm_code/harness/templates.py +84 -0
- llm_code/hida/__init__.py +1 -0
- llm_code/hida/classifier.py +187 -0
- llm_code/hida/engine.py +49 -0
- llm_code/hida/profiles.py +95 -0
- llm_code/hida/types.py +28 -0
- llm_code/ide/__init__.py +1 -0
- llm_code/ide/bridge.py +80 -0
- llm_code/ide/detector.py +76 -0
- llm_code/ide/server.py +169 -0
- llm_code/logging.py +29 -0
- llm_code/lsp/__init__.py +0 -0
- llm_code/lsp/client.py +298 -0
- llm_code/lsp/detector.py +42 -0
- llm_code/lsp/manager.py +56 -0
- llm_code/lsp/tools.py +288 -0
- llm_code/marketplace/__init__.py +0 -0
- llm_code/marketplace/builtin_registry.py +102 -0
- llm_code/marketplace/installer.py +162 -0
- llm_code/marketplace/plugin.py +78 -0
- llm_code/marketplace/registry.py +360 -0
- llm_code/mcp/__init__.py +0 -0
- llm_code/mcp/bridge.py +87 -0
- llm_code/mcp/client.py +117 -0
- llm_code/mcp/health.py +120 -0
- llm_code/mcp/manager.py +214 -0
- llm_code/mcp/oauth.py +219 -0
- llm_code/mcp/transport.py +254 -0
- llm_code/mcp/types.py +53 -0
- llm_code/remote/__init__.py +0 -0
- llm_code/remote/client.py +136 -0
- llm_code/remote/protocol.py +22 -0
- llm_code/remote/server.py +275 -0
- llm_code/remote/ssh_proxy.py +56 -0
- llm_code/runtime/__init__.py +0 -0
- llm_code/runtime/auto_commit.py +56 -0
- llm_code/runtime/auto_diagnose.py +62 -0
- llm_code/runtime/checkpoint.py +70 -0
- llm_code/runtime/checkpoint_recovery.py +142 -0
- llm_code/runtime/compaction.py +35 -0
- llm_code/runtime/compressor.py +415 -0
- llm_code/runtime/config.py +533 -0
- llm_code/runtime/context.py +49 -0
- llm_code/runtime/conversation.py +921 -0
- llm_code/runtime/cost_tracker.py +126 -0
- llm_code/runtime/dream.py +127 -0
- llm_code/runtime/file_protection.py +150 -0
- llm_code/runtime/hardware.py +85 -0
- llm_code/runtime/hooks.py +223 -0
- llm_code/runtime/indexer.py +230 -0
- llm_code/runtime/knowledge_compiler.py +232 -0
- llm_code/runtime/memory.py +132 -0
- llm_code/runtime/memory_layers.py +467 -0
- llm_code/runtime/memory_lint.py +252 -0
- llm_code/runtime/model_aliases.py +37 -0
- llm_code/runtime/ollama.py +93 -0
- llm_code/runtime/overlay.py +124 -0
- llm_code/runtime/permissions.py +200 -0
- llm_code/runtime/plan.py +45 -0
- llm_code/runtime/prompt.py +238 -0
- llm_code/runtime/repo_map.py +174 -0
- llm_code/runtime/sandbox.py +116 -0
- llm_code/runtime/session.py +268 -0
- llm_code/runtime/skill_resolver.py +61 -0
- llm_code/runtime/skills.py +133 -0
- llm_code/runtime/speculative.py +75 -0
- llm_code/runtime/streaming_executor.py +216 -0
- llm_code/runtime/telemetry.py +196 -0
- llm_code/runtime/token_budget.py +26 -0
- llm_code/runtime/vcr.py +142 -0
- llm_code/runtime/vision.py +102 -0
- llm_code/swarm/__init__.py +1 -0
- llm_code/swarm/backend_subprocess.py +108 -0
- llm_code/swarm/backend_tmux.py +103 -0
- llm_code/swarm/backend_worktree.py +306 -0
- llm_code/swarm/checkpoint.py +74 -0
- llm_code/swarm/coordinator.py +236 -0
- llm_code/swarm/mailbox.py +88 -0
- llm_code/swarm/manager.py +202 -0
- llm_code/swarm/memory_sync.py +80 -0
- llm_code/swarm/recovery.py +21 -0
- llm_code/swarm/team.py +67 -0
- llm_code/swarm/types.py +31 -0
- llm_code/task/__init__.py +16 -0
- llm_code/task/diagnostics.py +93 -0
- llm_code/task/manager.py +162 -0
- llm_code/task/types.py +112 -0
- llm_code/task/verifier.py +104 -0
- llm_code/tools/__init__.py +0 -0
- llm_code/tools/agent.py +145 -0
- llm_code/tools/agent_roles.py +82 -0
- llm_code/tools/base.py +94 -0
- llm_code/tools/bash.py +565 -0
- llm_code/tools/computer_use_tools.py +278 -0
- llm_code/tools/coordinator_tool.py +75 -0
- llm_code/tools/cron_create.py +90 -0
- llm_code/tools/cron_delete.py +49 -0
- llm_code/tools/cron_list.py +51 -0
- llm_code/tools/deferred.py +92 -0
- llm_code/tools/dump.py +116 -0
- llm_code/tools/edit_file.py +282 -0
- llm_code/tools/git_tools.py +531 -0
- llm_code/tools/glob_search.py +112 -0
- llm_code/tools/grep_search.py +144 -0
- llm_code/tools/ide_diagnostics.py +59 -0
- llm_code/tools/ide_open.py +58 -0
- llm_code/tools/ide_selection.py +52 -0
- llm_code/tools/memory_tools.py +138 -0
- llm_code/tools/multi_edit.py +143 -0
- llm_code/tools/notebook_edit.py +107 -0
- llm_code/tools/notebook_read.py +81 -0
- llm_code/tools/parsing.py +63 -0
- llm_code/tools/read_file.py +154 -0
- llm_code/tools/registry.py +58 -0
- llm_code/tools/search_backends/__init__.py +56 -0
- llm_code/tools/search_backends/brave.py +56 -0
- llm_code/tools/search_backends/duckduckgo.py +129 -0
- llm_code/tools/search_backends/searxng.py +71 -0
- llm_code/tools/search_backends/tavily.py +73 -0
- llm_code/tools/swarm_create.py +109 -0
- llm_code/tools/swarm_delete.py +95 -0
- llm_code/tools/swarm_list.py +44 -0
- llm_code/tools/swarm_message.py +109 -0
- llm_code/tools/task_close.py +79 -0
- llm_code/tools/task_plan.py +79 -0
- llm_code/tools/task_verify.py +90 -0
- llm_code/tools/tool_search.py +65 -0
- llm_code/tools/web_common.py +258 -0
- llm_code/tools/web_fetch.py +223 -0
- llm_code/tools/web_search.py +280 -0
- llm_code/tools/write_file.py +118 -0
- llm_code/tui/__init__.py +1 -0
- llm_code/tui/app.py +2432 -0
- llm_code/tui/chat_view.py +82 -0
- llm_code/tui/chat_widgets.py +309 -0
- llm_code/tui/header_bar.py +46 -0
- llm_code/tui/input_bar.py +349 -0
- llm_code/tui/keybindings.py +142 -0
- llm_code/tui/marketplace.py +210 -0
- llm_code/tui/status_bar.py +72 -0
- llm_code/tui/theme.py +96 -0
- llm_code/utils/__init__.py +0 -0
- llm_code/utils/diff.py +111 -0
- llm_code/utils/errors.py +70 -0
- llm_code/utils/hyperlink.py +73 -0
- llm_code/utils/notebook.py +179 -0
- llm_code/utils/search.py +69 -0
- llm_code/utils/text_normalize.py +28 -0
- llm_code/utils/version_check.py +62 -0
- llm_code/vim/__init__.py +4 -0
- llm_code/vim/engine.py +51 -0
- llm_code/vim/motions.py +172 -0
- llm_code/vim/operators.py +183 -0
- llm_code/vim/text_objects.py +139 -0
- llm_code/vim/transitions.py +279 -0
- llm_code/vim/types.py +68 -0
- llm_code/voice/__init__.py +1 -0
- llm_code/voice/languages.py +43 -0
- llm_code/voice/recorder.py +136 -0
- llm_code/voice/stt.py +36 -0
- llm_code/voice/stt_anthropic.py +66 -0
- llm_code/voice/stt_google.py +32 -0
- llm_code/voice/stt_whisper.py +52 -0
- llmcode_cli-1.0.0.dist-info/METADATA +524 -0
- llmcode_cli-1.0.0.dist-info/RECORD +212 -0
- llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
- llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
- llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
llm_code/tui/app.py
ADDED
|
@@ -0,0 +1,2432 @@
|
|
|
1
|
+
# llm_code/tui/app.py
|
|
2
|
+
"""LLMCodeTUI — Textual fullscreen app composing all widgets."""
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import re
|
|
7
|
+
import shutil
|
|
8
|
+
import subprocess
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from textual.app import App, ComposeResult
|
|
13
|
+
|
|
14
|
+
from llm_code.tui.chat_view import ChatScrollView, UserMessage, AssistantText
|
|
15
|
+
from llm_code.tui.header_bar import HeaderBar
|
|
16
|
+
from llm_code.tui.input_bar import InputBar
|
|
17
|
+
from llm_code.tui.status_bar import StatusBar
|
|
18
|
+
from llm_code.tui.theme import APP_CSS
|
|
19
|
+
from llm_code.logging import get_logger
|
|
20
|
+
|
|
21
|
+
logger = get_logger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class LLMCodeTUI(App):
|
|
25
|
+
"""Fullscreen TUI matching Claude Code's visual experience."""
|
|
26
|
+
|
|
27
|
+
TITLE = "llm-code"
|
|
28
|
+
CSS = APP_CSS
|
|
29
|
+
ENABLE_MOUSE_SUPPORT = False # CRITICAL: allow terminal mouse selection + copy
|
|
30
|
+
|
|
31
|
+
def __init__(
    self,
    config: Any = None,
    cwd: Path | None = None,
    budget: int | None = None,
) -> None:
    """Create the TUI app shell.

    Args:
        config: Application configuration object (project-defined shape);
            ``None`` leaves the runtime uninitialized (see ``_init_runtime``).
        cwd: Working directory for the session; defaults to ``Path.cwd()``.
        budget: Optional token budget target forwarded to ``TokenBudget``.

    All subsystem handles start as ``None``/empty here; they are populated
    later by ``_init_runtime()`` (sync, in ``on_mount``) and ``_init_mcp()``
    (async worker). ``None`` therefore means "not yet initialized" or
    "unavailable on this install".
    """
    super().__init__()
    self._config = config
    self._cwd = cwd or Path.cwd()
    self._budget = budget
    # Core runtime + accounting.
    self._runtime = None
    self._cost_tracker = None
    self._input_tokens = 0
    self._output_tokens = 0
    # Tooling subsystems.
    self._tool_reg = None
    self._deferred_tool_manager = None
    self._checkpoint_mgr = None
    self._mcp_manager = None
    self._skills = None
    self._memory = None
    self._cron_storage = None
    self._swarm_manager = None
    self._task_manager = None
    self._ide_bridge = None
    self._lsp_manager = None
    self._project_index = None
    # Classes (not instances) stashed for later swarm coordination.
    self._coordinator_class = None
    self._coordinator_tool_class = None
    # Transient UI/interaction state.
    self._permission_pending = False
    self._pending_images: list = []
    self._plan_mode: bool = False
    self._voice_active = False
    self._vcr_recorder = None
    # Double-Ctrl+C tracking (see _handle_interrupt).
    self._interrupt_pending: bool = False
    self._last_interrupt_time: float = 0.0
    self._analysis_context: str | None = None
|
|
67
|
+
|
|
68
|
+
def compose(self) -> ComposeResult:
    """Build the static widget tree, top to bottom.

    Yield order is the layout order: header bar, scrolling chat log,
    input bar, then the status bar pinned at the bottom.
    """
    yield HeaderBar(id="header-bar")
    yield ChatScrollView()
    yield InputBar()
    yield StatusBar(id="status-bar")
|
|
73
|
+
|
|
74
|
+
def on_mount(self) -> None:
    """One-time startup wiring after the widget tree is mounted.

    Initializes the conversation runtime synchronously, populates the
    header, renders the welcome banner, focuses the input bar, installs
    a SIGINT handler, and kicks off MCP server startup as an async worker.
    """
    self._init_runtime()
    header = self.query_one(HeaderBar)
    if self._config:
        header.model = getattr(self._config, "model", "")
    header.project = self._cwd.name
    header.branch = self._detect_branch()
    self._render_welcome()
    # Detect local provider and update status bar
    if self._config and self._config.provider_base_url:
        url = self._config.provider_base_url
        status = self.query_one(StatusBar)
        # Simple substring check for loopback-style hosts.
        status.is_local = "localhost" in url or "127.0.0.1" in url or "0.0.0.0" in url
    # Focus input bar so it receives key events
    self.query_one(InputBar).focus()
    # Register SIGINT handler for clean interrupt (Ctrl+C)
    import signal

    def _sigint_handler(signum, frame):
        # Signal handlers run on the main thread outside Textual's loop,
        # so marshal back onto the app thread.
        self.call_from_thread(self._handle_interrupt)

    signal.signal(signal.SIGINT, _sigint_handler)
    # Start MCP servers async
    self.run_worker(self._init_mcp(), name="init_mcp")
|
|
98
|
+
|
|
99
|
+
def _render_welcome(self) -> None:
    """Show styled welcome banner in chat area.

    Renders the ASCII-art logo, a block of session facts (model,
    workspace, directory, permissions), a block of usage hints, and a
    "Ready" line, all as a single Static entry in the chat view.
    """
    import sys
    from textual.widgets import Static
    from rich.text import Text as RichText

    chat = self.query_one(ChatScrollView)

    # ASCII-art logo, rendered line by line in bold cyan below.
    logo_lines = [
        " ██╗ ██╗ ███╗ ███╗",
        " ██║ ██║ ████╗ ████║",
        " ██║ ██║ ██╔████╔██║",
        " ██║ ██║ ██║╚██╔╝██║",
        " ███████╗███████╗██║ ╚═╝ ██║",
        " ╚══════╝╚══════╝╚═╝ ╚═╝",
        " ██████╗ ██████╗ ██████╗ ███████╗",
        " ██╔════╝██╔═══██╗██╔══██╗██╔════╝",
        " ██║ ██║ ██║██║ ██║█████╗",
        " ██║ ██║ ██║██║ ██║██╔══╝",
        " ╚██████╗╚██████╔╝██████╔╝███████╗",
        " ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝",
    ]

    model = self._config.model if self._config else "(not set)"
    branch = self._detect_branch()
    workspace = self._cwd.name
    if branch:
        workspace += f" · {branch}"
    perm = self._config.permission_mode if self._config else "prompt"
    # Platform-appropriate paste shortcut hint.
    paste_key = "Cmd+V to paste" if sys.platform == "darwin" else "Ctrl+V to paste"

    text = RichText()
    for line in logo_lines:
        text.append(line + "\n", style="bold cyan")
    text.append("\n")
    # Session facts: yellow label column, bold white values.
    for label, value in [
        ("Model", model),
        ("Workspace", workspace),
        ("Directory", str(self._cwd)),
        ("Permissions", perm),
    ]:
        text.append(f" {label:<14}", style="yellow")
        text.append(f" {value}\n", style="bold white")
    text.append("\n")
    # Usage hints: dim label column, plain white values.
    for label, value in [
        ("Quick start", "/help · /skill · /mcp"),
        ("Multiline", "Shift+Enter"),
        ("Images", paste_key),
    ]:
        text.append(f" {label:<14}", style="dim")
        text.append(f" {value}\n", style="white")
    text.append("\n")
    text.append(" Ready\n", style="bold green")

    banner = Static(text)
    banner.styles.height = "auto"
    chat.add_entry(banner)
|
|
156
|
+
|
|
157
|
+
@staticmethod
def _is_safe_name(name: str) -> bool:
    """Validate skill/plugin name — alphanumeric, hyphens, underscores, dots only.

    Returns True only when *name* is non-empty, consists solely of the
    allowed characters, and is not a bare "." or ".." (which would be a
    path-traversal hazard when the name is later used as a directory
    component).

    Uses ``re.fullmatch`` rather than ``re.match`` with a trailing ``$``:
    ``$`` also matches just before a final newline, so the old pattern
    accepted a name with a trailing newline — defeating the point of a
    safety check.
    """
    if name in (".", ".."):
        return False
    return bool(re.fullmatch(r'[a-zA-Z0-9_.-]+', name))
|
|
161
|
+
|
|
162
|
+
@staticmethod
def _is_valid_repo(source: str) -> bool:
    """Validate GitHub repo format: owner/name with safe characters.

    Accepts either "owner/name" or a full "https://github.com/owner/name"
    URL (an optional trailing slash is stripped). Each of the two path
    components must be non-empty, contain only alphanumerics, hyphens,
    underscores, or dots, and must not be "." or ".." (path traversal).

    Uses ``re.fullmatch`` rather than ``re.match`` + ``$`` so a component
    ending in a newline is rejected (``$`` matches before a final newline).
    """
    cleaned = source.replace("https://github.com/", "").rstrip("/")
    parts = cleaned.split("/")
    if len(parts) != 2:
        return False
    return all(
        p not in (".", "..") and re.fullmatch(r'[a-zA-Z0-9_.-]+', p)
        for p in parts
    )
|
|
170
|
+
|
|
171
|
+
def _detect_branch(self) -> str:
    """Return the current git branch name, or "" when it cannot be determined.

    Runs ``git rev-parse --abbrev-ref HEAD`` in the session's working
    directory with a 3-second timeout. Any failure — git missing, not a
    repository, timeout — yields the empty string.
    """
    command = ["git", "rev-parse", "--abbrev-ref", "HEAD"]
    try:
        proc = subprocess.run(
            command,
            cwd=self._cwd,
            capture_output=True,
            text=True,
            timeout=3,
        )
    except Exception:
        return ""
    if proc.returncode != 0:
        return ""
    return proc.stdout.strip()
|
|
180
|
+
|
|
181
|
+
def _handle_interrupt(self) -> None:
    """Handle Ctrl+C: first press saves checkpoint, second force exits.

    Behavior matrix:
      * not streaming            -> exit immediately
      * streaming, first press   -> persist a session checkpoint, print a
                                    resume hint, arm the 2-second window
      * streaming, second press
        within 2 seconds         -> print goodbye and force-exit
    """
    import time as _time

    # Monotonic clock so the 2-second double-press window is immune to
    # wall-clock adjustments.
    now = _time.monotonic()
    status = self.query_one(StatusBar)
    chat = self.query_one(ChatScrollView)

    # If not streaming, exit immediately
    if not status.is_streaming:
        self.exit()
        return

    # Second Ctrl+C within 2 seconds: force exit
    if self._interrupt_pending and (now - self._last_interrupt_time) < 2.0:
        chat.add_entry(AssistantText("Goodbye."))
        self.exit()
        return

    # First Ctrl+C while streaming: save checkpoint and prompt
    self._interrupt_pending = True
    self._last_interrupt_time = now

    session_id = ""
    if self._runtime is not None:
        try:
            from llm_code.runtime.checkpoint_recovery import CheckpointRecovery
            recovery = CheckpointRecovery(
                Path.home() / ".llm-code" / "checkpoints"
            )
            # NOTE(review): `path` is unused — consider logging it so the
            # user can locate the checkpoint file.
            path = recovery.save_checkpoint(self._runtime.session)
            session_id = self._runtime.session.id
        except Exception:
            # Best-effort: a failed checkpoint must not block the interrupt.
            pass

    resume_hint = (
        f"\n Resume with: llm-code --resume {session_id}" if session_id else ""
    )
    chat.add_entry(AssistantText(
        f"\u23f8 Session paused and saved.{resume_hint}\n"
        f" Press Ctrl+C again to quit immediately."
    ))
|
|
223
|
+
|
|
224
|
+
def _init_runtime(self) -> None:
    """Initialize the conversation runtime.

    Builds the provider client, cost tracker, tool registry (core, git,
    agent, deferred, memory, cron, swarm, task, computer-use, IDE, and LSP
    tools), permission policy, hooks, checkpointing, skills, memory,
    project index, and telemetry, then assembles them into a
    ``ConversationRuntime`` stored on ``self._runtime``.

    Subsystem initialization is deliberately best-effort: each optional
    subsystem is wrapped in try/except so a missing extra or a broken
    plugin degrades to ``None`` instead of crashing startup. Imports are
    local to keep TUI startup fast when the runtime is never built.

    No-op (with a warning) when no config was provided.
    """
    if self._config is None:
        logger.warning("No config provided; runtime will not be initialized.")
        return

    from llm_code.api.client import ProviderClient
    from llm_code.runtime.cost_tracker import CostTracker
    from llm_code.runtime.model_aliases import resolve_model
    from llm_code.runtime.context import ProjectContext
    from llm_code.runtime.conversation import ConversationRuntime
    from llm_code.runtime.hooks import HookRunner
    from llm_code.runtime.permissions import PermissionMode, PermissionPolicy
    from llm_code.runtime.prompt import SystemPromptBuilder
    from llm_code.runtime.session import Session
    from llm_code.tools.bash import BashTool
    from llm_code.tools.edit_file import EditFileTool
    from llm_code.tools.git_tools import (
        GitBranchTool, GitCommitTool, GitDiffTool,
        GitLogTool, GitPushTool, GitStashTool, GitStatusTool,
    )
    from llm_code.tools.glob_search import GlobSearchTool
    from llm_code.tools.grep_search import GrepSearchTool
    from llm_code.tools.notebook_edit import NotebookEditTool
    from llm_code.tools.notebook_read import NotebookReadTool
    from llm_code.tools.read_file import ReadFileTool
    from llm_code.tools.registry import ToolRegistry
    from llm_code.tools.write_file import WriteFileTool

    # Empty key is allowed: local providers typically require none.
    api_key = os.environ.get(self._config.provider_api_key_env, "")
    base_url = self._config.provider_base_url or ""

    resolved_model = resolve_model(
        self._config.model, custom_aliases=self._config.model_aliases
    )
    self._cost_tracker = CostTracker(
        model=resolved_model,
        custom_pricing=self._config.pricing or None,
        max_budget_usd=self._config.max_budget_usd,
    )

    provider = ProviderClient.from_model(
        model=resolved_model,
        base_url=base_url,
        api_key=api_key,
        timeout=self._config.timeout,
        max_retries=self._config.max_retries,
        native_tools=self._config.native_tools,
    )

    # Register core tools — local models get longer bash timeout
    _base_url = self._config.provider_base_url or ""
    # NOTE(review): substring heuristic — "10." and "172." also match
    # non-private hosts (e.g. "34.10.2.1" or "example10.com"), and "172."
    # covers more than the 172.16/12 private range. Consider parsing the
    # host with urllib.parse + ipaddress; confirm intended scope.
    _is_local = any(h in _base_url for h in ("localhost", "127.0.0.1", "0.0.0.0", "192.168.", "10.", "172."))
    _bash_timeout = 0 if _is_local else 30  # 0 = no timeout for local models

    self._tool_reg = ToolRegistry()
    from llm_code.tools.web_fetch import WebFetchTool
    from llm_code.tools.web_search import WebSearchTool

    # Duplicate registrations raise ValueError and are silently skipped.
    for tool in (
        ReadFileTool(),
        WriteFileTool(),
        EditFileTool(),
        BashTool(default_timeout=_bash_timeout),
        GlobSearchTool(),
        GrepSearchTool(),
        NotebookReadTool(),
        NotebookEditTool(),
        WebFetchTool(),
        WebSearchTool(),
    ):
        try:
            self._tool_reg.register(tool)
        except ValueError:
            pass

    for cls in (
        GitStatusTool, GitDiffTool, GitLogTool, GitCommitTool,
        GitPushTool, GitStashTool, GitBranchTool,
    ):
        try:
            self._tool_reg.register(cls())
        except ValueError:
            pass

    # Try to register AgentTool
    try:
        from llm_code.tools.agent import AgentTool
        if self._tool_reg.get("agent") is None:
            self._tool_reg.register(AgentTool(
                runtime_factory=None, max_depth=3, current_depth=0,
            ))
    except (ImportError, ValueError):
        pass

    # Deferred tool manager + ToolSearchTool
    from llm_code.tools.deferred import DeferredToolManager
    from llm_code.tools.tool_search import ToolSearchTool
    self._deferred_tool_manager = DeferredToolManager()
    try:
        self._tool_reg.register(ToolSearchTool(self._deferred_tool_manager))
    except ValueError:
        pass

    context = ProjectContext.discover(self._cwd)
    session = Session.create(self._cwd)

    # Map config string -> PermissionMode; unknown values fall back to PROMPT.
    mode_map = {
        "read_only": PermissionMode.READ_ONLY,
        "workspace_write": PermissionMode.WORKSPACE_WRITE,
        "full_access": PermissionMode.FULL_ACCESS,
        "auto_accept": PermissionMode.AUTO_ACCEPT,
        "prompt": PermissionMode.PROMPT,
    }
    perm_mode = mode_map.get(self._config.permission_mode, PermissionMode.PROMPT)
    permissions = PermissionPolicy(
        mode=perm_mode,
        allow_tools=self._config.allowed_tools,
        deny_tools=self._config.denied_tools,
    )

    hooks = HookRunner(self._config.hooks)
    prompt_builder = SystemPromptBuilder()

    # Checkpoint manager (git-based undo)
    checkpoint_mgr = None
    if (self._cwd / ".git").is_dir():
        try:
            from llm_code.runtime.checkpoint import CheckpointManager
            checkpoint_mgr = CheckpointManager(self._cwd)
        except Exception:
            pass
    self._checkpoint_mgr = checkpoint_mgr

    # Recovery checkpoint (session state persistence)
    recovery_checkpoint = None
    try:
        from llm_code.runtime.checkpoint_recovery import CheckpointRecovery
        recovery_checkpoint = CheckpointRecovery(Path.home() / ".llm-code" / "checkpoints")
    except Exception:
        pass

    # Token budget
    token_budget = None
    if self._budget is not None:
        try:
            from llm_code.runtime.token_budget import TokenBudget
            token_budget = TokenBudget(target=self._budget)
        except Exception:
            pass

    # Skills: user-level, project-level, and plugin-provided skill dirs.
    try:
        from llm_code.runtime.skills import SkillLoader
        from llm_code.marketplace.installer import PluginInstaller
        skill_dirs: list[Path] = [
            Path.home() / ".llm-code" / "skills",
            self._cwd / ".llm-code" / "skills",
        ]
        plugin_dir = Path.home() / ".llm-code" / "plugins"
        if plugin_dir.is_dir():
            pi = PluginInstaller(plugin_dir)
            for p in pi.list_installed():
                # Manifest-declared skills path takes precedence...
                if p.enabled and p.manifest.skills:
                    sp = p.path / p.manifest.skills
                    if sp.is_dir():
                        skill_dirs.append(sp)
                # ...but a conventional "skills/" dir is also picked up.
                direct = p.path / "skills"
                if p.enabled and direct.is_dir() and direct not in skill_dirs:
                    skill_dirs.append(direct)
        self._skills = SkillLoader().load_from_dirs(skill_dirs)
    except Exception:
        self._skills = None

    # Memory
    try:
        from llm_code.runtime.memory import MemoryStore
        memory_dir = Path.home() / ".llm-code" / "memory"
        self._memory = MemoryStore(memory_dir, self._cwd)
    except Exception:
        self._memory = None

    # Register memory tools
    try:
        from llm_code.tools.memory_tools import MemoryStoreTool, MemoryRecallTool, MemoryListTool
        if self._memory is not None:
            for tool_cls in (MemoryStoreTool, MemoryRecallTool, MemoryListTool):
                try:
                    self._tool_reg.register(tool_cls(self._memory))
                except ValueError:
                    pass
    except ImportError:
        pass

    # Register cron tools
    try:
        from llm_code.cron.storage import CronStorage
        from llm_code.tools.cron_create import CronCreateTool
        from llm_code.tools.cron_list import CronListTool
        from llm_code.tools.cron_delete import CronDeleteTool
        cron_storage = CronStorage(self._cwd / ".llm-code" / "scheduled_tasks.json")
        self._cron_storage = cron_storage
        for tool in (CronCreateTool(cron_storage), CronListTool(cron_storage), CronDeleteTool(cron_storage)):
            try:
                self._tool_reg.register(tool)
            except ValueError:
                pass
    except Exception:
        self._cron_storage = None

    # Register swarm tools
    self._swarm_manager = None
    try:
        if self._config.swarm.enabled:
            from llm_code.swarm.manager import SwarmManager
            from llm_code.tools.swarm_create import SwarmCreateTool
            from llm_code.tools.swarm_list import SwarmListTool
            from llm_code.tools.swarm_message import SwarmMessageTool
            from llm_code.tools.swarm_delete import SwarmDeleteTool
            from llm_code.swarm.coordinator import Coordinator
            from llm_code.tools.coordinator_tool import CoordinatorTool

            swarm_mgr = SwarmManager(
                swarm_dir=self._cwd / ".llm-code" / "swarm",
                max_members=self._config.swarm.max_members,
                backend_preference=self._config.swarm.backend,
            )
            self._swarm_manager = swarm_mgr
            for tool in (
                SwarmCreateTool(swarm_mgr),
                SwarmListTool(swarm_mgr),
                SwarmMessageTool(swarm_mgr),
                SwarmDeleteTool(swarm_mgr),
            ):
                try:
                    self._tool_reg.register(tool)
                except ValueError:
                    pass
            # Classes stored for later instantiation, not used here.
            self._coordinator_class = Coordinator
            self._coordinator_tool_class = CoordinatorTool
    except Exception:
        self._swarm_manager = None

    # Register task lifecycle tools
    self._task_manager = None
    try:
        from llm_code.task.manager import TaskLifecycleManager
        from llm_code.task.verifier import Verifier
        from llm_code.task.diagnostics import DiagnosticsEngine
        from llm_code.tools.task_plan import TaskPlanTool
        from llm_code.tools.task_verify import TaskVerifyTool
        from llm_code.tools.task_close import TaskCloseTool

        task_dir = self._cwd / ".llm-code" / "tasks"
        diag_dir = self._cwd / ".llm-code" / "diagnostics"
        task_mgr = TaskLifecycleManager(task_dir=task_dir)
        verifier = Verifier(cwd=self._cwd)
        diagnostics = DiagnosticsEngine(diagnostics_dir=diag_dir)
        self._task_manager = task_mgr

        sid = session.id if session else ""

        for tool in (
            TaskPlanTool(task_mgr, session_id=sid),
            TaskVerifyTool(task_mgr, verifier, diagnostics),
            TaskCloseTool(task_mgr),
        ):
            try:
                self._tool_reg.register(tool)
            except ValueError:
                pass
    except Exception:
        self._task_manager = None

    # Register computer-use tools (only when enabled)
    if self._config.computer_use.enabled:
        try:
            from llm_code.tools.computer_use_tools import (
                ScreenshotTool, MouseClickTool, KeyboardTypeTool,
                KeyPressTool, ScrollTool, MouseDragTool,
            )
            cu_config = self._config.computer_use
            for tool in (
                ScreenshotTool(cu_config), MouseClickTool(cu_config),
                KeyboardTypeTool(cu_config), KeyPressTool(cu_config),
                ScrollTool(cu_config), MouseDragTool(cu_config),
            ):
                try:
                    self._tool_reg.register(tool)
                except ValueError:
                    pass
        except ImportError:
            pass

    # Register IDE tools if enabled
    if self._config.ide.enabled:
        try:
            from llm_code.ide.bridge import IDEBridge
            from llm_code.tools.ide_open import IDEOpenTool
            from llm_code.tools.ide_diagnostics import IDEDiagnosticsTool
            from llm_code.tools.ide_selection import IDESelectionTool

            self._ide_bridge = IDEBridge(self._config.ide)
            for tool in (
                IDEOpenTool(self._ide_bridge),
                IDEDiagnosticsTool(self._ide_bridge),
                IDESelectionTool(self._ide_bridge),
            ):
                try:
                    self._tool_reg.register(tool)
                except ValueError:
                    pass
        except ImportError:
            self._ide_bridge = None
    else:
        self._ide_bridge = None

    # Register LSP tools if configured
    self._lsp_manager = None
    if self._config.lsp_servers or self._config.lsp_auto_detect:
        try:
            from llm_code.lsp.manager import LspServerManager
            from llm_code.lsp.tools import LspGotoDefinitionTool, LspFindReferencesTool, LspDiagnosticsTool
            self._lsp_manager = LspServerManager()
            for tool in (
                LspGotoDefinitionTool(self._lsp_manager),
                LspFindReferencesTool(self._lsp_manager),
                LspDiagnosticsTool(self._lsp_manager),
            ):
                try:
                    self._tool_reg.register(tool)
                except ValueError:
                    pass
        except ImportError:
            pass

    # Build project index
    self._project_index = None
    try:
        from llm_code.runtime.indexer import ProjectIndexer
        self._project_index = ProjectIndexer(self._cwd).build_index()
    except Exception:
        pass

    # Initialize telemetry
    telemetry = None
    if getattr(self._config, "telemetry", None) and self._config.telemetry.enabled:
        try:
            from llm_code.runtime.telemetry import Telemetry, TelemetryConfig
            telemetry = Telemetry(TelemetryConfig(
                enabled=True,
                endpoint=self._config.telemetry.endpoint,
                service_name=self._config.telemetry.service_name,
            ))
        except Exception:
            pass

    # Sandbox detection — inject info into context
    try:
        from llm_code.runtime.sandbox import get_sandbox_info
        sandbox = get_sandbox_info()
        if sandbox["sandboxed"]:
            logger.info("Sandbox detected: %s", sandbox["type"])
    except Exception:
        pass

    # Create runtime with all subsystem references
    self._runtime = ConversationRuntime(
        provider=provider,
        tool_registry=self._tool_reg,
        permission_policy=permissions,
        hook_runner=hooks,
        prompt_builder=prompt_builder,
        config=self._config,
        session=session,
        context=context,
        checkpoint_manager=checkpoint_mgr,
        token_budget=token_budget,
        recovery_checkpoint=recovery_checkpoint,
        cost_tracker=self._cost_tracker,
        deferred_tool_manager=self._deferred_tool_manager,
        telemetry=telemetry,
        skills=self._skills,
        memory_store=self._memory,
        task_manager=self._task_manager,
        project_index=self._project_index,
        lsp_manager=self._lsp_manager,
    )
|
|
612
|
+
|
|
613
|
+
async def _init_mcp(self) -> None:
    """Start MCP servers and register their tools (async, called after _init_runtime).

    Builds a config object per server entry in ``self._config.mcp_servers``
    (non-dict entries are skipped), starts them all, and registers the tools
    they expose into the tool registry. Any failure is logged and leaves
    ``self._mcp_manager`` as ``None`` — MCP is strictly best-effort.
    """
    if self._config is None or not self._config.mcp_servers:
        self._mcp_manager = None
        return
    try:
        from llm_code.mcp.manager import McpServerManager
        from llm_code.mcp.types import McpServerConfig

        # Translate raw dict entries into typed server configs.
        server_configs: dict[str, McpServerConfig] = {
            srv_name: McpServerConfig(
                command=spec.get("command"),
                args=tuple(spec.get("args", ())),
                env=spec.get("env"),
                transport_type=spec.get("transport_type", "stdio"),
                url=spec.get("url"),
                headers=spec.get("headers"),
            )
            for srv_name, spec in self._config.mcp_servers.items()
            if isinstance(spec, dict)
        }
        mcp_mgr = McpServerManager()
        await mcp_mgr.start_all(server_configs)
        tool_count = await mcp_mgr.register_all_tools(self._tool_reg)
        self._mcp_manager = mcp_mgr
        if self._runtime is not None:
            # Runtime keeps its own handle so it can route tool calls.
            self._runtime._mcp_manager = mcp_mgr
        if tool_count:
            logger.info("MCP: %d server(s), %d tool(s) registered", len(server_configs), tool_count)
    except Exception as exc:
        logger.warning("MCP initialization failed: %s", exc)
        self._mcp_manager = None
|
|
644
|
+
|
|
645
|
+
def _hot_start_mcp(self, name: str, raw_config: dict) -> None:
    """Start a single MCP server without restart.

    Schedules an async worker that builds the server config, lazily creates
    the shared manager if needed, starts the server, and reports the outcome
    into the chat view. Errors are surfaced to the user, never raised.
    """

    async def _launch() -> None:
        try:
            from llm_code.mcp.manager import McpServerManager
            from llm_code.mcp.types import McpServerConfig

            server_cfg = McpServerConfig(
                command=raw_config.get("command"),
                args=tuple(raw_config.get("args", ())),
                env=raw_config.get("env"),
                transport_type=raw_config.get("transport_type", "stdio"),
                url=raw_config.get("url"),
                headers=raw_config.get("headers"),
            )
            # Lazily create the shared manager on first hot-start.
            if self._mcp_manager is None:
                self._mcp_manager = McpServerManager()
            await self._mcp_manager.start_all({name: server_cfg})
            count = await self._mcp_manager.register_all_tools(self._tool_reg)
            if self._runtime is not None:
                self._runtime._mcp_manager = self._mcp_manager
            self.query_one(ChatScrollView).add_entry(AssistantText(
                f"MCP server '{name}' started ({count} tools registered)."
            ))
        except Exception as exc:
            self.query_one(ChatScrollView).add_entry(AssistantText(f"MCP start failed: {exc}"))

    self.run_worker(_launch(), name=f"mcp_start_{name}")
|
|
675
|
+
|
|
676
|
+
def _paste_clipboard_image(self) -> None:
    """Try to capture an image from the system clipboard.

    On success the image is queued in ``self._pending_images`` and a marker
    is inserted into the input bar; otherwise a status message is shown.
    """
    chat = self.query_one(ChatScrollView)
    input_bar = self.query_one(InputBar)
    try:
        from llm_code.cli.image import capture_clipboard_image

        grabbed = capture_clipboard_image()
        if grabbed is None:
            chat.add_entry(AssistantText("No image found in clipboard."))
            return
        self._pending_images.append(grabbed)
        input_bar.insert_image_marker()
    except (ImportError, FileNotFoundError, OSError):
        # Typical when the pngpaste helper is missing on macOS.
        chat.add_entry(AssistantText("Clipboard not available (install pngpaste: brew install pngpaste)."))
    except Exception as exc:
        chat.add_entry(AssistantText(f"Clipboard error: {exc}"))
|
|
692
|
+
|
|
693
|
+
def on_paste(self, event) -> None:
    """Handle terminal paste events by probing the clipboard for an image.

    Cmd+V (macOS) / Ctrl+V (Linux) arrives via bracketed-paste mode as text;
    in addition we check the system clipboard for an image and, when one is
    present, attach it silently.
    """
    self._paste_clipboard_image()
|
|
701
|
+
|
|
702
|
+
def on_screen_resume(self) -> None:
    """Return focus to InputBar after any modal screen closes."""
    input_bar = self.query_one(InputBar)
    input_bar.focus()
|
|
705
|
+
|
|
706
|
+
def _on_idle(self) -> None:
    """Ensure InputBar stays focused during normal operation.

    Best-effort: any query/focus failure is swallowed so the idle hook
    can never disrupt the UI loop.
    """
    try:
        bar = self.query_one(InputBar)
        # Only refocus on the default screen (not during modals), and only
        # when the bar is both unfocused and enabled.
        if (
            self.screen is self.screen_stack[0]
            and self.focused is not bar
            and not bar.disabled
        ):
            bar.focus()
    except Exception:
        pass
|
|
716
|
+
|
|
717
|
+
def on_input_bar_submitted(self, event: InputBar.Submitted) -> None:
    """Handle user input submission.

    Strips image markers from the raw input, echoes the message (with an
    image-count label when attachments are pending), then dispatches to
    the slash-command handler or kicks off a conversation turn worker.
    """
    input_bar = self.query_one(InputBar)
    # Image markers are placeholders in the input box — drop them from the text.
    text = event.value.replace(InputBar._IMAGE_MARKER, "").strip()
    if not text and not self._pending_images:
        return

    chat = self.query_one(ChatScrollView)
    chat.resume_auto_scroll()

    # Echo the user's message, prefixed with an image-count label when needed.
    count = len(self._pending_images)
    if count:
        label = f"{count} image{'s' if count > 1 else ''}"
        shown = f"[{label}] {text}" if text else f"[{label}]"
    else:
        shown = text
    chat.add_entry(UserMessage(shown))

    if text.startswith("/"):
        self._handle_slash_command(text)
        return

    # Hand pending images to the runtime and reset the pending state.
    attached = list(self._pending_images)
    self._pending_images.clear()
    input_bar.pending_image_count = 0
    self.run_worker(self._run_turn(text, images=attached), name="run_turn")
|
|
747
|
+
|
|
748
|
+
def on_input_bar_cancelled(self, event: InputBar.Cancelled) -> None:
    """Handle Escape — cancel running generation.

    Intentionally a no-op for now; runtime cancellation lands in Phase 2.
    """
    return
|
|
751
|
+
|
|
752
|
+
def on_key(self, event: "events.Key") -> None:
    """Handle single-key permission responses (y/n/a), image paste, and scroll."""
    key = event.key

    # Ctrl+D — quit (with dream consolidation).
    if key == "ctrl+d":
        import asyncio
        asyncio.ensure_future(self._graceful_exit())
        return

    # Ctrl+V / Ctrl+I — paste image from clipboard.
    if key in ("ctrl+v", "ctrl+i"):
        self._paste_clipboard_image()
        event.prevent_default()
        event.stop()
        return

    # Page Up / Page Down — manual chat scrolling; scrolling up pauses
    # auto-follow, scrolling down resumes it.
    if key in ("pageup", "pagedown"):
        chat = self.query_one(ChatScrollView)
        if key == "pageup":
            chat.scroll_up(animate=False)
            chat.pause_auto_scroll()
        else:
            chat.scroll_down(animate=False)
            chat.resume_auto_scroll()
        event.prevent_default()
        return

    # Permission prompt handling: y=allow, n=deny, a=always.
    if not self._permission_pending or self._runtime is None:
        return
    answer = {"y": "allow", "n": "deny", "a": "always"}.get(key)
    if answer is not None:
        self._runtime.send_permission_response(answer)
        event.prevent_default()
        event.stop()
|
|
790
|
+
|
|
791
|
+
async def _run_turn(self, user_input: str, images: list | None = None) -> None:
    """Run a conversation turn with full streaming event handling.

    Drives the runtime's async event stream and renders each event kind
    into the chat view: text deltas (with client-side stripping of
    <think>/<thinking> and <tool_call> tags), thinking deltas, tool
    start/result/progress, inline permission prompts, and the final
    usage/cost summary. Always re-enables the input bar in ``finally``.

    Args:
        user_input: The user's message text (already stripped of markers).
        images: Optional list of attached images forwarded to the runtime.
    """
    import asyncio
    import time
    from llm_code.api.types import (
        StreamPermissionRequest, StreamTextDelta, StreamThinkingDelta,
        StreamToolExecStart, StreamToolExecResult, StreamToolProgress,
        StreamMessageStop,
    )
    from llm_code.tui.chat_widgets import (
        PermissionInline, SpinnerLine, ThinkingBlock, ToolBlock, TurnSummary,
    )

    if self._runtime is None:
        chat = self.query_one(ChatScrollView)
        chat.add_entry(AssistantText("Error: runtime not initialized. Check configuration."))
        return

    chat = self.query_one(ChatScrollView)
    input_bar = self.query_one(InputBar)
    status = self.query_one(StatusBar)

    # Lock the input bar for the duration of the turn; also doubles as the
    # loop condition for the spinner updater below.
    input_bar.disabled = True
    status.is_streaming = True

    # Reset per-turn counters (session totals live on self._input/_output_tokens).
    turn_input_tokens = 0
    turn_output_tokens = 0

    spinner = SpinnerLine()
    spinner.phase = "waiting"
    chat.add_entry(spinner)
    start = time.monotonic()

    async def update_spinner():
        # Animate the spinner ~10x/sec while the turn is in flight.
        while input_bar.disabled:
            await asyncio.sleep(0.1)
            spinner.elapsed = time.monotonic() - start
            spinner.advance_frame()

    timer_task = asyncio.create_task(update_spinner())

    assistant = AssistantText()
    assistant_added = False
    thinking_buffer = ""
    thinking_start = time.monotonic()
    # Client-side tag parsing for models (like Qwen) that emit
    # <think> and <tool_call> as raw StreamTextDelta
    _in_think_tag = False
    _think_close_tag = "</think>"
    _in_tool_call_tag = False
    _raw_text_buffer = ""

    async def remove_spinner() -> None:
        """Remove spinner if it is currently mounted."""
        if spinner.is_mounted:
            await spinner.remove()

    perm_widget = None

    # Sync plan mode flag to runtime before each turn
    self._runtime.plan_mode = self._plan_mode

    try:
        async for event in self._runtime.run_turn(user_input, images=images):
            # Clean up permission widget from previous iteration: the first
            # non-permission event after a prompt means the user answered.
            if self._permission_pending and not isinstance(event, StreamPermissionRequest):
                self._permission_pending = False
                if perm_widget is not None and perm_widget.is_mounted:
                    await perm_widget.remove()
                perm_widget = None
                # Re-add spinner while tool executes
                spinner.phase = "running"
                chat.add_entry(spinner)

            if isinstance(event, StreamTextDelta):
                # Client-side parsing: buffer text and strip think/tool_call tags
                _raw_text_buffer += event.text

                # Handle <think> / <thinking> tags — route to thinking buffer
                for open_tag, close_tag in [("<think>", "</think>"), ("<thinking>", "</thinking>")]:
                    while open_tag in _raw_text_buffer and not _in_think_tag:
                        before, _, _raw_text_buffer = _raw_text_buffer.partition(open_tag)
                        if before.strip():
                            # Flush any visible text that preceded the tag.
                            if not assistant_added:
                                await remove_spinner()
                                chat.add_entry(assistant)
                                assistant_added = True
                            assistant.append_text(before)
                        _in_think_tag = True
                        _think_close_tag = close_tag
                        spinner.phase = "thinking"

                if _in_think_tag:
                    if _think_close_tag in _raw_text_buffer:
                        # Closing tag arrived: emit the accumulated thinking block.
                        think_content, _, _raw_text_buffer = _raw_text_buffer.partition(_think_close_tag)
                        thinking_buffer += think_content
                        _in_think_tag = False
                        if thinking_buffer.strip():
                            elapsed_t = time.monotonic() - thinking_start
                            # Rough token estimate: ~4 chars per token.
                            tokens_t = len(thinking_buffer) // 4
                            chat.add_entry(ThinkingBlock(thinking_buffer, elapsed_t, tokens_t))
                        thinking_buffer = ""
                    else:
                        # Still inside the tag — keep buffering, render nothing.
                        thinking_buffer += _raw_text_buffer
                        _raw_text_buffer = ""
                        continue

                # Handle <tool_call> tags — suppress (runtime handles tool execution)
                while "<tool_call>" in _raw_text_buffer and not _in_tool_call_tag:
                    before, _, _raw_text_buffer = _raw_text_buffer.partition("<tool_call>")
                    if before.strip():
                        if not assistant_added:
                            await remove_spinner()
                            chat.add_entry(assistant)
                            assistant_added = True
                        assistant.append_text(before)
                    _in_tool_call_tag = True

                if _in_tool_call_tag:
                    if "</tool_call>" in _raw_text_buffer:
                        # Discard the tool-call payload; the runtime executes it.
                        _, _, _raw_text_buffer = _raw_text_buffer.partition("</tool_call>")
                        _in_tool_call_tag = False
                    else:
                        _raw_text_buffer = ""
                        continue

                # Normal text — output to assistant
                if _raw_text_buffer:
                    if not assistant_added:
                        await remove_spinner()
                        chat.add_entry(assistant)
                        assistant_added = True
                    assistant.append_text(_raw_text_buffer)
                    _raw_text_buffer = ""
                    chat.resume_auto_scroll()

            elif isinstance(event, StreamThinkingDelta):
                # Native thinking deltas (provider-side) — same buffer as tags.
                spinner.phase = "thinking"
                thinking_buffer += event.text

            elif isinstance(event, StreamToolExecStart):
                await remove_spinner()
                tool_widget = ToolBlock.create(
                    event.tool_name, event.args_summary, "", is_error=False,
                )
                chat.add_entry(tool_widget)
                spinner.phase = "running"
                spinner._tool_name = event.tool_name
                chat.add_entry(spinner)

            elif isinstance(event, StreamToolExecResult):
                await remove_spinner()
                # Truncate tool output preview to 200 chars for the chat view.
                tool_widget = ToolBlock.create(
                    event.tool_name, "", event.output[:200], event.is_error,
                )
                chat.add_entry(tool_widget)
                spinner.phase = "processing"
                thinking_start = time.monotonic()
                chat.add_entry(spinner)

            elif isinstance(event, StreamToolProgress):
                spinner.phase = "running"
                spinner._tool_name = event.tool_name

            elif isinstance(event, StreamPermissionRequest):
                await remove_spinner()
                perm_widget = PermissionInline(
                    event.tool_name, event.args_preview,
                )
                chat.add_entry(perm_widget)
                self._permission_pending = True
                # No explicit wait — the runtime generator is suspended
                # on its own asyncio.Future. The async for loop blocks on
                # __anext__ until y/n/a resolves the Future via on_key →
                # send_permission_response. Cleanup at top of loop.

            elif isinstance(event, StreamMessageStop):
                if event.usage:
                    # Accumulate both per-turn and session-wide token totals.
                    turn_input_tokens += event.usage.input_tokens
                    turn_output_tokens += event.usage.output_tokens
                    self._input_tokens += event.usage.input_tokens
                    self._output_tokens += event.usage.output_tokens
                    if self._cost_tracker:
                        self._cost_tracker.add_usage(
                            event.usage.input_tokens, event.usage.output_tokens,
                        )
                    # Real-time status bar update
                    status.tokens = self._output_tokens
                    if self._cost_tracker:
                        cost_usd = self._cost_tracker.total_cost_usd
                        # Hide the cost until it is non-trivial.
                        status.cost = f"${cost_usd:.4f}" if cost_usd > 0.0001 else ""

    except Exception as exc:
        chat.add_entry(AssistantText(f"Error: {exc}"))
    finally:
        # Always restore interactivity, even on cancellation or error.
        timer_task.cancel()
        self._permission_pending = False
        try:
            await remove_spinner()
        except Exception:
            pass
        if perm_widget is not None and perm_widget.is_mounted:
            try:
                await perm_widget.remove()
            except Exception:
                pass
        input_bar.disabled = False
        status.is_streaming = False

    # Flush any thinking text that never saw a closing tag.
    if thinking_buffer:
        elapsed = time.monotonic() - thinking_start
        tokens = len(thinking_buffer) // 4
        chat.add_entry(ThinkingBlock(thinking_buffer, elapsed, tokens))

    elapsed = time.monotonic() - start
    cost = self._cost_tracker.format_cost() if self._cost_tracker else ""
    chat.add_entry(TurnSummary.create(elapsed, turn_input_tokens, turn_output_tokens, cost))

    status.tokens = self._output_tokens  # session total in status bar
    status.cost = cost
    chat.resume_auto_scroll()
|
|
1013
|
+
|
|
1014
|
+
def _handle_slash_command(self, text: str) -> None:
    """Handle slash commands — dispatches to _cmd_* methods.

    Unrecognized commands produce a hint pointing at /help; unparseable
    input is silently ignored.
    """
    from llm_code.cli.commands import parse_slash_command

    parsed = parse_slash_command(text)
    if parsed is None:
        return

    command_name = parsed.name
    # Dispatch by naming convention: /foo → self._cmd_foo(args).
    handler = getattr(self, f"_cmd_{command_name}", None)
    if handler is None:
        chat = self.query_one(ChatScrollView)
        chat.add_entry(AssistantText(f"Unknown command: /{command_name} — type /help for help"))
        return
    handler(parsed.args.strip())
|
|
1031
|
+
|
|
1032
|
+
def _cmd_exit(self, args: str) -> None:
    """Quit the app after running exit-time consolidation."""
    import asyncio

    # Schedule the graceful-exit coroutine on the running loop.
    asyncio.ensure_future(self._graceful_exit())

# /quit is an alias for /exit.
_cmd_quit = _cmd_exit
|
|
1037
|
+
|
|
1038
|
+
async def _graceful_exit(self) -> None:
    """Dream consolidation + session save before exit."""
    # Run exit-time consolidation first, then tear down the app.
    await self._dream_on_exit()
    self.exit()
|
|
1042
|
+
|
|
1043
|
+
async def _dream_on_exit(self) -> None:
    """Fire DreamTask consolidation + knowledge compilation on session exit.

    Both phases are strictly best-effort and individually capped at 30
    seconds; any failure is swallowed so exit is never blocked.
    """
    import asyncio as _aio

    if not self._memory or not self._runtime:
        return

    # Phase 1: memory consolidation via DreamTask.
    summary = ""
    try:
        from llm_code.runtime.dream import DreamTask

        consolidation = DreamTask().consolidate(
            self._runtime.session,
            self._memory,
            self._runtime._provider,
            self._config,
        )
        summary = await _aio.wait_for(consolidation, timeout=30.0)
    except Exception:
        pass

    # Phase 2: knowledge compilation (after DreamTask, best-effort).
    knowledge_cfg = getattr(self._config, "knowledge", None)
    if not (knowledge_cfg and knowledge_cfg.compile_on_exit):
        return
    try:
        from llm_code.runtime.knowledge_compiler import KnowledgeCompiler

        model = knowledge_cfg.compile_model or getattr(
            self._config.model_routing, "compaction", ""
        )
        compiler = KnowledgeCompiler(
            cwd=self._cwd,
            llm_provider=self._runtime._provider,
            compile_model=model,
        )
        # Pull "- fact" bullet lines out of the dream summary, skipping
        # "- [" entries (task-list style bullets).
        bullets = [ln.strip() for ln in summary.splitlines()] if summary else []
        facts = [b[2:] for b in bullets if b.startswith("- ") and not b.startswith("- [")]
        payload = compiler.ingest(facts=facts, since_commit=None)
        await _aio.wait_for(compiler.compile(payload), timeout=30.0)
    except Exception:
        pass
|
|
1087
|
+
|
|
1088
|
+
def _cmd_help(self, args: str) -> None:
    """Show the /help modal: a three-tab screen (general, commands,
    custom-commands) navigated with arrow keys; Enter executes the
    highlighted command after dismissing the modal.
    """
    from textual.screen import ModalScreen
    from textual.containers import VerticalScroll
    from textual.widgets import Static
    from textual.reactive import reactive  # NOTE(review): appears unused — confirm before removing
    from rich.text import Text as RichText

    # Captured by the nested HelpScreen class below.
    skills = self._skills
    app_ref = self

    # Built-in commands shown on the "commands" tab: (trigger, description).
    _COMMANDS = [
        ("/help", "Show this help"),
        ("/clear", "Clear conversation"),
        ("/model", "Switch model"),
        ("/cost", "Token usage & costs"),
        ("/budget", "Set token budget"),
        ("/undo", "Undo last file change"),
        ("/cd", "Change directory"),
        ("/config", "Show runtime config"),
        ("/thinking", "Toggle extended thinking"),
        ("/vim", "Toggle vim mode"),
        ("/plan", "Toggle plan/act mode (read-only when ON)"),
        ("/harness", "Show/configure harness quality controls"),
        ("/knowledge", "View or rebuild project knowledge base"),
        ("/dump", "Dump codebase to .llm-code/dump.txt for external LLM use"),
        ("/analyze", "Run code analysis rules on the codebase"),
        ("/diff_check", "Show new/fixed violations vs last analysis"),
        ("/image", "Attach image"),
        ("/search", "Search conversation"),
        ("/index", "Project index"),
        ("/session", "Session management"),
        ("/skill", "Browse & manage skills"),
        ("/plugin", "Browse & manage plugins"),
        ("/mcp", "Browse & manage MCP servers"),
        ("/memory", "Project memory"),
        ("/cron", "Scheduled tasks"),
        ("/checkpoint", "Session checkpoints"),
        ("/vcr", "VCR recording"),
        ("/cancel", "Cancel generation"),
        ("/exit", "Quit"),
    ]

    # Skill-provided commands for the "custom-commands" tab.
    _custom_cmds: list[tuple[str, str]] = []
    if skills:
        for s in sorted(
            list(skills.auto_skills) + list(skills.command_skills),
            key=lambda x: x.name,
        ):
            # Auto-skills have no trigger; show them as "(auto: name)".
            trigger = f"/{s.trigger}" if s.trigger else f"(auto: {s.name})"
            desc = s.description if hasattr(s, "description") and s.description else s.name
            source = "user" if not getattr(s, "plugin", None) else f"({s.plugin})"
            _custom_cmds.append((trigger, f"{desc} {source}"))

    class HelpScreen(ModalScreen):
        # Modal layout: centered box, scrollable content, docked footer.
        DEFAULT_CSS = """
        HelpScreen { align: center middle; }
        #help-box {
            width: 90%;
            height: 85%;
            background: $surface;
            border: round $accent;
            padding: 1 2;
        }
        #help-content { height: 1fr; }
        #help-footer {
            dock: bottom;
            height: 1;
            color: $text-muted;
            text-align: center;
        }
        """

        def __init__(self) -> None:
            super().__init__()
            # _tab indexes into _tab_names; _cursor is the highlighted row
            # on the list tabs (1 and 2).
            self._tab = 0
            self._cursor = 0
            self._tab_names = ["general", "commands", "custom-commands"]

        def compose(self):
            with VerticalScroll(id="help-box"):
                yield Static("Loading...", id="help-content")
                yield Static("← → tabs · ↑↓ navigate · Enter execute · Esc close", id="help-footer")

        def on_mount(self):
            # Replace the "Loading..." placeholder immediately.
            self._refresh_content()

        def on_key(self, event) -> None:
            """Keyboard navigation: ←/→ switch tabs, ↑/↓ move the cursor,
            Enter runs the highlighted command, Esc closes."""
            key = event.key
            if key == "escape":
                self.dismiss()
            elif key == "left":
                self._tab = max(0, self._tab - 1)
                self._cursor = 0
                self._refresh_content()
            elif key == "right":
                self._tab = min(2, self._tab + 1)
                self._cursor = 0
                self._refresh_content()
            elif key == "up" and self._tab > 0:
                self._cursor = max(0, self._cursor - 1)
                self._refresh_content()
            elif key == "down" and self._tab > 0:
                items = _COMMANDS if self._tab == 1 else _custom_cmds
                self._cursor = min(len(items) - 1, self._cursor + 1)
                self._refresh_content()
            elif key == "enter" and self._tab > 0:
                items = _COMMANDS if self._tab == 1 else _custom_cmds
                if 0 <= self._cursor < len(items):
                    cmd = items[self._cursor][0]
                    self.dismiss()
                    # Execute the command after dismiss
                    app_ref.query_one(InputBar).value = ""
                    app_ref._handle_slash_command(cmd)
            event.prevent_default()
            event.stop()

        def _render_header(self) -> RichText:
            """Render the title + tab strip; the active tab is highlighted."""
            text = RichText()
            text.append("llm-code", style="bold cyan")
            text.append(" ", style="dim")
            for i, name in enumerate(self._tab_names):
                if i == self._tab:
                    text.append(f" {name} ", style="bold white on #3a3a5a")
                else:
                    text.append(f" {name} ", style="dim")
            text.append("\n\n")
            return text

        def _refresh_content(self) -> None:
            """Rebuild the active tab and push it into the content widget."""
            content = self.query_one("#help-content", Static)
            from rich.console import Console
            from io import StringIO
            if self._tab == 0:
                rt = self._build_general()
            elif self._tab == 1:
                rt = self._build_list("Browse default commands:", _COMMANDS)
            else:
                rt = self._build_list("Browse custom commands:", _custom_cmds)
            # Render Rich Text to ANSI string for Static.update()
            buf = StringIO()
            console = Console(file=buf, force_terminal=True, width=120)
            console.print(rt, end="")
            content.update(buf.getvalue())

        def _build_general(self) -> RichText:
            """Build the "general" tab: blurb plus a 3-column shortcut table."""
            text = self._render_header()
            text.append(
                "llm-code understands your codebase, makes edits with your "
                "permission, and executes commands — right from your terminal.\n\n",
                style="white",
            )
            text.append("Shortcuts\n", style="bold white")
            shortcuts = [
                ("! for bash mode", "double tap esc to clear", "Ctrl+D to quit"),
                ("/ for commands", "Shift+Enter for multiline", "Ctrl+I to paste images"),
                ("/skill browse skills", "Page Up/Down to scroll", "/vim toggle vim"),
                ("/plugin browse plugins", "Tab to autocomplete", "/model switch model"),
                ("/mcp MCP servers", "Ctrl+O verbose output", "/undo revert changes"),
            ]
            for row in shortcuts:
                for i, col in enumerate(row):
                    # First column bright, the rest dimmed; fixed 32-char columns.
                    text.append(f"{col:<32s}", style="white" if i == 0 else "dim")
                text.append("\n")
            return text

        def _build_list(self, title: str, items: list[tuple[str, str]]) -> RichText:
            """Build a list tab: one (command, description) pair per entry,
            with a ">" marker on the cursor row."""
            text = self._render_header()
            text.append(f"{title}\n\n", style="white")
            if not items:
                text.append(" No items available.\n", style="dim")
                text.append(" Use /skill to browse and install.\n", style="dim")
                return text
            for i, (cmd, desc) in enumerate(items):
                if i == self._cursor:
                    text.append(" > ", style="bold cyan")
                    text.append(f"{cmd}\n", style="bold white")
                else:
                    text.append(f" {cmd}\n", style="bold white")
                text.append(f" {desc}\n", style="dim")
            return text

    self.push_screen(HelpScreen())
|
|
1270
|
+
|
|
1271
|
+
def _cmd_clear(self, args: str) -> None:
    """Remove every entry from the chat transcript view."""
    chat = self.query_one(ChatScrollView)
    chat.remove_children()
|
|
1273
|
+
|
|
1274
|
+
def _cmd_model(self, args: str) -> None:
    """Switch the active model, or report the current one when no arg given."""
    chat = self.query_one(ChatScrollView)
    if not args:
        current = self._config.model if self._config else "(not set)"
        chat.add_entry(AssistantText(f"Current model: {current}"))
        return
    import dataclasses

    self._config = dataclasses.replace(self._config, model=args)
    # Rebuild the runtime so the provider picks up the new model.
    self._init_runtime()
    self.query_one(HeaderBar).model = args
    chat.add_entry(AssistantText(f"Model switched to: {args}"))
|
|
1285
|
+
|
|
1286
|
+
def _cmd_cost(self, args: str) -> None:
    """Print token usage & cost totals for the session."""
    report = self._cost_tracker.format_cost() if self._cost_tracker else "No cost data"
    chat = self.query_one(ChatScrollView)
    chat.add_entry(AssistantText(report))
|
|
1289
|
+
|
|
1290
|
+
def _cmd_cd(self, args: str) -> None:
    """Change the working directory, or print it when called without args."""
    chat = self.query_one(ChatScrollView)
    if not args:
        chat.add_entry(AssistantText(f"Current directory: {self._cwd}"))
        return
    # Relative paths resolve against the current working directory.
    target = Path(args).expanduser()
    if not target.is_absolute():
        target = self._cwd / target
    target = target.resolve()
    if target.is_dir():
        self._cwd = target
        os.chdir(target)  # keep the process cwd in sync with the app's
        chat.add_entry(AssistantText(f"Working directory: {target}"))
    else:
        chat.add_entry(AssistantText(f"Directory not found: {target}"))
|
|
1305
|
+
|
|
1306
|
+
def _cmd_budget(self, args: str) -> None:
    """Set or display the session token budget."""
    chat = self.query_one(ChatScrollView)
    if args:
        try:
            self._budget = int(args)
        except ValueError:
            chat.add_entry(AssistantText("Usage: /budget <number>"))
        else:
            chat.add_entry(AssistantText(f"Token budget set: {self._budget:,}"))
        return
    if self._budget is None:
        chat.add_entry(AssistantText("No budget set."))
    else:
        chat.add_entry(AssistantText(f"Current token budget: {self._budget:,}"))
|
|
1318
|
+
|
|
1319
|
+
def _cmd_undo(self, args: str) -> None:
    """Undo the last file change via the checkpoint manager.

    ``/undo list`` shows the available checkpoints; plain ``/undo``
    reverts the most recent one. Every path now reports an outcome to
    the user — previously, when ``can_undo()`` was False, the command
    completed silently with no feedback at all.
    """
    chat = self.query_one(ChatScrollView)
    if not self._checkpoint_mgr:
        chat.add_entry(AssistantText("Not in a git repository — undo not available."))
        return
    if args.strip() == "list":
        cps = self._checkpoint_mgr.list_checkpoints()
        if cps:
            lines = [f" {cp.id} {cp.tool_name} {cp.timestamp[:19]}" for cp in cps]
            chat.add_entry(AssistantText("\n".join(lines)))
        else:
            chat.add_entry(AssistantText("No checkpoints."))
    elif self._checkpoint_mgr.can_undo():
        cp = self._checkpoint_mgr.undo()
        if cp:
            chat.add_entry(AssistantText(f"Undone: {cp.tool_name} ({cp.tool_args_summary[:50]})"))
        else:
            chat.add_entry(AssistantText("Nothing to undo."))
    else:
        # Fix: give feedback instead of silently doing nothing when no
        # undoable checkpoint exists.
        chat.add_entry(AssistantText("Nothing to undo."))
|
|
1337
|
+
|
|
1338
|
+
def _cmd_index(self, args: str) -> None:
    """Show a summary of the project index, or rebuild it with ``rebuild``."""
    view = self.query_one(ChatScrollView)
    if args.strip() == "rebuild":
        try:
            from llm_code.runtime.indexer import ProjectIndexer
            self._project_index = ProjectIndexer(self._cwd).build_index()
            idx = self._project_index
            view.add_entry(AssistantText(f"Index rebuilt: {len(idx.files)} files, {len(idx.symbols)} symbols"))
        except Exception as exc:
            view.add_entry(AssistantText(f"Index rebuild failed: {exc}"))
        return
    if not self._project_index:
        view.add_entry(AssistantText("No index available."))
        return
    idx = self._project_index
    summary = [f"Files: {len(idx.files)}, Symbols: {len(idx.symbols)}"]
    # Cap the listing at the first 20 symbols to keep the chat readable.
    summary.extend(f" {s.kind} {s.name} — {s.file}:{s.line}" for s in idx.symbols[:20])
    view.add_entry(AssistantText("\n".join(summary)))
|
|
1355
|
+
|
|
1356
|
+
def _cmd_thinking(self, args: str) -> None:
    """Switch the model's thinking mode, or report the current one."""
    view = self.query_one(ChatScrollView)
    mode_map = {"on": "enabled", "off": "disabled", "adaptive": "adaptive"}
    new_mode = mode_map.get(args)
    if new_mode is None:
        # Unrecognized (or empty) argument: show status and usage.
        current = self._config.thinking.mode if self._config else "unknown"
        view.add_entry(AssistantText(f"Thinking: {current}\nUsage: /thinking [adaptive|on|off]"))
        return
    import dataclasses
    from llm_code.runtime.config import ThinkingConfig
    updated = ThinkingConfig(mode=new_mode, budget_tokens=self._config.thinking.budget_tokens)
    self._config = dataclasses.replace(self._config, thinking=updated)
    if self._runtime:
        # Keep the live runtime in sync with the rebuilt config.
        self._runtime._config = self._config
    view.add_entry(AssistantText(f"Thinking mode: {new_mode}"))
|
|
1371
|
+
|
|
1372
|
+
def _cmd_vim(self, args: str) -> None:
    """Toggle vim-style editing on the input bar (and mirror it in the status bar)."""
    view = self.query_one(ChatScrollView)
    input_bar = self.query_one(InputBar)
    status_bar = self.query_one(StatusBar)
    enabling = not input_bar.vim_mode
    mode = "NORMAL" if enabling else ""
    input_bar.vim_mode = mode
    status_bar.vim_mode = mode
    view.add_entry(AssistantText("Vim mode enabled" if enabling else "Vim mode disabled"))
|
|
1384
|
+
|
|
1385
|
+
def _cmd_image(self, args: str) -> None:
    """Attach an image from disk to the next outgoing message."""
    view = self.query_one(ChatScrollView)
    input_bar = self.query_one(InputBar)
    if not args:
        view.add_entry(AssistantText("Usage: /image <path>"))
        return
    try:
        from llm_code.cli.image import load_image_from_path
        resolved = Path(args).expanduser().resolve()
        loaded = load_image_from_path(str(resolved))
        # Queue the image and drop a visual marker into the input bar.
        self._pending_images.append(loaded)
        input_bar.insert_image_marker()
    except FileNotFoundError:
        view.add_entry(AssistantText(f"Image not found: {args}"))
|
|
1399
|
+
|
|
1400
|
+
def _cmd_lsp(self, args: str) -> None:
    """Report LSP status (the language server is never started in a TUI session)."""
    view = self.query_one(ChatScrollView)
    view.add_entry(AssistantText("LSP: not started in this session."))
|
|
1402
|
+
|
|
1403
|
+
def _cmd_cancel(self, args: str) -> None:
    """Ask the runtime to cancel the in-flight request, if it supports cancellation."""
    runtime = self._runtime
    if runtime and hasattr(runtime, '_cancel'):
        runtime._cancel()
        self.query_one(ChatScrollView).add_entry(AssistantText("(cancelled)"))
|
|
1407
|
+
|
|
1408
|
+
def _cmd_plan(self, args: str) -> None:
    """Toggle plan/act mode."""
    self._plan_mode = not self._plan_mode
    status = self.query_one(StatusBar)
    view = self.query_one(ChatScrollView)
    if self._plan_mode:
        status.plan_mode = "PLAN"
        note = "Plan mode ON -- agent will explore and plan without making changes."
    else:
        status.plan_mode = ""
        note = "Plan mode OFF -- back to normal."
    view.add_entry(AssistantText(note))
    if self._runtime:
        # Propagate the flag so the agent loop sees the new mode.
        self._runtime.plan_mode = self._plan_mode
|
|
1425
|
+
|
|
1426
|
+
def _cmd_harness(self, args: str) -> None:
    """Show harness status, or enable/disable controls and switch templates."""
    view = self.query_one(ChatScrollView)

    if not self._runtime or not hasattr(self._runtime, "_harness"):
        view.add_entry(AssistantText("Harness not available."))
        return

    harness = self._runtime._harness
    tokens = args.strip().split()

    if not tokens:
        # No sub-command: render the current guides/sensors status table.
        status = harness.status()

        def row(item: dict) -> str:
            mark = "✓" if item["enabled"] else "✗"
            return f" {mark} {item['name']:<22} {item['trigger']:<12} {item['kind']}"

        report = [f"Harness: {status['template']}\n", " Guides (feedforward):"]
        report.extend(row(g) for g in status["guides"])
        report.append("\n Sensors (feedback):")
        report.extend(row(s) for s in status["sensors"])
        view.add_entry(AssistantText("\n".join(report)))
        return

    action = tokens[0]
    target = tokens[1] if len(tokens) > 1 else None
    if action == "enable" and target is not None:
        harness.enable(target)
        view.add_entry(AssistantText(f"Enabled: {target}"))
    elif action == "disable" and target is not None:
        harness.disable(target)
        view.add_entry(AssistantText(f"Disabled: {target}"))
    elif action == "template" and target is not None:
        from llm_code.harness.templates import default_controls
        from llm_code.harness.config import HarnessConfig
        # Replace the whole config and wipe per-control overrides.
        harness._config = HarnessConfig(template=target, controls=default_controls(target))
        harness._overrides.clear()
        view.add_entry(AssistantText(f"Switched to template: {target}"))
    else:
        view.add_entry(AssistantText(
            "Usage: /harness [enable|disable|template] [name]\n"
            " /harness — show status\n"
            " /harness enable X — enable control X\n"
            " /harness disable X — disable control X\n"
            " /harness template Y — switch to template Y"
        ))
|
|
1474
|
+
|
|
1475
|
+
def _cmd_knowledge(self, args: str) -> None:
    """View or rebuild the project knowledge base."""
    view = self.query_one(ChatScrollView)

    tokens = args.strip().split()
    if tokens and tokens[0] == "rebuild":
        # Rebuild is slow and LLM-backed; run it in the background.
        import asyncio
        asyncio.ensure_future(self._rebuild_knowledge())
        return

    try:
        from llm_code.runtime.knowledge_compiler import KnowledgeCompiler
        entries = KnowledgeCompiler(cwd=self._cwd, llm_provider=None).get_index()
    except Exception:
        view.add_entry(AssistantText("Knowledge base not available."))
        return

    if not entries:
        view.add_entry(AssistantText(
            "Knowledge base is empty.\n"
            "It will be built automatically after your next session, "
            "or run `/knowledge rebuild` to build now."
        ))
        return

    report = ["## Project Knowledge Base\n"]
    report.extend(f"- **{entry.title}** — {entry.summary}" for entry in entries)
    report.append(f"\n{len(entries)} articles. Use `/knowledge rebuild` to force recompilation.")
    view.add_entry(AssistantText("\n".join(report)))
|
|
1509
|
+
|
|
1510
|
+
async def _rebuild_knowledge(self) -> None:
    """Force full knowledge rebuild."""
    view = self.query_one(ChatScrollView)
    if not self._runtime:
        view.add_entry(AssistantText("Runtime not available."))
        return

    view.add_entry(AssistantText("Rebuilding knowledge base..."))
    try:
        from llm_code.runtime.knowledge_compiler import KnowledgeCompiler
        # Pick the compile model: knowledge config first, then the
        # compaction model from routing as a fallback.
        compile_model = ""
        if hasattr(self._config, "knowledge"):
            compile_model = self._config.knowledge.compile_model
        if not compile_model and hasattr(self._config, "model_routing"):
            compile_model = self._config.model_routing.compaction
        compiler = KnowledgeCompiler(
            cwd=self._cwd,
            llm_provider=self._runtime._provider,
            compile_model=compile_model,
        )
        ingest_data = compiler.ingest(facts=[], since_commit=None)
        import asyncio
        # Hard 60s cap so a stuck compile can't hang the UI task.
        await asyncio.wait_for(compiler.compile(ingest_data), timeout=60.0)
        entries = compiler.get_index()
        view.add_entry(AssistantText(f"Knowledge base rebuilt: {len(entries)} articles."))
    except Exception as exc:
        view.add_entry(AssistantText(f"Rebuild failed: {exc}"))
|
|
1537
|
+
|
|
1538
|
+
def _cmd_dump(self, args: str) -> None:
    """Dump codebase for external LLM use (DAFC pattern); runs asynchronously."""
    import asyncio
    asyncio.ensure_future(self._run_dump(args))
|
|
1542
|
+
|
|
1543
|
+
async def _run_dump(self, args: str) -> None:
    """Dump source files to ``.llm-code/dump.txt`` and report the totals."""
    from llm_code.tools.dump import dump_codebase
    view = self.query_one(ChatScrollView)

    # Optional numeric argument overrides the default file cap.
    spec = args.strip()
    max_files = int(spec) if spec.isdigit() else 200

    result = dump_codebase(self._cwd, max_files=max_files)
    if result.file_count == 0:
        view.add_entry(AssistantText("No source files found to dump."))
        return

    # Persist the dump under the project's .llm-code directory.
    dump_path = self._cwd / ".llm-code" / "dump.txt"
    dump_path.parent.mkdir(parents=True, exist_ok=True)
    dump_path.write_text(result.text, encoding="utf-8")

    view.add_entry(AssistantText(
        f"Dumped {result.file_count} files "
        f"({result.total_lines:,} lines, ~{result.estimated_tokens:,} tokens)\n"
        f"Saved to: {dump_path}"
    ))
|
|
1567
|
+
|
|
1568
|
+
def _cmd_analyze(self, args: str) -> None:
    """Run code analysis rules on the codebase; work happens asynchronously."""
    import asyncio
    asyncio.ensure_future(self._run_analyze(args))
|
|
1572
|
+
|
|
1573
|
+
async def _run_analyze(self, args: str) -> None:
    """Run analysis on a target path, show the report, and cache the findings."""
    from llm_code.analysis.engine import run_analysis
    view = self.query_one(ChatScrollView)

    spec = args.strip()
    target = Path(spec) if spec else self._cwd
    if not target.is_absolute():
        target = self._cwd / target

    try:
        result = run_analysis(target)
    except Exception as exc:
        view.add_entry(AssistantText(f"Analysis failed: {exc}"))
        return

    view.add_entry(AssistantText(result.format_chat()))

    # Cache the findings (or clear them) so future prompts can be primed.
    context = result.format_context(max_tokens=1000) if result.violations else None
    self._analysis_context = context
    if self._runtime is not None:
        self._runtime.analysis_context = context
|
|
1598
|
+
|
|
1599
|
+
def _cmd_diff_check(self, args: str) -> None:
    """Show new and fixed violations compared with cached results; runs asynchronously."""
    import asyncio
    asyncio.ensure_future(self._run_diff_check(args))
|
|
1603
|
+
|
|
1604
|
+
async def _run_diff_check(self, args: str) -> None:
    """Compare current violations against the cached baseline and report the delta."""
    from llm_code.analysis.engine import run_diff_check
    view = self.query_one(ChatScrollView)

    try:
        new_violations, fixed_violations = run_diff_check(self._cwd)
    except Exception as exc:
        view.add_entry(AssistantText(f"Diff check failed: {exc}"))
        return

    if not new_violations and not fixed_violations:
        view.add_entry(AssistantText("No changes in violations since last analysis."))
        return

    def located(v) -> str:
        # file:line when a line number is known, bare path otherwise.
        return f"{v.file_path}:{v.line}" if v.line > 0 else v.file_path

    report: list[str] = ["## Diff Check"]
    report.extend(f"NEW {v.severity.upper()} {located(v)} {v.message}" for v in new_violations)
    report.extend(f"FIXED {v.severity.upper()} {located(v)} {v.message}" for v in fixed_violations)
    report.append(f"\n{len(new_violations)} new, {len(fixed_violations)} fixed")
    view.add_entry(AssistantText("\n".join(report)))
|
|
1628
|
+
|
|
1629
|
+
def _cmd_search(self, args: str) -> None:
    """Case-insensitive substring search over the session transcript."""
    view = self.query_one(ChatScrollView)
    if not args or not self._runtime:
        view.add_entry(AssistantText("Usage: /search <query>"))
        return
    needle = args.lower()
    matches = [
        f" [{msg.role}] {str(msg.content)[:100]}"
        for msg in self._runtime.session.messages
        if needle in str(msg.content).lower()
    ]
    if matches:
        # Report the total but only display the first 20 hits.
        view.add_entry(AssistantText(f"Found {len(matches)} matches:\n" + "\n".join(matches[:20])))
    else:
        view.add_entry(AssistantText(f"No matches for: {args}"))
|
|
1642
|
+
|
|
1643
|
+
def _cmd_config(self, args: str) -> None:
    """Print the key settings of the active configuration."""
    view = self.query_one(ChatScrollView)
    if not self._config:
        view.add_entry(AssistantText("No config loaded."))
        return
    cfg = self._config
    summary = "\n".join([
        f"model: {cfg.model}",
        f"provider: {cfg.provider_base_url or 'default'}",
        f"permission: {cfg.permission_mode}",
        f"thinking: {cfg.thinking.mode}",
    ])
    view.add_entry(AssistantText(summary))
|
|
1655
|
+
|
|
1656
|
+
def _cmd_session(self, args: str) -> None:
    """Stub: point the user at the session sub-commands."""
    view = self.query_one(ChatScrollView)
    view.add_entry(AssistantText("Session management: use /session list|save"))
|
|
1658
|
+
|
|
1659
|
+
# ── Voice ─────────────────────────────────────────────────────────
|
|
1660
|
+
|
|
1661
|
+
def _cmd_voice(self, args: str) -> None:
    """Enable, disable, or report voice input."""
    view = self.query_one(ChatScrollView)
    choice = args.strip().lower()
    if choice == "on":
        voice_cfg = getattr(self._config, 'voice', None) if self._config else None
        if voice_cfg and voice_cfg.enabled:
            self._voice_active = True
            view.add_entry(AssistantText("Voice input enabled"))
        else:
            view.add_entry(AssistantText("Voice not configured. Set voice.enabled in config."))
    elif choice == "off":
        self._voice_active = False
        view.add_entry(AssistantText("Voice input disabled"))
    else:
        state = 'active' if self._voice_active else 'inactive'
        view.add_entry(AssistantText(
            f"Voice: {state}\nUsage: /voice [on|off]"
        ))
|
|
1678
|
+
|
|
1679
|
+
# ── Cron ──────────────────────────────────────────────────────────
|
|
1680
|
+
|
|
1681
|
+
def _cmd_cron(self, args: str) -> None:
    """List, explain, or delete scheduled cron tasks."""
    view = self.query_one(ChatScrollView)
    if self._cron_storage is None:
        view.add_entry(AssistantText("Cron not available."))
        return
    # An empty argument defaults to listing tasks.
    command = args.strip() or "list"
    if command == "list":
        tasks = self._cron_storage.list_all()
        if not tasks:
            view.add_entry(AssistantText("No scheduled tasks."))
            return
        report = [f"Scheduled tasks ({len(tasks)}):"]
        for task in tasks:
            flags = [label for label, on in (("recurring", task.recurring), ("permanent", task.permanent)) if on]
            flag_str = f" [{', '.join(flags)}]" if flags else ""
            fired = f", last fired: {task.last_fired_at:%Y-%m-%d %H:%M}" if task.last_fired_at else ""
            report.append(f" {task.id} {task.cron} \"{task.prompt}\"{flag_str}{fired}")
        view.add_entry(AssistantText("\n".join(report)))
    elif command.startswith("delete "):
        task_id = command.split(None, 1)[1].strip()
        if self._cron_storage.remove(task_id):
            view.add_entry(AssistantText(f"Deleted task {task_id}"))
        else:
            view.add_entry(AssistantText(f"Task '{task_id}' not found"))
    elif command == "add":
        view.add_entry(AssistantText(
            "Use the cron_create tool to schedule a task:\n"
            " cron: '0 9 * * *' (5-field cron expression)\n"
            " prompt: 'your prompt here'\n"
            " recurring: true/false\n"
            " permanent: true/false"
        ))
    else:
        view.add_entry(AssistantText("Usage: /cron [list|add|delete <id>]"))
|
|
1720
|
+
|
|
1721
|
+
# ── Task ──────────────────────────────────────────────────────────
|
|
1722
|
+
|
|
1723
|
+
def _cmd_task(self, args: str) -> None:
    """Dispatch /task sub-commands; only ``list`` is handled inline."""
    view = self.query_one(ChatScrollView)
    parts = args.strip().split(None, 1)
    sub = parts[0] if parts else ""
    if sub in ("new", ""):
        view.add_entry(AssistantText("Use the task tools directly to create or manage tasks."))
        return
    if sub in ("verify", "close"):
        view.add_entry(AssistantText("Use the task tools directly."))
        return
    if sub != "list":
        view.add_entry(AssistantText("Usage: /task [new|verify <id>|close <id>|list]"))
        return
    if self._task_manager is None:
        view.add_entry(AssistantText("Task manager not initialized."))
        return
    try:
        tasks = self._task_manager.list_tasks(exclude_done=False)
        if not tasks:
            view.add_entry(AssistantText("No tasks found."))
        else:
            report = ["Tasks:"]
            report.extend(f" {t.id} [{t.status.value:8s}] {t.title}" for t in tasks)
            view.add_entry(AssistantText("\n".join(report)))
    except Exception as exc:
        view.add_entry(AssistantText(f"Error listing tasks: {exc}"))
|
|
1748
|
+
|
|
1749
|
+
# ── Swarm ─────────────────────────────────────────────────────────
|
|
1750
|
+
|
|
1751
|
+
def _cmd_swarm(self, args: str) -> None:
    """Report swarm status or point users to the swarm tools."""
    view = self.query_one(ChatScrollView)
    parts = args.strip().split(None, 1)
    sub = parts[0] if parts else ""
    rest = parts[1].strip() if len(parts) > 1 else ""
    if sub == "coordinate":
        if not rest:
            view.add_entry(AssistantText("Usage: /swarm coordinate <task>"))
        else:
            view.add_entry(AssistantText("Swarm coordination: use the swarm tools directly."))
        return
    if self._swarm_manager is None:
        view.add_entry(AssistantText("Swarm: not enabled. Set swarm.enabled=true in config."))
    else:
        view.add_entry(AssistantText("Swarm: active\nUsage: /swarm coordinate <task>"))
|
|
1766
|
+
|
|
1767
|
+
# ── VCR ───────────────────────────────────────────────────────────
|
|
1768
|
+
|
|
1769
|
+
def _cmd_vcr(self, args: str) -> None:
    """Control VCR session recording: start, stop, or list recordings."""
    view = self.query_one(ChatScrollView)
    text = args.strip()
    sub = text.split(None, 1)[0] if text else ""
    if sub == "start":
        if self._vcr_recorder is not None:
            view.add_entry(AssistantText("VCR recording already active."))
            return
        try:
            import uuid
            from llm_code.runtime.vcr import VCRRecorder
            recordings_dir = Path.home() / ".llm-code" / "recordings"
            recordings_dir.mkdir(parents=True, exist_ok=True)
            # Short random id keeps filenames unique but readable.
            target = recordings_dir / f"{uuid.uuid4().hex[:8]}.jsonl"
            self._vcr_recorder = VCRRecorder(target)
            if self._runtime is not None:
                self._runtime._vcr_recorder = self._vcr_recorder
            view.add_entry(AssistantText(f"VCR recording started: {target.name}"))
        except Exception as exc:
            view.add_entry(AssistantText(f"VCR start failed: {exc}"))
    elif sub == "stop":
        if self._vcr_recorder is None:
            view.add_entry(AssistantText("No active VCR recording."))
            return
        self._vcr_recorder.close()
        self._vcr_recorder = None
        if self._runtime is not None:
            self._runtime._vcr_recorder = None
        view.add_entry(AssistantText("VCR recording stopped."))
    elif sub == "list":
        recordings_dir = Path.home() / ".llm-code" / "recordings"
        if not recordings_dir.is_dir():
            view.add_entry(AssistantText("No recordings found."))
            return
        recordings = sorted(recordings_dir.glob("*.jsonl"))
        if not recordings:
            view.add_entry(AssistantText("No recordings found."))
            return
        try:
            from llm_code.runtime.vcr import VCRPlayer
            rows = []
            for rec in recordings:
                summary = VCRPlayer(rec).summary()
                rows.append(
                    f" {rec.name} events={summary['event_count']} "
                    f"duration={summary['duration']:.1f}s "
                    f"tools={sum(summary['tool_calls'].values())}"
                )
            view.add_entry(AssistantText("\n".join(rows)))
        except Exception as exc:
            view.add_entry(AssistantText(f"VCR list failed: {exc}"))
    else:
        active = "active" if self._vcr_recorder is not None else "inactive"
        view.add_entry(AssistantText(f"VCR: {active}\nUsage: /vcr start|stop|list"))
|
|
1824
|
+
|
|
1825
|
+
# ── Checkpoint ────────────────────────────────────────────────────
|
|
1826
|
+
|
|
1827
|
+
def _cmd_checkpoint(self, args: str) -> None:
    """Save, list, or resume full-session checkpoints."""
    view = self.query_one(ChatScrollView)
    try:
        from llm_code.runtime.checkpoint_recovery import CheckpointRecovery
    except ImportError:
        view.add_entry(AssistantText("Checkpoint recovery not available."))
        return
    recovery = CheckpointRecovery(Path.home() / ".llm-code" / "checkpoints")
    parts = args.strip().split(None, 1)
    sub = parts[0].lower() if parts else "list"
    rest = parts[1].strip() if len(parts) > 1 else ""
    if sub == "save":
        if self._runtime is None:
            view.add_entry(AssistantText("No active session to checkpoint."))
            return
        try:
            saved = recovery.save_checkpoint(self._runtime.session)
            view.add_entry(AssistantText(f"Checkpoint saved: {saved}"))
        except Exception as exc:
            view.add_entry(AssistantText(f"Save failed: {exc}"))
    elif sub in ("list", ""):
        try:
            entries = recovery.list_checkpoints()
            if not entries:
                view.add_entry(AssistantText("No checkpoints found."))
                return
            report = ["Checkpoints:"]
            report.extend(
                f" {e['session_id']} "
                f"{e['saved_at'][:19]} "
                f"({e['message_count']} msgs) "
                f"{e['project_path']}"
                for e in entries
            )
            view.add_entry(AssistantText("\n".join(report)))
        except Exception as exc:
            view.add_entry(AssistantText(f"List failed: {exc}"))
    elif sub == "resume":
        try:
            # Explicit id loads that checkpoint; otherwise pick the latest.
            if rest:
                session = recovery.load_checkpoint(rest)
            else:
                session = recovery.detect_last_checkpoint()
            if session is None:
                view.add_entry(AssistantText("No checkpoint found to resume."))
                return
            self._init_runtime()
            view.add_entry(AssistantText(
                f"Resumed session {session.id} ({len(session.messages)} messages)"
            ))
        except Exception as exc:
            view.add_entry(AssistantText(f"Resume failed: {exc}"))
    else:
        view.add_entry(AssistantText("Usage: /checkpoint [save|list|resume [session_id]]"))
|
|
1883
|
+
|
|
1884
|
+
# ── Memory ────────────────────────────────────────────────────────
|
|
1885
|
+
|
|
1886
|
+
def _cmd_memory(self, args: str) -> None:
    """Inspect and manage persistent key-value memory."""
    view = self.query_one(ChatScrollView)
    if not self._memory:
        view.add_entry(AssistantText("Memory not initialized."))
        return
    # Split into at most 3 tokens: sub-command, key, and value-remainder.
    parts = args.strip().split(None, 2)
    sub = parts[0] if parts else ""
    try:
        if sub == "set" and len(parts) > 2:
            self._memory.store(parts[1], parts[2])
            view.add_entry(AssistantText(f"Stored: {parts[1]}"))
        elif sub == "get" and len(parts) > 1:
            value = self._memory.recall(parts[1])
            if value:
                view.add_entry(AssistantText(str(value)))
            else:
                view.add_entry(AssistantText(f"Key not found: {parts[1]}"))
        elif sub == "delete" and len(parts) > 1:
            self._memory.delete(parts[1])
            view.add_entry(AssistantText(f"Deleted: {parts[1]}"))
        elif sub == "consolidate":
            view.add_entry(AssistantText("Use --lite mode for consolidate (requires async)."))
        elif sub == "history":
            summaries = self._memory.load_consolidated_summaries(limit=5)
            if not summaries:
                view.add_entry(AssistantText("No consolidated memories yet."))
            else:
                report = [f"Consolidated Memories ({len(summaries)} most recent)"]
                for idx, summary in enumerate(summaries):
                    # Show only the first three lines of each summary.
                    preview = "\n".join(summary.strip().splitlines()[:3])
                    report.append(f" #{idx+1} {preview}")
                view.add_entry(AssistantText("\n".join(report)))
        elif sub == "lint":
            flags = parts[1] if len(parts) > 1 else ""
            if "--deep" in flags:
                import asyncio
                asyncio.ensure_future(self._memory_lint_deep())
            elif "--fix" in flags:
                import asyncio
                asyncio.ensure_future(self._memory_lint_fix())
            else:
                self._memory_lint_fast()
        else:
            # Default: dump all entries with truncated values.
            entries = self._memory.get_all()
            report = [f"Memory ({len(entries)} entries)"]
            report.extend(f" {key}: {entry.value[:60]}" for key, entry in entries.items())
            if not entries:
                report.append(" No memories stored.")
            view.add_entry(AssistantText("\n".join(report)))
    except Exception as exc:
        view.add_entry(AssistantText(f"Memory error: {exc}"))
|
|
1938
|
+
|
|
1939
|
+
def _memory_lint_fast(self) -> None:
    """Run fast computational memory lint."""
    view = self.query_one(ChatScrollView)
    try:
        from llm_code.runtime.memory_lint import lint_memory
        outcome = lint_memory(memory_dir=self._memory._dir, cwd=self._cwd)
        report = outcome.format_report()
        # NOTE(review): this note is appended only when the lint found no
        # issues at all — confirm whether it was meant to appear always.
        if not outcome.stale and not outcome.coverage_gaps and not outcome.old:
            report += "\n\nContradictions: (requires LLM, skipped — use /memory lint --deep)"
        view.add_entry(AssistantText(report))
    except Exception as exc:
        view.add_entry(AssistantText(f"Lint failed: {exc}"))
|
|
1951
|
+
|
|
1952
|
+
async def _memory_lint_deep(self) -> None:
    """Run deep memory lint with LLM contradiction detection."""
    view = self.query_one(ChatScrollView)
    view.add_entry(AssistantText("Running deep memory lint..."))
    try:
        from llm_code.runtime.memory_lint import lint_memory_deep
        provider = self._runtime._provider if self._runtime else None
        outcome = await lint_memory_deep(
            memory_dir=self._memory._dir,
            cwd=self._cwd,
            llm_provider=provider,
        )
        view.add_entry(AssistantText(outcome.format_report()))
    except Exception as exc:
        view.add_entry(AssistantText(f"Deep lint failed: {exc}"))
|
|
1967
|
+
|
|
1968
|
+
async def _memory_lint_fix(self) -> None:
    """Run lint and auto-remove stale references."""
    view = self.query_one(ChatScrollView)
    try:
        from llm_code.runtime.memory_lint import lint_memory
        outcome = lint_memory(memory_dir=self._memory._dir, cwd=self._cwd)
        if not outcome.stale:
            view.add_entry(AssistantText("No stale references to fix."))
            return
        # Delete every stale entry reported by the lint pass.
        for stale_entry in outcome.stale:
            self._memory.delete(stale_entry.key)
        removed = len(outcome.stale)
        view.add_entry(AssistantText(f"Removed {removed} stale entries.\n\n{outcome.format_report()}"))
    except Exception as exc:
        view.add_entry(AssistantText(f"Lint fix failed: {exc}"))
|
|
1984
|
+
|
|
1985
|
+
# ── Repo Map ─────────────────────────────────────────────────────
|
|
1986
|
+
|
|
1987
|
+
def _cmd_map(self, args: str) -> None:
    """Show repo map."""
    from llm_code.runtime.repo_map import build_repo_map
    view = self.query_one(ChatScrollView)
    try:
        rendered = build_repo_map(self._cwd).to_compact(max_tokens=2000)
        if rendered:
            view.add_entry(AssistantText(f"# Repo Map\n{rendered}"))
        else:
            view.add_entry(AssistantText("No source files found."))
    except Exception as exc:
        view.add_entry(AssistantText(f"Error building repo map: {exc}"))
|
|
2001
|
+
|
|
2002
|
+
# ── MCP ───────────────────────────────────────────────────────────
|
|
2003
|
+
|
|
2004
|
+
def _cmd_mcp(self, args: str) -> None:
    """Dispatch the /mcp slash command.

    Subcommands:
        install <pkg>   -- persist an npx-launched MCP server and hot-start it
        remove <name>   -- delete a configured MCP server
        (anything else) -- open the interactive MCP marketplace browser
    """
    chat = self.query_one(ChatScrollView)
    parts = args.strip().split(None, 1)
    sub = parts[0] if parts else ""
    subargs = parts[1] if len(parts) > 1 else ""
    if sub == "install" and subargs:
        self._mcp_install(chat, subargs.strip())
    elif sub == "remove" and subargs:
        self._mcp_remove(chat, subargs.strip())
    else:
        self._mcp_browse()

def _mcp_install(self, chat, pkg: str) -> None:
    """Add *pkg* as an npx-launched MCP server in ~/.llm-code/config.json and start it."""
    short_name = pkg.split("/")[-1] if "/" in pkg else pkg
    # Single source of truth for the launch spec (previously duplicated three
    # times inline); stored/passed as copies to avoid shared-mutation aliasing.
    spec = {"command": "npx", "args": ["-y", pkg]}
    config_path = Path.home() / ".llm-code" / "config.json"
    try:
        import json

        config_data: dict = {}
        if config_path.exists():
            config_data = json.loads(config_path.read_text())
        config_data.setdefault("mcp_servers", {})[short_name] = dict(spec)
        config_path.parent.mkdir(parents=True, exist_ok=True)
        config_path.write_text(json.dumps(config_data, indent=2) + "\n")
        # Update in-memory config so the marketplace reflects the change.
        if self._config is not None:
            import dataclasses

            current_servers = dict(self._config.mcp_servers or {})
            current_servers[short_name] = dict(spec)
            self._config = dataclasses.replace(self._config, mcp_servers=current_servers)
        chat.add_entry(AssistantText(f"Added {short_name} to config. Starting server..."))
        # Hot-start the MCP server without requiring a restart.
        self._hot_start_mcp(short_name, dict(spec))
    except Exception as exc:
        chat.add_entry(AssistantText(f"Install failed: {exc}"))

def _mcp_remove(self, chat, name: str) -> None:
    """Delete MCP server *name* from the on-disk and in-memory config."""
    config_path = Path.home() / ".llm-code" / "config.json"
    try:
        import json

        if not config_path.exists():
            chat.add_entry(AssistantText("No config file found."))
            return
        config_data = json.loads(config_path.read_text())
        mcp_servers = config_data.get("mcp_servers", {})
        if name not in mcp_servers:
            chat.add_entry(AssistantText(f"MCP server '{name}' not found in config."))
            return
        del mcp_servers[name]
        config_path.write_text(json.dumps(config_data, indent=2) + "\n")
        # Keep the in-memory config in sync so the marketplace reflects removal.
        if self._config is not None:
            import dataclasses

            current = dict(self._config.mcp_servers or {})
            current.pop(name, None)
            self._config = dataclasses.replace(self._config, mcp_servers=current)
        chat.add_entry(AssistantText(f"Removed {name} from config."))
    except Exception as exc:
        chat.add_entry(AssistantText(f"Remove failed: {exc}"))

def _mcp_browse(self) -> None:
    """Open the interactive MCP marketplace browser screen."""
    from llm_code.tui.marketplace import MarketplaceBrowser, MarketplaceItem

    items: list[MarketplaceItem] = []
    configured: set[str] = set()

    # Already-configured MCP servers are listed first, marked installed.
    servers = {}
    if self._config and self._config.mcp_servers:
        servers = self._config.mcp_servers
    for name, cfg in servers.items():
        configured.add(name)
        cmd = ""
        if isinstance(cfg, dict):
            cmd = f"{cfg.get('command', '')} {' '.join(cfg.get('args', []))}".strip()
        items.append(MarketplaceItem(
            name=name,
            description=cmd or "(configured)",
            source="configured",
            installed=True,
            enabled=True,
            repo="",
        ))

    # Known/popular MCP server packages from the npm registry.
    known_mcp = [
        ("@anthropic/mcp-server-filesystem", "File system access via MCP"),
        ("@anthropic/mcp-server-github", "GitHub API integration via MCP"),
        ("@anthropic/mcp-server-slack", "Slack integration via MCP"),
        ("@anthropic/mcp-server-google-maps", "Google Maps API via MCP"),
        ("@anthropic/mcp-server-puppeteer", "Browser automation via MCP"),
        ("@anthropic/mcp-server-memory", "Persistent memory via MCP"),
        ("@anthropic/mcp-server-postgres", "PostgreSQL access via MCP"),
        ("@anthropic/mcp-server-sqlite", "SQLite database via MCP"),
        ("@modelcontextprotocol/server-brave-search", "Brave search via MCP"),
        ("@modelcontextprotocol/server-fetch", "HTTP fetch via MCP"),
        ("tavily-mcp", "Tavily AI search via MCP"),
        ("@supabase/mcp-server-supabase", "Supabase database via MCP"),
        ("context7-mcp", "Context7 documentation lookup via MCP"),
    ]
    for pkg_name, desc in known_mcp:
        # Configured entries may be keyed by either the short or the full name.
        short = pkg_name.split("/")[-1] if "/" in pkg_name else pkg_name
        if short not in configured and pkg_name not in configured:
            items.append(MarketplaceItem(
                name=pkg_name,
                description=desc,
                source="npm",
                installed=False,
                repo="",
                extra="npx",
            ))

    browser = MarketplaceBrowser("MCP Server Marketplace", items)
    self.push_screen(browser)
|
|
2113
|
+
|
|
2114
|
+
# ── IDE ───────────────────────────────────────────────────────────
|
|
2115
|
+
|
|
2116
|
+
def _cmd_ide(self, args: str) -> None:
    """Show IDE-bridge status, or a hint for the `connect` subcommand."""
    view = self.query_one(ChatScrollView)
    command = args.strip().lower()
    if command == "connect":
        view.add_entry(AssistantText("IDE bridge starts automatically when configured. Set ide.enabled=true in config."))
        return
    # Default path: report the current bridge status.
    bridge = self._ide_bridge
    if bridge is None:
        view.add_entry(AssistantText("IDE integration is disabled. Set ide.enabled=true in config."))
        return
    try:
        if not bridge.is_connected:
            view.add_entry(AssistantText(
                f"IDE bridge listening on port {bridge._config.port}, no IDE connected."
            ))
            return
        connected = bridge._server.connected_ides if bridge._server else []
        label = ", ".join(ide.name for ide in connected) if connected else "unknown"
        view.add_entry(AssistantText(f"IDE connected: {label}"))
    except Exception as exc:
        view.add_entry(AssistantText(f"IDE status error: {exc}"))
|
|
2136
|
+
|
|
2137
|
+
# ── HIDA ──────────────────────────────────────────────────────────
|
|
2138
|
+
|
|
2139
|
+
def _cmd_hida(self, args: str) -> None:
    """Report the latest HIDA classification summary, or current HIDA status."""
    view = self.query_one(ChatScrollView)
    if not (self._runtime and hasattr(self._runtime, "_last_hida_profile")):
        view.add_entry(AssistantText("HIDA: not initialized"))
        return
    profile = self._runtime._last_hida_profile
    if profile is None:
        # No classification has run yet; just report whether HIDA is enabled.
        enabled = (
            getattr(self._config, "hida", None) and self._config.hida.enabled
        )
        state = "enabled" if enabled else "disabled"
        view.add_entry(AssistantText(f"HIDA: {state}, no classification yet"))
        return
    try:
        from llm_code.hida.engine import HidaEngine

        view.add_entry(AssistantText(f"HIDA: {HidaEngine().build_summary(profile)}"))
    except Exception as exc:
        view.add_entry(AssistantText(f"HIDA: {exc}"))
|
|
2159
|
+
|
|
2160
|
+
# ── Skill ─────────────────────────────────────────────────────────
|
|
2161
|
+
|
|
2162
|
+
def _cmd_skill(self, args: str) -> None:
    """Dispatch the /skill slash command.

    Subcommands:
        install <owner/repo> -- clone a skills repo into ~/.llm-code/skills
        enable <name>        -- remove the .disabled marker for a skill
        disable <name>       -- create the .disabled marker for a skill
        remove <name>        -- delete an installed skill directory
        (anything else)      -- open the interactive skills marketplace
    """
    chat = self.query_one(ChatScrollView)
    parts = args.strip().split(None, 1)
    sub = parts[0] if parts else ""
    subargs = parts[1] if len(parts) > 1 else ""
    if sub == "install" and subargs:
        self._skill_install(chat, subargs.strip())
    elif sub == "enable" and subargs:
        if not self._is_safe_name(subargs):
            chat.add_entry(AssistantText("Invalid skill name."))
            return
        # Enabled state is tracked by the absence of a .disabled marker file.
        marker = Path.home() / ".llm-code" / "skills" / subargs / ".disabled"
        marker.unlink(missing_ok=True)
        chat.add_entry(AssistantText(f"Enabled {subargs}"))
    elif sub == "disable" and subargs:
        if not self._is_safe_name(subargs):
            chat.add_entry(AssistantText("Invalid skill name."))
            return
        marker = Path.home() / ".llm-code" / "skills" / subargs / ".disabled"
        marker.parent.mkdir(parents=True, exist_ok=True)
        marker.touch()
        chat.add_entry(AssistantText(f"Disabled {subargs}"))
    elif sub == "remove" and subargs:
        if not self._is_safe_name(subargs):
            chat.add_entry(AssistantText("Invalid skill name."))
            return
        d = Path.home() / ".llm-code" / "skills" / subargs
        if d.is_dir():
            shutil.rmtree(d)
            chat.add_entry(AssistantText(f"Removed {subargs}"))
        else:
            chat.add_entry(AssistantText(f"Not found: {subargs}"))
    else:
        self._skill_browse()

def _skill_install(self, chat, source: str) -> None:
    """Clone *source* (owner/repo or GitHub URL) into the local skills directory."""
    if not self._is_valid_repo(source):
        chat.add_entry(AssistantText("Usage: /skill install owner/repo"))
        return
    import tempfile

    repo = source.replace("https://github.com/", "").rstrip("/")
    name = repo.split("/")[-1]
    dest = Path.home() / ".llm-code" / "skills" / name
    if dest.exists():
        # Reinstall: drop any previous copy first.
        shutil.rmtree(dest)
    chat.add_entry(AssistantText(f"Cloning {repo}..."))
    try:
        with tempfile.TemporaryDirectory() as tmp:
            result = subprocess.run(
                ["git", "clone", "--depth", "1",
                 f"https://github.com/{repo}.git", tmp],
                capture_output=True, text=True, timeout=30,
            )
            if result.returncode == 0:
                # Prefer a top-level skills/ directory when the repo has one;
                # otherwise install the whole repo as the skill.
                skills_src = Path(tmp) / "skills"
                shutil.copytree(skills_src if skills_src.is_dir() else tmp, dest)
                chat.add_entry(AssistantText(f"Installed {name}. Restart to activate."))
            else:
                logger.warning("Skill clone failed for %s: %s", repo, result.stderr[:200])
                # Plain string (was an f-string with no placeholders, F541);
                # now matches _cmd_plugin's identical message.
                chat.add_entry(AssistantText("Clone failed. Check the repository URL."))
    except Exception as exc:
        chat.add_entry(AssistantText(f"Install failed: {exc}"))

def _skill_browse(self) -> None:
    """Open the interactive skills marketplace browser."""
    from llm_code.tui.marketplace import MarketplaceBrowser, MarketplaceItem
    from llm_code.marketplace.builtin_registry import get_all_known_plugins

    items: list[MarketplaceItem] = []
    installed_names: set[str] = set()

    # Installed skills (from runtime).
    all_skills: list = []
    if self._skills:
        all_skills = list(self._skills.auto_skills) + list(self._skills.command_skills)
    for s in all_skills:
        installed_names.add(s.name)
        tokens = len(s.content) // 4  # rough 4-chars-per-token estimate
        mode = "auto" if s.auto else f"/{s.trigger}"
        items.append(MarketplaceItem(
            name=s.name,
            description=f"{mode} ~{tokens} tokens",
            source="installed",
            installed=True,
            enabled=not (Path.home() / ".llm-code" / "skills" / s.name / ".disabled").exists(),
            repo="",
            extra=mode,
        ))

    # Installed plugins (check the filesystem for newly installed ones).
    try:
        from llm_code.marketplace.installer import PluginInstaller
        pi = PluginInstaller(Path.home() / ".llm-code" / "plugins")
        for p in pi.list_installed():
            if p.manifest.name not in installed_names:
                installed_names.add(p.manifest.name)
                items.append(MarketplaceItem(
                    name=p.manifest.name,
                    description=getattr(p.manifest, "description", ""),
                    source="installed",
                    installed=True,
                    enabled=p.enabled,
                    repo="",
                    extra=f"v{p.manifest.version}",
                ))
    except Exception:
        # Best-effort: the marketplace still opens without plugin info.
        pass

    # Marketplace plugins that ship skills and are not yet installed.
    for p in get_all_known_plugins():
        if p.get("skills", 0) > 0 and p["name"] not in installed_names:
            items.append(MarketplaceItem(
                name=p["name"],
                description=p.get("desc", ""),
                source=p.get("source", "official"),
                installed=False,
                repo=p.get("repo", ""),
                extra=f"{p['skills']} skills",
            ))

    browser = MarketplaceBrowser("Skills Marketplace", items)
    self.push_screen(browser)
|
|
2282
|
+
|
|
2283
|
+
# ── Plugin ────────────────────────────────────────────────────────
|
|
2284
|
+
|
|
2285
|
+
def _cmd_plugin(self, args: str) -> None:
    """Handle the /plugin slash command.

    Subcommands: install <owner/repo>, enable <name>, disable <name>,
    remove/uninstall <name>; with no subcommand, opens the interactive
    plugin marketplace browser.
    """
    chat = self.query_one(ChatScrollView)
    parts = args.strip().split(None, 1)
    sub = parts[0] if parts else ""
    subargs = parts[1] if len(parts) > 1 else ""
    try:
        from llm_code.marketplace.installer import PluginInstaller
        installer = PluginInstaller(Path.home() / ".llm-code" / "plugins")
    except ImportError:
        # Plugin subsystem is an optional component; bail out gracefully.
        chat.add_entry(AssistantText("Plugin system not available."))
        return
    if sub == "install" and subargs:
        source = subargs.strip()
        # Accepts owner/repo or a full GitHub URL (validated by _is_valid_repo).
        if not self._is_valid_repo(source):
            chat.add_entry(AssistantText("Usage: /plugin install owner/repo"))
            return
        repo = source.replace("https://github.com/", "").rstrip("/")
        name = repo.split("/")[-1]
        dest = Path.home() / ".llm-code" / "plugins" / name
        if dest.exists():
            # Reinstall: remove any previous copy before cloning.
            shutil.rmtree(dest)
        chat.add_entry(AssistantText(f"Cloning {repo}..."))
        try:
            # Shallow clone straight into the plugins directory.
            result = subprocess.run(
                ["git", "clone", "--depth", "1",
                 f"https://github.com/{repo}.git", str(dest)],
                capture_output=True, text=True, timeout=30,
            )
            if result.returncode == 0:
                # Newly installed plugins start out enabled.
                installer.enable(name)
                chat.add_entry(AssistantText(f"Installed {name}. Restart to activate."))
            else:
                # Log a truncated stderr for diagnosis; show a generic message.
                logger.warning("Plugin clone failed for %s: %s", repo, result.stderr[:200])
                chat.add_entry(AssistantText("Clone failed. Check the repository URL."))
        except Exception as exc:
            chat.add_entry(AssistantText(f"Install failed: {exc}"))
    elif sub == "enable" and subargs:
        if not self._is_safe_name(subargs):
            chat.add_entry(AssistantText("Invalid plugin name."))
            return
        try:
            installer.enable(subargs)
            chat.add_entry(AssistantText(f"Enabled {subargs}"))
        except Exception as exc:
            chat.add_entry(AssistantText(f"Enable failed: {exc}"))
    elif sub == "disable" and subargs:
        if not self._is_safe_name(subargs):
            chat.add_entry(AssistantText("Invalid plugin name."))
            return
        try:
            installer.disable(subargs)
            chat.add_entry(AssistantText(f"Disabled {subargs}"))
        except Exception as exc:
            chat.add_entry(AssistantText(f"Disable failed: {exc}"))
    elif sub in ("remove", "uninstall") and subargs:
        if not self._is_safe_name(subargs):
            chat.add_entry(AssistantText("Invalid plugin name."))
            return
        try:
            installer.uninstall(subargs)
            chat.add_entry(AssistantText(f"Removed {subargs}"))
        except Exception as exc:
            chat.add_entry(AssistantText(f"Remove failed: {exc}"))
    else:
        # Open interactive marketplace browser
        from llm_code.tui.marketplace import MarketplaceBrowser, MarketplaceItem
        from llm_code.marketplace.builtin_registry import get_all_known_plugins

        items: list[MarketplaceItem] = []

        # Installed plugins first
        installed_names: set[str] = set()
        try:
            installed = installer.list_installed()
            for p in installed:
                installed_names.add(p.manifest.name)
                items.append(MarketplaceItem(
                    name=p.manifest.name,
                    description=getattr(p.manifest, "description", ""),
                    source="installed",
                    installed=True,
                    enabled=p.enabled,
                    repo="",
                    extra=f"v{p.manifest.version}",
                ))
        except Exception:
            # Best-effort: still show the known-plugin catalog below.
            pass

        # Known marketplace plugins not yet installed
        for p in get_all_known_plugins():
            if p["name"] not in installed_names:
                skills_count = p.get("skills", 0)
                extra = f"{skills_count} skills" if skills_count else ""
                items.append(MarketplaceItem(
                    name=p["name"],
                    description=p.get("desc", ""),
                    source=p.get("source", "official"),
                    installed=False,
                    repo=p.get("repo", ""),
                    extra=extra,
                ))

        browser = MarketplaceBrowser("Plugin Marketplace", items)
        self.push_screen(browser)
|
|
2389
|
+
|
|
2390
|
+
# ── Marketplace ItemAction handler ────────────────────────────────
|
|
2391
|
+
|
|
2392
|
+
def on_marketplace_browser_item_action(
    self, event: "MarketplaceBrowser.ItemAction"
) -> None:
    """Handle marketplace item selection (install/enable/disable/remove)."""
    from llm_code.tui.marketplace import MarketplaceBrowser
    from llm_code.tui.chat_view import AssistantText

    chat = self.query_one(ChatScrollView)
    item, action = event.item, event.action
    # Sources managed by the plugin installer route to /plugin;
    # everything else is treated as a skill.
    plugin_sources = ("official", "community", "installed")

    if action == "install":
        if item.source == "npm":
            # npm-sourced items are MCP server packages.
            self._cmd_mcp(f"install {item.name}")
        elif item.repo:
            if item.source in ("official", "community"):
                self._cmd_plugin(f"install {item.repo}")
            else:
                self._cmd_skill(f"install {item.repo}")
        else:
            chat.add_entry(AssistantText(
                f"No repo URL available for {item.name}. Install manually."
            ))
    elif action in ("enable", "disable", "remove"):
        handler = self._cmd_plugin if item.source in plugin_sources else self._cmd_skill
        handler(f"{action} {item.name}")
    # Return focus to the InputBar after the marketplace action.
    self.query_one(InputBar).focus()
|