vmcode-cli 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/INSTALLATION_METHODS.md +181 -0
- package/LICENSE +21 -0
- package/README.md +199 -0
- package/bin/npm-wrapper.js +171 -0
- package/bin/rg +0 -0
- package/bin/rg.exe +0 -0
- package/config.yaml.example +159 -0
- package/package.json +42 -0
- package/requirements.txt +7 -0
- package/scripts/install.js +132 -0
- package/setup.bat +114 -0
- package/setup.sh +135 -0
- package/src/__init__.py +4 -0
- package/src/core/__init__.py +1 -0
- package/src/core/agentic.py +2342 -0
- package/src/core/chat_manager.py +1201 -0
- package/src/core/config_manager.py +269 -0
- package/src/core/init.py +161 -0
- package/src/core/sub_agent.py +174 -0
- package/src/exceptions.py +75 -0
- package/src/llm/__init__.py +1 -0
- package/src/llm/client.py +149 -0
- package/src/llm/config.py +445 -0
- package/src/llm/prompts.py +569 -0
- package/src/llm/providers.py +402 -0
- package/src/llm/token_tracker.py +220 -0
- package/src/ui/__init__.py +1 -0
- package/src/ui/banner.py +103 -0
- package/src/ui/commands.py +489 -0
- package/src/ui/displays.py +167 -0
- package/src/ui/main.py +351 -0
- package/src/ui/prompt_utils.py +162 -0
- package/src/utils/__init__.py +1 -0
- package/src/utils/editor.py +158 -0
- package/src/utils/gitignore_filter.py +149 -0
- package/src/utils/logger.py +254 -0
- package/src/utils/markdown.py +32 -0
- package/src/utils/settings.py +94 -0
- package/src/utils/tools/__init__.py +55 -0
- package/src/utils/tools/command_executor.py +217 -0
- package/src/utils/tools/create_file.py +143 -0
- package/src/utils/tools/definitions.py +193 -0
- package/src/utils/tools/directory.py +374 -0
- package/src/utils/tools/file_editor.py +345 -0
- package/src/utils/tools/file_helpers.py +109 -0
- package/src/utils/tools/file_reader.py +331 -0
- package/src/utils/tools/formatters.py +458 -0
- package/src/utils/tools/parallel_executor.py +195 -0
- package/src/utils/validation.py +117 -0
- package/src/utils/web_search.py +71 -0
- package/vmcode-proxy/.env.example +5 -0
- package/vmcode-proxy/README.md +235 -0
- package/vmcode-proxy/package-lock.json +947 -0
- package/vmcode-proxy/package.json +20 -0
- package/vmcode-proxy/server.js +248 -0
- package/vmcode-proxy/server.js.bak +157 -0
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
"""UI display functions for command outputs."""
|
|
2
|
+
|
|
3
|
+
from rich.table import Table
|
|
4
|
+
from rich.panel import Panel
|
|
5
|
+
from rich import box
|
|
6
|
+
from llm import config
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def show_provider_table(current_provider: str, console):
    """Render the provider status table plus switch-provider usage help.

    Args:
        current_provider: Name of the currently active provider.
        console: Rich Console instance for output.
    """
    table = Table("Provider", "Status", "Details", title="Providers", box=box.SIMPLE_HEAD)

    for name in config.get_providers():
        provider_cfg = config.get_provider_config(name)
        model_name = provider_cfg.get('model', 'N/A')

        # Readiness criterion differs: local providers need a model path,
        # remote providers need an API key.
        if name == 'local':
            status = '✅' if provider_cfg.get('model') else '❌ (set model path)'
        else:
            status = '✅' if provider_cfg.get('api_key') else '❌ (set API key)'

        active_marker = ' [green](active)[/green]' if name == current_provider else ''
        # Model names are truncated to keep the table narrow.
        table.add_row(name.capitalize(), status, f"{model_name[:40]}{active_marker}")

    console.print(table)

    help_text = """Usage: /provider <name>

Examples:
/provider openrouter
/provider glm
/provider local
/provider gemini
/provider minimax
/provider anthropic
/provider kimi

After switching, use:
/key <api_key> - Set API key for current provider
/model <model> - Set model for current provider"""
    console.print(Panel(help_text, title="[cyan]Switch Provider[/cyan]"))
    console.print("")
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def show_help_table(console):
    """Render the command reference and keybind reference panels.

    Args:
        console: Rich Console instance for output.
    """
    console.print("")

    commands = Table(show_header=True, box=box.SIMPLE_HEAD)
    commands.add_column("Command", no_wrap=True)
    commands.add_column("Description")

    # (command, description) pairs in display order.
    command_rows = (
        ("/help", "Show help"),
        ("/exit", "Exit chat"),
        ("/debug", "Toggle debug"),
        ("/mode", "Toggle Plan/Edit/Learn mode"),
        ("/logging", "Toggle conversation logging"),
        ("/preplan", "Toggle pre-tool planning"),
        ("/config", "Show all configuration settings"),
        ("/provider [name]", "Switch provider or show provider table"),
        ("/key <key>", "Set API key for current provider"),
        ("/model <name>", "Set model for current provider"),
        ("/usage [provider] [in|out] <cost>", "Set/view provider-specific token cost"),
        ("/compact [-a]", "Compact context with an AI summary (add -a for aggressive mode)"),
        ("/init", "Generate agents.md"),
        ("/edit, /e", "Open editor for multi-line input"),
    )
    for command, description in command_rows:
        commands.add_row(command, description)

    console.print(Panel(commands, title="[bold cyan]Commands[/bold cyan]", border_style="grey23", padding=(0, 2)))

    # Keybinds section
    console.print()
    keybind_table = Table(show_header=True, box=box.SIMPLE_HEAD)
    keybind_table.add_column("Keybind", no_wrap=True)
    keybind_table.add_column("Action")

    keybind_rows = (
        ("Tab", "Toggle Plan/Edit/Learn mode"),
        ("Shift+Tab", "Cycle plan/approval/learning mode (mode-dependent)"),
        ("Ctrl+C", "Interrupt response"),
        ("Ctrl+C (2x)", "Exit program"),
    )
    for keybind, action in keybind_rows:
        keybind_table.add_row(keybind, action)

    console.print(Panel(keybind_table, title="[bold cyan]Keybinds[/bold cyan]", border_style="grey23", padding=(0, 2)))
    console.print("")
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def show_config_overview(chat_manager, console, debug_mode_container, current_provider):
    """Display comprehensive configuration overview.

    Shows three sections: runtime toggles, per-provider model/pricing/key
    status, and a quick command reference.

    Args:
        chat_manager: ChatManager instance for runtime state
        console: Rich Console instance for output
        debug_mode_container: Dict with 'debug' key for debug mode state
        current_provider: Name of the currently active provider
    """
    from core.config_manager import ConfigManager
    config_data = ConfigManager().load()

    console.print()

    def _on_off(flag):
        """Render a boolean toggle as colored ON/OFF markup."""
        return "[green]ON[/green]" if flag else "[dim]OFF[/dim]"

    # ===== Runtime Settings =====
    runtime = Table("Setting", "Status", title="Runtime Settings", box=box.SIMPLE_HEAD)
    runtime.add_row("Debug Mode", _on_off(debug_mode_container.get('debug')))
    runtime.add_row("Pre-tool Planning", _on_off(chat_manager.pre_tool_planning_enabled))
    runtime.add_row("Conversation Logging", _on_off(chat_manager.markdown_logger))

    mode_labels = {"edit": "EDIT", "plan": "PLAN", "learn": "LEARN"}
    mode_colors = {"edit": "green", "plan": "cyan", "learn": "magenta"}
    mode = chat_manager.interaction_mode
    mode_color = mode_colors.get(mode, "white")
    runtime.add_row("Interaction Mode", f"[{mode_color}]{mode_labels.get(mode, mode.upper())}[/{mode_color}]")

    approve_labels = {"safe": "SAFE", "normal": "NORMAL", "danger": "DANGER"}
    approve_colors = {"safe": "green", "normal": "yellow", "danger": "red"}
    approve_mode = chat_manager.approve_mode
    approve_color = approve_colors.get(approve_mode, "white")
    runtime.add_row("Approval Mode", f"[{approve_color}]{approve_labels.get(approve_mode, approve_mode.upper())}[/{approve_color}]")
    console.print(runtime)

    # ===== Provider Settings =====
    console.print()
    provider_table = Table("Provider", "Model", "$ in/out", "API Key", title="Providers", box=box.SIMPLE_HEAD)

    active_provider = config_data.get('LAST_PROVIDER', 'Not set').upper()
    provider_table.add_row("[green]Active[/green]", f"[green]{active_provider}[/green]", "", "")

    def _truncate(value, max_len=35):
        """Ellipsize long values so the table stays narrow."""
        return value[:max_len - 3] + "..." if len(value) > max_len else value

    # Local provider: no API key and no per-token pricing.
    provider_table.add_row("Local", _truncate(config_data.get('LOCAL_MODEL_PATH', 'Not set')), "N/A", "N/A")

    # API providers
    model_prices = config_data.get('MODEL_PRICES', {})
    for name in ["OpenRouter", "GLM", "OpenAI", "Gemini", "MiniMax", "Anthropic", "Kimi"]:
        model = config_data.get(f'{name.upper()}_MODEL', 'Not set')
        api_key = config_data.get(f'{name.upper()}_API_KEY', '')
        key_status = "[green]✓[/green]" if api_key else "[red]✗[/red]"

        # Model-specific pricing is shown only when a nonzero rate exists.
        cost_str = "Not set"
        if model and model in model_prices:
            cost_in = model_prices[model].get('cost_in', 0)
            cost_out = model_prices[model].get('cost_out', 0)
            if cost_in > 0 or cost_out > 0:
                cost_str = f"${cost_in:.2f}/${cost_out:.2f}"

        provider_table.add_row(name, _truncate(model), cost_str, key_status)

    console.print(provider_table)

    # ===== Quick Commands Reference =====
    console.print()
    help_text = """[bold cyan]Commands:[/bold cyan] /provider <name> /model <path> /key <key>
[cyan] :[/cyan] /usage [provider] [in|out] <$> /debug /mode /logging /preplan"""
    console.print(Panel(help_text, title="[cyan]Quick Reference[/cyan]"))
    console.print()
|
|
167
|
+
|
package/src/ui/main.py
ADDED
|
@@ -0,0 +1,351 @@
|
|
|
1
|
+
"""Main entry point for vmCode chatbot."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import sys
|
|
5
|
+
import time
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
# Add src directory to Python path so we can import llm, core, utils modules
|
|
9
|
+
src_dir = Path(__file__).resolve().parent.parent
|
|
10
|
+
if str(src_dir) not in sys.path:
|
|
11
|
+
sys.path.insert(0, str(src_dir))
|
|
12
|
+
|
|
13
|
+
from rich.console import Console
|
|
14
|
+
from rich.theme import Theme
|
|
15
|
+
from rich.markdown import Markdown
|
|
16
|
+
from rich.text import Text
|
|
17
|
+
from prompt_toolkit import PromptSession
|
|
18
|
+
from prompt_toolkit.key_binding import KeyBindings
|
|
19
|
+
from prompt_toolkit.formatted_text import ANSI
|
|
20
|
+
from prompt_toolkit.styles import Style
|
|
21
|
+
|
|
22
|
+
from llm import config
|
|
23
|
+
from llm.config import TOOLS_ENABLED
|
|
24
|
+
from core.chat_manager import ChatManager
|
|
25
|
+
from ui.commands import process_command
|
|
26
|
+
from ui.banner import display_startup_banner
|
|
27
|
+
from ui.prompt_utils import get_bottom_toolbar_text
|
|
28
|
+
from core.agentic import agentic_answer
|
|
29
|
+
from utils.settings import MonokaiDarkBGStyle
|
|
30
|
+
from utils.markdown import left_align_headings
|
|
31
|
+
from exceptions import VmCodeError
|
|
32
|
+
|
|
33
|
+
# Console setup
|
|
34
|
+
console = Console(theme=Theme({
|
|
35
|
+
"markdown.hr": "grey50",
|
|
36
|
+
"markdown.heading": "default",
|
|
37
|
+
"markdown.h1": "default",
|
|
38
|
+
"markdown.h2": "default",
|
|
39
|
+
"markdown.h3": "default",
|
|
40
|
+
"markdown.h4": "default",
|
|
41
|
+
"markdown.h5": "default",
|
|
42
|
+
"markdown.h6": "default",
|
|
43
|
+
}))
|
|
44
|
+
|
|
45
|
+
# Debug mode container (used as mutable reference)
|
|
46
|
+
DEBUG_MODE_CONTAINER = {'debug': False}
|
|
47
|
+
|
|
48
|
+
# Ctrl+C exit tracking (for double Ctrl+C to exit)
|
|
49
|
+
CTRL_C_TRACKER = {
|
|
50
|
+
'last_time': 0,
|
|
51
|
+
'exit_window': 2.0, # 2 second window for double Ctrl+C
|
|
52
|
+
'exit_requested': False
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
# Path constants
|
|
56
|
+
REPO_ROOT = Path.cwd().resolve()
|
|
57
|
+
APP_ROOT = (
|
|
58
|
+
Path(sys.executable).resolve().parent
|
|
59
|
+
if getattr(sys, "frozen", False)
|
|
60
|
+
else Path(__file__).resolve().parents[2]
|
|
61
|
+
)
|
|
62
|
+
# Platform-agnostic ripgrep path: 'rg' on Unix/Linux, 'rg.exe' on Windows
|
|
63
|
+
RG_EXE_NAME = "rg.exe" if os.name == "nt" else "rg"
|
|
64
|
+
RG_EXE_PATH = (APP_ROOT / "bin" / RG_EXE_NAME).resolve()
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class ThinkingIndicator:
    """Simple spinner wrapper that always cleans up.

    Wraps ``console.status`` so callers can start/stop (or pause/resume)
    the spinner without double-starting it. Also usable as a context
    manager, so the spinner is guaranteed to stop even if the body raises.
    """

    def __init__(self, console, message="Thinking ...", spinner="dots"):
        # console: Rich Console instance; only its .status() factory is used.
        self.console = console
        self.message = message
        self.spinner = spinner
        self._status = None    # lazily-created console.status handle, reused across pauses
        self._active = False   # True while the spinner is rendering

    def start(self):
        """Begin rendering the spinner; safe to call when already active."""
        if self._status is None:
            self._status = self.console.status(self.message, spinner=self.spinner, spinner_style="cyan")
        if not self._active:
            self._status.start()
            self._active = True

    def stop(self):
        """Stop rendering the spinner; safe to call when already stopped."""
        if self._status and self._active:
            self._status.stop()
            self._active = False

    def pause(self):
        """Alias for stop(); reads better around interleaved console output."""
        self.stop()

    def resume(self):
        """Alias for start(); reads better around interleaved console output."""
        self.start()

    # Context-manager support: `with ThinkingIndicator(console): ...`
    # guarantees cleanup on exceptions (backward-compatible addition).
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc, tb):
        self.stop()
        return False  # never swallow exceptions
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def check_double_ctrl_c() -> bool:
    """
    Report whether this Ctrl+C press should terminate the program.

    A second Ctrl+C arriving within the configured exit window counts as
    a double press. The module-level tracker's timestamp and
    exit_requested flag are updated as a side effect.
    """
    tracker = CTRL_C_TRACKER

    # A previously confirmed exit request always wins.
    if tracker['exit_requested']:
        return True

    now = time.time()
    within_window = (now - tracker['last_time']) <= tracker['exit_window']

    if within_window:
        # Second press inside the window: latch the exit flag.
        tracker['exit_requested'] = True
        return True

    # First press (or window expired): remember when it happened and carry on.
    tracker['last_time'] = now
    return False
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def main() -> None:
    """Main interactive chat loop.

    Flow: warn about missing config.yaml, start the local server if
    needed, build a prompt_toolkit session with Tab/Shift+Tab/ESC-ESC
    bindings, then loop reading input and dispatching either to the
    agentic tool loop (TOOLS_ENABLED) or to a plain streaming completion.
    Double Ctrl+C (tracked via CTRL_C_TRACKER) exits; a session summary
    is printed on the way out.
    """
    # Check for config.yaml and provide helpful message if missing.
    # NOTE(review): this resolves to the package root (two levels above
    # src/ui/) — confirm it matches the install layout.
    config_path = Path(__file__).resolve().parents[1].parent / "config.yaml"
    config_example = Path(__file__).resolve().parents[1].parent / "config.yaml.example"

    if not config_path.exists():
        console.print("\n[yellow]No config.yaml found![/yellow]")
        console.print("\n[cyan]Getting Started:[/cyan]\n")

        if config_example.exists():
            console.print(f"1. Copy the example config:")
            console.print(f" [dim]cp config.yaml.example config.yaml[/dim]\n")
            console.print(f"2. Edit config.yaml and add your API keys\n")
            console.print(f"3. Or set environment variables:")
            console.print(f" [dim]export OPENAI_API_KEY='sk-your-key'[/dim]\n")
            console.print(f"4. Then run: [green]vmcode[/green]\n")
            console.print(f"[dim]You can also set keys interactively with: /key <your-key>[/dim]\n")
        else:
            console.print("[red]config.yaml.example not found. Please reinstall vmcode.[/red]\n")

        # Continue anyway - user can set keys via /key command
        console.print("[yellow]Continuing... You can set API keys with the /key command.[/yellow]\n")

    chat_manager = ChatManager()
    thinking_indicator = ThinkingIndicator(console)
    # Start server if needed (only fatal when the active provider is local).
    console.print("[yellow]Initializing...[/yellow]")
    chat_manager.server_process = chat_manager.start_server_if_needed()
    if not chat_manager.server_process and chat_manager.client.provider == "local":
        console.print("[red]Failed to start local server![/red]")
        return

    display_startup_banner(chat_manager.approve_mode, chat_manager.interaction_mode)

    # Setup prompt_toolkit with Tab key binding
    bindings = KeyBindings()

    def get_prompt(chat_manager):
        """Return colored prompt based on current mode."""
        if chat_manager.interaction_mode == "plan":
            prompt_text = Text.assemble(
                (" Plan", "bold cyan"),
                (" > ", "white")
            )
        elif chat_manager.interaction_mode == "edit":
            prompt_text = Text.assemble(
                (" Edit", "green"),
                (" > ", "white")
            )
        else:
            prompt_text = Text.assemble(
                (" Learn", "magenta"),
                (" > ", "white")
            )
        # Capture rich output as ANSI so prompt_toolkit can render it.
        with console.capture() as capture:
            console.print(prompt_text, end="")
        return ANSI(capture.get())

    @bindings.add('tab')
    def toggle_mode(event):
        """Toggle between Plan and Edit modes."""
        chat_manager.toggle_interaction_mode()
        event.app.invalidate()

    @bindings.add('s-tab')
    def toggle_approve_mode(event):
        """Toggle plan types (Plan), approval modes (Edit), or learning modes (Learn) using Shift+Tab."""
        if chat_manager.interaction_mode == "learn":
            chat_manager.cycle_learning_mode()
        else:
            chat_manager.cycle_approve_mode()
        event.app.invalidate()

    @bindings.add('escape', 'escape')
    def clear_input(event):
        """Clear the current input line on double ESC press."""
        buffer = event.app.current_buffer
        if buffer is not None:
            buffer.text = ""
            event.app.invalidate()

    # noreverse keeps the toolbar from being rendered in inverse video.
    toolbar_style = Style.from_dict({
        "bottom-toolbar": "bg:default fg:white noreverse",
        "bottom-toolbar.text": "bg:default fg:white noreverse",
    })
    session = PromptSession(key_bindings=bindings, style=toolbar_style)

    try:
        while True:
            # Check if exit was requested via double Ctrl+C
            if CTRL_C_TRACKER['exit_requested']:
                break

            try:
                # Use prompt_toolkit for input with Tab key binding and dynamic prompt
                prompt_kwargs = {
                    "bottom_toolbar": lambda: get_bottom_toolbar_text(chat_manager),
                }
                user_input = session.prompt(
                    lambda: get_prompt(chat_manager),
                    **prompt_kwargs,
                ).strip()

                if not user_input:
                    continue

                # Process commands
                cmd_result, modified_input = process_command(chat_manager, user_input, console, DEBUG_MODE_CONTAINER)
                if cmd_result == "exit":
                    break
                elif cmd_result == "handled":
                    continue

                # Use modified input if provided (from /edit command)
                final_input = modified_input if modified_input else user_input

                chat_manager.maybe_auto_compact(console)

                thinking_indicator.start()
                try:
                    console.print()  # Extra newline after user input to separate from LLM response
                    # Add user message
                    if TOOLS_ENABLED:
                        chat_manager.command_history.clear()
                        try:
                            agentic_answer(
                                chat_manager,
                                final_input,
                                console,
                                REPO_ROOT,
                                RG_EXE_PATH,
                                DEBUG_MODE_CONTAINER['debug'],
                                thinking_indicator=thinking_indicator,
                                pre_tool_planning_enabled=chat_manager.pre_tool_planning_enabled,
                            )
                            chat_manager._update_context_tokens()
                        except KeyboardInterrupt:
                            # First Ctrl+C aborts the response; a second within
                            # the window sets the exit flag checked at loop top.
                            if not check_double_ctrl_c():
                                console.print("\n[yellow]Response interrupted (Ctrl+C). Press Ctrl+C again to exit.[/yellow]")
                            console.print()  # Extra spacing
                        except VmCodeError as e:
                            # Handle all vmCode custom exceptions gracefully
                            console.print(f"[red]Error: {e}[/red]", markup=False)
                            if hasattr(e, 'details') and e.details:
                                console.print(f"[dim]Details: {e.details}[/dim]", markup=False)
                    else:
                        # Non-agentic path: plain streaming chat completion.
                        chat_manager.messages.append({"role": "user", "content": final_input})

                        try:
                            stream = chat_manager.client.chat_completion(
                                chat_manager.messages, stream=True
                            )
                            # A string return indicates an error message, not a stream.
                            if isinstance(stream, str):
                                console.print(f"[red]Error: {stream}[/red]")
                                continue

                            try:
                                # Stream response
                                chunks = []
                                usage_data = None
                                for chunk in stream:
                                    # Check if this is usage data (final chunk)
                                    if isinstance(chunk, dict) and '__usage__' in chunk:
                                        usage_data = chunk['__usage__']
                                    else:
                                        chunks.append(chunk)
                                full_response = "".join(chunks)

                                if full_response.strip():
                                    md = Markdown(left_align_headings(full_response), code_theme=MonokaiDarkBGStyle, justify="left")
                                    console.print(md)

                                    chat_manager.messages.append(
                                        {"role": "assistant", "content": full_response}
                                    )

                                    # Add usage tracking
                                    if usage_data:
                                        chat_manager.token_tracker.add_usage(usage_data)

                                    chat_manager._update_context_tokens()

                                console.print()  # Extra spacing
                            except KeyboardInterrupt:
                                # Ctrl+C pressed during streaming
                                if not check_double_ctrl_c():
                                    console.print("\n[yellow]Response interrupted (Ctrl+C). Press Ctrl+C again to exit.[/yellow]")
                                # Save partial response
                                if chunks:
                                    partial = "".join(chunks)
                                    if partial.strip():
                                        partial_with_note = partial + "\n\n*[Response interrupted]*"
                                        md = Markdown(left_align_headings(partial_with_note), code_theme=MonokaiDarkBGStyle, justify="left")
                                        console.print(md)
                                        # Partial text is stored WITHOUT the interrupted note.
                                        chat_manager.messages.append(
                                            {"role": "assistant", "content": partial}
                                        )
                                console.print()  # Extra spacing
                            finally:
                                # Ensure HTTP connection is closed
                                if hasattr(stream, 'close'):
                                    stream.close()

                        except VmCodeError as e:
                            # Handle all vmCode custom exceptions gracefully
                            console.print(f"[red]Error: {e}[/red]", markup=False)
                            if hasattr(e, 'details') and e.details:
                                console.print(f"[dim]Details: {e.details}[/dim]", markup=False)
                        except Exception as e:
                            console.print(f"[red]Error during generation: {e}[/red]", markup=False)
                finally:
                    # Spinner must stop no matter how generation ended.
                    thinking_indicator.stop()

            except KeyboardInterrupt:
                # Ctrl+C pressed while waiting for input
                if check_double_ctrl_c():
                    break
                else:
                    console.print("\n[dim](Press Ctrl+C again to exit, or type 'exit' to quit)[/dim]")
                    continue

    finally:
        # Display session summary before cleanup
        summary = chat_manager.token_tracker.get_session_summary()
        console.print(f"\n[white]Session Summary: {summary}[/white]")

        chat_manager.cleanup()
        console.print("[yellow]Goodbye![/yellow]")
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
if __name__ == "__main__":
|
|
351
|
+
main()
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
"""Shared prompt utilities for vmCode CLI."""
|
|
2
|
+
|
|
3
|
+
from prompt_toolkit import PromptSession
|
|
4
|
+
from prompt_toolkit.key_binding import KeyBindings
|
|
5
|
+
from prompt_toolkit.styles import Style
|
|
6
|
+
from prompt_toolkit.formatted_text import HTML
|
|
7
|
+
from llm.config import get_provider_config, APPROVE_MODE_LABELS, LEARNING_MODE_LABELS, PLAN_TYPE_LABELS
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def get_bottom_toolbar_text(chat_manager):
    """Return bottom toolbar text with model, mode, and token counts.

    This is extracted from main.py for reuse in confirmation prompts.
    The toolbar has three variants (Plan type / Learn mode / approval
    mode); they share one layout, built by the _toolbar() helper, instead
    of three hand-copied HTML templates.

    Args:
        chat_manager: ChatManager instance for state access

    Returns:
        HTML formatted toolbar text
    """
    provider_name = chat_manager.client.provider
    model = get_provider_config(provider_name).get("model", "Unknown")

    # Get token counts
    tracker = chat_manager.token_tracker
    tokens_curr = tracker.current_context_tokens
    tokens_in = tracker.total_prompt_tokens
    tokens_out = tracker.total_completion_tokens
    tokens_total = tracker.total_tokens

    # Format model name (take last path component if the model is a path).
    if "\\" in model or "/" in model:
        model_display = model.split("\\")[-1].split("/")[-1]
    else:
        model_display = model

    def _toolbar(label, mode_markup):
        """Build the shared toolbar layout for a given mode label/markup."""
        # %-substitute the label first; {}-placeholders survive for .format().
        template = (
            '<style fg="white">Model: {} | %s: </style>{}'
            '<style fg="white"> | </style><style fg="cyan">curr</style><style fg="white">: {:,} | </style>'
            '<style fg="cyan">in</style><style fg="white">: {:,} | </style>'
            '<style fg="cyan">out</style><style fg="white">: {:,} | </style>'
            '<style fg="cyan">total</style><style fg="white">: {:,}</style>'
        ) % label
        return HTML(template.format(
            model_display or provider_name,
            mode_markup,
            tokens_curr,
            tokens_in,
            tokens_out,
            tokens_total,
        ))

    # In Plan mode, show the plan type instead of the approval mode.
    if chat_manager.interaction_mode == "plan":
        plan_type = PLAN_TYPE_LABELS.get(
            chat_manager.plan_type,
            chat_manager.plan_type.upper()
        )
        plan_colors = {"feature": "cyan", "refactor": "green", "debug": "red"}
        # "optimize" (and any unknown type) renders yellow.
        color = plan_colors.get(chat_manager.plan_type, "yellow")
        return _toolbar("Plan", f'<style fg="{color}">{plan_type}</style>')

    # In Learn mode, show learning modes instead of approval modes.
    if chat_manager.interaction_mode == "learn":
        learning_mode = LEARNING_MODE_LABELS.get(
            chat_manager.learning_mode,
            chat_manager.learning_mode.upper()
        )
        learn_colors = {"succinct": "cyan", "balanced": "green"}
        # "verbose" (and any unknown mode) renders magenta.
        color = learn_colors.get(chat_manager.learning_mode, "magenta")
        return _toolbar("Learn", f'<style fg="{color}">{learning_mode}</style>')

    # Show approval modes for Plan/Edit modes.
    approval_mode = APPROVE_MODE_LABELS.get(
        chat_manager.approve_mode,
        chat_manager.approve_mode.upper()
    )
    if chat_manager.approve_mode == "safe":
        approval_markup = f'<style fg="green">{approval_mode}</style>'
    elif chat_manager.approve_mode == "accept_edits":
        approval_markup = f'<style fg="yellow">{approval_mode}</style>'
    else:
        approval_markup = approval_mode  # uncolored fallback
    return _toolbar("Approval", approval_markup)
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def create_confirmation_prompt_session(chat_manager, message_func):
    """Create a PromptSession for confirmation prompts with key bindings and toolbar.

    Provides:
      - Shift+Tab to cycle the approval mode (or learning mode in Learn mode)
      - a bottom toolbar showing model, mode, and token counts
      - a dynamic prompt message re-evaluated on each redraw

    Args:
        chat_manager: ChatManager instance for state access
        message_func: Function that returns the prompt message HTML (called on each redraw)

    Returns:
        PromptSession configured with bindings and toolbar
    """
    kb = KeyBindings()

    @kb.add('s-tab')
    def _cycle_mode(event):
        """Cycle approval (or learning) modes on Shift+Tab."""
        if chat_manager.interaction_mode == "learn":
            chat_manager.cycle_learning_mode()
        else:
            chat_manager.cycle_approve_mode()
        # Force a redraw so the toolbar/prompt reflect the new mode.
        event.app.invalidate()

    confirmation_style = Style.from_dict({
        "bottom-toolbar": "bg:default fg:white noreverse",
        "bottom-toolbar.text": "bg:default fg:white noreverse",
    })

    return PromptSession(
        key_bindings=kb,
        style=confirmation_style,
        bottom_toolbar=lambda: get_bottom_toolbar_text(chat_manager),
        message=message_func,
    )
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Utility modules for vmCode."""
|