vmcode-cli 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/INSTALLATION_METHODS.md +181 -0
  2. package/LICENSE +21 -0
  3. package/README.md +199 -0
  4. package/bin/npm-wrapper.js +171 -0
  5. package/bin/rg +0 -0
  6. package/bin/rg.exe +0 -0
  7. package/config.yaml.example +159 -0
  8. package/package.json +42 -0
  9. package/requirements.txt +7 -0
  10. package/scripts/install.js +132 -0
  11. package/setup.bat +114 -0
  12. package/setup.sh +135 -0
  13. package/src/__init__.py +4 -0
  14. package/src/core/__init__.py +1 -0
  15. package/src/core/agentic.py +2342 -0
  16. package/src/core/chat_manager.py +1201 -0
  17. package/src/core/config_manager.py +269 -0
  18. package/src/core/init.py +161 -0
  19. package/src/core/sub_agent.py +174 -0
  20. package/src/exceptions.py +75 -0
  21. package/src/llm/__init__.py +1 -0
  22. package/src/llm/client.py +149 -0
  23. package/src/llm/config.py +445 -0
  24. package/src/llm/prompts.py +569 -0
  25. package/src/llm/providers.py +402 -0
  26. package/src/llm/token_tracker.py +220 -0
  27. package/src/ui/__init__.py +1 -0
  28. package/src/ui/banner.py +103 -0
  29. package/src/ui/commands.py +489 -0
  30. package/src/ui/displays.py +167 -0
  31. package/src/ui/main.py +351 -0
  32. package/src/ui/prompt_utils.py +162 -0
  33. package/src/utils/__init__.py +1 -0
  34. package/src/utils/editor.py +158 -0
  35. package/src/utils/gitignore_filter.py +149 -0
  36. package/src/utils/logger.py +254 -0
  37. package/src/utils/markdown.py +32 -0
  38. package/src/utils/settings.py +94 -0
  39. package/src/utils/tools/__init__.py +55 -0
  40. package/src/utils/tools/command_executor.py +217 -0
  41. package/src/utils/tools/create_file.py +143 -0
  42. package/src/utils/tools/definitions.py +193 -0
  43. package/src/utils/tools/directory.py +374 -0
  44. package/src/utils/tools/file_editor.py +345 -0
  45. package/src/utils/tools/file_helpers.py +109 -0
  46. package/src/utils/tools/file_reader.py +331 -0
  47. package/src/utils/tools/formatters.py +458 -0
  48. package/src/utils/tools/parallel_executor.py +195 -0
  49. package/src/utils/validation.py +117 -0
  50. package/src/utils/web_search.py +71 -0
  51. package/vmcode-proxy/.env.example +5 -0
  52. package/vmcode-proxy/README.md +235 -0
  53. package/vmcode-proxy/package-lock.json +947 -0
  54. package/vmcode-proxy/package.json +20 -0
  55. package/vmcode-proxy/server.js +248 -0
  56. package/vmcode-proxy/server.js.bak +157 -0
@@ -0,0 +1,269 @@
1
+
2
+
3
+ from pathlib import Path
4
+ import shutil
5
+ from typing import Dict, Any, Optional
6
+ import logging
7
+ import yaml
8
+ from llm import config as llm_config
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
class ConfigManager:
    """Load, cache, and persist the vmCode YAML configuration file.

    All reads go through an in-memory cache that is refreshed on save,
    so callers normally see a consistent view without re-reading disk.
    """

    def __init__(self, config_path: Optional[Path] = None):
        """Initialize the manager.

        Args:
            config_path: Explicit config file location. Defaults to
                llm_config.CONFIG_PATH when not provided.
        """
        self.config_path = config_path or llm_config.CONFIG_PATH
        self._cached_data = None  # parsed config dict, or None until first load

    def load(self, force_reload: bool = False) -> Dict[str, Any]:
        """Load configuration from file, using cache if available.

        Args:
            force_reload: If True, bypass cache and reload from disk

        Returns:
            Configuration dictionary
        """
        if not force_reload and self._cached_data is not None:
            return self._cached_data

        # Missing file: fall back to the generated default template.
        if not self.config_path.exists():
            self._cached_data = llm_config.generate_config_template()
            return self._cached_data

        try:
            # utf-8-sig transparently strips a BOM if one is present.
            with open(self.config_path, 'r', encoding='utf-8-sig') as f:
                self._cached_data = yaml.safe_load(f) or {}
            return self._cached_data
        except yaml.YAMLError as e:
            # Corrupt YAML must not crash startup; log and use defaults.
            logger.error(f"Failed to parse config file {self.config_path}: {e}")
            logger.warning("Using default configuration template")
            self._cached_data = llm_config.generate_config_template()
            return self._cached_data

    def save(self, config_data: Dict[str, Any], create_backup: bool = False) -> Optional[Path]:
        """Write configuration to disk and refresh the in-memory cache.

        Args:
            config_data: Full configuration dictionary to persist
            create_backup: If True, copy the current file to a `.backup`
                sibling before overwriting

        Returns:
            Backup path if a backup was created, None otherwise
            (fix: previously always returned None, which contradicted the
            documented return values of set_model_price/delete_model_price).
        """
        backup_path = None
        if create_backup and self.config_path.exists():
            backup_path = self.config_path.with_suffix('.backup')
            shutil.copy2(self.config_path, backup_path)

        with open(self.config_path, 'w', encoding='utf-8-sig') as f:
            yaml.dump(config_data, f, default_flow_style=False, sort_keys=False, allow_unicode=True)

        # Keep the cache in sync so subsequent load() calls see the new data.
        self._cached_data = config_data
        return backup_path

    def update_field(self, key: str, value: Any, create_backup: bool = False) -> Optional[Path]:
        """Update a single configuration field.

        Args:
            key: Configuration key to update
            value: New value for the key
            create_backup: If True, create a backup before saving

        Returns:
            Backup path if backup was created, None otherwise
        """
        # Force a disk reload so concurrent external edits are not clobbered
        # by a stale cached copy.
        config_data = self.load(force_reload=True)
        config_data[key] = value
        return self.save(config_data, create_backup=create_backup)

    def set_provider(self, provider_name: str) -> Optional[Path]:
        """Persist the most-recently-used provider name.

        Args:
            provider_name: Provider identifier (e.g., 'openrouter', 'glm')

        Returns:
            Backup path if backup was created, None otherwise
        """
        return self.update_field('LAST_PROVIDER', provider_name)

    def _extract_model_pricing(self, config_data: Dict[str, Any], model: str) -> Dict[str, float]:
        """Extract pricing for a model from config.

        Args:
            config_data: Configuration dictionary
            model: Model name to look up

        Returns:
            Dict with 'in' and 'out' cost values per 1M tokens
            (zeros when the model has no configured pricing)
        """
        model_prices = config_data.get('MODEL_PRICES', {})
        if model and model in model_prices:
            model_cost = model_prices[model]
            return {
                'in': float(model_cost.get('cost_in', 0.0)),
                'out': float(model_cost.get('cost_out', 0.0))
            }
        return {'in': 0.0, 'out': 0.0}

    def get_usage_costs(self, provider: Optional[str] = None, model: Optional[str] = None) -> Dict[str, float]:
        """Get usage costs for a specific model.

        Args:
            provider: Provider name (e.g., 'openrouter', 'glm', 'openai').
                If None, uses the last provider from config.
            model: Model name (e.g., 'minimax/minimax-m2.5', 'GLM-4.7').
                If None, uses the current model from the provider.

        Returns:
            Dict with 'in' and 'out' cost values per 1M tokens
        """
        config_data = self.load()

        if provider is None:
            provider = config_data.get('LAST_PROVIDER', 'glm')

        # Resolve the provider's current model from config if not provided.
        if model is None:
            provider_model_map = {
                'vmcode_free': 'VMCODE_FREE_MODEL',
                'openrouter': 'OPENROUTER_MODEL',
                'glm': 'GLM_MODEL',
                'openai': 'OPENAI_MODEL',
                'gemini': 'GEMINI_MODEL',
                'minimax': 'MINIMAX_MODEL',
                'anthropic': 'ANTHROPIC_MODEL',
                'kimi': 'KIMI_MODEL'
            }
            model_key = provider_model_map.get(provider.lower())
            if model_key:
                model = config_data.get(model_key, '')

        return self._extract_model_pricing(config_data, model)

    def set_model(self, provider_name: str, model: str) -> Optional[Path]:
        """Set model for a specific provider.

        Args:
            provider_name: Provider name (e.g., 'openrouter', 'glm', 'local', 'openai')
            model: Model name/path to set

        Returns:
            Backup path if backup was created, None otherwise

        Raises:
            ValueError: If the provider name is not recognized
        """
        # Map provider names to their config keys.
        provider_keys = {
            'local': 'LOCAL_MODEL_PATH',
            'openrouter': 'OPENROUTER_MODEL',
            'glm': 'GLM_MODEL',
            'openai': 'OPENAI_MODEL',
            'gemini': 'GEMINI_MODEL',
            'minimax': 'MINIMAX_MODEL',
            'anthropic': 'ANTHROPIC_MODEL',
            'kimi': 'KIMI_MODEL'
        }

        if provider_name not in provider_keys:
            raise ValueError(f"Unknown provider: {provider_name}")

        return self.update_field(provider_keys[provider_name], model)

    def set_api_key(self, provider_name: str, api_key: str) -> Optional[Path]:
        """Set API key for a specific provider.

        Args:
            provider_name: Provider name (e.g., 'openrouter', 'glm', 'openai')
            api_key: API key to set

        Returns:
            Backup path if backup was created, None otherwise

        Raises:
            ValueError: If the provider name is not recognized
        """
        # Map provider names to their config keys (note: no 'local' here —
        # the local provider needs no API key).
        provider_keys = {
            'openrouter': 'OPENROUTER_API_KEY',
            'glm': 'GLM_API_KEY',
            'openai': 'OPENAI_API_KEY',
            'gemini': 'GEMINI_API_KEY',
            'minimax': 'MINIMAX_API_KEY',
            'anthropic': 'ANTHROPIC_API_KEY',
            'kimi': 'KIMI_API_KEY'
        }

        if provider_name not in provider_keys:
            raise ValueError(f"Unknown provider: {provider_name}")

        return self.update_field(provider_keys[provider_name], api_key)

    def get_pre_tool_planning(self) -> bool:
        """Get the pre-tool planning enabled state.

        Returns:
            True if pre-tool planning is enabled, False otherwise.
            Defaults to False if not set in config.
        """
        config_data = self.load()
        return bool(config_data.get('PRE_TOOL_PLANNING', False))

    def set_pre_tool_planning(self, enabled: bool) -> Optional[Path]:
        """Set the pre-tool planning enabled state.

        Args:
            enabled: True to enable pre-tool planning, False to disable.

        Returns:
            Backup path if backup was created, None otherwise.
        """
        return self.update_field('PRE_TOOL_PLANNING', enabled)

    def get_model_price(self, model_name: str) -> Dict[str, float]:
        """Get pricing for a specific model.

        Args:
            model_name: Model name (e.g., 'minimax/minimax-m2.5', 'GLM-4.7')

        Returns:
            Dict with 'in' and 'out' cost values per 1M tokens
        """
        config_data = self.load()
        return self._extract_model_pricing(config_data, model_name)

    def set_model_price(self, model_name: str, cost_in: float, cost_out: float) -> Optional[Path]:
        """Set pricing for a specific model.

        Args:
            model_name: Model name (e.g., 'minimax/minimax-m2.5', 'GLM-4.7')
            cost_in: Cost per 1M input tokens
            cost_out: Cost per 1M output tokens

        Returns:
            Backup path if backup was created, None otherwise
        """
        config_data = self.load(force_reload=True)

        if 'MODEL_PRICES' not in config_data:
            config_data['MODEL_PRICES'] = {}

        config_data['MODEL_PRICES'][model_name] = {
            'cost_in': cost_in,
            'cost_out': cost_out
        }

        return self.save(config_data, create_backup=False)

    def list_model_prices(self) -> Dict[str, Dict[str, float]]:
        """Get all model-specific pricing.

        Returns:
            Dict mapping model names to their pricing (cost_in/cost_out per 1M tokens)
        """
        config_data = self.load()
        return config_data.get('MODEL_PRICES', {})

    def delete_model_price(self, model_name: str) -> Optional[Path]:
        """Delete pricing for a specific model.

        Args:
            model_name: Model name to remove from pricing

        Returns:
            Backup path if backup was created, None otherwise
        """
        config_data = self.load(force_reload=True)

        if 'MODEL_PRICES' in config_data and model_name in config_data['MODEL_PRICES']:
            del config_data['MODEL_PRICES'][model_name]
            return self.save(config_data, create_backup=False)

        # Nothing to delete: no write performed, no backup created.
        return None
@@ -0,0 +1,161 @@
1
+ """Repository scan and agents.md generation."""
2
+
3
+ from pathlib import Path
4
+ from typing import List, Tuple
5
+
6
+ from llm.client import LLMClient
7
+ from utils.settings import file_settings
8
+
9
+
10
# Scan limits, sourced from user-configurable file settings.
EXCLUDE_DIRS = file_settings.exclude_dirs      # directory names never descended into
MAX_FILE_BYTES = file_settings.max_file_bytes  # per-file size cap before skipping
MAX_TOTAL_BYTES = file_settings.max_total_bytes  # total content budget across all files
13
+
14
+
15
def _is_excluded(path: Path, repo_root: Path) -> bool:
    """Return True when any path component (relative to repo_root) is an excluded directory."""
    for component in path.relative_to(repo_root).parts:
        if component in EXCLUDE_DIRS:
            return True
    return False
18
+
19
+
20
def _read_file(path: Path) -> Tuple[str, str]:
    """Read a file for prompting.

    Returns a (status, text) pair: status is "ok", "skipped_large", or
    "skipped_binary"; text is empty unless status is "ok".
    """
    # Oversized files are skipped without being read at all.
    if path.stat().st_size > MAX_FILE_BYTES:
        return "skipped_large", ""

    raw = path.read_bytes()
    # A NUL byte is treated as evidence of binary content.
    if raw.find(b"\x00") != -1:
        return "skipped_binary", ""

    return "ok", raw.decode("utf-8", errors="replace")
31
+
32
+
33
def _collect_files(repo_root: Path) -> List[Path]:
    """Collect files from repository, respecting EXCLUDE_DIRS and .gitignore.

    Args:
        repo_root: Repository root directory

    Returns:
        List of Path objects for files to include, sorted by relative path
    """
    from utils.gitignore_filter import load_gitignore_spec, is_path_ignored

    # .gitignore spec may be None when the repo has no ignore file.
    spec = load_gitignore_spec(repo_root)

    def _wanted(candidate: Path) -> bool:
        """Filter predicate: regular files not excluded by config or .gitignore."""
        if not candidate.is_file():
            return False
        if _is_excluded(candidate, repo_root):
            return False
        if spec is not None:
            ignored, _ = is_path_ignored(candidate, repo_root, spec)
            if ignored:
                return False
        return True

    selected = [p for p in repo_root.rglob("*") if _wanted(p)]
    selected.sort(key=lambda p: str(p.relative_to(repo_root)))
    return selected
65
+
66
+
67
def _build_prompt(repo_root: Path, files: List[Path]) -> str:
    """Assemble the agents.md generation prompt: instructions plus one block per file.

    File contents are included until the MAX_TOTAL_BYTES budget is exhausted;
    skipped files carry a placeholder explaining why.
    """
    placeholder_for = {
        "skipped_large": "[skipped: file too large]",
        "skipped_binary": "[skipped: binary file]",
    }
    budget_used = 0
    blocks: List[str] = []

    for path in files:
        rel_path = path.relative_to(repo_root)
        status, text = _read_file(path)
        byte_size = path.stat().st_size

        if status == "ok":
            encoded_len = len(text.encode("utf-8"))
            # Enforce the global content budget across all included files.
            if budget_used + encoded_len > MAX_TOTAL_BYTES:
                status, text = "skipped_total_limit", ""
            else:
                budget_used += encoded_len

        if status == "ok":
            body = text
        else:
            body = placeholder_for.get(status, "[skipped: total content limit reached]")

        blocks.append(
            f"FILE: {rel_path}\n"
            f"SIZE: {byte_size}\n"
            f"CONTENT:\n{body}\n"
        )

    files_payload = "\n".join(blocks)
    return (
        "Generate a concise agents.md for this repository.\n\n"
        "CRITICAL: Be extremely concise. Single-line descriptions only.\n\n"
        "Requirements:\n"
        "1) '# Files' section - bullets: `path` - one-line description\n"
        "2) '# Key Classes' section - bullets: **Class** (`path`) - one-line responsibility\n"
        "3) '# Architecture' section - compact flow diagrams showing entry points, data flow, tool-calling loop\n"
        "4) '# Configuration' section - grouped bullets: `VAR_1`, `VAR_2` - purpose\n"
        "5) '# Patterns' section - bullets: **Category**: description\n"
        "6) '# Summary' section - 1-2 sentences max\n\n"
        "Style rules:\n"
        "- One line per file/class description\n"
        "- No verbose explanations or implementation details\n"
        "- Use arrows (→) for flows instead of numbered lists\n"
        "- Group related config vars on single line\n"
        "- Focus on WHAT and WHERE, not HOW\n"
        "- Return ONLY markdown, no code blocks\n\n"
        f"REPO_ROOT: {repo_root}\n\n"
        f"{files_payload}"
    )
119
+
120
+
121
def run_init(repo_root: Path, console) -> None:
    """Scan repo files with the LLM and write agents.md.

    Args:
        repo_root: Repository root to scan; agents.md is written here.
        console: Rich console used for status output.
    """
    console.print("[yellow]Generating agents.md with LLM scan...[/yellow]")

    files = _collect_files(repo_root)
    if not files:
        console.print("[red]No files found to scan.[/red]")
        return

    prompt = _build_prompt(repo_root, files)
    client = LLMClient()

    messages = [
        {
            "role": "system",
            "content": (
                "You are a documentation generator for a codebase. "
                "Follow the user's formatting requirements exactly."
            ),
        },
        {"role": "user", "content": prompt},
    ]

    response = client.chat_completion(messages, stream=False)
    if isinstance(response, str):
        # A plain-string response is an error message from the client.
        # Fix: with markup=False the previous "[red]...[/red]" wrapper was
        # printed literally instead of coloring; use style= for the color and
        # keep markup disabled so arbitrary response text is shown verbatim.
        console.print(response, style="red", markup=False)
        return

    # Defensively unwrap the OpenAI-style response shape.
    try:
        content = response["choices"][0]["message"]["content"]
    except (KeyError, IndexError, TypeError):
        content = None

    if not content or not content.strip():
        console.print("[red]LLM returned empty content.[/red]")
        return

    # Fix: write agents.md into the scanned repository root. The previous
    # Path(__file__).resolve().parents[1] wrote into the package's install
    # directory, while consumers (e.g. the sub-agent) read agents.md from
    # the working directory / repo.
    output_path = repo_root / "agents.md"
    output_path.write_text(content.strip() + "\n", encoding="utf-8")
    console.print(f"[green]Generated {output_path.name} successfully![/green]")
@@ -0,0 +1,174 @@
1
+ """Sub-agent for delegated tasks.
2
+
3
+ Uses existing AgenticOrchestrator with isolated message context
4
+ and read-only tools to execute generic delegated tasks.
5
+ """
6
+
7
+ from pathlib import Path
8
+
9
+ from core.agentic import AgenticOrchestrator
10
+ from core.chat_manager import ChatManager
11
+ from llm.prompts import build_sub_agent_prompt
12
+ from utils.settings import sub_agent_settings
13
+
14
+
15
# Read-only tools allowed for sub-agent
# (search, read, list, and web lookup only — no write/edit/execute tools).
SUB_AGENT_TOOLS = ["rg", "read_file", "list_directory", "web_search"]
17
+
18
+
19
def _create_chat_manager():
    """Build an isolated ChatManager seeded for a sub-agent run.

    Returns:
        ChatManager: fresh instance with the sub-agent system prompt,
        an optional codebase-map exchange, no conversation logging, and
        plan-mode tool restrictions.
    """
    # Compaction is configurable for the sub-agent; compact_trigger_tokens=None
    # disables auto-compaction entirely (the default behaviour).
    if sub_agent_settings.enable_compaction:
        manager = ChatManager()
    else:
        manager = ChatManager(compact_trigger_tokens=None)

    # System prompt carries current token usage against the configurable soft limit.
    usage_note = manager.token_tracker.get_usage_for_prompt(
        context_limit=sub_agent_settings.soft_limit_tokens
    )
    system_prompt = f"{build_sub_agent_prompt()}\n\n{usage_note}"
    manager.messages = [{"role": "system", "content": system_prompt}]

    # Seed the conversation with the repo map when agents.md exists in cwd.
    agents_path = Path.cwd() / "agents.md"
    if agents_path.exists():
        map_content = agents_path.read_text(encoding="utf-8").strip()
        manager.messages.append({
            "role": "user",
            "content": (
                "Here is the codebase map for this project. "
                "This provides an overview of the repository structure and file purposes. "
                "Use this as a reference when exploring the codebase.\n\n"
                f"## Codebase Map (auto-generated from agents.md)\n\n{map_content}"
            ),
        })
        manager.messages.append({
            "role": "assistant",
            "content": (
                "I've received the codebase map. I'll use this as a reference when "
                "exploring the repository, but I'll always verify current state by "
                "reading files and searching the codebase before making changes."
            ),
        })

    # Isolated run: no conversation logging for the sub-agent.
    manager.conversation_logger = None
    # CRITICAL: plan mode keeps dangerous (mutating) tools out of reach.
    manager.interaction_mode = "plan"
    return manager
65
+
66
+
67
def run_sub_agent(
    task_query: str,
    repo_root: Path,
    rg_exe_path: str,
    console=None,
    panel_updater=None,
) -> dict:
    """Run sub-agent using existing AgenticOrchestrator for delegated tasks.

    The sub-agent runs against a fully isolated ChatManager (no shared
    history, no conversation logging) and is restricted to the read-only
    tools in SUB_AGENT_TOOLS; a hard token limit aborts the run.

    Args:
        task_query: Generic task query to execute (e.g., "Read file config.json")
        repo_root: Repository root path
        rg_exe_path: Path to rg executable
        console: Optional Rich console for output
        panel_updater: Optional SubAgentPanel for live panel updates

    Returns:
        Dict with:
        - 'result': Formatted markdown string (goes into chat history)
        - 'usage': Usage data for billing
        - 'error': Error message if failed (None if success)
    """
    # Validate panel_updater type if provided (duck-typed: anything without
    # an `append` method is silently dropped rather than failing mid-run).
    if panel_updater is not None and not hasattr(panel_updater, 'append'):
        panel_updater = None

    # Create fresh ChatManager for sub-agent
    temp_chat_manager = _create_chat_manager()

    # Create orchestrator (reuses existing implementation)
    orchestrator = AgenticOrchestrator(
        chat_manager=temp_chat_manager,
        repo_root=repo_root,
        rg_exe_path=rg_exe_path,
        console=console,
        debug_mode=False,
        suppress_result_display=True,
        is_sub_agent=True,
        panel_updater=panel_updater,
        pre_tool_planning_enabled=False,
        force_parallel_execution=True  # Enable parallel execution for read-only tools
    )

    # Wrap orchestrator.run to check hard limit before each LLM call
    original_get_llm_response = orchestrator._get_llm_response

    def _get_llm_response_with_hard_limit(allowed_tools=None):
        """Wrapper to check hard token limit before each LLM call."""
        # Check hard token limit before making LLM call
        current_total = temp_chat_manager.token_tracker.total_tokens
        if current_total >= sub_agent_settings.hard_limit_tokens:
            # Raising here is caught by the try/except around orchestrator.run
            # below and surfaced as a structured error result.
            raise Exception(
                f"Sub-agent hard token limit exceeded: "
                f"{current_total:,} / {sub_agent_settings.hard_limit_tokens:,} tokens. "
                "Please refine your query or use more targeted searches."
            )

        return original_get_llm_response(allowed_tools=allowed_tools)

    # Replace the method with our wrapper.
    # NOTE(review): assumes _get_llm_response takes only `allowed_tools`;
    # confirm against AgenticOrchestrator before changing its signature.
    orchestrator._get_llm_response = _get_llm_response_with_hard_limit

    try:
        # Run sub-agent task
        orchestrator.run(
            task_query,
            thinking_indicator=None,
            allowed_tools=SUB_AGENT_TOOLS
        )
    except Exception as e:
        # Any failure (including the hard-limit abort above) is converted into
        # a structured error result instead of propagating to the caller.
        return {
            "result": "",
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            },
            "error": str(e)
        }

    # Get final token usage (no need for delta calculation on fresh instance)
    # The "delta_" names are really run totals: the tracker started at zero.
    delta_prompt = temp_chat_manager.token_tracker.total_prompt_tokens
    delta_completion = temp_chat_manager.token_tracker.total_completion_tokens
    delta_total = temp_chat_manager.token_tracker.total_tokens

    # Extract final response (last assistant message with content)
    final_content = ""
    for msg in reversed(temp_chat_manager.messages):
        if msg.get("role") == "assistant" and msg.get("content"):
            final_content = msg["content"].strip()
            break

    # Format with usage at end
    result = (
        f"{final_content}\n\n"
        f"---\n"
        f"Sub-agent used: {delta_prompt} prompt tokens, {delta_completion} completion tokens ({delta_total} total)"
    )

    return {
        "result": result,
        "usage": {
            "prompt_tokens": delta_prompt,
            "completion_tokens": delta_completion,
            "total_tokens": delta_total
        },
        "error": None
    }
@@ -0,0 +1,75 @@
1
+ """Custom exception hierarchy for vmCode."""
2
+
3
class VmCodeError(Exception):
    """Base exception for all vmCode application errors.

    All custom exceptions should inherit from this class.
    Provides consistent error handling and allows catching
    all vmCode-specific errors with a single except clause.
    """
    # Fix: `details` was annotated plain `dict` despite defaulting to None;
    # the string annotation "dict | None" is lazily evaluated, so it is valid
    # on every supported Python version without importing typing.
    def __init__(self, message: str, *, details: "dict | None" = None):
        """Initialize exception with optional details.

        Args:
            message: Human-readable error message
            details: Optional dictionary with additional error context
        """
        super().__init__(message)
        # Normalize None to an empty dict so callers can always iterate details.
        self.details = details or {}

    def __str__(self):
        """Return the message, with key=value details appended when present."""
        base_msg = super().__str__()
        if self.details:
            details_str = ", ".join(f"{k}={v}" for k, v in self.details.items())
            return f"{base_msg} ({details_str})"
        return base_msg
26
+
27
+
28
class ConfigurationError(VmCodeError):
    """Raised when configuration is invalid, missing, or cannot be loaded."""


class LLMError(VmCodeError):
    """Raised when LLM API communication fails or returns unexpected data."""


class LLMConnectionError(LLMError):
    """Raised when network connection to LLM provider fails."""


class LLMResponseError(LLMError):
    """Raised when LLM response is malformed or invalid."""


class ToolExecutionError(VmCodeError):
    """Raised when tool execution fails."""


class CommandExecutionError(ToolExecutionError):
    """Raised when shell command execution fails."""


class FileEditError(ToolExecutionError):
    """Raised when file edit operation fails."""


class ValidationError(VmCodeError):
    """Raised when input validation fails."""


class PathValidationError(ValidationError):
    """Raised when path validation fails (blocked by gitignore, etc.)."""


class CommandValidationError(ValidationError):
    """Raised when command validation fails (dangerous operators, etc.)."""
@@ -0,0 +1 @@
1
+ """LLM integration layer for vmCode."""