vmcode-cli 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. package/INSTALLATION_METHODS.md +181 -0
  2. package/LICENSE +21 -0
  3. package/README.md +199 -0
  4. package/bin/npm-wrapper.js +171 -0
  5. package/bin/rg +0 -0
  6. package/bin/rg.exe +0 -0
  7. package/config.yaml.example +159 -0
  8. package/package.json +42 -0
  9. package/requirements.txt +7 -0
  10. package/scripts/install.js +132 -0
  11. package/setup.bat +114 -0
  12. package/setup.sh +135 -0
  13. package/src/__init__.py +4 -0
  14. package/src/core/__init__.py +1 -0
  15. package/src/core/agentic.py +2342 -0
  16. package/src/core/chat_manager.py +1201 -0
  17. package/src/core/config_manager.py +269 -0
  18. package/src/core/init.py +161 -0
  19. package/src/core/sub_agent.py +174 -0
  20. package/src/exceptions.py +75 -0
  21. package/src/llm/__init__.py +1 -0
  22. package/src/llm/client.py +149 -0
  23. package/src/llm/config.py +445 -0
  24. package/src/llm/prompts.py +569 -0
  25. package/src/llm/providers.py +402 -0
  26. package/src/llm/token_tracker.py +220 -0
  27. package/src/ui/__init__.py +1 -0
  28. package/src/ui/banner.py +103 -0
  29. package/src/ui/commands.py +489 -0
  30. package/src/ui/displays.py +167 -0
  31. package/src/ui/main.py +351 -0
  32. package/src/ui/prompt_utils.py +162 -0
  33. package/src/utils/__init__.py +1 -0
  34. package/src/utils/editor.py +158 -0
  35. package/src/utils/gitignore_filter.py +149 -0
  36. package/src/utils/logger.py +254 -0
  37. package/src/utils/markdown.py +32 -0
  38. package/src/utils/settings.py +94 -0
  39. package/src/utils/tools/__init__.py +55 -0
  40. package/src/utils/tools/command_executor.py +217 -0
  41. package/src/utils/tools/create_file.py +143 -0
  42. package/src/utils/tools/definitions.py +193 -0
  43. package/src/utils/tools/directory.py +374 -0
  44. package/src/utils/tools/file_editor.py +345 -0
  45. package/src/utils/tools/file_helpers.py +109 -0
  46. package/src/utils/tools/file_reader.py +331 -0
  47. package/src/utils/tools/formatters.py +458 -0
  48. package/src/utils/tools/parallel_executor.py +195 -0
  49. package/src/utils/validation.py +117 -0
  50. package/src/utils/web_search.py +71 -0
  51. package/vmcode-proxy/.env.example +5 -0
  52. package/vmcode-proxy/README.md +235 -0
  53. package/vmcode-proxy/package-lock.json +947 -0
  54. package/vmcode-proxy/package.json +20 -0
  55. package/vmcode-proxy/server.js +248 -0
  56. package/vmcode-proxy/server.js.bak +157 -0
@@ -0,0 +1,149 @@
1
+ """LLM client for making API requests to various providers."""
2
+
3
+ import requests
4
+ from llm import config as config_module
5
+ from llm.config import PROVIDER_REGISTRY, get_provider_config, get_providers
6
+ from llm.providers import get_handler
7
+ from exceptions import LLMConnectionError, LLMResponseError, ConfigurationError
8
+
9
+
10
class StreamWrapper:
    """Iterator adapter pairing a streaming generator with its HTTP response.

    Lets callers abandon a stream midway and still release the underlying
    connection via close().
    """

    def __init__(self, response, generator):
        # Keep both handles: the generator yields parsed chunks, while the
        # response object owns the socket that close() must release.
        self._response = response
        self._generator = generator

    def __iter__(self):
        return self

    def __next__(self):
        # Delegate to the wrapped generator; its StopIteration ends the
        # iteration naturally.
        return next(self._generator)

    def close(self):
        """Close the underlying HTTP connection, if one is attached."""
        if self._response:
            self._response.close()
27
+
28
+
29
class LLMClient:
    """Thin client that routes chat-completion requests to the active provider."""

    def __init__(self, provider=None):
        """Initialize LLM client.

        Args:
            provider: Provider name. If None, uses global LLM_PROVIDER from config.
        """
        self.provider = provider or config_module.LLM_PROVIDER
        self.handler = get_handler(self.provider)
        self.config = self._get_provider_config()

    @property
    def model(self) -> str:
        """Return the configured model name, or "" when unset."""
        return str(self.config.get("payload", {}).get("model") or "")

    def _get_provider_config(self):
        """Build the request config (url/headers/payload) from PROVIDER_REGISTRY.

        Returns:
            dict with keys "url", "headers", "payload", "error_prefix", "registry".

        Raises:
            ConfigurationError: If the provider is not in the registry.
        """
        registry = get_provider_config(self.provider)
        if not registry:
            raise ConfigurationError(f"Unknown provider: {self.provider}")

        # Auth headers are provider-specific; delegate to the handler.
        headers = self.handler.build_headers(registry)

        # Some providers expose a separate wire-level model id ("api_model").
        payload = {}
        model_name = registry.get("api_model") or registry.get("model")
        if model_name:
            payload["model"] = model_name

        return {
            "url": f"{registry['api_base']}{registry['endpoint']}",
            "headers": headers,
            "payload": payload,
            "error_prefix": registry["error_prefix"],
            "registry": registry
        }

    def chat_completion(self, messages, stream=True, tools=None, *, timeout=None):
        """Make a chat completion request.

        Args:
            messages: List of message dicts.
            stream: Whether to stream the response.
            tools: Optional list of tool definitions.
            timeout: Optional requests timeout in seconds (or a
                (connect, read) tuple). Default None keeps the previous
                behavior of waiting indefinitely.

        Returns:
            StreamWrapper if stream=True, else the handler-parsed response.

        Raises:
            LLMConnectionError: On transport failures or non-2xx responses.
        """
        config = self.config
        registry = config["registry"]

        # Handler shapes the payload for the provider's wire format.
        payload = self.handler.build_payload(registry, messages, tools, stream)

        try:
            response = requests.post(
                config["url"],
                headers=config["headers"],
                json=payload,
                stream=stream,
                timeout=timeout
            )

            # Include the response body on HTTP errors for easier debugging.
            if not response.ok:
                error_details = response.text if response.text else str(response.status_code)
                raise LLMConnectionError(
                    f"Error communicating with {config['error_prefix']}",
                    details={"provider": self.provider, "original_error": error_details}
                )
            # NOTE: the explicit check above already raised on every non-2xx
            # status, so the old raise_for_status() call here was unreachable
            # and has been removed.

            if stream:
                return StreamWrapper(
                    response,
                    self.handler.parse_stream(response)
                )
            response_json = response.json()
            return self.handler.parse_response(response_json)

        except requests.exceptions.RequestException as e:
            # Normalize transport-level failures into the app's exception
            # hierarchy, preserving the original cause for tracebacks.
            raise LLMConnectionError(
                f"Error communicating with {config['error_prefix']}",
                details={"provider": self.provider, "original_error": str(e)}
            ) from e

    def switch_provider(self, new_provider):
        """Switch to a different provider.

        Args:
            new_provider: Name of the provider to switch to.

        Returns:
            True if successful, False if provider not found.
        """
        if new_provider not in get_providers():
            return False
        self.provider = new_provider
        self.handler = get_handler(new_provider)
        self.config = self._get_provider_config()
        return True

    def sync_provider_from_config(self):
        """Sync this client's provider and config with the current config.

        This should be called after config is reloaded from disk.

        Returns:
            True if the provider changed, False otherwise. The cached
            request config is rebuilt either way, since per-provider
            values (model, api_key) may have changed.
        """
        current_provider = config_module.LLM_PROVIDER
        if self.provider != current_provider:
            self.provider = current_provider
            self.handler = get_handler(current_provider)
            self.config = self._get_provider_config()
            return True
        # Provider unchanged, but config values may have been edited.
        self.config = self._get_provider_config()
        return False
147
+
148
+
149
# Public API of this module; StreamWrapper is internal plumbing.
__all__ = ['LLMClient']
@@ -0,0 +1,445 @@
1
+ import os
2
+ import platform
3
+ from pathlib import Path
4
+ import yaml
5
+
6
# Provider selection - loaded from config (see after PROVIDER_REGISTRY definition)

# Absolute path to the project-root config.yaml (two directories above this file).
CONFIG_PATH = Path(__file__).resolve().parents[2] / "config.yaml"

# Environment variable names for API keys (env vars take precedence over config file)
# NOTE(review): these values are captured once, at import time; environment
# changes made after import are not reflected in this dict — confirm intended.
ENV_API_KEYS = {
    'ANTHROPIC_API_KEY': os.environ.get('ANTHROPIC_API_KEY'),
    'OPENAI_API_KEY': os.environ.get('OPENAI_API_KEY'),
    'GLM_API_KEY': os.environ.get('GLM_API_KEY'),
    'GEMINI_API_KEY': os.environ.get('GEMINI_API_KEY'),
    'OPENROUTER_API_KEY': os.environ.get('OPENROUTER_API_KEY'),
    'KIMI_API_KEY': os.environ.get('KIMI_API_KEY'),
    'MINIMAX_API_KEY': os.environ.get('MINIMAX_API_KEY'),
}
20
+
21
# Detect platform for llama.cpp paths.
_system = platform.system()
_IS_WINDOWS = _system == "Windows"
_IS_LINUX = _system == "Linux"

# Windows builds ship an .exe under "build"; Linux builds live under
# "build-linux"; anything else (e.g. macOS) falls back to the generic layout.
_LLAMA_SERVER_NAME = "llama-server.exe" if _IS_WINDOWS else "llama-server"
_LLAMA_BUILD_DIR = "build-linux" if _IS_LINUX else "build"
36
+
37
def _load_config():
    """Load config from YAML file, with environment variable overrides for API keys.

    Environment variables take precedence over values in config.yaml. The
    environment is consulted at call time (falling back to the import-time
    snapshot in ENV_API_KEYS), so reload_config() picks up current values.

    Returns:
        dict: Parsed config ({} when the file is missing or malformed),
        with API keys overridden from the environment where set.
    """
    config_path = Path(__file__).resolve().parents[2] / "config.yaml"
    if not config_path.exists():
        # Fix: previously this returned {} immediately, silently skipping the
        # env-var overrides below, so API keys from the environment were
        # ignored whenever config.yaml was absent.
        config = {}
    else:
        try:
            # utf-8-sig tolerates a BOM written by Windows editors.
            config = yaml.safe_load(config_path.read_text(encoding="utf-8-sig")) or {}
        except yaml.YAMLError:
            # A malformed file degrades to an empty config rather than crashing.
            config = {}

    # Override API keys from environment variables (env vars take precedence);
    # read the live environment first, then the import-time snapshot.
    for key, snapshot in ENV_API_KEYS.items():
        env_value = os.environ.get(key) or snapshot
        if env_value:  # Only override if set and non-empty
            config[key] = env_value

    return config
56
+
57
+
58
# Module-level config snapshot; refreshed by reload_config().
_CONFIG = _load_config()

# Cache for provider registry (built once at module load)
_provider_registry_cache = None
# Cached current provider name; resolved lazily by _get_provider().
_cached_provider = None
63
+
64
+
65
def _get_provider_registry():
    """Build PROVIDER_REGISTRY from current config (cached).

    Maps provider name -> metadata consumed by the LLM client: credentials,
    model, base URL + endpoint, optional extra headers, sampling defaults,
    and per-token pricing looked up in the MODEL_PRICES config section.
    Built once per process; reload_config() clears the cache.
    """
    global _provider_registry_cache
    if _provider_registry_cache is not None:
        return _provider_registry_cache

    def _cost_fields(model_name: str) -> dict:
        """Return {"cost_in": ..., "cost_out": ...} for *model_name*.

        Looks the model up in MODEL_PRICES; unknown models are priced 0.0.
        (Replaces the old get_model_cost helper, whose provider/cost-key/
        default parameters were accepted but never used, and which was
        called twice per provider.)
        """
        model_prices = _CONFIG.get("MODEL_PRICES", {})
        if model_name in model_prices:
            entry = model_prices[model_name]
            return {
                "cost_in": float(entry.get("cost_in", 0.0)),
                "cost_out": float(entry.get("cost_out", 0.0)),
            }
        return {"cost_in": 0.0, "cost_out": 0.0}

    _provider_registry_cache = {
        "local": {
            "type": "local",
            "api_key": None,
            "model": _CONFIG.get("LOCAL_MODEL_PATH", ""),
            # llama-server exposes its loaded model under the fixed id "model".
            "api_model": "model",
            "api_base": "http://127.0.0.1:8080",
            "endpoint": "/v1/chat/completions",
            "error_prefix": "local server",
            "config_keys": {
                "LOCAL_MODEL_PATH": "",
                "LOCAL_SERVER_PATH": str(
                    Path(__file__).resolve().parents[2] /
                    f"llama.cpp/{_LLAMA_BUILD_DIR}/bin/{_LLAMA_SERVER_NAME}"
                ),
            },
            "extra": {
                "host": "127.0.0.1",
                "port": 8080,
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            "allow_top_p": True,
            "allow_temperature": True,
            "cost_in": 0.0,
            "cost_out": 0.0,
        },
        "openrouter": {
            "type": "api",
            "api_key": _CONFIG.get("OPENROUTER_API_KEY", ""),
            "model": _CONFIG.get("OPENROUTER_MODEL", ""),
            "api_base": _CONFIG.get("OPENROUTER_API_BASE", "https://openrouter.ai/api/v1"),
            "endpoint": "/chat/completions",
            "error_prefix": "OpenRouter",
            "headers_extra": {
                "HTTP-Referer": "http://localhost:8080",
                "X-Title": "Chat App"
            },
            "config_keys": {
                "OPENROUTER_API_KEY": "",
                "OPENROUTER_MODEL": "",
                "OPENROUTER_API_BASE": "https://openrouter.ai/api/v1",
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            "allow_top_p": True,
            "allow_temperature": True,
            **_cost_fields(_CONFIG.get("OPENROUTER_MODEL", "")),
        },
        "glm": {
            "type": "api",
            "api_key": _CONFIG.get("GLM_API_KEY", ""),
            "model": _CONFIG.get("GLM_MODEL", ""),
            "api_base": _CONFIG.get("GLM_API_BASE", "https://open.bigmodel.cn/api/paas/v4"),
            "endpoint": "/chat/completions",
            "error_prefix": "GLM",
            "config_keys": {
                "GLM_API_KEY": "",
                "GLM_MODEL": "",
                "GLM_API_BASE": "https://open.bigmodel.cn/api/paas/v4",
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            "allow_top_p": True,
            "allow_temperature": True,
            **_cost_fields(_CONFIG.get("GLM_MODEL", "")),
        },
        "openai": {
            "type": "api",
            "api_key": _CONFIG.get("OPENAI_API_KEY", ""),
            "model": _CONFIG.get("OPENAI_MODEL", ""),
            "api_base": _CONFIG.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
            "endpoint": "/chat/completions",
            "error_prefix": "OpenAI",
            "config_keys": {
                "OPENAI_API_KEY": "",
                "OPENAI_MODEL": "",
                "OPENAI_API_BASE": "https://api.openai.com/v1",
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            # Sampling knobs disabled for OpenAI in the original config.
            "allow_top_p": False,
            "allow_temperature": False,
            **_cost_fields(_CONFIG.get("OPENAI_MODEL", "")),
        },
        "gemini": {
            "type": "api",
            "api_key": _CONFIG.get("GEMINI_API_KEY", ""),
            "model": _CONFIG.get("GEMINI_MODEL", ""),
            "api_base": _CONFIG.get("GEMINI_API_BASE", "https://generativelanguage.googleapis.com/v1beta"),
            "endpoint": "/chat/completions",
            "error_prefix": "Gemini",
            "config_keys": {
                "GEMINI_API_KEY": "",
                "GEMINI_MODEL": "",
                "GEMINI_API_BASE": "https://generativelanguage.googleapis.com/v1beta",
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            "allow_top_p": True,
            "allow_temperature": True,
            **_cost_fields(_CONFIG.get("GEMINI_MODEL", "")),
        },
        "minimax": {
            "type": "api",
            "api_key": _CONFIG.get("MINIMAX_API_KEY", ""),
            "model": _CONFIG.get("MINIMAX_MODEL", ""),
            "api_base": _CONFIG.get("MINIMAX_API_BASE", "https://api.minimax.chat/v1"),
            "endpoint": "/chat/completions",
            "error_prefix": "MiniMax",
            "config_keys": {
                "MINIMAX_API_KEY": "",
                "MINIMAX_MODEL": "",
                "MINIMAX_API_BASE": "https://api.minimax.chat/v1",
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            "allow_top_p": True,
            "allow_temperature": True,
            **_cost_fields(_CONFIG.get("MINIMAX_MODEL", "")),
        },
        "anthropic": {
            "type": "api",
            "api_key": _CONFIG.get("ANTHROPIC_API_KEY", ""),
            "model": _CONFIG.get("ANTHROPIC_MODEL", ""),
            "api_base": _CONFIG.get("ANTHROPIC_API_BASE", "https://api.anthropic.com/v1"),
            # Anthropic uses the Messages API, not /chat/completions.
            "endpoint": "/messages",
            "error_prefix": "Anthropic",
            "headers_extra": {
                "anthropic-version": "2023-06-01"
            },
            "config_keys": {
                "ANTHROPIC_API_KEY": "",
                "ANTHROPIC_MODEL": "",
                "ANTHROPIC_API_BASE": "https://api.anthropic.com/v1",
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            "allow_top_p": False,
            "allow_temperature": True,
            "max_tokens": 4096,
            **_cost_fields(_CONFIG.get("ANTHROPIC_MODEL", "")),
        },
        "kimi": {
            "type": "api",
            "api_key": _CONFIG.get("KIMI_API_KEY", ""),
            "model": _CONFIG.get("KIMI_MODEL", ""),
            "api_base": _CONFIG.get("KIMI_API_BASE", "https://api.moonshot.cn/v1"),
            "endpoint": "/chat/completions",
            "error_prefix": "Kimi",
            "config_keys": {
                "KIMI_API_KEY": "",
                "KIMI_MODEL": "",
                "KIMI_API_BASE": "https://api.moonshot.cn/v1",
            },
            "default_temperature": 0.1,
            "default_top_p": 0.9,
            "allow_top_p": True,
            "allow_temperature": True,
            **_cost_fields(_CONFIG.get("KIMI_MODEL", "")),
        },
        "vmcode_free": {
            "type": "api",
            # No key required: requests are proxied through the hosted relay.
            "api_key": "",
            "model": "z-ai/glm-4.5-air:free",
            "api_base": _CONFIG.get("VMCODE_FREE_API_BASE", "https://vmcode-five.vercel.app"),
            "endpoint": "/chat",
            "error_prefix": "vmCode Free",
            "config_keys": {
                "VMCODE_FREE_API_BASE": "https://vmcode-five.vercel.app",
            },
            "default_temperature": 0.7,
            "default_top_p": 0.9,
            "allow_top_p": True,
            "allow_temperature": True,
            "cost_in": 0.0,
            "cost_out": 0.0,
        },
    }
    return _provider_registry_cache
281
+
282
+
283
def _get_provider():
    """Get the current provider name from config (cached).

    Honors LAST_PROVIDER from config when it names a known provider;
    otherwise falls back to "glm".

    Returns:
        str: Provider name.
    """
    global _cached_provider
    if _cached_provider is not None:
        return _cached_provider

    last_provider = _CONFIG.get("LAST_PROVIDER")
    # Fix: use the accessor instead of reading _provider_registry_cache
    # directly — the raw cache may still be None if the registry has not
    # been built yet, and `x in None` raises TypeError.
    if last_provider and last_provider in _get_provider_registry():
        _cached_provider = last_provider
        return _cached_provider
    _cached_provider = "glm"
    return _cached_provider
295
+
296
+
297
def reload_config():
    """Reload config from disk and invalidate caches.

    Reloads both the config.yaml file and environment variables.
    NOTE(review): env overrides come from the import-time ENV_API_KEYS
    snapshot inside _load_config, so environment changes made after import
    may not actually be reflected here — confirm against _load_config.

    Note: This is a manual operation - call after config changes.
    """
    global _CONFIG, _provider_registry_cache, _cached_provider, PROVIDER_REGISTRY, LLM_PROVIDER
    # Re-read config.yaml and drop both memoized values.
    _CONFIG = _load_config()
    _provider_registry_cache = None
    _cached_provider = None
    # Rebuild module-level variables
    # Order matters: the registry must be rebuilt before _get_provider()
    # checks LAST_PROVIDER membership against it.
    PROVIDER_REGISTRY = _get_provider_registry()
    LLM_PROVIDER = _get_provider()
311
+
312
+
313
def get_providers():
    """Return the names of all providers known to PROVIDER_REGISTRY.

    Returns:
        list: Provider name strings, in registry insertion order.
    """
    return [name for name in PROVIDER_REGISTRY]
320
+
321
+
322
# ============================================================================
# PROVIDER REGISTRY - Centralized provider configuration
# ============================================================================

# Build provider registry and export as module-level constants (loaded once)
# reload_config() reassigns both of these after re-reading config.yaml.
PROVIDER_REGISTRY = _get_provider_registry()
LLM_PROVIDER = _get_provider()
329
+
330
+
331
# Public API of this module. Several exported names (TOOLS_ENABLED,
# ALLOWED_COMMANDS, generate_config_template, ...) are defined further down
# the file; __all__ only affects star-imports, so listing them early is fine.
__all__ = [
    "CONFIG_PATH",
    "PROVIDER_REGISTRY",
    "get_providers",
    "LLM_PROVIDER",
    "TOOLS_ENABLED",
    "TOOLS_REQUIRE_CONFIRMATION",
    "WEB_SEARCH_REQUIRE_CONFIRMATION",
    "APPROVE_MODES",
    "APPROVE_MODE_LABELS",
    "INTERACTION_MODES",
    "INTERACTION_MODE_LABELS",
    "LEARNING_MODES",
    "LEARNING_MODE_LABELS",
    "PLAN_TYPES",
    "PLAN_TYPE_LABELS",
    "ALLOWED_COMMANDS",
    "get_provider_config",
    "generate_config_template",
    "reload_config",
]
352
+
353
+
354
def generate_config_template():
    """Generate a default config.yaml template from the provider registry.

    (Docstring previously said "config.json"; the file used throughout this
    module is config.yaml — see CONFIG_PATH.)

    Merges every provider's "config_keys" defaults into one flat mapping.

    Returns:
        dict: Config key -> default value.
    """
    template = {}
    for provider_cfg in PROVIDER_REGISTRY.values():
        # Not every provider is guaranteed to declare config_keys.
        template.update(provider_cfg.get("config_keys", {}))
    return template
361
+
362
# Tooling configuration
TOOLS_ENABLED = True
TOOLS_REQUIRE_CONFIRMATION = False
WEB_SEARCH_REQUIRE_CONFIRMATION = False

# Tool approval modes
APPROVE_MODES = ("safe", "accept_edits")
APPROVE_MODE_LABELS = {
    "safe": "Safe",
    "accept_edits": "Accept Edits",
}

# Interaction modes
INTERACTION_MODES = ("edit", "plan", "learn")
INTERACTION_MODE_LABELS = {
    "edit": "Edit (Full Access)",
    "plan": "Plan (Read-Only)",
    "learn": "Learn (Read-Only)"
}

# Learning modes (sub-modes for Learn interaction mode)
LEARNING_MODES = ("succinct", "balanced", "verbose")
LEARNING_MODE_LABELS = {
    "succinct": "Succinct",
    "balanced": "Balanced",
    "verbose": "Verbose"
}

# Plan types (planning behavior options for Plan interaction mode)
PLAN_TYPES = ("feature", "refactor", "debug", "optimize")
PLAN_TYPE_LABELS = {
    "feature": "Feature",
    "refactor": "Refactor",
    "debug": "Debug",
    "optimize": "Optimize"
}

# Commands that do NOT require approval (safe, read-only commands).
# NOTE(security, review): several entries are not strictly read-only —
# sed/awk can modify files (e.g. `sed -i`), "export" mutates the environment,
# systemctl/service can start or stop services, and python/node/npm/pip can
# execute arbitrary code. Confirm these are intentionally allow-listed.
ALLOWED_COMMANDS = [
    # System queries
    "which", "whereis", "type", "pwd",

    # System info (read-only)
    "ps", "pgrep", "pidof",          # Process info
    "df", "du", "free",              # Resource info
    "uname", "hostname", "uptime",   # System info
    "env", "printenv", "export",     # Environment (read operations)
    "lscpu", "lsblk", "lsof",        # Hardware info
    "date", "cal",                   # Time/date ("uptime" duplicate removed)

    # Network query (read-only)
    "ping", "nslookup", "dig", "ss", "ip", "ifconfig",

    # Package query (read-only only - install/upgrade requires approval)
    "pacman", "dpkg", "apt-cache", "rpm", "dnf", "yum",

    # Text utilities (read-only operations)
    "grep", "egrep", "fgrep", "sed", "awk",
    "cut", "sort", "head", "tail", "wc", "tr", "uniq",

    # Development queries
    "python", "python3", "node", "npm", "pip",  # When used for queries (version, help, etc.)

    # Computer agent debugging tools
    "file", "stat",                  # File inspection
    "md5sum", "sha256sum",           # File checksums
    "systemctl", "service",          # Service management
    "journalctl", "dmesg",           # System logs
    "ltrace",                        # Library call tracer
    "netstat",                       # Network connections (legacy)
    "apt-show", "dpkg-query",        # Package info queries
]
434
+
435
+
436
def get_provider_config(provider: str):
    """Retrieve the configuration dictionary for a given provider.

    Args:
        provider (str): Provider name (e.g., 'local', 'openrouter', 'glm', 'openai').

    Returns:
        dict: Provider config from the PROVIDER_REGISTRY, or an empty dict
        when the provider is unknown.
    """
    try:
        return PROVIDER_REGISTRY[provider]
    except KeyError:
        # Unknown providers yield an empty config rather than raising.
        return {}