vmcode-cli 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/INSTALLATION_METHODS.md +181 -0
  2. package/LICENSE +21 -0
  3. package/README.md +199 -0
  4. package/bin/npm-wrapper.js +171 -0
  5. package/bin/rg +0 -0
  6. package/bin/rg.exe +0 -0
  7. package/config.yaml.example +159 -0
  8. package/package.json +42 -0
  9. package/requirements.txt +7 -0
  10. package/scripts/install.js +132 -0
  11. package/setup.bat +114 -0
  12. package/setup.sh +135 -0
  13. package/src/__init__.py +4 -0
  14. package/src/core/__init__.py +1 -0
  15. package/src/core/agentic.py +2342 -0
  16. package/src/core/chat_manager.py +1201 -0
  17. package/src/core/config_manager.py +269 -0
  18. package/src/core/init.py +161 -0
  19. package/src/core/sub_agent.py +174 -0
  20. package/src/exceptions.py +75 -0
  21. package/src/llm/__init__.py +1 -0
  22. package/src/llm/client.py +149 -0
  23. package/src/llm/config.py +445 -0
  24. package/src/llm/prompts.py +569 -0
  25. package/src/llm/providers.py +402 -0
  26. package/src/llm/token_tracker.py +220 -0
  27. package/src/ui/__init__.py +1 -0
  28. package/src/ui/banner.py +103 -0
  29. package/src/ui/commands.py +489 -0
  30. package/src/ui/displays.py +167 -0
  31. package/src/ui/main.py +351 -0
  32. package/src/ui/prompt_utils.py +162 -0
  33. package/src/utils/__init__.py +1 -0
  34. package/src/utils/editor.py +158 -0
  35. package/src/utils/gitignore_filter.py +149 -0
  36. package/src/utils/logger.py +254 -0
  37. package/src/utils/markdown.py +32 -0
  38. package/src/utils/settings.py +94 -0
  39. package/src/utils/tools/__init__.py +55 -0
  40. package/src/utils/tools/command_executor.py +217 -0
  41. package/src/utils/tools/create_file.py +143 -0
  42. package/src/utils/tools/definitions.py +193 -0
  43. package/src/utils/tools/directory.py +374 -0
  44. package/src/utils/tools/file_editor.py +345 -0
  45. package/src/utils/tools/file_helpers.py +109 -0
  46. package/src/utils/tools/file_reader.py +331 -0
  47. package/src/utils/tools/formatters.py +458 -0
  48. package/src/utils/tools/parallel_executor.py +195 -0
  49. package/src/utils/validation.py +117 -0
  50. package/src/utils/web_search.py +71 -0
  51. package/vmcode-proxy/.env.example +5 -0
  52. package/vmcode-proxy/README.md +235 -0
  53. package/vmcode-proxy/package-lock.json +947 -0
  54. package/vmcode-proxy/package.json +20 -0
  55. package/vmcode-proxy/server.js +248 -0
  56. package/vmcode-proxy/server.js.bak +157 -0
@@ -0,0 +1,458 @@
1
+ """Result formatting utilities for tool output and diffs."""
2
+
3
+ import os
4
+ import re
5
+ import difflib
6
+ import shutil
7
+ from pathlib import Path
8
+ from pygments import highlight
9
+ from pygments.lexers import get_lexer_by_name, get_lexer_for_filename
10
+ from pygments.formatters.terminal256 import Terminal256Formatter
11
+ from pygments.util import ClassNotFound
12
+ from rich.text import Text
13
+
14
# Module-level lexer cache for syntax highlighting: avoids repeated Pygments
# lexer lookups when many diff lines from files of the same type are rendered.
_lexer_cache = {}
16
+
17
+
18
def _get_lexer_for_file(file_path):
    """Return a (cached) Pygments lexer appropriate for *file_path*.

    Args:
        file_path: Path to the file being highlighted; may be None or empty.

    Returns:
        Pygments lexer instance; the plain-text lexer when no match is found.
    """
    if not file_path:
        return get_lexer_by_name('text')

    # Cache by extension when one exists, otherwise by the full file name.
    # Keying only on the suffix made every extension-less file (e.g.
    # "Makefile" vs "Dockerfile") share whichever lexer resolved first.
    cache_key = Path(file_path).suffix or Path(file_path).name
    if cache_key not in _lexer_cache:
        try:
            _lexer_cache[cache_key] = get_lexer_for_filename(file_path)
        except ClassNotFound:
            # Unknown file type: fall back to a no-highlighting text lexer.
            _lexer_cache[cache_key] = get_lexer_by_name('text')
    return _lexer_cache[cache_key]
37
+
38
+
39
def strip_ansi_codes(text: str) -> str:
    """Return *text* with ANSI SGR (color/style) escape sequences removed.

    Args:
        text: Text that may contain ANSI escape codes.

    Returns:
        The same text with every ``ESC[...m`` sequence stripped out.
    """
    # Only SGR ("m"-terminated) sequences are targeted; that is all the
    # formatters in this module emit.
    return re.sub(r'\x1b\[[0-9;]*m', '', text)
51
+
52
+
53
+ def _detect_newline(text):
54
+ """Detect the newline character used in text."""
55
+ if "\r\n" in text:
56
+ return "\r\n"
57
+ if "\n" in text:
58
+ return "\n"
59
+ return os.linesep
60
+
61
+
62
+ def _should_color(color_mode):
63
+ """Determine if diff output should be colorized."""
64
+ if color_mode == "on":
65
+ return True
66
+ if color_mode == "off":
67
+ return False
68
+ if os.environ.get("NO_COLOR"):
69
+ return False
70
+ if os.environ.get("TERM", "").lower() == "dumb":
71
+ return False
72
+ return True
73
+
74
+
75
def _get_bg_ansi(sign, color_mode):
    """Return ``(background ANSI code, reset code)`` for a diff line sign.

    Added lines ("+") get a dark-green background, removed lines ("-") dark
    red; context lines — or any sign when color is disabled — get a pair of
    empty strings.
    """
    if not _should_color(color_mode):
        return "", ""

    backgrounds = {
        "+": "\033[48;5;22m",  # Dark green
        "-": "\033[48;5;52m",  # Dark red
    }
    bg = backgrounds.get(sign)
    if bg is None:
        return "", ""  # No background for context lines
    return bg, "\033[0m"
86
+
87
+
88
def _insert_padding_at_wrap_points(text, terminal_width, prefix_width, bg_ansi):
    """Insert padding spaces at wrap points to fill gaps on all wrapped rows.

    This ensures background color extends through every row when text wraps,
    not just the last row.

    Args:
        text: ANSI-colored text (from Pygments)
        terminal_width: Terminal column width
        prefix_width: Width of line number prefix (e.g., "123 + ")
        bg_ansi: Background ANSI code to preserve after padding

    Returns:
        Text with padding inserted at each wrap point
    """
    # ANSI escape sequence pattern (SGR sequences only — what Pygments emits)
    ansi_pattern = re.compile(r'\x1b\[[0-9;]*m')

    # Split text into segments (alternating: plain text, ANSI code, plain text, ...)
    # Even indices are plain text, odd indices are escape codes — the loop
    # below relies on this parity to know which segments consume columns.
    parts = []
    last_end = 0
    for match in ansi_pattern.finditer(text):
        parts.append(text[last_end:match.start()])  # plain text
        parts.append(match.group(0))  # ANSI code
        last_end = match.end()
    parts.append(text[last_end:])  # remaining plain text

    result = []
    # The first visual row already holds the line-number prefix, so column
    # accounting starts at prefix_width rather than 0.
    current_width = prefix_width

    for i, part in enumerate(parts):
        if i % 2 == 0:  # plain text segment
            # Process character by character; cell_len handles wide glyphs
            # (e.g. CJK or "→" occupy 2 columns) that len() would miscount.
            for char in part:
                char_width = Text.from_ansi(char).cell_len

                # Check if adding this char would wrap
                if current_width + char_width >= terminal_width:
                    # Calculate padding needed to reach end of current row
                    padding_needed = terminal_width - current_width
                    if padding_needed > 0:
                        # Fill current row to edge with background (no reset - let it wrap)
                        result.append(bg_ansi + " " * padding_needed)
                        # Background stays active through wrap to next line
                    current_width = 0  # Reset for next row

                result.append(char)
                current_width += char_width
        else:  # ANSI code segment
            # Escape codes occupy no columns; pass them through untouched.
            result.append(part)

    # Pad the final row to terminal edge to ensure full background coverage
    padding_needed = terminal_width - current_width
    if padding_needed > 0:
        result.append(bg_ansi + " " * padding_needed + "\x1b[0m")

    return ''.join(result)
145
+
146
+
147
def _colorize_numbered_lines(lines, color_mode, file_path=None):
    """Add ANSI color codes to numbered diff lines with background highlights and syntax highlighting.

    Args:
        lines: Numbered diff lines shaped ``"{no:>5} {sign} {text}"``
            (sign at index 6, code text from index 8 — the format produced
            by _build_numbered_diff_lines).
        color_mode: 'auto', 'on', or 'off'.
        file_path: Optional path used to pick a syntax-highlighting lexer.

    Returns:
        List of lines with ANSI styling applied (input returned unchanged
        when color is disabled).
    """
    # Fast-path: skip all colorization if color mode is off
    if not _should_color(color_mode):
        return lines

    # Use cached lexer for the file extension
    lexer = _get_lexer_for_file(file_path)

    colored = []

    for line in lines:
        # Too short to contain a sign/code (index 6/8 would be out of range);
        # treat as a context line and pass through with at most a background.
        if len(line) < 8:
            bg, rst = _get_bg_ansi(" ", color_mode)
            ansi = bg + line + rst if bg else line
            colored.append(ansi)
            continue

        sign = line[6]
        prefix = line[:7]
        code = line[8:]

        # Get background color first so we can preserve it through syntax highlighting
        bg, rst = _get_bg_ansi(sign, color_mode)

        syntax_ansi = highlight(code, lexer, Terminal256Formatter(style='monokai')).rstrip('\n')

        # Replace Pygments' reset codes with reset+reapply background
        # This prevents syntax highlighting from clearing the diff background color
        # Pygments can emit: \x1b[0m, \x1b[39m, \x1b[49m, \x1b[39;49m, \x1b[49;39m, \x1b[39;49;00m, \x1b[49;39;00m
        # Any sequence that resets background (49) needs to reapply our diff background
        if bg:
            # Match all reset sequences that clear the background
            # Pattern matches: \x1b[0m, \x1b[39m, \x1b[49m, \x1b[39;49m, \x1b[49;39m, \x1b[39;49;00m, \x1b[49;39;00m
            reset_pattern = re.compile(r'\x1b\[(?:0m|39m|49m|39;49m|49;39m|39;49;00m|49;39;00m)')
            syntax_ansi = reset_pattern.sub(lambda m: m.group(0) + bg, syntax_ansi)

        if bg:
            # Calculate visible width using Rich's Text to handle Unicode and ANSI codes
            # cell_len properly accounts for wide characters (e.g., → has width 2) and ignores ANSI codes
            prefix_width = len(prefix) + 1  # prefix + space
            code_width = Text.from_ansi(syntax_ansi).cell_len
            # NOTE(review): visible_width is computed but never used below —
            # looks like leftover from an earlier width check; confirm and drop.
            visible_width = prefix_width + code_width
            try:
                terminal_width = shutil.get_terminal_size(fallback=(80, 20)).columns
            except OSError:
                terminal_width = 80

            # Insert padding at wrap points to fill gaps on all wrapped rows
            # This ensures background color extends through every row when text wraps
            padded_code = _insert_padding_at_wrap_points(
                syntax_ansi, terminal_width, prefix_width, bg
            )
            ansi_line = bg + prefix + " " + padded_code + rst
        else:
            ansi_line = prefix + " " + syntax_ansi

        colored.append(ansi_line)

    return colored
207
+
208
+
209
+ def _build_numbered_diff_lines(original_content, new_content, context_lines):
210
+ """Build numbered diff lines from original and new content."""
211
+ if original_content == new_content:
212
+ return [], 0, 0
213
+
214
+ diff_lines = list(difflib.unified_diff(
215
+ original_content.splitlines(keepends=False),
216
+ new_content.splitlines(keepends=False),
217
+ fromfile="old",
218
+ tofile="new",
219
+ n=context_lines,
220
+ lineterm="",
221
+ ))
222
+
223
+ hunk_re = re.compile(r"@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")
224
+ old_line = None
225
+ new_line = None
226
+ removed = 0
227
+ added = 0
228
+ formatted_lines = []
229
+
230
+ for line in diff_lines:
231
+ if line.startswith("--- ") or line.startswith("+++ "):
232
+ continue
233
+ if line.startswith("@@ "):
234
+ match = hunk_re.match(line)
235
+ if match:
236
+ old_line = int(match.group(1))
237
+ new_line = int(match.group(3))
238
+ continue
239
+
240
+ if old_line is None or new_line is None:
241
+ continue
242
+
243
+ sign = line[:1]
244
+ text = line[1:]
245
+ if sign == " ":
246
+ line_no = old_line
247
+ old_line += 1
248
+ new_line += 1
249
+ elif sign == "-":
250
+ line_no = old_line
251
+ old_line += 1
252
+ removed += 1
253
+ elif sign == "+":
254
+ line_no = new_line
255
+ new_line += 1
256
+ added += 1
257
+ else:
258
+ continue
259
+
260
+ formatted_lines.append(f"{line_no:>5} {sign} {text}")
261
+
262
+ return formatted_lines, added, removed
263
+
264
+
265
def _build_diff(
    original_content,
    new_content,
    file_path,
    context_lines,
    color_mode,
    show_header=False,
    repo_root=None
):
    """Build a diff string with either a filename header or a summary line.

    Args:
        original_content: Original file content.
        new_content: Modified file content.
        file_path: Path to the file being edited.
        context_lines: Number of context lines for the diff.
        color_mode: Color mode for diff display ('auto', 'on', 'off').
        show_header: If True, show a filename header (preview mode).
        repo_root: Required when show_header=True to compute a relative path.

    Returns:
        Formatted diff string: header + body in preview mode, otherwise
        body + "Changes: +A, -R" summary.
    """
    numbered, added, removed = _build_numbered_diff_lines(
        original_content, new_content, context_lines,
    )

    header = None
    if show_header:
        # Prefer a repo-relative path; fall back to the raw path when the
        # file lies outside repo_root or repo_root is None.
        try:
            rel_path = file_path.relative_to(repo_root)
        except (ValueError, TypeError):
            rel_path = file_path
        header = f"\x1b[1m{rel_path} | -{removed} | +{added}\x1b[0m"

    # Identical contents (or an empty diff) short-circuit before colorizing.
    if not numbered:
        return f"\n{header}\n(no changes)" if show_header else "(no changes)"

    # Guard against unexpected mode strings before colorizing.
    if color_mode not in ("auto", "on", "off"):
        color_mode = "auto"
    colored = _colorize_numbered_lines(numbered, color_mode, file_path)

    if show_header:
        return f"\n{header}\n" + "\n".join(colored)
    return "\n".join(colored + [f"Changes: +{added}, -{removed}"])
319
+
320
+
321
+ def _normalize_search_replace_for_newlines(search, replace, newline):
322
+ """Normalize search/replace text to match file's newline characters."""
323
+ if newline == "\n":
324
+ return search, replace, False
325
+ if "\n" not in search or "\r\n" in search:
326
+ return search, replace, False
327
+ normalized_search = search.replace("\n", newline)
328
+ normalized_replace = replace.replace("\n", newline)
329
+ return normalized_search, normalized_replace, True
330
+
331
+
332
def format_tool_result(result, command=None, is_rg=False, debug_mode=False):
    """Format subprocess result for model consumption.

    Args:
        result: subprocess.CompletedProcess result
        command: The command that was executed (used to detect rg's
            --files-with-matches mode)
        is_rg: Whether this was an rg command (enables match counting,
            "no matches" handling, and output truncation)
        debug_mode: Accepted for interface compatibility; currently unused.

    Returns:
        str: Formatted result string beginning with the exit code
    """
    output = ((result.stdout or "") + (result.stderr or "")).strip()

    if not output:
        if is_rg and result.returncode == 1:
            output = "no matches found"
        else:
            output = "(no output)"

    # For rg commands, apply smart truncation to prevent context explosion
    if is_rg:
        # rg exit codes: 0 = matches found, 1 = no matches, >=2 = error.
        if result.returncode == 1:
            return f"exit_code={result.returncode}\nNo matches found\n\n"

        # Detect --files-with-matches ONCE so the label and the counting
        # strategy can never disagree (previously checked twice with
        # slightly different guards).
        files_mode = "--files-with-matches" in (command or "").lower()
        label = "files" if files_mode else "matches"
        MAX_LINES = 100  # Maximum lines of output to show

        if result.returncode == 0:
            if files_mode:
                # files-with-matches mode: each non-blank line is a file.
                count = sum(1 for line in output.splitlines() if line.strip())
            else:
                # Normal mode: count match lines by finding ':number:' pattern
                # Match format 1: path:line:content (colon before line number)
                # Match format 2: line:content (when searching single file)
                # Context format: path-line:content (hyphen before line number)
                path_line_matches = re.findall(r':\d+:', output)
                if path_line_matches:
                    count = len(path_line_matches)
                else:
                    # Single file search: count lines starting with line number
                    count = len(re.findall(r'^\d+:', output, re.MULTILINE))
        else:
            # Error occurred (exit code 2 or higher)
            count = 0

        if count == 0:
            # Exit code 0 but nothing countable (or an error): pass through.
            return f"exit_code={result.returncode}\n{output}\n\n"

        # Truncate output if it exceeds MAX_LINES
        output_lines = output.splitlines()
        if len(output_lines) > MAX_LINES:
            truncated = "\n".join(output_lines[:MAX_LINES])
            omitted = len(output_lines) - MAX_LINES
            output = f"{truncated}\n\n... ({omitted} more {label} truncated)"
        else:
            output = "\n".join(output_lines)

        return f"exit_code={result.returncode}\n{label}={count}\n{output}\n\n"

    return f"exit_code={result.returncode}\n{output}\n\n"
403
+
404
+
405
def format_file_result(exit_code, content=None, error=None, path=None,
                       lines_read=None, start_line=None, truncated=False, items_count=None,
                       truncation_info=None):
    """Format file operation result for model consumption.

    Args:
        exit_code: Exit code (0 for success, 1 for error)
        content: Optional content string (for successful reads)
        error: Optional error message (for failures)
        path: Path to the file/directory
        lines_read: Number of lines read (for file reads)
        start_line: 1-based starting line number for file reads
        truncated: Whether content was truncated (for file reads)
        items_count: Number of items (for directory listings)
        truncation_info: Optional dict with truncation metadata (total, shown, omitted)

    Returns:
        str: Formatted result with exit code and metadata
    """
    meta = [f"exit_code={exit_code}"]

    # Optional scalar fields, emitted in a fixed order when present.
    for key, value in (("path", path),
                       ("lines_read", lines_read),
                       ("start_line", start_line)):
        if value is not None:
            meta.append(f"{key}={value}")

    if truncated:
        meta.append("truncated=true")
        if truncation_info:
            meta.append(
                "truncation_info="
                f"total:{truncation_info['total']},"
                f"shown:{truncation_info['shown']},"
                f"omitted:{truncation_info['omitted']}"
            )

    if items_count is not None:
        meta.append(f"items_count={items_count}")

    header = " ".join(meta)

    if error:
        return f"{header}\nerror: {error}"
    if content is not None:
        return f"{header}\n{content}\n\n"
    return f"{header}\n\n"
456
+
457
+
458
+ # format_file_preview removed - now using Rich Syntax directly in agentic.py
@@ -0,0 +1,195 @@
1
+ """Concurrent tool execution engine.
2
+
3
+ This module provides parallel execution of multiple tool calls using
4
+ ThreadPoolExecutor for I/O-bound operations like file reads and web searches.
5
+ """
6
+
7
+ import concurrent.futures
8
+ from typing import List, Dict, Callable, Any, Tuple
9
+ from dataclasses import dataclass
10
+
11
+
12
@dataclass
class ToolCall:
    """Represents a single tool call.

    Attributes:
        tool_id: Unique identifier for this tool call
        function_name: Name of the tool function to execute
        arguments: Dict of arguments to pass to the tool handler
        call_index: Index in original tool_calls array (for order preservation)
    """
    tool_id: str          # unique id for this call
    function_name: str    # key used to look up the handler in handler_map
    arguments: dict       # keyword data forwarded to the tool handler
    call_index: int       # position in the original tool_calls list
26
+
27
+
28
@dataclass
class ToolResult:
    """Result of a tool execution.

    Attributes:
        tool_id: Unique identifier for the tool call
        call_index: Index in original tool_calls array (for order preservation)
        success: Whether the tool executed successfully
        result: String result from tool execution (if successful)
        error: Error message (if failed)
        should_exit: Whether the tool requested the orchestration loop to exit
    """
    tool_id: str
    call_index: int
    success: bool
    result: str
    # NOTE(review): annotated `str` but defaults to None — effectively
    # Optional[str]; None means "no error occurred".
    error: str = None
    should_exit: bool = False
46
+
47
+
48
class ParallelToolExecutor:
    """Runs batches of tool calls concurrently with isolated error handling.

    A ThreadPoolExecutor provides concurrency for I/O-bound tools; each
    call's ``call_index`` preserves the caller's ordering in the returned
    results, and a failure in one tool never aborts the others. Batches of
    exactly one call skip the thread pool entirely.
    """

    def __init__(self, max_workers: int = 5):
        """Initialize executor.

        Args:
            max_workers: Maximum number of concurrent tool executions
        """
        self.max_workers = max_workers

    def execute_tools(
        self,
        tool_calls: List[ToolCall],
        handler_map: Dict[str, Callable],
        context: dict
    ) -> Tuple[List[ToolResult], bool]:
        """Execute multiple tools concurrently.

        Args:
            tool_calls: List of ToolCall objects
            handler_map: Dict mapping function_name to handler callable
            context: Dict containing repo_root, console, chat_manager, etc.

        Returns:
            Tuple of (results in call_index order, had_any_errors)
        """
        # Lone call: run inline, no threading overhead.
        if len(tool_calls) == 1:
            return self._execute_single(tool_calls[0], handler_map, context)

        worker_count = min(self.max_workers, len(tool_calls))
        outcomes: List[ToolResult] = []

        with concurrent.futures.ThreadPoolExecutor(max_workers=worker_count) as pool:
            # Map each future back to its originating call so a raised
            # exception can still be attributed to the right tool_id.
            pending = {
                pool.submit(self._execute_single_tool, call, handler_map, context): call
                for call in tool_calls
            }

            for done in concurrent.futures.as_completed(pending):
                call = pending[done]
                try:
                    outcomes.append(done.result())
                except Exception as exc:
                    # Should be rare — _execute_single_tool already guards —
                    # but record any escaped exception as a failed result.
                    outcomes.append(ToolResult(
                        tool_id=call.tool_id,
                        call_index=call.call_index,
                        success=False,
                        result="",
                        error=str(exc)
                    ))

        # Completion order is nondeterministic; restore caller order.
        outcomes.sort(key=lambda outcome: outcome.call_index)
        return outcomes, any(not outcome.success for outcome in outcomes)

    def _execute_single(
        self,
        tool_call: ToolCall,
        handler_map: Dict[str, Callable],
        context: dict
    ) -> Tuple[List[ToolResult], bool]:
        """Fast path for a lone tool call (skips the thread pool).

        Returns:
            Tuple of (single-element result list, had_errors)
        """
        outcome = self._execute_single_tool(tool_call, handler_map, context)
        return [outcome], not outcome.success

    def _execute_single_tool(
        self,
        tool_call: ToolCall,
        handler_map: Dict[str, Callable],
        context: dict
    ) -> ToolResult:
        """Run one tool call, converting every failure into a ToolResult.

        Returns:
            ToolResult with execution outcome
        """
        handler = handler_map.get(tool_call.function_name)

        if not handler:
            return ToolResult(
                tool_id=tool_call.tool_id,
                call_index=tool_call.call_index,
                success=False,
                result="",
                error=f"Unknown tool '{tool_call.function_name}'"
            )

        try:
            should_exit, payload = handler(
                tool_call.tool_id,
                tool_call.arguments,
                context.get('thinking_indicator')
            )
        except Exception as exc:
            return ToolResult(
                tool_id=tool_call.tool_id,
                call_index=tool_call.call_index,
                success=False,
                result="",
                error=str(exc)
            )

        return ToolResult(
            tool_id=tool_call.tool_id,
            call_index=tool_call.call_index,
            success=True,
            result=payload,
            should_exit=should_exit
        )
+ )