tunacode-cli 0.0.55__py3-none-any.whl → 0.0.57__py3-none-any.whl

This diff compares the content of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.

Potentially problematic release: this version of tunacode-cli might be problematic.

Files changed (47)
  1. tunacode/cli/commands/implementations/plan.py +50 -0
  2. tunacode/cli/commands/registry.py +3 -0
  3. tunacode/cli/repl.py +327 -186
  4. tunacode/cli/repl_components/command_parser.py +37 -4
  5. tunacode/cli/repl_components/error_recovery.py +79 -1
  6. tunacode/cli/repl_components/output_display.py +21 -1
  7. tunacode/cli/repl_components/tool_executor.py +12 -0
  8. tunacode/configuration/defaults.py +8 -0
  9. tunacode/constants.py +10 -2
  10. tunacode/core/agents/agent_components/agent_config.py +212 -22
  11. tunacode/core/agents/agent_components/node_processor.py +46 -40
  12. tunacode/core/code_index.py +83 -29
  13. tunacode/core/state.py +44 -0
  14. tunacode/core/token_usage/usage_tracker.py +2 -2
  15. tunacode/core/tool_handler.py +20 -0
  16. tunacode/prompts/system.md +117 -490
  17. tunacode/services/mcp.py +29 -7
  18. tunacode/tools/base.py +110 -0
  19. tunacode/tools/bash.py +96 -1
  20. tunacode/tools/exit_plan_mode.py +273 -0
  21. tunacode/tools/glob.py +366 -33
  22. tunacode/tools/grep.py +226 -77
  23. tunacode/tools/grep_components/result_formatter.py +98 -4
  24. tunacode/tools/list_dir.py +132 -2
  25. tunacode/tools/present_plan.py +288 -0
  26. tunacode/tools/read_file.py +91 -0
  27. tunacode/tools/run_command.py +99 -0
  28. tunacode/tools/schema_assembler.py +167 -0
  29. tunacode/tools/todo.py +108 -1
  30. tunacode/tools/update_file.py +94 -0
  31. tunacode/tools/write_file.py +86 -0
  32. tunacode/types.py +58 -0
  33. tunacode/ui/input.py +14 -2
  34. tunacode/ui/keybindings.py +25 -4
  35. tunacode/ui/panels.py +53 -8
  36. tunacode/ui/prompt_manager.py +25 -2
  37. tunacode/ui/tool_ui.py +3 -2
  38. tunacode/utils/json_utils.py +206 -0
  39. tunacode/utils/message_utils.py +14 -4
  40. tunacode/utils/ripgrep.py +332 -9
  41. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.57.dist-info}/METADATA +8 -3
  42. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.57.dist-info}/RECORD +46 -42
  43. tunacode/tools/read_file_async_poc.py +0 -196
  44. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.57.dist-info}/WHEEL +0 -0
  45. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.57.dist-info}/entry_points.txt +0 -0
  46. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.57.dist-info}/licenses/LICENSE +0 -0
  47. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.57.dist-info}/top_level.txt +0 -0
tunacode/tools/grep.py CHANGED
@@ -12,13 +12,17 @@ CLAUDE_ANCHOR[grep-module]: Fast parallel file search with 3-second deadline
 """
 
 import asyncio
+import logging
 import re
-import subprocess
 import time
 from concurrent.futures import ThreadPoolExecutor
+from functools import lru_cache
 from pathlib import Path
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
+import defusedxml.ElementTree as ET
+
+from tunacode.configuration.defaults import DEFAULT_USER_CONFIG
 from tunacode.exceptions import TooBroadPatternError, ToolExecutionError
 from tunacode.tools.base import BaseTool
 from tunacode.tools.grep_components import (
@@ -28,6 +32,10 @@ from tunacode.tools.grep_components import (
     SearchResult,
 )
 from tunacode.tools.grep_components.result_formatter import ResultFormatter
+from tunacode.utils.ripgrep import RipgrepExecutor
+from tunacode.utils.ripgrep import metrics as ripgrep_metrics
+
+logger = logging.getLogger(__name__)
 
 
 class ParallelGrep(BaseTool):
@@ -42,11 +50,144 @@ class ParallelGrep(BaseTool):
         self._file_filter = FileFilter()
         self._pattern_matcher = PatternMatcher()
         self._result_formatter = ResultFormatter()
+        self._ripgrep_executor = RipgrepExecutor()
+
+        # Load configuration
+        self._config = self._load_ripgrep_config()
 
     @property
     def tool_name(self) -> str:
         return "grep"
 
+    @lru_cache(maxsize=1)
+    def _get_base_prompt(self) -> str:
+        """Load and return the base prompt from XML file.
+
+        Returns:
+            str: The loaded prompt from XML or a default prompt
+        """
+        try:
+            # Load prompt from XML file
+            prompt_file = Path(__file__).parent / "prompts" / "grep_prompt.xml"
+            if prompt_file.exists():
+                tree = ET.parse(prompt_file)
+                root = tree.getroot()
+                description = root.find("description")
+                if description is not None:
+                    return description.text.strip()
+        except Exception as e:
+            logger.warning(f"Failed to load XML prompt for grep: {e}")
+
+        # Fallback to default prompt
+        return """A powerful search tool built on ripgrep
+
+Usage:
+- ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command.
+- Supports full regex syntax
+- Filter files with glob or type parameters
+- Multiple output modes available"""
+
+    @lru_cache(maxsize=1)
+    def _get_parameters_schema(self) -> Dict[str, Any]:
+        """Get the parameters schema for grep tool.
+
+        Returns:
+            Dict containing the JSON schema for tool parameters
+        """
+        # Try to load from XML first
+        try:
+            prompt_file = Path(__file__).parent / "prompts" / "grep_prompt.xml"
+            if prompt_file.exists():
+                tree = ET.parse(prompt_file)
+                root = tree.getroot()
+                parameters = root.find("parameters")
+                if parameters is not None:
+                    schema: Dict[str, Any] = {"type": "object", "properties": {}, "required": []}
+                    required_fields: List[str] = []
+
+                    for param in parameters.findall("parameter"):
+                        name = param.get("name")
+                        required = param.get("required", "false").lower() == "true"
+                        param_type = param.find("type")
+                        description = param.find("description")
+
+                        if name and param_type is not None:
+                            prop = {
+                                "type": param_type.text.strip(),
+                                "description": description.text.strip()
+                                if description is not None
+                                else "",
+                            }
+
+                            # Add enum values if present
+                            enums = param.findall("enum")
+                            if enums:
+                                prop["enum"] = [e.text.strip() for e in enums]
+
+                            schema["properties"][name] = prop
+                            if required:
+                                required_fields.append(name)
+
+                    schema["required"] = required_fields
+                    return schema
+        except Exception as e:
+            logger.warning(f"Failed to load parameters from XML for grep: {e}")
+
+        # Fallback to hardcoded schema
+        return {
+            "type": "object",
+            "properties": {
+                "pattern": {
+                    "type": "string",
+                    "description": "Regular expression pattern to search for",
+                },
+                "directory": {"type": "string", "description": "Directory to search in"},
+                "include": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "description": "File patterns to include",
+                },
+                "exclude": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "description": "File patterns to exclude",
+                },
+                "max_results": {"type": "integer", "description": "Maximum number of results"},
+                "context_before": {
+                    "type": "integer",
+                    "description": "Lines of context before matches",
+                },
+                "context_after": {
+                    "type": "integer",
+                    "description": "Lines of context after matches",
+                },
+            },
+            "required": ["pattern"],
+        }
+
+    def _load_ripgrep_config(self) -> Dict:
+        """Load ripgrep configuration from settings."""
+        try:
+            settings = DEFAULT_USER_CONFIG.get("settings", {})
+            return settings.get(
+                "ripgrep",
+                {
+                    "timeout": 10,
+                    "max_buffer_size": 1048576,
+                    "max_results": 100,
+                    "enable_metrics": False,
+                    "debug": False,
+                },
+            )
+        except Exception:
+            return {
+                "timeout": 10,
+                "max_buffer_size": 1048576,
+                "max_results": 100,
+                "enable_metrics": False,
+                "debug": False,
+            }
+
     async def _execute(
         self,
         pattern: str,
@@ -176,98 +317,106 @@ class ParallelGrep(BaseTool):
         self, pattern: str, candidates: List[Path], config: SearchConfig
     ) -> List[SearchResult]:
         """
-        Run ripgrep on pre-filtered file list with first match deadline.
+        Run ripgrep on pre-filtered file list using the enhanced RipgrepExecutor.
         """
 
-        def run_ripgrep_filtered():
-            cmd = ["rg", "--json"]
+        def run_enhanced_ripgrep():
+            """Execute ripgrep search using the new executor."""
+            start_time = time.time()
+            first_match_time = None
+            results = []
 
-            # Add configuration flags
-            if not config.case_sensitive:
-                cmd.append("--ignore-case")
-            if config.context_lines > 0:
-                cmd.extend(["--context", str(config.context_lines)])
-            if config.max_results:
-                cmd.extend(["--max-count", str(config.max_results)])
+            # Configure timeout from settings
+            timeout = min(self._config.get("timeout", 10), config.timeout_seconds)
 
-            # Add pattern and explicit file list
-            cmd.append(pattern)
-            cmd.extend(str(f) for f in candidates)
+            # If ripgrep executor is using fallback, skip this method entirely
+            if self._ripgrep_executor._use_python_fallback:
+                # Return empty to trigger Python fallback in the calling function
+                return []
 
             try:
-                # Start the process
-                process = subprocess.Popen(
-                    cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1
+                # Use the enhanced executor with support for context lines
+                # Note: Currently searching all files, not using candidates
+                # This is a limitation that should be addressed in future enhancement
+                search_results = self._ripgrep_executor.search(
+                    pattern=pattern,
+                    path=".",  # Search in current directory
+                    timeout=timeout,
+                    max_matches=config.max_results,
+                    case_insensitive=not config.case_sensitive,
+                    context_before=config.context_lines,
+                    context_after=config.context_lines,
                 )
 
-                # Monitor for first match within deadline
-                start_time = time.time()
-                output_lines = []
-                first_match_found = False
+                # Track first match time for metrics
+                if search_results and first_match_time is None:
+                    first_match_time = time.time() - start_time
 
-                while True:
                 # Check if we exceeded the first match deadline
-                    if (
-                        not first_match_found
-                        and (time.time() - start_time) > config.first_match_deadline
-                    ):
-                        process.kill()
-                        process.wait()
+                if first_match_time > config.first_match_deadline:
+                    if self._config.get("debug", False):
+                        logger.debug(
+                            f"Search exceeded first match deadline: {first_match_time:.2f}s"
+                        )
                     raise TooBroadPatternError(pattern, config.first_match_deadline)
 
-                    # Check if process is still running
-                    if process.poll() is not None:
-                        # Process finished, get any remaining output
-                        remaining_output, _ = process.communicate()
-                        if remaining_output:
-                            output_lines.extend(remaining_output.splitlines())
-                        break
-
-                    # Try to read a line (non-blocking)
-                    try:
-                        # Use a small timeout to avoid blocking indefinitely
-                        line = process.stdout.readline()
-                        if line:
-                            output_lines.append(line.rstrip())
-                            # Check if this is a match line
-                            if '"type":"match"' in line:
-                                first_match_found = True
-                    except Exception:
-                        pass
-
-                    # Small sleep to avoid busy waiting
-                    time.sleep(0.01)
-
-                # Check exit code
-                if process.returncode == 0 or output_lines:
-                    # Return output even if exit code is non-zero but we have matches
-                    return "\n".join(output_lines)
-                else:
-                    return None
+                # Parse results
+                for result_line in search_results:
+                    # Parse ripgrep output format "file:line:content"
+                    parts = result_line.split(":", 2)
+                    if len(parts) >= 3:
+                        # Filter to only include results from candidates
+                        file_path = Path(parts[0])
+                        if file_path not in candidates:
+                            continue
+
+                        try:
+                            search_result = SearchResult(
+                                file_path=parts[0],
+                                line_number=int(parts[1]),
+                                line_content=parts[2] if len(parts) > 2 else "",
+                                match_start=0,
+                                match_end=len(parts[2]) if len(parts) > 2 else 0,
+                                context_before=[],
+                                context_after=[],
+                                relevance_score=1.0,
+                            )
+                            results.append(search_result)
+
+                            # Stop if we have enough results
+                            if config.max_results and len(results) >= config.max_results:
+                                break
+                        except (ValueError, IndexError):
+                            continue
 
             except TooBroadPatternError:
                 raise
-            except (subprocess.TimeoutExpired, FileNotFoundError):
-                return None
-            except Exception:
-                # Make sure to clean up the process
-                if "process" in locals():
-                    try:
-                        process.kill()
-                        process.wait()
-                    except Exception:
-                        pass
-                return None
-
-        # Run ripgrep with monitoring in thread pool
+            except Exception as e:
+                if self._config.get("debug", False):
+                    logger.debug(f"Search error: {e}")
+                # Return empty to trigger fallback
+                return []
+
+            # Record metrics if enabled
+            if self._config.get("enable_metrics", False):
+                total_time = time.time() - start_time
+                ripgrep_metrics.record_search(
+                    duration=total_time, used_fallback=self._ripgrep_executor._use_python_fallback
+                )
+
+            if self._config.get("debug", False):
+                logger.debug(
+                    f"Ripgrep search completed in {total_time:.2f}s "
+                    f"(first match: {first_match_time:.2f}s if found)"
+                )
+
+            return results
+
+        # Run the enhanced ripgrep search
         try:
-            output = await asyncio.get_event_loop().run_in_executor(
-                self._executor, run_ripgrep_filtered
+            return await asyncio.get_event_loop().run_in_executor(
+                self._executor, run_enhanced_ripgrep
             )
-            if output:
-                parsed = self._pattern_matcher.parse_ripgrep_output(output)
-                return parsed
-            return []
 
         except TooBroadPatternError:
             raise
tunacode/tools/grep_components/result_formatter.py CHANGED
@@ -1,21 +1,53 @@
 """
+Extended result formatter with multiple output modes for flexible presentation.
 Result formatting functionality for the grep tool.
 """
 
-from typing import List
+from typing import Dict, List
 
 from .search_result import SearchConfig, SearchResult
 
 
 class ResultFormatter:
-    """Handles formatting of search results for display."""
+    """Handles formatting of search results for display with multiple output modes."""
 
     @staticmethod
-    def format_results(results: List[SearchResult], pattern: str, config: SearchConfig) -> str:
-        """Format search results for display."""
+    def format_results(
+        results: List[SearchResult],
+        pattern: str,
+        config: SearchConfig,
+        output_mode: str = "content",
+    ) -> str:
+        """Format search results for display.
+
+        Args:
+            results: List of search results
+            pattern: Search pattern
+            config: Search configuration
+            output_mode: Output format mode:
+                - "content": Show matching lines with context (default)
+                - "files_with_matches": Show only file paths
+                - "count": Show match counts per file
+                - "json": JSON format for programmatic use
+
+        Returns:
+            Formatted string based on output mode
+        """
         if not results:
             return f"No matches found for pattern: {pattern}"
 
+        if output_mode == "files_with_matches":
+            return ResultFormatter._format_files_only(results, pattern)
+        elif output_mode == "count":
+            return ResultFormatter._format_count(results, pattern)
+        elif output_mode == "json":
+            return ResultFormatter._format_json(results, pattern)
+        else:  # Default to "content"
+            return ResultFormatter._format_content(results, pattern, config)
+
+    @staticmethod
+    def _format_content(results: List[SearchResult], pattern: str, config: SearchConfig) -> str:
+        """Format results with full content and context."""
         output = []
         output.append(f"Found {len(results)} matches for pattern: {pattern}")
         output.append("=" * 60)
@@ -43,3 +75,65 @@ class ResultFormatter:
                 output.append(f" {line_num:4d}│ {context_line}")
 
         return "\n".join(output)
+
+    @staticmethod
+    def _format_files_only(results: List[SearchResult], pattern: str) -> str:
+        """Format results showing only file paths."""
+        # Collect unique file paths
+        files = sorted(set(r.file_path for r in results))
+
+        output = []
+        output.append(f"Files with matches for pattern: {pattern}")
+        output.append(f"Total files: {len(files)}")
+        output.append("=" * 60)
+
+        for file_path in files:
+            output.append(file_path)
+
+        return "\n".join(output)
+
+    @staticmethod
+    def _format_count(results: List[SearchResult], pattern: str) -> str:
+        """Format results showing match counts per file."""
+        # Count matches per file
+        file_counts: Dict[str, int] = {}
+        for result in results:
+            file_counts[result.file_path] = file_counts.get(result.file_path, 0) + 1
+
+        output = []
+        output.append(f"Match counts for pattern: {pattern}")
+        output.append(f"Total matches: {len(results)} across {len(file_counts)} files")
+        output.append("=" * 60)
+
+        # Sort by count (descending) then by file path
+        sorted_counts = sorted(file_counts.items(), key=lambda x: (-x[1], x[0]))
+
+        for file_path, count in sorted_counts:
+            output.append(f"{count:5d} {file_path}")
+
+        return "\n".join(output)
+
+    @staticmethod
+    def _format_json(results: List[SearchResult], pattern: str) -> str:
+        """Format results as JSON for programmatic use."""
+        import json
+
+        # Convert results to JSON-serializable format
+        json_results = []
+        for result in results:
+            json_results.append(
+                {
+                    "file": result.file_path,
+                    "line": result.line_number,
+                    "content": result.line_content,
+                    "match_start": result.match_start,
+                    "match_end": result.match_end,
+                    "context_before": result.context_before,
+                    "context_after": result.context_after,
+                    "score": result.relevance_score,
+                }
+            )
+
+        output_data = {"pattern": pattern, "total_matches": len(results), "results": json_results}
+
+        return json.dumps(output_data, indent=2)
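
A quick usage sketch for the new output modes. The FakeResult stand-in below is hypothetical: it only mirrors the attribute names the formatter reads, while the package's real class lives in tunacode/tools/grep_components/search_result.py. The "count" and "files_with_matches" branches never touch the config argument, so None suffices for a demonstration.

    from dataclasses import dataclass, field
    from typing import List

    from tunacode.tools.grep_components.result_formatter import ResultFormatter

    @dataclass
    class FakeResult:  # stand-in with the fields the formatter reads
        file_path: str
        line_number: int
        line_content: str
        match_start: int = 0
        match_end: int = 0
        context_before: List[str] = field(default_factory=list)
        context_after: List[str] = field(default_factory=list)
        relevance_score: float = 1.0

    results = [
        FakeResult("src/app.py", 10, "import foo"),
        FakeResult("src/app.py", 42, "foo.bar()"),
        FakeResult("src/util.py", 7, "from foo import baz"),
    ]

    # "count" mode: per-file totals, sorted by count descending
    print(ResultFormatter.format_results(results, "foo", None, output_mode="count"))
    # "files_with_matches" mode: unique file paths only
    print(ResultFormatter.format_results(results, "foo", None, output_mode="files_with_matches"))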
tunacode/tools/list_dir.py CHANGED
@@ -6,14 +6,20 @@ Provides efficient directory listing without using shell commands.
 """
 
 import asyncio
+import logging
 import os
+from functools import lru_cache
 from pathlib import Path
-from typing import List, Tuple
+from typing import Any, Dict, List, Tuple
+
+import defusedxml.ElementTree as ET
 
 from tunacode.exceptions import ToolExecutionError
 from tunacode.tools.base import FileBasedTool
 from tunacode.types import FilePath, ToolResult
 
+logger = logging.getLogger(__name__)
+
 
 class ListDirTool(FileBasedTool):
     """Tool for listing directory contents without shell commands."""
@@ -22,6 +28,92 @@
     def tool_name(self) -> str:
         return "ListDir"
 
+    @lru_cache(maxsize=1)
+    def _get_base_prompt(self) -> str:
+        """Load and return the base prompt from XML file.
+
+        Returns:
+            str: The loaded prompt from XML or a default prompt
+        """
+        try:
+            # Load prompt from XML file
+            prompt_file = Path(__file__).parent / "prompts" / "list_dir_prompt.xml"
+            if prompt_file.exists():
+                tree = ET.parse(prompt_file)
+                root = tree.getroot()
+                description = root.find("description")
+                if description is not None:
+                    return description.text.strip()
+        except Exception as e:
+            logger.warning(f"Failed to load XML prompt for list_dir: {e}")
+
+        # Fallback to default prompt
+        return """Lists files and directories in a given path"""
+
+    @lru_cache(maxsize=1)
+    def _get_parameters_schema(self) -> Dict[str, Any]:
+        """Get the parameters schema for list_dir tool.
+
+        Returns:
+            Dict containing the JSON schema for tool parameters
+        """
+        # Try to load from XML first
+        try:
+            prompt_file = Path(__file__).parent / "prompts" / "list_dir_prompt.xml"
+            if prompt_file.exists():
+                tree = ET.parse(prompt_file)
+                root = tree.getroot()
+                parameters = root.find("parameters")
+                if parameters is not None:
+                    schema: Dict[str, Any] = {"type": "object", "properties": {}, "required": []}
+                    required_fields: List[str] = []
+
+                    for param in parameters.findall("parameter"):
+                        name = param.get("name")
+                        required = param.get("required", "false").lower() == "true"
+                        param_type = param.find("type")
+                        description = param.find("description")
+
+                        if name and param_type is not None:
+                            prop = {
+                                "type": param_type.text.strip(),
+                                "description": description.text.strip()
+                                if description is not None
+                                else "",
+                            }
+
+                            # Handle array types
+                            if param_type.text.strip() == "array":
+                                items = param.find("items")
+                                if items is not None:
+                                    prop["items"] = {"type": items.text.strip()}
+
+                            schema["properties"][name] = prop
+                            if required:
+                                required_fields.append(name)
+
+                    schema["required"] = required_fields
+                    return schema
+        except Exception as e:
+            logger.warning(f"Failed to load parameters from XML for list_dir: {e}")
+
+        # Fallback to hardcoded schema
+        return {
+            "type": "object",
+            "properties": {
+                "path": {
+                    "type": "string",
+                    "description": "The absolute path to the directory to list",
+                },
+                "ignore": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "description": "List of glob patterns to ignore",
+                },
+            },
+            "required": ["path"],
+        }
+
     async def _execute(
         self, directory: FilePath = ".", max_entries: int = 200, show_hidden: bool = False
     ) -> ToolResult:
@@ -48,7 +140,34 @@
         if not dir_path.is_dir():
             raise NotADirectoryError(f"Not a directory: {dir_path}")
 
-        # Collect entries in a background thread to prevent blocking the event loop
+        # Try to use cached data from CodeIndex first
+        try:
+            from tunacode.core.code_index import CodeIndex
+
+            index = CodeIndex.get_instance()
+            cached_entries = index.get_directory_contents(dir_path)
+
+            if cached_entries:
+                # Filter cached entries based on show_hidden
+                if not show_hidden:
+                    cached_entries = [name for name in cached_entries if not name.startswith(".")]
+
+                # Limit entries and format output
+                limited_entries = cached_entries[:max_entries]
+
+                # Return simple format for cached results (names only for speed)
+                if limited_entries:
+                    return f"Files in {dir_path}:\n" + "\n".join(
+                        f" {name}" for name in limited_entries
+                    )
+                else:
+                    return f"Directory {dir_path} is empty"
+
+        except Exception as e:
+            # If CodeIndex fails, fall back to regular scanning
+            logger.debug(f"CodeIndex cache miss for {dir_path}: {e}")
+
+        # Fallback: Collect entries in a background thread to prevent blocking the event loop
         def _scan_directory(path: Path) -> List[Tuple[str, bool, str]]:
             """Synchronous helper that scans a directory and returns entry metadata."""
             collected: List[Tuple[str, bool, str]] = []
@@ -98,6 +217,17 @@
         # Sort entries: directories first, then files, both alphabetically
         entries.sort(key=lambda x: (not x[1], x[0].lower()))
 
+        # Update CodeIndex cache with the fresh data
+        try:
+            from tunacode.core.code_index import CodeIndex
+
+            index = CodeIndex.get_instance()
+            # Extract just the names for cache storage
+            entry_names = [name for name, _, _ in entries]
+            index.update_directory_cache(dir_path, entry_names)
+        except Exception as e:
+            logger.debug(f"Failed to update CodeIndex cache for {dir_path}: {e}")
+
         # Apply limit after sorting to ensure consistent results
         total_entries = len(entries)
         if len(entries) > max_entries:
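
Taken together, the two list_dir.py hunks form a read-through cache: serve from CodeIndex when possible, fall back to a real directory scan, then write the scan results back into the cache. A minimal self-contained sketch of that pattern follows; the DirectoryCache class is illustrative only, reusing the method names from the diff, and is not the package's actual CodeIndex singleton.

    from pathlib import Path
    from typing import Dict, List, Optional

    class DirectoryCache:
        """Illustrative stand-in for CodeIndex's directory cache."""

        def __init__(self) -> None:
            self._cache: Dict[Path, List[str]] = {}

        def get_directory_contents(self, path: Path) -> Optional[List[str]]:
            return self._cache.get(path)

        def update_directory_cache(self, path: Path, names: List[str]) -> None:
            self._cache[path] = names

    def list_dir(path: Path, cache: DirectoryCache) -> List[str]:
        cached = cache.get_directory_contents(path)
        if cached is not None:
            return cached  # fast path: serve names from the cache
        names = sorted(entry.name for entry in path.iterdir())  # slow path: hit the filesystem
        cache.update_directory_cache(path, names)  # write back for the next caller
        return names

    cache = DirectoryCache()
    print(list_dir(Path("."), cache))  # first call scans and populates
    print(list_dir(Path("."), cache))  # second call is served from memory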