tunacode-cli 0.0.55__py3-none-any.whl → 0.0.78.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tunacode-cli might be problematic. See the package's registry page for more details.

Files changed (114)
  1. tunacode/cli/commands/__init__.py +2 -2
  2. tunacode/cli/commands/implementations/__init__.py +2 -3
  3. tunacode/cli/commands/implementations/command_reload.py +48 -0
  4. tunacode/cli/commands/implementations/debug.py +2 -2
  5. tunacode/cli/commands/implementations/development.py +10 -8
  6. tunacode/cli/commands/implementations/model.py +357 -29
  7. tunacode/cli/commands/implementations/quickstart.py +43 -0
  8. tunacode/cli/commands/implementations/system.py +96 -3
  9. tunacode/cli/commands/implementations/template.py +0 -2
  10. tunacode/cli/commands/registry.py +139 -5
  11. tunacode/cli/commands/slash/__init__.py +32 -0
  12. tunacode/cli/commands/slash/command.py +157 -0
  13. tunacode/cli/commands/slash/loader.py +135 -0
  14. tunacode/cli/commands/slash/processor.py +294 -0
  15. tunacode/cli/commands/slash/types.py +93 -0
  16. tunacode/cli/commands/slash/validator.py +400 -0
  17. tunacode/cli/main.py +23 -2
  18. tunacode/cli/repl.py +217 -190
  19. tunacode/cli/repl_components/command_parser.py +38 -4
  20. tunacode/cli/repl_components/error_recovery.py +85 -4
  21. tunacode/cli/repl_components/output_display.py +12 -1
  22. tunacode/cli/repl_components/tool_executor.py +1 -1
  23. tunacode/configuration/defaults.py +12 -3
  24. tunacode/configuration/key_descriptions.py +284 -0
  25. tunacode/configuration/settings.py +0 -1
  26. tunacode/constants.py +12 -40
  27. tunacode/core/agents/__init__.py +43 -2
  28. tunacode/core/agents/agent_components/__init__.py +7 -0
  29. tunacode/core/agents/agent_components/agent_config.py +249 -55
  30. tunacode/core/agents/agent_components/agent_helpers.py +43 -13
  31. tunacode/core/agents/agent_components/node_processor.py +179 -139
  32. tunacode/core/agents/agent_components/response_state.py +123 -6
  33. tunacode/core/agents/agent_components/state_transition.py +116 -0
  34. tunacode/core/agents/agent_components/streaming.py +296 -0
  35. tunacode/core/agents/agent_components/task_completion.py +19 -6
  36. tunacode/core/agents/agent_components/tool_buffer.py +21 -1
  37. tunacode/core/agents/agent_components/tool_executor.py +10 -0
  38. tunacode/core/agents/main.py +522 -370
  39. tunacode/core/agents/main_legact.py +538 -0
  40. tunacode/core/agents/prompts.py +66 -0
  41. tunacode/core/agents/utils.py +29 -121
  42. tunacode/core/code_index.py +83 -29
  43. tunacode/core/setup/__init__.py +0 -2
  44. tunacode/core/setup/config_setup.py +110 -20
  45. tunacode/core/setup/config_wizard.py +230 -0
  46. tunacode/core/setup/coordinator.py +14 -5
  47. tunacode/core/state.py +16 -20
  48. tunacode/core/token_usage/usage_tracker.py +5 -3
  49. tunacode/core/tool_authorization.py +352 -0
  50. tunacode/core/tool_handler.py +67 -40
  51. tunacode/exceptions.py +119 -5
  52. tunacode/prompts/system.xml +751 -0
  53. tunacode/services/mcp.py +125 -7
  54. tunacode/setup.py +5 -25
  55. tunacode/tools/base.py +163 -0
  56. tunacode/tools/bash.py +110 -1
  57. tunacode/tools/glob.py +332 -34
  58. tunacode/tools/grep.py +179 -82
  59. tunacode/tools/grep_components/result_formatter.py +98 -4
  60. tunacode/tools/list_dir.py +132 -2
  61. tunacode/tools/prompts/bash_prompt.xml +72 -0
  62. tunacode/tools/prompts/glob_prompt.xml +45 -0
  63. tunacode/tools/prompts/grep_prompt.xml +98 -0
  64. tunacode/tools/prompts/list_dir_prompt.xml +31 -0
  65. tunacode/tools/prompts/react_prompt.xml +23 -0
  66. tunacode/tools/prompts/read_file_prompt.xml +54 -0
  67. tunacode/tools/prompts/run_command_prompt.xml +64 -0
  68. tunacode/tools/prompts/update_file_prompt.xml +53 -0
  69. tunacode/tools/prompts/write_file_prompt.xml +37 -0
  70. tunacode/tools/react.py +153 -0
  71. tunacode/tools/read_file.py +91 -0
  72. tunacode/tools/run_command.py +114 -0
  73. tunacode/tools/schema_assembler.py +167 -0
  74. tunacode/tools/update_file.py +94 -0
  75. tunacode/tools/write_file.py +86 -0
  76. tunacode/tools/xml_helper.py +83 -0
  77. tunacode/tutorial/__init__.py +9 -0
  78. tunacode/tutorial/content.py +98 -0
  79. tunacode/tutorial/manager.py +182 -0
  80. tunacode/tutorial/steps.py +124 -0
  81. tunacode/types.py +20 -27
  82. tunacode/ui/completers.py +434 -50
  83. tunacode/ui/config_dashboard.py +585 -0
  84. tunacode/ui/console.py +63 -11
  85. tunacode/ui/input.py +20 -3
  86. tunacode/ui/keybindings.py +7 -4
  87. tunacode/ui/model_selector.py +395 -0
  88. tunacode/ui/output.py +40 -19
  89. tunacode/ui/panels.py +212 -43
  90. tunacode/ui/path_heuristics.py +91 -0
  91. tunacode/ui/prompt_manager.py +5 -1
  92. tunacode/ui/tool_ui.py +33 -10
  93. tunacode/utils/api_key_validation.py +93 -0
  94. tunacode/utils/config_comparator.py +340 -0
  95. tunacode/utils/json_utils.py +206 -0
  96. tunacode/utils/message_utils.py +14 -4
  97. tunacode/utils/models_registry.py +593 -0
  98. tunacode/utils/ripgrep.py +332 -9
  99. tunacode/utils/text_utils.py +18 -1
  100. tunacode/utils/user_configuration.py +45 -0
  101. tunacode_cli-0.0.78.6.dist-info/METADATA +260 -0
  102. tunacode_cli-0.0.78.6.dist-info/RECORD +158 -0
  103. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/WHEEL +1 -2
  104. tunacode/cli/commands/implementations/todo.py +0 -217
  105. tunacode/context.py +0 -71
  106. tunacode/core/setup/git_safety_setup.py +0 -182
  107. tunacode/prompts/system.md +0 -731
  108. tunacode/tools/read_file_async_poc.py +0 -196
  109. tunacode/tools/todo.py +0 -349
  110. tunacode_cli-0.0.55.dist-info/METADATA +0 -322
  111. tunacode_cli-0.0.55.dist-info/RECORD +0 -126
  112. tunacode_cli-0.0.55.dist-info/top_level.txt +0 -1
  113. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/entry_points.txt +0 -0
  114. {tunacode_cli-0.0.55.dist-info → tunacode_cli-0.0.78.6.dist-info}/licenses/LICENSE +0 -0
tunacode/tools/grep.py CHANGED
@@ -12,13 +12,15 @@ CLAUDE_ANCHOR[grep-module]: Fast parallel file search with 3-second deadline
12
12
  """
13
13
 
14
14
  import asyncio
15
+ import logging
15
16
  import re
16
- import subprocess
17
17
  import time
18
18
  from concurrent.futures import ThreadPoolExecutor
19
+ from functools import lru_cache
19
20
  from pathlib import Path
20
- from typing import List, Optional, Union
21
+ from typing import Any, Dict, List, Optional, Union
21
22
 
23
+ from tunacode.configuration.defaults import DEFAULT_USER_CONFIG
22
24
  from tunacode.exceptions import TooBroadPatternError, ToolExecutionError
23
25
  from tunacode.tools.base import BaseTool
24
26
  from tunacode.tools.grep_components import (
@@ -28,6 +30,11 @@ from tunacode.tools.grep_components import (
28
30
  SearchResult,
29
31
  )
30
32
  from tunacode.tools.grep_components.result_formatter import ResultFormatter
33
+ from tunacode.tools.xml_helper import load_parameters_schema_from_xml, load_prompt_from_xml
34
+ from tunacode.utils.ripgrep import RipgrepExecutor
35
+ from tunacode.utils.ripgrep import metrics as ripgrep_metrics
36
+
37
+ logger = logging.getLogger(__name__)
31
38
 
32
39
 
33
40
  class ParallelGrep(BaseTool):
@@ -42,11 +49,103 @@ class ParallelGrep(BaseTool):
42
49
  self._file_filter = FileFilter()
43
50
  self._pattern_matcher = PatternMatcher()
44
51
  self._result_formatter = ResultFormatter()
52
+ self._ripgrep_executor = RipgrepExecutor()
53
+
54
+ # Load configuration
55
+ self._config = self._load_ripgrep_config()
45
56
 
46
57
  @property
47
58
  def tool_name(self) -> str:
48
59
  return "grep"
49
60
 
61
+ @lru_cache(maxsize=1)
62
+ def _get_base_prompt(self) -> str:
63
+ """Load and return the base prompt from XML file.
64
+
65
+ Returns:
66
+ str: The loaded prompt from XML or a default prompt
67
+ """
68
+ # Try to load from XML helper
69
+ prompt = load_prompt_from_xml("grep")
70
+ if prompt:
71
+ return prompt
72
+
73
+ # Fallback to default prompt
74
+ return """A powerful search tool built on ripgrep
75
+
76
+ Usage:
77
+ - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command.
78
+ - Supports full regex syntax
79
+ - Filter files with glob or type parameters
80
+ - Multiple output modes available"""
81
+
82
+ @lru_cache(maxsize=1)
83
+ def _get_parameters_schema(self) -> Dict[str, Any]:
84
+ """Get the parameters schema for grep tool.
85
+
86
+ Returns:
87
+ Dict containing the JSON schema for tool parameters
88
+ """
89
+ # Try to load from XML helper
90
+ schema = load_parameters_schema_from_xml("grep")
91
+ if schema:
92
+ return schema
93
+
94
+ # Fallback to hardcoded schema
95
+ return {
96
+ "type": "object",
97
+ "properties": {
98
+ "pattern": {
99
+ "type": "string",
100
+ "description": "Regular expression pattern to search for",
101
+ },
102
+ "directory": {"type": "string", "description": "Directory to search in"},
103
+ "include": {
104
+ "type": "array",
105
+ "items": {"type": "string"},
106
+ "description": "File patterns to include",
107
+ },
108
+ "exclude": {
109
+ "type": "array",
110
+ "items": {"type": "string"},
111
+ "description": "File patterns to exclude",
112
+ },
113
+ "max_results": {"type": "integer", "description": "Maximum number of results"},
114
+ "context_before": {
115
+ "type": "integer",
116
+ "description": "Lines of context before matches",
117
+ },
118
+ "context_after": {
119
+ "type": "integer",
120
+ "description": "Lines of context after matches",
121
+ },
122
+ },
123
+ "required": ["pattern"],
124
+ }
125
+
126
+ def _load_ripgrep_config(self) -> Dict:
127
+ """Load ripgrep configuration from settings."""
128
+ try:
129
+ settings = DEFAULT_USER_CONFIG.get("settings", {})
130
+ return settings.get(
131
+ "ripgrep",
132
+ {
133
+ "timeout": 10,
134
+ "max_buffer_size": 1048576,
135
+ "max_results": 100,
136
+ "enable_metrics": False,
137
+ "debug": False,
138
+ },
139
+ )
140
+ except Exception:
141
+ return {
142
+ "timeout": 10,
143
+ "max_buffer_size": 1048576,
144
+ "max_results": 100,
145
+ "enable_metrics": False,
146
+ "debug": False,
147
+ }
148
+
50
149
  async def _execute(
51
150
  self,
52
151
  pattern: str,
@@ -143,7 +242,10 @@ class ParallelGrep(BaseTool):
143
242
  raise ToolExecutionError(f"Unknown search type: {search_type}")
144
243
 
145
244
  # 5️⃣ Format and return results with strategy info
146
- strategy_info = f"Strategy: {search_type} (was {original_search_type}), Files: {len(candidates)}/{5000}"
245
+ strategy_info = (
246
+ f"Strategy: {search_type} (was {original_search_type}), "
247
+ f"Files: {len(candidates)}/{5000}"
248
+ )
147
249
  formatted_results = self._result_formatter.format_results(results, pattern, config)
148
250
 
149
251
  if return_format == "list":
@@ -176,98 +278,93 @@ class ParallelGrep(BaseTool):
176
278
  self, pattern: str, candidates: List[Path], config: SearchConfig
177
279
  ) -> List[SearchResult]:
178
280
  """
179
- Run ripgrep on pre-filtered file list with first match deadline.
281
+ Run ripgrep on pre-filtered file list using the enhanced RipgrepExecutor.
180
282
  """
181
283
 
182
- def run_ripgrep_filtered():
183
- cmd = ["rg", "--json"]
284
+ def run_enhanced_ripgrep():
285
+ """Execute ripgrep search using the new executor."""
286
+ start_time = time.time()
287
+ results = []
184
288
 
185
- # Add configuration flags
186
- if not config.case_sensitive:
187
- cmd.append("--ignore-case")
188
- if config.context_lines > 0:
189
- cmd.extend(["--context", str(config.context_lines)])
190
- if config.max_results:
191
- cmd.extend(["--max-count", str(config.max_results)])
289
+ # Configure timeout from settings
290
+ timeout = min(self._config.get("timeout", 10), config.timeout_seconds)
192
291
 
193
- # Add pattern and explicit file list
194
- cmd.append(pattern)
195
- cmd.extend(str(f) for f in candidates)
292
+ # If ripgrep executor is using fallback, skip this method entirely
293
+ if self._ripgrep_executor._use_python_fallback:
294
+ # Return empty to trigger Python fallback in the calling function
295
+ return []
196
296
 
197
297
  try:
198
- # Start the process
199
- process = subprocess.Popen(
200
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1
298
+ # Use the enhanced executor with support for context lines
299
+ # Note: Currently searching all files, not using candidates
300
+ # This is a limitation that should be addressed in future enhancement
301
+ search_results = self._ripgrep_executor.search(
302
+ pattern=pattern,
303
+ path=".", # Search in current directory
304
+ timeout=timeout,
305
+ max_matches=config.max_results,
306
+ case_insensitive=not config.case_sensitive,
307
+ context_before=config.context_lines,
308
+ context_after=config.context_lines,
201
309
  )
202
310
 
203
- # Monitor for first match within deadline
204
- start_time = time.time()
205
- output_lines = []
206
- first_match_found = False
207
-
208
- while True:
209
- # Check if we exceeded the first match deadline
210
- if (
211
- not first_match_found
212
- and (time.time() - start_time) > config.first_match_deadline
213
- ):
214
- process.kill()
215
- process.wait()
216
- raise TooBroadPatternError(pattern, config.first_match_deadline)
217
-
218
- # Check if process is still running
219
- if process.poll() is not None:
220
- # Process finished, get any remaining output
221
- remaining_output, _ = process.communicate()
222
- if remaining_output:
223
- output_lines.extend(remaining_output.splitlines())
224
- break
225
-
226
- # Try to read a line (non-blocking)
227
- try:
228
- # Use a small timeout to avoid blocking indefinitely
229
- line = process.stdout.readline()
230
- if line:
231
- output_lines.append(line.rstrip())
232
- # Check if this is a match line
233
- if '"type":"match"' in line:
234
- first_match_found = True
235
- except Exception:
236
- pass
237
-
238
- # Small sleep to avoid busy waiting
239
- time.sleep(0.01)
240
-
241
- # Check exit code
242
- if process.returncode == 0 or output_lines:
243
- # Return output even if exit code is non-zero but we have matches
244
- return "\n".join(output_lines)
245
- else:
246
- return None
311
+ # Ripgrep doesn't provide timing info for first match, so we rely on
312
+ # the overall timeout mechanism instead of first_match_deadline
313
+
314
+ # Parse results
315
+ for result_line in search_results:
316
+ # Parse ripgrep output format "file:line:content"
317
+ parts = result_line.split(":", 2)
318
+ if len(parts) >= 3:
319
+ # Filter to only include results from candidates
320
+ file_path = Path(parts[0])
321
+ if file_path not in candidates:
322
+ continue
323
+
324
+ try:
325
+ search_result = SearchResult(
326
+ file_path=parts[0],
327
+ line_number=int(parts[1]),
328
+ line_content=parts[2] if len(parts) > 2 else "",
329
+ match_start=0,
330
+ match_end=len(parts[2]) if len(parts) > 2 else 0,
331
+ context_before=[],
332
+ context_after=[],
333
+ relevance_score=1.0,
334
+ )
335
+ results.append(search_result)
336
+
337
+ # Stop if we have enough results
338
+ if config.max_results and len(results) >= config.max_results:
339
+ break
340
+ except (ValueError, IndexError):
341
+ continue
247
342
 
248
343
  except TooBroadPatternError:
249
344
  raise
250
- except (subprocess.TimeoutExpired, FileNotFoundError):
251
- return None
252
- except Exception:
253
- # Make sure to clean up the process
254
- if "process" in locals():
255
- try:
256
- process.kill()
257
- process.wait()
258
- except Exception:
259
- pass
260
- return None
261
-
262
- # Run ripgrep with monitoring in thread pool
345
+ except Exception as e:
346
+ if self._config.get("debug", False):
347
+ logger.debug(f"Search error: {e}")
348
+ # Return empty to trigger fallback
349
+ return []
350
+
351
+ # Record metrics if enabled
352
+ if self._config.get("enable_metrics", False):
353
+ total_time = time.time() - start_time
354
+ ripgrep_metrics.record_search(
355
+ duration=total_time, used_fallback=self._ripgrep_executor._use_python_fallback
356
+ )
357
+
358
+ if self._config.get("debug", False):
359
+ logger.debug(f"Ripgrep search completed in {total_time:.2f}s")
360
+
361
+ return results
362
+
363
+ # Run the enhanced ripgrep search
263
364
  try:
264
- output = await asyncio.get_event_loop().run_in_executor(
265
- self._executor, run_ripgrep_filtered
365
+ return await asyncio.get_event_loop().run_in_executor(
366
+ self._executor, run_enhanced_ripgrep
266
367
  )
267
- if output:
268
- parsed = self._pattern_matcher.parse_ripgrep_output(output)
269
- return parsed
270
- return []
271
368
  except TooBroadPatternError:
272
369
  raise
273
370
 
@@ -1,21 +1,53 @@
1
1
  """
2
+ Extended result formatter with multiple output modes for flexible presentation.
2
3
  Result formatting functionality for the grep tool.
3
4
  """
4
5
 
5
- from typing import List
6
+ from typing import Dict, List
6
7
 
7
8
  from .search_result import SearchConfig, SearchResult
8
9
 
9
10
 
10
11
  class ResultFormatter:
11
- """Handles formatting of search results for display."""
12
+ """Handles formatting of search results for display with multiple output modes."""
12
13
 
13
14
  @staticmethod
14
- def format_results(results: List[SearchResult], pattern: str, config: SearchConfig) -> str:
15
- """Format search results for display."""
15
+ def format_results(
16
+ results: List[SearchResult],
17
+ pattern: str,
18
+ config: SearchConfig,
19
+ output_mode: str = "content",
20
+ ) -> str:
21
+ """Format search results for display.
22
+
23
+ Args:
24
+ results: List of search results
25
+ pattern: Search pattern
26
+ config: Search configuration
27
+ output_mode: Output format mode:
28
+ - "content": Show matching lines with context (default)
29
+ - "files_with_matches": Show only file paths
30
+ - "count": Show match counts per file
31
+ - "json": JSON format for programmatic use
32
+
33
+ Returns:
34
+ Formatted string based on output mode
35
+ """
16
36
  if not results:
17
37
  return f"No matches found for pattern: {pattern}"
18
38
 
39
+ if output_mode == "files_with_matches":
40
+ return ResultFormatter._format_files_only(results, pattern)
41
+ elif output_mode == "count":
42
+ return ResultFormatter._format_count(results, pattern)
43
+ elif output_mode == "json":
44
+ return ResultFormatter._format_json(results, pattern)
45
+ else: # Default to "content"
46
+ return ResultFormatter._format_content(results, pattern, config)
47
+
48
+ @staticmethod
49
+ def _format_content(results: List[SearchResult], pattern: str, config: SearchConfig) -> str:
50
+ """Format results with full content and context."""
19
51
  output = []
20
52
  output.append(f"Found {len(results)} matches for pattern: {pattern}")
21
53
  output.append("=" * 60)
@@ -43,3 +75,65 @@ class ResultFormatter:
43
75
  output.append(f" {line_num:4d}│ {context_line}")
44
76
 
45
77
  return "\n".join(output)
78
+
79
+ @staticmethod
80
+ def _format_files_only(results: List[SearchResult], pattern: str) -> str:
81
+ """Format results showing only file paths."""
82
+ # Collect unique file paths
83
+ files = sorted(set(r.file_path for r in results))
84
+
85
+ output = []
86
+ output.append(f"Files with matches for pattern: {pattern}")
87
+ output.append(f"Total files: {len(files)}")
88
+ output.append("=" * 60)
89
+
90
+ for file_path in files:
91
+ output.append(file_path)
92
+
93
+ return "\n".join(output)
94
+
95
+ @staticmethod
96
+ def _format_count(results: List[SearchResult], pattern: str) -> str:
97
+ """Format results showing match counts per file."""
98
+ # Count matches per file
99
+ file_counts: Dict[str, int] = {}
100
+ for result in results:
101
+ file_counts[result.file_path] = file_counts.get(result.file_path, 0) + 1
102
+
103
+ output = []
104
+ output.append(f"Match counts for pattern: {pattern}")
105
+ output.append(f"Total matches: {len(results)} across {len(file_counts)} files")
106
+ output.append("=" * 60)
107
+
108
+ # Sort by count (descending) then by file path
109
+ sorted_counts = sorted(file_counts.items(), key=lambda x: (-x[1], x[0]))
110
+
111
+ for file_path, count in sorted_counts:
112
+ output.append(f"{count:5d} {file_path}")
113
+
114
+ return "\n".join(output)
115
+
116
+ @staticmethod
117
+ def _format_json(results: List[SearchResult], pattern: str) -> str:
118
+ """Format results as JSON for programmatic use."""
119
+ import json
120
+
121
+ # Convert results to JSON-serializable format
122
+ json_results = []
123
+ for result in results:
124
+ json_results.append(
125
+ {
126
+ "file": result.file_path,
127
+ "line": result.line_number,
128
+ "content": result.line_content,
129
+ "match_start": result.match_start,
130
+ "match_end": result.match_end,
131
+ "context_before": result.context_before,
132
+ "context_after": result.context_after,
133
+ "score": result.relevance_score,
134
+ }
135
+ )
136
+
137
+ output_data = {"pattern": pattern, "total_matches": len(results), "results": json_results}
138
+
139
+ return json.dumps(output_data, indent=2)
@@ -6,14 +6,20 @@ Provides efficient directory listing without using shell commands.
6
6
  """
7
7
 
8
8
  import asyncio
9
+ import logging
9
10
  import os
11
+ from functools import lru_cache
10
12
  from pathlib import Path
11
- from typing import List, Tuple
13
+ from typing import Any, Dict, List, Tuple
14
+
15
+ import defusedxml.ElementTree as ET
12
16
 
13
17
  from tunacode.exceptions import ToolExecutionError
14
18
  from tunacode.tools.base import FileBasedTool
15
19
  from tunacode.types import FilePath, ToolResult
16
20
 
21
+ logger = logging.getLogger(__name__)
22
+
17
23
 
18
24
  class ListDirTool(FileBasedTool):
19
25
  """Tool for listing directory contents without shell commands."""
@@ -22,6 +28,92 @@ class ListDirTool(FileBasedTool):
22
28
  def tool_name(self) -> str:
23
29
  return "ListDir"
24
30
 
31
+ @lru_cache(maxsize=1)
32
+ def _get_base_prompt(self) -> str:
33
+ """Load and return the base prompt from XML file.
34
+
35
+ Returns:
36
+ str: The loaded prompt from XML or a default prompt
37
+ """
38
+ try:
39
+ # Load prompt from XML file
40
+ prompt_file = Path(__file__).parent / "prompts" / "list_dir_prompt.xml"
41
+ if prompt_file.exists():
42
+ tree = ET.parse(prompt_file)
43
+ root = tree.getroot()
44
+ description = root.find("description")
45
+ if description is not None:
46
+ return description.text.strip()
47
+ except Exception as e:
48
+ logger.warning(f"Failed to load XML prompt for list_dir: {e}")
49
+
50
+ # Fallback to default prompt
51
+ return """Lists files and directories in a given path"""
52
+
53
+ @lru_cache(maxsize=1)
54
+ def _get_parameters_schema(self) -> Dict[str, Any]:
55
+ """Get the parameters schema for list_dir tool.
56
+
57
+ Returns:
58
+ Dict containing the JSON schema for tool parameters
59
+ """
60
+ # Try to load from XML first
61
+ try:
62
+ prompt_file = Path(__file__).parent / "prompts" / "list_dir_prompt.xml"
63
+ if prompt_file.exists():
64
+ tree = ET.parse(prompt_file)
65
+ root = tree.getroot()
66
+ parameters = root.find("parameters")
67
+ if parameters is not None:
68
+ schema: Dict[str, Any] = {"type": "object", "properties": {}, "required": []}
69
+ required_fields: List[str] = []
70
+
71
+ for param in parameters.findall("parameter"):
72
+ name = param.get("name")
73
+ required = param.get("required", "false").lower() == "true"
74
+ param_type = param.find("type")
75
+ description = param.find("description")
76
+
77
+ if name and param_type is not None:
78
+ prop = {
79
+ "type": param_type.text.strip(),
80
+ "description": description.text.strip()
81
+ if description is not None
82
+ else "",
83
+ }
84
+
85
+ # Handle array types
86
+ if param_type.text.strip() == "array":
87
+ items = param.find("items")
88
+ if items is not None:
89
+ prop["items"] = {"type": items.text.strip()}
90
+
91
+ schema["properties"][name] = prop
92
+ if required:
93
+ required_fields.append(name)
94
+
95
+ schema["required"] = required_fields
96
+ return schema
97
+ except Exception as e:
98
+ logger.warning(f"Failed to load parameters from XML for list_dir: {e}")
99
+
100
+ # Fallback to hardcoded schema
101
+ return {
102
+ "type": "object",
103
+ "properties": {
104
+ "path": {
105
+ "type": "string",
106
+ "description": "The absolute path to the directory to list",
107
+ },
108
+ "ignore": {
109
+ "type": "array",
110
+ "items": {"type": "string"},
111
+ "description": "List of glob patterns to ignore",
112
+ },
113
+ },
114
+ "required": ["path"],
115
+ }
116
+
25
117
  async def _execute(
26
118
  self, directory: FilePath = ".", max_entries: int = 200, show_hidden: bool = False
27
119
  ) -> ToolResult:
@@ -48,7 +140,34 @@ class ListDirTool(FileBasedTool):
48
140
  if not dir_path.is_dir():
49
141
  raise NotADirectoryError(f"Not a directory: {dir_path}")
50
142
 
51
- # Collect entries in a background thread to prevent blocking the event loop
143
+ # Try to use cached data from CodeIndex first
144
+ try:
145
+ from tunacode.core.code_index import CodeIndex
146
+
147
+ index = CodeIndex.get_instance()
148
+ cached_entries = index.get_directory_contents(dir_path)
149
+
150
+ if cached_entries:
151
+ # Filter cached entries based on show_hidden
152
+ if not show_hidden:
153
+ cached_entries = [name for name in cached_entries if not name.startswith(".")]
154
+
155
+ # Limit entries and format output
156
+ limited_entries = cached_entries[:max_entries]
157
+
158
+ # Return simple format for cached results (names only for speed)
159
+ if limited_entries:
160
+ return f"Files in {dir_path}:\n" + "\n".join(
161
+ f" {name}" for name in limited_entries
162
+ )
163
+ else:
164
+ return f"Directory {dir_path} is empty"
165
+
166
+ except Exception as e:
167
+ # If CodeIndex fails, fall back to regular scanning
168
+ logger.debug(f"CodeIndex cache miss for {dir_path}: {e}")
169
+
170
+ # Fallback: Collect entries in a background thread to prevent blocking the event loop
52
171
  def _scan_directory(path: Path) -> List[Tuple[str, bool, str]]:
53
172
  """Synchronous helper that scans a directory and returns entry metadata."""
54
173
  collected: List[Tuple[str, bool, str]] = []
@@ -98,6 +217,17 @@ class ListDirTool(FileBasedTool):
98
217
  # Sort entries: directories first, then files, both alphabetically
99
218
  entries.sort(key=lambda x: (not x[1], x[0].lower()))
100
219
 
220
+ # Update CodeIndex cache with the fresh data
221
+ try:
222
+ from tunacode.core.code_index import CodeIndex
223
+
224
+ index = CodeIndex.get_instance()
225
+ # Extract just the names for cache storage
226
+ entry_names = [name for name, _, _ in entries]
227
+ index.update_directory_cache(dir_path, entry_names)
228
+ except Exception as e:
229
+ logger.debug(f"Failed to update CodeIndex cache for {dir_path}: {e}")
230
+
101
231
  # Apply limit after sorting to ensure consistent results
102
232
  total_entries = len(entries)
103
233
  if len(entries) > max_entries: