aider-ce 0.87.2.dev9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aider-ce might be problematic. See the registry's advisory page for more details.

Files changed (264):
  1. aider/__init__.py +20 -0
  2. aider/__main__.py +4 -0
  3. aider/_version.py +34 -0
  4. aider/analytics.py +258 -0
  5. aider/args.py +1014 -0
  6. aider/args_formatter.py +228 -0
  7. aider/change_tracker.py +133 -0
  8. aider/coders/__init__.py +36 -0
  9. aider/coders/architect_coder.py +48 -0
  10. aider/coders/architect_prompts.py +40 -0
  11. aider/coders/ask_coder.py +9 -0
  12. aider/coders/ask_prompts.py +35 -0
  13. aider/coders/base_coder.py +3013 -0
  14. aider/coders/base_prompts.py +87 -0
  15. aider/coders/chat_chunks.py +64 -0
  16. aider/coders/context_coder.py +53 -0
  17. aider/coders/context_prompts.py +75 -0
  18. aider/coders/editblock_coder.py +657 -0
  19. aider/coders/editblock_fenced_coder.py +10 -0
  20. aider/coders/editblock_fenced_prompts.py +143 -0
  21. aider/coders/editblock_func_coder.py +141 -0
  22. aider/coders/editblock_func_prompts.py +27 -0
  23. aider/coders/editblock_prompts.py +177 -0
  24. aider/coders/editor_diff_fenced_coder.py +9 -0
  25. aider/coders/editor_diff_fenced_prompts.py +11 -0
  26. aider/coders/editor_editblock_coder.py +9 -0
  27. aider/coders/editor_editblock_prompts.py +21 -0
  28. aider/coders/editor_whole_coder.py +9 -0
  29. aider/coders/editor_whole_prompts.py +12 -0
  30. aider/coders/help_coder.py +16 -0
  31. aider/coders/help_prompts.py +46 -0
  32. aider/coders/navigator_coder.py +2711 -0
  33. aider/coders/navigator_legacy_prompts.py +338 -0
  34. aider/coders/navigator_prompts.py +530 -0
  35. aider/coders/patch_coder.py +706 -0
  36. aider/coders/patch_prompts.py +161 -0
  37. aider/coders/search_replace.py +757 -0
  38. aider/coders/shell.py +37 -0
  39. aider/coders/single_wholefile_func_coder.py +102 -0
  40. aider/coders/single_wholefile_func_prompts.py +27 -0
  41. aider/coders/udiff_coder.py +429 -0
  42. aider/coders/udiff_prompts.py +117 -0
  43. aider/coders/udiff_simple.py +14 -0
  44. aider/coders/udiff_simple_prompts.py +25 -0
  45. aider/coders/wholefile_coder.py +144 -0
  46. aider/coders/wholefile_func_coder.py +134 -0
  47. aider/coders/wholefile_func_prompts.py +27 -0
  48. aider/coders/wholefile_prompts.py +70 -0
  49. aider/commands.py +1946 -0
  50. aider/copypaste.py +72 -0
  51. aider/deprecated.py +126 -0
  52. aider/diffs.py +128 -0
  53. aider/dump.py +29 -0
  54. aider/editor.py +147 -0
  55. aider/exceptions.py +107 -0
  56. aider/format_settings.py +26 -0
  57. aider/gui.py +545 -0
  58. aider/help.py +163 -0
  59. aider/help_pats.py +19 -0
  60. aider/history.py +178 -0
  61. aider/io.py +1257 -0
  62. aider/linter.py +304 -0
  63. aider/llm.py +47 -0
  64. aider/main.py +1297 -0
  65. aider/mcp/__init__.py +94 -0
  66. aider/mcp/server.py +119 -0
  67. aider/mdstream.py +243 -0
  68. aider/models.py +1344 -0
  69. aider/onboarding.py +428 -0
  70. aider/openrouter.py +129 -0
  71. aider/prompts.py +56 -0
  72. aider/queries/tree-sitter-language-pack/README.md +7 -0
  73. aider/queries/tree-sitter-language-pack/arduino-tags.scm +5 -0
  74. aider/queries/tree-sitter-language-pack/c-tags.scm +9 -0
  75. aider/queries/tree-sitter-language-pack/chatito-tags.scm +16 -0
  76. aider/queries/tree-sitter-language-pack/clojure-tags.scm +7 -0
  77. aider/queries/tree-sitter-language-pack/commonlisp-tags.scm +122 -0
  78. aider/queries/tree-sitter-language-pack/cpp-tags.scm +15 -0
  79. aider/queries/tree-sitter-language-pack/csharp-tags.scm +26 -0
  80. aider/queries/tree-sitter-language-pack/d-tags.scm +26 -0
  81. aider/queries/tree-sitter-language-pack/dart-tags.scm +92 -0
  82. aider/queries/tree-sitter-language-pack/elisp-tags.scm +5 -0
  83. aider/queries/tree-sitter-language-pack/elixir-tags.scm +54 -0
  84. aider/queries/tree-sitter-language-pack/elm-tags.scm +19 -0
  85. aider/queries/tree-sitter-language-pack/gleam-tags.scm +41 -0
  86. aider/queries/tree-sitter-language-pack/go-tags.scm +42 -0
  87. aider/queries/tree-sitter-language-pack/java-tags.scm +20 -0
  88. aider/queries/tree-sitter-language-pack/javascript-tags.scm +88 -0
  89. aider/queries/tree-sitter-language-pack/lua-tags.scm +34 -0
  90. aider/queries/tree-sitter-language-pack/matlab-tags.scm +10 -0
  91. aider/queries/tree-sitter-language-pack/ocaml-tags.scm +115 -0
  92. aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm +98 -0
  93. aider/queries/tree-sitter-language-pack/pony-tags.scm +39 -0
  94. aider/queries/tree-sitter-language-pack/properties-tags.scm +5 -0
  95. aider/queries/tree-sitter-language-pack/python-tags.scm +14 -0
  96. aider/queries/tree-sitter-language-pack/r-tags.scm +21 -0
  97. aider/queries/tree-sitter-language-pack/racket-tags.scm +12 -0
  98. aider/queries/tree-sitter-language-pack/ruby-tags.scm +64 -0
  99. aider/queries/tree-sitter-language-pack/rust-tags.scm +60 -0
  100. aider/queries/tree-sitter-language-pack/solidity-tags.scm +43 -0
  101. aider/queries/tree-sitter-language-pack/swift-tags.scm +51 -0
  102. aider/queries/tree-sitter-language-pack/udev-tags.scm +20 -0
  103. aider/queries/tree-sitter-languages/README.md +23 -0
  104. aider/queries/tree-sitter-languages/c-tags.scm +9 -0
  105. aider/queries/tree-sitter-languages/c_sharp-tags.scm +46 -0
  106. aider/queries/tree-sitter-languages/cpp-tags.scm +15 -0
  107. aider/queries/tree-sitter-languages/dart-tags.scm +91 -0
  108. aider/queries/tree-sitter-languages/elisp-tags.scm +8 -0
  109. aider/queries/tree-sitter-languages/elixir-tags.scm +54 -0
  110. aider/queries/tree-sitter-languages/elm-tags.scm +19 -0
  111. aider/queries/tree-sitter-languages/go-tags.scm +30 -0
  112. aider/queries/tree-sitter-languages/hcl-tags.scm +77 -0
  113. aider/queries/tree-sitter-languages/java-tags.scm +20 -0
  114. aider/queries/tree-sitter-languages/javascript-tags.scm +88 -0
  115. aider/queries/tree-sitter-languages/kotlin-tags.scm +27 -0
  116. aider/queries/tree-sitter-languages/matlab-tags.scm +10 -0
  117. aider/queries/tree-sitter-languages/ocaml-tags.scm +115 -0
  118. aider/queries/tree-sitter-languages/ocaml_interface-tags.scm +98 -0
  119. aider/queries/tree-sitter-languages/php-tags.scm +26 -0
  120. aider/queries/tree-sitter-languages/python-tags.scm +12 -0
  121. aider/queries/tree-sitter-languages/ql-tags.scm +26 -0
  122. aider/queries/tree-sitter-languages/ruby-tags.scm +64 -0
  123. aider/queries/tree-sitter-languages/rust-tags.scm +60 -0
  124. aider/queries/tree-sitter-languages/scala-tags.scm +65 -0
  125. aider/queries/tree-sitter-languages/typescript-tags.scm +41 -0
  126. aider/reasoning_tags.py +82 -0
  127. aider/repo.py +621 -0
  128. aider/repomap.py +988 -0
  129. aider/report.py +200 -0
  130. aider/resources/__init__.py +3 -0
  131. aider/resources/model-metadata.json +699 -0
  132. aider/resources/model-settings.yml +2046 -0
  133. aider/run_cmd.py +132 -0
  134. aider/scrape.py +284 -0
  135. aider/sendchat.py +61 -0
  136. aider/special.py +203 -0
  137. aider/tools/__init__.py +26 -0
  138. aider/tools/command.py +58 -0
  139. aider/tools/command_interactive.py +53 -0
  140. aider/tools/delete_block.py +120 -0
  141. aider/tools/delete_line.py +112 -0
  142. aider/tools/delete_lines.py +137 -0
  143. aider/tools/extract_lines.py +276 -0
  144. aider/tools/grep.py +171 -0
  145. aider/tools/indent_lines.py +155 -0
  146. aider/tools/insert_block.py +211 -0
  147. aider/tools/list_changes.py +51 -0
  148. aider/tools/ls.py +49 -0
  149. aider/tools/make_editable.py +46 -0
  150. aider/tools/make_readonly.py +29 -0
  151. aider/tools/remove.py +48 -0
  152. aider/tools/replace_all.py +77 -0
  153. aider/tools/replace_line.py +125 -0
  154. aider/tools/replace_lines.py +160 -0
  155. aider/tools/replace_text.py +125 -0
  156. aider/tools/show_numbered_context.py +101 -0
  157. aider/tools/tool_utils.py +313 -0
  158. aider/tools/undo_change.py +60 -0
  159. aider/tools/view.py +13 -0
  160. aider/tools/view_files_at_glob.py +65 -0
  161. aider/tools/view_files_matching.py +103 -0
  162. aider/tools/view_files_with_symbol.py +121 -0
  163. aider/urls.py +17 -0
  164. aider/utils.py +454 -0
  165. aider/versioncheck.py +113 -0
  166. aider/voice.py +187 -0
  167. aider/waiting.py +221 -0
  168. aider/watch.py +318 -0
  169. aider/watch_prompts.py +12 -0
  170. aider/website/Gemfile +8 -0
  171. aider/website/_includes/blame.md +162 -0
  172. aider/website/_includes/get-started.md +22 -0
  173. aider/website/_includes/help-tip.md +5 -0
  174. aider/website/_includes/help.md +24 -0
  175. aider/website/_includes/install.md +5 -0
  176. aider/website/_includes/keys.md +4 -0
  177. aider/website/_includes/model-warnings.md +67 -0
  178. aider/website/_includes/multi-line.md +22 -0
  179. aider/website/_includes/python-m-aider.md +5 -0
  180. aider/website/_includes/recording.css +228 -0
  181. aider/website/_includes/recording.md +34 -0
  182. aider/website/_includes/replit-pipx.md +9 -0
  183. aider/website/_includes/works-best.md +1 -0
  184. aider/website/_sass/custom/custom.scss +103 -0
  185. aider/website/docs/config/adv-model-settings.md +2260 -0
  186. aider/website/docs/config/aider_conf.md +548 -0
  187. aider/website/docs/config/api-keys.md +90 -0
  188. aider/website/docs/config/dotenv.md +493 -0
  189. aider/website/docs/config/editor.md +127 -0
  190. aider/website/docs/config/mcp.md +95 -0
  191. aider/website/docs/config/model-aliases.md +104 -0
  192. aider/website/docs/config/options.md +890 -0
  193. aider/website/docs/config/reasoning.md +210 -0
  194. aider/website/docs/config.md +44 -0
  195. aider/website/docs/faq.md +384 -0
  196. aider/website/docs/git.md +76 -0
  197. aider/website/docs/index.md +47 -0
  198. aider/website/docs/install/codespaces.md +39 -0
  199. aider/website/docs/install/docker.md +57 -0
  200. aider/website/docs/install/optional.md +100 -0
  201. aider/website/docs/install/replit.md +8 -0
  202. aider/website/docs/install.md +115 -0
  203. aider/website/docs/languages.md +264 -0
  204. aider/website/docs/legal/contributor-agreement.md +111 -0
  205. aider/website/docs/legal/privacy.md +104 -0
  206. aider/website/docs/llms/anthropic.md +77 -0
  207. aider/website/docs/llms/azure.md +48 -0
  208. aider/website/docs/llms/bedrock.md +132 -0
  209. aider/website/docs/llms/cohere.md +34 -0
  210. aider/website/docs/llms/deepseek.md +32 -0
  211. aider/website/docs/llms/gemini.md +49 -0
  212. aider/website/docs/llms/github.md +111 -0
  213. aider/website/docs/llms/groq.md +36 -0
  214. aider/website/docs/llms/lm-studio.md +39 -0
  215. aider/website/docs/llms/ollama.md +75 -0
  216. aider/website/docs/llms/openai-compat.md +39 -0
  217. aider/website/docs/llms/openai.md +58 -0
  218. aider/website/docs/llms/openrouter.md +78 -0
  219. aider/website/docs/llms/other.md +111 -0
  220. aider/website/docs/llms/vertex.md +50 -0
  221. aider/website/docs/llms/warnings.md +10 -0
  222. aider/website/docs/llms/xai.md +53 -0
  223. aider/website/docs/llms.md +54 -0
  224. aider/website/docs/more/analytics.md +127 -0
  225. aider/website/docs/more/edit-formats.md +116 -0
  226. aider/website/docs/more/infinite-output.md +159 -0
  227. aider/website/docs/more-info.md +8 -0
  228. aider/website/docs/recordings/auto-accept-architect.md +31 -0
  229. aider/website/docs/recordings/dont-drop-original-read-files.md +35 -0
  230. aider/website/docs/recordings/index.md +21 -0
  231. aider/website/docs/recordings/model-accepts-settings.md +69 -0
  232. aider/website/docs/recordings/tree-sitter-language-pack.md +80 -0
  233. aider/website/docs/repomap.md +112 -0
  234. aider/website/docs/scripting.md +100 -0
  235. aider/website/docs/troubleshooting/aider-not-found.md +24 -0
  236. aider/website/docs/troubleshooting/edit-errors.md +76 -0
  237. aider/website/docs/troubleshooting/imports.md +62 -0
  238. aider/website/docs/troubleshooting/models-and-keys.md +54 -0
  239. aider/website/docs/troubleshooting/support.md +79 -0
  240. aider/website/docs/troubleshooting/token-limits.md +96 -0
  241. aider/website/docs/troubleshooting/warnings.md +12 -0
  242. aider/website/docs/troubleshooting.md +11 -0
  243. aider/website/docs/usage/browser.md +57 -0
  244. aider/website/docs/usage/caching.md +49 -0
  245. aider/website/docs/usage/commands.md +133 -0
  246. aider/website/docs/usage/conventions.md +119 -0
  247. aider/website/docs/usage/copypaste.md +121 -0
  248. aider/website/docs/usage/images-urls.md +48 -0
  249. aider/website/docs/usage/lint-test.md +118 -0
  250. aider/website/docs/usage/modes.md +211 -0
  251. aider/website/docs/usage/not-code.md +179 -0
  252. aider/website/docs/usage/notifications.md +87 -0
  253. aider/website/docs/usage/tips.md +79 -0
  254. aider/website/docs/usage/tutorials.md +30 -0
  255. aider/website/docs/usage/voice.md +121 -0
  256. aider/website/docs/usage/watch.md +294 -0
  257. aider/website/docs/usage.md +102 -0
  258. aider/website/share/index.md +101 -0
  259. aider_ce-0.87.2.dev9.dist-info/METADATA +543 -0
  260. aider_ce-0.87.2.dev9.dist-info/RECORD +264 -0
  261. aider_ce-0.87.2.dev9.dist-info/WHEEL +5 -0
  262. aider_ce-0.87.2.dev9.dist-info/entry_points.txt +3 -0
  263. aider_ce-0.87.2.dev9.dist-info/licenses/LICENSE.txt +202 -0
  264. aider_ce-0.87.2.dev9.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2711 @@
1
+ import ast
2
+ import asyncio
3
+ import base64
4
+ import json
5
+ import locale
6
+ import os
7
+ import platform
8
+ import re
9
+ import time
10
+ import traceback
11
+
12
+ # Add necessary imports if not already present
13
+ from collections import defaultdict
14
+ from datetime import datetime
15
+ from pathlib import Path
16
+
17
+ from litellm import experimental_mcp_client
18
+
19
+ from aider import urls, utils
20
+
21
+ # Import the change tracker
22
+ from aider.change_tracker import ChangeTracker
23
+ from aider.mcp.server import LocalServer
24
+ from aider.repo import ANY_GIT_ERROR
25
+
26
+ # Import run_cmd for potentially interactive execution and run_cmd_subprocess for guaranteed non-interactive
27
+ from aider.tools.command import _execute_command
28
+ from aider.tools.command_interactive import _execute_command_interactive
29
+ from aider.tools.delete_block import _execute_delete_block
30
+ from aider.tools.delete_line import _execute_delete_line
31
+ from aider.tools.delete_lines import _execute_delete_lines
32
+ from aider.tools.extract_lines import _execute_extract_lines
33
+ from aider.tools.grep import _execute_grep
34
+ from aider.tools.indent_lines import _execute_indent_lines
35
+ from aider.tools.insert_block import _execute_insert_block
36
+ from aider.tools.list_changes import _execute_list_changes
37
+ from aider.tools.ls import execute_ls
38
+ from aider.tools.make_editable import _execute_make_editable
39
+ from aider.tools.make_readonly import _execute_make_readonly
40
+ from aider.tools.remove import _execute_remove
41
+ from aider.tools.replace_all import _execute_replace_all
42
+ from aider.tools.replace_line import _execute_replace_line
43
+ from aider.tools.replace_lines import _execute_replace_lines
44
+ from aider.tools.replace_text import _execute_replace_text
45
+ from aider.tools.show_numbered_context import execute_show_numbered_context
46
+ from aider.tools.undo_change import _execute_undo_change
47
+ from aider.tools.view import execute_view
48
+
49
+ # Import tool functions
50
+ from aider.tools.view_files_at_glob import execute_view_files_at_glob
51
+ from aider.tools.view_files_matching import execute_view_files_matching
52
+ from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol
53
+
54
+ from .base_coder import ChatChunks, Coder
55
+ from .editblock_coder import do_replace, find_original_update_blocks, find_similar_lines
56
+ from .navigator_legacy_prompts import NavigatorLegacyPrompts
57
+ from .navigator_prompts import NavigatorPrompts
58
+
59
+
60
class NavigatorCoder(Coder):
    """Mode where the LLM autonomously manages which files are in context."""

    edit_format = "navigator"

    # TODO: flip to True once the granular editing tools stabilize.
    use_granular_editing = False

    def __init__(self, *args, **kwargs):
        """Set up navigator-specific state, then defer to the base Coder.

        The prompt set must be chosen before ``super().__init__`` runs,
        because the parent constructor expects ``self.gpt_prompts`` to exist.
        """
        if self.use_granular_editing:
            self.gpt_prompts = NavigatorPrompts()
        else:
            self.gpt_prompts = NavigatorLegacyPrompts()

        # Files recently dropped from the chat context.
        self.recently_removed = {}

        # Hard limits on autonomous behavior.
        self.max_tool_calls = 100  # maximum tool calls per LLM response
        # High reflection cap: this governs how many automatic
        # exploration rounds the LLM may take.
        self.max_reflections = 15

        # Context-management tuning.
        self.large_file_token_threshold = 25000  # above this, a file counts as large
        self.max_files_per_glob = 50  # cap on files added at once via glob/grep
        self.context_management_enabled = True  # on by default in navigator mode
        self.use_enhanced_context = True  # emit enhanced context blocks

        # Tracks granular edits so they can be listed and undone.
        self.change_tracker = ChangeTracker()

        # Per-exploration / per-response bookkeeping.
        self.files_added_in_exploration = set()
        self.tool_call_count = 0

        # Token accounting caches; left empty here and filled lazily
        # to avoid slowing down startup.
        self.context_block_tokens = {}
        self.context_blocks_cache = {}
        self.tokens_calculated = False

        super().__init__(*args, **kwargs)
        self.initialize_local_tools()
114
+
115
def initialize_local_tools(self):
    """Register the built-in navigator tools as a pseudo-MCP server.

    No-op unless granular editing is enabled and at least one local
    tool schema is available. Safe to call repeatedly: the local
    server and its tool list are each registered at most once.
    """
    if not self.use_granular_editing:
        return

    schemas = self.get_local_tool_schemas()
    if not schemas:
        return

    server = LocalServer({"name": "local_tools"})

    # Make sure both registries exist before touching them.
    if not self.mcp_servers:
        self.mcp_servers = []
    if not self.mcp_tools:
        self.mcp_tools = []

    # Register the local server only if one is not already present.
    has_local_server = any(isinstance(s, LocalServer) for s in self.mcp_servers)
    if not has_local_server:
        self.mcp_servers.append(server)

    registered_names = {name for name, _ in self.mcp_tools}
    if "local_tools" not in registered_names:
        self.mcp_tools.append((server.name, schemas))
    self.functions = self.get_tool_list()
137
+
138
def get_local_tool_schemas(self):
    """Return the JSON schemas for all local tools.

    Each entry follows the OpenAI function-calling format:
    ``{"type": "function", "function": {"name", "description", "parameters"}}``.
    """

    def tool(name, description, properties, required=None):
        # Build one tool entry; "required" is omitted entirely for tools
        # with no mandatory parameters (UndoChange, ListChanges).
        parameters = {"type": "object", "properties": properties}
        if required is not None:
            parameters["required"] = required
        return {
            "type": "function",
            "function": {
                "name": name,
                "description": description,
                "parameters": parameters,
            },
        }

    def described(description):
        # A string parameter carrying a human-readable description.
        return {"type": "string", "description": description}

    # Factory helpers return fresh dicts each call so no two schemas
    # share mutable state.
    def string():
        return {"type": "string"}

    def integer():
        return {"type": "integer"}

    def occurrence():
        return {"type": "integer", "default": 1}

    def dry_run():
        return {"type": "boolean", "default": False}

    return [
        tool(
            "ViewFilesAtGlob",
            "View files matching a glob pattern.",
            {"pattern": described("The glob pattern to match files.")},
            ["pattern"],
        ),
        tool(
            "ViewFilesMatching",
            "View files containing a specific pattern.",
            {
                "pattern": described("The pattern to search for in file contents."),
                "file_pattern": described(
                    "An optional glob pattern to filter which files are searched."
                ),
                "regex": {
                    "type": "boolean",
                    "description": (
                        "Whether the pattern is a regular expression. Defaults to False."
                    ),
                },
            },
            ["pattern"],
        ),
        tool(
            "Ls",
            "List files in a directory.",
            {"directory": described("The directory to list.")},
            ["directory"],
        ),
        tool(
            "View",
            "View a specific file.",
            {"file_path": described("The path to the file to view.")},
            ["file_path"],
        ),
        tool(
            "Remove",
            "Remove a file from the chat context.",
            {"file_path": described("The path to the file to remove.")},
            ["file_path"],
        ),
        tool(
            "MakeEditable",
            "Make a read-only file editable.",
            {"file_path": described("The path to the file to make editable.")},
            ["file_path"],
        ),
        tool(
            "MakeReadonly",
            "Make an editable file read-only.",
            {"file_path": described("The path to the file to make read-only.")},
            ["file_path"],
        ),
        tool(
            "ViewFilesWithSymbol",
            "View files that contain a specific symbol (e.g., class, function).",
            {"symbol": described("The symbol to search for.")},
            ["symbol"],
        ),
        tool(
            "Command",
            "Execute a shell command.",
            {"command_string": described("The shell command to execute.")},
            ["command_string"],
        ),
        tool(
            "CommandInteractive",
            "Execute a shell command interactively.",
            {"command_string": described("The interactive shell command to execute.")},
            ["command_string"],
        ),
        tool(
            "Grep",
            "Search for a pattern in files.",
            {
                "pattern": described("The pattern to search for."),
                "file_pattern": described(
                    "Glob pattern for files to search. Defaults to '*'."
                ),
                "directory": described("Directory to search in. Defaults to '.'."),
                "use_regex": {
                    "type": "boolean",
                    "description": "Whether to use regex. Defaults to False.",
                },
                "case_insensitive": {
                    "type": "boolean",
                    "description": (
                        "Whether to perform a case-insensitive search. Defaults to False."
                    ),
                },
                "context_before": {
                    "type": "integer",
                    "description": (
                        "Number of lines to show before a match. Defaults to 5."
                    ),
                },
                "context_after": {
                    "type": "integer",
                    "description": (
                        "Number of lines to show after a match. Defaults to 5."
                    ),
                },
            },
            ["pattern"],
        ),
        tool(
            "ReplaceText",
            "Replace text in a file.",
            {
                "file_path": string(),
                "find_text": string(),
                "replace_text": string(),
                "near_context": string(),
                "occurrence": occurrence(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "find_text", "replace_text"],
        ),
        tool(
            "ReplaceAll",
            "Replace all occurrences of text in a file.",
            {
                "file_path": string(),
                "find_text": string(),
                "replace_text": string(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "find_text", "replace_text"],
        ),
        tool(
            "InsertBlock",
            "Insert a block of content into a file.",
            {
                "file_path": string(),
                "content": string(),
                "after_pattern": string(),
                "before_pattern": string(),
                "occurrence": occurrence(),
                "change_id": string(),
                "dry_run": dry_run(),
                "position": {"type": "string", "enum": ["top", "bottom"]},
                "auto_indent": {"type": "boolean", "default": True},
                "use_regex": {"type": "boolean", "default": False},
            },
            ["file_path", "content"],
        ),
        tool(
            "DeleteBlock",
            "Delete a block of lines from a file.",
            {
                "file_path": string(),
                "start_pattern": string(),
                "end_pattern": string(),
                "line_count": integer(),
                "near_context": string(),
                "occurrence": occurrence(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "start_pattern"],
        ),
        tool(
            "ReplaceLine",
            "Replace a single line in a file.",
            {
                "file_path": string(),
                "line_number": integer(),
                "new_content": string(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "line_number", "new_content"],
        ),
        tool(
            "ReplaceLines",
            "Replace a range of lines in a file.",
            {
                "file_path": string(),
                "start_line": integer(),
                "end_line": integer(),
                "new_content": string(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "start_line", "end_line", "new_content"],
        ),
        tool(
            "IndentLines",
            "Indent a block of lines in a file.",
            {
                "file_path": string(),
                "start_pattern": string(),
                "end_pattern": string(),
                "line_count": integer(),
                "indent_levels": {"type": "integer", "default": 1},
                "near_context": string(),
                "occurrence": occurrence(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "start_pattern"],
        ),
        tool(
            "DeleteLine",
            "Delete a single line from a file.",
            {
                "file_path": string(),
                "line_number": integer(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "line_number"],
        ),
        tool(
            "DeleteLines",
            "Delete a range of lines from a file.",
            {
                "file_path": string(),
                "start_line": integer(),
                "end_line": integer(),
                "change_id": string(),
                "dry_run": dry_run(),
            },
            ["file_path", "start_line", "end_line"],
        ),
        tool(
            "UndoChange",
            "Undo a previously applied change.",
            {"change_id": string(), "file_path": string()},
        ),
        tool(
            "ListChanges",
            "List recent changes made.",
            {"file_path": string(), "limit": {"type": "integer", "default": 10}},
        ),
        tool(
            "ExtractLines",
            "Extract lines from a source file and append them to a target file.",
            {
                "source_file_path": string(),
                "target_file_path": string(),
                "start_pattern": string(),
                "end_pattern": string(),
                "line_count": integer(),
                "near_context": string(),
                "occurrence": occurrence(),
                "dry_run": dry_run(),
            },
            ["source_file_path", "target_file_path", "start_pattern"],
        ),
        tool(
            "ShowNumberedContext",
            "Show numbered lines of context around a pattern or line number.",
            {
                "file_path": string(),
                "pattern": string(),
                "line_number": integer(),
                "context_lines": {"type": "integer", "default": 3},
            },
            ["file_path"],
        ),
    ]
621
+
622
async def _execute_local_tool_calls(self, tool_calls_list):
    """Execute local (built-in) tool calls and return their tool-role responses.

    Args:
        tool_calls_list: Tool call objects, each carrying an ``id`` and a
            ``function`` with ``name`` and a JSON ``arguments`` string.
            The arguments string may hold several concatenated JSON
            objects; each one triggers a separate invocation of the tool.

    Returns:
        list[dict]: One ``role="tool"`` message per tool call, with the
        (possibly multi-part) result text in ``content``.
    """
    # Dispatch table: normalized (lowercased) tool name -> executor.
    # Replaces a long if/elif chain with an O(1) lookup and makes adding
    # new tools a one-line change. Every executor takes (coder, **params)
    # and returns a result string.
    dispatch = {
        "viewfilesatglob": execute_view_files_at_glob,
        "viewfilesmatching": execute_view_files_matching,
        "ls": execute_ls,
        "view": execute_view,
        "remove": _execute_remove,
        "makeeditable": _execute_make_editable,
        "makereadonly": _execute_make_readonly,
        "viewfileswithsymbol": _execute_view_files_with_symbol,
        "command": _execute_command,
        "commandinteractive": _execute_command_interactive,
        "grep": _execute_grep,
        "replacetext": _execute_replace_text,
        "replaceall": _execute_replace_all,
        "insertblock": _execute_insert_block,
        "deleteblock": _execute_delete_block,
        "replaceline": _execute_replace_line,
        "replacelines": _execute_replace_lines,
        "indentlines": _execute_indent_lines,
        "deleteline": _execute_delete_line,
        "deletelines": _execute_delete_lines,
        "undochange": _execute_undo_change,
        "listchanges": _execute_list_changes,
        "extractlines": _execute_extract_lines,
        "shownumberedcontext": execute_show_numbered_context,
    }

    tool_responses = []
    for tool_call in tool_calls_list:
        tool_name = tool_call.function.name
        result_message = ""
        try:
            # Arguments can be a stream of concatenated JSON objects.
            # Parse each chunk and run one tool invocation per object;
            # unparseable chunks are warned about and skipped.
            args_string = tool_call.function.arguments.strip()
            parsed_args_list = []
            if args_string:
                for chunk in utils.split_concatenated_json(args_string):
                    try:
                        parsed_args_list.append(json.loads(chunk))
                    except json.JSONDecodeError:
                        self.io.tool_warning(
                            f"Could not parse JSON chunk for tool {tool_name}: {chunk}"
                        )

            if not parsed_args_list and not args_string:
                parsed_args_list.append({})  # For tool calls with no arguments

            all_results_content = []
            # Resolve the executor once per tool call, not once per args object.
            executor = dispatch.get(tool_name.lower())

            for params in parsed_args_list:
                if executor is not None:
                    single_result = executor(self, **params)
                else:
                    single_result = f"Error: Unknown local tool name '{tool_name}'"
                all_results_content.append(str(single_result))

            result_message = "\n\n".join(all_results_content)

        except Exception as e:
            # Any failure (bad params, executor error) becomes the tool's
            # textual result so the conversation can continue.
            result_message = f"Error executing {tool_name}: {e}"
            self.io.tool_error(
                f"Error during {tool_name} execution: {e}\n{traceback.format_exc()}"
            )

        tool_responses.append(
            {
                "role": "tool",
                "tool_call_id": tool_call.id,
                "name": tool_name,
                "content": result_message,
            }
        )
    return tool_responses
722
+
723
def _execute_mcp_tool(self, server, tool_name, params):
    """Helper to execute a single MCP tool call, created from legacy format.

    Builds an OpenAI-style tool-call dict from ``tool_name``/``params``,
    runs it against ``server`` via the MCP client, and returns the result
    text (or an error string). Connects and disconnects the server around
    each call.
    """

    # This is a simplified, synchronous wrapper around async logic
    # It's duplicating logic from BaseCoder for legacy tool support.
    # NOTE(review): asyncio.run() creates a fresh event loop; this assumes
    # no loop is already running on this thread — confirm for callers.
    async def _exec_async():
        # Construct a ToolCall object-like structure to be compatible with mcp_client
        function_dict = {"name": tool_name, "arguments": json.dumps(params)}
        tool_call_dict = {
            "id": f"mcp-tool-call-{time.time()}",
            "function": function_dict,
            "type": "function",
        }
        try:
            session = await server.connect()
            call_result = await experimental_mcp_client.call_openai_tool(
                session=session,
                openai_tool=tool_call_dict,
            )

            # Flatten the MCP result content into a single string. Items are
            # distinguished structurally (duck-typed via hasattr) rather than
            # by concrete type.
            content_parts = []
            if call_result.content:
                for item in call_result.content:
                    if hasattr(item, "resource"):  # EmbeddedResource
                        resource = item.resource
                        if hasattr(resource, "text"):  # TextResourceContents
                            content_parts.append(resource.text)
                        elif hasattr(resource, "blob"):  # BlobResourceContents
                            try:
                                # Binary blobs are base64; try to surface them as UTF-8 text.
                                decoded_blob = base64.b64decode(resource.blob).decode("utf-8")
                                content_parts.append(decoded_blob)
                            except (UnicodeDecodeError, TypeError):
                                # Not text: fall back to a descriptive placeholder.
                                name = getattr(resource, "name", "unnamed")
                                mime_type = getattr(resource, "mimeType", "unknown mime type")
                                content_parts.append(
                                    f"[embedded binary resource: {name} ({mime_type})]"
                                )
                    elif hasattr(item, "text"):  # TextContent
                        content_parts.append(item.text)

            return "".join(content_parts)

        except Exception as e:
            # Report the failure to the user and return it as the tool result
            # so the conversation can continue.
            self.io.tool_warning(
                f"Executing {tool_name} on {server.name} failed: \n Error: {e}\n"
            )
            return f"Error executing tool call {tool_name}: {e}"
        finally:
            # Always release the server connection, success or failure.
            await server.disconnect()

    return asyncio.run(_exec_async())
774
+
775
def _calculate_context_block_tokens(self, force=False):
    """Compute token counts for every enhanced context block.

    Central place for token accounting so all parts of the code see
    consistent numbers. Also (re)populates the context-block cache as a
    side effect of generating each block.

    Args:
        force: When True, recompute even if counts were already calculated.
    """
    # Memoized: bail out unless the caller explicitly forces a refresh.
    if getattr(self, "tokens_calculated", False) and not force:
        return

    # Start from a clean slate for the token counts, and make sure the
    # block cache exists.
    self.context_block_tokens = {}
    if not hasattr(self, "context_blocks_cache"):
        self.context_blocks_cache = {}

    if not self.use_enhanced_context:
        return

    try:
        # Drop any cached blocks so every one is regenerated fresh.
        self.context_blocks_cache = {}

        # Generate each block and record its token cost.
        for block_type in (
            "environment_info",
            "directory_structure",
            "git_status",
            "symbol_outline",
        ):
            block_content = self._generate_context_block(block_type)
            if block_content:
                self.context_block_tokens[block_type] = self.main_model.token_count(
                    block_content
                )

        # Remember that counts are up to date.
        self.tokens_calculated = True
    except Exception:
        # Token counting is best-effort; never let a failure here break
        # the main functionality.
        pass
825
+
826
def _generate_context_block(self, block_name):
    """Build one named context block and cache any non-None result.

    Helper for get_cached_context_block; unknown names yield None.
    """
    # Map each block name to its zero-argument producer method.
    producers = {
        "environment_info": self.get_environment_info,
        "directory_structure": self.get_directory_structure,
        "git_status": self.get_git_status,
        "symbol_outline": self.get_context_symbol_outline,
        "context_summary": self.get_context_summary,
    }

    producer = producers.get(block_name)
    content = producer() if producer else None

    # Only successful generations are cached; None results are not stored
    # so a later call can retry.
    if content is not None:
        self.context_blocks_cache[block_name] = content

    return content
849
+
850
def get_cached_context_block(self, block_name):
    """Return the named context block, generating and caching it on demand.

    Intended for format_chat_chunks so blocks are not rebuilt repeatedly.
    Also guarantees token counts have been computed at least once.
    """
    # Lazily compute token counts (which also primes the block cache).
    if not getattr(self, "tokens_calculated", False):
        self._calculate_context_block_tokens()

    # Serve from the cache when possible.
    cache = getattr(self, "context_blocks_cache", None)
    if cache is not None and block_name in cache:
        return cache[block_name]

    # Cache miss: build the block now (it caches itself on success).
    return self._generate_context_block(block_name)
867
+
868
def set_granular_editing(self, enabled):
    """Toggle between granular editing tools and legacy search/replace.

    Args:
        enabled (bool): True selects the granular editing prompts,
            False selects the legacy search/replace prompts.
    """
    self.use_granular_editing = enabled
    if enabled:
        self.gpt_prompts = NavigatorPrompts()
    else:
        self.gpt_prompts = NavigatorLegacyPrompts()
877
+
878
def get_context_symbol_outline(self):
    """
    Generate a symbol outline for files currently in context using Tree-sitter,
    bypassing the cache for freshness.

    Returns:
        str | None: A ``<context name="symbol_outline">`` block listing code
        definitions per in-context file, or None when enhanced context /
        repo map is unavailable or an error occurs.
    """
    if not self.use_enhanced_context or not self.repo_map:
        return None

    try:
        result = '<context name="symbol_outline">\n'
        result += "## Symbol Outline (Current Context)\n\n"
        result += (
            "Code definitions (classes, functions, methods, etc.) found in files currently in"
            " chat context.\n\n"
        )

        # Outline both editable and read-only files.
        files_to_outline = list(self.abs_fnames) + list(self.abs_read_only_fnames)
        if not files_to_outline:
            result += "No files currently in context.\n"
            result += "</context>"
            return result

        all_tags_by_file = defaultdict(list)
        has_symbols = False

        # Use repo_map which should be initialized in BaseCoder
        if not self.repo_map:
            self.io.tool_warning("RepoMap not initialized, cannot generate symbol outline.")
            return None  # Or return a message indicating repo map is unavailable

        # Collect tags per relative filename; failures are per-file warnings,
        # not fatal for the whole outline.
        for abs_fname in sorted(files_to_outline):
            rel_fname = self.get_rel_fname(abs_fname)
            try:
                # Call get_tags_raw directly to bypass cache and ensure freshness
                tags = list(self.repo_map.get_tags_raw(abs_fname, rel_fname))
                if tags:
                    all_tags_by_file[rel_fname].extend(tags)
                    has_symbols = True
            except Exception as e:
                self.io.tool_warning(f"Could not get symbols for {rel_fname}: {e}")

        if not has_symbols:
            result += "No symbols found in the current context files.\n"
        else:
            for rel_fname in sorted(all_tags_by_file.keys()):
                # Sort by source position, then name, for a stable outline.
                tags = sorted(all_tags_by_file[rel_fname], key=lambda t: (t.line, t.name))

                definition_tags = []
                for tag in tags:
                    # Use specific_kind first if available, otherwise fall back to kind
                    kind_to_check = tag.specific_kind or tag.kind
                    # Check if the kind represents a definition using the set from RepoMap
                    if (
                        kind_to_check
                        and kind_to_check.lower() in self.repo_map.definition_kinds
                    ):
                        definition_tags.append(tag)

                if definition_tags:
                    result += f"### {rel_fname}\n"
                    # Simple list format for now, could be enhanced later (e.g., indentation for scope)
                    for tag in definition_tags:
                        # Display line number if available (tag.line is 0-based)
                        line_info = f", line {tag.line + 1}" if tag.line >= 0 else ""
                        # Display the specific kind (which we checked)
                        kind_to_check = tag.specific_kind or tag.kind  # Recalculate for safety
                        result += f"- {tag.name} ({kind_to_check}{line_info})\n"
                    result += "\n"  # Add space between files

        result += "</context>"
        return result.strip()  # Remove trailing newline if any

    except Exception as e:
        self.io.tool_error(f"Error generating symbol outline: {str(e)}")
        # Optionally include traceback for debugging if verbose
        # if self.verbose:
        #     self.io.tool_error(traceback.format_exc())
        return None
956
+
957
def format_chat_chunks(self):
    """Build chat chunks, layering enhanced context blocks on top of the base.

    Placement is chosen for prompt caching: slow-changing blocks
    (directory structure, environment info) go before the conversation
    history so the prompt prefix stays stable, while fast-changing blocks
    (context summary, symbol outline, git status) go after the chat files.
    """
    # Build the chunks manually (no super()) so we control block placement.
    chunks = self.format_chat_chunks_base()

    # Without enhanced context there is nothing to add.
    if not self.use_enhanced_context:
        return chunks

    # Refresh token counts; this also primes the context-block cache.
    self._calculate_context_block_tokens()

    # 1. Relatively static blocks, inserted as a system message right
    #    before the done_messages to preserve the cacheable prefix.
    prefix_blocks = [
        block
        for block in (
            self.get_cached_context_block("directory_structure"),
            self.get_cached_context_block("environment_info"),
        )
        if block
    ]
    if prefix_blocks:
        chunks.done.insert(0, dict(role="system", content="\n\n".join(prefix_blocks)))

    # 2. Dynamic blocks, appended as a system message after chat_files.
    #    The context summary gets special handling because it depends on
    #    the other blocks' token counts.
    suffix_blocks = [
        block
        for block in (
            self.get_context_summary(),
            self.get_cached_context_block("symbol_outline"),
            self.get_cached_context_block("git_status"),
        )
        if block
    ]
    if suffix_blocks:
        chunks.chat_files.append(dict(role="system", content="\n\n".join(suffix_blocks)))

    return chunks
1018
+
1019
def format_chat_chunks_base(self):
    """
    Create base chat chunks without enhanced context blocks.
    This is a copy of the parent's format_chat_chunks method to avoid
    calling super() which would create a recursive loop.

    Returns:
        ChatChunks: the assembled message groups (system, examples, done,
        repo, readonly_files, chat_files, cur, reminder).
    """
    self.choose_fence()
    main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)

    # Example conversations either get folded into the system prompt or
    # emitted as separate example messages, per the model's preference.
    example_messages = []
    if self.main_model.examples_as_sys_msg:
        if self.gpt_prompts.example_messages:
            main_sys += "\n# Example conversations:\n\n"
        for msg in self.gpt_prompts.example_messages:
            role = msg["role"]
            content = self.fmt_system_prompt(msg["content"])
            main_sys += f"## {role.upper()}: {content}\n\n"
        main_sys = main_sys.strip()
    else:
        for msg in self.gpt_prompts.example_messages:
            example_messages.append(
                dict(
                    role=msg["role"],
                    content=self.fmt_system_prompt(msg["content"]),
                )
            )
        if self.gpt_prompts.example_messages:
            # Reset marker so the model does not treat example files as live.
            example_messages += [
                dict(
                    role="user",
                    content=(
                        "I switched to a new code base. Please don't consider the above files"
                        " or try to edit them any longer."
                    ),
                ),
                dict(role="assistant", content="Ok."),
            ]

    if self.gpt_prompts.system_reminder:
        main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)

    chunks = ChatChunks()

    # Models without system-prompt support get the prompt as a user turn.
    if self.main_model.use_system_prompt:
        chunks.system = [
            dict(role="system", content=main_sys),
        ]
    else:
        chunks.system = [
            dict(role="user", content=main_sys),
            dict(role="assistant", content="Ok."),
        ]

    chunks.examples = example_messages

    self.summarize_end()
    chunks.done = self.done_messages

    chunks.repo = self.get_repo_messages()
    chunks.readonly_files = self.get_readonly_files_messages()
    chunks.chat_files = self.get_chat_files_messages()

    if self.gpt_prompts.system_reminder:
        reminder_message = [
            dict(
                role="system", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)
            ),
        ]
    else:
        reminder_message = []

    chunks.cur = list(self.cur_messages)
    chunks.reminder = []

    # Use accurate token counting method that considers enhanced context blocks
    base_messages = chunks.all_messages()
    messages_tokens = self.main_model.token_count(base_messages)
    reminder_tokens = self.main_model.token_count(reminder_message)
    cur_tokens = self.main_model.token_count(chunks.cur)

    if None not in (messages_tokens, reminder_tokens, cur_tokens):
        total_tokens = messages_tokens
        # Only add tokens for reminder and cur if they're not already included
        # in the messages_tokens calculation
        if not chunks.reminder:
            total_tokens += reminder_tokens
        if not chunks.cur:
            total_tokens += cur_tokens
    else:
        # add the reminder anyway
        total_tokens = 0

    # The last current message decides where a "user"-style reminder lands.
    if chunks.cur:
        final = chunks.cur[-1]
    else:
        final = None

    max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
    # Add the reminder prompt if we still have room to include it.
    # NOTE(review): `and` binds tighter than `or`, so this parses as
    # "not max_input_tokens OR (fits AND has system_reminder)". Harmless in
    # effect today because reminder_message is empty when there is no
    # system_reminder, but confirm the intended grouping.
    if (
        not max_input_tokens
        or total_tokens < max_input_tokens
        and self.gpt_prompts.system_reminder
    ):
        if self.main_model.reminder == "sys":
            chunks.reminder = reminder_message
        elif self.main_model.reminder == "user" and final and final["role"] == "user":
            # stuff it into the user message
            new_content = (
                final["content"]
                + "\n\n"
                + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
            )
            chunks.cur[-1] = dict(role=final["role"], content=new_content)

    return chunks
1135
+
1136
def get_context_summary(self):
    """
    Generate a summary of the current context, including file content tokens and additional context blocks,
    with an accurate total token count.

    Returns:
        str | None: A ``<context name="context_summary">`` block, or None
        when enhanced context is disabled or an error occurs. The result
        is cached in ``context_blocks_cache``.
    """
    if not self.use_enhanced_context:
        return None

    # If context_summary is already in the cache, return it
    if hasattr(self, "context_blocks_cache") and "context_summary" in self.context_blocks_cache:
        return self.context_blocks_cache["context_summary"]

    try:
        # Make sure token counts are updated before generating the summary
        if not hasattr(self, "context_block_tokens") or not self.context_block_tokens:
            self._calculate_context_block_tokens()

        result = '<context name="context_summary">\n'
        result += "## Current Context Overview\n\n"
        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
        if max_input_tokens:
            result += f"Model context limit: {max_input_tokens:,} tokens\n\n"

        total_file_tokens = 0
        editable_tokens = 0
        readonly_tokens = 0
        editable_files = []
        readonly_files = []

        # Editable files: per-file token counts with a size indicator
        # (thresholds: >5000 large, >1000 medium).
        if self.abs_fnames:
            result += "### Editable Files\n\n"
            for fname in sorted(self.abs_fnames):
                rel_fname = self.get_rel_fname(fname)
                content = self.io.read_text(fname)
                if content is not None:
                    tokens = self.main_model.token_count(content)
                    total_file_tokens += tokens
                    editable_tokens += tokens
                    size_indicator = (
                        "🔴 Large"
                        if tokens > 5000
                        else ("🟡 Medium" if tokens > 1000 else "🟢 Small")
                    )
                    editable_files.append(
                        f"- {rel_fname}: {tokens:,} tokens ({size_indicator})"
                    )
            if editable_files:
                result += "\n".join(editable_files) + "\n\n"
                result += (
                    f"**Total editable: {len(editable_files)} files,"
                    f" {editable_tokens:,} tokens**\n\n"
                )
        else:
            result += "No editable files in context\n\n"

        # Read-only files: same accounting as the editable section.
        if self.abs_read_only_fnames:
            result += "### Read-Only Files\n\n"
            for fname in sorted(self.abs_read_only_fnames):
                rel_fname = self.get_rel_fname(fname)
                content = self.io.read_text(fname)
                if content is not None:
                    tokens = self.main_model.token_count(content)
                    total_file_tokens += tokens
                    readonly_tokens += tokens
                    size_indicator = (
                        "🔴 Large"
                        if tokens > 5000
                        else ("🟡 Medium" if tokens > 1000 else "🟢 Small")
                    )
                    readonly_files.append(
                        f"- {rel_fname}: {tokens:,} tokens ({size_indicator})"
                    )
            if readonly_files:
                result += "\n".join(readonly_files) + "\n\n"
                result += (
                    f"**Total read-only: {len(readonly_files)} files,"
                    f" {readonly_tokens:,} tokens**\n\n"
                )
        else:
            result += "No read-only files in context\n\n"

        # Use the pre-calculated context block tokens
        extra_tokens = sum(self.context_block_tokens.values())
        total_tokens = total_file_tokens + extra_tokens

        result += f"**Total files usage: {total_file_tokens:,} tokens**\n\n"
        result += f"**Additional context usage: {extra_tokens:,} tokens**\n\n"
        result += f"**Total context usage: {total_tokens:,} tokens**"
        if max_input_tokens:
            percentage = (total_tokens / max_input_tokens) * 100
            result += f" ({percentage:.1f}% of limit)"
            # Warn the model when usage crosses 80% of the context window.
            if percentage > 80:
                result += "\n\n⚠️ **Context is getting full!** Remove non-essential files via:\n"
                result += '- `[tool_call(Remove, file_path="path/to/large_file.ext")]`\n'
                result += "- Keep only essential files in context for best performance"
        result += "\n</context>"

        # Cache the result
        if not hasattr(self, "context_blocks_cache"):
            self.context_blocks_cache = {}
        self.context_blocks_cache["context_summary"] = result

        return result
    except Exception as e:
        self.io.tool_error(f"Error generating context summary: {str(e)}")
        return None
1244
+
1245
def get_environment_info(self):
    """Build the ``environment_info`` enhanced context block.

    Reports the working directory, current date, platform, language
    preference, git repository status, and the enabled features.

    Returns:
        str | None: The formatted block, or None when enhanced context is
        disabled or an error occurs (errors are reported via io.tool_error).
    """
    if not self.use_enhanced_context:
        return None

    try:
        # ISO-style date for the model's benefit.
        today = datetime.now().strftime("%Y-%m-%d")
        # Fall back through chat language -> system locale -> default.
        lang_pref = self.chat_language or locale.getlocale()[0] or "en-US"

        lines = [
            '<context name="environment_info">',
            "## Environment Information",
            "",
            f"- Working directory: {self.root}",
            f"- Current date: {today}",
            f"- Platform: {platform.platform()}",
            f"- Language preference: {lang_pref}",
        ]

        # Git repository status, degrading gracefully when details fail.
        if self.repo:
            try:
                rel_repo_dir = self.repo.get_rel_repo_dir()
                num_files = len(self.repo.get_tracked_files())
                lines.append(f"- Git repository: {rel_repo_dir} with {num_files:,} files")
            except Exception:
                lines.append("- Git repository: active but details unavailable")
        else:
            lines.append("- Git repository: none")

        # Report which optional features are switched on.
        features = []
        if self.context_management_enabled:
            features.append("context management")
        if self.use_enhanced_context:
            features.append("enhanced context blocks")
        if features:
            lines.append(f"- Enabled features: {', '.join(features)}")

        return "\n".join(lines) + "\n</context>"
    except Exception as e:
        self.io.tool_error(f"Error generating environment info: {str(e)}")
        return None
1295
+
1296
def reply_completed(self):
    """Process the completed response from the LLM.

    This is a key method that:
    1. Processes any tool commands in the response (only after a '---' line)
    2. Processes any SEARCH/REPLACE blocks in the response (only before the '---' line if one exists)
    3. If tool commands were found, sets up for another automatic round

    This enables the "auto-exploration" workflow where the LLM can
    iteratively discover and analyze relevant files before providing
    a final answer to the user's question.

    Returns:
        bool: True when the turn is complete; False to trigger another
        reflection round (with ``self.reflected_message`` set).
    """
    # In granular editing mode, tool calls are handled by BaseCoder's process_tool_calls.
    # This method is now only for legacy tool call format and search/replace blocks.
    if self.use_granular_editing:
        # Handle SEARCH/REPLACE blocks
        content = self.partial_response_content
        if not content or not content.strip():
            return True

        # Check for search/replace blocks (all three markers must appear)
        has_search = "<<<<<<< SEARCH" in content
        has_divider = "=======" in content
        has_replace = ">>>>>>> REPLACE" in content
        if has_search and has_divider and has_replace:
            self.io.tool_output("Detected edit blocks, applying changes...")
            edited_files = self._apply_edits_from_response()
            if self.reflected_message:
                return False  # Trigger reflection if edits failed

            # If edits were successful, we might want to reflect.
            # For now, let's consider the turn complete.

        # Since tool calls are handled earlier, we finalize the turn.
        self.tool_call_count = 0
        self.files_added_in_exploration = set()
        self.move_back_cur_messages(None)
        return True

    # Legacy tool call processing for use_granular_editing=False
    content = self.partial_response_content
    if not content or not content.strip():
        return True
    original_content = content  # Keep the original response

    # Process tool commands: returns content with tool calls removed, results, flag if any tool calls were found,
    # and the content before the last '---' line
    processed_content, result_messages, tool_calls_found, content_before_last_separator = (
        self._process_tool_commands(content)
    )

    # Since we are no longer suppressing, the partial_response_content IS the final content.
    # We might want to update it to the processed_content (without tool calls) if we don't
    # want the raw tool calls to remain in the final assistant message history.
    # Let's update it for cleaner history.
    self.partial_response_content = processed_content.strip()

    # Process implicit file mentions using the content *after* tool calls were removed
    self._process_file_mentions(processed_content)

    # Check if the content contains the SEARCH/REPLACE markers
    has_search = "<<<<<<< SEARCH" in self.partial_response_content
    has_divider = "=======" in self.partial_response_content
    has_replace = ">>>>>>> REPLACE" in self.partial_response_content
    edit_match = has_search and has_divider and has_replace

    # Check if there's a '---' line - if yes, SEARCH/REPLACE blocks can only appear before it
    separator_marker = "\n---\n"
    if separator_marker in original_content and edit_match:
        # Check if the edit blocks are only in the part before the last '---' line
        has_search_before = "<<<<<<< SEARCH" in content_before_last_separator
        has_divider_before = "=======" in content_before_last_separator
        has_replace_before = ">>>>>>> REPLACE" in content_before_last_separator
        edit_match = has_search_before and has_divider_before and has_replace_before

    if edit_match:
        self.io.tool_output("Detected edit blocks, applying changes within Navigator...")
        edited_files = self._apply_edits_from_response()
        # If _apply_edits_from_response set a reflected_message (due to errors),
        # return False to trigger a reflection loop.
        if self.reflected_message:
            return False

        # If edits were successfully applied and we haven't exceeded reflection limits,
        # set up for another iteration (similar to tool calls)
        if edited_files and self.num_reflections < self.max_reflections:
            # Get the original user question from the most recent user message.
            # NOTE(review): if cur_messages is empty, original_question is never
            # assigned before its use below — confirm callers guarantee at least
            # one message here.
            if self.cur_messages and len(self.cur_messages) >= 1:
                for msg in reversed(self.cur_messages):
                    if msg["role"] == "user":
                        original_question = msg["content"]
                        break
                else:
                    # Default if no user message found (for/else: loop ended without break)
                    original_question = (
                        "Please continue your exploration and provide a final answer."
                    )

            # Construct the message for the next turn
            next_prompt = (
                "I have applied the edits you suggested. "
                f"The following files were modified: {', '.join(edited_files)}. "
                "Let me continue working on your request.\n\n"
                f"Your original question was: {original_question}"
            )

            self.reflected_message = next_prompt
            self.io.tool_output("Continuing after applying edits...")
            return False  # Indicate that we need another iteration

    # If any tool calls were found and we haven't exceeded reflection limits, set up for another iteration
    # This is implicit continuation when any tool calls are present, rather than requiring Continue explicitly
    if tool_calls_found and self.num_reflections < self.max_reflections:
        # Reset tool counter for next iteration
        self.tool_call_count = 0
        # Clear exploration files for the next round
        self.files_added_in_exploration = set()

        # Get the original user question from the most recent user message.
        # NOTE(review): same potential unbound-variable concern as above when
        # cur_messages is empty.
        if self.cur_messages and len(self.cur_messages) >= 1:
            for msg in reversed(self.cur_messages):
                if msg["role"] == "user":
                    original_question = msg["content"]
                    break
            else:
                # Default if no user message found (for/else: loop ended without break)
                original_question = (
                    "Please continue your exploration and provide a final answer."
                )

        # Construct the message for the next turn, including tool results
        next_prompt_parts = []
        next_prompt_parts.append(
            "I have processed the results of the previous tool calls. "
            "Let me analyze them and continue working towards your request."
        )

        if result_messages:
            next_prompt_parts.append("\nResults from previous tool calls:")
            # result_messages already have [Result (...): ...] format
            next_prompt_parts.extend(result_messages)
            next_prompt_parts.append(
                "\nBased on these results and the updated file context, I will proceed."
            )
        else:
            next_prompt_parts.append(
                "\nNo specific results were returned from the previous tool calls, but the"
                " file context may have been updated. I will proceed based on the current"
                " context."
            )

        next_prompt_parts.append(f"\nYour original question was: {original_question}")

        self.reflected_message = "\n".join(next_prompt_parts)

        self.io.tool_output("Continuing exploration...")
        return False  # Indicate that we need another iteration
    else:
        # Exploration finished for this turn.
        # Append results to the content that will be stored in history.
        if result_messages:
            results_block = "\n\n" + "\n".join(result_messages)
            # Append results to the cleaned content
            self.partial_response_content += results_block

        # After applying edits OR determining no edits were needed (and no reflection needed),
        # the turn is complete. Reset counters and finalize history.
        self.tool_call_count = 0
        self.files_added_in_exploration = set()
        # Move cur_messages to done_messages
        self.move_back_cur_messages(
            None
        )  # Pass None as we handled commit message earlier if needed
        return True  # Indicate exploration is finished for this round
1471
+ def _process_tool_commands(self, content):
1472
+ """
1473
+ Process tool commands in the `[tool_call(name, param=value)]` format within the content.
1474
+
1475
+ Rules:
1476
+ 1. Tool calls must appear after the LAST '---' line separator in the content
1477
+ 2. Any tool calls before this last separator are treated as text (not executed)
1478
+ 3. SEARCH/REPLACE blocks can only appear before this last separator
1479
+
1480
+ Returns processed content, result messages, and a flag indicating if any tool calls were found.
1481
+ Also returns the content before the last separator for SEARCH/REPLACE block validation.
1482
+ """
1483
+ result_messages = []
1484
+ modified_content = content # Start with original content
1485
+ tool_calls_found = False
1486
+ call_count = 0
1487
+ max_calls = self.max_tool_calls
1488
+
1489
+ # Check if there's a '---' separator and only process tool calls after the LAST one
1490
+ separator_marker = "---"
1491
+ content_parts = content.split(separator_marker)
1492
+
1493
+ # If there's no separator, treat the entire content as before the separator
1494
+ if len(content_parts) == 1:
1495
+ # Return the original content with no tool calls processed, and the content itself as before_separator
1496
+ return content, result_messages, False, content
1497
+
1498
+ # Take everything before the last separator (including intermediate separators)
1499
+ content_before_separator = separator_marker.join(content_parts[:-1])
1500
+ # Take only what comes after the last separator
1501
+ content_after_separator = content_parts[-1]
1502
+
1503
+ # Find tool calls using a more robust method, but only in the content after separator
1504
+ processed_content = content_before_separator + separator_marker
1505
+ last_index = 0
1506
+
1507
+ # Support any [tool_...(...)] format
1508
+ tool_call_pattern = re.compile(r"\[tool_.*?\(", re.DOTALL)
1509
+ end_marker = "]" # The parenthesis balancing finds the ')', we just need the final ']'
1510
+
1511
+ while True:
1512
+ match = tool_call_pattern.search(content_after_separator, last_index)
1513
+ if not match:
1514
+ processed_content += content_after_separator[last_index:]
1515
+ break
1516
+
1517
+ start_pos = match.start()
1518
+ start_marker = match.group(0)
1519
+
1520
+ # Check for escaped tool call: \[tool_...
1521
+ # Count preceding backslashes to handle \\
1522
+ backslashes = 0
1523
+ p = start_pos - 1
1524
+ while p >= 0 and content_after_separator[p] == "\\":
1525
+ backslashes += 1
1526
+ p -= 1
1527
+
1528
+ if backslashes % 2 == 1:
1529
+ # Odd number of backslashes means it's escaped. Treat as text.
1530
+ # We append up to the end of the marker and continue searching.
1531
+ processed_content += content_after_separator[
1532
+ last_index : start_pos + len(start_marker)
1533
+ ]
1534
+ last_index = start_pos + len(start_marker)
1535
+ continue
1536
+
1537
+ # Append content before the (non-escaped) tool call
1538
+ processed_content += content_after_separator[last_index:start_pos]
1539
+
1540
+ scan_start_pos = start_pos + len(start_marker)
1541
+ paren_level = 1
1542
+ in_single_quotes = False
1543
+ in_double_quotes = False
1544
+ escaped = False
1545
+ end_paren_pos = -1
1546
+
1547
+ # Scan to find the matching closing parenthesis, respecting quotes
1548
+ for i in range(scan_start_pos, len(content_after_separator)):
1549
+ char = content_after_separator[i]
1550
+
1551
+ if escaped:
1552
+ escaped = False
1553
+ elif char == "\\":
1554
+ escaped = True
1555
+ elif char == "'" and not in_double_quotes:
1556
+ in_single_quotes = not in_single_quotes
1557
+ elif char == '"' and not in_single_quotes:
1558
+ in_double_quotes = not in_double_quotes
1559
+ elif char == "(" and not in_single_quotes and not in_double_quotes:
1560
+ paren_level += 1
1561
+ elif char == ")" and not in_single_quotes and not in_double_quotes:
1562
+ paren_level -= 1
1563
+ if paren_level == 0:
1564
+ end_paren_pos = i
1565
+ break
1566
+
1567
+ # Check for the end marker after the closing parenthesis, skipping whitespace
1568
+ expected_end_marker_start = end_paren_pos + 1
1569
+ actual_end_marker_start = -1
1570
+ end_marker_found = False
1571
+ if end_paren_pos != -1: # Only search if we found a closing parenthesis
1572
+ for j in range(expected_end_marker_start, len(content_after_separator)):
1573
+ if not content_after_separator[j].isspace():
1574
+ actual_end_marker_start = j
1575
+ # Check if the found character is the end marker ']'
1576
+ if content_after_separator[actual_end_marker_start] == end_marker:
1577
+ end_marker_found = True
1578
+ break # Stop searching after first non-whitespace char
1579
+
1580
+ if not end_marker_found:
1581
+ # Try to extract the tool name for better error message
1582
+ tool_name = "unknown"
1583
+ try:
1584
+ # Look for the first comma after the tool call start
1585
+ partial_content = content_after_separator[
1586
+ scan_start_pos : scan_start_pos + 100
1587
+ ] # Limit to avoid huge strings
1588
+ comma_pos = partial_content.find(",")
1589
+ if comma_pos > 0:
1590
+ tool_name = partial_content[:comma_pos].strip()
1591
+ else:
1592
+ # If no comma, look for opening parenthesis or first whitespace
1593
+ space_pos = partial_content.find(" ")
1594
+ paren_pos = partial_content.find("(")
1595
+ if space_pos > 0 and (paren_pos < 0 or space_pos < paren_pos):
1596
+ tool_name = partial_content[:space_pos].strip()
1597
+ elif paren_pos > 0:
1598
+ tool_name = partial_content[:paren_pos].strip()
1599
+ except Exception:
1600
+ pass # Silently fail if we can't extract the name
1601
+
1602
+ # Malformed call: couldn't find matching ')' or the subsequent ']'
1603
+ self.io.tool_warning(
1604
+ f"Malformed tool call for '{tool_name}'. Missing closing parenthesis or"
1605
+ " bracket. Skipping."
1606
+ )
1607
+ # Append the start marker itself to processed content so it's not lost
1608
+ processed_content += start_marker
1609
+ last_index = scan_start_pos # Continue searching after the marker
1610
+ continue
1611
+
1612
+ # Found a potential tool call
1613
+ # Adjust full_match_str and last_index based on the actual end marker ']' position
1614
+ full_match_str = content_after_separator[
1615
+ start_pos : actual_end_marker_start + 1
1616
+ ] # End marker ']' is 1 char
1617
+ inner_content = content_after_separator[scan_start_pos:end_paren_pos].strip()
1618
+ last_index = actual_end_marker_start + 1 # Move past the processed call (including ']')
1619
+
1620
+ call_count += 1
1621
+ if call_count > max_calls:
1622
+ self.io.tool_warning(
1623
+ f"Exceeded maximum tool calls ({max_calls}). Skipping remaining calls."
1624
+ )
1625
+ # Don't append the skipped call to processed_content
1626
+ continue # Skip processing this call
1627
+
1628
+ tool_calls_found = True
1629
+ tool_name = None
1630
+ params = {}
1631
+ result_message = None
1632
+
1633
+ # Mark that we found at least one tool call (assuming it passes validation)
1634
+ tool_calls_found = True
1635
+
1636
+ try:
1637
+ # Pre-process inner_content to handle non-identifier tool names by quoting them.
1638
+ # This allows ast.parse to succeed on names like 'resolve-library-id'.
1639
+ if inner_content:
1640
+ parts = inner_content.split(",", 1)
1641
+ potential_tool_name = parts[0].strip()
1642
+
1643
+ is_string = (
1644
+ potential_tool_name.startswith("'") and potential_tool_name.endswith("'")
1645
+ ) or (potential_tool_name.startswith('"') and potential_tool_name.endswith('"'))
1646
+
1647
+ if not potential_tool_name.isidentifier() and not is_string:
1648
+ # It's not a valid identifier and not a string, so quote it.
1649
+ # Use json.dumps to handle escaping correctly.
1650
+ quoted_tool_name = json.dumps(potential_tool_name)
1651
+ if len(parts) > 1:
1652
+ inner_content = quoted_tool_name + ", " + parts[1]
1653
+ else:
1654
+ inner_content = quoted_tool_name
1655
+
1656
+ # Wrap the inner content to make it parseable as a function call
1657
+ # Example: ToolName, key="value" becomes f(ToolName, key="value")
1658
+ parse_str = f"f({inner_content})"
1659
+ parsed_ast = ast.parse(parse_str)
1660
+
1661
+ # Validate AST structure
1662
+ if (
1663
+ not isinstance(parsed_ast, ast.Module)
1664
+ or not parsed_ast.body
1665
+ or not isinstance(parsed_ast.body[0], ast.Expr)
1666
+ ):
1667
+ raise ValueError("Unexpected AST structure")
1668
+ call_node = parsed_ast.body[0].value
1669
+ if not isinstance(call_node, ast.Call):
1670
+ raise ValueError("Expected a Call node")
1671
+
1672
+ # Extract tool name (should be the first positional argument)
1673
+ if not call_node.args:
1674
+ raise ValueError("Tool name not found or invalid")
1675
+
1676
+ tool_name_node = call_node.args[0]
1677
+ if isinstance(tool_name_node, ast.Name):
1678
+ tool_name = tool_name_node.id
1679
+ elif isinstance(tool_name_node, ast.Constant) and isinstance(
1680
+ tool_name_node.value, str
1681
+ ):
1682
+ tool_name = tool_name_node.value
1683
+ else:
1684
+ raise ValueError("Tool name must be an identifier or a string literal")
1685
+
1686
+ # Extract keyword arguments
1687
+ for keyword in call_node.keywords:
1688
+ key = keyword.arg
1689
+ value_node = keyword.value
1690
+ # Extract value based on AST node type
1691
+ if isinstance(value_node, ast.Constant):
1692
+ value = value_node.value
1693
+ # Check if this is a multiline string and trim whitespace
1694
+ if isinstance(value, str) and "\n" in value:
1695
+ # Get the source line(s) for this node to check if it's a triple-quoted string
1696
+ lineno = value_node.lineno if hasattr(value_node, "lineno") else 0
1697
+ end_lineno = (
1698
+ value_node.end_lineno
1699
+ if hasattr(value_node, "end_lineno")
1700
+ else lineno
1701
+ )
1702
+ if end_lineno > lineno: # It's a multiline string
1703
+ # Trim exactly one leading and one trailing newline if present
1704
+ if value.startswith("\n"):
1705
+ value = value[1:]
1706
+ if value.endswith("\n"):
1707
+ value = value[:-1]
1708
+ elif isinstance(
1709
+ value_node, ast.Name
1710
+ ): # Handle unquoted values like True/False/None or variables
1711
+ id_val = value_node.id.lower()
1712
+ if id_val == "true":
1713
+ value = True
1714
+ elif id_val == "false":
1715
+ value = False
1716
+ elif id_val == "none":
1717
+ value = None
1718
+ else:
1719
+ value = value_node.id # Keep as string if it's something else
1720
+ # Add more types if needed (e.g., ast.List, ast.Dict)
1721
+ else:
1722
+ # Attempt to reconstruct the source for complex types, or raise error
1723
+ try:
1724
+ # Note: ast.unparse requires Python 3.9+
1725
+ # If using older Python, might need a different approach or limit supported types
1726
+ value = ast.unparse(value_node)
1727
+ except AttributeError: # Handle case where ast.unparse is not available
1728
+ raise ValueError(
1729
+ f"Unsupported argument type for key '{key}': {type(value_node)}"
1730
+ )
1731
+ except Exception as unparse_e:
1732
+ raise ValueError(
1733
+ f"Could not unparse value for key '{key}': {unparse_e}"
1734
+ )
1735
+
1736
+ # Check for suppressed values (e.g., "...")
1737
+ suppressed_arg_values = ["..."]
1738
+ if isinstance(value, str) and value in suppressed_arg_values:
1739
+ self.io.tool_warning(
1740
+ f"Skipping suppressed argument value '{value}' for key '{key}' in tool"
1741
+ f" '{tool_name}'"
1742
+ )
1743
+ continue
1744
+
1745
+ params[key] = value
1746
+
1747
+ except (SyntaxError, ValueError) as e:
1748
+ result_message = f"Error parsing tool call '{inner_content}': {e}"
1749
+ self.io.tool_error(f"Failed to parse tool call: {full_match_str}\nError: {e}")
1750
+ # Don't append the malformed call to processed_content
1751
+ result_messages.append(f"[Result (Parse Error): {result_message}]")
1752
+ continue # Skip execution
1753
+ except Exception as e: # Catch any other unexpected parsing errors
1754
+ result_message = f"Unexpected error parsing tool call '{inner_content}': {e}"
1755
+ self.io.tool_error(
1756
+ f"Unexpected error during parsing: {full_match_str}\nError:"
1757
+ f" {e}\n{traceback.format_exc()}"
1758
+ )
1759
+ result_messages.append(f"[Result (Parse Error): {result_message}]")
1760
+ continue
1761
+
1762
+ # Execute the tool based on its name
1763
+ try:
1764
+ # Normalize tool name for case-insensitive matching
1765
+ norm_tool_name = tool_name.lower()
1766
+
1767
+ if norm_tool_name == "viewfilesatglob":
1768
+ pattern = params.get("pattern")
1769
+ if pattern is not None:
1770
+ # Call the imported function
1771
+ result_message = execute_view_files_at_glob(self, pattern)
1772
+ else:
1773
+ result_message = "Error: Missing 'pattern' parameter for ViewFilesAtGlob"
1774
+ elif norm_tool_name == "viewfilesmatching":
1775
+ pattern = params.get("pattern")
1776
+ file_pattern = params.get("file_pattern") # Optional
1777
+ regex = params.get("regex", False) # Default to False if not provided
1778
+ if pattern is not None:
1779
+ result_message = execute_view_files_matching(
1780
+ self, pattern, file_pattern, regex
1781
+ )
1782
+ else:
1783
+ result_message = "Error: Missing 'pattern' parameter for ViewFilesMatching"
1784
+ elif norm_tool_name == "ls":
1785
+ directory = params.get("directory")
1786
+ if directory is not None:
1787
+ result_message = execute_ls(self, directory)
1788
+ else:
1789
+ result_message = "Error: Missing 'directory' parameter for Ls"
1790
+ elif norm_tool_name == "view":
1791
+ file_path = params.get("file_path")
1792
+ if file_path is not None:
1793
+ result_message = execute_view(self, file_path)
1794
+ else:
1795
+ result_message = "Error: Missing 'file_path' parameter for View"
1796
+ elif norm_tool_name == "remove":
1797
+ file_path = params.get("file_path")
1798
+ if file_path is not None:
1799
+ result_message = _execute_remove(self, file_path)
1800
+ else:
1801
+ result_message = "Error: Missing 'file_path' parameter for Remove"
1802
+ elif norm_tool_name == "makeeditable":
1803
+ file_path = params.get("file_path")
1804
+ if file_path is not None:
1805
+ result_message = _execute_make_editable(self, file_path)
1806
+ else:
1807
+ result_message = "Error: Missing 'file_path' parameter for MakeEditable"
1808
+ elif norm_tool_name == "makereadonly":
1809
+ file_path = params.get("file_path")
1810
+ if file_path is not None:
1811
+ result_message = _execute_make_readonly(self, file_path)
1812
+ else:
1813
+ result_message = "Error: Missing 'file_path' parameter for MakeReadonly"
1814
+ elif norm_tool_name == "viewfileswithsymbol":
1815
+ symbol = params.get("symbol")
1816
+ if symbol is not None:
1817
+ # Call the imported function from the tools directory
1818
+ result_message = _execute_view_files_with_symbol(self, symbol)
1819
+ else:
1820
+ result_message = "Error: Missing 'symbol' parameter for ViewFilesWithSymbol"
1821
+
1822
+ # Command tools
1823
+ elif norm_tool_name == "command":
1824
+ command_string = params.get("command_string")
1825
+ if command_string is not None:
1826
+ result_message = _execute_command(self, command_string)
1827
+ else:
1828
+ result_message = "Error: Missing 'command_string' parameter for Command"
1829
+ elif norm_tool_name == "commandinteractive":
1830
+ command_string = params.get("command_string")
1831
+ if command_string is not None:
1832
+ result_message = _execute_command_interactive(self, command_string)
1833
+ else:
1834
+ result_message = (
1835
+ "Error: Missing 'command_string' parameter for CommandInteractive"
1836
+ )
1837
+
1838
+ # Grep tool
1839
+ elif norm_tool_name == "grep":
1840
+ pattern = params.get("pattern")
1841
+ file_pattern = params.get("file_pattern", "*") # Default to all files
1842
+ directory = params.get("directory", ".") # Default to current directory
1843
+ use_regex = params.get("use_regex", False) # Default to literal search
1844
+ case_insensitive = params.get(
1845
+ "case_insensitive", False
1846
+ ) # Default to case-sensitive
1847
+ context_before = params.get("context_before", 5)
1848
+ context_after = params.get("context_after", 5)
1849
+
1850
+ if pattern is not None:
1851
+ # Import the function if not already imported (it should be)
1852
+ from aider.tools.grep import _execute_grep
1853
+
1854
+ result_message = _execute_grep(
1855
+ self,
1856
+ pattern,
1857
+ file_pattern,
1858
+ directory,
1859
+ use_regex,
1860
+ case_insensitive,
1861
+ context_before,
1862
+ context_after,
1863
+ )
1864
+ else:
1865
+ result_message = "Error: Missing required 'pattern' parameter for Grep"
1866
+
1867
+ # Granular editing tools
1868
+ elif norm_tool_name == "replacetext":
1869
+ file_path = params.get("file_path")
1870
+ find_text = params.get("find_text")
1871
+ replace_text = params.get("replace_text")
1872
+ near_context = params.get("near_context")
1873
+ occurrence = params.get("occurrence", 1) # Default to first occurrence
1874
+ change_id = params.get("change_id")
1875
+ dry_run = params.get("dry_run", False) # Default to False
1876
+
1877
+ if file_path is not None and find_text is not None and replace_text is not None:
1878
+ result_message = _execute_replace_text(
1879
+ self,
1880
+ file_path,
1881
+ find_text,
1882
+ replace_text,
1883
+ near_context,
1884
+ occurrence,
1885
+ change_id,
1886
+ dry_run,
1887
+ )
1888
+ else:
1889
+ result_message = (
1890
+ "Error: Missing required parameters for ReplaceText (file_path,"
1891
+ " find_text, replace_text)"
1892
+ )
1893
+
1894
+ elif norm_tool_name == "replaceall":
1895
+ file_path = params.get("file_path")
1896
+ find_text = params.get("find_text")
1897
+ replace_text = params.get("replace_text")
1898
+ change_id = params.get("change_id")
1899
+ dry_run = params.get("dry_run", False) # Default to False
1900
+
1901
+ if file_path is not None and find_text is not None and replace_text is not None:
1902
+ result_message = _execute_replace_all(
1903
+ self, file_path, find_text, replace_text, change_id, dry_run
1904
+ )
1905
+ else:
1906
+ result_message = (
1907
+ "Error: Missing required parameters for ReplaceAll (file_path,"
1908
+ " find_text, replace_text)"
1909
+ )
1910
+
1911
+ elif norm_tool_name == "insertblock":
1912
+ file_path = params.get("file_path")
1913
+ content = params.get("content")
1914
+ after_pattern = params.get("after_pattern")
1915
+ before_pattern = params.get("before_pattern")
1916
+ occurrence = params.get("occurrence", 1) # Default 1
1917
+ change_id = params.get("change_id")
1918
+ dry_run = params.get("dry_run", False) # Default False
1919
+ position = params.get("position")
1920
+ auto_indent = params.get("auto_indent", True) # Default True
1921
+ use_regex = params.get("use_regex", False) # Default False
1922
+
1923
+ if (
1924
+ file_path is not None
1925
+ and content is not None
1926
+ and (
1927
+ after_pattern is not None
1928
+ or before_pattern is not None
1929
+ or position is not None
1930
+ )
1931
+ ):
1932
+ result_message = _execute_insert_block(
1933
+ self,
1934
+ file_path,
1935
+ content,
1936
+ after_pattern,
1937
+ before_pattern,
1938
+ occurrence,
1939
+ change_id,
1940
+ dry_run,
1941
+ position,
1942
+ auto_indent,
1943
+ use_regex,
1944
+ )
1945
+ else:
1946
+ result_message = (
1947
+ "Error: Missing required parameters for InsertBlock (file_path,"
1948
+ " content, and either after_pattern or before_pattern)"
1949
+ )
1950
+
1951
+ elif norm_tool_name == "deleteblock":
1952
+ file_path = params.get("file_path")
1953
+ start_pattern = params.get("start_pattern")
1954
+ end_pattern = params.get("end_pattern")
1955
+ line_count = params.get("line_count")
1956
+ near_context = params.get("near_context") # New
1957
+ occurrence = params.get("occurrence", 1) # New, default 1
1958
+ change_id = params.get("change_id")
1959
+ dry_run = params.get("dry_run", False) # New, default False
1960
+
1961
+ if file_path is not None and start_pattern is not None:
1962
+ result_message = _execute_delete_block(
1963
+ self,
1964
+ file_path,
1965
+ start_pattern,
1966
+ end_pattern,
1967
+ line_count,
1968
+ near_context,
1969
+ occurrence,
1970
+ change_id,
1971
+ dry_run,
1972
+ )
1973
+ else:
1974
+ result_message = (
1975
+ "Error: Missing required parameters for DeleteBlock (file_path,"
1976
+ " start_pattern)"
1977
+ )
1978
+
1979
+ elif norm_tool_name == "replaceline":
1980
+ file_path = params.get("file_path")
1981
+ line_number = params.get("line_number")
1982
+ new_content = params.get("new_content")
1983
+ change_id = params.get("change_id")
1984
+ dry_run = params.get("dry_run", False) # New, default False
1985
+
1986
+ if (
1987
+ file_path is not None
1988
+ and line_number is not None
1989
+ and new_content is not None
1990
+ ):
1991
+ result_message = _execute_replace_line(
1992
+ self, file_path, line_number, new_content, change_id, dry_run
1993
+ )
1994
+ else:
1995
+ result_message = (
1996
+ "Error: Missing required parameters for ReplaceLine (file_path,"
1997
+ " line_number, new_content)"
1998
+ )
1999
+
2000
+ elif norm_tool_name == "replacelines":
2001
+ file_path = params.get("file_path")
2002
+ start_line = params.get("start_line")
2003
+ end_line = params.get("end_line")
2004
+ new_content = params.get("new_content")
2005
+ change_id = params.get("change_id")
2006
+ dry_run = params.get("dry_run", False) # New, default False
2007
+
2008
+ if (
2009
+ file_path is not None
2010
+ and start_line is not None
2011
+ and end_line is not None
2012
+ and new_content is not None
2013
+ ):
2014
+ result_message = _execute_replace_lines(
2015
+ self, file_path, start_line, end_line, new_content, change_id, dry_run
2016
+ )
2017
+ else:
2018
+ result_message = (
2019
+ "Error: Missing required parameters for ReplaceLines (file_path,"
2020
+ " start_line, end_line, new_content)"
2021
+ )
2022
+
2023
+ elif norm_tool_name == "indentlines":
2024
+ file_path = params.get("file_path")
2025
+ start_pattern = params.get("start_pattern")
2026
+ end_pattern = params.get("end_pattern")
2027
+ line_count = params.get("line_count")
2028
+ indent_levels = params.get("indent_levels", 1) # Default to indent 1 level
2029
+ near_context = params.get("near_context") # New
2030
+ occurrence = params.get("occurrence", 1) # New, default 1
2031
+ change_id = params.get("change_id")
2032
+ dry_run = params.get("dry_run", False) # New, default False
2033
+
2034
+ if file_path is not None and start_pattern is not None:
2035
+ result_message = _execute_indent_lines(
2036
+ self,
2037
+ file_path,
2038
+ start_pattern,
2039
+ end_pattern,
2040
+ line_count,
2041
+ indent_levels,
2042
+ near_context,
2043
+ occurrence,
2044
+ change_id,
2045
+ dry_run,
2046
+ )
2047
+ else:
2048
+ result_message = (
2049
+ "Error: Missing required parameters for IndentLines (file_path,"
2050
+ " start_pattern)"
2051
+ )
2052
+
2053
+ elif norm_tool_name == "deleteline":
2054
+ file_path = params.get("file_path")
2055
+ line_number = params.get("line_number")
2056
+ change_id = params.get("change_id")
2057
+ dry_run = params.get("dry_run", False)
2058
+
2059
+ if file_path is not None and line_number is not None:
2060
+ result_message = _execute_delete_line(
2061
+ self, file_path, line_number, change_id, dry_run
2062
+ )
2063
+ else:
2064
+ result_message = (
2065
+ "Error: Missing required parameters for DeleteLine (file_path,"
2066
+ " line_number)"
2067
+ )
2068
+
2069
+ elif norm_tool_name == "deletelines":
2070
+ file_path = params.get("file_path")
2071
+ start_line = params.get("start_line")
2072
+ end_line = params.get("end_line")
2073
+ change_id = params.get("change_id")
2074
+ dry_run = params.get("dry_run", False)
2075
+
2076
+ if file_path is not None and start_line is not None and end_line is not None:
2077
+ result_message = _execute_delete_lines(
2078
+ self, file_path, start_line, end_line, change_id, dry_run
2079
+ )
2080
+ else:
2081
+ result_message = (
2082
+ "Error: Missing required parameters for DeleteLines (file_path,"
2083
+ " start_line, end_line)"
2084
+ )
2085
+
2086
+ elif norm_tool_name == "undochange":
2087
+ change_id = params.get("change_id")
2088
+ file_path = params.get("file_path")
2089
+
2090
+ result_message = _execute_undo_change(self, change_id, file_path)
2091
+
2092
+ elif norm_tool_name == "listchanges":
2093
+ file_path = params.get("file_path")
2094
+ limit = params.get("limit", 10)
2095
+
2096
+ result_message = _execute_list_changes(self, file_path, limit)
2097
+
2098
+ elif norm_tool_name == "extractlines":
2099
+ source_file_path = params.get("source_file_path")
2100
+ target_file_path = params.get("target_file_path")
2101
+ start_pattern = params.get("start_pattern")
2102
+ end_pattern = params.get("end_pattern")
2103
+ line_count = params.get("line_count")
2104
+ near_context = params.get("near_context")
2105
+ occurrence = params.get("occurrence", 1)
2106
+ dry_run = params.get("dry_run", False)
2107
+
2108
+ if source_file_path and target_file_path and start_pattern:
2109
+ result_message = _execute_extract_lines(
2110
+ self,
2111
+ source_file_path,
2112
+ target_file_path,
2113
+ start_pattern,
2114
+ end_pattern,
2115
+ line_count,
2116
+ near_context,
2117
+ occurrence,
2118
+ dry_run,
2119
+ )
2120
+ else:
2121
+ result_message = (
2122
+ "Error: Missing required parameters for ExtractLines (source_file_path,"
2123
+ " target_file_path, start_pattern)"
2124
+ )
2125
+
2126
+ elif norm_tool_name == "shownumberedcontext":
2127
+ file_path = params.get("file_path")
2128
+ pattern = params.get("pattern")
2129
+ line_number = params.get("line_number")
2130
+ context_lines = params.get("context_lines", 3) # Default context
2131
+
2132
+ if file_path is not None and (pattern is not None or line_number is not None):
2133
+ result_message = execute_show_numbered_context(
2134
+ self, file_path, pattern, line_number, context_lines
2135
+ )
2136
+ else:
2137
+ result_message = (
2138
+ "Error: Missing required parameters for ViewNumberedContext (file_path"
2139
+ " and either pattern or line_number)"
2140
+ )
2141
+
2142
+ else:
2143
+ result_message = f"Error: Unknown tool name '{tool_name}'"
2144
+ if self.mcp_tools:
2145
+ for server_name, server_tools in self.mcp_tools:
2146
+ if any(
2147
+ t.get("function", {}).get("name") == tool_name for t in server_tools
2148
+ ):
2149
+ server = next(
2150
+ (s for s in self.mcp_servers if s.name == server_name), None
2151
+ )
2152
+ if server:
2153
+ result_message = self._execute_mcp_tool(
2154
+ server, tool_name, params
2155
+ )
2156
+ else:
2157
+ result_message = (
2158
+ f"Error: Could not find server instance for {server_name}"
2159
+ )
2160
+ break
2161
+
2162
+ except Exception as e:
2163
+ result_message = f"Error executing {tool_name}: {str(e)}"
2164
+ self.io.tool_error(
2165
+ f"Error during {tool_name} execution: {e}\n{traceback.format_exc()}"
2166
+ )
2167
+
2168
+ if result_message:
2169
+ result_messages.append(f"[Result ({tool_name}): {result_message}]")
2170
+
2171
+ # Note: We don't add the tool call string back to processed_content
2172
+
2173
+ # Update internal counter
2174
+ self.tool_call_count += call_count
2175
+
2176
+ # Return the content with tool calls removed
2177
+ modified_content = processed_content
2178
+
2179
+ # Update internal counter
2180
+ self.tool_call_count += call_count
2181
+
2182
+ return modified_content, result_messages, tool_calls_found, content_before_separator
2183
+
2184
+ def _apply_edits_from_response(self):
2185
+ """
2186
+ Parses and applies SEARCH/REPLACE edits found in self.partial_response_content.
2187
+ Returns a set of relative file paths that were successfully edited.
2188
+ """
2189
+ edited_files = set()
2190
+ try:
2191
+ # 1. Get edits (logic from EditBlockCoder.get_edits)
2192
+ # Use the current partial_response_content which contains the LLM response
2193
+ # including the edit blocks but excluding the tool calls.
2194
+ edits = list(
2195
+ find_original_update_blocks(
2196
+ self.partial_response_content,
2197
+ self.fence,
2198
+ self.get_inchat_relative_files(),
2199
+ )
2200
+ )
2201
+ # Separate shell commands from file edits
2202
+ self.shell_commands += [edit[1] for edit in edits if edit[0] is None]
2203
+ edits = [edit for edit in edits if edit[0] is not None]
2204
+
2205
+ # 2. Prepare edits (check permissions, commit dirty files)
2206
+ prepared_edits = []
2207
+ seen_paths = dict()
2208
+ self.need_commit_before_edits = set() # Reset before checking
2209
+
2210
+ for edit in edits:
2211
+ path = edit[0]
2212
+ if path in seen_paths:
2213
+ allowed = seen_paths[path]
2214
+ else:
2215
+ # Use the base Coder's permission check method
2216
+ allowed = self.allowed_to_edit(path)
2217
+ seen_paths[path] = allowed
2218
+ if allowed:
2219
+ prepared_edits.append(edit)
2220
+
2221
+ # Commit any dirty files identified by allowed_to_edit
2222
+ self.dirty_commit()
2223
+ self.need_commit_before_edits = set() # Clear after commit
2224
+
2225
+ # 3. Apply edits (logic adapted from EditBlockCoder.apply_edits)
2226
+ failed = []
2227
+ passed = []
2228
+ for edit in prepared_edits:
2229
+ path, original, updated = edit
2230
+ full_path = self.abs_root_path(path)
2231
+ new_content = None
2232
+
2233
+ if Path(full_path).exists():
2234
+ content = self.io.read_text(full_path)
2235
+ # Use the imported do_replace function
2236
+ new_content = do_replace(full_path, content, original, updated, self.fence)
2237
+
2238
+ # Simplified cross-file patching check from EditBlockCoder
2239
+ if not new_content and original.strip():
2240
+ for other_full_path in self.abs_fnames:
2241
+ if other_full_path == full_path:
2242
+ continue
2243
+ other_content = self.io.read_text(other_full_path)
2244
+ other_new_content = do_replace(
2245
+ other_full_path, other_content, original, updated, self.fence
2246
+ )
2247
+ if other_new_content:
2248
+ path = self.get_rel_fname(other_full_path)
2249
+ full_path = other_full_path
2250
+ new_content = other_new_content
2251
+ self.io.tool_warning(f"Applied edit intended for {edit[0]} to {path}")
2252
+ break
2253
+
2254
+ if new_content:
2255
+ if not self.dry_run:
2256
+ self.io.write_text(full_path, new_content)
2257
+ self.io.tool_output(f"Applied edit to {path}")
2258
+ else:
2259
+ self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
2260
+ passed.append((path, original, updated)) # Store path relative to root
2261
+ else:
2262
+ failed.append(edit)
2263
+
2264
+ if failed:
2265
+ # Handle failed edits (adapted from EditBlockCoder)
2266
+ blocks = "block" if len(failed) == 1 else "blocks"
2267
+ error_message = f"# {len(failed)} SEARCH/REPLACE {blocks} failed to match!\n"
2268
+ for edit in failed:
2269
+ path, original, updated = edit
2270
+ full_path = self.abs_root_path(path)
2271
+ content = self.io.read_text(full_path) # Read content again for context
2272
+
2273
+ error_message += f"""
2274
+ ## SearchReplaceNoExactMatch: This SEARCH block failed to exactly match lines in {path}
2275
+ <<<<<<< SEARCH
2276
+ {original}=======
2277
+ {updated}>>>>>>> REPLACE
2278
+
2279
+ """
2280
+ did_you_mean = find_similar_lines(original, content)
2281
+ if did_you_mean:
2282
+ error_message += f"""Did you mean to match some of these actual lines from {path}?
2283
+
2284
+ {self.fence[0]}
2285
+ {did_you_mean}
2286
+ {self.fence[1]}
2287
+
2288
+ """
2289
+ if updated in content and updated:
2290
+ error_message += f"""Are you sure you need this SEARCH/REPLACE block?
2291
+ The REPLACE lines are already in {path}!
2292
+
2293
+ """
2294
+ error_message += (
2295
+ "The SEARCH section must exactly match an existing block of lines including all"
2296
+ " white space, comments, indentation, docstrings, etc\n"
2297
+ )
2298
+ if passed:
2299
+ pblocks = "block" if len(passed) == 1 else "blocks"
2300
+ error_message += f"""
2301
+ # The other {len(passed)} SEARCH/REPLACE {pblocks} were applied successfully.
2302
+ Don't re-send them.
2303
+ Just reply with fixed versions of the {blocks} above that failed to match.
2304
+ """
2305
+ self.io.tool_error(error_message)
2306
+ # Set reflected_message to prompt LLM to fix the failed blocks
2307
+ self.reflected_message = error_message
2308
+
2309
+ edited_files = set(edit[0] for edit in passed) # Use relative paths stored in passed
2310
+
2311
+ # 4. Post-edit actions (commit, lint, test, shell commands)
2312
+ if edited_files:
2313
+ self.aider_edited_files.update(edited_files) # Track edited files
2314
+ self.auto_commit(edited_files)
2315
+ # We don't use saved_message here as we are not moving history back
2316
+
2317
+ if self.auto_lint:
2318
+ lint_errors = self.lint_edited(edited_files)
2319
+ self.auto_commit(edited_files, context="Ran the linter")
2320
+ if lint_errors and not self.reflected_message: # Reflect only if no edit errors
2321
+ ok = self.io.confirm_ask("Attempt to fix lint errors?")
2322
+ if ok:
2323
+ self.reflected_message = lint_errors
2324
+
2325
+ shared_output = self.run_shell_commands()
2326
+ if shared_output:
2327
+ # Add shell output as a new user message? Or just display?
2328
+ # Let's just display for now to avoid complex history manipulation
2329
+ self.io.tool_output("Shell command output:\n" + shared_output)
2330
+
2331
+ if self.auto_test and not self.reflected_message: # Reflect only if no prior errors
2332
+ test_errors = self.commands.cmd_test(self.test_cmd)
2333
+ if test_errors:
2334
+ ok = self.io.confirm_ask("Attempt to fix test errors?")
2335
+ if ok:
2336
+ self.reflected_message = test_errors
2337
+
2338
+ self.show_undo_hint()
2339
+
2340
+ except ValueError as err:
2341
+ # Handle parsing errors from find_original_update_blocks
2342
+ self.num_malformed_responses += 1
2343
+ error_message = err.args[0]
2344
+ self.io.tool_error("The LLM did not conform to the edit format.")
2345
+ self.io.tool_output(urls.edit_errors)
2346
+ self.io.tool_output()
2347
+ self.io.tool_output(str(error_message))
2348
+ self.reflected_message = str(error_message) # Reflect parsing errors
2349
+ except ANY_GIT_ERROR as err:
2350
+ self.io.tool_error(f"Git error during edit application: {str(err)}")
2351
+ self.reflected_message = f"Git error during edit application: {str(err)}"
2352
+ except Exception as err:
2353
+ self.io.tool_error("Exception while applying edits:")
2354
+ self.io.tool_error(str(err), strip=False)
2355
+ traceback.print_exc()
2356
+ self.reflected_message = f"Exception while applying edits: {str(err)}"
2357
+
2358
+ return edited_files
2359
+
2360
+ def _add_file_to_context(self, file_path, explicit=False):
2361
+ """
2362
+ Helper method to add a file to context as read-only.
2363
+
2364
+ Parameters:
2365
+ - file_path: Path to the file to add
2366
+ - explicit: Whether this was an explicit view command (vs. implicit through ViewFilesAtGlob/ViewFilesMatching)
2367
+ """
2368
+ # Check if file exists
2369
+ abs_path = self.abs_root_path(file_path)
2370
+ rel_path = self.get_rel_fname(abs_path)
2371
+
2372
+ if not os.path.isfile(abs_path):
2373
+ self.io.tool_output(f"⚠️ File '{file_path}' not found")
2374
+ return "File not found"
2375
+
2376
+ # Check if the file is already in context (either editable or read-only)
2377
+ if abs_path in self.abs_fnames:
2378
+ if explicit:
2379
+ self.io.tool_output(f"📎 File '{file_path}' already in context as editable")
2380
+ return "File already in context as editable"
2381
+ return "File already in context as editable"
2382
+
2383
+ if abs_path in self.abs_read_only_fnames:
2384
+ if explicit:
2385
+ self.io.tool_output(f"📎 File '{file_path}' already in context as read-only")
2386
+ return "File already in context as read-only"
2387
+ return "File already in context as read-only"
2388
+
2389
+ # Add file to context as read-only
2390
+ try:
2391
+ # Check for large file and apply context management if enabled
2392
+ content = self.io.read_text(abs_path)
2393
+ if content is None:
2394
+ return f"Error reading file: {file_path}"
2395
+
2396
+ # Check if file is very large and context management is enabled
2397
+ if self.context_management_enabled:
2398
+ file_tokens = self.main_model.token_count(content)
2399
+ if file_tokens > self.large_file_token_threshold:
2400
+ self.io.tool_output(
2401
+ f"⚠️ '{file_path}' is very large ({file_tokens} tokens). "
2402
+ "Use /context-management to toggle truncation off if needed."
2403
+ )
2404
+
2405
+ # Add to read-only files
2406
+ self.abs_read_only_fnames.add(abs_path)
2407
+
2408
+ # Track in exploration set
2409
+ self.files_added_in_exploration.add(rel_path)
2410
+
2411
+ # Inform user
2412
+ if explicit:
2413
+ self.io.tool_output(f"📎 Viewed '{file_path}' (added to context as read-only)")
2414
+ return "Viewed file (added to context as read-only)"
2415
+ else:
2416
+ # For implicit adds (from ViewFilesAtGlob/ViewFilesMatching), just return success
2417
+ return "Added file to context as read-only"
2418
+
2419
+ except Exception as e:
2420
+ self.io.tool_error(f"Error adding file '{file_path}' for viewing: {str(e)}")
2421
+ return f"Error adding file for viewing: {str(e)}"
2422
+
2423
+ def _process_file_mentions(self, content):
2424
+ """
2425
+ Process implicit file mentions in the content, adding files if they're not already in context.
2426
+
2427
+ This handles the case where the LLM mentions file paths without using explicit tool commands.
2428
+ """
2429
+ # Extract file mentions using the parent class's method
2430
+ mentioned_files = set(self.get_file_mentions(content, ignore_current=False))
2431
+ current_files = set(self.get_inchat_relative_files())
2432
+
2433
+ # Get new files to add (not already in context)
2434
+ mentioned_files - current_files
2435
+
2436
+ # In navigator mode, we *only* add files via explicit tool commands (`View`, `ViewFilesAtGlob`, etc.).
2437
+ # Do nothing here for implicit mentions.
2438
+ pass
2439
+
2440
+ def check_for_file_mentions(self, content):
2441
+ """
2442
+ Override parent's method to use our own file processing logic.
2443
+
2444
+ Override parent's method to disable implicit file mention handling in navigator mode.
2445
+ Files should only be added via explicit tool commands
2446
+ (`View`, `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`).
2447
+ """
2448
+ # Do nothing - disable implicit file adds in navigator mode.
2449
+ pass
2450
+
2451
+ def preproc_user_input(self, inp):
2452
+ """
2453
+ Override parent's method to wrap user input in a context block.
2454
+ This clearly delineates user input from other sections in the context window.
2455
+ """
2456
+ # First apply the parent's preprocessing
2457
+ inp = super().preproc_user_input(inp)
2458
+
2459
+ # If we still have input after preprocessing, wrap it in a context block
2460
+ if inp and not inp.startswith('<context name="user_input">'):
2461
+ inp = f'<context name="user_input">\n{inp}\n</context>'
2462
+
2463
+ return inp
2464
+
2465
+ def get_directory_structure(self):
2466
+ """
2467
+ Generate a structured directory listing of the project file structure.
2468
+ Returns a formatted string representation of the directory tree.
2469
+ """
2470
+ if not self.use_enhanced_context:
2471
+ return None
2472
+
2473
+ try:
2474
+ # Start with the header
2475
+ result = '<context name="directoryStructure">\n'
2476
+ result += "## Project File Structure\n\n"
2477
+ result += (
2478
+ "Below is a snapshot of this project's file structure at the current time. It skips"
2479
+ " over .gitignore patterns.\n\n"
2480
+ )
2481
+
2482
+ # Get the root directory
2483
+ Path(self.root)
2484
+
2485
+ # Get all files in the repo (both tracked and untracked)
2486
+ if self.repo:
2487
+ # Get tracked files
2488
+ tracked_files = self.repo.get_tracked_files()
2489
+
2490
+ # Get untracked files (files present in the working directory but not in git)
2491
+ untracked_files = []
2492
+ try:
2493
+ # Run git status to get untracked files
2494
+ untracked_output = self.repo.repo.git.status("--porcelain")
2495
+ for line in untracked_output.splitlines():
2496
+ if line.startswith("??"):
2497
+ # Extract the filename (remove the '?? ' prefix)
2498
+ untracked_file = line[3:]
2499
+ if not self.repo.git_ignored_file(untracked_file):
2500
+ untracked_files.append(untracked_file)
2501
+ except Exception as e:
2502
+ self.io.tool_warning(f"Error getting untracked files: {str(e)}")
2503
+
2504
+ # Combine tracked and untracked files
2505
+ all_files = tracked_files + untracked_files
2506
+ else:
2507
+ # If no repo, get all files relative to root
2508
+ all_files = []
2509
+ for path in Path(self.root).rglob("*"):
2510
+ if path.is_file():
2511
+ all_files.append(str(path.relative_to(self.root)))
2512
+
2513
+ # Sort files to ensure deterministic output
2514
+ all_files = sorted(all_files)
2515
+
2516
+ # Filter out .aider files/dirs
2517
+ all_files = [
2518
+ f for f in all_files if not any(part.startswith(".aider") for part in f.split("/"))
2519
+ ]
2520
+
2521
+ # Build tree structure
2522
+ tree = {}
2523
+ for file in all_files:
2524
+ parts = file.split("/")
2525
+ current = tree
2526
+ for i, part in enumerate(parts):
2527
+ if i == len(parts) - 1: # Last part (file)
2528
+ if "." not in current:
2529
+ current["."] = []
2530
+ current["."].append(part)
2531
+ else: # Directory
2532
+ if part not in current:
2533
+ current[part] = {}
2534
+ current = current[part]
2535
+
2536
+ # Function to recursively print the tree
2537
+ def print_tree(node, prefix="- ", indent=" ", path=""):
2538
+ lines = []
2539
+ # First print all directories
2540
+ dirs = sorted([k for k in node.keys() if k != "."])
2541
+ for i, dir_name in enumerate(dirs):
2542
+ full_path = f"{path}/{dir_name}" if path else dir_name
2543
+ lines.append(f"{prefix}{full_path}/")
2544
+ sub_lines = print_tree(
2545
+ node[dir_name], prefix=prefix, indent=indent, path=full_path
2546
+ )
2547
+ for sub_line in sub_lines:
2548
+ lines.append(f"{indent}{sub_line}")
2549
+
2550
+ # Then print all files
2551
+ if "." in node:
2552
+ for file_name in sorted(node["."]):
2553
+ lines.append(
2554
+ f"{prefix}{path}/{file_name}" if path else f"{prefix}{file_name}"
2555
+ )
2556
+
2557
+ return lines
2558
+
2559
+ # Generate the tree starting from root
2560
+ tree_lines = print_tree(tree, prefix="- ")
2561
+ result += "\n".join(tree_lines)
2562
+ result += "\n</context>"
2563
+
2564
+ return result
2565
+ except Exception as e:
2566
+ self.io.tool_error(f"Error generating directory structure: {str(e)}")
2567
+ return None
2568
+
2569
    def get_git_status(self):
        """
        Generate a git status context block for repository information.

        Returns a formatted string with git branch, status, and recent
        commits, or None when enhanced context is disabled, there is no
        repo, or an unexpected error occurs.
        """
        if not self.use_enhanced_context or not self.repo:
            return None

        try:
            result = '<context name="gitStatus">\n'
            result += "## Git Repository Status\n\n"
            result += "This is a snapshot of the git status at the current time.\n"

            # Get current branch; active_branch raises on a detached HEAD,
            # which the except branch reports.
            try:
                current_branch = self.repo.repo.active_branch.name
                result += f"Current branch: {current_branch}\n\n"
            except Exception:
                result += "Current branch: (detached HEAD state)\n\n"

            # Find the main/master branch, if one exists, to hint at PR targets.
            main_branch = None
            try:
                for branch in self.repo.repo.branches:
                    if branch.name in ("main", "master"):
                        main_branch = branch.name
                        break
                if main_branch:
                    result += f"Main branch (you will usually use this for PRs): {main_branch}\n\n"
            except Exception:
                pass

            # Git status section.
            result += "Status:\n"
            try:
                # Porcelain format: two status chars ("XY"), a space, then the path.
                status = self.repo.repo.git.status("--porcelain")

                # Process and categorize the status output
                if status:
                    status_lines = status.strip().split("\n")

                    # Group by status type for better organization
                    staged_added = []
                    staged_modified = []
                    staged_deleted = []
                    unstaged_modified = []
                    unstaged_deleted = []
                    untracked = []

                    for line in status_lines:
                        if len(line) < 4:  # Ensure the line has enough characters
                            continue

                        status_code = line[:2]
                        file_path = line[3:]

                        # Skip .aider files/dirs
                        if any(part.startswith(".aider") for part in file_path.split("/")):
                            continue

                        # Staged changes (index column). Note: the staged and
                        # unstaged checks below are deliberately separate `if`
                        # chains, since one path can appear in both.
                        if status_code[0] == "A":
                            staged_added.append(file_path)
                        elif status_code[0] == "M":
                            staged_modified.append(file_path)
                        elif status_code[0] == "D":
                            staged_deleted.append(file_path)
                        # Unstaged changes (worktree column)
                        if status_code[1] == "M":
                            unstaged_modified.append(file_path)
                        elif status_code[1] == "D":
                            unstaged_deleted.append(file_path)
                        # Untracked files
                        if status_code == "??":
                            untracked.append(file_path)

                    # Output in a nicely formatted manner
                    if staged_added:
                        for file in staged_added:
                            result += f"A {file}\n"
                    if staged_modified:
                        for file in staged_modified:
                            result += f"M {file}\n"
                    if staged_deleted:
                        for file in staged_deleted:
                            result += f"D {file}\n"
                    if unstaged_modified:
                        for file in unstaged_modified:
                            result += f" M {file}\n"
                    if unstaged_deleted:
                        for file in unstaged_deleted:
                            result += f" D {file}\n"
                    if untracked:
                        for file in untracked:
                            result += f"?? {file}\n"
                else:
                    result += "Working tree clean\n"
            except Exception as e:
                result += f"Unable to get modified files: {str(e)}\n"

            # Recent commits (first line of each of the last 5 commit messages).
            result += "\nRecent commits:\n"
            try:
                commits = list(self.repo.repo.iter_commits(max_count=5))
                for commit in commits:
                    short_hash = commit.hexsha[:8]
                    message = commit.message.strip().split("\n")[0]  # First line only
                    result += f"{short_hash} {message}\n"
            except Exception:
                result += "Unable to get recent commits\n"

            result += "</context>"
            return result
        except Exception as e:
            self.io.tool_error(f"Error generating git status: {str(e)}")
            return None
2686
+
2687
+ def cmd_context_blocks(self, args=""):
2688
+ """
2689
+ Toggle enhanced context blocks feature.
2690
+ """
2691
+ self.use_enhanced_context = not self.use_enhanced_context
2692
+
2693
+ if self.use_enhanced_context:
2694
+ self.io.tool_output(
2695
+ "Enhanced context blocks are now ON - directory structure and git status will be"
2696
+ " included."
2697
+ )
2698
+ # Mark tokens as needing calculation, but don't calculate yet (lazy calculation)
2699
+ self.tokens_calculated = False
2700
+ self.context_blocks_cache = {}
2701
+ else:
2702
+ self.io.tool_output(
2703
+ "Enhanced context blocks are now OFF - directory structure and git status will not"
2704
+ " be included."
2705
+ )
2706
+ # Clear token counts and cache when disabled
2707
+ self.context_block_tokens = {}
2708
+ self.context_blocks_cache = {}
2709
+ self.tokens_calculated = False
2710
+
2711
+ return True