aider-ce 0.87.2.dev9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aider-ce might be problematic.

Files changed (264):
  1. aider/__init__.py +20 -0
  2. aider/__main__.py +4 -0
  3. aider/_version.py +34 -0
  4. aider/analytics.py +258 -0
  5. aider/args.py +1014 -0
  6. aider/args_formatter.py +228 -0
  7. aider/change_tracker.py +133 -0
  8. aider/coders/__init__.py +36 -0
  9. aider/coders/architect_coder.py +48 -0
  10. aider/coders/architect_prompts.py +40 -0
  11. aider/coders/ask_coder.py +9 -0
  12. aider/coders/ask_prompts.py +35 -0
  13. aider/coders/base_coder.py +3013 -0
  14. aider/coders/base_prompts.py +87 -0
  15. aider/coders/chat_chunks.py +64 -0
  16. aider/coders/context_coder.py +53 -0
  17. aider/coders/context_prompts.py +75 -0
  18. aider/coders/editblock_coder.py +657 -0
  19. aider/coders/editblock_fenced_coder.py +10 -0
  20. aider/coders/editblock_fenced_prompts.py +143 -0
  21. aider/coders/editblock_func_coder.py +141 -0
  22. aider/coders/editblock_func_prompts.py +27 -0
  23. aider/coders/editblock_prompts.py +177 -0
  24. aider/coders/editor_diff_fenced_coder.py +9 -0
  25. aider/coders/editor_diff_fenced_prompts.py +11 -0
  26. aider/coders/editor_editblock_coder.py +9 -0
  27. aider/coders/editor_editblock_prompts.py +21 -0
  28. aider/coders/editor_whole_coder.py +9 -0
  29. aider/coders/editor_whole_prompts.py +12 -0
  30. aider/coders/help_coder.py +16 -0
  31. aider/coders/help_prompts.py +46 -0
  32. aider/coders/navigator_coder.py +2711 -0
  33. aider/coders/navigator_legacy_prompts.py +338 -0
  34. aider/coders/navigator_prompts.py +530 -0
  35. aider/coders/patch_coder.py +706 -0
  36. aider/coders/patch_prompts.py +161 -0
  37. aider/coders/search_replace.py +757 -0
  38. aider/coders/shell.py +37 -0
  39. aider/coders/single_wholefile_func_coder.py +102 -0
  40. aider/coders/single_wholefile_func_prompts.py +27 -0
  41. aider/coders/udiff_coder.py +429 -0
  42. aider/coders/udiff_prompts.py +117 -0
  43. aider/coders/udiff_simple.py +14 -0
  44. aider/coders/udiff_simple_prompts.py +25 -0
  45. aider/coders/wholefile_coder.py +144 -0
  46. aider/coders/wholefile_func_coder.py +134 -0
  47. aider/coders/wholefile_func_prompts.py +27 -0
  48. aider/coders/wholefile_prompts.py +70 -0
  49. aider/commands.py +1946 -0
  50. aider/copypaste.py +72 -0
  51. aider/deprecated.py +126 -0
  52. aider/diffs.py +128 -0
  53. aider/dump.py +29 -0
  54. aider/editor.py +147 -0
  55. aider/exceptions.py +107 -0
  56. aider/format_settings.py +26 -0
  57. aider/gui.py +545 -0
  58. aider/help.py +163 -0
  59. aider/help_pats.py +19 -0
  60. aider/history.py +178 -0
  61. aider/io.py +1257 -0
  62. aider/linter.py +304 -0
  63. aider/llm.py +47 -0
  64. aider/main.py +1297 -0
  65. aider/mcp/__init__.py +94 -0
  66. aider/mcp/server.py +119 -0
  67. aider/mdstream.py +243 -0
  68. aider/models.py +1344 -0
  69. aider/onboarding.py +428 -0
  70. aider/openrouter.py +129 -0
  71. aider/prompts.py +56 -0
  72. aider/queries/tree-sitter-language-pack/README.md +7 -0
  73. aider/queries/tree-sitter-language-pack/arduino-tags.scm +5 -0
  74. aider/queries/tree-sitter-language-pack/c-tags.scm +9 -0
  75. aider/queries/tree-sitter-language-pack/chatito-tags.scm +16 -0
  76. aider/queries/tree-sitter-language-pack/clojure-tags.scm +7 -0
  77. aider/queries/tree-sitter-language-pack/commonlisp-tags.scm +122 -0
  78. aider/queries/tree-sitter-language-pack/cpp-tags.scm +15 -0
  79. aider/queries/tree-sitter-language-pack/csharp-tags.scm +26 -0
  80. aider/queries/tree-sitter-language-pack/d-tags.scm +26 -0
  81. aider/queries/tree-sitter-language-pack/dart-tags.scm +92 -0
  82. aider/queries/tree-sitter-language-pack/elisp-tags.scm +5 -0
  83. aider/queries/tree-sitter-language-pack/elixir-tags.scm +54 -0
  84. aider/queries/tree-sitter-language-pack/elm-tags.scm +19 -0
  85. aider/queries/tree-sitter-language-pack/gleam-tags.scm +41 -0
  86. aider/queries/tree-sitter-language-pack/go-tags.scm +42 -0
  87. aider/queries/tree-sitter-language-pack/java-tags.scm +20 -0
  88. aider/queries/tree-sitter-language-pack/javascript-tags.scm +88 -0
  89. aider/queries/tree-sitter-language-pack/lua-tags.scm +34 -0
  90. aider/queries/tree-sitter-language-pack/matlab-tags.scm +10 -0
  91. aider/queries/tree-sitter-language-pack/ocaml-tags.scm +115 -0
  92. aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm +98 -0
  93. aider/queries/tree-sitter-language-pack/pony-tags.scm +39 -0
  94. aider/queries/tree-sitter-language-pack/properties-tags.scm +5 -0
  95. aider/queries/tree-sitter-language-pack/python-tags.scm +14 -0
  96. aider/queries/tree-sitter-language-pack/r-tags.scm +21 -0
  97. aider/queries/tree-sitter-language-pack/racket-tags.scm +12 -0
  98. aider/queries/tree-sitter-language-pack/ruby-tags.scm +64 -0
  99. aider/queries/tree-sitter-language-pack/rust-tags.scm +60 -0
  100. aider/queries/tree-sitter-language-pack/solidity-tags.scm +43 -0
  101. aider/queries/tree-sitter-language-pack/swift-tags.scm +51 -0
  102. aider/queries/tree-sitter-language-pack/udev-tags.scm +20 -0
  103. aider/queries/tree-sitter-languages/README.md +23 -0
  104. aider/queries/tree-sitter-languages/c-tags.scm +9 -0
  105. aider/queries/tree-sitter-languages/c_sharp-tags.scm +46 -0
  106. aider/queries/tree-sitter-languages/cpp-tags.scm +15 -0
  107. aider/queries/tree-sitter-languages/dart-tags.scm +91 -0
  108. aider/queries/tree-sitter-languages/elisp-tags.scm +8 -0
  109. aider/queries/tree-sitter-languages/elixir-tags.scm +54 -0
  110. aider/queries/tree-sitter-languages/elm-tags.scm +19 -0
  111. aider/queries/tree-sitter-languages/go-tags.scm +30 -0
  112. aider/queries/tree-sitter-languages/hcl-tags.scm +77 -0
  113. aider/queries/tree-sitter-languages/java-tags.scm +20 -0
  114. aider/queries/tree-sitter-languages/javascript-tags.scm +88 -0
  115. aider/queries/tree-sitter-languages/kotlin-tags.scm +27 -0
  116. aider/queries/tree-sitter-languages/matlab-tags.scm +10 -0
  117. aider/queries/tree-sitter-languages/ocaml-tags.scm +115 -0
  118. aider/queries/tree-sitter-languages/ocaml_interface-tags.scm +98 -0
  119. aider/queries/tree-sitter-languages/php-tags.scm +26 -0
  120. aider/queries/tree-sitter-languages/python-tags.scm +12 -0
  121. aider/queries/tree-sitter-languages/ql-tags.scm +26 -0
  122. aider/queries/tree-sitter-languages/ruby-tags.scm +64 -0
  123. aider/queries/tree-sitter-languages/rust-tags.scm +60 -0
  124. aider/queries/tree-sitter-languages/scala-tags.scm +65 -0
  125. aider/queries/tree-sitter-languages/typescript-tags.scm +41 -0
  126. aider/reasoning_tags.py +82 -0
  127. aider/repo.py +621 -0
  128. aider/repomap.py +988 -0
  129. aider/report.py +200 -0
  130. aider/resources/__init__.py +3 -0
  131. aider/resources/model-metadata.json +699 -0
  132. aider/resources/model-settings.yml +2046 -0
  133. aider/run_cmd.py +132 -0
  134. aider/scrape.py +284 -0
  135. aider/sendchat.py +61 -0
  136. aider/special.py +203 -0
  137. aider/tools/__init__.py +26 -0
  138. aider/tools/command.py +58 -0
  139. aider/tools/command_interactive.py +53 -0
  140. aider/tools/delete_block.py +120 -0
  141. aider/tools/delete_line.py +112 -0
  142. aider/tools/delete_lines.py +137 -0
  143. aider/tools/extract_lines.py +276 -0
  144. aider/tools/grep.py +171 -0
  145. aider/tools/indent_lines.py +155 -0
  146. aider/tools/insert_block.py +211 -0
  147. aider/tools/list_changes.py +51 -0
  148. aider/tools/ls.py +49 -0
  149. aider/tools/make_editable.py +46 -0
  150. aider/tools/make_readonly.py +29 -0
  151. aider/tools/remove.py +48 -0
  152. aider/tools/replace_all.py +77 -0
  153. aider/tools/replace_line.py +125 -0
  154. aider/tools/replace_lines.py +160 -0
  155. aider/tools/replace_text.py +125 -0
  156. aider/tools/show_numbered_context.py +101 -0
  157. aider/tools/tool_utils.py +313 -0
  158. aider/tools/undo_change.py +60 -0
  159. aider/tools/view.py +13 -0
  160. aider/tools/view_files_at_glob.py +65 -0
  161. aider/tools/view_files_matching.py +103 -0
  162. aider/tools/view_files_with_symbol.py +121 -0
  163. aider/urls.py +17 -0
  164. aider/utils.py +454 -0
  165. aider/versioncheck.py +113 -0
  166. aider/voice.py +187 -0
  167. aider/waiting.py +221 -0
  168. aider/watch.py +318 -0
  169. aider/watch_prompts.py +12 -0
  170. aider/website/Gemfile +8 -0
  171. aider/website/_includes/blame.md +162 -0
  172. aider/website/_includes/get-started.md +22 -0
  173. aider/website/_includes/help-tip.md +5 -0
  174. aider/website/_includes/help.md +24 -0
  175. aider/website/_includes/install.md +5 -0
  176. aider/website/_includes/keys.md +4 -0
  177. aider/website/_includes/model-warnings.md +67 -0
  178. aider/website/_includes/multi-line.md +22 -0
  179. aider/website/_includes/python-m-aider.md +5 -0
  180. aider/website/_includes/recording.css +228 -0
  181. aider/website/_includes/recording.md +34 -0
  182. aider/website/_includes/replit-pipx.md +9 -0
  183. aider/website/_includes/works-best.md +1 -0
  184. aider/website/_sass/custom/custom.scss +103 -0
  185. aider/website/docs/config/adv-model-settings.md +2260 -0
  186. aider/website/docs/config/aider_conf.md +548 -0
  187. aider/website/docs/config/api-keys.md +90 -0
  188. aider/website/docs/config/dotenv.md +493 -0
  189. aider/website/docs/config/editor.md +127 -0
  190. aider/website/docs/config/mcp.md +95 -0
  191. aider/website/docs/config/model-aliases.md +104 -0
  192. aider/website/docs/config/options.md +890 -0
  193. aider/website/docs/config/reasoning.md +210 -0
  194. aider/website/docs/config.md +44 -0
  195. aider/website/docs/faq.md +384 -0
  196. aider/website/docs/git.md +76 -0
  197. aider/website/docs/index.md +47 -0
  198. aider/website/docs/install/codespaces.md +39 -0
  199. aider/website/docs/install/docker.md +57 -0
  200. aider/website/docs/install/optional.md +100 -0
  201. aider/website/docs/install/replit.md +8 -0
  202. aider/website/docs/install.md +115 -0
  203. aider/website/docs/languages.md +264 -0
  204. aider/website/docs/legal/contributor-agreement.md +111 -0
  205. aider/website/docs/legal/privacy.md +104 -0
  206. aider/website/docs/llms/anthropic.md +77 -0
  207. aider/website/docs/llms/azure.md +48 -0
  208. aider/website/docs/llms/bedrock.md +132 -0
  209. aider/website/docs/llms/cohere.md +34 -0
  210. aider/website/docs/llms/deepseek.md +32 -0
  211. aider/website/docs/llms/gemini.md +49 -0
  212. aider/website/docs/llms/github.md +111 -0
  213. aider/website/docs/llms/groq.md +36 -0
  214. aider/website/docs/llms/lm-studio.md +39 -0
  215. aider/website/docs/llms/ollama.md +75 -0
  216. aider/website/docs/llms/openai-compat.md +39 -0
  217. aider/website/docs/llms/openai.md +58 -0
  218. aider/website/docs/llms/openrouter.md +78 -0
  219. aider/website/docs/llms/other.md +111 -0
  220. aider/website/docs/llms/vertex.md +50 -0
  221. aider/website/docs/llms/warnings.md +10 -0
  222. aider/website/docs/llms/xai.md +53 -0
  223. aider/website/docs/llms.md +54 -0
  224. aider/website/docs/more/analytics.md +127 -0
  225. aider/website/docs/more/edit-formats.md +116 -0
  226. aider/website/docs/more/infinite-output.md +159 -0
  227. aider/website/docs/more-info.md +8 -0
  228. aider/website/docs/recordings/auto-accept-architect.md +31 -0
  229. aider/website/docs/recordings/dont-drop-original-read-files.md +35 -0
  230. aider/website/docs/recordings/index.md +21 -0
  231. aider/website/docs/recordings/model-accepts-settings.md +69 -0
  232. aider/website/docs/recordings/tree-sitter-language-pack.md +80 -0
  233. aider/website/docs/repomap.md +112 -0
  234. aider/website/docs/scripting.md +100 -0
  235. aider/website/docs/troubleshooting/aider-not-found.md +24 -0
  236. aider/website/docs/troubleshooting/edit-errors.md +76 -0
  237. aider/website/docs/troubleshooting/imports.md +62 -0
  238. aider/website/docs/troubleshooting/models-and-keys.md +54 -0
  239. aider/website/docs/troubleshooting/support.md +79 -0
  240. aider/website/docs/troubleshooting/token-limits.md +96 -0
  241. aider/website/docs/troubleshooting/warnings.md +12 -0
  242. aider/website/docs/troubleshooting.md +11 -0
  243. aider/website/docs/usage/browser.md +57 -0
  244. aider/website/docs/usage/caching.md +49 -0
  245. aider/website/docs/usage/commands.md +133 -0
  246. aider/website/docs/usage/conventions.md +119 -0
  247. aider/website/docs/usage/copypaste.md +121 -0
  248. aider/website/docs/usage/images-urls.md +48 -0
  249. aider/website/docs/usage/lint-test.md +118 -0
  250. aider/website/docs/usage/modes.md +211 -0
  251. aider/website/docs/usage/not-code.md +179 -0
  252. aider/website/docs/usage/notifications.md +87 -0
  253. aider/website/docs/usage/tips.md +79 -0
  254. aider/website/docs/usage/tutorials.md +30 -0
  255. aider/website/docs/usage/voice.md +121 -0
  256. aider/website/docs/usage/watch.md +294 -0
  257. aider/website/docs/usage.md +102 -0
  258. aider/website/share/index.md +101 -0
  259. aider_ce-0.87.2.dev9.dist-info/METADATA +543 -0
  260. aider_ce-0.87.2.dev9.dist-info/RECORD +264 -0
  261. aider_ce-0.87.2.dev9.dist-info/WHEEL +5 -0
  262. aider_ce-0.87.2.dev9.dist-info/entry_points.txt +3 -0
  263. aider_ce-0.87.2.dev9.dist-info/licenses/LICENSE.txt +202 -0
  264. aider_ce-0.87.2.dev9.dist-info/top_level.txt +1 -0
aider/coders/base_coder.py
@@ -0,0 +1,3013 @@
+ #!/usr/bin/env python
+
+ import asyncio
+ import base64
+ import hashlib
+ import json
+ import locale
+ import math
+ import mimetypes
+ import os
+ import platform
+ import re
+ import sys
+ import threading
+ import time
+ import traceback
+ from collections import defaultdict
+ from datetime import datetime
+
+ # Optional dependency: used to convert locale codes (eg ``en_US``)
+ # into human-readable language names (eg ``English``).
+ try:
+     from babel import Locale  # type: ignore
+ except ImportError:  # Babel not installed – we will fall back to a small mapping
+     Locale = None
+ from json.decoder import JSONDecodeError
+ from pathlib import Path
+ from typing import List
+
+ from litellm import experimental_mcp_client
+ from rich.console import Console
+
+ from aider import __version__, models, prompts, urls, utils
+ from aider.analytics import Analytics
+ from aider.commands import Commands
+ from aider.exceptions import LiteLLMExceptions
+ from aider.history import ChatSummary
+ from aider.io import ConfirmGroup, InputOutput
+ from aider.linter import Linter
+ from aider.llm import litellm
+ from aider.mcp.server import LocalServer
+ from aider.models import RETRY_TIMEOUT
+ from aider.reasoning_tags import (
+     REASONING_TAG,
+     format_reasoning_content,
+     remove_reasoning_content,
+     replace_reasoning_tags,
+ )
+ from aider.repo import ANY_GIT_ERROR, GitRepo
+ from aider.repomap import RepoMap
+ from aider.run_cmd import run_cmd
+ from aider.utils import format_content, format_messages, format_tokens, is_image_file
+ from aider.waiting import WaitingSpinner
+
+ from ..dump import dump  # noqa: F401
+ from .chat_chunks import ChatChunks
+
+
+ class UnknownEditFormat(ValueError):
+     def __init__(self, edit_format, valid_formats):
+         self.edit_format = edit_format
+         self.valid_formats = valid_formats
+         super().__init__(
+             f"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}"
+         )
+
+
+ class MissingAPIKeyError(ValueError):
+     pass
+
+
+ class FinishReasonLength(Exception):
+     pass
+
+
+ def wrap_fence(name):
+     return f"<{name}>", f"</{name}>"
+
+
+ all_fences = [
+     ("`" * 3, "`" * 3),
+     ("`" * 4, "`" * 4),  # LLMs ignore and revert to triple-backtick, causing #2879
+     wrap_fence("source"),
+     wrap_fence("code"),
+     wrap_fence("pre"),
+     wrap_fence("codeblock"),
+     wrap_fence("sourcecode"),
+ ]
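
Each entry in all_fences is an (open, close) pair that can wrap file contents in the prompt. A quick sketch of what the helpers above produce (illustrative only, not part of the package source):

    wrap_fence("source")   # -> ('<source>', '</source>')
    all_fences[0]          # -> ('```', '```')
    all_fences[1]          # -> ('````', '````')
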
+
+
+ class Coder:
+     abs_fnames = None
+     abs_read_only_fnames = None
+     repo = None
+     last_aider_commit_hash = None
+     aider_edited_files = None
+     last_asked_for_commit_time = 0
+     repo_map = None
+     functions = None
+     num_exhausted_context_windows = 0
+     num_malformed_responses = 0
+     last_keyboard_interrupt = None
+     num_reflections = 0
+     max_reflections = 3
+     num_tool_calls = 0
+     max_tool_calls = 25
+     edit_format = None
+     yield_stream = False
+     temperature = None
+     auto_lint = True
+     auto_test = False
+     test_cmd = None
+     lint_outcome = None
+     test_outcome = None
+     multi_response_content = ""
+     partial_response_content = ""
+     partial_response_tool_call = []
+     commit_before_message = []
+     message_cost = 0.0
+     add_cache_headers = False
+     cache_warming_thread = None
+     num_cache_warming_pings = 0
+     suggest_shell_commands = True
+     detect_urls = True
+     ignore_mentions = None
+     chat_language = None
+     commit_language = None
+     file_watcher = None
+     mcp_servers = None
+     mcp_tools = None
+
+     # Context management settings (for all modes)
+     context_management_enabled = False  # Disabled by default except for navigator mode
+     large_file_token_threshold = (
+         25000  # Files larger than this will be truncated when context management is enabled
+     )
+
+     @classmethod
+     def create(
+         self,
+         main_model=None,
+         edit_format=None,
+         io=None,
+         from_coder=None,
+         summarize_from_coder=True,
+         **kwargs,
+     ):
+         import aider.coders as coders
+
+         if not main_model:
+             if from_coder:
+                 main_model = from_coder.main_model
+             else:
+                 main_model = models.Model(models.DEFAULT_MODEL_NAME)
+
+         if edit_format == "code":
+             edit_format = None
+         if edit_format is None:
+             if from_coder:
+                 edit_format = from_coder.edit_format
+             else:
+                 edit_format = main_model.edit_format
+
+         if not io and from_coder:
+             io = from_coder.io
+
+         if from_coder:
+             use_kwargs = dict(from_coder.original_kwargs)  # copy orig kwargs
+
+             # If the edit format changes, we can't leave old ASSISTANT
+             # messages in the chat history. The old edit format will
+             # confuse the new LLM. It may try and imitate it, disobeying
+             # the system prompt.
+             done_messages = from_coder.done_messages
+             if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:
+                 try:
+                     done_messages = from_coder.summarizer.summarize_all(done_messages)
+                 except ValueError:
+                     # If summarization fails, keep the original messages and warn the user
+                     io.tool_warning(
+                         "Chat history summarization failed, continuing with full history"
+                     )
+
+             # Bring along context from the old Coder
+             update = dict(
+                 fnames=list(from_coder.abs_fnames),
+                 # Copy read-only files
+                 read_only_fnames=list(from_coder.abs_read_only_fnames),
+                 done_messages=done_messages,
+                 cur_messages=from_coder.cur_messages,
+                 aider_commit_hashes=from_coder.aider_commit_hashes,
+                 commands=from_coder.commands.clone(),
+                 total_cost=from_coder.total_cost,
+                 ignore_mentions=from_coder.ignore_mentions,
+                 total_tokens_sent=from_coder.total_tokens_sent,
+                 total_tokens_received=from_coder.total_tokens_received,
+                 file_watcher=from_coder.file_watcher,
+             )
+             use_kwargs.update(update)  # override to complete the switch
+             use_kwargs.update(kwargs)  # override passed kwargs
+
+             kwargs = use_kwargs
+             from_coder.ok_to_warm_cache = False
+
+         for coder in coders.__all__:
+             if hasattr(coder, "edit_format") and coder.edit_format == edit_format:
+                 res = coder(main_model, io, **kwargs)
+                 res.original_kwargs = dict(kwargs)
+                 return res
+
+         valid_formats = [
+             str(c.edit_format)
+             for c in coders.__all__
+             if hasattr(c, "edit_format") and c.edit_format is not None
+         ]
+         raise UnknownEditFormat(edit_format, valid_formats)
+
+     def clone(self, **kwargs):
+         new_coder = Coder.create(from_coder=self, **kwargs)
+         return new_coder
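
Coder.create is the factory used throughout aider: it resolves the model and edit format, then walks aider.coders.__all__ for the coder class whose edit_format matches, and from_coder lets a new mode inherit the running chat's files and history. A minimal usage sketch (the model name here is an assumption for illustration, not taken from this diff):

    from aider.coders import Coder
    from aider.io import InputOutput
    from aider.models import Model

    io = InputOutput()
    coder = Coder.create(main_model=Model("gpt-4o"), edit_format="diff", io=io)

    # clone() re-enters create() with from_coder=self, carrying history along:
    ask_coder = coder.clone(edit_format="ask")
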
+
+     def get_announcements(self):
+         lines = []
+         lines.append(f"Aider v{__version__}")
+
+         # Model
+         main_model = self.main_model
+         weak_model = main_model.weak_model
+
+         if weak_model is not main_model:
+             prefix = "Main model"
+         else:
+             prefix = "Model"
+
+         output = f"{prefix}: {main_model.name} with {self.edit_format} edit format"
+
+         # Check for thinking token budget
+         thinking_tokens = main_model.get_thinking_tokens()
+         if thinking_tokens:
+             output += f", {thinking_tokens} think tokens"
+
+         # Check for reasoning effort
+         reasoning_effort = main_model.get_reasoning_effort()
+         if reasoning_effort:
+             output += f", reasoning {reasoning_effort}"
+
+         if self.add_cache_headers or main_model.caches_by_default:
+             output += ", prompt cache"
+         if main_model.info.get("supports_assistant_prefill"):
+             output += ", infinite output"
+
+         lines.append(output)
+
+         if self.edit_format == "architect":
+             output = (
+                 f"Editor model: {main_model.editor_model.name} with"
+                 f" {main_model.editor_edit_format} edit format"
+             )
+             lines.append(output)
+
+         if weak_model is not main_model:
+             output = f"Weak model: {weak_model.name}"
+             lines.append(output)
+
+         # Repo
+         if self.repo:
+             rel_repo_dir = self.repo.get_rel_repo_dir()
+             num_files = len(self.repo.get_tracked_files())
+
+             lines.append(f"Git repo: {rel_repo_dir} with {num_files:,} files")
+             if num_files > 1000:
+                 lines.append(
+                     "Warning: For large repos, consider using --subtree-only and .aiderignore"
+                 )
+                 lines.append(f"See: {urls.large_repos}")
+         else:
+             lines.append("Git repo: none")
+
+         # Repo-map
+         if self.repo_map:
+             map_tokens = self.repo_map.max_map_tokens
+             if map_tokens > 0:
+                 refresh = self.repo_map.refresh
+                 lines.append(f"Repo-map: using {map_tokens} tokens, {refresh} refresh")
+                 max_map_tokens = self.main_model.get_repo_map_tokens() * 2
+                 if map_tokens > max_map_tokens:
+                     lines.append(
+                         f"Warning: map-tokens > {max_map_tokens} is not recommended. Too much"
+                         " irrelevant code can confuse LLMs."
+                     )
+             else:
+                 lines.append("Repo-map: disabled because map_tokens == 0")
+         else:
+             lines.append("Repo-map: disabled")
+
+         # Files
+         for fname in self.get_inchat_relative_files():
+             lines.append(f"Added {fname} to the chat.")
+
+         for fname in self.abs_read_only_fnames:
+             rel_fname = self.get_rel_fname(fname)
+             lines.append(f"Added {rel_fname} to the chat (read-only).")
+
+         if self.done_messages:
+             lines.append("Restored previous conversation history.")
+
+         if self.io.multiline_mode:
+             lines.append("Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text")
+
+         return lines
+
+     ok_to_warm_cache = False
+
+     def __init__(
+         self,
+         main_model,
+         io,
+         repo=None,
+         fnames=None,
+         add_gitignore_files=False,
+         read_only_fnames=None,
+         show_diffs=False,
+         auto_commits=True,
+         dirty_commits=True,
+         dry_run=False,
+         map_tokens=1024,
+         verbose=False,
+         stream=True,
+         use_git=True,
+         cur_messages=None,
+         done_messages=None,
+         restore_chat_history=False,
+         auto_lint=True,
+         auto_test=False,
+         lint_cmds=None,
+         test_cmd=None,
+         aider_commit_hashes=None,
+         map_mul_no_files=8,
+         map_max_line_length=100,
+         commands=None,
+         summarizer=None,
+         total_cost=0.0,
+         analytics=None,
+         map_refresh="auto",
+         cache_prompts=False,
+         num_cache_warming_pings=0,
+         suggest_shell_commands=True,
+         chat_language=None,
+         commit_language=None,
+         detect_urls=True,
+         ignore_mentions=None,
+         total_tokens_sent=0,
+         total_tokens_received=0,
+         file_watcher=None,
+         auto_copy_context=False,
+         auto_accept_architect=True,
+         mcp_servers=None,
+         enable_context_compaction=False,
+         context_compaction_max_tokens=None,
+         context_compaction_summary_tokens=8192,
+         map_cache_dir=".",
+     ):
+         # initialize from args.map_cache_dir
+         self.map_cache_dir = map_cache_dir
+
+         # Fill in a dummy Analytics if needed, but it is never .enable()'d
+         self.analytics = analytics if analytics is not None else Analytics()
+
+         self.event = self.analytics.event
+         self.chat_language = chat_language
+         self.commit_language = commit_language
+         self.commit_before_message = []
+         self.aider_commit_hashes = set()
+         self.rejected_urls = set()
+         self.abs_root_path_cache = {}
+
+         self.auto_copy_context = auto_copy_context
+         self.auto_accept_architect = auto_accept_architect
+
+         self.ignore_mentions = ignore_mentions
+         if not self.ignore_mentions:
+             self.ignore_mentions = set()
+
+         self.file_watcher = file_watcher
+         if self.file_watcher:
+             self.file_watcher.coder = self
+
+         self.suggest_shell_commands = suggest_shell_commands
+         self.detect_urls = detect_urls
+
+         self.num_cache_warming_pings = num_cache_warming_pings
+         self.mcp_servers = mcp_servers
+         self.enable_context_compaction = enable_context_compaction
+
+         self.context_compaction_max_tokens = context_compaction_max_tokens
+         self.context_compaction_summary_tokens = context_compaction_summary_tokens
+
+         if not fnames:
+             fnames = []
+
+         if io is None:
+             io = InputOutput()
+
+         if aider_commit_hashes:
+             self.aider_commit_hashes = aider_commit_hashes
+         else:
+             self.aider_commit_hashes = set()
+
+         self.chat_completion_call_hashes = []
+         self.chat_completion_response_hashes = []
+         self.need_commit_before_edits = set()
+
+         self.total_cost = total_cost
+         self.total_tokens_sent = total_tokens_sent
+         self.total_tokens_received = total_tokens_received
+         self.message_tokens_sent = 0
+         self.message_tokens_received = 0
+
+         self.verbose = verbose
+         self.abs_fnames = set()
+         self.abs_read_only_fnames = set()
+         self.add_gitignore_files = add_gitignore_files
+
+         if cur_messages:
+             self.cur_messages = cur_messages
+         else:
+             self.cur_messages = []
+
+         if done_messages:
+             self.done_messages = done_messages
+         else:
+             self.done_messages = []
+
+         self.io = io
+
+         self.shell_commands = []
+
+         if not auto_commits:
+             dirty_commits = False
+
+         self.auto_commits = auto_commits
+         self.dirty_commits = dirty_commits
+
+         self.dry_run = dry_run
+         self.pretty = self.io.pretty
+
+         self.main_model = main_model
+         # Set the reasoning tag name based on model settings or default
+         self.reasoning_tag_name = (
+             self.main_model.reasoning_tag if self.main_model.reasoning_tag else REASONING_TAG
+         )
+
+         self.stream = stream and main_model.streaming
+
+         if cache_prompts and self.main_model.cache_control:
+             self.add_cache_headers = True
+
+         self.show_diffs = show_diffs
+
+         self.commands = commands or Commands(self.io, self)
+         self.commands.coder = self
+
+         self.repo = repo
+         if use_git and self.repo is None:
+             try:
+                 self.repo = GitRepo(
+                     self.io,
+                     fnames,
+                     None,
+                     models=main_model.commit_message_models(),
+                 )
+             except FileNotFoundError:
+                 pass
+
+         if self.repo:
+             self.root = self.repo.root
+
+         for fname in fnames:
+             fname = Path(fname)
+             if self.repo and self.repo.git_ignored_file(fname) and not self.add_gitignore_files:
+                 self.io.tool_warning(f"Skipping {fname} that matches gitignore spec.")
+                 continue
+
+             if self.repo and self.repo.ignored_file(fname):
+                 self.io.tool_warning(f"Skipping {fname} that matches aiderignore spec.")
+                 continue
+
+             if not fname.exists():
+                 if utils.touch_file(fname):
+                     self.io.tool_output(f"Creating empty file {fname}")
+                 else:
+                     self.io.tool_warning(f"Can not create {fname}, skipping.")
+                     continue
+
+             if not fname.is_file():
+                 self.io.tool_warning(f"Skipping {fname} that is not a normal file.")
+                 continue
+
+             fname = str(fname.resolve())
+
+             self.abs_fnames.add(fname)
+             self.check_added_files()
+
+         if not self.repo:
+             self.root = utils.find_common_root(self.abs_fnames)
+
+         if read_only_fnames:
+             self.abs_read_only_fnames = set()
+             for fname in read_only_fnames:
+                 abs_fname = self.abs_root_path(fname)
+                 if os.path.exists(abs_fname):
+                     self.abs_read_only_fnames.add(abs_fname)
+                 else:
+                     self.io.tool_warning(f"Error: Read-only file {fname} does not exist. Skipping.")
+
+         if map_tokens is None:
+             use_repo_map = main_model.use_repo_map
+             map_tokens = 1024
+         else:
+             use_repo_map = map_tokens > 0
+
+         max_inp_tokens = self.main_model.info.get("max_input_tokens") or 0
+
+         has_map_prompt = hasattr(self, "gpt_prompts") and self.gpt_prompts.repo_content_prefix
+
+         if use_repo_map and self.repo and has_map_prompt:
+             self.repo_map = RepoMap(
+                 map_tokens,
+                 self.map_cache_dir,
+                 self.main_model,
+                 io,
+                 self.gpt_prompts.repo_content_prefix,
+                 self.verbose,
+                 max_inp_tokens,
+                 map_mul_no_files=map_mul_no_files,
+                 refresh=map_refresh,
+                 max_code_line_length=map_max_line_length,
+             )
+
+         self.summarizer = summarizer or ChatSummary(
+             [self.main_model.weak_model, self.main_model],
+             self.main_model.max_chat_history_tokens,
+         )
+
+         self.summarizer_thread = None
+         self.summarized_done_messages = []
+         self.summarizing_messages = None
+
+         if not self.done_messages and restore_chat_history:
+             history_md = self.io.read_text(self.io.chat_history_file)
+             if history_md:
+                 self.done_messages = utils.split_chat_history_markdown(history_md)
+                 self.summarize_start()
+
+         # Linting and testing
+         self.linter = Linter(root=self.root, encoding=io.encoding)
+         self.auto_lint = auto_lint
+         self.setup_lint_cmds(lint_cmds)
+         self.lint_cmds = lint_cmds
+         self.auto_test = auto_test
+         self.test_cmd = test_cmd
+
+         # Instantiate MCP tools
+         if self.mcp_servers:
+             self.initialize_mcp_tools()
+
+         # validate the functions jsonschema
+         if self.functions:
+             from jsonschema import Draft7Validator
+
+             for function in self.functions:
+                 Draft7Validator.check_schema(function)
+
+             if self.verbose:
+                 self.io.tool_output("JSON Schema:")
+                 self.io.tool_output(json.dumps(self.functions, indent=4))
+
+     def setup_lint_cmds(self, lint_cmds):
+         if not lint_cmds:
+             return
+         for lang, cmd in lint_cmds.items():
+             self.linter.set_linter(lang, cmd)
+
+     def show_announcements(self):
+         bold = True
+         for line in self.get_announcements():
+             self.io.tool_output(line, bold=bold)
+             bold = False
+
+     def add_rel_fname(self, rel_fname):
+         self.abs_fnames.add(self.abs_root_path(rel_fname))
+         self.check_added_files()
+
+     def drop_rel_fname(self, fname):
+         abs_fname = self.abs_root_path(fname)
+         if abs_fname in self.abs_fnames:
+             self.abs_fnames.remove(abs_fname)
+             return True
+
+     def abs_root_path(self, path):
+         key = path
+         if key in self.abs_root_path_cache:
+             return self.abs_root_path_cache[key]
+
+         res = Path(self.root) / path
+         res = utils.safe_abs_path(res)
+         self.abs_root_path_cache[key] = res
+         return res
+
+     fences = all_fences
+     fence = fences[0]
+
+     def show_pretty(self):
+         if not self.pretty:
+             return False
+
+         # only show pretty output if fences are the normal triple-backtick
+         if self.fence[0][0] != "`":
+             return False
+
+         return True
+
+     def _stop_waiting_spinner(self):
+         """Stop and clear the waiting spinner if it is running."""
+         spinner = getattr(self, "waiting_spinner", None)
+         if spinner:
+             try:
+                 spinner.stop()
+             finally:
+                 self.waiting_spinner = None
+
+     def get_abs_fnames_content(self):
+         for fname in list(self.abs_fnames):
+             content = self.io.read_text(fname)
+
+             if content is None:
+                 relative_fname = self.get_rel_fname(fname)
+                 self.io.tool_warning(f"Dropping {relative_fname} from the chat.")
+                 self.abs_fnames.remove(fname)
+             else:
+                 yield fname, content
+
+     def choose_fence(self):
+         all_content = ""
+         for _fname, content in self.get_abs_fnames_content():
+             all_content += content + "\n"
+         for _fname in self.abs_read_only_fnames:
+             content = self.io.read_text(_fname)
+             if content is not None:
+                 all_content += content + "\n"
+
+         lines = all_content.splitlines()
+         good = False
+         for fence_open, fence_close in self.fences:
+             if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):
+                 continue
+             good = True
+             break
+
+         if good:
+             self.fence = (fence_open, fence_close)
+         else:
+             self.fence = self.fences[0]
+             self.io.tool_warning(
+                 "Unable to find a fencing strategy! Falling back to:"
+                 f" {self.fence[0]}...{self.fence[1]}"
+             )
+
+         return
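
choose_fence scans every chat file and keeps the first fence pair whose opener or closer never begins a line in any of them, so a markdown file full of triple-backtick blocks pushes the coder to the quadruple-backtick fence (or an HTML-style one). A small sketch of the collision test applied per candidate (illustrative only):

    content = "```python\nprint('hi')\n```\n"
    # The triple-backtick candidate collides...
    any(line.startswith("```") for line in content.splitlines())   # True -> rejected
    # ...but the quadruple-backtick candidate does not:
    any(line.startswith("````") for line in content.splitlines())  # False -> chosen
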
+
+     def get_files_content(self, fnames=None):
+         if not fnames:
+             fnames = self.abs_fnames
+
+         prompt = ""
+         for fname, content in self.get_abs_fnames_content():
+             if not is_image_file(fname):
+                 relative_fname = self.get_rel_fname(fname)
+                 prompt += "\n"
+                 prompt += relative_fname
+                 prompt += f"\n{self.fence[0]}\n"
+
+                 # Apply context management if enabled for large files
+                 if self.context_management_enabled:
+                     # Calculate tokens for this file
+                     file_tokens = self.main_model.token_count(content)
+
+                     if file_tokens > self.large_file_token_threshold:
+                         # Truncate the file content
+                         lines = content.splitlines()
+
+                         # Keep the first and last parts of the file with a marker in between
+                         keep_lines = (
+                             self.large_file_token_threshold // 40
+                         )  # Rough estimate of tokens per line
+                         first_chunk = lines[: keep_lines // 2]
+                         last_chunk = lines[-(keep_lines // 2) :]
+
+                         truncated_content = "\n".join(first_chunk)
+                         truncated_content += (
+                             f"\n\n... [File truncated due to size ({file_tokens} tokens). Use"
+                             " /context-management to toggle truncation off] ...\n\n"
+                         )
+                         truncated_content += "\n".join(last_chunk)
+
+                         # Add message about truncation
+                         self.io.tool_output(
+                             f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
+                             "Use /context-management to toggle truncation off if needed."
+                         )
+
+                         prompt += truncated_content
+                     else:
+                         prompt += content
+                 else:
+                     prompt += content
+
+                 prompt += f"{self.fence[1]}\n"
+
+         return prompt
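
With the class defaults, the truncation arithmetic works out as follows: a threshold of 25000 tokens, at the code's assumed rough average of 40 tokens per line, gives keep_lines = 25000 // 40 = 625, so about the first 312 and last 312 lines survive on either side of the marker. Illustrative only:

    threshold = 25000
    keep_lines = threshold // 40      # 625 lines kept in total
    head = keep_lines // 2            # 312 lines from the top of the file
    tail = keep_lines // 2            # 312 lines from the bottom
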
+
+     def get_read_only_files_content(self):
+         prompt = ""
+         for fname in self.abs_read_only_fnames:
+             content = self.io.read_text(fname)
+             if content is not None and not is_image_file(fname):
+                 relative_fname = self.get_rel_fname(fname)
+                 prompt += "\n"
+                 prompt += relative_fname
+                 prompt += f"\n{self.fence[0]}\n"
+
+                 # Apply context management if enabled for large files (same as get_files_content)
+                 if self.context_management_enabled:
+                     # Calculate tokens for this file
+                     file_tokens = self.main_model.token_count(content)
+
+                     if file_tokens > self.large_file_token_threshold:
+                         # Truncate the file content
+                         lines = content.splitlines()
+
+                         # Keep the first and last parts of the file with a marker in between
+                         keep_lines = (
+                             self.large_file_token_threshold // 40
+                         )  # Rough estimate of tokens per line
+                         first_chunk = lines[: keep_lines // 2]
+                         last_chunk = lines[-(keep_lines // 2) :]
+
+                         truncated_content = "\n".join(first_chunk)
+                         truncated_content += (
+                             f"\n\n... [File truncated due to size ({file_tokens} tokens). Use"
+                             " /context-management to toggle truncation off] ...\n\n"
+                         )
+                         truncated_content += "\n".join(last_chunk)
+
+                         # Add message about truncation
+                         self.io.tool_output(
+                             f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
+                             "Use /context-management to toggle truncation off if needed."
+                         )
+
+                         prompt += truncated_content
+                     else:
+                         prompt += content
+                 else:
+                     prompt += content
+
+                 prompt += f"{self.fence[1]}\n"
+         return prompt
+
+     def get_cur_message_text(self):
+         text = ""
+         for msg in self.cur_messages:
+             # For some models the content is None if the message
+             # contains tool calls.
+             content = msg["content"] or ""
+             text += content + "\n"
+         return text
+
+     def get_ident_mentions(self, text):
+         # Split the string on any character that is not alphanumeric
+         # \W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)
+         words = set(re.split(r"\W+", text))
+         return words
+
+     def get_ident_filename_matches(self, idents):
+         all_fnames = defaultdict(set)
+         for fname in self.get_all_relative_files():
+             # Skip empty paths or just '.'
+             if not fname or fname == ".":
+                 continue
+
+             try:
+                 # Handle dotfiles properly
+                 path = Path(fname)
+                 base = path.stem.lower()  # Use stem instead of with_suffix("").name
+                 if len(base) >= 5:
+                     all_fnames[base].add(fname)
+             except ValueError:
+                 # Skip paths that can't be processed
+                 continue
+
+         matches = set()
+         for ident in idents:
+             if len(ident) < 5:
+                 continue
+             matches.update(all_fnames[ident.lower()])
+
+         return matches
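
Only stems of five or more characters participate on either side of this match, which keeps short tokens like "main" or "io" from dragging files into the repo map. A sketch of the behavior, with a hypothetical repo state:

    # Suppose get_all_relative_files() includes "aider/repomap.py", so
    # all_fnames maps "repomap" -> {"aider/repomap.py"}. Then:
    matches = coder.get_ident_filename_matches({"repomap", "io"})
    # -> {"aider/repomap.py"}   ("io" is skipped because len("io") < 5)
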
+
+     def get_repo_map(self, force_refresh=False):
+         if not self.repo_map:
+             return
+
+         cur_msg_text = self.get_cur_message_text()
+         mentioned_fnames = self.get_file_mentions(cur_msg_text)
+         mentioned_idents = self.get_ident_mentions(cur_msg_text)
+
+         mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))
+
+         all_abs_files = set(self.get_all_abs_files())
+         repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files
+         chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames
+         other_files = all_abs_files - chat_files
+
+         repo_content = self.repo_map.get_repo_map(
+             chat_files,
+             other_files,
+             mentioned_fnames=mentioned_fnames,
+             mentioned_idents=mentioned_idents,
+             force_refresh=force_refresh,
+         )
+
+         # fall back to global repo map if files in chat are disjoint from rest of repo
+         if not repo_content:
+             repo_content = self.repo_map.get_repo_map(
+                 set(),
+                 all_abs_files,
+                 mentioned_fnames=mentioned_fnames,
+                 mentioned_idents=mentioned_idents,
+             )
+
+         # fall back to completely unhinted repo
+         if not repo_content:
+             repo_content = self.repo_map.get_repo_map(
+                 set(),
+                 all_abs_files,
+             )
+
+         return repo_content
+
+     def get_repo_messages(self):
+         repo_messages = []
+         repo_content = self.get_repo_map()
+         if repo_content:
+             repo_messages += [
+                 dict(role="user", content=repo_content),
+                 dict(
+                     role="assistant",
+                     content="Ok, I won't try and edit those files without asking first.",
+                 ),
+             ]
+         return repo_messages
+
+     def get_readonly_files_messages(self):
+         readonly_messages = []
+
+         # Handle non-image files
+         read_only_content = self.get_read_only_files_content()
+         if read_only_content:
+             readonly_messages += [
+                 dict(
+                     role="user", content=self.gpt_prompts.read_only_files_prefix + read_only_content
+                 ),
+                 dict(
+                     role="assistant",
+                     content="Ok, I will use these files as references.",
+                 ),
+             ]
+
+         # Handle image files
+         images_message = self.get_images_message(self.abs_read_only_fnames)
+         if images_message is not None:
+             readonly_messages += [
+                 images_message,
+                 dict(role="assistant", content="Ok, I will use these images as references."),
+             ]
+
+         return readonly_messages
+
+     def get_chat_files_messages(self):
+         chat_files_messages = []
+         if self.abs_fnames:
+             files_content = self.gpt_prompts.files_content_prefix
+             files_content += self.get_files_content()
+             files_reply = self.gpt_prompts.files_content_assistant_reply
+         elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:
+             files_content = self.gpt_prompts.files_no_full_files_with_repo_map
+             files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply
+         else:
+             files_content = self.gpt_prompts.files_no_full_files
+             files_reply = "Ok."
+
+         if files_content:
+             chat_files_messages += [
+                 dict(role="user", content=files_content),
+                 dict(role="assistant", content=files_reply),
+             ]
+
+         images_message = self.get_images_message(self.abs_fnames)
+         if images_message is not None:
+             chat_files_messages += [
+                 images_message,
+                 dict(role="assistant", content="Ok."),
+             ]
+
+         return chat_files_messages
+
+     def get_images_message(self, fnames):
+         supports_images = self.main_model.info.get("supports_vision")
+         supports_pdfs = self.main_model.info.get("supports_pdf_input") or self.main_model.info.get(
+             "max_pdf_size_mb"
+         )
+
+         # https://github.com/BerriAI/litellm/pull/6928
+         supports_pdfs = supports_pdfs or "claude-3-5-sonnet-20241022" in self.main_model.name
+
+         if not (supports_images or supports_pdfs):
+             return None
+
+         image_messages = []
+         for fname in fnames:
+             if not is_image_file(fname):
+                 continue
+
+             mime_type, _ = mimetypes.guess_type(fname)
+             if not mime_type:
+                 continue
+
+             with open(fname, "rb") as image_file:
+                 encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
+             image_url = f"data:{mime_type};base64,{encoded_string}"
+             rel_fname = self.get_rel_fname(fname)
+
+             if mime_type.startswith("image/") and supports_images:
+                 image_messages += [
+                     {"type": "text", "text": f"Image file: {rel_fname}"},
+                     {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}},
+                 ]
+             elif mime_type == "application/pdf" and supports_pdfs:
+                 image_messages += [
+                     {"type": "text", "text": f"PDF file: {rel_fname}"},
+                     {"type": "image_url", "image_url": image_url},
+                 ]
+
+         if not image_messages:
+             return None
+
+         return {"role": "user", "content": image_messages}
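
The returned value follows the OpenAI-style multimodal content-list shape that litellm accepts. For a single PNG in the chat, the message would look roughly like this (the path and data URL are illustrative placeholders):

    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Image file: docs/diagram.png"},
            {
                "type": "image_url",
                "image_url": {"url": "data:image/png;base64,...", "detail": "high"},
            },
        ],
    }
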
+
+     def run_stream(self, user_message):
+         self.io.user_input(user_message)
+         self.init_before_message()
+         yield from self.send_message(user_message)
+
+     def init_before_message(self):
+         self.aider_edited_files = set()
+         self.reflected_message = None
+         self.num_reflections = 0
+         self.lint_outcome = None
+         self.test_outcome = None
+         self.shell_commands = []
+         self.message_cost = 0
+
+         if self.repo:
+             self.commit_before_message.append(self.repo.get_head_commit_sha())
+
+     def run(self, with_message=None, preproc=True):
+         try:
+             if with_message:
+                 self.io.user_input(with_message)
+                 self.run_one(with_message, preproc)
+                 return self.partial_response_content
+             while True:
+                 try:
+                     if not self.io.placeholder:
+                         self.copy_context()
+                     user_message = self.get_input()
+                     self.compact_context_if_needed()
+                     self.run_one(user_message, preproc)
+                     self.show_undo_hint()
+                 except KeyboardInterrupt:
+                     self.keyboard_interrupt()
+         except EOFError:
+             return
+
+     def copy_context(self):
+         if self.auto_copy_context:
+             self.commands.cmd_copy_context()
+
+     def get_input(self):
+         inchat_files = self.get_inchat_relative_files()
+         read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]
+         all_files = sorted(set(inchat_files + read_only_files))
+         edit_format = "" if self.edit_format == self.main_model.edit_format else self.edit_format
+         return self.io.get_input(
+             self.root,
+             all_files,
+             self.get_addable_relative_files(),
+             self.commands,
+             self.abs_read_only_fnames,
+             edit_format=edit_format,
+         )
+
+     def preproc_user_input(self, inp):
+         if not inp:
+             return
+
+         if self.commands.is_command(inp):
+             return self.commands.run(inp)
+
+         self.check_for_file_mentions(inp)
+         inp = self.check_for_urls(inp)
+
+         return inp
+
+     def run_one(self, user_message, preproc):
+         self.init_before_message()
+
+         if preproc:
+             message = self.preproc_user_input(user_message)
+         else:
+             message = user_message
+
+         while message:
+             self.reflected_message = None
+             list(self.send_message(message))
+
+             if not self.reflected_message:
+                 break
+
+             if self.num_reflections >= self.max_reflections:
+                 self.io.tool_warning(f"Only {self.max_reflections} reflections allowed, stopping.")
+                 return
+
+             self.num_reflections += 1
+             message = self.reflected_message
+
+     def check_and_open_urls(self, exc, friendly_msg=None):
+         """Check exception for URLs, offer to open in a browser, with user-friendly error msgs."""
+         text = str(exc)
+
+         if friendly_msg:
+             self.io.tool_warning(text)
+             self.io.tool_error(f"{friendly_msg}")
+         else:
+             self.io.tool_error(text)
+
+         # Exclude double quotes from the matched URL characters
+         url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*)')
+         # Use set to remove duplicates
+         urls = list(set(url_pattern.findall(text)))
+         for url in urls:
+             url = url.rstrip(".',\"}")  # Added } to the characters to strip
+             self.io.offer_url(url)
+         return urls
+
+     def check_for_urls(self, inp: str) -> List[str]:
+         """Check input for URLs and offer to add them to the chat."""
+         if not self.detect_urls:
+             return inp
+
+         # Exclude double quotes from the matched URL characters
+         url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*[^\s,.])')
+         # Use set to remove duplicates
+         urls = list(set(url_pattern.findall(inp)))
+         group = ConfirmGroup(urls)
+         for url in urls:
+             if url not in self.rejected_urls:
+                 url = url.rstrip(".',\"")
+                 if self.io.confirm_ask(
+                     "Add URL to the chat?", subject=url, group=group, allow_never=True
+                 ):
+                     inp += "\n\n"
+                     inp += self.commands.cmd_web(url, return_content=True)
+                 else:
+                     self.rejected_urls.add(url)
+
+         return inp
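
The URL pattern stops at whitespace and double quotes and refuses to end on a comma or period, so trailing sentence punctuation is excluded from the match. A quick check of the regex on its own (illustrative):

    import re

    url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*[^\s,.])')
    url_pattern.findall("see https://aider.chat/docs/install.html.")
    # -> ['https://aider.chat/docs/install.html']
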
+
+     def keyboard_interrupt(self):
+         # Ensure cursor is visible on exit
+         Console().show_cursor(True)
+
+         now = time.time()
+
+         thresh = 2  # seconds
+         if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:
+             self.io.tool_warning("\n\n^C KeyboardInterrupt")
+             self.event("exit", reason="Control-C")
+             sys.exit()
+
+         self.io.tool_warning("\n\n^C again to exit")
+
+         self.last_keyboard_interrupt = now
+
+     def summarize_start(self):
+         if not self.summarizer.check_max_tokens(self.done_messages):
+             return
+
+         self.summarize_end()
+
+         if self.verbose:
+             self.io.tool_output("Starting to summarize chat history.")
+
+         self.summarizer_thread = threading.Thread(target=self.summarize_worker)
+         self.summarizer_thread.start()
+
+     def summarize_worker(self):
+         self.summarizing_messages = list(self.done_messages)
+         try:
+             self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)
+         except ValueError as err:
+             self.io.tool_warning(err.args[0])
+             self.summarized_done_messages = self.summarizing_messages
+
+         if self.verbose:
+             self.io.tool_output("Finished summarizing chat history.")
+
+     def summarize_end(self):
+         if self.summarizer_thread is None:
+             return
+
+         self.summarizer_thread.join()
+         self.summarizer_thread = None
+
+         if self.summarizing_messages == self.done_messages:
+             self.done_messages = self.summarized_done_messages
+         self.summarizing_messages = None
+         self.summarized_done_messages = []
+
+     def compact_context_if_needed(self):
+         if not self.enable_context_compaction:
+             self.summarize_start()
+             return
+
+         if not self.summarizer.check_max_tokens(
+             self.done_messages, max_tokens=self.context_compaction_max_tokens
+         ):
+             return
+
+         self.io.tool_output("Compacting chat history to make room for new messages...")
+
+         try:
+             # Create a summary of the conversation
+             summary_text = self.summarizer.summarize_all_as_text(
+                 self.done_messages,
+                 self.gpt_prompts.compaction_prompt,
+                 self.context_compaction_summary_tokens,
+             )
+             if not summary_text:
+                 raise ValueError("Summarization returned an empty result.")
+
+             # Replace old messages with the summary
+             self.done_messages = [
+                 {
+                     "role": "user",
+                     "content": summary_text,
+                 },
+                 {
+                     "role": "assistant",
+                     "content": (
+                         "Ok, I will use this summary as the context for our conversation going"
+                         " forward."
+                     ),
+                 },
+             ]
+             self.io.tool_output("...chat history compacted.")
+         except Exception as e:
+             self.io.tool_warning(f"Context compaction failed: {e}")
+             self.io.tool_warning("Proceeding with full history for now.")
+             self.summarize_start()
+             return
+
+     def move_back_cur_messages(self, message):
+         self.done_messages += self.cur_messages
+
+         # TODO check for impact on image messages
+         if message:
+             self.done_messages += [
+                 dict(role="user", content=message),
+                 dict(role="assistant", content="Ok."),
+             ]
+         self.cur_messages = []
+
+     def normalize_language(self, lang_code):
+         """
+         Convert a locale code such as ``en_US`` or ``fr`` into a readable
+         language name (e.g. ``English`` or ``French``). If Babel is
+         available it is used for reliable conversion; otherwise a small
+         built-in fallback map handles common languages.
+         """
+         if not lang_code:
+             return None
+
+         if lang_code.upper() in ("C", "POSIX"):
+             return None
+
+         # Probably already a language name
+         if (
+             len(lang_code) > 3
+             and "_" not in lang_code
+             and "-" not in lang_code
+             and lang_code[0].isupper()
+         ):
+             return lang_code
+
+         # Preferred: Babel
+         if Locale is not None:
+             try:
+                 loc = Locale.parse(lang_code.replace("-", "_"))
+                 return loc.get_display_name("en").capitalize()
+             except Exception:
+                 pass  # Fall back to manual mapping
+
+         # Simple fallback for common languages
+         fallback = {
+             "en": "English",
+             "fr": "French",
+             "es": "Spanish",
+             "de": "German",
+             "it": "Italian",
+             "pt": "Portuguese",
+             "zh": "Chinese",
+             "ja": "Japanese",
+             "ko": "Korean",
+             "ru": "Russian",
+         }
+         primary_lang_code = lang_code.replace("-", "_").split("_")[0].lower()
+         return fallback.get(primary_lang_code, lang_code)
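
Illustrative inputs and outputs (the Babel path is preferred when the import succeeded; the dict is only a fallback):

    coder.normalize_language("fr")      # -> "French" (via babel Locale.parse, if installed)
    coder.normalize_language("C")       # -> None: POSIX locales carry no language name
    coder.normalize_language("German")  # -> "German": already looks like a language name
    # Without Babel, "en_US" still resolves: primary code "en" -> "English"
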
1239
+
1240
+ def get_user_language(self):
1241
+ """
1242
+ Detect the user's language preference and return a human-readable
1243
+ language name such as ``English``. Detection order:
1244
+
1245
+ 1. ``self.chat_language`` if explicitly set
1246
+ 2. ``locale.getlocale()``
1247
+ 3. ``LANG`` / ``LANGUAGE`` / ``LC_ALL`` / ``LC_MESSAGES`` environment variables
1248
+ """
1249
+
1250
+ # Explicit override
1251
+ if self.chat_language:
1252
+ return self.normalize_language(self.chat_language)
1253
+
1254
+ # System locale
1255
+ try:
1256
+ lang = locale.getlocale()[0]
1257
+ if lang:
1258
+ lang = self.normalize_language(lang)
1259
+ if lang:
1260
+ return lang
1261
+ except Exception:
1262
+ pass
1263
+
1264
+ # Environment variables
1265
+ for env_var in ("LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"):
1266
+ lang = os.environ.get(env_var)
1267
+ if lang:
1268
+ lang = lang.split(".")[0] # Strip encoding if present
1269
+ return self.normalize_language(lang)
1270
+
1271
+ return None
1272
+
1273
+ def get_platform_info(self):
1274
+ platform_text = ""
1275
+ try:
1276
+ platform_text = f"- Platform: {platform.platform()}\n"
1277
+ except KeyError:
1278
+ # Skip platform info if it can't be retrieved
1279
+ platform_text = "- Platform information unavailable\n"
1280
+
1281
+ shell_var = "COMSPEC" if os.name == "nt" else "SHELL"
1282
+ shell_val = os.getenv(shell_var)
1283
+ platform_text += f"- Shell: {shell_var}={shell_val}\n"
1284
+
1285
+ user_lang = self.get_user_language()
1286
+ if user_lang:
1287
+ platform_text += f"- Language: {user_lang}\n"
1288
+
1289
+ dt = datetime.now().astimezone().strftime("%Y-%m-%d")
1290
+ platform_text += f"- Current date: {dt}\n"
1291
+
1292
+ if self.repo:
1293
+ platform_text += "- The user is operating inside a git repository\n"
1294
+
1295
+ if self.lint_cmds:
1296
+ if self.auto_lint:
1297
+ platform_text += (
1298
+ "- The user's pre-commit runs these lint commands, don't suggest running"
1299
+ " them:\n"
1300
+ )
1301
+ else:
1302
+ platform_text += "- The user prefers these lint commands:\n"
1303
+ for lang, cmd in self.lint_cmds.items():
1304
+ if lang is None:
1305
+ platform_text += f" - {cmd}\n"
1306
+ else:
1307
+ platform_text += f" - {lang}: {cmd}\n"
1308
+
1309
+ if self.test_cmd:
1310
+ if self.auto_test:
1311
+ platform_text += (
1312
+ "- The user's pre-commit runs this test command, don't suggest running them: "
1313
+ )
1314
+ else:
1315
+ platform_text += "- The user prefers this test command: "
1316
+ platform_text += self.test_cmd + "\n"
1317
+
1318
+ return platform_text
1319
+
+    def fmt_system_prompt(self, prompt):
+        final_reminders = []
+
+        lazy_prompt = ""
+        if self.main_model.lazy:
+            lazy_prompt = self.gpt_prompts.lazy_prompt
+            final_reminders.append(lazy_prompt)
+
+        overeager_prompt = ""
+        if self.main_model.overeager:
+            overeager_prompt = self.gpt_prompts.overeager_prompt
+            final_reminders.append(overeager_prompt)
+
+        user_lang = self.get_user_language()
+        if user_lang:
+            final_reminders.append(f"Reply in {user_lang}.\n")
+
+        platform_text = self.get_platform_info()
+
+        if self.suggest_shell_commands:
+            shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
+            shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
+            rename_with_shell = self.gpt_prompts.rename_with_shell
+        else:
+            shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
+            shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
+                platform=platform_text
+            )
+            rename_with_shell = ""
+
+        if user_lang:  # user_lang is the result of self.get_user_language()
+            language = user_lang
+        else:
+            # Default if no specific lang detected
+            language = "the same language they are using"
+
+        if self.fence[0] == "`" * 4:
+            quad_backtick_reminder = (
+                "\nIMPORTANT: Use *quadruple* backticks ```` as fences, not triple backticks!\n"
+            )
+        else:
+            quad_backtick_reminder = ""
+
+        if self.mcp_tools and len(self.mcp_tools) > 0:
+            final_reminders.append(self.gpt_prompts.tool_prompt)
+
+        final_reminders = "\n\n".join(final_reminders)
+
+        prompt = prompt.format(
+            fence=self.fence,
+            quad_backtick_reminder=quad_backtick_reminder,
+            final_reminders=final_reminders,
+            platform=platform_text,
+            shell_cmd_prompt=shell_cmd_prompt,
+            rename_with_shell=rename_with_shell,
+            shell_cmd_reminder=shell_cmd_reminder,
+            go_ahead_tip=self.gpt_prompts.go_ahead_tip,
+            language=language,
+            lazy_prompt=lazy_prompt,
+            overeager_prompt=overeager_prompt,
+        )
+
+        return prompt
+
+    def format_chat_chunks(self):
+        self.choose_fence()
+        main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
+        if self.main_model.system_prompt_prefix:
+            main_sys = self.main_model.system_prompt_prefix + "\n" + main_sys
+
+        example_messages = []
+        if self.main_model.examples_as_sys_msg:
+            if self.gpt_prompts.example_messages:
+                main_sys += "\n# Example conversations:\n\n"
+            for msg in self.gpt_prompts.example_messages:
+                role = msg["role"]
+                content = self.fmt_system_prompt(msg["content"])
+                main_sys += f"## {role.upper()}: {content}\n\n"
+            main_sys = main_sys.strip()
+        else:
+            for msg in self.gpt_prompts.example_messages:
+                example_messages.append(
+                    dict(
+                        role=msg["role"],
+                        content=self.fmt_system_prompt(msg["content"]),
+                    )
+                )
+            if self.gpt_prompts.example_messages:
+                example_messages += [
+                    dict(
+                        role="user",
+                        content=(
+                            "I switched to a new code base. Please don't consider the above files"
+                            " or try to edit them any longer."
+                        ),
+                    ),
+                    dict(role="assistant", content="Ok."),
+                ]
+
+        if self.gpt_prompts.system_reminder:
+            main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
+
+        chunks = ChatChunks()
+
+        if self.main_model.use_system_prompt:
+            chunks.system = [
+                dict(role="system", content=main_sys),
+            ]
+        else:
+            chunks.system = [
+                dict(role="user", content=main_sys),
+                dict(role="assistant", content="Ok."),
+            ]
+
+        chunks.examples = example_messages
+
+        self.summarize_end()
+        chunks.done = self.done_messages
+
+        chunks.repo = self.get_repo_messages()
+        chunks.readonly_files = self.get_readonly_files_messages()
+        chunks.chat_files = self.get_chat_files_messages()
+
+        if self.gpt_prompts.system_reminder:
+            reminder_message = [
+                dict(
+                    role="system", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)
+                ),
+            ]
+        else:
+            reminder_message = []
+
+        chunks.cur = list(self.cur_messages)
+        chunks.reminder = []
+
+        # TODO review impact of token count on image messages
+        messages_tokens = self.main_model.token_count(chunks.all_messages())
+        reminder_tokens = self.main_model.token_count(reminder_message)
+        cur_tokens = self.main_model.token_count(chunks.cur)
+
+        if None not in (messages_tokens, reminder_tokens, cur_tokens):
+            total_tokens = messages_tokens + reminder_tokens + cur_tokens
+        else:
+            # add the reminder anyway
+            total_tokens = 0
+
+        if chunks.cur:
+            final = chunks.cur[-1]
+        else:
+            final = None
+
+        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
+        # Add the reminder prompt if we still have room to include it.
+        if (
+            not max_input_tokens
+            or total_tokens < max_input_tokens
+            and self.gpt_prompts.system_reminder
+        ):
+            if self.main_model.reminder == "sys":
+                chunks.reminder = reminder_message
+            elif self.main_model.reminder == "user" and final and final["role"] == "user":
+                # stuff it into the user message
+                new_content = (
+                    final["content"]
+                    + "\n\n"
+                    + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
+                )
+                chunks.cur[-1] = dict(role=final["role"], content=new_content)
+
+        return chunks
+
+    def format_messages(self):
+        chunks = self.format_chat_chunks()
+        if self.add_cache_headers:
+            chunks.add_cache_control_headers()
+
+        return chunks
+
+    def warm_cache(self, chunks):
+        if not self.add_cache_headers:
+            return
+        if not self.num_cache_warming_pings:
+            return
+        if not self.ok_to_warm_cache:
+            return
+
+        delay = 5 * 60 - 5
+        delay = float(os.environ.get("AIDER_CACHE_KEEPALIVE_DELAY", delay))
+        self.next_cache_warm = time.time() + delay
+        self.warming_pings_left = self.num_cache_warming_pings
+        self.cache_warming_chunks = chunks
+
+        if self.cache_warming_thread:
+            return
+
+        def warm_cache_worker():
+            while self.ok_to_warm_cache:
+                time.sleep(1)
+                if self.warming_pings_left <= 0:
+                    continue
+                now = time.time()
+                if now < self.next_cache_warm:
+                    continue
+
+                self.warming_pings_left -= 1
+                self.next_cache_warm = time.time() + delay
+
+                kwargs = dict(self.main_model.extra_params) or dict()
+                kwargs["max_tokens"] = 1
+
+                try:
+                    completion = litellm.completion(
+                        model=self.main_model.name,
+                        messages=self.cache_warming_chunks.cacheable_messages(),
+                        stream=False,
+                        **kwargs,
+                    )
+                except Exception as err:
+                    self.io.tool_warning(f"Cache warming error: {str(err)}")
+                    continue
+
+                cache_hit_tokens = getattr(
+                    completion.usage, "prompt_cache_hit_tokens", 0
+                ) or getattr(completion.usage, "cache_read_input_tokens", 0)
+
+                if self.verbose:
+                    self.io.tool_output(f"Warmed {format_tokens(cache_hit_tokens)} cached tokens.")
+
+        self.cache_warming_thread = threading.Timer(0, warm_cache_worker)
+        self.cache_warming_thread.daemon = True
+        self.cache_warming_thread.start()
+
+        return chunks
+
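warm_cache keeps a provider-side prompt cache alive by sending a 1-token completion shortly before the cache would expire; the default delay of 5 * 60 - 5 = 295 seconds sits just under a presumed 5-minute cache TTL. A minimal sketch of the same keep-alive scheduling, with a stubbed ping callback standing in for the real litellm.completion call:

    import os
    import threading
    import time

    def keepalive(ping, pings_left, delay=None):
        # Default: fire 5 seconds before a 5-minute cache TTL would lapse.
        if delay is None:
            delay = float(os.environ.get("AIDER_CACHE_KEEPALIVE_DELAY", 5 * 60 - 5))
        next_ping = time.time() + delay
        while pings_left > 0:
            time.sleep(1)
            if time.time() < next_ping:
                continue
            pings_left -= 1
            next_ping = time.time() + delay
            ping()  # e.g. a stream=False, max_tokens=1 completion on the cached prefix

    # Daemon thread, mirroring the threading.Timer(0, ...) used above.
    threading.Thread(target=keepalive, args=(lambda: print("ping"), 2), daemon=True).start()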
+    def check_tokens(self, messages):
+        """Check if the messages will fit within the model's token limits."""
+        input_tokens = self.main_model.token_count(messages)
+        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
+
+        if max_input_tokens and input_tokens >= max_input_tokens:
+            self.io.tool_error(
+                f"Your estimated chat context of {input_tokens:,} tokens exceeds the"
+                f" {max_input_tokens:,} token limit for {self.main_model.name}!"
+            )
+            self.io.tool_output("To reduce the chat context:")
+            self.io.tool_output("- Use /drop to remove unneeded files from the chat")
+            self.io.tool_output("- Use /clear to clear the chat history")
+            self.io.tool_output("- Break your code into smaller files")
+            self.io.tool_output(
+                "It's probably safe to try and send the request; most providers won't charge if"
+                " the context limit is exceeded."
+            )
+
+            if not self.io.confirm_ask("Try to proceed anyway?"):
+                return False
+        return True
+
+    def send_message(self, inp):
+        self.event("message_send_starting")
+
+        # Notify IO that LLM processing is starting
+        self.io.llm_started()
+
+        self.cur_messages += [
+            dict(role="user", content=inp),
+        ]
+
+        chunks = self.format_messages()
+        messages = chunks.all_messages()
+
+        if not self.check_tokens(messages):
+            return
+        self.warm_cache(chunks)
+
+        if self.verbose:
+            utils.show_messages(messages, functions=self.functions)
+
+        self.multi_response_content = ""
+        if self.show_pretty():
+            self.waiting_spinner = WaitingSpinner("Waiting for " + self.main_model.name)
+            self.waiting_spinner.start()
+            if self.stream:
+                self.mdstream = self.io.get_assistant_mdstream()
+            else:
+                self.mdstream = None
+        else:
+            self.mdstream = None
+
+        retry_delay = 0.125
+
+        litellm_ex = LiteLLMExceptions()
+
+        self.usage_report = None
+        exhausted = False
+        interrupted = False
+        try:
+            while True:
+                try:
+                    yield from self.send(messages, functions=self.functions)
+                    break
+                except litellm_ex.exceptions_tuple() as err:
+                    ex_info = litellm_ex.get_ex_info(err)
+
+                    if ex_info.name == "ContextWindowExceededError":
+                        exhausted = True
+                        break
+
+                    should_retry = ex_info.retry
+                    if should_retry:
+                        retry_delay *= 2
+                        if retry_delay > RETRY_TIMEOUT:
+                            should_retry = False
+
+                    if not should_retry:
+                        self.mdstream = None
+                        self.check_and_open_urls(err, ex_info.description)
+                        break
+
+                    err_msg = str(err)
+                    if ex_info.description:
+                        self.io.tool_warning(err_msg)
+                        self.io.tool_error(ex_info.description)
+                    else:
+                        self.io.tool_error(err_msg)
+
+                    self.io.tool_output(f"Retrying in {retry_delay:.1f} seconds...")
+                    time.sleep(retry_delay)
+                    continue
+                except KeyboardInterrupt:
+                    interrupted = True
+                    break
+                except FinishReasonLength:
+                    # We hit the output limit!
+                    if not self.main_model.info.get("supports_assistant_prefill"):
+                        exhausted = True
+                        break
+
+                    self.multi_response_content = self.get_multi_response_content_in_progress()
+
+                    if messages[-1]["role"] == "assistant":
+                        messages[-1]["content"] = self.multi_response_content
+                    else:
+                        messages.append(
+                            dict(role="assistant", content=self.multi_response_content, prefix=True)
+                        )
+        except Exception as err:
+            self.mdstream = None
+            lines = traceback.format_exception(type(err), err, err.__traceback__)
+            self.io.tool_warning("".join(lines))
+            self.io.tool_error(str(err))
+            self.event("message_send_exception", exception=str(err))
+            return
+        finally:
+            if self.mdstream:
+                self.live_incremental_response(True)
+                self.mdstream = None
+
+            # Ensure any waiting spinner is stopped
+            self._stop_waiting_spinner()
+
+            self.partial_response_content = self.get_multi_response_content_in_progress(True)
+            self.remove_reasoning_content()
+            self.multi_response_content = ""
+
+        ###
+        # print()
+        # print("=" * 20)
+        # dump(self.partial_response_content)
+
+        self.io.tool_output()
+
+        self.show_usage_report()
+
+        self.add_assistant_reply_to_cur_messages()
+
+        if exhausted:
+            if self.cur_messages and self.cur_messages[-1]["role"] == "user":
+                self.cur_messages += [
+                    dict(
+                        role="assistant",
+                        content="FinishReasonLength exception: you sent too many tokens",
+                    ),
+                ]
+
+            self.show_exhausted_error()
+            self.num_exhausted_context_windows += 1
+            return
+
+        if self.partial_response_function_call:
+            args = self.parse_partial_args()
+            if args:
+                content = args.get("explanation") or ""
+            else:
+                content = ""
+        elif self.partial_response_content:
+            content = self.partial_response_content
+        else:
+            content = ""
+
+        if not interrupted:
+            add_rel_files_message = self.check_for_file_mentions(content)
+            if add_rel_files_message:
+                if self.reflected_message:
+                    self.reflected_message += "\n\n" + add_rel_files_message
+                else:
+                    self.reflected_message = add_rel_files_message
+                return
+
+            # Process any tools using MCP servers
+            tool_call_response = litellm.stream_chunk_builder(self.partial_response_tool_call)
+            if self.process_tool_calls(tool_call_response):
+                self.num_tool_calls += 1
+                return self.run(with_message="Continue with tool call response", preproc=False)
+
+            self.num_tool_calls = 0
+
+        try:
+            if self.reply_completed():
+                return
+        except KeyboardInterrupt:
+            interrupted = True
+
+        if interrupted:
+            if self.cur_messages and self.cur_messages[-1]["role"] == "user":
+                self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt"
+            else:
+                self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")]
+            self.cur_messages += [
+                dict(role="assistant", content="I see that you interrupted my previous reply.")
+            ]
+            return
+
+        edited = self.apply_updates()
+
+        if edited:
+            self.aider_edited_files.update(edited)
+            saved_message = self.auto_commit(edited)
+
+            if not saved_message and hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
+                saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo
+
+            self.move_back_cur_messages(saved_message)
+
+        if self.reflected_message:
+            return
+
+        if edited and self.auto_lint:
+            lint_errors = self.lint_edited(edited)
+            self.auto_commit(edited, context="Ran the linter")
+            self.lint_outcome = not lint_errors
+            if lint_errors:
+                ok = self.io.confirm_ask("Attempt to fix lint errors?")
+                if ok:
+                    self.reflected_message = lint_errors
+                    return
+
+        shared_output = self.run_shell_commands()
+        if shared_output:
+            self.cur_messages += [
+                dict(role="user", content=shared_output),
+                dict(role="assistant", content="Ok"),
+            ]
+
+        if edited and self.auto_test:
+            test_errors = self.commands.cmd_test(self.test_cmd)
+            self.test_outcome = not test_errors
+            if test_errors:
+                ok = self.io.confirm_ask("Attempt to fix test errors?")
+                if ok:
+                    self.reflected_message = test_errors
+                    return
+
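The retry loop in send_message doubles retry_delay from 0.125s on every retryable error and gives up once the delay would exceed RETRY_TIMEOUT. A sketch of the resulting schedule, assuming RETRY_TIMEOUT = 60 (the constant is defined elsewhere in this module; 60 is an illustrative value):

    RETRY_TIMEOUT = 60  # assumed value, for illustration only

    def backoff_delays(initial=0.125, cap=RETRY_TIMEOUT):
        # Yield the successive sleep intervals; stop instead of exceeding the cap.
        delay = initial
        while True:
            delay *= 2
            if delay > cap:
                return
            yield delay

    print(list(backoff_delays()))
    # [0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0] -- the next doubling (64.0) aborts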
+    def process_tool_calls(self, tool_call_response):
+        if tool_call_response is None:
+            return False
+
+        original_tool_calls = tool_call_response.choices[0].message.tool_calls
+        if not original_tool_calls:
+            return False
+
+        # Expand any tool calls that have concatenated JSON in their arguments.
+        # This is necessary because some models (like Gemini) will serialize
+        # multiple tool calls in this way.
+        expanded_tool_calls = []
+        for tool_call in original_tool_calls:
+            args_string = tool_call.function.arguments.strip()
+
+            # If there are no arguments, or it's not a string that looks like it could
+            # be concatenated JSON, just add it and continue.
+            if not args_string or not (args_string.startswith("{") or args_string.startswith("[")):
+                expanded_tool_calls.append(tool_call)
+                continue
+
+            json_chunks = utils.split_concatenated_json(args_string)
+
+            # If it's just a single JSON object, there's nothing to expand.
+            if len(json_chunks) <= 1:
+                expanded_tool_calls.append(tool_call)
+                continue
+
+            # We have concatenated JSON, so expand it into multiple tool calls.
+            for i, chunk in enumerate(json_chunks):
+                if not chunk.strip():
+                    continue
+
+                # Create a new tool call for each JSON chunk, with a unique ID.
+                new_function = tool_call.function.model_copy(update={"arguments": chunk})
+                new_tool_call = tool_call.model_copy(
+                    update={"id": f"{tool_call.id}-{i}", "function": new_function}
+                )
+                expanded_tool_calls.append(new_tool_call)
+
+        # Replace the original tool_calls in the response object with the expanded list.
+        tool_call_response.choices[0].message.tool_calls = expanded_tool_calls
+        tool_calls = expanded_tool_calls
+
+        # Collect all tool calls grouped by server
+        server_tool_calls = self._gather_server_tool_calls(tool_calls)
+
+        if server_tool_calls and self.num_tool_calls < self.max_tool_calls:
+            self._print_tool_call_info(server_tool_calls)
+
+            if self.io.confirm_ask("Run tools?"):
+                tool_responses = self._execute_tool_calls(server_tool_calls)
+
+                # Add the assistant message with the modified (expanded) tool calls.
+                # This ensures that what's stored in history is valid.
+                self.cur_messages.append(tool_call_response.choices[0].message.to_dict())
+
+                # Add all tool responses
+                for tool_response in tool_responses:
+                    self.cur_messages.append(tool_response)
+
+                return True
+        elif self.num_tool_calls >= self.max_tool_calls:
+            self.io.tool_warning(f"Only {self.max_tool_calls} tool calls allowed, stopping.")
+
+        return False
+
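The expansion above relies on utils.split_concatenated_json to cut a string like '{...}{...}' into separate JSON documents. That helper lives elsewhere in the package; one plausible sketch of the idea uses json.JSONDecoder.raw_decode, which reports where each document ends:

    import json

    def split_concatenated_json(text):
        # Split '{"a": 1}{"b": 2}' into ['{"a": 1}', '{"b": 2}'].
        decoder = json.JSONDecoder()
        chunks, idx = [], 0
        text = text.strip()
        while idx < len(text):
            try:
                _, end = decoder.raw_decode(text, idx)
            except json.JSONDecodeError:
                chunks.append(text[idx:])  # keep any trailing fragment as-is
                break
            chunks.append(text[idx:end])
            idx = end
            while idx < len(text) and text[idx].isspace():
                idx += 1
        return chunks

    print(split_concatenated_json('{"path": "a.py"}{"path": "b.py"}'))
    # ['{"path": "a.py"}', '{"path": "b.py"}']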
+    def _print_tool_call_info(self, server_tool_calls):
+        """Print information about an MCP tool call."""
+        self.io.tool_output("Preparing to run MCP tools", bold=True)
+
+        for server, tool_calls in server_tool_calls.items():
+            for tool_call in tool_calls:
+                self.io.tool_output(f"Tool Call: {tool_call.function.name}")
+                self.io.tool_output(f"Arguments: {tool_call.function.arguments}")
+                self.io.tool_output(f"MCP Server: {server.name}")
+
+                if self.verbose:
+                    self.io.tool_output(f"Tool ID: {tool_call.id}")
+                    self.io.tool_output(f"Tool type: {tool_call.type}")
+
+        self.io.tool_output("\n")
+
+    def _gather_server_tool_calls(self, tool_calls):
+        """Collect all tool calls grouped by server.
+
+        Args:
+            tool_calls: List of tool calls from the LLM response
+
+        Returns:
+            dict: Dictionary mapping servers to their respective tool calls
+        """
+        if not self.mcp_tools or len(self.mcp_tools) == 0:
+            return None
+
+        server_tool_calls = {}
+        for tool_call in tool_calls:
+            # Check if this tool_call matches any MCP tool
+            for server_name, server_tools in self.mcp_tools:
+                for tool in server_tools:
+                    if tool.get("function", {}).get("name") == tool_call.function.name:
+                        # Find the McpServer instance that will be used for communication
+                        for server in self.mcp_servers:
+                            if server.name == server_name:
+                                if server not in server_tool_calls:
+                                    server_tool_calls[server] = []
+                                server_tool_calls[server].append(tool_call)
+                                break
+
+        return server_tool_calls
+
+    def _execute_tool_calls(self, tool_calls):
+        """Process tool calls from the response and execute them if they match MCP tools.
+        Returns a list of tool response messages."""
+        tool_responses = []
+
+        # Define the coroutine to execute all tool calls for a single server
+        async def _exec_server_tools(server, tool_calls_list):
+            if isinstance(server, LocalServer):
+                if hasattr(self, "_execute_local_tool_calls"):
+                    return await self._execute_local_tool_calls(tool_calls_list)
+                else:
+                    # This coder doesn't support local tools, return errors for all calls
+                    error_responses = []
+                    for tool_call in tool_calls_list:
+                        error_responses.append(
+                            {
+                                "role": "tool",
+                                "tool_call_id": tool_call.id,
+                                "content": (
+                                    f"Coder does not support local tool: {tool_call.function.name}"
+                                ),
+                            }
+                        )
+                    return error_responses
+
+            tool_responses = []
+            try:
+                # Connect to the server once
+                session = await server.connect()
+                # Execute all tool calls for this server
+                for tool_call in tool_calls_list:
+                    try:
+                        # Arguments can be a stream of JSON objects.
+                        # We need to parse them and run a tool call for each.
+                        args_string = tool_call.function.arguments.strip()
+                        parsed_args_list = []
+                        if args_string:
+                            json_chunks = utils.split_concatenated_json(args_string)
+                            for chunk in json_chunks:
+                                try:
+                                    parsed_args_list.append(json.loads(chunk))
+                                except json.JSONDecodeError:
+                                    self.io.tool_warning(
+                                        "Could not parse JSON chunk for tool"
+                                        f" {tool_call.function.name}: {chunk}"
+                                    )
+                                    continue
+
+                        if not parsed_args_list and not args_string:
+                            parsed_args_list.append({})  # For tool calls with no arguments
+
+                        all_results_content = []
+                        for args in parsed_args_list:
+                            new_tool_call = tool_call.model_copy(deep=True)
+                            new_tool_call.function.arguments = json.dumps(args)
+
+                            call_result = await experimental_mcp_client.call_openai_tool(
+                                session=session,
+                                openai_tool=new_tool_call,
+                            )
+
+                            content_parts = []
+                            if call_result.content:
+                                for item in call_result.content:
+                                    if hasattr(item, "resource"):  # EmbeddedResource
+                                        resource = item.resource
+                                        if hasattr(resource, "text"):  # TextResourceContents
+                                            content_parts.append(resource.text)
+                                        elif hasattr(resource, "blob"):  # BlobResourceContents
+                                            try:
+                                                decoded_blob = base64.b64decode(
+                                                    resource.blob
+                                                ).decode("utf-8")
+                                                content_parts.append(decoded_blob)
+                                            except (UnicodeDecodeError, TypeError):
+                                                # Handle non-text blobs gracefully
+                                                name = getattr(resource, "name", "unnamed")
+                                                mime_type = getattr(
+                                                    resource, "mimeType", "unknown mime type"
+                                                )
+                                                content_parts.append(
+                                                    "[embedded binary resource:"
+                                                    f" {name} ({mime_type})]"
+                                                )
+                                    elif hasattr(item, "text"):  # TextContent
+                                        content_parts.append(item.text)
+
+                            result_text = "".join(content_parts)
+                            all_results_content.append(result_text)
+
+                        tool_responses.append(
+                            {
+                                "role": "tool",
+                                "tool_call_id": tool_call.id,
+                                "content": "\n\n".join(all_results_content),
+                            }
+                        )
+
+                    except Exception as e:
+                        tool_error = f"Error executing tool call {tool_call.function.name}: \n{e}"
+                        self.io.tool_warning(
+                            f"Executing {tool_call.function.name} on {server.name} failed: \n "
+                            f" Error: {e}\n"
+                        )
+                        tool_responses.append(
+                            {"role": "tool", "tool_call_id": tool_call.id, "content": tool_error}
+                        )
+            except Exception as e:
+                connection_error = f"Could not connect to server {server.name}\n{e}"
+                self.io.tool_warning(connection_error)
+                for tool_call in tool_calls_list:
+                    tool_responses.append(
+                        {"role": "tool", "tool_call_id": tool_call.id, "content": connection_error}
+                    )
+            finally:
+                await server.disconnect()
+
+            return tool_responses
+
+        # Execute all tool calls concurrently
+        async def _execute_all_tool_calls():
+            tasks = []
+            for server, tool_calls_list in tool_calls.items():
+                tasks.append(_exec_server_tools(server, tool_calls_list))
+            # Wait for all tasks to complete
+            results = await asyncio.gather(*tasks)
+            return results
+
+        # Run the async execution and collect results
+        if tool_calls:
+            all_results = []
+            max_retries = 3
+            for i in range(max_retries):
+                try:
+                    all_results = asyncio.run(_execute_all_tool_calls())
+                    break
+                except asyncio.exceptions.CancelledError:
+                    if i < max_retries - 1:
+                        time.sleep(0.1)  # Brief pause before retrying
+                    else:
+                        self.io.tool_warning(
+                            "MCP tool execution failed after multiple retries due to cancellation."
+                        )
+                        all_results = []
+
+            # Flatten the results from all servers
+            for server_results in all_results:
+                tool_responses.extend(server_results)
+
+        return tool_responses
+
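The nested helpers above follow a fan-out pattern: one task per server, each task running its server's calls sequentially over a single connection, with asyncio.gather awaiting all servers at once and the per-server result lists flattened afterwards. A stripped-down sketch of that shape, with stand-in names:

    import asyncio

    async def run_per_server(jobs):
        # jobs maps a server name to its list of calls; one task per server.
        async def run_one(name, calls):
            return [f"{name}:{call}" for call in calls]  # stand-in for real tool calls

        results = await asyncio.gather(*(run_one(n, c) for n, c in jobs.items()))
        return [resp for server_results in results for resp in server_results]

    print(asyncio.run(run_per_server({"fs": ["read", "write"], "web": ["fetch"]})))
    # ['fs:read', 'fs:write', 'web:fetch']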
+    def initialize_mcp_tools(self):
+        """
+        Initialize tools from all configured MCP servers. MCP Servers that fail to be
+        initialized will not be available to the Coder instance.
+        """
+        tools = []
+
+        async def get_server_tools(server):
+            try:
+                session = await server.connect()
+                server_tools = await experimental_mcp_client.load_mcp_tools(
+                    session=session, format="openai"
+                )
+                return (server.name, server_tools)
+            except Exception as e:
+                self.io.tool_warning(f"Error initializing MCP server {server.name}:\n{e}")
+                return None
+            finally:
+                await server.disconnect()
+
+        async def get_all_server_tools():
+            tasks = [get_server_tools(server) for server in self.mcp_servers]
+            results = await asyncio.gather(*tasks)
+            return [result for result in results if result is not None]
+
+        if self.mcp_servers:
+            # Retry initialization in case of CancelledError
+            max_retries = 3
+            for i in range(max_retries):
+                try:
+                    tools = asyncio.run(get_all_server_tools())
+                    break
+                except asyncio.exceptions.CancelledError:
+                    if i < max_retries - 1:
+                        time.sleep(0.1)  # Brief pause before retrying
+                    else:
+                        self.io.tool_warning(
+                            "MCP tool initialization failed after multiple retries due to"
+                            " cancellation."
+                        )
+                        tools = []
+
+        if len(tools) > 0:
+            self.io.tool_output("MCP servers configured:")
+            for server_name, server_tools in tools:
+                self.io.tool_output(f"  - {server_name}")
+
+                if self.verbose:
+                    for tool in server_tools:
+                        tool_name = tool.get("function", {}).get("name", "unknown")
+                        tool_desc = tool.get("function", {}).get("description", "").split("\n")[0]
+                        self.io.tool_output(f"    - {tool_name}: {tool_desc}")
+
+        self.mcp_tools = tools
+
+    def get_tool_list(self):
+        """Get a flattened list of all MCP tools."""
+        tool_list = []
+        if self.mcp_tools:
+            for _, server_tools in self.mcp_tools:
+                tool_list.extend(server_tools)
+        return tool_list
+
+    def reply_completed(self):
+        pass
+
+    def show_exhausted_error(self):
+        output_tokens = 0
+        if self.partial_response_content:
+            output_tokens = self.main_model.token_count(self.partial_response_content)
+        max_output_tokens = self.main_model.info.get("max_output_tokens") or 0
+
+        input_tokens = self.main_model.token_count(self.format_messages().all_messages())
+        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
+
+        total_tokens = input_tokens + output_tokens
+
+        fudge = 0.7
+
+        out_err = ""
+        if output_tokens >= max_output_tokens * fudge:
+            out_err = " -- possibly exceeded output limit!"
+
+        inp_err = ""
+        if input_tokens >= max_input_tokens * fudge:
+            inp_err = " -- possibly exhausted context window!"
+
+        tot_err = ""
+        if total_tokens >= max_input_tokens * fudge:
+            tot_err = " -- possibly exhausted context window!"
+
+        res = ["", ""]
+        res.append(f"Model {self.main_model.name} has hit a token limit!")
+        res.append("Token counts below are approximate.")
+        res.append("")
+        res.append(f"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}")
+        res.append(f"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}")
+        res.append(f"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}")
+
+        if output_tokens >= max_output_tokens:
+            res.append("")
+            res.append("To reduce output tokens:")
+            res.append("- Ask for smaller changes in each request.")
+            res.append("- Break your code into smaller source files.")
+            if "diff" not in self.main_model.edit_format:
+                res.append("- Use a stronger model that can return diffs.")
+
+        if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:
+            res.append("")
+            res.append("To reduce input tokens:")
+            res.append("- Use /tokens to see token usage.")
+            res.append("- Use /drop to remove unneeded files from the chat session.")
+            res.append("- Use /clear to clear the chat history.")
+            res.append("- Break your code into smaller source files.")
+
+        res = "".join([line + "\n" for line in res])
+        self.io.tool_error(res)
+        self.io.offer_url(urls.token_limits)
+
+    def lint_edited(self, fnames):
+        res = ""
+        for fname in fnames:
+            if not fname:
+                continue
+            errors = self.linter.lint(self.abs_root_path(fname))
+
+            if errors:
+                res += "\n"
+                res += errors
+                res += "\n"
+
+        if res:
+            self.io.tool_warning(res)
+
+        return res
+
+    def __del__(self):
+        """Cleanup when the Coder object is destroyed."""
+        self.ok_to_warm_cache = False
+
+    def add_assistant_reply_to_cur_messages(self):
+        if self.partial_response_content:
+            self.cur_messages += [dict(role="assistant", content=self.partial_response_content)]
+        if self.partial_response_function_call:
+            self.cur_messages += [
+                dict(
+                    role="assistant",
+                    content=None,
+                    function_call=self.partial_response_function_call,
+                )
+            ]
+
+    def get_file_mentions(self, content, ignore_current=False):
+        words = set(word for word in content.split())
+
+        # drop sentence punctuation from the end
+        words = set(word.rstrip(",.!;:?") for word in words)
+
+        # strip away all kinds of quotes
+        quotes = "\"'`*_"
+        words = set(word.strip(quotes) for word in words)
+
+        if ignore_current:
+            addable_rel_fnames = self.get_all_relative_files()
+            existing_basenames = {}
+        else:
+            addable_rel_fnames = self.get_addable_relative_files()
+
+            # Get basenames of files already in chat or read-only
+            existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {
+                os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames
+            }
+
+        mentioned_rel_fnames = set()
+        fname_to_rel_fnames = {}
+        for rel_fname in addable_rel_fnames:
+            normalized_rel_fname = rel_fname.replace("\\", "/")
+            normalized_words = set(word.replace("\\", "/") for word in words)
+            if normalized_rel_fname in normalized_words:
+                mentioned_rel_fnames.add(rel_fname)
+
+            fname = os.path.basename(rel_fname)
+
+            # Don't add basenames that could be plain words like "run" or "make"
+            if "/" in fname or "\\" in fname or "." in fname or "_" in fname or "-" in fname:
+                if fname not in fname_to_rel_fnames:
+                    fname_to_rel_fnames[fname] = []
+                fname_to_rel_fnames[fname].append(rel_fname)
+
+        for fname, rel_fnames in fname_to_rel_fnames.items():
+            # If the basename is already in chat, don't add based on a basename mention
+            if fname in existing_basenames:
+                continue
+            # If the basename mention is unique among addable files and present in the text
+            if len(rel_fnames) == 1 and fname in words:
+                mentioned_rel_fnames.add(rel_fnames[0])
+
+        return mentioned_rel_fnames
+
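The word normalization at the top of get_file_mentions is what lets a mention like `src/app.py`, survive surrounding punctuation and quoting. A quick check of the stripping rules in isolation:

    content = 'Update `src/app.py`, then check "utils.py".'
    words = {w.rstrip(",.!;:?").strip("\"'`*_") for w in content.split()}
    print(sorted(words))
    # ['Update', 'check', 'src/app.py', 'then', 'utils.py']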
+    def check_for_file_mentions(self, content):
+        mentioned_rel_fnames = self.get_file_mentions(content)
+
+        new_mentions = mentioned_rel_fnames - self.ignore_mentions
+
+        if not new_mentions:
+            return
+
+        added_fnames = []
+        group = ConfirmGroup(new_mentions)
+        for rel_fname in sorted(new_mentions):
+            if self.io.confirm_ask(
+                "Add file to the chat?", subject=rel_fname, group=group, allow_never=True
+            ):
+                self.add_rel_fname(rel_fname)
+                added_fnames.append(rel_fname)
+            else:
+                self.ignore_mentions.add(rel_fname)
+
+        if added_fnames:
+            return prompts.added_files.format(fnames=", ".join(added_fnames))
+
+    def send(self, messages, model=None, functions=None):
+        self.got_reasoning_content = False
+        self.ended_reasoning_content = False
+
+        if not model:
+            model = self.main_model
+
+        self.partial_response_content = ""
+        self.partial_response_function_call = dict()
+
+        self.io.log_llm_history("TO LLM", format_messages(messages))
+
+        completion = None
+
+        try:
+            tool_list = self.get_tool_list()
+
+            hash_object, completion = model.send_completion(
+                messages,
+                functions,
+                self.stream,
+                self.temperature,
+                # This could include any tools, but for now it is just MCP tools
+                tools=tool_list,
+            )
+            self.chat_completion_call_hashes.append(hash_object.hexdigest())
+
+            if self.stream:
+                yield from self.show_send_output_stream(completion)
+            else:
+                self.show_send_output(completion)
+
+            # Calculate costs for successful responses
+            self.calculate_and_show_tokens_and_cost(messages, completion)
+
+        except LiteLLMExceptions().exceptions_tuple() as err:
+            ex_info = LiteLLMExceptions().get_ex_info(err)
+            if ex_info.name == "ContextWindowExceededError":
+                # Still calculate costs for context window errors
+                self.calculate_and_show_tokens_and_cost(messages, completion)
+            raise
+        except KeyboardInterrupt as kbi:
+            self.keyboard_interrupt()
+            raise kbi
+        finally:
+            self.io.log_llm_history(
+                "LLM RESPONSE",
+                format_content("ASSISTANT", self.partial_response_content),
+            )
+
+            if self.partial_response_content:
+                self.io.ai_output(self.partial_response_content)
+            elif self.partial_response_function_call:
+                # TODO: push this into subclasses
+                args = self.parse_partial_args()
+                if args:
+                    self.io.ai_output(json.dumps(args, indent=4))
+
+    def show_send_output(self, completion):
+        # Stop spinner once we have a response
+        self._stop_waiting_spinner()
+
+        if self.verbose:
+            print(completion)
+
+        if not completion.choices:
+            self.io.tool_error(str(completion))
+            return
+
+        show_func_err = None
+        show_content_err = None
+        try:
+            if completion.choices[0].message.tool_calls:
+                self.partial_response_function_call = (
+                    completion.choices[0].message.tool_calls[0].function
+                )
+        except AttributeError as func_err:
+            show_func_err = func_err
+
+        try:
+            reasoning_content = completion.choices[0].message.reasoning_content
+        except AttributeError:
+            try:
+                reasoning_content = completion.choices[0].message.reasoning
+            except AttributeError:
+                reasoning_content = None
+
+        try:
+            self.partial_response_content = completion.choices[0].message.content or ""
+        except AttributeError as content_err:
+            show_content_err = content_err
+
+        resp_hash = dict(
+            function_call=str(self.partial_response_function_call),
+            content=self.partial_response_content,
+        )
+        resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())
+        self.chat_completion_response_hashes.append(resp_hash.hexdigest())
+
+        if show_func_err and show_content_err:
+            self.io.tool_error(show_func_err)
+            self.io.tool_error(show_content_err)
+            raise Exception("No data found in LLM response!")
+
+        show_resp = self.render_incremental_response(True)
+
+        if reasoning_content:
+            formatted_reasoning = format_reasoning_content(
+                reasoning_content, self.reasoning_tag_name
+            )
+            show_resp = formatted_reasoning + show_resp
+
+        show_resp = replace_reasoning_tags(show_resp, self.reasoning_tag_name)
+
+        self.io.assistant_output(show_resp, pretty=self.show_pretty())
+
+        if (
+            hasattr(completion.choices[0], "finish_reason")
+            and completion.choices[0].finish_reason == "length"
+        ):
+            raise FinishReasonLength()
+
+    def show_send_output_stream(self, completion):
+        received_content = False
+        self.partial_response_tool_call = []
+
+        for chunk in completion:
+            if isinstance(chunk, str):
+                text = chunk
+                received_content = True
+            else:
+                if len(chunk.choices) == 0:
+                    continue
+
+                if (
+                    hasattr(chunk.choices[0], "finish_reason")
+                    and chunk.choices[0].finish_reason == "length"
+                ):
+                    raise FinishReasonLength()
+
+                if chunk.choices[0].delta.tool_calls:
+                    self.partial_response_tool_call.append(chunk)
+
+                try:
+                    func = chunk.choices[0].delta.function_call
+                    # dump(func)
+                    for k, v in func.items():
+                        if k in self.partial_response_function_call:
+                            self.partial_response_function_call[k] += v
+                        else:
+                            self.partial_response_function_call[k] = v
+
+                    received_content = True
+                except AttributeError:
+                    pass
+
+                text = ""
+
+                try:
+                    reasoning_content = chunk.choices[0].delta.reasoning_content
+                except AttributeError:
+                    try:
+                        reasoning_content = chunk.choices[0].delta.reasoning
+                    except AttributeError:
+                        reasoning_content = None
+
+                if reasoning_content:
+                    if not self.got_reasoning_content:
+                        text += f"<{REASONING_TAG}>\n\n"
+                    text += reasoning_content
+                    self.got_reasoning_content = True
+                    received_content = True
+
+                try:
+                    content = chunk.choices[0].delta.content
+                    if content:
+                        if self.got_reasoning_content and not self.ended_reasoning_content:
+                            text += f"\n\n</{self.reasoning_tag_name}>\n\n"
+                            self.ended_reasoning_content = True
+
+                        text += content
+                        received_content = True
+                except AttributeError:
+                    pass
+
+            if received_content:
+                self._stop_waiting_spinner()
+            self.partial_response_content += text
+
+            if self.show_pretty():
+                self.live_incremental_response(False)
+            elif text:
+                # Apply reasoning tag formatting
+                text = replace_reasoning_tags(text, self.reasoning_tag_name)
+                try:
+                    sys.stdout.write(text)
+                except UnicodeEncodeError:
+                    # Safely encode and decode the text
+                    safe_text = text.encode(sys.stdout.encoding, errors="backslashreplace").decode(
+                        sys.stdout.encoding
+                    )
+                    sys.stdout.write(safe_text)
+                sys.stdout.flush()
+                yield text
+
+        if not received_content and len(self.partial_response_tool_call) == 0:
+            self.io.tool_warning("Empty response received from LLM. Check your provider account?")
+
+    def live_incremental_response(self, final):
+        show_resp = self.render_incremental_response(final)
+        # Apply any reasoning tag formatting
+        show_resp = replace_reasoning_tags(show_resp, self.reasoning_tag_name)
+        self.mdstream.update(show_resp, final=final)
+
+    def render_incremental_response(self, final):
+        return self.get_multi_response_content_in_progress()
+
+    def remove_reasoning_content(self):
+        """Remove reasoning content from the model's response."""
+
+        self.partial_response_content = remove_reasoning_content(
+            self.partial_response_content,
+            self.reasoning_tag_name,
+        )
+
+    def calculate_and_show_tokens_and_cost(self, messages, completion=None):
+        prompt_tokens = 0
+        completion_tokens = 0
+        cache_hit_tokens = 0
+        cache_write_tokens = 0
+
+        if completion and hasattr(completion, "usage") and completion.usage is not None:
+            prompt_tokens = completion.usage.prompt_tokens
+            completion_tokens = completion.usage.completion_tokens
+            cache_hit_tokens = getattr(completion.usage, "prompt_cache_hit_tokens", 0) or getattr(
+                completion.usage, "cache_read_input_tokens", 0
+            )
+            cache_write_tokens = getattr(completion.usage, "cache_creation_input_tokens", 0)
+
+            if hasattr(completion.usage, "cache_read_input_tokens") or hasattr(
+                completion.usage, "cache_creation_input_tokens"
+            ):
+                self.message_tokens_sent += prompt_tokens
+                self.message_tokens_sent += cache_write_tokens
+            else:
+                self.message_tokens_sent += prompt_tokens
+
+        else:
+            prompt_tokens = self.main_model.token_count(messages)
+            completion_tokens = self.main_model.token_count(self.partial_response_content)
+            self.message_tokens_sent += prompt_tokens
+
+        self.message_tokens_received += completion_tokens
+
+        tokens_report = f"Tokens: {format_tokens(self.message_tokens_sent)} sent"
+
+        if cache_write_tokens:
+            tokens_report += f", {format_tokens(cache_write_tokens)} cache write"
+        if cache_hit_tokens:
+            tokens_report += f", {format_tokens(cache_hit_tokens)} cache hit"
+        tokens_report += f", {format_tokens(self.message_tokens_received)} received."
+
+        if not self.main_model.info.get("input_cost_per_token"):
+            self.usage_report = tokens_report
+            return
+
+        try:
+            # Try and use litellm's built in cost calculator. Seems to work for non-streaming only?
+            cost = litellm.completion_cost(completion_response=completion)
+        except Exception:
+            cost = 0
+
+        if not cost:
+            cost = self.compute_costs_from_tokens(
+                prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
+            )
+
+        self.total_cost += cost
+        self.message_cost += cost
+
+        def format_cost(value):
+            if value == 0:
+                return "0.00"
+            magnitude = abs(value)
+            if magnitude >= 0.01:
+                return f"{value:.2f}"
+            else:
+                return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"
+
+        cost_report = (
+            f"Cost: ${format_cost(self.message_cost)} message,"
+            f" ${format_cost(self.total_cost)} session."
+        )
+
+        if cache_hit_tokens and cache_write_tokens:
+            sep = "\n"
+        else:
+            sep = " "
+
+        self.usage_report = tokens_report + sep + cost_report
+
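The nested format_cost helper switches from fixed two-decimal output to a dynamic precision for sub-cent values, so tiny per-message costs don't round to $0.00. A quick check of its behavior, repeating the helper standalone:

    import math

    def format_cost(value):
        if value == 0:
            return "0.00"
        magnitude = abs(value)
        if magnitude >= 0.01:
            return f"{value:.2f}"
        return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"

    print(format_cost(0.1234))   # 0.12
    print(format_cost(0.0042))   # 0.0042
    print(format_cost(0.00042))  # 0.00042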
+    def compute_costs_from_tokens(
+        self, prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
+    ):
+        cost = 0
+
+        input_cost_per_token = self.main_model.info.get("input_cost_per_token") or 0
+        output_cost_per_token = self.main_model.info.get("output_cost_per_token") or 0
+        input_cost_per_token_cache_hit = (
+            self.main_model.info.get("input_cost_per_token_cache_hit") or 0
+        )
+
+        # deepseek
+        # prompt_cache_hit_tokens + prompt_cache_miss_tokens
+        #   == prompt_tokens == total tokens that were sent
+        #
+        # Anthropic
+        # cache_creation_input_tokens + cache_read_input_tokens + prompt_tokens
+        #   == total tokens that were sent
+
+        if input_cost_per_token_cache_hit:
+            # must be deepseek: bill cache hits at the cache-hit rate and the
+            # remaining (cache miss) tokens at the normal input rate
+            cost += input_cost_per_token_cache_hit * cache_hit_tokens
+            cost += (prompt_tokens - cache_hit_tokens) * input_cost_per_token
+        else:
+            # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0
+            cost += cache_write_tokens * input_cost_per_token * 1.25
+            cost += cache_hit_tokens * input_cost_per_token * 0.10
+            cost += prompt_tokens * input_cost_per_token
+
+        cost += completion_tokens * output_cost_per_token
+        return cost
+
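The Anthropic-style branch above bills cache writes at a 1.25x surcharge and cache reads at a 0.10x discount relative to the normal input rate. A worked example with hypothetical prices ($3 per million input tokens, $15 per million output tokens):

    input_cost = 3 / 1_000_000    # hypothetical $/input token
    output_cost = 15 / 1_000_000  # hypothetical $/output token

    prompt_tokens = 1_000         # uncached prompt tokens
    cache_write_tokens = 50_000   # written to the cache this turn
    cache_hit_tokens = 50_000     # read back from the cache
    completion_tokens = 2_000

    cost = (
        cache_write_tokens * input_cost * 1.25  # 0.1875
        + cache_hit_tokens * input_cost * 0.10  # 0.0150
        + prompt_tokens * input_cost            # 0.0030
        + completion_tokens * output_cost       # 0.0300
    )
    print(f"${cost:.4f}")  # $0.2355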
+    def show_usage_report(self):
+        if not self.usage_report:
+            return
+
+        self.total_tokens_sent += self.message_tokens_sent
+        self.total_tokens_received += self.message_tokens_received
+
+        self.io.tool_output(self.usage_report)
+
+        prompt_tokens = self.message_tokens_sent
+        completion_tokens = self.message_tokens_received
+        self.event(
+            "message_send",
+            main_model=self.main_model,
+            edit_format=self.edit_format,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=prompt_tokens + completion_tokens,
+            cost=self.message_cost,
+            total_cost=self.total_cost,
+        )
+
+        self.message_cost = 0.0
+        self.message_tokens_sent = 0
+        self.message_tokens_received = 0
+
+    def get_multi_response_content_in_progress(self, final=False):
+        cur = self.multi_response_content or ""
+        new = self.partial_response_content or ""
+
+        if new.rstrip() != new and not final:
+            new = new.rstrip()
+
+        return cur + new
+
+    def get_rel_fname(self, fname):
+        try:
+            return os.path.relpath(fname, self.root)
+        except ValueError:
+            return fname
+
+    def get_inchat_relative_files(self):
+        files = [self.get_rel_fname(fname) for fname in self.abs_fnames]
+        return sorted(set(files))
+
+    def is_file_safe(self, fname):
+        try:
+            return Path(self.abs_root_path(fname)).is_file()
+        except OSError:
+            return
+
+    def get_all_relative_files(self):
+        if self.repo:
+            files = self.repo.get_tracked_files()
+        else:
+            files = self.get_inchat_relative_files()
+
+        # This is quite slow in large repos
+        # files = [fname for fname in files if self.is_file_safe(fname)]
+
+        return sorted(set(files))
+
+    def get_all_abs_files(self):
+        files = self.get_all_relative_files()
+        files = [self.abs_root_path(path) for path in files]
+        return files
+
+    def get_addable_relative_files(self):
+        all_files = set(self.get_all_relative_files())
+        inchat_files = set(self.get_inchat_relative_files())
+        read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)
+        return all_files - inchat_files - read_only_files
+
+    def check_for_dirty_commit(self, path):
+        if not self.repo:
+            return
+        if not self.dirty_commits:
+            return
+        if not self.repo.is_dirty(path):
+            return
+
+        # We need a committed copy of the file in order to /undo, so skip this
+        # fullp = Path(self.abs_root_path(path))
+        # if not fullp.stat().st_size:
+        #     return
+
+        self.io.tool_output(f"Committing {path} before applying edits.")
+        self.need_commit_before_edits.add(path)
+
+    def allowed_to_edit(self, path):
+        full_path = self.abs_root_path(path)
+        if self.repo:
+            need_to_add = not self.repo.path_in_repo(path)
+        else:
+            need_to_add = False
+
+        if full_path in self.abs_fnames:
+            self.check_for_dirty_commit(path)
+            return True
+
+        if self.repo and self.repo.git_ignored_file(path):
+            self.io.tool_warning(f"Skipping edits to {path} that matches gitignore spec.")
+            return
+
+        if not Path(full_path).exists():
+            if not self.io.confirm_ask("Create new file?", subject=path):
+                self.io.tool_output(f"Skipping edits to {path}")
+                return
+
+            if not self.dry_run:
+                if not utils.touch_file(full_path):
+                    self.io.tool_error(f"Unable to create {path}, skipping edits.")
+                    return
+
+                # Seems unlikely that we needed to create the file, but it was
+                # actually already part of the repo.
+                # But let's only add if we need to, just to be safe.
+                if need_to_add:
+                    self.repo.repo.git.add(full_path)
+
+            self.abs_fnames.add(full_path)
+            self.check_added_files()
+            return True
+
+        if not self.io.confirm_ask(
+            "Allow edits to file that has not been added to the chat?",
+            subject=path,
+        ):
+            self.io.tool_output(f"Skipping edits to {path}")
+            return
+
+        if need_to_add:
+            self.repo.repo.git.add(full_path)
+
+        self.abs_fnames.add(full_path)
+        self.check_added_files()
+        self.check_for_dirty_commit(path)
+
+        return True
+
+    warning_given = False
+
+    def check_added_files(self):
+        if self.warning_given:
+            return
+
+        warn_number_of_files = 4
+        warn_number_of_tokens = 20 * 1024
+
+        num_files = len(self.abs_fnames)
+        if num_files < warn_number_of_files:
+            return
+
+        tokens = 0
+        for fname in self.abs_fnames:
+            if is_image_file(fname):
+                continue
+            content = self.io.read_text(fname)
+            tokens += self.main_model.token_count(content)
+
+        if tokens < warn_number_of_tokens:
+            return
+
+        self.io.tool_warning("Warning: it's best to only add files that need changes to the chat.")
+        self.io.tool_warning(urls.edit_errors)
+        self.warning_given = True
+
+    def prepare_to_edit(self, edits):
+        res = []
+        seen = dict()
+
+        self.need_commit_before_edits = set()
+
+        for edit in edits:
+            path = edit[0]
+            if path is None:
+                res.append(edit)
+                continue
+            if path == "python":
+                dump(edits)
+            if path in seen:
+                allowed = seen[path]
+            else:
+                allowed = self.allowed_to_edit(path)
+                seen[path] = allowed
+
+            if allowed:
+                res.append(edit)
+
+        self.dirty_commit()
+        self.need_commit_before_edits = set()
+
+        return res
+
+    def apply_updates(self):
+        edited = set()
+        try:
+            edits = self.get_edits()
+            edits = self.apply_edits_dry_run(edits)
+            edits = self.prepare_to_edit(edits)
+            edited = set(edit[0] for edit in edits)
+
+            self.apply_edits(edits)
+        except ValueError as err:
+            self.num_malformed_responses += 1
+
+            err = err.args[0]
+
+            self.io.tool_error("The LLM did not conform to the edit format.")
+            self.io.tool_output(urls.edit_errors)
+            self.io.tool_output()
+            self.io.tool_output(str(err))
+
+            self.reflected_message = str(err)
+            return edited
+
+        except ANY_GIT_ERROR as err:
+            self.io.tool_error(str(err))
+            return edited
+        except Exception as err:
+            self.io.tool_error("Exception while updating files:")
+            self.io.tool_error(str(err), strip=False)
+
+            traceback.print_exc()
+
+            self.reflected_message = str(err)
+            return edited
+
+        for path in edited:
+            if self.dry_run:
+                self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
+            else:
+                self.io.tool_output(f"Applied edit to {path}")
+
+        return edited
+
+    def parse_partial_args(self):
+        # dump(self.partial_response_function_call)
+
+        data = self.partial_response_function_call.get("arguments")
+        if not data:
+            return
+
+        try:
+            return json.loads(data)
+        except JSONDecodeError:
+            pass
+
+        try:
+            return json.loads(data + "]}")
+        except JSONDecodeError:
+            pass
+
+        try:
+            return json.loads(data + "}]}")
+        except JSONDecodeError:
+            pass
+
+        try:
+            return json.loads(data + '"}]}')
+        except JSONDecodeError:
+            pass
+
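parse_partial_args tries a fixed ladder of closing suffixes to repair function-call JSON that was cut off mid-stream. The same idea in isolation, with a made-up truncated payload:

    import json

    truncated = '{"explanation": "Add tests", "edits": [{"path": "a.py'
    for suffix in ("", "]}", "}]}", '"}]}'):
        try:
            print(json.loads(truncated + suffix))
            break
        except json.JSONDecodeError:
            continue
    # {'explanation': 'Add tests', 'edits': [{'path': 'a.py'}]}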
+    def _find_occurrences(self, content, pattern, near_context=None):
+        """Find all occurrences of pattern, optionally filtered by near_context."""
+        occurrences = []
+        start = 0
+        while True:
+            index = content.find(pattern, start)
+            if index == -1:
+                break
+
+            if near_context:
+                # Check if near_context is within a window around the match
+                window_start = max(0, index - 200)
+                window_end = min(len(content), index + len(pattern) + 200)
+                window = content[window_start:window_end]
+                if near_context in window:
+                    occurrences.append(index)
+            else:
+                occurrences.append(index)
+
+            start = index + 1  # Move past this occurrence's start
+        return occurrences
+
+    # commits...
+
+    def get_context_from_history(self, history):
+        context = ""
+        if history:
+            for msg in history:
+                msg_content = msg.get("content") or ""
+                context += "\n" + msg["role"].upper() + ": " + msg_content + "\n"
+
+        return context
+
+    def auto_commit(self, edited, context=None):
+        if not self.repo or not self.auto_commits or self.dry_run:
+            return
+
+        if not context:
+            context = self.get_context_from_history(self.cur_messages)
+
+        try:
+            res = self.repo.commit(fnames=edited, context=context, aider_edits=True, coder=self)
+            if res:
+                self.show_auto_commit_outcome(res)
+                commit_hash, commit_message = res
+                return self.gpt_prompts.files_content_gpt_edits.format(
+                    hash=commit_hash,
+                    message=commit_message,
+                )
+
+            return self.gpt_prompts.files_content_gpt_no_edits
+        except ANY_GIT_ERROR as err:
+            self.io.tool_error(f"Unable to commit: {str(err)}")
+            return
+
+    def show_auto_commit_outcome(self, res):
+        commit_hash, commit_message = res
+        self.last_aider_commit_hash = commit_hash
+        self.aider_commit_hashes.add(commit_hash)
+        self.last_aider_commit_message = commit_message
+        if self.show_diffs:
+            self.commands.cmd_diff()
+
+    def show_undo_hint(self):
+        if not self.commit_before_message:
+            return
+        if self.commit_before_message[-1] != self.repo.get_head_commit_sha():
+            self.io.tool_output("You can use /undo to undo and discard each aider commit.")
+
+    def dirty_commit(self):
+        if not self.need_commit_before_edits:
+            return
+        if not self.dirty_commits:
+            return
+        if not self.repo:
+            return
+
+        self.repo.commit(fnames=self.need_commit_before_edits, coder=self)
+
+        # files changed, move cur messages back behind the files messages
+        # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
+        return True
+
+    def get_edits(self, mode="update"):
+        return []
+
+    def apply_edits(self, edits):
+        return
+
+    def apply_edits_dry_run(self, edits):
+        return edits
+
+    def run_shell_commands(self):
+        if not self.suggest_shell_commands:
+            return ""
+
+        done = set()
+        group = ConfirmGroup(set(self.shell_commands))
+        accumulated_output = ""
+        for command in self.shell_commands:
+            if command in done:
+                continue
+            done.add(command)
+            output = self.handle_shell_commands(command, group)
+            if output:
+                accumulated_output += output + "\n\n"
+        return accumulated_output
+
+    def handle_shell_commands(self, commands_str, group):
+        commands = commands_str.strip().splitlines()
+        command_count = sum(
+            1 for cmd in commands if cmd.strip() and not cmd.strip().startswith("#")
+        )
+        prompt = "Run shell command?" if command_count == 1 else "Run shell commands?"
+        if not self.io.confirm_ask(
+            prompt,
+            subject="\n".join(commands),
+            explicit_yes_required=True,
+            group=group,
+            allow_never=True,
+        ):
+            return
+
+        accumulated_output = ""
+        for command in commands:
+            command = command.strip()
+            if not command or command.startswith("#"):
+                continue
+
+            self.io.tool_output()
+            self.io.tool_output(f"Running {command}")
+            # Add the command to input history
+            self.io.add_to_input_history(f"/run {command.strip()}")
+            exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)
+            if output:
+                accumulated_output += f"Output from {command}\n{output}\n"
+
+        if accumulated_output.strip() and self.io.confirm_ask(
+            "Add command output to the chat?", allow_never=True
+        ):
+            num_lines = len(accumulated_output.strip().splitlines())
+            line_plural = "line" if num_lines == 1 else "lines"
+            self.io.tool_output(f"Added {num_lines} {line_plural} of output to the chat.")
+            return accumulated_output