chatmcp-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aider/__init__.py +20 -0
- aider/__main__.py +4 -0
- aider/_version.py +21 -0
- aider/analytics.py +250 -0
- aider/args.py +926 -0
- aider/args_formatter.py +228 -0
- aider/coders/__init__.py +34 -0
- aider/coders/architect_coder.py +48 -0
- aider/coders/architect_prompts.py +40 -0
- aider/coders/ask_coder.py +9 -0
- aider/coders/ask_prompts.py +35 -0
- aider/coders/base_coder.py +2483 -0
- aider/coders/base_prompts.py +60 -0
- aider/coders/chat_chunks.py +64 -0
- aider/coders/context_coder.py +53 -0
- aider/coders/context_prompts.py +75 -0
- aider/coders/editblock_coder.py +657 -0
- aider/coders/editblock_fenced_coder.py +10 -0
- aider/coders/editblock_fenced_prompts.py +143 -0
- aider/coders/editblock_func_coder.py +141 -0
- aider/coders/editblock_func_prompts.py +27 -0
- aider/coders/editblock_prompts.py +174 -0
- aider/coders/editor_diff_fenced_coder.py +9 -0
- aider/coders/editor_diff_fenced_prompts.py +11 -0
- aider/coders/editor_editblock_coder.py +8 -0
- aider/coders/editor_editblock_prompts.py +18 -0
- aider/coders/editor_whole_coder.py +8 -0
- aider/coders/editor_whole_prompts.py +10 -0
- aider/coders/help_coder.py +16 -0
- aider/coders/help_prompts.py +46 -0
- aider/coders/patch_coder.py +706 -0
- aider/coders/patch_prompts.py +161 -0
- aider/coders/search_replace.py +757 -0
- aider/coders/shell.py +37 -0
- aider/coders/single_wholefile_func_coder.py +102 -0
- aider/coders/single_wholefile_func_prompts.py +27 -0
- aider/coders/udiff_coder.py +429 -0
- aider/coders/udiff_prompts.py +115 -0
- aider/coders/udiff_simple.py +14 -0
- aider/coders/udiff_simple_prompts.py +25 -0
- aider/coders/wholefile_coder.py +144 -0
- aider/coders/wholefile_func_coder.py +134 -0
- aider/coders/wholefile_func_prompts.py +27 -0
- aider/coders/wholefile_prompts.py +67 -0
- aider/commands.py +1665 -0
- aider/copypaste.py +72 -0
- aider/deprecated.py +126 -0
- aider/diffs.py +128 -0
- aider/dump.py +29 -0
- aider/editor.py +147 -0
- aider/exceptions.py +107 -0
- aider/format_settings.py +26 -0
- aider/gui.py +545 -0
- aider/help.py +163 -0
- aider/help_pats.py +19 -0
- aider/history.py +143 -0
- aider/io.py +1175 -0
- aider/linter.py +304 -0
- aider/llm.py +47 -0
- aider/main.py +1267 -0
- aider/mdstream.py +243 -0
- aider/models.py +1286 -0
- aider/onboarding.py +428 -0
- aider/openrouter.py +128 -0
- aider/prompts.py +64 -0
- aider/queries/tree-sitter-language-pack/README.md +7 -0
- aider/queries/tree-sitter-language-pack/arduino-tags.scm +5 -0
- aider/queries/tree-sitter-language-pack/c-tags.scm +9 -0
- aider/queries/tree-sitter-language-pack/chatito-tags.scm +16 -0
- aider/queries/tree-sitter-language-pack/commonlisp-tags.scm +122 -0
- aider/queries/tree-sitter-language-pack/cpp-tags.scm +15 -0
- aider/queries/tree-sitter-language-pack/csharp-tags.scm +26 -0
- aider/queries/tree-sitter-language-pack/d-tags.scm +26 -0
- aider/queries/tree-sitter-language-pack/dart-tags.scm +92 -0
- aider/queries/tree-sitter-language-pack/elisp-tags.scm +5 -0
- aider/queries/tree-sitter-language-pack/elixir-tags.scm +54 -0
- aider/queries/tree-sitter-language-pack/elm-tags.scm +19 -0
- aider/queries/tree-sitter-language-pack/gleam-tags.scm +41 -0
- aider/queries/tree-sitter-language-pack/go-tags.scm +42 -0
- aider/queries/tree-sitter-language-pack/java-tags.scm +20 -0
- aider/queries/tree-sitter-language-pack/javascript-tags.scm +88 -0
- aider/queries/tree-sitter-language-pack/lua-tags.scm +34 -0
- aider/queries/tree-sitter-language-pack/ocaml-tags.scm +115 -0
- aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm +98 -0
- aider/queries/tree-sitter-language-pack/pony-tags.scm +39 -0
- aider/queries/tree-sitter-language-pack/properties-tags.scm +5 -0
- aider/queries/tree-sitter-language-pack/python-tags.scm +14 -0
- aider/queries/tree-sitter-language-pack/r-tags.scm +21 -0
- aider/queries/tree-sitter-language-pack/racket-tags.scm +12 -0
- aider/queries/tree-sitter-language-pack/ruby-tags.scm +64 -0
- aider/queries/tree-sitter-language-pack/rust-tags.scm +60 -0
- aider/queries/tree-sitter-language-pack/solidity-tags.scm +43 -0
- aider/queries/tree-sitter-language-pack/swift-tags.scm +51 -0
- aider/queries/tree-sitter-language-pack/udev-tags.scm +20 -0
- aider/queries/tree-sitter-languages/README.md +23 -0
- aider/queries/tree-sitter-languages/c-tags.scm +9 -0
- aider/queries/tree-sitter-languages/c_sharp-tags.scm +46 -0
- aider/queries/tree-sitter-languages/cpp-tags.scm +15 -0
- aider/queries/tree-sitter-languages/dart-tags.scm +91 -0
- aider/queries/tree-sitter-languages/elisp-tags.scm +8 -0
- aider/queries/tree-sitter-languages/elixir-tags.scm +54 -0
- aider/queries/tree-sitter-languages/elm-tags.scm +19 -0
- aider/queries/tree-sitter-languages/go-tags.scm +30 -0
- aider/queries/tree-sitter-languages/hcl-tags.scm +77 -0
- aider/queries/tree-sitter-languages/java-tags.scm +20 -0
- aider/queries/tree-sitter-languages/javascript-tags.scm +88 -0
- aider/queries/tree-sitter-languages/kotlin-tags.scm +27 -0
- aider/queries/tree-sitter-languages/ocaml-tags.scm +115 -0
- aider/queries/tree-sitter-languages/ocaml_interface-tags.scm +98 -0
- aider/queries/tree-sitter-languages/php-tags.scm +26 -0
- aider/queries/tree-sitter-languages/python-tags.scm +12 -0
- aider/queries/tree-sitter-languages/ql-tags.scm +26 -0
- aider/queries/tree-sitter-languages/ruby-tags.scm +64 -0
- aider/queries/tree-sitter-languages/rust-tags.scm +60 -0
- aider/queries/tree-sitter-languages/scala-tags.scm +65 -0
- aider/queries/tree-sitter-languages/typescript-tags.scm +41 -0
- aider/reasoning_tags.py +82 -0
- aider/repo.py +623 -0
- aider/repomap.py +847 -0
- aider/report.py +200 -0
- aider/resources/__init__.py +3 -0
- aider/resources/model-metadata.json +468 -0
- aider/resources/model-settings.yml +1767 -0
- aider/run_cmd.py +132 -0
- aider/scrape.py +284 -0
- aider/sendchat.py +61 -0
- aider/special.py +203 -0
- aider/urls.py +17 -0
- aider/utils.py +338 -0
- aider/versioncheck.py +113 -0
- aider/voice.py +187 -0
- aider/waiting.py +221 -0
- aider/watch.py +318 -0
- aider/watch_prompts.py +12 -0
- aider/website/Gemfile +8 -0
- aider/website/_includes/blame.md +162 -0
- aider/website/_includes/get-started.md +22 -0
- aider/website/_includes/help-tip.md +5 -0
- aider/website/_includes/help.md +24 -0
- aider/website/_includes/install.md +5 -0
- aider/website/_includes/keys.md +4 -0
- aider/website/_includes/model-warnings.md +67 -0
- aider/website/_includes/multi-line.md +22 -0
- aider/website/_includes/python-m-aider.md +5 -0
- aider/website/_includes/recording.css +228 -0
- aider/website/_includes/recording.md +34 -0
- aider/website/_includes/replit-pipx.md +9 -0
- aider/website/_includes/works-best.md +1 -0
- aider/website/_sass/custom/custom.scss +103 -0
- aider/website/docs/config/adv-model-settings.md +1881 -0
- aider/website/docs/config/aider_conf.md +527 -0
- aider/website/docs/config/api-keys.md +90 -0
- aider/website/docs/config/dotenv.md +478 -0
- aider/website/docs/config/editor.md +127 -0
- aider/website/docs/config/model-aliases.md +103 -0
- aider/website/docs/config/options.md +843 -0
- aider/website/docs/config/reasoning.md +209 -0
- aider/website/docs/config.md +44 -0
- aider/website/docs/faq.md +378 -0
- aider/website/docs/git.md +76 -0
- aider/website/docs/index.md +47 -0
- aider/website/docs/install/codespaces.md +39 -0
- aider/website/docs/install/docker.md +57 -0
- aider/website/docs/install/optional.md +100 -0
- aider/website/docs/install/replit.md +8 -0
- aider/website/docs/install.md +115 -0
- aider/website/docs/languages.md +264 -0
- aider/website/docs/legal/contributor-agreement.md +111 -0
- aider/website/docs/legal/privacy.md +104 -0
- aider/website/docs/llms/anthropic.md +77 -0
- aider/website/docs/llms/azure.md +48 -0
- aider/website/docs/llms/bedrock.md +132 -0
- aider/website/docs/llms/cohere.md +34 -0
- aider/website/docs/llms/deepseek.md +32 -0
- aider/website/docs/llms/gemini.md +49 -0
- aider/website/docs/llms/github.md +105 -0
- aider/website/docs/llms/groq.md +36 -0
- aider/website/docs/llms/lm-studio.md +39 -0
- aider/website/docs/llms/ollama.md +75 -0
- aider/website/docs/llms/openai-compat.md +39 -0
- aider/website/docs/llms/openai.md +58 -0
- aider/website/docs/llms/openrouter.md +78 -0
- aider/website/docs/llms/other.md +103 -0
- aider/website/docs/llms/vertex.md +50 -0
- aider/website/docs/llms/warnings.md +10 -0
- aider/website/docs/llms/xai.md +53 -0
- aider/website/docs/llms.md +54 -0
- aider/website/docs/more/analytics.md +122 -0
- aider/website/docs/more/edit-formats.md +116 -0
- aider/website/docs/more/infinite-output.md +137 -0
- aider/website/docs/more-info.md +8 -0
- aider/website/docs/recordings/auto-accept-architect.md +31 -0
- aider/website/docs/recordings/dont-drop-original-read-files.md +35 -0
- aider/website/docs/recordings/index.md +21 -0
- aider/website/docs/recordings/model-accepts-settings.md +69 -0
- aider/website/docs/recordings/tree-sitter-language-pack.md +80 -0
- aider/website/docs/repomap.md +112 -0
- aider/website/docs/scripting.md +100 -0
- aider/website/docs/troubleshooting/aider-not-found.md +24 -0
- aider/website/docs/troubleshooting/edit-errors.md +76 -0
- aider/website/docs/troubleshooting/imports.md +62 -0
- aider/website/docs/troubleshooting/models-and-keys.md +54 -0
- aider/website/docs/troubleshooting/support.md +79 -0
- aider/website/docs/troubleshooting/token-limits.md +96 -0
- aider/website/docs/troubleshooting/warnings.md +12 -0
- aider/website/docs/troubleshooting.md +11 -0
- aider/website/docs/usage/browser.md +57 -0
- aider/website/docs/usage/caching.md +49 -0
- aider/website/docs/usage/commands.md +132 -0
- aider/website/docs/usage/conventions.md +119 -0
- aider/website/docs/usage/copypaste.md +121 -0
- aider/website/docs/usage/images-urls.md +48 -0
- aider/website/docs/usage/lint-test.md +118 -0
- aider/website/docs/usage/modes.md +211 -0
- aider/website/docs/usage/not-code.md +179 -0
- aider/website/docs/usage/notifications.md +87 -0
- aider/website/docs/usage/tips.md +79 -0
- aider/website/docs/usage/tutorials.md +30 -0
- aider/website/docs/usage/voice.md +121 -0
- aider/website/docs/usage/watch.md +294 -0
- aider/website/docs/usage.md +92 -0
- aider/website/share/index.md +101 -0
- chatmcp_cli-0.1.0.dist-info/METADATA +502 -0
- chatmcp_cli-0.1.0.dist-info/RECORD +228 -0
- chatmcp_cli-0.1.0.dist-info/WHEEL +5 -0
- chatmcp_cli-0.1.0.dist-info/entry_points.txt +3 -0
- chatmcp_cli-0.1.0.dist-info/licenses/LICENSE.txt +202 -0
- chatmcp_cli-0.1.0.dist-info/top_level.txt +1 -0
aider/coders/base_coder.py
@@ -0,0 +1,2483 @@
#!/usr/bin/env python

import base64
import hashlib
import json
import locale
import math
import mimetypes
import os
import platform
import re
import sys
import threading
import time
import traceback
from collections import defaultdict
from datetime import datetime

# Optional dependency: used to convert locale codes (eg ``en_US``)
# into human-readable language names (eg ``English``).
try:
    from babel import Locale  # type: ignore
except ImportError:  # Babel not installed – we will fall back to a small mapping
    Locale = None
from json.decoder import JSONDecodeError
from pathlib import Path
from typing import List

from rich.console import Console

from aider import __version__, models, prompts, urls, utils
from aider.analytics import Analytics
from aider.commands import Commands
from aider.exceptions import LiteLLMExceptions
from aider.history import ChatSummary
from aider.io import ConfirmGroup, InputOutput
from aider.linter import Linter
from aider.llm import litellm
from aider.models import RETRY_TIMEOUT
from aider.reasoning_tags import (
    REASONING_TAG,
    format_reasoning_content,
    remove_reasoning_content,
    replace_reasoning_tags,
)
from aider.repo import ANY_GIT_ERROR, GitRepo
from aider.repomap import RepoMap
from aider.run_cmd import run_cmd
from aider.utils import format_content, format_messages, format_tokens, is_image_file
from aider.waiting import WaitingSpinner

from ..dump import dump  # noqa: F401
from .chat_chunks import ChatChunks


class UnknownEditFormat(ValueError):
    def __init__(self, edit_format, valid_formats):
        self.edit_format = edit_format
        self.valid_formats = valid_formats
        super().__init__(
            f"Unknown edit format {edit_format}. Valid formats are: {', '.join(valid_formats)}"
        )


class MissingAPIKeyError(ValueError):
    pass


class FinishReasonLength(Exception):
    pass


def wrap_fence(name):
    return f"<{name}>", f"</{name}>"


all_fences = [
    ("`" * 3, "`" * 3),
    ("`" * 4, "`" * 4),  # LLMs ignore and revert to triple-backtick, causing #2879
    wrap_fence("source"),
    wrap_fence("code"),
    wrap_fence("pre"),
    wrap_fence("codeblock"),
    wrap_fence("sourcecode"),
]
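# choose_fence() below scans every file in the chat and picks the first
# (open, close) pair from this list whose markers do not appear at the start
# of any line, so file contents can always be fenced unambiguously.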


class Coder:
    abs_fnames = None
    abs_read_only_fnames = None
    repo = None
    last_aider_commit_hash = None
    aider_edited_files = None
    last_asked_for_commit_time = 0
    repo_map = None
    functions = None
    num_exhausted_context_windows = 0
    num_malformed_responses = 0
    last_keyboard_interrupt = None
    num_reflections = 0
    max_reflections = 3
    edit_format = None
    yield_stream = False
    temperature = None
    auto_lint = True
    auto_test = False
    test_cmd = None
    lint_outcome = None
    test_outcome = None
    multi_response_content = ""
    partial_response_content = ""
    commit_before_message = []
    message_cost = 0.0
    add_cache_headers = False
    cache_warming_thread = None
    num_cache_warming_pings = 0
    suggest_shell_commands = True
    detect_urls = True
    ignore_mentions = None
    chat_language = None
    commit_language = None
    file_watcher = None

    @classmethod
    def create(
        self,
        main_model=None,
        edit_format=None,
        io=None,
        from_coder=None,
        summarize_from_coder=True,
        **kwargs,
    ):
        import aider.coders as coders

        if not main_model:
            if from_coder:
                main_model = from_coder.main_model
            else:
                main_model = models.Model(models.DEFAULT_MODEL_NAME)

        if edit_format == "code":
            edit_format = None
        if edit_format is None:
            if from_coder:
                edit_format = from_coder.edit_format
            else:
                edit_format = main_model.edit_format

        if not io and from_coder:
            io = from_coder.io

        if from_coder:
            use_kwargs = dict(from_coder.original_kwargs)  # copy orig kwargs

            # If the edit format changes, we can't leave old ASSISTANT
            # messages in the chat history. The old edit format will
            # confuse the new LLM. It may try to imitate it, disobeying
            # the system prompt.
            done_messages = from_coder.done_messages
            if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:
                try:
                    done_messages = from_coder.summarizer.summarize_all(done_messages)
                except ValueError:
                    # If summarization fails, keep the original messages and warn the user
                    io.tool_warning(
                        "Chat history summarization failed, continuing with full history"
                    )

            # Bring along context from the old Coder
            update = dict(
                fnames=list(from_coder.abs_fnames),
                read_only_fnames=list(from_coder.abs_read_only_fnames),  # Copy read-only files
                done_messages=done_messages,
                cur_messages=from_coder.cur_messages,
                aider_commit_hashes=from_coder.aider_commit_hashes,
                commands=from_coder.commands.clone(),
                total_cost=from_coder.total_cost,
                ignore_mentions=from_coder.ignore_mentions,
                total_tokens_sent=from_coder.total_tokens_sent,
                total_tokens_received=from_coder.total_tokens_received,
                file_watcher=from_coder.file_watcher,
            )
            use_kwargs.update(update)  # override to complete the switch
            use_kwargs.update(kwargs)  # override passed kwargs

            kwargs = use_kwargs
            from_coder.ok_to_warm_cache = False

        for coder in coders.__all__:
            if hasattr(coder, "edit_format") and coder.edit_format == edit_format:
                res = coder(main_model, io, **kwargs)
                res.original_kwargs = dict(kwargs)
                return res

        valid_formats = [
            str(c.edit_format)
            for c in coders.__all__
            if hasattr(c, "edit_format") and c.edit_format is not None
        ]
        raise UnknownEditFormat(edit_format, valid_formats)
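
    # clone() re-invokes the factory above with from_coder=self, so a Coder
    # created for a different edit format inherits the chat files, history,
    # commit hashes and cost totals of the coder it replaces.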
    def clone(self, **kwargs):
        new_coder = Coder.create(from_coder=self, **kwargs)
        return new_coder

    def get_announcements(self):
        lines = []
        lines.append(f"Aider v{__version__}")

        # Model
        main_model = self.main_model
        weak_model = main_model.weak_model

        if weak_model is not main_model:
            prefix = "Main model"
        else:
            prefix = "Model"

        output = f"{prefix}: {main_model.name} with {self.edit_format} edit format"

        # Check for thinking token budget
        thinking_tokens = main_model.get_thinking_tokens()
        if thinking_tokens:
            output += f", {thinking_tokens} think tokens"

        # Check for reasoning effort
        reasoning_effort = main_model.get_reasoning_effort()
        if reasoning_effort:
            output += f", reasoning {reasoning_effort}"

        if self.add_cache_headers or main_model.caches_by_default:
            output += ", prompt cache"
        if main_model.info.get("supports_assistant_prefill"):
            output += ", infinite output"

        lines.append(output)

        if self.edit_format == "architect":
            output = (
                f"Editor model: {main_model.editor_model.name} with"
                f" {main_model.editor_edit_format} edit format"
            )
            lines.append(output)

        if weak_model is not main_model:
            output = f"Weak model: {weak_model.name}"
            lines.append(output)

        # Repo
        if self.repo:
            rel_repo_dir = self.repo.get_rel_repo_dir()
            num_files = len(self.repo.get_tracked_files())

            lines.append(f"Git repo: {rel_repo_dir} with {num_files:,} files")
            if num_files > 1000:
                lines.append(
                    "Warning: For large repos, consider using --subtree-only and .aiderignore"
                )
                lines.append(f"See: {urls.large_repos}")
        else:
            lines.append("Git repo: none")

        # Repo-map
        if self.repo_map:
            map_tokens = self.repo_map.max_map_tokens
            if map_tokens > 0:
                refresh = self.repo_map.refresh
                lines.append(f"Repo-map: using {map_tokens} tokens, {refresh} refresh")
                max_map_tokens = self.main_model.get_repo_map_tokens() * 2
                if map_tokens > max_map_tokens:
                    lines.append(
                        f"Warning: map-tokens > {max_map_tokens} is not recommended. Too much"
                        " irrelevant code can confuse LLMs."
                    )
            else:
                lines.append("Repo-map: disabled because map_tokens == 0")
        else:
            lines.append("Repo-map: disabled")

        # Files
        for fname in self.get_inchat_relative_files():
            lines.append(f"Added {fname} to the chat.")

        for fname in self.abs_read_only_fnames:
            rel_fname = self.get_rel_fname(fname)
            lines.append(f"Added {rel_fname} to the chat (read-only).")

        if self.done_messages:
            lines.append("Restored previous conversation history.")

        if self.io.multiline_mode:
            lines.append("Multiline mode: Enabled. Enter inserts newline, Alt-Enter submits text")

        return lines

    ok_to_warm_cache = False

    def __init__(
        self,
        main_model,
        io,
        repo=None,
        fnames=None,
        read_only_fnames=None,
        show_diffs=False,
        auto_commits=True,
        dirty_commits=True,
        dry_run=False,
        map_tokens=1024,
        verbose=False,
        stream=True,
        use_git=True,
        cur_messages=None,
        done_messages=None,
        restore_chat_history=False,
        auto_lint=True,
        auto_test=False,
        lint_cmds=None,
        test_cmd=None,
        aider_commit_hashes=None,
        map_mul_no_files=8,
        commands=None,
        summarizer=None,
        total_cost=0.0,
        analytics=None,
        map_refresh="auto",
        cache_prompts=False,
        num_cache_warming_pings=0,
        suggest_shell_commands=True,
        chat_language=None,
        commit_language=None,
        detect_urls=True,
        ignore_mentions=None,
        total_tokens_sent=0,
        total_tokens_received=0,
        file_watcher=None,
        auto_copy_context=False,
        auto_accept_architect=True,
    ):
        # Fill in a dummy Analytics if needed, but it is never .enable()'d
        self.analytics = analytics if analytics is not None else Analytics()

        self.event = self.analytics.event
        self.chat_language = chat_language
        self.commit_language = commit_language
        self.commit_before_message = []
        self.aider_commit_hashes = set()
        self.rejected_urls = set()
        self.abs_root_path_cache = {}

        self.auto_copy_context = auto_copy_context
        self.auto_accept_architect = auto_accept_architect

        self.ignore_mentions = ignore_mentions
        if not self.ignore_mentions:
            self.ignore_mentions = set()

        self.file_watcher = file_watcher
        if self.file_watcher:
            self.file_watcher.coder = self

        self.suggest_shell_commands = suggest_shell_commands
        self.detect_urls = detect_urls

        self.num_cache_warming_pings = num_cache_warming_pings

        if not fnames:
            fnames = []

        if io is None:
            io = InputOutput()

        if aider_commit_hashes:
            self.aider_commit_hashes = aider_commit_hashes
        else:
            self.aider_commit_hashes = set()

        self.chat_completion_call_hashes = []
        self.chat_completion_response_hashes = []
        self.need_commit_before_edits = set()

        self.total_cost = total_cost
        self.total_tokens_sent = total_tokens_sent
        self.total_tokens_received = total_tokens_received
        self.message_tokens_sent = 0
        self.message_tokens_received = 0

        self.verbose = verbose
        self.abs_fnames = set()
        self.abs_read_only_fnames = set()

        if cur_messages:
            self.cur_messages = cur_messages
        else:
            self.cur_messages = []

        if done_messages:
            self.done_messages = done_messages
        else:
            self.done_messages = []

        self.io = io

        self.shell_commands = []

        if not auto_commits:
            dirty_commits = False

        self.auto_commits = auto_commits
        self.dirty_commits = dirty_commits

        self.dry_run = dry_run
        self.pretty = self.io.pretty

        self.main_model = main_model
        # Set the reasoning tag name based on model settings or default
        self.reasoning_tag_name = (
            self.main_model.reasoning_tag if self.main_model.reasoning_tag else REASONING_TAG
        )

        self.stream = stream and main_model.streaming

        if cache_prompts and self.main_model.cache_control:
            self.add_cache_headers = True

        self.show_diffs = show_diffs

        self.commands = commands or Commands(self.io, self)
        self.commands.coder = self
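
        # If no GitRepo was passed in, try to discover one from the chat
        # files; GitRepo raising FileNotFoundError is treated as "not inside
        # a git repo".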
        self.repo = repo
        if use_git and self.repo is None:
            try:
                self.repo = GitRepo(
                    self.io,
                    fnames,
                    None,
                    models=main_model.commit_message_models(),
                )
            except FileNotFoundError:
                pass

        if self.repo:
            self.root = self.repo.root

        for fname in fnames:
            fname = Path(fname)
            if self.repo and self.repo.git_ignored_file(fname):
                self.io.tool_warning(f"Skipping {fname} that matches gitignore spec.")
                continue

            if self.repo and self.repo.ignored_file(fname):
                self.io.tool_warning(f"Skipping {fname} that matches aiderignore spec.")
                continue

            if not fname.exists():
                if utils.touch_file(fname):
                    self.io.tool_output(f"Creating empty file {fname}")
                else:
                    self.io.tool_warning(f"Can not create {fname}, skipping.")
                    continue

            if not fname.is_file():
                self.io.tool_warning(f"Skipping {fname} that is not a normal file.")
                continue

            fname = str(fname.resolve())

            self.abs_fnames.add(fname)
            self.check_added_files()

        if not self.repo:
            self.root = utils.find_common_root(self.abs_fnames)

        if read_only_fnames:
            self.abs_read_only_fnames = set()
            for fname in read_only_fnames:
                abs_fname = self.abs_root_path(fname)
                if os.path.exists(abs_fname):
                    self.abs_read_only_fnames.add(abs_fname)
                else:
                    self.io.tool_warning(f"Error: Read-only file {fname} does not exist. Skipping.")

        if map_tokens is None:
            use_repo_map = main_model.use_repo_map
            map_tokens = 1024
        else:
            use_repo_map = map_tokens > 0

        max_inp_tokens = self.main_model.info.get("max_input_tokens") or 0

        has_map_prompt = hasattr(self, "gpt_prompts") and self.gpt_prompts.repo_content_prefix

        if use_repo_map and self.repo and has_map_prompt:
            self.repo_map = RepoMap(
                map_tokens,
                self.root,
                self.main_model,
                io,
                self.gpt_prompts.repo_content_prefix,
                self.verbose,
                max_inp_tokens,
                map_mul_no_files=map_mul_no_files,
                refresh=map_refresh,
            )

        self.summarizer = summarizer or ChatSummary(
            [self.main_model.weak_model, self.main_model],
            self.main_model.max_chat_history_tokens,
        )

        self.summarizer_thread = None
        self.summarized_done_messages = []
        self.summarizing_messages = None
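
        # Optionally reload the prior conversation from the markdown chat
        # history file, then start summarizing it in the background if it
        # has grown too large.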
        if not self.done_messages and restore_chat_history:
            history_md = self.io.read_text(self.io.chat_history_file)
            if history_md:
                self.done_messages = utils.split_chat_history_markdown(history_md)
                self.summarize_start()

        # Linting and testing
        self.linter = Linter(root=self.root, encoding=io.encoding)
        self.auto_lint = auto_lint
        self.setup_lint_cmds(lint_cmds)
        self.lint_cmds = lint_cmds
        self.auto_test = auto_test
        self.test_cmd = test_cmd

        # validate the functions jsonschema
        if self.functions:
            from jsonschema import Draft7Validator

            for function in self.functions:
                Draft7Validator.check_schema(function)

            if self.verbose:
                self.io.tool_output("JSON Schema:")
                self.io.tool_output(json.dumps(self.functions, indent=4))

    def setup_lint_cmds(self, lint_cmds):
        if not lint_cmds:
            return
        for lang, cmd in lint_cmds.items():
            self.linter.set_linter(lang, cmd)

    def show_announcements(self):
        bold = True
        for line in self.get_announcements():
            self.io.tool_output(line, bold=bold)
            bold = False

    def add_rel_fname(self, rel_fname):
        self.abs_fnames.add(self.abs_root_path(rel_fname))
        self.check_added_files()

    def drop_rel_fname(self, fname):
        abs_fname = self.abs_root_path(fname)
        if abs_fname in self.abs_fnames:
            self.abs_fnames.remove(abs_fname)
            return True

    def abs_root_path(self, path):
        key = path
        if key in self.abs_root_path_cache:
            return self.abs_root_path_cache[key]

        res = Path(self.root) / path
        res = utils.safe_abs_path(res)
        self.abs_root_path_cache[key] = res
        return res

    fences = all_fences
    fence = fences[0]

    def show_pretty(self):
        if not self.pretty:
            return False

        # only show pretty output if fences are the normal triple-backtick
        if self.fence[0][0] != "`":
            return False

        return True

    def _stop_waiting_spinner(self):
        """Stop and clear the waiting spinner if it is running."""
        spinner = getattr(self, "waiting_spinner", None)
        if spinner:
            try:
                spinner.stop()
            finally:
                self.waiting_spinner = None

    def get_abs_fnames_content(self):
        for fname in list(self.abs_fnames):
            content = self.io.read_text(fname)

            if content is None:
                relative_fname = self.get_rel_fname(fname)
                self.io.tool_warning(f"Dropping {relative_fname} from the chat.")
                self.abs_fnames.remove(fname)
            else:
                yield fname, content

    def choose_fence(self):
        all_content = ""
        for _fname, content in self.get_abs_fnames_content():
            all_content += content + "\n"
        for _fname in self.abs_read_only_fnames:
            content = self.io.read_text(_fname)
            if content is not None:
                all_content += content + "\n"

        lines = all_content.splitlines()
        good = False
        for fence_open, fence_close in self.fences:
            if any(line.startswith(fence_open) or line.startswith(fence_close) for line in lines):
                continue
            good = True
            break

        if good:
            self.fence = (fence_open, fence_close)
        else:
            self.fence = self.fences[0]
            self.io.tool_warning(
                "Unable to find a fencing strategy! Falling back to:"
                f" {self.fence[0]}...{self.fence[1]}"
            )

        return

    def get_files_content(self, fnames=None):
        if not fnames:
            fnames = self.abs_fnames

        prompt = ""
        for fname, content in self.get_abs_fnames_content():
            if not is_image_file(fname):
                relative_fname = self.get_rel_fname(fname)
                prompt += "\n"
                prompt += relative_fname
                prompt += f"\n{self.fence[0]}\n"

                prompt += content

                # lines = content.splitlines(keepends=True)
                # lines = [f"{i+1:03}:{line}" for i, line in enumerate(lines)]
                # prompt += "".join(lines)

                prompt += f"{self.fence[1]}\n"

        return prompt

    def get_read_only_files_content(self):
        prompt = ""
        for fname in self.abs_read_only_fnames:
            content = self.io.read_text(fname)
            if content is not None and not is_image_file(fname):
                relative_fname = self.get_rel_fname(fname)
                prompt += "\n"
                prompt += relative_fname
                prompt += f"\n{self.fence[0]}\n"
                prompt += content
                prompt += f"{self.fence[1]}\n"
        return prompt

    def get_cur_message_text(self):
        text = ""
        for msg in self.cur_messages:
            text += msg["content"] + "\n"
        return text

    def get_ident_mentions(self, text):
        # Split the string on any character that is not alphanumeric
        # \W+ matches one or more non-word characters (equivalent to [^a-zA-Z0-9_]+)
        words = set(re.split(r"\W+", text))
        return words
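
    # Identifiers shorter than five characters are ignored below, which keeps
    # short, common words from spuriously matching file name stems.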
    def get_ident_filename_matches(self, idents):
        all_fnames = defaultdict(set)
        for fname in self.get_all_relative_files():
            # Skip empty paths or just '.'
            if not fname or fname == ".":
                continue

            try:
                # Handle dotfiles properly
                path = Path(fname)
                base = path.stem.lower()  # Use stem instead of with_suffix("").name
                if len(base) >= 5:
                    all_fnames[base].add(fname)
            except ValueError:
                # Skip paths that can't be processed
                continue

        matches = set()
        for ident in idents:
            if len(ident) < 5:
                continue
            matches.update(all_fnames[ident.lower()])

        return matches

    def get_repo_map(self, force_refresh=False):
        if not self.repo_map:
            return

        cur_msg_text = self.get_cur_message_text()
        mentioned_fnames = self.get_file_mentions(cur_msg_text)
        mentioned_idents = self.get_ident_mentions(cur_msg_text)

        mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))

        all_abs_files = set(self.get_all_abs_files())
        repo_abs_read_only_fnames = set(self.abs_read_only_fnames) & all_abs_files
        chat_files = set(self.abs_fnames) | repo_abs_read_only_fnames
        other_files = all_abs_files - chat_files

        repo_content = self.repo_map.get_repo_map(
            chat_files,
            other_files,
            mentioned_fnames=mentioned_fnames,
            mentioned_idents=mentioned_idents,
            force_refresh=force_refresh,
        )

        # fall back to global repo map if files in chat are disjoint from rest of repo
        if not repo_content:
            repo_content = self.repo_map.get_repo_map(
                set(),
                all_abs_files,
                mentioned_fnames=mentioned_fnames,
                mentioned_idents=mentioned_idents,
            )

        # fall back to completely unhinted repo
        if not repo_content:
            repo_content = self.repo_map.get_repo_map(
                set(),
                all_abs_files,
            )

        return repo_content
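
    # The repo map is sent as a user message followed by a canned assistant
    # acknowledgment, nudging the model to treat mapped files as read-only
    # context rather than as targets for edits.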
    def get_repo_messages(self):
        repo_messages = []
        repo_content = self.get_repo_map()
        if repo_content:
            repo_messages += [
                dict(role="user", content=repo_content),
                dict(
                    role="assistant",
                    content="Ok, I won't try and edit those files without asking first.",
                ),
            ]
        return repo_messages

    def get_readonly_files_messages(self):
        readonly_messages = []

        # Handle non-image files
        read_only_content = self.get_read_only_files_content()
        if read_only_content:
            readonly_messages += [
                dict(
                    role="user", content=self.gpt_prompts.read_only_files_prefix + read_only_content
                ),
                dict(
                    role="assistant",
                    content="Ok, I will use these files as references.",
                ),
            ]

        # Handle image files
        images_message = self.get_images_message(self.abs_read_only_fnames)
        if images_message is not None:
            readonly_messages += [
                images_message,
                dict(role="assistant", content="Ok, I will use these images as references."),
            ]

        return readonly_messages

    def get_chat_files_messages(self):
        chat_files_messages = []
        if self.abs_fnames:
            files_content = self.gpt_prompts.files_content_prefix
            files_content += self.get_files_content()
            files_reply = self.gpt_prompts.files_content_assistant_reply
        elif self.get_repo_map() and self.gpt_prompts.files_no_full_files_with_repo_map:
            files_content = self.gpt_prompts.files_no_full_files_with_repo_map
            files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply
        else:
            files_content = self.gpt_prompts.files_no_full_files
            files_reply = "Ok."

        if files_content:
            chat_files_messages += [
                dict(role="user", content=files_content),
                dict(role="assistant", content=files_reply),
            ]

        images_message = self.get_images_message(self.abs_fnames)
        if images_message is not None:
            chat_files_messages += [
                images_message,
                dict(role="assistant", content="Ok."),
            ]

        return chat_files_messages
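
    # Images and PDFs are inlined as base64 data: URLs inside OpenAI-style
    # image_url content parts; files are skipped unless the model's metadata
    # advertises vision or PDF support.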
    def get_images_message(self, fnames):
        supports_images = self.main_model.info.get("supports_vision")
        supports_pdfs = self.main_model.info.get("supports_pdf_input") or self.main_model.info.get(
            "max_pdf_size_mb"
        )

        # https://github.com/BerriAI/litellm/pull/6928
        supports_pdfs = supports_pdfs or "claude-3-5-sonnet-20241022" in self.main_model.name

        if not (supports_images or supports_pdfs):
            return None

        image_messages = []
        for fname in fnames:
            if not is_image_file(fname):
                continue

            mime_type, _ = mimetypes.guess_type(fname)
            if not mime_type:
                continue

            with open(fname, "rb") as image_file:
                encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
            image_url = f"data:{mime_type};base64,{encoded_string}"
            rel_fname = self.get_rel_fname(fname)

            if mime_type.startswith("image/") and supports_images:
                image_messages += [
                    {"type": "text", "text": f"Image file: {rel_fname}"},
                    {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}},
                ]
            elif mime_type == "application/pdf" and supports_pdfs:
                image_messages += [
                    {"type": "text", "text": f"PDF file: {rel_fname}"},
                    {"type": "image_url", "image_url": image_url},
                ]

        if not image_messages:
            return None

        return {"role": "user", "content": image_messages}

    def run_stream(self, user_message):
        self.io.user_input(user_message)
        self.init_before_message()
        yield from self.send_message(user_message)

    def init_before_message(self):
        self.aider_edited_files = set()
        self.reflected_message = None
        self.num_reflections = 0
        self.lint_outcome = None
        self.test_outcome = None
        self.shell_commands = []
        self.message_cost = 0

        if self.repo:
            self.commit_before_message.append(self.repo.get_head_commit_sha())

    def run(self, with_message=None, preproc=True):
        try:
            if with_message:
                self.io.user_input(with_message)
                self.run_one(with_message, preproc)
                return self.partial_response_content
            while True:
                try:
                    if not self.io.placeholder:
                        self.copy_context()
                    user_message = self.get_input()
                    self.run_one(user_message, preproc)
                    self.show_undo_hint()
                except KeyboardInterrupt:
                    self.keyboard_interrupt()
        except EOFError:
            return

    def copy_context(self):
        if self.auto_copy_context:
            self.commands.cmd_copy_context()

    def get_input(self):
        inchat_files = self.get_inchat_relative_files()
        read_only_files = [self.get_rel_fname(fname) for fname in self.abs_read_only_fnames]
        all_files = sorted(set(inchat_files + read_only_files))
        edit_format = "" if self.edit_format == self.main_model.edit_format else self.edit_format
        return self.io.get_input(
            self.root,
            all_files,
            self.get_addable_relative_files(),
            self.commands,
            self.abs_read_only_fnames,
            edit_format=edit_format,
        )

    def preproc_user_input(self, inp):
        if not inp:
            return

        if self.commands.is_command(inp):
            return self.commands.run(inp)

        self.check_for_file_mentions(inp)
        inp = self.check_for_urls(inp)

        return inp

    def run_one(self, user_message, preproc):
        self.init_before_message()

        if preproc:
            message = self.preproc_user_input(user_message)
        else:
            message = user_message

        while message:
            self.reflected_message = None
            list(self.send_message(message))

            if not self.reflected_message:
                break

            if self.num_reflections >= self.max_reflections:
                self.io.tool_warning(f"Only {self.max_reflections} reflections allowed, stopping.")
                return

            self.num_reflections += 1
            message = self.reflected_message

    def check_and_open_urls(self, exc, friendly_msg=None):
        """Check exception for URLs, offer to open in a browser, with user-friendly error msgs."""
        text = str(exc)

        if friendly_msg:
            self.io.tool_warning(text)
            self.io.tool_error(f"{friendly_msg}")
        else:
            self.io.tool_error(text)

        # Exclude double quotes from the matched URL characters
        url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*)')
        urls = list(set(url_pattern.findall(text)))  # Use set to remove duplicates
        for url in urls:
            url = url.rstrip(".',\"}")  # Added } to the characters to strip
            self.io.offer_url(url)
        return urls
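
    # check_for_urls() offers to scrape each URL found in the user's input
    # (via the /web command machinery) and append the page content to the
    # message; URLs the user declines are remembered in self.rejected_urls.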
    def check_for_urls(self, inp: str) -> List[str]:
        """Check input for URLs and offer to add them to the chat."""
        if not self.detect_urls:
            return inp

        # Exclude double quotes from the matched URL characters
        url_pattern = re.compile(r'(https?://[^\s/$.?#].[^\s"]*[^\s,.])')
        urls = list(set(url_pattern.findall(inp)))  # Use set to remove duplicates
        group = ConfirmGroup(urls)
        for url in urls:
            if url not in self.rejected_urls:
                url = url.rstrip(".',\"")
                if self.io.confirm_ask(
                    "Add URL to the chat?", subject=url, group=group, allow_never=True
                ):
                    inp += "\n\n"
                    inp += self.commands.cmd_web(url, return_content=True)
                else:
                    self.rejected_urls.add(url)

        return inp

    def keyboard_interrupt(self):
        # Ensure cursor is visible on exit
        Console().show_cursor(True)

        now = time.time()

        thresh = 2  # seconds
        if self.last_keyboard_interrupt and now - self.last_keyboard_interrupt < thresh:
            self.io.tool_warning("\n\n^C KeyboardInterrupt")
            self.event("exit", reason="Control-C")
            sys.exit()

        self.io.tool_warning("\n\n^C again to exit")

        self.last_keyboard_interrupt = now
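
    # Chat-history summarization runs on a background thread. summarize_end()
    # joins the thread and swaps in the condensed history only if no new
    # messages arrived while the summary was being computed.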
    def summarize_start(self):
        if not self.summarizer.too_big(self.done_messages):
            return

        self.summarize_end()

        if self.verbose:
            self.io.tool_output("Starting to summarize chat history.")

        self.summarizer_thread = threading.Thread(target=self.summarize_worker)
        self.summarizer_thread.start()

    def summarize_worker(self):
        self.summarizing_messages = list(self.done_messages)
        try:
            self.summarized_done_messages = self.summarizer.summarize(self.summarizing_messages)
        except ValueError as err:
            self.io.tool_warning(err.args[0])

        if self.verbose:
            self.io.tool_output("Finished summarizing chat history.")

    def summarize_end(self):
        if self.summarizer_thread is None:
            return

        self.summarizer_thread.join()
        self.summarizer_thread = None

        if self.summarizing_messages == self.done_messages:
            self.done_messages = self.summarized_done_messages
        self.summarizing_messages = None
        self.summarized_done_messages = []

    def move_back_cur_messages(self, message):
        self.done_messages += self.cur_messages
        self.summarize_start()

        # TODO check for impact on image messages
        if message:
            self.done_messages += [
                dict(role="user", content=message),
                dict(role="assistant", content="Ok."),
            ]
        self.cur_messages = []

    def normalize_language(self, lang_code):
        """
        Convert a locale code such as ``en_US`` or ``fr`` into a readable
        language name (e.g. ``English`` or ``French``). If Babel is
        available it is used for reliable conversion; otherwise a small
        built-in fallback map handles common languages.
        """
        if not lang_code:
            return None

        if lang_code.upper() in ("C", "POSIX"):
            return None

        # Probably already a language name
        if (
            len(lang_code) > 3
            and "_" not in lang_code
            and "-" not in lang_code
            and lang_code[0].isupper()
        ):
            return lang_code

        # Preferred: Babel
        if Locale is not None:
            try:
                loc = Locale.parse(lang_code.replace("-", "_"))
                return loc.get_display_name("en").capitalize()
            except Exception:
                pass  # Fall back to manual mapping

        # Simple fallback for common languages
        fallback = {
            "en": "English",
            "fr": "French",
            "es": "Spanish",
            "de": "German",
            "it": "Italian",
            "pt": "Portuguese",
            "zh": "Chinese",
            "ja": "Japanese",
            "ko": "Korean",
            "ru": "Russian",
        }
        primary_lang_code = lang_code.replace("-", "_").split("_")[0].lower()
        return fallback.get(primary_lang_code, lang_code)

    def get_user_language(self):
        """
        Detect the user's language preference and return a human-readable
        language name such as ``English``. Detection order:

        1. ``self.chat_language`` if explicitly set
        2. ``locale.getlocale()``
        3. ``LANG`` / ``LANGUAGE`` / ``LC_ALL`` / ``LC_MESSAGES`` environment variables
        """

        # Explicit override
        if self.chat_language:
            return self.normalize_language(self.chat_language)

        # System locale
        try:
            lang = locale.getlocale()[0]
            if lang:
                lang = self.normalize_language(lang)
            if lang:
                return lang
        except Exception:
            pass

        # Environment variables
        for env_var in ("LANG", "LANGUAGE", "LC_ALL", "LC_MESSAGES"):
            lang = os.environ.get(env_var)
            if lang:
                lang = lang.split(".")[0]  # Strip encoding if present
                return self.normalize_language(lang)

        return None

    def get_platform_info(self):
        platform_text = ""
        try:
            platform_text = f"- Platform: {platform.platform()}\n"
        except KeyError:
            # Skip platform info if it can't be retrieved
            platform_text = "- Platform information unavailable\n"

        shell_var = "COMSPEC" if os.name == "nt" else "SHELL"
        shell_val = os.getenv(shell_var)
        platform_text += f"- Shell: {shell_var}={shell_val}\n"

        user_lang = self.get_user_language()
        if user_lang:
            platform_text += f"- Language: {user_lang}\n"

        dt = datetime.now().astimezone().strftime("%Y-%m-%d")
        platform_text += f"- Current date: {dt}\n"

        if self.repo:
            platform_text += "- The user is operating inside a git repository\n"

        if self.lint_cmds:
            if self.auto_lint:
                platform_text += (
                    "- The user's pre-commit runs these lint commands, don't suggest running"
                    " them:\n"
                )
            else:
                platform_text += "- The user prefers these lint commands:\n"
            for lang, cmd in self.lint_cmds.items():
                if lang is None:
                    platform_text += f"  - {cmd}\n"
                else:
                    platform_text += f"  - {lang}: {cmd}\n"

        if self.test_cmd:
            if self.auto_test:
                platform_text += (
                    "- The user's pre-commit runs this test command, don't suggest running them: "
                )
            else:
                platform_text += "- The user prefers this test command: "
            platform_text += self.test_cmd + "\n"

        return platform_text

    def fmt_system_prompt(self, prompt):
        final_reminders = []
        if self.main_model.lazy:
            final_reminders.append(self.gpt_prompts.lazy_prompt)
        if self.main_model.overeager:
            final_reminders.append(self.gpt_prompts.overeager_prompt)

        user_lang = self.get_user_language()
        if user_lang:
            final_reminders.append(f"Reply in {user_lang}.\n")

        platform_text = self.get_platform_info()

        if self.suggest_shell_commands:
            shell_cmd_prompt = self.gpt_prompts.shell_cmd_prompt.format(platform=platform_text)
            shell_cmd_reminder = self.gpt_prompts.shell_cmd_reminder.format(platform=platform_text)
            rename_with_shell = self.gpt_prompts.rename_with_shell
        else:
            shell_cmd_prompt = self.gpt_prompts.no_shell_cmd_prompt.format(platform=platform_text)
            shell_cmd_reminder = self.gpt_prompts.no_shell_cmd_reminder.format(
                platform=platform_text
            )
            rename_with_shell = ""

        if user_lang:  # user_lang is the result of self.get_user_language()
            language = user_lang
        else:
            language = "the same language they are using"  # Default if no specific lang detected

        if self.fence[0] == "`" * 4:
            quad_backtick_reminder = (
                "\nIMPORTANT: Use *quadruple* backticks ```` as fences, not triple backticks!\n"
            )
        else:
            quad_backtick_reminder = ""

        final_reminders = "\n\n".join(final_reminders)

        prompt = prompt.format(
            fence=self.fence,
            quad_backtick_reminder=quad_backtick_reminder,
            final_reminders=final_reminders,
            platform=platform_text,
            shell_cmd_prompt=shell_cmd_prompt,
            rename_with_shell=rename_with_shell,
            shell_cmd_reminder=shell_cmd_reminder,
            go_ahead_tip=self.gpt_prompts.go_ahead_tip,
            language=language,
        )

        return prompt
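
    # format_chat_chunks() assembles the prompt in layers: system prompt,
    # canned example exchanges, summarized history, repo map, read-only
    # files, editable chat files, current messages and an optional reminder.
    # The most stable content comes first, which also suits prompt caching.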
|
1223
|
+
|
1224
|
+
    def format_chat_chunks(self):
        self.choose_fence()
        main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
        if self.main_model.system_prompt_prefix:
            main_sys = self.main_model.system_prompt_prefix + "\n" + main_sys

        example_messages = []
        if self.main_model.examples_as_sys_msg:
            if self.gpt_prompts.example_messages:
                main_sys += "\n# Example conversations:\n\n"
            for msg in self.gpt_prompts.example_messages:
                role = msg["role"]
                content = self.fmt_system_prompt(msg["content"])
                main_sys += f"## {role.upper()}: {content}\n\n"
            main_sys = main_sys.strip()
        else:
            for msg in self.gpt_prompts.example_messages:
                example_messages.append(
                    dict(
                        role=msg["role"],
                        content=self.fmt_system_prompt(msg["content"]),
                    )
                )
            if self.gpt_prompts.example_messages:
                example_messages += [
                    dict(
                        role="user",
                        content=(
                            "I switched to a new code base. Please don't consider the above files"
                            " or try to edit them any longer."
                        ),
                    ),
                    dict(role="assistant", content="Ok."),
                ]

        if self.gpt_prompts.system_reminder:
            main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)

        chunks = ChatChunks()

        if self.main_model.use_system_prompt:
            chunks.system = [
                dict(role="system", content=main_sys),
            ]
        else:
            chunks.system = [
                dict(role="user", content=main_sys),
                dict(role="assistant", content="Ok."),
            ]

        chunks.examples = example_messages

        self.summarize_end()
        chunks.done = self.done_messages

        chunks.repo = self.get_repo_messages()
        chunks.readonly_files = self.get_readonly_files_messages()
        chunks.chat_files = self.get_chat_files_messages()

        if self.gpt_prompts.system_reminder:
            reminder_message = [
                dict(
                    role="system", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)
                ),
            ]
        else:
            reminder_message = []

        chunks.cur = list(self.cur_messages)
        chunks.reminder = []

        # TODO review impact of token count on image messages
        messages_tokens = self.main_model.token_count(chunks.all_messages())
        reminder_tokens = self.main_model.token_count(reminder_message)
        cur_tokens = self.main_model.token_count(chunks.cur)

        if None not in (messages_tokens, reminder_tokens, cur_tokens):
            total_tokens = messages_tokens + reminder_tokens + cur_tokens
        else:
            # add the reminder anyway
            total_tokens = 0

        if chunks.cur:
            final = chunks.cur[-1]
        else:
            final = None

        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
        # Add the reminder prompt if we still have room to include it.
        if not max_input_tokens or (
            total_tokens < max_input_tokens and self.gpt_prompts.system_reminder
        ):
            if self.main_model.reminder == "sys":
                chunks.reminder = reminder_message
            elif self.main_model.reminder == "user" and final and final["role"] == "user":
                # stuff it into the user message
                new_content = (
                    final["content"]
                    + "\n\n"
                    + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
                )
                chunks.cur[-1] = dict(role=final["role"], content=new_content)

        return chunks

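    # A minimal sketch (derived from the assembly above) of the layout that
    # format_chat_chunks() produces; the ChatChunks fields are filled in this
    # order:
    #
    #   chunks.system          -> main system prompt (or a user/assistant pair)
    #   chunks.examples        -> canned example conversations
    #   chunks.done            -> summarized earlier conversation
    #   chunks.repo            -> repo map messages
    #   chunks.readonly_files  -> read-only file contents
    #   chunks.chat_files      -> editable file contents
    #   chunks.cur             -> the in-progress exchange
    #   chunks.reminder        -> optional trailing system reminder
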
    def format_messages(self):
        chunks = self.format_chat_chunks()
        if self.add_cache_headers:
            chunks.add_cache_control_headers()

        return chunks

    def warm_cache(self, chunks):
        if not self.add_cache_headers:
            return
        if not self.num_cache_warming_pings:
            return
        if not self.ok_to_warm_cache:
            return

        delay = 5 * 60 - 5
        delay = float(os.environ.get("AIDER_CACHE_KEEPALIVE_DELAY", delay))
        self.next_cache_warm = time.time() + delay
        self.warming_pings_left = self.num_cache_warming_pings
        self.cache_warming_chunks = chunks

        if self.cache_warming_thread:
            return

        def warm_cache_worker():
            while self.ok_to_warm_cache:
                time.sleep(1)
                if self.warming_pings_left <= 0:
                    continue
                now = time.time()
                if now < self.next_cache_warm:
                    continue

                self.warming_pings_left -= 1
                self.next_cache_warm = time.time() + delay

                # guard against extra_params being None
                kwargs = dict(self.main_model.extra_params or {})
                kwargs["max_tokens"] = 1

                try:
                    completion = litellm.completion(
                        model=self.main_model.name,
                        messages=self.cache_warming_chunks.cacheable_messages(),
                        stream=False,
                        **kwargs,
                    )
                except Exception as err:
                    self.io.tool_warning(f"Cache warming error: {str(err)}")
                    continue

                cache_hit_tokens = getattr(
                    completion.usage, "prompt_cache_hit_tokens", 0
                ) or getattr(completion.usage, "cache_read_input_tokens", 0)

                if self.verbose:
                    self.io.tool_output(f"Warmed {format_tokens(cache_hit_tokens)} cached tokens.")

        self.cache_warming_thread = threading.Timer(0, warm_cache_worker)
        self.cache_warming_thread.daemon = True
        self.cache_warming_thread.start()

        return chunks

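    # Note on the keepalive math above: 5 * 60 - 5 = 295 seconds, i.e. one
    # ping just inside a five-minute window, on the assumption that provider
    # prompt caches expire on roughly that timescale. Each ping is a 1-token,
    # non-streaming completion, so it costs little beyond the cached read.
    # The interval can be tuned without code changes:
    #
    #   export AIDER_CACHE_KEEPALIVE_DELAY=120
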
    def check_tokens(self, messages):
        """Check if the messages will fit within the model's token limits."""
        input_tokens = self.main_model.token_count(messages)
        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0

        if max_input_tokens and input_tokens >= max_input_tokens:
            self.io.tool_error(
                f"Your estimated chat context of {input_tokens:,} tokens exceeds the"
                f" {max_input_tokens:,} token limit for {self.main_model.name}!"
            )
            self.io.tool_output("To reduce the chat context:")
            self.io.tool_output("- Use /drop to remove unneeded files from the chat")
            self.io.tool_output("- Use /clear to clear the chat history")
            self.io.tool_output("- Break your code into smaller files")
            self.io.tool_output(
                "It's probably safe to try and send the request; most providers won't charge if"
                " the context limit is exceeded."
            )

            if not self.io.confirm_ask("Try to proceed anyway?"):
                return False
        return True

    def send_message(self, inp):
        self.event("message_send_starting")

        # Notify IO that LLM processing is starting
        self.io.llm_started()

        self.cur_messages += [
            dict(role="user", content=inp),
        ]

        chunks = self.format_messages()
        messages = chunks.all_messages()
        if not self.check_tokens(messages):
            return
        self.warm_cache(chunks)

        if self.verbose:
            utils.show_messages(messages, functions=self.functions)

        self.multi_response_content = ""
        if self.show_pretty():
            self.waiting_spinner = WaitingSpinner("Waiting for " + self.main_model.name)
            self.waiting_spinner.start()
            if self.stream:
                self.mdstream = self.io.get_assistant_mdstream()
            else:
                self.mdstream = None
        else:
            self.mdstream = None

        retry_delay = 0.125

        litellm_ex = LiteLLMExceptions()

        self.usage_report = None
        exhausted = False
        interrupted = False
        try:
            while True:
                try:
                    yield from self.send(messages, functions=self.functions)
                    break
                except litellm_ex.exceptions_tuple() as err:
                    ex_info = litellm_ex.get_ex_info(err)

                    if ex_info.name == "ContextWindowExceededError":
                        exhausted = True
                        break

                    should_retry = ex_info.retry
                    if should_retry:
                        retry_delay *= 2
                        if retry_delay > RETRY_TIMEOUT:
                            should_retry = False

                    if not should_retry:
                        self.mdstream = None
                        self.check_and_open_urls(err, ex_info.description)
                        break

                    err_msg = str(err)
                    if ex_info.description:
                        self.io.tool_warning(err_msg)
                        self.io.tool_error(ex_info.description)
                    else:
                        self.io.tool_error(err_msg)

                    self.io.tool_output(f"Retrying in {retry_delay:.1f} seconds...")
                    time.sleep(retry_delay)
                    continue
                except KeyboardInterrupt:
                    interrupted = True
                    break
                except FinishReasonLength:
                    # We hit the output limit!
                    if not self.main_model.info.get("supports_assistant_prefill"):
                        exhausted = True
                        break

                    self.multi_response_content = self.get_multi_response_content_in_progress()

                    if messages[-1]["role"] == "assistant":
                        messages[-1]["content"] = self.multi_response_content
                    else:
                        messages.append(
                            dict(role="assistant", content=self.multi_response_content, prefix=True)
                        )
                except Exception as err:
                    self.mdstream = None
                    lines = traceback.format_exception(type(err), err, err.__traceback__)
                    self.io.tool_warning("".join(lines))
                    self.io.tool_error(str(err))
                    self.event("message_send_exception", exception=str(err))
                    return
        finally:
            if self.mdstream:
                self.live_incremental_response(True)
                self.mdstream = None

            # Ensure any waiting spinner is stopped
            self._stop_waiting_spinner()

            self.partial_response_content = self.get_multi_response_content_in_progress(True)
            self.remove_reasoning_content()
            self.multi_response_content = ""

        ###
        # print()
        # print("=" * 20)
        # dump(self.partial_response_content)

        self.io.tool_output()

        self.show_usage_report()

        self.add_assistant_reply_to_cur_messages()

        if exhausted:
            if self.cur_messages and self.cur_messages[-1]["role"] == "user":
                self.cur_messages += [
                    dict(
                        role="assistant",
                        content="FinishReasonLength exception: you sent too many tokens",
                    ),
                ]

            self.show_exhausted_error()
            self.num_exhausted_context_windows += 1
            return

        if self.partial_response_function_call:
            args = self.parse_partial_args()
            if args:
                content = args.get("explanation") or ""
            else:
                content = ""
        elif self.partial_response_content:
            content = self.partial_response_content
        else:
            content = ""

        if not interrupted:
            add_rel_files_message = self.check_for_file_mentions(content)
            if add_rel_files_message:
                if self.reflected_message:
                    self.reflected_message += "\n\n" + add_rel_files_message
                else:
                    self.reflected_message = add_rel_files_message
                return

            try:
                if self.reply_completed():
                    return
            except KeyboardInterrupt:
                interrupted = True

        if interrupted:
            if self.cur_messages and self.cur_messages[-1]["role"] == "user":
                self.cur_messages[-1]["content"] += "\n^C KeyboardInterrupt"
            else:
                self.cur_messages += [dict(role="user", content="^C KeyboardInterrupt")]
            self.cur_messages += [
                dict(role="assistant", content="I see that you interrupted my previous reply.")
            ]
            return

        edited = self.apply_updates()

        if edited:
            self.aider_edited_files.update(edited)
            saved_message = self.auto_commit(edited)

            if not saved_message and hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
                saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo

            self.move_back_cur_messages(saved_message)

        if self.reflected_message:
            return

        if edited and self.auto_lint:
            lint_errors = self.lint_edited(edited)
            self.auto_commit(edited, context="Ran the linter")
            self.lint_outcome = not lint_errors
            if lint_errors:
                ok = self.io.confirm_ask("Attempt to fix lint errors?")
                if ok:
                    self.reflected_message = lint_errors
                    return

        shared_output = self.run_shell_commands()
        if shared_output:
            self.cur_messages += [
                dict(role="user", content=shared_output),
                dict(role="assistant", content="Ok"),
            ]

        if edited and self.auto_test:
            test_errors = self.commands.cmd_test(self.test_cmd)
            self.test_outcome = not test_errors
            if test_errors:
                ok = self.io.confirm_ask("Attempt to fix test errors?")
                if ok:
                    self.reflected_message = test_errors
                    return

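    # Worked example of the retry schedule above, assuming RETRY_TIMEOUT = 60
    # (the constant is assumed to be defined earlier in this module):
    # retry_delay doubles from 0.125 before each sleep, giving waits of
    #
    #   0.25, 0.5, 1, 2, 4, 8, 16, 32   (63.75s in total)
    #
    # and the next doubling (64 > RETRY_TIMEOUT) disables should_retry.
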
    def reply_completed(self):
        pass

    def show_exhausted_error(self):
        output_tokens = 0
        if self.partial_response_content:
            output_tokens = self.main_model.token_count(self.partial_response_content)
        max_output_tokens = self.main_model.info.get("max_output_tokens") or 0

        input_tokens = self.main_model.token_count(self.format_messages().all_messages())
        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0

        total_tokens = input_tokens + output_tokens

        fudge = 0.7

        out_err = ""
        if output_tokens >= max_output_tokens * fudge:
            out_err = " -- possibly exceeded output limit!"

        inp_err = ""
        if input_tokens >= max_input_tokens * fudge:
            inp_err = " -- possibly exhausted context window!"

        tot_err = ""
        if total_tokens >= max_input_tokens * fudge:
            tot_err = " -- possibly exhausted context window!"

        res = ["", ""]
        res.append(f"Model {self.main_model.name} has hit a token limit!")
        res.append("Token counts below are approximate.")
        res.append("")
        res.append(f"Input tokens: ~{input_tokens:,} of {max_input_tokens:,}{inp_err}")
        res.append(f"Output tokens: ~{output_tokens:,} of {max_output_tokens:,}{out_err}")
        res.append(f"Total tokens: ~{total_tokens:,} of {max_input_tokens:,}{tot_err}")

        if output_tokens >= max_output_tokens:
            res.append("")
            res.append("To reduce output tokens:")
            res.append("- Ask for smaller changes in each request.")
            res.append("- Break your code into smaller source files.")
            if "diff" not in self.main_model.edit_format:
                res.append("- Use a stronger model that can return diffs.")

        if input_tokens >= max_input_tokens or total_tokens >= max_input_tokens:
            res.append("")
            res.append("To reduce input tokens:")
            res.append("- Use /tokens to see token usage.")
            res.append("- Use /drop to remove unneeded files from the chat session.")
            res.append("- Use /clear to clear the chat history.")
            res.append("- Break your code into smaller source files.")

        res = "".join([line + "\n" for line in res])
        self.io.tool_error(res)
        self.io.offer_url(urls.token_limits)

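    # The fudge factor above flags limits as only "possibly" exceeded at 70%
    # of the hard cap, since local token counts are approximate. For example,
    # with max_output_tokens = 4096 the output warning fires once
    # output_tokens reaches 4096 * 0.7 ~= 2867.
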
    def lint_edited(self, fnames):
        res = ""
        for fname in fnames:
            if not fname:
                continue
            errors = self.linter.lint(self.abs_root_path(fname))

            if errors:
                res += "\n"
                res += errors
                res += "\n"

        if res:
            self.io.tool_warning(res)

        return res

    def __del__(self):
        """Cleanup when the Coder object is destroyed."""
        self.ok_to_warm_cache = False

    def add_assistant_reply_to_cur_messages(self):
        if self.partial_response_content:
            self.cur_messages += [dict(role="assistant", content=self.partial_response_content)]
        if self.partial_response_function_call:
            self.cur_messages += [
                dict(
                    role="assistant",
                    content=None,
                    function_call=self.partial_response_function_call,
                )
            ]

    def get_file_mentions(self, content, ignore_current=False):
        words = set(word for word in content.split())

        # drop sentence punctuation from the end
        words = set(word.rstrip(",.!;:?") for word in words)

        # strip away all kinds of quotes
        quotes = "\"'`*_"
        words = set(word.strip(quotes) for word in words)

        if ignore_current:
            addable_rel_fnames = self.get_all_relative_files()
            existing_basenames = {}
        else:
            addable_rel_fnames = self.get_addable_relative_files()

            # Get basenames of files already in chat or read-only
            existing_basenames = {os.path.basename(f) for f in self.get_inchat_relative_files()} | {
                os.path.basename(self.get_rel_fname(f)) for f in self.abs_read_only_fnames
            }

        mentioned_rel_fnames = set()
        fname_to_rel_fnames = {}
        for rel_fname in addable_rel_fnames:
            normalized_rel_fname = rel_fname.replace("\\", "/")
            normalized_words = set(word.replace("\\", "/") for word in words)
            if normalized_rel_fname in normalized_words:
                mentioned_rel_fnames.add(rel_fname)

            fname = os.path.basename(rel_fname)

            # Don't add basenames that could be plain words like "run" or "make"
            if "/" in fname or "\\" in fname or "." in fname or "_" in fname or "-" in fname:
                if fname not in fname_to_rel_fnames:
                    fname_to_rel_fnames[fname] = []
                fname_to_rel_fnames[fname].append(rel_fname)

        for fname, rel_fnames in fname_to_rel_fnames.items():
            # If the basename is already in chat, don't add based on a basename mention
            if fname in existing_basenames:
                continue
            # If the basename mention is unique among addable files and present in the text
            if len(rel_fnames) == 1 and fname in words:
                mentioned_rel_fnames.add(rel_fnames[0])

        return mentioned_rel_fnames

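    # Example of the matching rules above, using hypothetical file names:
    # given the reply "Edit src/app/main.py and utils.py", the full relative
    # path src/app/main.py matches directly, while the bare basename utils.py
    # is added only if exactly one addable file has that basename and it is
    # not already in the chat. A plain word like "run" can never match as a
    # basename, since basenames must contain one of / \ . _ - to qualify.
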
    def check_for_file_mentions(self, content):
        mentioned_rel_fnames = self.get_file_mentions(content)

        new_mentions = mentioned_rel_fnames - self.ignore_mentions

        if not new_mentions:
            return

        added_fnames = []
        group = ConfirmGroup(new_mentions)
        for rel_fname in sorted(new_mentions):
            if self.io.confirm_ask(
                "Add file to the chat?", subject=rel_fname, group=group, allow_never=True
            ):
                self.add_rel_fname(rel_fname)
                added_fnames.append(rel_fname)
            else:
                self.ignore_mentions.add(rel_fname)

        if added_fnames:
            return prompts.added_files.format(fnames=", ".join(added_fnames))

    def send(self, messages, model=None, functions=None):
        self.got_reasoning_content = False
        self.ended_reasoning_content = False

        if not model:
            model = self.main_model

        self.partial_response_content = ""
        self.partial_response_function_call = dict()

        self.io.log_llm_history("TO LLM", format_messages(messages))

        completion = None
        try:
            hash_object, completion = model.send_completion(
                messages,
                functions,
                self.stream,
                self.temperature,
            )
            self.chat_completion_call_hashes.append(hash_object.hexdigest())

            if self.stream:
                yield from self.show_send_output_stream(completion)
            else:
                self.show_send_output(completion)

            # Calculate costs for successful responses
            self.calculate_and_show_tokens_and_cost(messages, completion)

        except LiteLLMExceptions().exceptions_tuple() as err:
            ex_info = LiteLLMExceptions().get_ex_info(err)
            if ex_info.name == "ContextWindowExceededError":
                # Still calculate costs for context window errors
                self.calculate_and_show_tokens_and_cost(messages, completion)
            raise
        except KeyboardInterrupt as kbi:
            self.keyboard_interrupt()
            raise kbi
        finally:
            self.io.log_llm_history(
                "LLM RESPONSE",
                format_content("ASSISTANT", self.partial_response_content),
            )

            if self.partial_response_content:
                self.io.ai_output(self.partial_response_content)
            elif self.partial_response_function_call:
                # TODO: push this into subclasses
                args = self.parse_partial_args()
                if args:
                    self.io.ai_output(json.dumps(args, indent=4))

    def show_send_output(self, completion):
        # Stop spinner once we have a response
        self._stop_waiting_spinner()

        if self.verbose:
            print(completion)

        if not completion.choices:
            self.io.tool_error(str(completion))
            return

        show_func_err = None
        show_content_err = None
        try:
            if completion.choices[0].message.tool_calls:
                self.partial_response_function_call = (
                    completion.choices[0].message.tool_calls[0].function
                )
        except AttributeError as func_err:
            show_func_err = func_err

        try:
            reasoning_content = completion.choices[0].message.reasoning_content
        except AttributeError:
            try:
                reasoning_content = completion.choices[0].message.reasoning
            except AttributeError:
                reasoning_content = None

        try:
            self.partial_response_content = completion.choices[0].message.content or ""
        except AttributeError as content_err:
            show_content_err = content_err

        resp_hash = dict(
            function_call=str(self.partial_response_function_call),
            content=self.partial_response_content,
        )
        resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())
        self.chat_completion_response_hashes.append(resp_hash.hexdigest())

        if show_func_err and show_content_err:
            self.io.tool_error(show_func_err)
            self.io.tool_error(show_content_err)
            raise Exception("No data found in LLM response!")

        show_resp = self.render_incremental_response(True)

        if reasoning_content:
            formatted_reasoning = format_reasoning_content(
                reasoning_content, self.reasoning_tag_name
            )
            show_resp = formatted_reasoning + show_resp

        show_resp = replace_reasoning_tags(show_resp, self.reasoning_tag_name)

        self.io.assistant_output(show_resp, pretty=self.show_pretty())

        if (
            hasattr(completion.choices[0], "finish_reason")
            and completion.choices[0].finish_reason == "length"
        ):
            raise FinishReasonLength()

    def show_send_output_stream(self, completion):
        received_content = False

        for chunk in completion:
            if len(chunk.choices) == 0:
                continue

            if (
                hasattr(chunk.choices[0], "finish_reason")
                and chunk.choices[0].finish_reason == "length"
            ):
                raise FinishReasonLength()

            try:
                func = chunk.choices[0].delta.function_call
                # dump(func)
                for k, v in func.items():
                    if k in self.partial_response_function_call:
                        self.partial_response_function_call[k] += v
                    else:
                        self.partial_response_function_call[k] = v
                received_content = True
            except AttributeError:
                pass

            text = ""

            try:
                reasoning_content = chunk.choices[0].delta.reasoning_content
            except AttributeError:
                try:
                    reasoning_content = chunk.choices[0].delta.reasoning
                except AttributeError:
                    reasoning_content = None

            if reasoning_content:
                if not self.got_reasoning_content:
                    text += f"<{REASONING_TAG}>\n\n"
                text += reasoning_content
                self.got_reasoning_content = True
                received_content = True

            try:
                content = chunk.choices[0].delta.content
                if content:
                    if self.got_reasoning_content and not self.ended_reasoning_content:
                        text += f"\n\n</{self.reasoning_tag_name}>\n\n"
                        self.ended_reasoning_content = True

                    text += content
                    received_content = True
            except AttributeError:
                pass

            if received_content:
                self._stop_waiting_spinner()
            self.partial_response_content += text

            if self.show_pretty():
                self.live_incremental_response(False)
            elif text:
                # Apply reasoning tag formatting
                text = replace_reasoning_tags(text, self.reasoning_tag_name)
                try:
                    sys.stdout.write(text)
                except UnicodeEncodeError:
                    # Safely encode and decode the text
                    safe_text = text.encode(sys.stdout.encoding, errors="backslashreplace").decode(
                        sys.stdout.encoding
                    )
                    sys.stdout.write(safe_text)
                sys.stdout.flush()
            yield text

        if not received_content:
            self.io.tool_warning("Empty response received from LLM. Check your provider account?")

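    # Sketch of the reasoning framing above: the first reasoning delta opens
    # the tag, and the first regular content delta closes it. So deltas of
    # (reasoning="thinking...", content="Here you go") render roughly as:
    #
    #   <tag>\n\nthinking...\n\n</tag>\n\nHere you go
    #
    # where the opening name comes from REASONING_TAG and the closing name
    # from self.reasoning_tag_name.
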
    def live_incremental_response(self, final):
        show_resp = self.render_incremental_response(final)
        # Apply any reasoning tag formatting
        show_resp = replace_reasoning_tags(show_resp, self.reasoning_tag_name)
        self.mdstream.update(show_resp, final=final)

    def render_incremental_response(self, final):
        return self.get_multi_response_content_in_progress()

    def remove_reasoning_content(self):
        """Remove reasoning content from the model's response."""

        self.partial_response_content = remove_reasoning_content(
            self.partial_response_content,
            self.reasoning_tag_name,
        )

    def calculate_and_show_tokens_and_cost(self, messages, completion=None):
        prompt_tokens = 0
        completion_tokens = 0
        cache_hit_tokens = 0
        cache_write_tokens = 0

        if completion and hasattr(completion, "usage") and completion.usage is not None:
            prompt_tokens = completion.usage.prompt_tokens
            completion_tokens = completion.usage.completion_tokens
            cache_hit_tokens = getattr(completion.usage, "prompt_cache_hit_tokens", 0) or getattr(
                completion.usage, "cache_read_input_tokens", 0
            )
            cache_write_tokens = getattr(completion.usage, "cache_creation_input_tokens", 0)

            if hasattr(completion.usage, "cache_read_input_tokens") or hasattr(
                completion.usage, "cache_creation_input_tokens"
            ):
                self.message_tokens_sent += prompt_tokens
                self.message_tokens_sent += cache_write_tokens
            else:
                self.message_tokens_sent += prompt_tokens

        else:
            prompt_tokens = self.main_model.token_count(messages)
            completion_tokens = self.main_model.token_count(self.partial_response_content)
            self.message_tokens_sent += prompt_tokens

        self.message_tokens_received += completion_tokens

        tokens_report = f"Tokens: {format_tokens(self.message_tokens_sent)} sent"

        if cache_write_tokens:
            tokens_report += f", {format_tokens(cache_write_tokens)} cache write"
        if cache_hit_tokens:
            tokens_report += f", {format_tokens(cache_hit_tokens)} cache hit"
        tokens_report += f", {format_tokens(self.message_tokens_received)} received."

        if not self.main_model.info.get("input_cost_per_token"):
            self.usage_report = tokens_report
            return

        try:
            # Try and use litellm's built in cost calculator. Seems to work for non-streaming only?
            cost = litellm.completion_cost(completion_response=completion)
        except Exception:
            cost = 0

        if not cost:
            cost = self.compute_costs_from_tokens(
                prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
            )

        self.total_cost += cost
        self.message_cost += cost

        def format_cost(value):
            if value == 0:
                return "0.00"
            magnitude = abs(value)
            if magnitude >= 0.01:
                return f"{value:.2f}"
            else:
                return f"{value:.{max(2, 2 - int(math.log10(magnitude)))}f}"

        cost_report = (
            f"Cost: ${format_cost(self.message_cost)} message,"
            f" ${format_cost(self.total_cost)} session."
        )

        if cache_hit_tokens and cache_write_tokens:
            sep = "\n"
        else:
            sep = " "

        self.usage_report = tokens_report + sep + cost_report

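    # Worked examples for format_cost() above: 0.15 -> "0.15" (anything at or
    # above one cent is fixed at two decimals), while 0.0034 -> "0.0034":
    # int(math.log10(0.0034)) == -2, so the precision is max(2, 2 - (-2)) == 4
    # decimal places.
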
    def compute_costs_from_tokens(
        self, prompt_tokens, completion_tokens, cache_write_tokens, cache_hit_tokens
    ):
        cost = 0

        input_cost_per_token = self.main_model.info.get("input_cost_per_token") or 0
        output_cost_per_token = self.main_model.info.get("output_cost_per_token") or 0
        input_cost_per_token_cache_hit = (
            self.main_model.info.get("input_cost_per_token_cache_hit") or 0
        )

        # deepseek
        # prompt_cache_hit_tokens + prompt_cache_miss_tokens
        #   == prompt_tokens == total tokens that were sent
        #
        # Anthropic
        # cache_creation_input_tokens + cache_read_input_tokens + prompt
        #   == total tokens that were sent

        if input_cost_per_token_cache_hit:
            # must be deepseek
            cost += input_cost_per_token_cache_hit * cache_hit_tokens
            # bill the non-cached remainder of the prompt at the full input rate
            cost += (prompt_tokens - cache_hit_tokens) * input_cost_per_token
        else:
            # hard code the anthropic adjustments, no-ops for other models since cache_x_tokens==0
            cost += cache_write_tokens * input_cost_per_token * 1.25
            cost += cache_hit_tokens * input_cost_per_token * 0.10
            cost += prompt_tokens * input_cost_per_token

        cost += completion_tokens * output_cost_per_token
        return cost

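    # Worked example for the Anthropic branch above, at an illustrative rate
    # of $3 per million input tokens (input_cost_per_token = 3e-06):
    #
    #   10,000 cache-write tokens: 10_000 * 3e-06 * 1.25 = $0.0375
    #   50,000 cache-hit tokens:   50_000 * 3e-06 * 0.10 = $0.0150
    #    2,000 uncached tokens:     2_000 * 3e-06        = $0.0060
    #
    # The 1.25x write premium and 0.10x hit discount are hard coded above.
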
    def show_usage_report(self):
        if not self.usage_report:
            return

        self.total_tokens_sent += self.message_tokens_sent
        self.total_tokens_received += self.message_tokens_received

        self.io.tool_output(self.usage_report)

        prompt_tokens = self.message_tokens_sent
        completion_tokens = self.message_tokens_received
        self.event(
            "message_send",
            main_model=self.main_model,
            edit_format=self.edit_format,
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens,
            cost=self.message_cost,
            total_cost=self.total_cost,
        )

        self.message_cost = 0.0
        self.message_tokens_sent = 0
        self.message_tokens_received = 0

    def get_multi_response_content_in_progress(self, final=False):
        cur = self.multi_response_content or ""
        new = self.partial_response_content or ""

        if new.rstrip() != new and not final:
            new = new.rstrip()

        return cur + new

    def get_rel_fname(self, fname):
        try:
            return os.path.relpath(fname, self.root)
        except ValueError:
            return fname

    def get_inchat_relative_files(self):
        files = [self.get_rel_fname(fname) for fname in self.abs_fnames]
        return sorted(set(files))

    def is_file_safe(self, fname):
        try:
            return Path(self.abs_root_path(fname)).is_file()
        except OSError:
            return

    def get_all_relative_files(self):
        if self.repo:
            files = self.repo.get_tracked_files()
        else:
            files = self.get_inchat_relative_files()

        # This is quite slow in large repos
        # files = [fname for fname in files if self.is_file_safe(fname)]

        return sorted(set(files))

    def get_all_abs_files(self):
        files = self.get_all_relative_files()
        files = [self.abs_root_path(path) for path in files]
        return files

    def get_addable_relative_files(self):
        all_files = set(self.get_all_relative_files())
        inchat_files = set(self.get_inchat_relative_files())
        read_only_files = set(self.get_rel_fname(fname) for fname in self.abs_read_only_fnames)
        return all_files - inchat_files - read_only_files

    def check_for_dirty_commit(self, path):
        if not self.repo:
            return
        if not self.dirty_commits:
            return
        if not self.repo.is_dirty(path):
            return

        # We need a committed copy of the file in order to /undo, so skip this
        # fullp = Path(self.abs_root_path(path))
        # if not fullp.stat().st_size:
        #     return

        self.io.tool_output(f"Committing {path} before applying edits.")
        self.need_commit_before_edits.add(path)

    def allowed_to_edit(self, path):
        full_path = self.abs_root_path(path)
        if self.repo:
            need_to_add = not self.repo.path_in_repo(path)
        else:
            need_to_add = False

        if full_path in self.abs_fnames:
            self.check_for_dirty_commit(path)
            return True

        if self.repo and self.repo.git_ignored_file(path):
            self.io.tool_warning(f"Skipping edits to {path}, which matches the gitignore spec.")
            return

        if not Path(full_path).exists():
            if not self.io.confirm_ask("Create new file?", subject=path):
                self.io.tool_output(f"Skipping edits to {path}")
                return

            if not self.dry_run:
                if not utils.touch_file(full_path):
                    self.io.tool_error(f"Unable to create {path}, skipping edits.")
                    return

                # Seems unlikely that we needed to create the file, but it was
                # actually already part of the repo.
                # But let's only add if we need to, just to be safe.
                if need_to_add:
                    self.repo.repo.git.add(full_path)

            self.abs_fnames.add(full_path)
            self.check_added_files()
            return True

        if not self.io.confirm_ask(
            "Allow edits to file that has not been added to the chat?",
            subject=path,
        ):
            self.io.tool_output(f"Skipping edits to {path}")
            return

        if need_to_add:
            self.repo.repo.git.add(full_path)

        self.abs_fnames.add(full_path)
        self.check_added_files()
        self.check_for_dirty_commit(path)

        return True

    warning_given = False

    def check_added_files(self):
        if self.warning_given:
            return

        warn_number_of_files = 4
        warn_number_of_tokens = 20 * 1024

        num_files = len(self.abs_fnames)
        if num_files < warn_number_of_files:
            return

        tokens = 0
        for fname in self.abs_fnames:
            if is_image_file(fname):
                continue
            content = self.io.read_text(fname)
            tokens += self.main_model.token_count(content)

        if tokens < warn_number_of_tokens:
            return

        self.io.tool_warning("Warning: it's best to only add files that need changes to the chat.")
        self.io.tool_warning(urls.edit_errors)
        self.warning_given = True
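
    # The warning above fires at most once per coder, and only when both
    # thresholds are crossed: at least 4 files in the chat and roughly 20k
    # tokens of file content (20 * 1024 = 20,480).
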
    def prepare_to_edit(self, edits):
        res = []
        seen = dict()

        self.need_commit_before_edits = set()

        for edit in edits:
            path = edit[0]
            if path is None:
                res.append(edit)
                continue
            if path == "python":
                dump(edits)
            if path in seen:
                allowed = seen[path]
            else:
                allowed = self.allowed_to_edit(path)
                seen[path] = allowed

            if allowed:
                res.append(edit)

        self.dirty_commit()
        self.need_commit_before_edits = set()

        return res

    def apply_updates(self):
        edited = set()
        try:
            edits = self.get_edits()
            edits = self.apply_edits_dry_run(edits)
            edits = self.prepare_to_edit(edits)
            edited = set(edit[0] for edit in edits)

            self.apply_edits(edits)
        except ValueError as err:
            self.num_malformed_responses += 1

            err = err.args[0]

            self.io.tool_error("The LLM did not conform to the edit format.")
            self.io.tool_output(urls.edit_errors)
            self.io.tool_output()
            self.io.tool_output(str(err))

            self.reflected_message = str(err)
            return edited

        except ANY_GIT_ERROR as err:
            self.io.tool_error(str(err))
            return edited
        except Exception as err:
            self.io.tool_error("Exception while updating files:")
            self.io.tool_error(str(err), strip=False)

            traceback.print_exc()

            self.reflected_message = str(err)
            return edited

        for path in edited:
            if self.dry_run:
                self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
            else:
                self.io.tool_output(f"Applied edit to {path}")

        return edited

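    # apply_updates() above runs the edit pipeline in order: get_edits()
    # parses the LLM reply, apply_edits_dry_run() validates without touching
    # disk, prepare_to_edit() filters to permitted paths, and apply_edits()
    # writes the changes. A ValueError is treated as a malformed response and
    # reflected back to the LLM via self.reflected_message.
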
    def parse_partial_args(self):
        # dump(self.partial_response_function_call)

        data = self.partial_response_function_call.get("arguments")
        if not data:
            return

        try:
            return json.loads(data)
        except JSONDecodeError:
            pass

        try:
            return json.loads(data + "]}")
        except JSONDecodeError:
            pass

        try:
            return json.loads(data + "}]}")
        except JSONDecodeError:
            pass

        try:
            return json.loads(data + '"}]}')
        except JSONDecodeError:
            pass
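
    # Sketch of the truncation repair above: a function-call stream cut off
    # mid-arguments, e.g. '{"files": [{"path": "a.p' (a hypothetical payload
    # shape), fails plain json.loads() but parses once the '"}]}' suffix
    # closes the open string, object, and list. The three suffixes cover the
    # common nesting depths; anything else falls through and returns None.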

    # commits...

    def get_context_from_history(self, history):
        context = ""
        if history:
            for msg in history:
                context += "\n" + msg["role"].upper() + ": " + msg["content"] + "\n"

        return context

    def auto_commit(self, edited, context=None):
        if not self.repo or not self.auto_commits or self.dry_run:
            return

        if not context:
            context = self.get_context_from_history(self.cur_messages)

        try:
            res = self.repo.commit(fnames=edited, context=context, aider_edits=True, coder=self)
            if res:
                self.show_auto_commit_outcome(res)
                commit_hash, commit_message = res
                return self.gpt_prompts.files_content_gpt_edits.format(
                    hash=commit_hash,
                    message=commit_message,
                )

            return self.gpt_prompts.files_content_gpt_no_edits
        except ANY_GIT_ERROR as err:
            self.io.tool_error(f"Unable to commit: {str(err)}")
            return

    def show_auto_commit_outcome(self, res):
        commit_hash, commit_message = res
        self.last_aider_commit_hash = commit_hash
        self.aider_commit_hashes.add(commit_hash)
        self.last_aider_commit_message = commit_message
        if self.show_diffs:
            self.commands.cmd_diff()

    def show_undo_hint(self):
        if not self.commit_before_message:
            return
        if self.commit_before_message[-1] != self.repo.get_head_commit_sha():
            self.io.tool_output("You can use /undo to undo and discard each aider commit.")

    def dirty_commit(self):
        if not self.need_commit_before_edits:
            return
        if not self.dirty_commits:
            return
        if not self.repo:
            return

        self.repo.commit(fnames=self.need_commit_before_edits, coder=self)

        # files changed, move cur messages back behind the files messages
        # self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
        return True

    def get_edits(self, mode="update"):
        return []

    def apply_edits(self, edits):
        return

    def apply_edits_dry_run(self, edits):
        return edits
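
    # These three are deliberate no-op hooks: each concrete edit-format coder
    # (editblock, udiff, wholefile, etc.) overrides get_edits() and
    # apply_edits() with its own parsing and file-writing logic, while this
    # base class supplies the surrounding workflow.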

    def run_shell_commands(self):
        if not self.suggest_shell_commands:
            return ""

        done = set()
        group = ConfirmGroup(set(self.shell_commands))
        accumulated_output = ""
        for command in self.shell_commands:
            if command in done:
                continue
            done.add(command)
            output = self.handle_shell_commands(command, group)
            if output:
                accumulated_output += output + "\n\n"
        return accumulated_output

    def handle_shell_commands(self, commands_str, group):
        commands = commands_str.strip().splitlines()
        command_count = sum(
            1 for cmd in commands if cmd.strip() and not cmd.strip().startswith("#")
        )
        prompt = "Run shell command?" if command_count == 1 else "Run shell commands?"
        if not self.io.confirm_ask(
            prompt,
            subject="\n".join(commands),
            explicit_yes_required=True,
            group=group,
            allow_never=True,
        ):
            return

        accumulated_output = ""
        for command in commands:
            command = command.strip()
            if not command or command.startswith("#"):
                continue

            self.io.tool_output()
            self.io.tool_output(f"Running {command}")
            # Add the command to input history
            self.io.add_to_input_history(f"/run {command.strip()}")
            exit_status, output = run_cmd(command, error_print=self.io.tool_error, cwd=self.root)
            if output:
                accumulated_output += f"Output from {command}\n{output}\n"

        if accumulated_output.strip() and self.io.confirm_ask(
            "Add command output to the chat?", allow_never=True
        ):
            num_lines = len(accumulated_output.strip().splitlines())
            line_plural = "line" if num_lines == 1 else "lines"
            self.io.tool_output(f"Added {num_lines} {line_plural} of output to the chat.")
            return accumulated_output