cecli-dev 0.95.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cecli/__init__.py +20 -0
- cecli/__main__.py +4 -0
- cecli/_version.py +34 -0
- cecli/args.py +1092 -0
- cecli/args_formatter.py +228 -0
- cecli/change_tracker.py +133 -0
- cecli/coders/__init__.py +38 -0
- cecli/coders/agent_coder.py +1872 -0
- cecli/coders/architect_coder.py +63 -0
- cecli/coders/ask_coder.py +8 -0
- cecli/coders/base_coder.py +3993 -0
- cecli/coders/chat_chunks.py +116 -0
- cecli/coders/context_coder.py +52 -0
- cecli/coders/copypaste_coder.py +269 -0
- cecli/coders/editblock_coder.py +656 -0
- cecli/coders/editblock_fenced_coder.py +9 -0
- cecli/coders/editblock_func_coder.py +140 -0
- cecli/coders/editor_diff_fenced_coder.py +8 -0
- cecli/coders/editor_editblock_coder.py +8 -0
- cecli/coders/editor_whole_coder.py +8 -0
- cecli/coders/help_coder.py +15 -0
- cecli/coders/patch_coder.py +705 -0
- cecli/coders/search_replace.py +757 -0
- cecli/coders/shell.py +37 -0
- cecli/coders/single_wholefile_func_coder.py +101 -0
- cecli/coders/udiff_coder.py +428 -0
- cecli/coders/udiff_simple.py +12 -0
- cecli/coders/wholefile_coder.py +143 -0
- cecli/coders/wholefile_func_coder.py +133 -0
- cecli/commands/__init__.py +192 -0
- cecli/commands/add.py +226 -0
- cecli/commands/agent.py +51 -0
- cecli/commands/architect.py +46 -0
- cecli/commands/ask.py +44 -0
- cecli/commands/chat_mode.py +0 -0
- cecli/commands/clear.py +37 -0
- cecli/commands/code.py +46 -0
- cecli/commands/command_prefix.py +44 -0
- cecli/commands/commit.py +52 -0
- cecli/commands/context.py +47 -0
- cecli/commands/context_blocks.py +124 -0
- cecli/commands/context_management.py +51 -0
- cecli/commands/copy.py +62 -0
- cecli/commands/copy_context.py +81 -0
- cecli/commands/core.py +287 -0
- cecli/commands/diff.py +68 -0
- cecli/commands/drop.py +217 -0
- cecli/commands/editor.py +78 -0
- cecli/commands/exit.py +55 -0
- cecli/commands/git.py +57 -0
- cecli/commands/help.py +140 -0
- cecli/commands/history_search.py +40 -0
- cecli/commands/lint.py +109 -0
- cecli/commands/list_sessions.py +56 -0
- cecli/commands/load.py +85 -0
- cecli/commands/load_session.py +48 -0
- cecli/commands/load_skill.py +68 -0
- cecli/commands/ls.py +75 -0
- cecli/commands/map.py +37 -0
- cecli/commands/map_refresh.py +35 -0
- cecli/commands/model.py +118 -0
- cecli/commands/models.py +41 -0
- cecli/commands/multiline_mode.py +38 -0
- cecli/commands/paste.py +91 -0
- cecli/commands/quit.py +32 -0
- cecli/commands/read_only.py +267 -0
- cecli/commands/read_only_stub.py +270 -0
- cecli/commands/reasoning_effort.py +70 -0
- cecli/commands/remove_skill.py +68 -0
- cecli/commands/report.py +40 -0
- cecli/commands/reset.py +88 -0
- cecli/commands/run.py +99 -0
- cecli/commands/save.py +49 -0
- cecli/commands/save_session.py +43 -0
- cecli/commands/settings.py +69 -0
- cecli/commands/test.py +58 -0
- cecli/commands/think_tokens.py +74 -0
- cecli/commands/tokens.py +207 -0
- cecli/commands/undo.py +145 -0
- cecli/commands/utils/__init__.py +0 -0
- cecli/commands/utils/base_command.py +131 -0
- cecli/commands/utils/helpers.py +142 -0
- cecli/commands/utils/registry.py +53 -0
- cecli/commands/utils/save_load_manager.py +98 -0
- cecli/commands/voice.py +78 -0
- cecli/commands/weak_model.py +123 -0
- cecli/commands/web.py +87 -0
- cecli/deprecated_args.py +185 -0
- cecli/diffs.py +129 -0
- cecli/dump.py +29 -0
- cecli/editor.py +147 -0
- cecli/exceptions.py +115 -0
- cecli/format_settings.py +26 -0
- cecli/help.py +119 -0
- cecli/help_pats.py +19 -0
- cecli/helpers/__init__.py +9 -0
- cecli/helpers/copypaste.py +123 -0
- cecli/helpers/coroutines.py +8 -0
- cecli/helpers/file_searcher.py +142 -0
- cecli/helpers/model_providers.py +552 -0
- cecli/helpers/plugin_manager.py +81 -0
- cecli/helpers/profiler.py +162 -0
- cecli/helpers/requests.py +77 -0
- cecli/helpers/similarity.py +98 -0
- cecli/helpers/skills.py +577 -0
- cecli/history.py +186 -0
- cecli/io.py +1782 -0
- cecli/linter.py +304 -0
- cecli/llm.py +101 -0
- cecli/main.py +1280 -0
- cecli/mcp/__init__.py +154 -0
- cecli/mcp/oauth.py +250 -0
- cecli/mcp/server.py +278 -0
- cecli/mdstream.py +243 -0
- cecli/models.py +1255 -0
- cecli/onboarding.py +301 -0
- cecli/prompts/__init__.py +0 -0
- cecli/prompts/agent.yml +71 -0
- cecli/prompts/architect.yml +35 -0
- cecli/prompts/ask.yml +31 -0
- cecli/prompts/base.yml +99 -0
- cecli/prompts/context.yml +60 -0
- cecli/prompts/copypaste.yml +5 -0
- cecli/prompts/editblock.yml +143 -0
- cecli/prompts/editblock_fenced.yml +106 -0
- cecli/prompts/editblock_func.yml +25 -0
- cecli/prompts/editor_diff_fenced.yml +115 -0
- cecli/prompts/editor_editblock.yml +121 -0
- cecli/prompts/editor_whole.yml +46 -0
- cecli/prompts/help.yml +37 -0
- cecli/prompts/patch.yml +110 -0
- cecli/prompts/single_wholefile_func.yml +24 -0
- cecli/prompts/udiff.yml +106 -0
- cecli/prompts/udiff_simple.yml +13 -0
- cecli/prompts/utils/__init__.py +0 -0
- cecli/prompts/utils/prompt_registry.py +167 -0
- cecli/prompts/utils/system.py +56 -0
- cecli/prompts/wholefile.yml +50 -0
- cecli/prompts/wholefile_func.yml +24 -0
- cecli/queries/tree-sitter-language-pack/README.md +7 -0
- cecli/queries/tree-sitter-language-pack/arduino-tags.scm +5 -0
- cecli/queries/tree-sitter-language-pack/c-tags.scm +12 -0
- cecli/queries/tree-sitter-language-pack/chatito-tags.scm +16 -0
- cecli/queries/tree-sitter-language-pack/clojure-tags.scm +12 -0
- cecli/queries/tree-sitter-language-pack/commonlisp-tags.scm +127 -0
- cecli/queries/tree-sitter-language-pack/cpp-tags.scm +18 -0
- cecli/queries/tree-sitter-language-pack/csharp-tags.scm +32 -0
- cecli/queries/tree-sitter-language-pack/d-tags.scm +26 -0
- cecli/queries/tree-sitter-language-pack/dart-tags.scm +97 -0
- cecli/queries/tree-sitter-language-pack/elisp-tags.scm +5 -0
- cecli/queries/tree-sitter-language-pack/elixir-tags.scm +59 -0
- cecli/queries/tree-sitter-language-pack/elm-tags.scm +22 -0
- cecli/queries/tree-sitter-language-pack/gleam-tags.scm +41 -0
- cecli/queries/tree-sitter-language-pack/go-tags.scm +49 -0
- cecli/queries/tree-sitter-language-pack/java-tags.scm +26 -0
- cecli/queries/tree-sitter-language-pack/javascript-tags.scm +96 -0
- cecli/queries/tree-sitter-language-pack/lua-tags.scm +39 -0
- cecli/queries/tree-sitter-language-pack/matlab-tags.scm +10 -0
- cecli/queries/tree-sitter-language-pack/ocaml-tags.scm +115 -0
- cecli/queries/tree-sitter-language-pack/ocaml_interface-tags.scm +101 -0
- cecli/queries/tree-sitter-language-pack/pony-tags.scm +39 -0
- cecli/queries/tree-sitter-language-pack/properties-tags.scm +5 -0
- cecli/queries/tree-sitter-language-pack/python-tags.scm +24 -0
- cecli/queries/tree-sitter-language-pack/r-tags.scm +27 -0
- cecli/queries/tree-sitter-language-pack/racket-tags.scm +12 -0
- cecli/queries/tree-sitter-language-pack/ruby-tags.scm +69 -0
- cecli/queries/tree-sitter-language-pack/rust-tags.scm +63 -0
- cecli/queries/tree-sitter-language-pack/solidity-tags.scm +43 -0
- cecli/queries/tree-sitter-language-pack/swift-tags.scm +54 -0
- cecli/queries/tree-sitter-language-pack/udev-tags.scm +20 -0
- cecli/queries/tree-sitter-languages/README.md +24 -0
- cecli/queries/tree-sitter-languages/c-tags.scm +12 -0
- cecli/queries/tree-sitter-languages/c_sharp-tags.scm +52 -0
- cecli/queries/tree-sitter-languages/cpp-tags.scm +18 -0
- cecli/queries/tree-sitter-languages/dart-tags.scm +92 -0
- cecli/queries/tree-sitter-languages/elisp-tags.scm +8 -0
- cecli/queries/tree-sitter-languages/elixir-tags.scm +59 -0
- cecli/queries/tree-sitter-languages/elm-tags.scm +22 -0
- cecli/queries/tree-sitter-languages/fortran-tags.scm +18 -0
- cecli/queries/tree-sitter-languages/go-tags.scm +36 -0
- cecli/queries/tree-sitter-languages/haskell-tags.scm +5 -0
- cecli/queries/tree-sitter-languages/hcl-tags.scm +77 -0
- cecli/queries/tree-sitter-languages/java-tags.scm +26 -0
- cecli/queries/tree-sitter-languages/javascript-tags.scm +96 -0
- cecli/queries/tree-sitter-languages/julia-tags.scm +60 -0
- cecli/queries/tree-sitter-languages/kotlin-tags.scm +30 -0
- cecli/queries/tree-sitter-languages/matlab-tags.scm +10 -0
- cecli/queries/tree-sitter-languages/ocaml-tags.scm +115 -0
- cecli/queries/tree-sitter-languages/ocaml_interface-tags.scm +104 -0
- cecli/queries/tree-sitter-languages/php-tags.scm +32 -0
- cecli/queries/tree-sitter-languages/python-tags.scm +22 -0
- cecli/queries/tree-sitter-languages/ql-tags.scm +26 -0
- cecli/queries/tree-sitter-languages/ruby-tags.scm +69 -0
- cecli/queries/tree-sitter-languages/rust-tags.scm +63 -0
- cecli/queries/tree-sitter-languages/scala-tags.scm +64 -0
- cecli/queries/tree-sitter-languages/typescript-tags.scm +44 -0
- cecli/queries/tree-sitter-languages/zig-tags.scm +20 -0
- cecli/reasoning_tags.py +82 -0
- cecli/repo.py +626 -0
- cecli/repomap.py +1368 -0
- cecli/report.py +260 -0
- cecli/resources/__init__.py +3 -0
- cecli/resources/model-metadata.json +25751 -0
- cecli/resources/model-settings.yml +2394 -0
- cecli/resources/providers.json +67 -0
- cecli/run_cmd.py +143 -0
- cecli/scrape.py +295 -0
- cecli/sendchat.py +250 -0
- cecli/sessions.py +281 -0
- cecli/special.py +203 -0
- cecli/tools/__init__.py +72 -0
- cecli/tools/command.py +103 -0
- cecli/tools/command_interactive.py +113 -0
- cecli/tools/context_manager.py +175 -0
- cecli/tools/delete_block.py +154 -0
- cecli/tools/delete_line.py +120 -0
- cecli/tools/delete_lines.py +144 -0
- cecli/tools/extract_lines.py +281 -0
- cecli/tools/finished.py +35 -0
- cecli/tools/git_branch.py +132 -0
- cecli/tools/git_diff.py +49 -0
- cecli/tools/git_log.py +43 -0
- cecli/tools/git_remote.py +39 -0
- cecli/tools/git_show.py +37 -0
- cecli/tools/git_status.py +32 -0
- cecli/tools/grep.py +242 -0
- cecli/tools/indent_lines.py +195 -0
- cecli/tools/insert_block.py +263 -0
- cecli/tools/list_changes.py +71 -0
- cecli/tools/load_skill.py +51 -0
- cecli/tools/ls.py +77 -0
- cecli/tools/remove_skill.py +51 -0
- cecli/tools/replace_all.py +113 -0
- cecli/tools/replace_line.py +135 -0
- cecli/tools/replace_lines.py +180 -0
- cecli/tools/replace_text.py +186 -0
- cecli/tools/show_numbered_context.py +137 -0
- cecli/tools/thinking.py +52 -0
- cecli/tools/undo_change.py +82 -0
- cecli/tools/update_todo_list.py +148 -0
- cecli/tools/utils/base_tool.py +64 -0
- cecli/tools/utils/helpers.py +359 -0
- cecli/tools/utils/output.py +119 -0
- cecli/tools/utils/registry.py +145 -0
- cecli/tools/view_files_matching.py +138 -0
- cecli/tools/view_files_with_symbol.py +117 -0
- cecli/tui/__init__.py +83 -0
- cecli/tui/app.py +971 -0
- cecli/tui/io.py +566 -0
- cecli/tui/styles.tcss +117 -0
- cecli/tui/widgets/__init__.py +19 -0
- cecli/tui/widgets/completion_bar.py +331 -0
- cecli/tui/widgets/file_list.py +76 -0
- cecli/tui/widgets/footer.py +165 -0
- cecli/tui/widgets/input_area.py +320 -0
- cecli/tui/widgets/key_hints.py +16 -0
- cecli/tui/widgets/output.py +354 -0
- cecli/tui/widgets/status_bar.py +279 -0
- cecli/tui/worker.py +160 -0
- cecli/urls.py +16 -0
- cecli/utils.py +499 -0
- cecli/versioncheck.py +90 -0
- cecli/voice.py +90 -0
- cecli/waiting.py +38 -0
- cecli/watch.py +316 -0
- cecli/watch_prompts.py +12 -0
- cecli/website/Gemfile +8 -0
- cecli/website/_includes/blame.md +162 -0
- cecli/website/_includes/get-started.md +22 -0
- cecli/website/_includes/help-tip.md +5 -0
- cecli/website/_includes/help.md +24 -0
- cecli/website/_includes/install.md +5 -0
- cecli/website/_includes/keys.md +4 -0
- cecli/website/_includes/model-warnings.md +67 -0
- cecli/website/_includes/multi-line.md +22 -0
- cecli/website/_includes/python-m-aider.md +5 -0
- cecli/website/_includes/recording.css +228 -0
- cecli/website/_includes/recording.md +34 -0
- cecli/website/_includes/replit-pipx.md +9 -0
- cecli/website/_includes/works-best.md +1 -0
- cecli/website/_sass/custom/custom.scss +103 -0
- cecli/website/docs/config/adv-model-settings.md +2498 -0
- cecli/website/docs/config/agent-mode.md +320 -0
- cecli/website/docs/config/aider_conf.md +548 -0
- cecli/website/docs/config/api-keys.md +90 -0
- cecli/website/docs/config/custom-commands.md +187 -0
- cecli/website/docs/config/dotenv.md +493 -0
- cecli/website/docs/config/editor.md +127 -0
- cecli/website/docs/config/mcp.md +210 -0
- cecli/website/docs/config/model-aliases.md +173 -0
- cecli/website/docs/config/options.md +890 -0
- cecli/website/docs/config/reasoning.md +210 -0
- cecli/website/docs/config/skills.md +172 -0
- cecli/website/docs/config/tui.md +126 -0
- cecli/website/docs/config.md +44 -0
- cecli/website/docs/faq.md +379 -0
- cecli/website/docs/git.md +76 -0
- cecli/website/docs/index.md +47 -0
- cecli/website/docs/install/codespaces.md +39 -0
- cecli/website/docs/install/docker.md +48 -0
- cecli/website/docs/install/optional.md +100 -0
- cecli/website/docs/install/replit.md +8 -0
- cecli/website/docs/install.md +115 -0
- cecli/website/docs/languages.md +264 -0
- cecli/website/docs/legal/contributor-agreement.md +111 -0
- cecli/website/docs/legal/privacy.md +104 -0
- cecli/website/docs/llms/anthropic.md +77 -0
- cecli/website/docs/llms/azure.md +48 -0
- cecli/website/docs/llms/bedrock.md +132 -0
- cecli/website/docs/llms/cohere.md +34 -0
- cecli/website/docs/llms/deepseek.md +32 -0
- cecli/website/docs/llms/gemini.md +49 -0
- cecli/website/docs/llms/github.md +111 -0
- cecli/website/docs/llms/groq.md +36 -0
- cecli/website/docs/llms/lm-studio.md +39 -0
- cecli/website/docs/llms/ollama.md +75 -0
- cecli/website/docs/llms/openai-compat.md +39 -0
- cecli/website/docs/llms/openai.md +58 -0
- cecli/website/docs/llms/openrouter.md +78 -0
- cecli/website/docs/llms/other.md +117 -0
- cecli/website/docs/llms/vertex.md +50 -0
- cecli/website/docs/llms/warnings.md +10 -0
- cecli/website/docs/llms/xai.md +53 -0
- cecli/website/docs/llms.md +54 -0
- cecli/website/docs/more/analytics.md +127 -0
- cecli/website/docs/more/edit-formats.md +116 -0
- cecli/website/docs/more/infinite-output.md +192 -0
- cecli/website/docs/more-info.md +8 -0
- cecli/website/docs/recordings/auto-accept-architect.md +31 -0
- cecli/website/docs/recordings/dont-drop-original-read-files.md +35 -0
- cecli/website/docs/recordings/index.md +21 -0
- cecli/website/docs/recordings/model-accepts-settings.md +69 -0
- cecli/website/docs/recordings/tree-sitter-language-pack.md +80 -0
- cecli/website/docs/repomap.md +112 -0
- cecli/website/docs/scripting.md +100 -0
- cecli/website/docs/sessions.md +213 -0
- cecli/website/docs/troubleshooting/aider-not-found.md +24 -0
- cecli/website/docs/troubleshooting/edit-errors.md +76 -0
- cecli/website/docs/troubleshooting/imports.md +62 -0
- cecli/website/docs/troubleshooting/models-and-keys.md +54 -0
- cecli/website/docs/troubleshooting/support.md +79 -0
- cecli/website/docs/troubleshooting/token-limits.md +96 -0
- cecli/website/docs/troubleshooting/warnings.md +12 -0
- cecli/website/docs/troubleshooting.md +11 -0
- cecli/website/docs/usage/browser.md +57 -0
- cecli/website/docs/usage/caching.md +49 -0
- cecli/website/docs/usage/commands.md +133 -0
- cecli/website/docs/usage/conventions.md +119 -0
- cecli/website/docs/usage/copypaste.md +136 -0
- cecli/website/docs/usage/images-urls.md +48 -0
- cecli/website/docs/usage/lint-test.md +118 -0
- cecli/website/docs/usage/modes.md +211 -0
- cecli/website/docs/usage/not-code.md +179 -0
- cecli/website/docs/usage/notifications.md +87 -0
- cecli/website/docs/usage/tips.md +79 -0
- cecli/website/docs/usage/tutorials.md +30 -0
- cecli/website/docs/usage/voice.md +121 -0
- cecli/website/docs/usage/watch.md +294 -0
- cecli/website/docs/usage.md +102 -0
- cecli/website/share/index.md +101 -0
- cecli_dev-0.95.5.dist-info/METADATA +549 -0
- cecli_dev-0.95.5.dist-info/RECORD +366 -0
- cecli_dev-0.95.5.dist-info/WHEEL +5 -0
- cecli_dev-0.95.5.dist-info/entry_points.txt +4 -0
- cecli_dev-0.95.5.dist-info/licenses/LICENSE.txt +202 -0
- cecli_dev-0.95.5.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1872 @@
|
|
|
1
|
+
import ast
|
|
2
|
+
import asyncio
|
|
3
|
+
import base64
|
|
4
|
+
import json
|
|
5
|
+
import locale
|
|
6
|
+
import os
|
|
7
|
+
import platform
|
|
8
|
+
import re
|
|
9
|
+
import time
|
|
10
|
+
import traceback
|
|
11
|
+
from collections import Counter, defaultdict
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
|
|
15
|
+
from litellm import experimental_mcp_client
|
|
16
|
+
|
|
17
|
+
from cecli import urls, utils
|
|
18
|
+
from cecli.change_tracker import ChangeTracker
|
|
19
|
+
from cecli.helpers.similarity import (
|
|
20
|
+
cosine_similarity,
|
|
21
|
+
create_bigram_vector,
|
|
22
|
+
normalize_vector,
|
|
23
|
+
)
|
|
24
|
+
from cecli.helpers.skills import SkillsManager
|
|
25
|
+
from cecli.mcp.server import LocalServer
|
|
26
|
+
from cecli.repo import ANY_GIT_ERROR
|
|
27
|
+
from cecli.tools.utils.registry import ToolRegistry
|
|
28
|
+
|
|
29
|
+
from .base_coder import ChatChunks, Coder
|
|
30
|
+
from .editblock_coder import do_replace, find_original_update_blocks, find_similar_lines
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class AgentCoder(Coder):
|
|
34
|
+
"""Mode where the LLM autonomously manages which files are in context."""
|
|
35
|
+
|
|
36
|
+
edit_format = "agent"
|
|
37
|
+
prompt_format = "agent"
|
|
38
|
+
|
|
39
|
+
def __init__(self, *args, **kwargs):
    """
    Initialize agent-mode state before delegating to the base Coder.

    Sets up tool bookkeeping (usage history, similarity tracking for
    loop detection), the read/write tool name sets, context-block caches,
    and the parsed agent configuration, then builds the tool registry and
    finally calls ``super().__init__``.
    """
    # Tracking for files/tools the agent has recently touched.
    self.recently_removed = {}
    self.tool_usage_history = []
    self.tool_usage_retries = 10
    self.last_round_tools = []
    # Bigram vectors of recent tool calls, used to detect near-duplicate
    # (looping) calls via cosine similarity.
    self.tool_call_vectors = []
    self.tool_similarity_threshold = 0.99
    self.max_tool_vector_history = 10
    # Tools that only inspect state (normalized lowercase names).
    self.read_tools = {
        "viewfilesatglob",
        "viewfilesmatching",
        "ls",
        "viewfileswithsymbol",
        "grep",
        "listchanges",
        "extractlines",
        "shownumberedcontext",
    }
    # Tools that can modify files or run commands.
    self.write_tools = {
        "command",
        "commandinteractive",
        "insertblock",
        "replaceblock",
        "replaceall",
        "replacetext",
        "undochange",
    }
    self.max_tool_calls = 10000
    # Files above this token count get special large-file handling;
    # may be overridden by agent_config below.
    self.large_file_token_threshold = 25000
    self.context_management_enabled = True
    self.skills_manager = None
    self.change_tracker = ChangeTracker()
    self.args = kwargs.get("args")
    self.files_added_in_exploration = set()
    self.tool_call_count = 0
    self.max_reflections = 15
    self.use_enhanced_context = True
    self._last_edited_file = None
    self._cur_message_divider = None
    # Context-block bookkeeping: which blocks are enabled, their token
    # counts, and cached rendered content.
    self.allowed_context_blocks = set()
    self.context_block_tokens = {}
    self.context_blocks_cache = {}
    self.tokens_calculated = False
    self.skip_cli_confirmations = False
    self.agent_finished = False
    # NOTE(review): _get_agent_config may call self.io.tool_warning, but
    # self.io is presumably only set by super().__init__ below — confirm
    # that the warning path cannot fire before io exists.
    self.agent_config = self._get_agent_config()
    ToolRegistry.build_registry(agent_config=self.agent_config)
    super().__init__(*args, **kwargs)
|
|
87
|
+
|
|
88
|
+
def _get_agent_config(self):
    """
    Parse and return agent configuration from args.agent_config.

    Returns:
        dict: Agent configuration with defaults for missing values.

    Side effects: populates ``self.allowed_context_blocks``,
    ``self.large_file_token_threshold``, ``self.skip_cli_confirmations``,
    and initializes the skills manager.
    """
    config = {}
    raw = getattr(self.args, "agent_config", None) if getattr(self, "args", None) else None
    if raw:
        try:
            config = json.loads(raw)
        except (json.JSONDecodeError, TypeError) as e:
            # Bug fix: previously this returned {} immediately, skipping the
            # default population and skills-manager initialization below and
            # leaving self.skills_manager as None (which later crashes
            # show_announcements). Fall through with an empty config instead.
            self.io.tool_warning(f"Failed to parse agent-config JSON: {e}")
            config = {}

    # Fill in defaults for any missing keys.
    config.setdefault("large_file_token_threshold", 25000)
    config.setdefault("tools_paths", [])
    config.setdefault("tools_includelist", [])
    config.setdefault("tools_excludelist", [])

    if "include_context_blocks" in config:
        self.allowed_context_blocks = set(config["include_context_blocks"])
    else:
        self.allowed_context_blocks = {
            "context_summary",
            "directory_structure",
            "environment_info",
            "git_status",
            "symbol_outline",
            "todo_list",
            "skills",
        }

    # discard() is a no-op for absent entries, matching the original
    # try/remove/except KeyError behavior.
    for context_block in config.get("exclude_context_blocks", []):
        self.allowed_context_blocks.discard(context_block)

    self.large_file_token_threshold = config["large_file_token_threshold"]
    # "yolo" is the legacy key for skip_cli_confirmations.
    self.skip_cli_confirmations = config.get(
        "skip_cli_confirmations", config.get("yolo", False)
    )

    if "skills" in self.allowed_context_blocks:
        config.setdefault("skills_paths", [])
        config.setdefault("skills_includelist", [])
        config.setdefault("skills_excludelist", [])

    # Without a skills block or any skills paths, the skill tools are useless.
    if "skills" not in self.allowed_context_blocks or not config.get("skills_paths", []):
        config["tools_excludelist"].append("loadskill")
        config["tools_excludelist"].append("removeskill")

    self._initialize_skills_manager(config)
    return config
|
|
157
|
+
|
|
158
|
+
def _initialize_skills_manager(self, config):
    """
    Create the SkillsManager from the configured skill paths and
    include/exclude filters; on any failure, emit a warning and leave
    ``self.skills_manager`` unchanged.
    """
    try:
        manager_kwargs = {
            "directory_paths": config.get("skills_paths", []),
            "include_list": config.get("skills_includelist", []),
            "exclude_list": config.get("skills_excludelist", []),
            "git_root": str(self.repo.root) if self.repo else None,
            "coder": self,
        }
        self.skills_manager = SkillsManager(**manager_kwargs)
    except Exception as e:
        self.io.tool_warning(f"Failed to initialize skills manager: {str(e)}")
|
|
173
|
+
|
|
174
|
+
def show_announcements(self):
    """
    Show the base Coder announcements, plus the list of available skills.
    """
    super().show_announcements()
    # Bug fix: skills_manager is None when initialization failed (or never
    # ran); calling find_skills() on it raised AttributeError.
    if not self.skills_manager:
        return
    skills = self.skills_manager.find_skills()
    if skills:
        joined_skills = ", ".join(skill.name for skill in skills)
        self.io.tool_output(f"Available Skills: {joined_skills}")
|
|
183
|
+
|
|
184
|
+
def get_local_tool_schemas(self):
    """Returns the JSON schemas for all local tools using the tool registry."""
    tool_modules = (
        ToolRegistry.get_tool(name) for name in ToolRegistry.get_registered_tools()
    )
    return [module.SCHEMA for module in tool_modules if hasattr(module, "SCHEMA")]
|
|
192
|
+
|
|
193
|
+
async def initialize_mcp_tools(self):
    """
    Register the built-in local tools as a synthetic "Local" MCP server,
    after the base class has set up any configured external MCP servers.
    """
    await super().initialize_mcp_tools()
    # Bug fix: the original iterated self.mcp_tools in a membership check
    # *before* normalizing None to [], raising TypeError when the base
    # class left either attribute unset.
    self.mcp_tools = self.mcp_tools or []
    self.mcp_servers = self.mcp_servers or []

    server_name = "Local"
    if server_name in [name for name, _ in self.mcp_tools]:
        return
    local_tools = self.get_local_tool_schemas()
    if not local_tools:
        return

    local_server = LocalServer({"name": server_name})
    # Avoid registering a second LocalServer instance on repeated calls.
    if not any(isinstance(s, LocalServer) for s in self.mcp_servers):
        self.mcp_servers.append(local_server)
    self.mcp_tools.append((local_server.name, local_tools))
|
|
210
|
+
|
|
211
|
+
async def _execute_local_tool_calls(self, tool_calls_list):
    """
    Execute a batch of tool calls against the local tool registry (or,
    failing that, the configured MCP servers) and return one tool-role
    response message per call.

    Each call's arguments string may contain several concatenated JSON
    objects; each parsed object is dispatched as a separate invocation and
    the results are joined with blank lines.
    """
    tool_responses = []
    for tool_call in tool_calls_list:
        tool_name = tool_call.function.name
        result_message = ""
        try:
            args_string = tool_call.function.arguments.strip()
            parsed_args_list = []
            if args_string:
                json_chunks = utils.split_concatenated_json(args_string)
                for chunk in json_chunks:
                    try:
                        parsed_args_list.append(json.loads(chunk))
                    except json.JSONDecodeError:
                        self.io.tool_warning(
                            f"Could not parse JSON chunk for tool {tool_name}: {chunk}"
                        )
                        continue
            # No arguments at all means one call with empty params.
            if not parsed_args_list and not args_string:
                parsed_args_list.append({})
            all_results_content = []
            norm_tool_name = tool_name.lower()
            tasks = []
            if norm_tool_name in ToolRegistry.get_registered_tools():
                tool_module = ToolRegistry.get_tool(norm_tool_name)
                for params in parsed_args_list:
                    result = tool_module.process_response(self, params)
                    if asyncio.iscoroutine(result):
                        tasks.append(result)
                    else:
                        # Bug fix: a bare `lambda: result` late-binds `result`,
                        # so every deferred sync result would collapse to the
                        # last loop value. Bind it as a default argument.
                        tasks.append(asyncio.to_thread(lambda r=result: r))
            elif self.mcp_tools:
                for server_name, server_tools in self.mcp_tools:
                    if any(
                        t.get("function", {}).get("name") == norm_tool_name
                        for t in server_tools
                    ):
                        server = next(
                            (s for s in self.mcp_servers if s.name == server_name), None
                        )
                        if server:
                            for params in parsed_args_list:
                                tasks.append(
                                    self._execute_mcp_tool(server, norm_tool_name, params)
                                )
                        break
                else:
                    # for-else: no server advertises this tool.
                    all_results_content.append(f"Error: Unknown tool name '{tool_name}'")
            else:
                all_results_content.append(f"Error: Unknown tool name '{tool_name}'")
            if tasks:
                task_results = await asyncio.gather(*tasks)
                all_results_content.extend(str(res) for res in task_results)
            result_message = "\n\n".join(all_results_content)
        except Exception as e:
            result_message = f"Error executing {tool_name}: {e}"
            self.io.tool_error(f"""Error during {tool_name} execution: {e}
{traceback.format_exc()}""")
        tool_responses.append(
            {"role": "tool", "tool_call_id": tool_call.id, "content": result_message}
        )
    return tool_responses
|
|
273
|
+
|
|
274
|
+
async def _execute_mcp_tool(self, server, tool_name, params):
    """Helper to execute a single MCP tool call, created from legacy format."""
    # The original wrapped this body in a needless inner `_exec_async`
    # coroutine and immediately awaited it; the logic is inlined here.
    openai_tool_call = {
        "id": f"mcp-tool-call-{time.time()}",
        "function": {"name": tool_name, "arguments": json.dumps(params)},
        "type": "function",
    }
    try:
        session = await server.connect()
        call_result = await experimental_mcp_client.call_openai_tool(
            session=session, openai_tool=openai_tool_call
        )
        parts = []
        for item in call_result.content or []:
            if hasattr(item, "resource"):
                resource = item.resource
                if hasattr(resource, "text"):
                    parts.append(resource.text)
                elif hasattr(resource, "blob"):
                    # Blobs are base64; fall back to a placeholder when the
                    # decoded bytes are not valid UTF-8 text.
                    try:
                        parts.append(base64.b64decode(resource.blob).decode("utf-8"))
                    except (UnicodeDecodeError, TypeError):
                        name = getattr(resource, "name", "unnamed")
                        mime_type = getattr(resource, "mimeType", "unknown mime type")
                        parts.append(
                            f"[embedded binary resource: {name} ({mime_type})]"
                        )
            elif hasattr(item, "text"):
                parts.append(item.text)
        return "".join(parts)
    except Exception as e:
        self.io.tool_warning(f"""Executing {tool_name} on {server.name} failed:
Error: {e}
""")
        return f"Error executing tool call {tool_name}: {e}"
|
|
316
|
+
|
|
317
|
+
def _calculate_context_block_tokens(self, force=False):
    """
    Calculate token counts for all enhanced context blocks.

    This is the central method for calculating token counts, ensuring
    they're consistent across all parts of the code. It also repopulates
    the context-block content cache as a side effect of generation.

    Args:
        force: If True, recalculate tokens even if already calculated.
    """
    # getattr guard kept for safety if called before __init__ finishes.
    if getattr(self, "tokens_calculated", False) and not force:
        return
    self.context_block_tokens = {}
    # Reset the cache once (the original cleared it twice).
    self.context_blocks_cache = {}
    if not self.use_enhanced_context:
        return
    try:
        block_types = [
            "environment_info",
            "directory_structure",
            "git_status",
            "symbol_outline",
            "skills",
            "loaded_skills",
        ]
        for block_type in block_types:
            if block_type not in self.allowed_context_blocks:
                continue
            block_content = self._generate_context_block(block_type)
            if block_content:
                self.context_block_tokens[block_type] = self.main_model.token_count(
                    block_content
                )
        self.tokens_calculated = True
    except Exception as e:
        # Token counts are advisory, so stay best-effort — but surface the
        # failure instead of silently swallowing it (was: bare `pass`).
        self.io.tool_warning(f"Failed to calculate context block tokens: {e}")
|
|
355
|
+
|
|
356
|
+
def _generate_context_block(self, block_name):
    """
    Generate a specific context block and cache it.
    This is a helper method for get_cached_context_block.

    Returns the rendered block content, or None for unknown block names
    (unknown names are not cached).
    """
    # Dispatch table replaces the original if/elif chain.
    generators = {
        "environment_info": self.get_environment_info,
        "directory_structure": self.get_directory_structure,
        "git_status": self.get_git_status,
        "symbol_outline": self.get_context_symbol_outline,
        "context_summary": self.get_context_summary,
        "todo_list": self.get_todo_list,
        "skills": self.get_skills_context,
        "loaded_skills": self.get_skills_content,
    }
    generator = generators.get(block_name)
    content = generator() if generator else None
    if content is not None:
        self.context_blocks_cache[block_name] = content
    return content
|
|
381
|
+
|
|
382
|
+
def get_cached_context_block(self, block_name):
    """
    Get a context block from the cache, or generate it if not available.
    This should be used by format_chat_chunks to avoid regenerating blocks.

    Ensures token counts have been calculated before serving from cache.
    """
    if not getattr(self, "tokens_calculated", False):
        self._calculate_context_block_tokens()
    cache = getattr(self, "context_blocks_cache", None)
    if cache is not None and block_name in cache:
        return cache[block_name]
    return self._generate_context_block(block_name)
|
|
394
|
+
|
|
395
|
+
def get_context_symbol_outline(self):
    """
    Generate a symbol outline for files currently in context using Tree-sitter,
    bypassing the cache for freshness.

    Returns the outline as a '<context name="symbol_outline">' block string,
    or None when enhanced context is disabled, no RepoMap is available, or
    generation fails (the error is reported via self.io.tool_error).
    """
    # Enhanced context and a RepoMap instance are both required.
    if not self.use_enhanced_context or not self.repo_map:
        return None
    try:
        result = '<context name="symbol_outline">\n'
        result += "## Symbol Outline (Current Context)\n\n"
        result += """Code definitions (classes, functions, methods, etc.) found in files currently in chat context.

"""
        # Outline both editable and read-only files.
        files_to_outline = list(self.abs_fnames) + list(self.abs_read_only_fnames)
        if not files_to_outline:
            result += "No files currently in context.\n"
            result += "</context>"
            return result
        all_tags_by_file = defaultdict(list)
        has_symbols = False
        # NOTE(review): redundant guard — self.repo_map was already checked above.
        if not self.repo_map:
            self.io.tool_warning("RepoMap not initialized, cannot generate symbol outline.")
            return None
        for abs_fname in sorted(files_to_outline):
            rel_fname = self.get_rel_fname(abs_fname)
            try:
                tags = list(self.repo_map.get_tags_raw(abs_fname, rel_fname))
                if tags:
                    all_tags_by_file[rel_fname].extend(tags)
                    has_symbols = True
            except Exception as e:
                # A failure on one file should not abort the whole outline.
                self.io.tool_warning(f"Could not get symbols for {rel_fname}: {e}")
        if not has_symbols:
            result += "No symbols found in the current context files.\n"
        else:
            for rel_fname in sorted(all_tags_by_file.keys()):
                # Stable ordering: by position, then name.
                tags = sorted(all_tags_by_file[rel_fname], key=lambda t: (t.line, t.name))
                definition_tags = []
                for tag in tags:
                    # Prefer the specific kind (e.g. "method") over the generic one.
                    kind_to_check = tag.specific_kind or tag.kind
                    if (
                        kind_to_check
                        and kind_to_check.lower() in self.repo_map.definition_kinds
                    ):
                        definition_tags.append(tag)
                if definition_tags:
                    result += f"### {rel_fname}\n"
                    for tag in definition_tags:
                        # tag.line is 0-based; display 1-based line numbers.
                        line_info = f", line {tag.line + 1}" if tag.line >= 0 else ""
                        kind_to_check = tag.specific_kind or tag.kind
                        result += f"- {tag.name} ({kind_to_check}{line_info})\n"
                    result += "\n"
        result += "</context>"
        return result.strip()
    except Exception as e:
        self.io.tool_error(f"Error generating symbol outline: {str(e)}")
        return None
|
|
452
|
+
|
|
453
|
+
def format_chat_chunks(self):
    """
    Override parent's format_chat_chunks to include enhanced context blocks with a
    cleaner, more hierarchical structure for better organization.

    Optimized for prompt caching by placing context blocks strategically:
    1. Relatively static blocks (directory structure, environment info) before done_messages
    2. Dynamic blocks (context summary, symbol outline, git status) after chat_files

    This approach preserves prefix caching while providing fresh context information.

    Returns:
        ChatChunks: the assembled chunks, ready for all_messages().
    """
    # When enhanced context is off, behave exactly like the parent coder.
    if not self.use_enhanced_context:
        return super().format_chat_chunks()
    self.choose_fence()
    main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system)
    example_messages = []
    if self.main_model.examples_as_sys_msg:
        # Inline the example conversations into the system prompt.
        if self.gpt_prompts.example_messages:
            main_sys += "\n# Example conversations:\n\n"
        for msg in self.gpt_prompts.example_messages:
            role = msg["role"]
            content = self.fmt_system_prompt(msg["content"])
            main_sys += f"## {role.upper()}: {content}\n\n"
        main_sys = main_sys.strip()
    else:
        # Send examples as separate messages, followed by a reset exchange so
        # the model does not treat example files as live context.
        for msg in self.gpt_prompts.example_messages:
            example_messages.append(
                dict(role=msg["role"], content=self.fmt_system_prompt(msg["content"]))
            )
        if self.gpt_prompts.example_messages:
            example_messages += [
                dict(
                    role="user",
                    content=(
                        "I switched to a new code base. Please don't consider the above files"
                        " or try to edit them any longer."
                    ),
                ),
                dict(role="assistant", content="Ok."),
            ]
    if self.gpt_prompts.system_reminder:
        main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
    # Explicit chunk ordering drives how ChatChunks.all_messages() concatenates.
    chunks = ChatChunks(
        chunk_ordering=[
            "system",
            "static",
            "examples",
            "readonly_files",
            "repo",
            "chat_files",
            "pre_message",
            "done",
            "edit_files",
            "cur",
            "post_message",
            "reminder",
        ]
    )
    if self.main_model.use_system_prompt:
        chunks.system = [dict(role="system", content=main_sys)]
    else:
        # Models without a system role get the prompt as a user/assistant pair.
        chunks.system = [
            dict(role="user", content=main_sys),
            dict(role="assistant", content="Ok."),
        ]
    chunks.examples = example_messages
    self.summarize_end()
    cur_messages_list = list(self.cur_messages)
    cur_messages_pre = []
    cur_messages_post = cur_messages_list
    chunks.readonly_files = self.get_readonly_files_messages()
    chat_files_result = self.get_chat_files_messages()
    chunks.chat_files = chat_files_result.get("chat_files", [])
    chunks.edit_files = chat_files_result.get("edit_files", [])
    edit_file_names = chat_files_result.get("edit_file_names", set())
    # When the actively-edited file changed, split cur_messages at the divider so
    # the older portion joins done_messages and the cached prefix stays stable.
    divider = self._update_edit_file_tracking(edit_file_names)
    if divider is not None:
        if divider > 0 and divider < len(cur_messages_list):
            cur_messages_pre = cur_messages_list[:divider]
            cur_messages_post = cur_messages_list[divider:]
    chunks.repo = self.get_repo_messages()
    chunks.done = list(self.done_messages) + cur_messages_pre
    if self.gpt_prompts.system_reminder:
        reminder_message = [
            dict(
                role="system", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder)
            )
        ]
    else:
        reminder_message = []
    chunks.cur = cur_messages_post
    chunks.reminder = []
    self._calculate_context_block_tokens()
    chunks.static = []
    chunks.pre_message = []
    chunks.post_message = []
    # Assemble the three tiers of context blocks: static (cache-friendly,
    # early in the prompt), pre-message (fresh, after chat files), and
    # post-message (tool-usage guidance, last).
    static_blocks = []
    pre_message_blocks = []
    post_message_blocks = []
    if "environment_info" in self.allowed_context_blocks:
        block = self.get_cached_context_block("environment_info")
        static_blocks.append(block)
    if "directory_structure" in self.allowed_context_blocks:
        block = self.get_cached_context_block("directory_structure")
        static_blocks.append(block)
    if "skills" in self.allowed_context_blocks:
        # Skills are regenerated directly, bypassing the cache.
        block = self._generate_context_block("skills")
        static_blocks.append(block)
    if "symbol_outline" in self.allowed_context_blocks:
        block = self.get_cached_context_block("symbol_outline")
        pre_message_blocks.append(block)
    if "git_status" in self.allowed_context_blocks:
        block = self.get_cached_context_block("git_status")
        pre_message_blocks.append(block)
    if "todo_list" in self.allowed_context_blocks:
        block = self.get_cached_context_block("todo_list")
        pre_message_blocks.append(block)
    if "skills" in self.allowed_context_blocks:
        block = self._generate_context_block("loaded_skills")
        pre_message_blocks.append(block)
    if "context_summary" in self.allowed_context_blocks:
        # The summary goes first among the pre-message blocks.
        block = self.get_context_summary()
        pre_message_blocks.insert(0, block)
    if hasattr(self, "tool_usage_history") and self.tool_usage_history:
        repetitive_tools = self._get_repetitive_tools()
        if repetitive_tools:
            tool_context = self._generate_tool_context(repetitive_tools)
            if tool_context:
                post_message_blocks.append(tool_context)
        else:
            write_context = self._generate_write_context()
            if write_context:
                post_message_blocks.append(write_context)
    # Drop None/empty blocks and wrap the rest as system messages.
    if static_blocks:
        for block in static_blocks:
            if block:
                chunks.static.append(dict(role="system", content=block))
    if pre_message_blocks:
        for block in pre_message_blocks:
            if block:
                chunks.pre_message.append(dict(role="system", content=block))
    if post_message_blocks:
        for block in post_message_blocks:
            if block:
                chunks.post_message.append(dict(role="system", content=block))
    # Token budgeting: decide whether the reminder still fits.
    base_messages = chunks.all_messages()
    messages_tokens = self.main_model.token_count(base_messages)
    reminder_tokens = self.main_model.token_count(reminder_message)
    cur_tokens = self.main_model.token_count(chunks.cur)
    if None not in (messages_tokens, reminder_tokens, cur_tokens):
        total_tokens = messages_tokens
        if not chunks.reminder:
            total_tokens += reminder_tokens
        if not chunks.cur:
            total_tokens += cur_tokens
    else:
        # Token counting unavailable: treat as zero so the reminder is kept.
        total_tokens = 0
    if chunks.cur:
        final = chunks.cur[-1]
    else:
        final = None
    max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
    # NOTE(review): `or` binds looser than `and`, so this condition reads as
    # "no limit at all, OR (under the limit AND a reminder prompt exists)" —
    # confirm that is the intended grouping.
    if (
        not max_input_tokens
        or total_tokens < max_input_tokens
        and self.gpt_prompts.system_reminder
    ):
        if self.main_model.reminder == "sys":
            chunks.reminder = reminder_message
        elif self.main_model.reminder == "user" and final and final["role"] == "user":
            # Append the reminder to the final user message instead of adding
            # a new system message.
            new_content = (
                final["content"]
                + "\n\n"
                + self.fmt_system_prompt(self.gpt_prompts.system_reminder)
            )
            chunks.cur[-1] = dict(role=final["role"], content=new_content)
    if self.verbose:
        self._log_chunks(chunks)
    return chunks
|
|
632
|
+
|
|
633
|
+
def _update_edit_file_tracking(self, edit_file_names):
    """
    Update tracking for the last edited file and the cur_messages divider used
    for prompt-caching efficiency.

    When the file currently being edited changes, the divider is set so that
    only the most recent `kept_messages` entries of cur_messages remain on the
    "current" side; format_chat_chunks moves everything before the divider into
    done_messages, preserving the cached prompt prefix.

    Returns:
        int: the current divider index into cur_messages (0 means no split).
    """
    # Number of trailing cur_messages to keep on the "current" side.
    kept_messages = 8
    if not edit_file_names:
        # NOTE(review): no early return here — the code below still runs and can
        # overwrite this 0 when the tracked file changes to None. Confirm intended.
        self._cur_message_divider = 0
    sorted_edit_files = sorted(edit_file_names)
    # Track only the alphabetically-first edit file as "the" current file.
    current_edited_file = sorted_edit_files[0] if sorted_edit_files else None
    if current_edited_file != self._last_edited_file:
        self._last_edited_file = current_edited_file
        cur_messages_list = list(self.cur_messages)
        if len(cur_messages_list) > kept_messages:
            self._cur_message_divider = len(cur_messages_list) - kept_messages
        else:
            self._cur_message_divider = 0
    return self._cur_message_divider
|
|
654
|
+
|
|
655
|
+
def get_context_summary(self):
    """
    Generate a summary of the current context, including file content tokens and
    additional context blocks, with an accurate total token count.

    The result is cached in self.context_blocks_cache under "context_summary".
    Returns the summary string, or None when enhanced context is disabled or
    generation fails (the error is reported via self.io.tool_error).
    """
    if not self.use_enhanced_context:
        return None
    # Serve a previously-built summary from the cache.
    if hasattr(self, "context_blocks_cache") and "context_summary" in self.context_blocks_cache:
        return self.context_blocks_cache["context_summary"]
    try:
        # Make sure per-block token counts exist before summing them below.
        if not hasattr(self, "context_block_tokens") or not self.context_block_tokens:
            self._calculate_context_block_tokens()
        result = '<context name="context_summary">\n'
        result += "## Current Context Overview\n\n"
        max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
        if max_input_tokens:
            result += f"Model context limit: {max_input_tokens:,} tokens\n\n"
        total_file_tokens = 0
        editable_tokens = 0
        readonly_tokens = 0
        editable_files = []
        readonly_files = []
        if self.abs_fnames:
            result += "### Editable Files\n\n"
            for fname in sorted(self.abs_fnames):
                rel_fname = self.get_rel_fname(fname)
                content = self.io.read_text(fname)
                # Unreadable files are silently skipped from the listing.
                if content is not None:
                    tokens = self.main_model.token_count(content)
                    total_file_tokens += tokens
                    editable_tokens += tokens
                    # Rough size buckets: >5000 large, >1000 medium, else small.
                    size_indicator = (
                        "🔴 Large"
                        if tokens > 5000
                        else "🟡 Medium" if tokens > 1000 else "🟢 Small"
                    )
                    editable_files.append(
                        f"- {rel_fname}: {tokens:,} tokens ({size_indicator})"
                    )
            if editable_files:
                result += "\n".join(editable_files) + "\n\n"
                result += f"""**Total editable: {len(editable_files)} files, {editable_tokens:,} tokens**

"""
        else:
            result += "No editable files in context\n\n"
        if self.abs_read_only_fnames:
            result += "### Read-Only Files\n\n"
            for fname in sorted(self.abs_read_only_fnames):
                rel_fname = self.get_rel_fname(fname)
                content = self.io.read_text(fname)
                if content is not None:
                    tokens = self.main_model.token_count(content)
                    total_file_tokens += tokens
                    readonly_tokens += tokens
                    size_indicator = (
                        "🔴 Large"
                        if tokens > 5000
                        else "🟡 Medium" if tokens > 1000 else "🟢 Small"
                    )
                    readonly_files.append(
                        f"- {rel_fname}: {tokens:,} tokens ({size_indicator})"
                    )
            if readonly_files:
                result += "\n".join(readonly_files) + "\n\n"
                result += f"""**Total read-only: {len(readonly_files)} files, {readonly_tokens:,} tokens**

"""
        else:
            result += "No read-only files in context\n\n"
        # Tokens consumed by the non-file context blocks.
        extra_tokens = sum(self.context_block_tokens.values())
        total_tokens = total_file_tokens + extra_tokens
        result += f"**Total files usage: {total_file_tokens:,} tokens**\n\n"
        result += f"**Additional context usage: {extra_tokens:,} tokens**\n\n"
        result += f"**Total context usage: {total_tokens:,} tokens**"
        if max_input_tokens:
            percentage = total_tokens / max_input_tokens * 100
            result += f" ({percentage:.1f}% of limit)"
            # Warn once usage crosses 80% of the model limit.
            if percentage > 80:
                result += "\n\n⚠️ **Context is getting full!**\n"
                result += "- Remove non-essential files via the `ContextManager` tool.\n"
                result += "- Keep only essential files in context for best performance"
        result += "\n</context>"
        # Cache the result for subsequent calls in this round.
        if not hasattr(self, "context_blocks_cache"):
            self.context_blocks_cache = {}
        self.context_blocks_cache["context_summary"] = result
        return result
    except Exception as e:
        self.io.tool_error(f"Error generating context summary: {str(e)}")
        return None
|
|
745
|
+
|
|
746
|
+
def get_environment_info(self):
    """Return an "environment_info" context block describing the session.

    Includes working directory, current date, platform, language preference,
    git repository details, and enabled features. Returns None when enhanced
    context is disabled, or when generation fails (the error is reported via
    self.io.tool_error).
    """
    if not self.use_enhanced_context:
        return None
    try:
        lines = ['<context name="environment_info">']
        lines.append("## Environment Information\n")
        lines.append(f"- Working directory: {self.root}")
        lines.append(f"- Current date: {datetime.now().strftime('%Y-%m-%d')}")
        lines.append(f"- Platform: {platform.platform()}")
        # Explicit chat language wins; fall back to the locale, then en-US.
        preferred = self.chat_language or locale.getlocale()[0] or "en-US"
        lines.append(f"- Language preference: {preferred}")
        if self.repo:
            try:
                repo_dir = self.repo.get_rel_repo_dir()
                tracked = len(self.repo.get_tracked_files())
                lines.append(f"- Git repository: {repo_dir} with {tracked:,} files")
            except Exception:
                # Repo exists but querying it failed; degrade gracefully.
                lines.append("- Git repository: active but details unavailable")
        else:
            lines.append("- Git repository: none")
        enabled = [
            label
            for flag, label in (
                (self.context_management_enabled, "context management"),
                (self.use_enhanced_context, "enhanced context blocks"),
            )
            if flag
        ]
        if enabled:
            lines.append(f"- Enabled features: {', '.join(enabled)}")
        lines.append("</context>")
        return "\n".join(lines)
    except Exception as e:
        self.io.tool_error(f"Error generating environment info: {str(e)}")
        return None
|
|
784
|
+
|
|
785
|
+
async def process_tool_calls(self, tool_call_response):
    """
    Track tool usage before delegating to the base implementation.

    Records the tool names used this round, appends a normalized bigram vector
    per tool call (for repetition detection), trims both histories to their
    configured limits, and then calls super().process_tool_calls().
    """
    self.agent_finished = False
    await self.auto_save_session()
    self.last_round_tools = []
    if self.partial_response_tool_calls:
        for tool_call in self.partial_response_tool_calls:
            tool_name = tool_call.get("function", {}).get("name")
            if tool_name:
                self.last_round_tools.append(tool_name)
            # Vectorize the call without its "id" so identical calls compare equal.
            tool_call_copy = tool_call.copy()
            if "id" in tool_call_copy:
                del tool_call_copy["id"]
            tool_call_str = str(tool_call_copy)
            tool_vector = create_bigram_vector((tool_call_str,))
            tool_vector_norm = normalize_vector(tool_vector)
            self.tool_call_vectors.append(tool_vector_norm)
    if self.last_round_tools:
        self.tool_usage_history += self.last_round_tools
        self.tool_usage_history = list(filter(None, self.tool_usage_history))
        # NOTE(review): several names may be appended above, but only one entry
        # is popped per call — the history can exceed tool_usage_retries.
        if len(self.tool_usage_history) > self.tool_usage_retries:
            self.tool_usage_history.pop(0)
    if len(self.tool_call_vectors) > self.max_tool_vector_history:
        self.tool_call_vectors.pop(0)
    return await super().process_tool_calls(tool_call_response)
|
|
812
|
+
|
|
813
|
+
async def reply_completed(self):
    """Process the completed response from the LLM.

    This is a key method that:
    1. Processes any tool commands in the response (only after a '---' line)
    2. Processes any SEARCH/REPLACE blocks in the response (only before the '---' line if one exists)
    3. If tool commands were found, sets up for another automatic round

    This enables the "auto-exploration" workflow where the LLM can
    iteratively discover and analyze relevant files before providing
    a final answer to the user's question.

    Returns:
        bool: True to stop the loop (empty response); False to continue
        (either another reflection round or normal completion handling).
    """
    content = self.partial_response_content
    if not content or not content.strip():
        # Empty reply: reset an over-long tool history and end this round.
        if len(self.tool_usage_history) > self.tool_usage_retries:
            self.tool_usage_history = []
        return True
    original_content = content
    (
        processed_content,
        result_messages,
        tool_calls_found,
        content_before_last_separator,
        tool_names_this_turn,
    ) = await self._process_tool_commands(content)
    if self.agent_finished:
        # The agent declared itself done: commit any tool edits and stop.
        self.tool_usage_history = []
        if self.files_edited_by_tools:
            _ = await self.auto_commit(self.files_edited_by_tools)
        return False
    self.partial_response_content = processed_content.strip()
    self._process_file_mentions(processed_content)
    # Detect a complete SEARCH/REPLACE block anywhere in the processed content.
    has_search = "<<<<<<< SEARCH" in self.partial_response_content
    has_divider = "=======" in self.partial_response_content
    has_replace = ">>>>>>> REPLACE" in self.partial_response_content
    edit_match = has_search and has_divider and has_replace
    separator_marker = "\n---\n"
    if separator_marker in original_content and edit_match:
        # When a '---' separator exists, edits only count if they appear
        # before the last separator (tool calls live after it).
        has_search_before = "<<<<<<< SEARCH" in content_before_last_separator
        has_divider_before = "=======" in content_before_last_separator
        has_replace_before = ">>>>>>> REPLACE" in content_before_last_separator
        edit_match = has_search_before and has_divider_before and has_replace_before
    if edit_match:
        self.io.tool_output("Detected edit blocks, applying changes within Agent...")
        edited_files = await self._apply_edits_from_response()
        if self.reflected_message:
            return False
        if edited_files and self.num_reflections < self.max_reflections:
            if self.cur_messages and len(self.cur_messages) >= 1:
                # Recover the most recent user question to re-anchor the agent.
                for msg in reversed(self.cur_messages):
                    if msg["role"] == "user":
                        original_question = msg["content"]
                        break
                else:
                    # for-else: no user message found in cur_messages.
                    original_question = (
                        "Please continue your exploration and provide a final answer."
                    )
            # NOTE(review): if cur_messages is empty, original_question is never
            # bound and the f-string below raises NameError — confirm reachable.
            next_prompt = f"""
I have applied the edits you suggested.
The following files were modified: {', '.join(edited_files)}. Let me continue working on your request.
Your original question was: {original_question}"""
            self.reflected_message = next_prompt
            self.io.tool_output("Continuing after applying edits...")
            return False
    if tool_calls_found and self.num_reflections < self.max_reflections:
        # Tool calls ran this round: reset per-round counters and reflect again.
        self.tool_call_count = 0
        self.files_added_in_exploration = set()
        if self.cur_messages and len(self.cur_messages) >= 1:
            for msg in reversed(self.cur_messages):
                if msg["role"] == "user":
                    original_question = msg["content"]
                    break
            else:
                original_question = (
                    "Please continue your exploration and provide a final answer."
                )
        next_prompt_parts = []
        next_prompt_parts.append(
            "I have processed the results of the previous tool calls. Let me analyze them"
            " and continue working towards your request."
        )
        if result_messages:
            next_prompt_parts.append("\nResults from previous tool calls:")
            next_prompt_parts.extend(result_messages)
            next_prompt_parts.append("""
Based on these results and the updated file context, I will proceed.""")
        else:
            next_prompt_parts.append("""
No specific results were returned from the previous tool calls, but the file context may have been updated.
I will proceed based on the current context.""")
        next_prompt_parts.append(f"\nYour original question was: {original_question}")
        self.reflected_message = "\n".join(next_prompt_parts)
        self.io.tool_output("Continuing exploration...")
        return False
    elif result_messages:
        # No further reflection: surface tool results in the visible reply.
        results_block = "\n\n" + "\n".join(result_messages)
        self.partial_response_content += results_block
    if self.files_edited_by_tools:
        # Commit tool-made edits and fold the exchange back into history.
        saved_message = await self.auto_commit(self.files_edited_by_tools)
        if not saved_message and hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
            saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo
        self.move_back_cur_messages(saved_message)
        self.tool_call_count = 0
        self.files_added_in_exploration = set()
        self.files_edited_by_tools = set()
    self.move_back_cur_messages(None)
    return False
|
|
920
|
+
|
|
921
|
+
async def _execute_tool_with_registry(self, norm_tool_name, params):
    """
    Execute a tool using the tool registry, falling back to MCP servers.

    Args:
        norm_tool_name: Normalized tool name (lowercase)
        params: Dictionary of parameters

    Returns:
        str: Result message (tool output, or an "Error: ..." string when the
        tool fails, its MCP server cannot be found, or the name is unknown)
    """
    # Built-in tools registered with ToolRegistry take precedence.
    if norm_tool_name in ToolRegistry.get_registered_tools():
        tool_module = ToolRegistry.get_tool(norm_tool_name)
        try:
            result = tool_module.process_response(self, params)
            # Tools may be sync or async; await only when needed.
            if asyncio.iscoroutine(result):
                result = await result
            return result
        except Exception as e:
            self.io.tool_error(f"""Error during {norm_tool_name} execution: {e}
{traceback.format_exc()}""")
            return f"Error executing {norm_tool_name}: {str(e)}"
    # Otherwise look for the tool among the configured MCP servers.
    if self.mcp_tools:
        for server_name, server_tools in self.mcp_tools:
            if any(t.get("function", {}).get("name") == norm_tool_name for t in server_tools):
                server = next((s for s in self.mcp_servers if s.name == server_name), None)
                if server:
                    return await self._execute_mcp_tool(server, norm_tool_name, params)
                else:
                    return f"Error: Could not find server instance for {server_name}"
    return f"Error: Unknown tool name '{norm_tool_name}'"
|
|
952
|
+
|
|
953
|
+
def _convert_concatenated_json_to_tool_calls(self, content):
    """
    Check if content contains concatenated JSON objects and convert them to tool call format.

    Args:
        content (str): Content to check for concatenated JSON

    Returns:
        str: Content with concatenated JSON converted to tool call format, or the
        original content if no convertible JSON was found (or conversion failed).
    """
    try:
        json_chunks = utils.split_concatenated_json(content)
        if len(json_chunks) >= 1:
            tool_calls = []
            for chunk in json_chunks:
                try:
                    json_obj = json.loads(chunk)
                    # Only objects shaped like {"name": ..., "arguments": {...}}
                    # are rewritten; anything else passes through verbatim.
                    if (
                        isinstance(json_obj, dict)
                        and "name" in json_obj
                        and "arguments" in json_obj
                    ):
                        tool_name = json_obj["name"]
                        arguments = json_obj["arguments"]
                        kw_args = []
                        for key, value in arguments.items():
                            if isinstance(value, str):
                                # NOTE(review): only double quotes are escaped;
                                # a value containing backslashes may round-trip
                                # incorrectly — confirm against the tool-call parser.
                                escaped_value = value.replace('"', '\\"')
                                kw_args.append(f'{key}="{escaped_value}"')
                            elif isinstance(value, bool):
                                # JSON-style lowercase true/false.
                                kw_args.append(f"{key}={str(value).lower()}")
                            elif value is None:
                                kw_args.append(f"{key}=None")
                            else:
                                kw_args.append(f"{key}={repr(value)}")
                        kw_args_str = ", ".join(kw_args)
                        tool_call = f"[tool_call({tool_name}, {kw_args_str})]"
                        tool_calls.append(tool_call)
                    else:
                        tool_calls.append(chunk)
                except json.JSONDecodeError:
                    # Non-JSON chunk: keep it as-is.
                    tool_calls.append(chunk)
            # Only rewrite the content if at least one chunk became a tool call.
            if any(call.startswith("[tool_") for call in tool_calls):
                return "".join(tool_calls)
    except Exception as e:
        self.io.tool_warning(f"Error converting concatenated JSON to tool calls: {str(e)}")
    return content
|
|
1000
|
+
|
|
1001
|
+
async def _process_tool_commands(self, content):
|
|
1002
|
+
"""
|
|
1003
|
+
Process tool commands in the `[tool_call(name, param=value)]` format within the content.
|
|
1004
|
+
|
|
1005
|
+
Rules:
|
|
1006
|
+
1. Tool calls must appear after the LAST '---' line separator in the content
|
|
1007
|
+
2. Any tool calls before this last separator are treated as text (not executed)
|
|
1008
|
+
3. SEARCH/REPLACE blocks can only appear before this last separator
|
|
1009
|
+
|
|
1010
|
+
Returns processed content, result messages, and a flag indicating if any tool calls were found.
|
|
1011
|
+
Also returns the content before the last separator for SEARCH/REPLACE block validation.
|
|
1012
|
+
"""
|
|
1013
|
+
result_messages = []
|
|
1014
|
+
modified_content = content
|
|
1015
|
+
tool_calls_found = False
|
|
1016
|
+
call_count = 0
|
|
1017
|
+
max_calls = self.max_tool_calls
|
|
1018
|
+
tool_names = []
|
|
1019
|
+
content = self._convert_concatenated_json_to_tool_calls(content)
|
|
1020
|
+
separator_marker = "---"
|
|
1021
|
+
content_parts = content.split(separator_marker)
|
|
1022
|
+
if len(content_parts) == 1:
|
|
1023
|
+
tool_call_pattern = "\\[tool_call\\([^\\]]+\\)\\]"
|
|
1024
|
+
if re.search(tool_call_pattern, content):
|
|
1025
|
+
content_before_separator = ""
|
|
1026
|
+
content_after_separator = content
|
|
1027
|
+
else:
|
|
1028
|
+
return content, result_messages, False, content, tool_names
|
|
1029
|
+
content_before_separator = separator_marker.join(content_parts[:-1])
|
|
1030
|
+
content_after_separator = content_parts[-1]
|
|
1031
|
+
processed_content = content_before_separator + separator_marker
|
|
1032
|
+
last_index = 0
|
|
1033
|
+
tool_call_pattern = re.compile("\\[tool_.*?\\(", re.DOTALL)
|
|
1034
|
+
end_marker = "]"
|
|
1035
|
+
while True:
|
|
1036
|
+
match = tool_call_pattern.search(content_after_separator, last_index)
|
|
1037
|
+
if not match:
|
|
1038
|
+
processed_content += content_after_separator[last_index:]
|
|
1039
|
+
break
|
|
1040
|
+
start_pos = match.start()
|
|
1041
|
+
start_marker = match.group(0)
|
|
1042
|
+
backslashes = 0
|
|
1043
|
+
p = start_pos - 1
|
|
1044
|
+
while p >= 0 and content_after_separator[p] == "\\":
|
|
1045
|
+
backslashes += 1
|
|
1046
|
+
p -= 1
|
|
1047
|
+
if backslashes % 2 == 1:
|
|
1048
|
+
processed_content += content_after_separator[
|
|
1049
|
+
last_index : start_pos + len(start_marker)
|
|
1050
|
+
]
|
|
1051
|
+
last_index = start_pos + len(start_marker)
|
|
1052
|
+
continue
|
|
1053
|
+
processed_content += content_after_separator[last_index:start_pos]
|
|
1054
|
+
scan_start_pos = start_pos + len(start_marker)
|
|
1055
|
+
paren_level = 1
|
|
1056
|
+
in_single_quotes = False
|
|
1057
|
+
in_double_quotes = False
|
|
1058
|
+
escaped = False
|
|
1059
|
+
end_paren_pos = -1
|
|
1060
|
+
for i in range(scan_start_pos, len(content_after_separator)):
|
|
1061
|
+
char = content_after_separator[i]
|
|
1062
|
+
if escaped:
|
|
1063
|
+
escaped = False
|
|
1064
|
+
elif char == "\\":
|
|
1065
|
+
escaped = True
|
|
1066
|
+
elif char == "'" and not in_double_quotes:
|
|
1067
|
+
in_single_quotes = not in_single_quotes
|
|
1068
|
+
elif char == '"' and not in_single_quotes:
|
|
1069
|
+
in_double_quotes = not in_double_quotes
|
|
1070
|
+
elif char == "(" and not in_single_quotes and not in_double_quotes:
|
|
1071
|
+
paren_level += 1
|
|
1072
|
+
elif char == ")" and not in_single_quotes and not in_double_quotes:
|
|
1073
|
+
paren_level -= 1
|
|
1074
|
+
if paren_level == 0:
|
|
1075
|
+
end_paren_pos = i
|
|
1076
|
+
break
|
|
1077
|
+
expected_end_marker_start = end_paren_pos + 1
|
|
1078
|
+
actual_end_marker_start = -1
|
|
1079
|
+
end_marker_found = False
|
|
1080
|
+
if end_paren_pos != -1:
|
|
1081
|
+
for j in range(expected_end_marker_start, len(content_after_separator)):
|
|
1082
|
+
if not content_after_separator[j].isspace():
|
|
1083
|
+
actual_end_marker_start = j
|
|
1084
|
+
if content_after_separator[actual_end_marker_start] == end_marker:
|
|
1085
|
+
end_marker_found = True
|
|
1086
|
+
break
|
|
1087
|
+
if not end_marker_found:
|
|
1088
|
+
tool_name = "unknown"
|
|
1089
|
+
try:
|
|
1090
|
+
partial_content = content_after_separator[scan_start_pos : scan_start_pos + 100]
|
|
1091
|
+
comma_pos = partial_content.find(",")
|
|
1092
|
+
if comma_pos > 0:
|
|
1093
|
+
tool_name = partial_content[:comma_pos].strip()
|
|
1094
|
+
else:
|
|
1095
|
+
space_pos = partial_content.find(" ")
|
|
1096
|
+
paren_pos = partial_content.find("(")
|
|
1097
|
+
if space_pos > 0 and (paren_pos < 0 or space_pos < paren_pos):
|
|
1098
|
+
tool_name = partial_content[:space_pos].strip()
|
|
1099
|
+
elif paren_pos > 0:
|
|
1100
|
+
tool_name = partial_content[:paren_pos].strip()
|
|
1101
|
+
except Exception:
|
|
1102
|
+
pass
|
|
1103
|
+
self.io.tool_warning(
|
|
1104
|
+
f"Malformed tool call for '{tool_name}'. Missing closing parenthesis or"
|
|
1105
|
+
" bracket. Skipping."
|
|
1106
|
+
)
|
|
1107
|
+
processed_content += start_marker
|
|
1108
|
+
last_index = scan_start_pos
|
|
1109
|
+
continue
|
|
1110
|
+
full_match_str = content_after_separator[start_pos : actual_end_marker_start + 1]
|
|
1111
|
+
inner_content = content_after_separator[scan_start_pos:end_paren_pos].strip()
|
|
1112
|
+
last_index = actual_end_marker_start + 1
|
|
1113
|
+
call_count += 1
|
|
1114
|
+
if call_count > max_calls:
|
|
1115
|
+
self.io.tool_warning(
|
|
1116
|
+
f"Exceeded maximum tool calls ({max_calls}). Skipping remaining calls."
|
|
1117
|
+
)
|
|
1118
|
+
continue
|
|
1119
|
+
tool_calls_found = True
|
|
1120
|
+
tool_name = None
|
|
1121
|
+
params = {}
|
|
1122
|
+
result_message = None
|
|
1123
|
+
tool_calls_found = True
|
|
1124
|
+
try:
|
|
1125
|
+
if inner_content:
|
|
1126
|
+
parts = inner_content.split(",", 1)
|
|
1127
|
+
potential_tool_name = parts[0].strip()
|
|
1128
|
+
is_string = (
|
|
1129
|
+
potential_tool_name.startswith("'")
|
|
1130
|
+
and potential_tool_name.endswith("'")
|
|
1131
|
+
or potential_tool_name.startswith('"')
|
|
1132
|
+
and potential_tool_name.endswith('"')
|
|
1133
|
+
)
|
|
1134
|
+
if not potential_tool_name.isidentifier() and not is_string:
|
|
1135
|
+
quoted_tool_name = json.dumps(potential_tool_name)
|
|
1136
|
+
if len(parts) > 1:
|
|
1137
|
+
inner_content = quoted_tool_name + ", " + parts[1]
|
|
1138
|
+
else:
|
|
1139
|
+
inner_content = quoted_tool_name
|
|
1140
|
+
parse_str = f"f({inner_content})"
|
|
1141
|
+
parsed_ast = ast.parse(parse_str)
|
|
1142
|
+
if (
|
|
1143
|
+
not isinstance(parsed_ast, ast.Module)
|
|
1144
|
+
or not parsed_ast.body
|
|
1145
|
+
or not isinstance(parsed_ast.body[0], ast.Expr)
|
|
1146
|
+
):
|
|
1147
|
+
raise ValueError("Unexpected AST structure")
|
|
1148
|
+
call_node = parsed_ast.body[0].value
|
|
1149
|
+
if not isinstance(call_node, ast.Call):
|
|
1150
|
+
raise ValueError("Expected a Call node")
|
|
1151
|
+
if not call_node.args:
|
|
1152
|
+
raise ValueError("Tool name not found or invalid")
|
|
1153
|
+
tool_name_node = call_node.args[0]
|
|
1154
|
+
if isinstance(tool_name_node, ast.Name):
|
|
1155
|
+
tool_name = tool_name_node.id
|
|
1156
|
+
elif isinstance(tool_name_node, ast.Constant) and isinstance(
|
|
1157
|
+
tool_name_node.value, str
|
|
1158
|
+
):
|
|
1159
|
+
tool_name = tool_name_node.value
|
|
1160
|
+
else:
|
|
1161
|
+
raise ValueError("Tool name must be an identifier or a string literal")
|
|
1162
|
+
tool_names.append(tool_name)
|
|
1163
|
+
for keyword in call_node.keywords:
|
|
1164
|
+
key = keyword.arg
|
|
1165
|
+
value_node = keyword.value
|
|
1166
|
+
if isinstance(value_node, ast.Constant):
|
|
1167
|
+
value = value_node.value
|
|
1168
|
+
if isinstance(value, str) and "\n" in value:
|
|
1169
|
+
lineno = value_node.lineno if hasattr(value_node, "lineno") else 0
|
|
1170
|
+
end_lineno = (
|
|
1171
|
+
value_node.end_lineno
|
|
1172
|
+
if hasattr(value_node, "end_lineno")
|
|
1173
|
+
else lineno
|
|
1174
|
+
)
|
|
1175
|
+
if end_lineno > lineno:
|
|
1176
|
+
if value.startswith("\n"):
|
|
1177
|
+
value = value[1:]
|
|
1178
|
+
if value.endswith("\n"):
|
|
1179
|
+
value = value[:-1]
|
|
1180
|
+
elif isinstance(value_node, ast.Name):
|
|
1181
|
+
id_val = value_node.id.lower()
|
|
1182
|
+
if id_val == "true":
|
|
1183
|
+
value = True
|
|
1184
|
+
elif id_val == "false":
|
|
1185
|
+
value = False
|
|
1186
|
+
elif id_val == "none":
|
|
1187
|
+
value = None
|
|
1188
|
+
else:
|
|
1189
|
+
value = value_node.id
|
|
1190
|
+
else:
|
|
1191
|
+
try:
|
|
1192
|
+
value = ast.unparse(value_node)
|
|
1193
|
+
except AttributeError:
|
|
1194
|
+
raise ValueError(
|
|
1195
|
+
f"Unsupported argument type for key '{key}': {type(value_node)}"
|
|
1196
|
+
)
|
|
1197
|
+
except Exception as unparse_e:
|
|
1198
|
+
raise ValueError(
|
|
1199
|
+
f"Could not unparse value for key '{key}': {unparse_e}"
|
|
1200
|
+
)
|
|
1201
|
+
suppressed_arg_values = ["..."]
|
|
1202
|
+
if isinstance(value, str) and value in suppressed_arg_values:
|
|
1203
|
+
self.io.tool_warning(
|
|
1204
|
+
f"Skipping suppressed argument value '{value}' for key '{key}' in tool"
|
|
1205
|
+
f" '{tool_name}'"
|
|
1206
|
+
)
|
|
1207
|
+
continue
|
|
1208
|
+
params[key] = value
|
|
1209
|
+
except (SyntaxError, ValueError) as e:
|
|
1210
|
+
result_message = f"Error parsing tool call '{inner_content}': {e}"
|
|
1211
|
+
self.io.tool_error(f"Failed to parse tool call: {full_match_str}\nError: {e}")
|
|
1212
|
+
result_messages.append(f"[Result (Parse Error): {result_message}]")
|
|
1213
|
+
continue
|
|
1214
|
+
except Exception as e:
|
|
1215
|
+
result_message = f"Unexpected error parsing tool call '{inner_content}': {e}"
|
|
1216
|
+
self.io.tool_error(f"""Unexpected error during parsing: {full_match_str}
|
|
1217
|
+
Error: {e}
|
|
1218
|
+
{traceback.format_exc()}""")
|
|
1219
|
+
result_messages.append(f"[Result (Parse Error): {result_message}]")
|
|
1220
|
+
continue
|
|
1221
|
+
try:
|
|
1222
|
+
norm_tool_name = tool_name.lower()
|
|
1223
|
+
result_message = await self._execute_tool_with_registry(norm_tool_name, params)
|
|
1224
|
+
except Exception as e:
|
|
1225
|
+
result_message = f"Error executing {tool_name}: {str(e)}"
|
|
1226
|
+
self.io.tool_error(f"""Error during {tool_name} execution: {e}
|
|
1227
|
+
{traceback.format_exc()}""")
|
|
1228
|
+
if result_message:
|
|
1229
|
+
result_messages.append(f"[Result ({tool_name}): {result_message}]")
|
|
1230
|
+
self.tool_call_count += call_count
|
|
1231
|
+
modified_content = processed_content
|
|
1232
|
+
return (
|
|
1233
|
+
modified_content,
|
|
1234
|
+
result_messages,
|
|
1235
|
+
tool_calls_found,
|
|
1236
|
+
content_before_separator,
|
|
1237
|
+
tool_names,
|
|
1238
|
+
)
|
|
1239
|
+
|
|
1240
|
+
def _get_repetitive_tools(self):
    """
    Identify repetitive tool usage patterns from recorded rounds of tool calls.

    Combines count-based and similarity-based detection:

    1. If the last round contained a write tool, progress is assumed: the
       usage history is reset and only similarity-based repetition (if any)
       is reported.
    2. If every recorded tool is a read tool, all of them are flagged as
       potentially repetitive.
    3. Otherwise, any read tool used 5 or more times across rounds is
       flagged, unioned with similarity-based repetition.

    Returns:
        set: Tool names considered repetitive (empty when the history is too
        short or no repetition is detected).
    """
    # Too little history to call anything "repetitive".
    if len(self.tool_usage_history) < 5:
        return set()

    similarity_repetitive_tools = self._get_repetitive_tools_by_similarity()

    # Flatten the per-round lists into one sequence of tool names.
    all_tools = [tool for round_tools in self.tool_usage_history for tool in round_tools]

    if self.last_round_tools:
        last_round_has_write = any(
            tool.lower() in self.write_tools for tool in self.last_round_tools
        )
        if last_round_has_write:
            # A write tool suggests real progress: reset the history and only
            # report similarity-based repetition (an empty set when none).
            self.tool_usage_history = []
            return similarity_repetitive_tools

    # Pure read-only exploration across all rounds: flag everything.
    if all(tool.lower() in self.read_tools for tool in all_tools):
        return set(all_tools)

    # Count-based detection: read tools used 5+ times across the history.
    tool_counts = Counter(all_tools)
    count_repetitive_tools = {
        tool
        for tool, count in tool_counts.items()
        if count >= 5 and tool.lower() in self.read_tools
    }
    return count_repetitive_tools | similarity_repetitive_tools
def _get_repetitive_tools_by_similarity(self):
    """
    Identifies repetitive tool usage patterns using cosine similarity on tool call strings.

    Compares the most recent tool-call vector against each earlier vector in
    self.tool_call_vectors; a match at or above self.tool_similarity_threshold
    is treated as repetition.

    Returns:
        set: Set of tool names that are repetitive based on similarity
    """
    # Need history plus at least two recorded vectors to compare against.
    if not self.tool_usage_history or len(self.tool_call_vectors) < 2:
        return set()
    latest_vector = self.tool_call_vectors[-1]
    # Scan every earlier vector; the first sufficiently-similar one wins.
    for i, historical_vector in enumerate(self.tool_call_vectors[:-1]):
        similarity = cosine_similarity(latest_vector, historical_vector)
        if similarity >= self.tool_similarity_threshold:
            # Guard against length drift between vectors and usage history.
            if i < len(self.tool_usage_history):
                # NOTE(review): if tool_usage_history entries are per-round
                # lists (as the extend() in _get_repetitive_tools suggests),
                # putting one inside a set literal would raise TypeError
                # (unhashable) — confirm entries are hashable tool names here.
                return {self.tool_usage_history[i]}
    return set()
def _generate_tool_context(self, repetitive_tools):
    """
    Build a <context> block for the LLM summarizing recent tool usage.

    Includes turn/call statistics, the last (up to 10) history entries, and —
    when repetitive_tools is non-empty — an instruction to stop using those
    tools plus suggestions for alternative approaches. Returns "" when there
    is no usage history.
    """
    if not self.tool_usage_history:
        return ""

    parts = [
        '<context name="tool_usage_history">',
        "## Turn and Tool Call Statistics",
        f"- Current turn: {self.num_reflections + 1}",
        f"- Total tool calls this turn: {self.num_tool_calls}",
        "\n\n",
        "## Recent Tool Usage History",
    ]

    history = self.tool_usage_history
    if len(history) > 10:
        history = history[-10:]
        parts.append("(Showing last 10 tools)")
    for idx, entry in enumerate(history, 1):
        parts.append(f"{idx}. {entry}")
    parts.append("\n\n")

    if repetitive_tools:
        parts.append("**Instruction:**\nYou have used the following tool(s) repeatedly:")
        parts.append("### DO NOT USE THE FOLLOWING TOOLS/FUNCTIONS")
        parts.extend(f"- `{entry}`" for entry in repetitive_tools)
        parts.extend(
            [
                "Your exploration appears to be stuck in a loop. Please try a different approach."
                " Use the `Thinking` tool to clarify your intentions and new approach to what you"
                " are currently attempting to accomplish.",
                "\n",
                "**Suggestions for alternative approaches:**",
                "- If you've been searching for files, try working with the files already in"
                " context",
                "- If you've been viewing files, try making actual edits to move forward",
                "- Consider using different tools that you haven't used recently",
                "- Focus on making concrete progress rather than gathering more information",
                "- Use the files you've already discovered to implement the requested changes",
                "\n",
                "You most likely have enough context for a subset of the necessary changes.",
                "Please prioritize file editing over further exploration.",
            ]
        )

    parts.append("</context>")
    return "\n".join(parts)
def _generate_write_context(self):
    """
    Build a short <context> block emitted right after a write tool ran.

    Returns the reminder block when the last round of tool calls included a
    write tool, and "" otherwise.
    """
    if not self.last_round_tools:
        return ""
    wrote_something = any(
        entry.lower() in self.write_tools for entry in self.last_round_tools
    )
    if not wrote_something:
        return ""
    return (
        '<context name="tool_usage_history">\n'
        "A file was just edited.\n"
        " Do not just modify comments and/or logging statements with placeholder"
        " information.\n"
        "Make sure that something of value was done.</context>"
    )
async def _apply_edits_from_response(self):
    """
    Parses and applies SEARCH/REPLACE edits found in self.partial_response_content.
    Returns a set of relative file paths that were successfully edited.

    Pipeline: parse blocks -> route shell commands -> permission-check paths ->
    dirty-commit -> apply each block (with cross-file fallback) -> report
    failures back to the LLM -> auto-commit / lint / shell / test follow-ups.
    Parse and git errors are caught and surfaced via self.reflected_message.
    """
    edited_files = set()
    try:
        edits = list(
            find_original_update_blocks(
                self.partial_response_content, self.fence, self.get_inchat_relative_files()
            )
        )
        # Blocks with a None path are shell commands, not file edits.
        self.shell_commands += [edit[1] for edit in edits if edit[0] is None]
        edits = [edit for edit in edits if edit[0] is not None]
        prepared_edits = []
        # Cache the allowed_to_edit answer per path so we only ask once.
        seen_paths = dict()
        self.need_commit_before_edits = set()
        for edit in edits:
            path = edit[0]
            if path in seen_paths:
                allowed = seen_paths[path]
            else:
                allowed = await self.allowed_to_edit(path)
                seen_paths[path] = allowed
            if allowed:
                prepared_edits.append(edit)
        # Commit any pre-existing dirty state before touching files.
        await self.dirty_commit()
        self.need_commit_before_edits = set()
        failed = []
        passed = []
        for edit in prepared_edits:
            path, original, updated = edit
            full_path = self.abs_root_path(path)
            new_content = None
            if Path(full_path).exists():
                content = self.io.read_text(full_path)
                new_content = do_replace(full_path, content, original, updated, self.fence)
            # Fallback: the SEARCH text may actually live in a different
            # in-chat file; try each one before declaring failure.
            if not new_content and original.strip():
                for other_full_path in self.abs_fnames:
                    if other_full_path == full_path:
                        continue
                    other_content = self.io.read_text(other_full_path)
                    other_new_content = do_replace(
                        other_full_path, other_content, original, updated, self.fence
                    )
                    if other_new_content:
                        path = self.get_rel_fname(other_full_path)
                        full_path = other_full_path
                        new_content = other_new_content
                        self.io.tool_warning(f"Applied edit intended for {edit[0]} to {path}")
                        break
            if new_content:
                if not self.dry_run:
                    self.io.write_text(full_path, new_content)
                    self.io.tool_output(f"Applied edit to {path}")
                else:
                    self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
                passed.append((path, original, updated))
            else:
                failed.append(edit)
        if failed:
            # Build a detailed error report for the LLM to retry from.
            blocks = "block" if len(failed) == 1 else "blocks"
            error_message = f"# {len(failed)} SEARCH/REPLACE {blocks} failed to match!\n"
            for edit in failed:
                path, original, updated = edit
                full_path = self.abs_root_path(path)
                content = self.io.read_text(full_path)
                error_message += f"""
## SearchReplaceNoExactMatch: This SEARCH block failed to exactly match lines in {path}
<<<<<<< SEARCH
{original}=======
{updated}>>>>>>> REPLACE

"""
                # Offer the closest actual lines as a hint, when any exist.
                did_you_mean = find_similar_lines(original, content)
                if did_you_mean:
                    error_message += f"""Did you mean to match some of these actual lines from {path}?

{self.fence[0]}
{did_you_mean}
{self.fence[1]}

"""
                # The REPLACE text already being present usually means the
                # edit was applied earlier or is unnecessary.
                if updated in content and updated:
                    error_message += f"""Are you sure you need this SEARCH/REPLACE block?
The REPLACE lines are already in {path}!

"""
            error_message += (
                "The SEARCH section must exactly match an existing block of lines including all"
                " white space, comments, indentation, docstrings, etc"
            )
            if passed:
                pblocks = "block" if len(passed) == 1 else "blocks"
                error_message += f"""
# The other {len(passed)} SEARCH/REPLACE {pblocks} were applied successfully.
Don't re-send them.
Just reply with fixed versions of the {blocks} above that failed to match.
"""
            self.io.tool_error(error_message)
            # Feeding the report back triggers a reflection/retry turn.
            self.reflected_message = error_message
        edited_files = set(edit[0] for edit in passed)
        if edited_files:
            self.coder_edited_files.update(edited_files)
            self.auto_commit(edited_files)
            if self.auto_lint:
                lint_errors = self.lint_edited(edited_files)
                # Commit again in case the linter auto-fixed anything.
                self.auto_commit(edited_files, context="Ran the linter")
                if lint_errors and not self.reflected_message:
                    ok = await self.io.confirm_ask("Attempt to fix lint errors?")
                    if ok:
                        self.reflected_message = lint_errors
        shared_output = await self.run_shell_commands()
        if shared_output:
            self.io.tool_output("Shell command output:\n" + shared_output)
        if self.auto_test and not self.reflected_message:
            test_errors = await self.commands.cmd_test(self.test_cmd)
            if test_errors:
                ok = await self.io.confirm_ask("Attempt to fix test errors?")
                if ok:
                    self.reflected_message = test_errors
        self.show_undo_hint()
    except ValueError as err:
        # find_original_update_blocks raises ValueError on malformed blocks.
        self.num_malformed_responses += 1
        error_message = err.args[0]
        self.io.tool_error("The LLM did not conform to the edit format.")
        self.io.tool_output(urls.edit_errors)
        self.io.tool_output()
        self.io.tool_output(str(error_message))
        self.reflected_message = str(error_message)
    except ANY_GIT_ERROR as err:
        self.io.tool_error(f"Git error during edit application: {str(err)}")
        self.reflected_message = f"Git error during edit application: {str(err)}"
    except Exception as err:
        self.io.tool_error("Exception while applying edits:")
        self.io.tool_error(str(err), strip=False)
        self.io.tool_error(traceback.format_exc())
        self.reflected_message = f"Exception while applying edits: {str(err)}"
    return edited_files
def _add_file_to_context(self, file_path, explicit=False):
    """
    Helper method to add a file to context as read-only.

    Parameters:
    - file_path: Path to the file to add
    - explicit: Whether this was an explicit view command (vs. implicit through ViewFilesMatching)

    Returns a short status string describing the outcome. User-visible output
    is only emitted for explicit requests (and for warnings/errors).
    """
    abs_path = self.abs_root_path(file_path)
    rel_path = self.get_rel_fname(abs_path)
    if not os.path.isfile(abs_path):
        self.io.tool_output(f"⚠️ File '{file_path}' not found")
        return "File not found"
    # Already-in-context cases: same return value either way; only the
    # user-visible message depends on `explicit` (duplicate returns collapsed).
    if abs_path in self.abs_fnames:
        if explicit:
            self.io.tool_output(f"📎 File '{file_path}' already in context as editable")
        return "File already in context as editable"
    if abs_path in self.abs_read_only_fnames:
        if explicit:
            self.io.tool_output(f"📎 File '{file_path}' already in context as read-only")
        return "File already in context as read-only"
    try:
        content = self.io.read_text(abs_path)
        if content is None:
            return f"Error reading file: {file_path}"
        # Warn (but still add) when the file is large enough to matter for
        # context truncation.
        if self.context_management_enabled:
            file_tokens = self.main_model.token_count(content)
            if file_tokens > self.large_file_token_threshold:
                self.io.tool_output(
                    f"⚠️ '{file_path}' is very large ({file_tokens} tokens). Use"
                    " /context-management to toggle truncation off if needed."
                )
        self.abs_read_only_fnames.add(abs_path)
        self.files_added_in_exploration.add(rel_path)
        if explicit:
            self.io.tool_output(f"📎 Viewed '{file_path}' (added to context as read-only)")
            return "Viewed file (added to context as read-only)"
        return "Added file to context as read-only"
    except Exception as e:
        self.io.tool_error(f"Error adding file '{file_path}' for viewing: {str(e)}")
        return f"Error adding file for viewing: {str(e)}"
def _process_file_mentions(self, content):
    """
    Scan the LLM output for implicit file-path mentions.

    In agent mode this is intentionally a no-op: files are added to the chat
    only via explicit tool commands, so any mentions found here are ignored.
    """
    # The scans are retained in case get_file_mentions or
    # get_inchat_relative_files perform internal bookkeeping (presumably
    # pure — TODO confirm), but their results are deliberately discarded.
    # The original's dead `mentioned_files - current_files` expression (a
    # pure set operation whose result was thrown away) has been removed.
    mentioned_files = set(self.get_file_mentions(content, ignore_current=False))
    current_files = set(self.get_inchat_relative_files())
    del mentioned_files, current_files
async def check_for_file_mentions(self, content):
    """
    Override parent's method to disable implicit file mention handling in agent mode.

    Files should only be added via explicit tool commands
    (`View`, `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`),
    so this override intentionally does nothing.
    """
    pass
async def preproc_user_input(self, inp):
    """
    Run the parent preprocessing, then wrap the result in a user-input block.

    The wrapper clearly delineates user input from the other sections of the
    context window; input that is empty or already wrapped is returned as-is.
    """
    processed = await super().preproc_user_input(inp)
    if not processed:
        return processed
    marker = '<context name="user_input">'
    if processed.startswith(marker):
        return processed
    return f"{marker}\n{processed}\n</context>"
def get_directory_structure(self):
    """
    Generate a structured directory listing of the project file structure.

    Returns a formatted string representation of the directory tree wrapped in
    a <context> block, or None when enhanced context is disabled or an error
    occurs. File lists come from git (tracked + untracked, .gitignore-aware)
    when a repo is available, otherwise from walking self.root.
    """
    if not self.use_enhanced_context:
        return None
    try:
        result = '<context name="directoryStructure">\n'
        result += "## Project File Structure\n\n"
        result += (
            "Below is a snapshot of this project's file structure at the current time. "
            "It skips over .gitignore patterns."
        )
        # Separate the intro paragraph from the tree listing (the original
        # concatenated the first tree line directly onto the paragraph).
        result += "\n\n"
        if self.repo:
            tracked_files = self.repo.get_tracked_files()
            untracked_files = []
            try:
                # "??" porcelain status lines are untracked files.
                untracked_output = self.repo.repo.git.status("--porcelain")
                for line in untracked_output.splitlines():
                    if line.startswith("??"):
                        untracked_file = line[3:]
                        if not self.repo.ignored_file(untracked_file):
                            untracked_files.append(untracked_file)
            except Exception as e:
                self.io.tool_warning(f"Error getting untracked files: {str(e)}")
            all_files = tracked_files + untracked_files
        else:
            all_files = []
            for path in Path(self.root).rglob("*"):
                if path.is_file():
                    all_files.append(str(path.relative_to(self.root)))
        all_files = sorted(all_files)
        # Hide this tool's own bookkeeping files from the snapshot.
        all_files = [
            f for f in all_files if not any(part.startswith(".cecli") for part in f.split("/"))
        ]
        # Build a nested dict tree; the "." key holds files in each directory.
        tree = {}
        for file in all_files:
            parts = file.split("/")
            current = tree
            for i, part in enumerate(parts):
                if i == len(parts) - 1:
                    if "." not in current:
                        current["."] = []
                    current["."].append(part)
                else:
                    if part not in current:
                        current[part] = {}
                    current = current[part]

        def print_tree(node, prefix="- ", indent=" "):
            # Render directories first (recursively indented), then files.
            lines = []
            for dir_name in sorted(k for k in node.keys() if k != "."):
                lines.append(f"{prefix}{dir_name}/")
                for sub_line in print_tree(node[dir_name], prefix=prefix, indent=indent):
                    lines.append(f"{indent}{sub_line}")
            if "." in node:
                for file_name in sorted(node["."]):
                    lines.append(f"{prefix}{file_name}")
            return lines

        tree_lines = print_tree(tree, prefix="- ")
        result += "\n".join(tree_lines)
        result += "\n</context>"
        return result
    except Exception as e:
        self.io.tool_error(f"Error generating directory structure: {str(e)}")
        return None
def get_todo_list(self):
    """
    Generate a todo list context block from the .cecli.todo.txt file.

    Returns a formatted string with the current todo list, a prompt asking the
    LLM to create one when the file does not exist, or None when the list is
    empty or unreadable.
    """
    try:
        todo_file_path = ".cecli.todo.txt"
        abs_path = self.abs_root_path(todo_file_path)
        if not os.path.isfile(abs_path):
            # Tool name fixed: was `UpdataTodoList`, which told the LLM to
            # call a misspelled (nonexistent) tool; the correct name
            # `UpdateTodoList` matches the literal used below.
            return """<context name="todo_list">
Todo list does not exist. Please update it with the `UpdateTodoList` tool.</context>"""
        content = self.io.read_text(abs_path)
        if content is None or not content.strip():
            return None
        result = '<context name="todo_list">\n'
        result += "## Current Todo List\n\n"
        result += "Below is the current todo list managed via the `UpdateTodoList` tool:\n\n"
        result += f"```\n{content}\n```\n"
        result += "</context>"
        return result
    except Exception as e:
        self.io.tool_error(f"Error generating todo list context: {str(e)}")
        return None
def get_skills_context(self):
    """
    Generate a context block describing the available skills.

    Returns:
        Formatted context block string, or None when enhanced context is
        disabled, no skills manager is configured, or the manager raises.
    """
    if not (self.use_enhanced_context and self.skills_manager):
        return None
    try:
        return self.skills_manager.get_skills_context()
    except Exception as e:
        self.io.tool_error(f"Error generating skills context: {str(e)}")
        return None
def get_skills_content(self):
    """
    Generate a context block with the actual content of the loaded skills.

    Returns:
        Formatted context block string with skill contents, or None when
        enhanced context is disabled, no skills manager is configured, or
        the manager raises.
    """
    if not (self.use_enhanced_context and self.skills_manager):
        return None
    try:
        return self.skills_manager.get_skills_content()
    except Exception as e:
        self.io.tool_error(f"Error generating skills content context: {str(e)}")
        return None
def get_git_status(self):
    """
    Generate a git status context block for repository information.

    Builds a snapshot containing the current branch, the main/master
    branch name (when one exists), a porcelain-style working-tree status
    (with `.cecli` bookkeeping files filtered out), and the five most
    recent commit subjects.

    Returns:
        Formatted '<context name="gitStatus">' block string, or None when
        enhanced context is disabled, no repo is attached, or generation
        fails entirely. Each sub-section degrades independently on error.
    """
    if not self.use_enhanced_context or not self.repo:
        return None
    try:
        result = '<context name="gitStatus">\n'
        result += "## Git Repository Status\n\n"
        result += "This is a snapshot of the git status at the current time.\n"
        try:
            current_branch = self.repo.repo.active_branch.name
            result += f"Current branch: {current_branch}\n\n"
        except Exception:
            # GitPython's active_branch raises when HEAD is detached.
            result += "Current branch: (detached HEAD state)\n\n"
        try:
            for branch in self.repo.repo.branches:
                if branch.name in ("main", "master"):
                    result += (
                        f"Main branch (you will usually use this for PRs): {branch.name}\n\n"
                    )
                    break
        except Exception:
            pass
        result += "Status:\n"
        try:
            status = self.repo.repo.git.status("--porcelain")
            if status:
                # Bucket files by the prefix we will print, in output order:
                # staged adds/mods/dels, then unstaged mods/dels, then untracked.
                staged_added = []
                staged_modified = []
                staged_deleted = []
                unstaged_modified = []
                unstaged_deleted = []
                untracked = []
                for line in status.strip().split("\n"):
                    # Porcelain lines are "XY <path>"; anything shorter is noise.
                    if len(line) < 4:
                        continue
                    status_code = line[:2]
                    file_path = line[3:]
                    # Hide cecli's own bookkeeping files from the snapshot.
                    if any(part.startswith(".cecli") for part in file_path.split("/")):
                        continue
                    if status_code[0] == "A":
                        staged_added.append(file_path)
                    elif status_code[0] == "M":
                        staged_modified.append(file_path)
                    elif status_code[0] == "D":
                        staged_deleted.append(file_path)
                    if status_code[1] == "M":
                        unstaged_modified.append(file_path)
                    elif status_code[1] == "D":
                        unstaged_deleted.append(file_path)
                    if status_code == "??":
                        untracked.append(file_path)
                # Emit every bucket with its display prefix; an empty bucket
                # yields no lines, so no per-bucket guard is needed.
                for prefix, files in (
                    ("A", staged_added),
                    ("M", staged_modified),
                    ("D", staged_deleted),
                    (" M", unstaged_modified),
                    (" D", unstaged_deleted),
                    ("??", untracked),
                ):
                    for file_path in files:
                        result += f"{prefix} {file_path}\n"
            else:
                result += "Working tree clean\n"
        except Exception as e:
            result += f"Unable to get modified files: {str(e)}\n"
        result += "\nRecent commits:\n"
        try:
            commits = list(self.repo.repo.iter_commits(max_count=5))
            for commit in commits:
                short_hash = commit.hexsha[:8]
                # First line of the commit message is the subject.
                message = commit.message.strip().split("\n")[0]
                result += f"{short_hash} {message}\n"
        except Exception:
            result += "Unable to get recent commits\n"
        result += "</context>"
        return result
    except Exception as e:
        self.io.tool_error(f"Error generating git status: {str(e)}")
        return None
|
|
1812
|
+
|
|
1813
|
+
def cmd_context_blocks(self, args=""):
    """
    Toggle the enhanced context blocks feature on or off.

    Flips self.use_enhanced_context, tells the user the new state, and
    invalidates the cached context blocks and token counts so the next
    prompt is rebuilt from scratch. Always returns True.
    """
    self.use_enhanced_context = not self.use_enhanced_context
    if self.use_enhanced_context:
        self.io.tool_output(
            "Enhanced context blocks are now ON - directory structure and git status will be"
            " included."
        )
    else:
        self.io.tool_output(
            "Enhanced context blocks are now OFF - directory structure and git status will not"
            " be included."
        )
        # Token bookkeeping is only cleared when turning the feature off.
        self.context_block_tokens = {}
    # Common cache invalidation for both directions of the toggle.
    self.context_blocks_cache = {}
    self.tokens_calculated = False
    return True
|
|
1834
|
+
|
|
1835
|
+
def _log_chunks(self, chunks):
|
|
1836
|
+
try:
|
|
1837
|
+
import hashlib
|
|
1838
|
+
import json
|
|
1839
|
+
|
|
1840
|
+
if not hasattr(self, "_message_hashes"):
|
|
1841
|
+
self._message_hashes = {
|
|
1842
|
+
"system": None,
|
|
1843
|
+
"static": None,
|
|
1844
|
+
"examples": None,
|
|
1845
|
+
"readonly_files": None,
|
|
1846
|
+
"repo": None,
|
|
1847
|
+
"chat_files": None,
|
|
1848
|
+
"pre_message": None,
|
|
1849
|
+
"done": None,
|
|
1850
|
+
"edit_files": None,
|
|
1851
|
+
"cur": None,
|
|
1852
|
+
"post_message": None,
|
|
1853
|
+
"reminder": None,
|
|
1854
|
+
}
|
|
1855
|
+
changes = []
|
|
1856
|
+
for key, value in self._message_hashes.items():
|
|
1857
|
+
json_obj = json.dumps(
|
|
1858
|
+
getattr(chunks, key, ""), sort_keys=True, separators=(",", ":")
|
|
1859
|
+
)
|
|
1860
|
+
new_hash = hashlib.sha256(json_obj.encode("utf-8")).hexdigest()
|
|
1861
|
+
if self._message_hashes[key] != new_hash:
|
|
1862
|
+
changes.append(key)
|
|
1863
|
+
self._message_hashes[key] = new_hash
|
|
1864
|
+
print("")
|
|
1865
|
+
print("MESSAGE CHUNK HASHES")
|
|
1866
|
+
print(self._message_hashes)
|
|
1867
|
+
print("")
|
|
1868
|
+
print(changes)
|
|
1869
|
+
print("")
|
|
1870
|
+
except Exception as e:
|
|
1871
|
+
print(e)
|
|
1872
|
+
pass
|