aider-ce 0.88.20__py3-none-any.whl → 0.88.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. aider/__init__.py +1 -1
  2. aider/_version.py +2 -2
  3. aider/args.py +63 -43
  4. aider/coders/agent_coder.py +331 -79
  5. aider/coders/agent_prompts.py +3 -15
  6. aider/coders/architect_coder.py +21 -5
  7. aider/coders/base_coder.py +661 -413
  8. aider/coders/base_prompts.py +6 -3
  9. aider/coders/chat_chunks.py +39 -17
  10. aider/commands.py +79 -15
  11. aider/diffs.py +10 -9
  12. aider/exceptions.py +1 -1
  13. aider/helpers/coroutines.py +8 -0
  14. aider/helpers/requests.py +45 -0
  15. aider/history.py +5 -0
  16. aider/io.py +179 -25
  17. aider/main.py +86 -35
  18. aider/models.py +16 -8
  19. aider/queries/tree-sitter-language-pack/c-tags.scm +3 -0
  20. aider/queries/tree-sitter-language-pack/clojure-tags.scm +5 -0
  21. aider/queries/tree-sitter-language-pack/commonlisp-tags.scm +5 -0
  22. aider/queries/tree-sitter-language-pack/cpp-tags.scm +3 -0
  23. aider/queries/tree-sitter-language-pack/csharp-tags.scm +6 -0
  24. aider/queries/tree-sitter-language-pack/dart-tags.scm +5 -0
  25. aider/queries/tree-sitter-language-pack/elixir-tags.scm +5 -0
  26. aider/queries/tree-sitter-language-pack/elm-tags.scm +3 -0
  27. aider/queries/tree-sitter-language-pack/go-tags.scm +7 -0
  28. aider/queries/tree-sitter-language-pack/java-tags.scm +6 -0
  29. aider/queries/tree-sitter-language-pack/javascript-tags.scm +8 -0
  30. aider/queries/tree-sitter-language-pack/lua-tags.scm +5 -0
  31. aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm +3 -0
  32. aider/queries/tree-sitter-language-pack/python-tags.scm +10 -0
  33. aider/queries/tree-sitter-language-pack/r-tags.scm +6 -0
  34. aider/queries/tree-sitter-language-pack/ruby-tags.scm +5 -0
  35. aider/queries/tree-sitter-language-pack/rust-tags.scm +3 -0
  36. aider/queries/tree-sitter-language-pack/solidity-tags.scm +1 -1
  37. aider/queries/tree-sitter-language-pack/swift-tags.scm +4 -1
  38. aider/queries/tree-sitter-languages/c-tags.scm +3 -0
  39. aider/queries/tree-sitter-languages/c_sharp-tags.scm +6 -0
  40. aider/queries/tree-sitter-languages/cpp-tags.scm +3 -0
  41. aider/queries/tree-sitter-languages/dart-tags.scm +2 -1
  42. aider/queries/tree-sitter-languages/elixir-tags.scm +5 -0
  43. aider/queries/tree-sitter-languages/elm-tags.scm +3 -0
  44. aider/queries/tree-sitter-languages/fortran-tags.scm +3 -0
  45. aider/queries/tree-sitter-languages/go-tags.scm +6 -0
  46. aider/queries/tree-sitter-languages/haskell-tags.scm +2 -0
  47. aider/queries/tree-sitter-languages/java-tags.scm +6 -0
  48. aider/queries/tree-sitter-languages/javascript-tags.scm +8 -0
  49. aider/queries/tree-sitter-languages/julia-tags.scm +2 -2
  50. aider/queries/tree-sitter-languages/kotlin-tags.scm +3 -0
  51. aider/queries/tree-sitter-languages/ocaml_interface-tags.scm +6 -0
  52. aider/queries/tree-sitter-languages/php-tags.scm +6 -0
  53. aider/queries/tree-sitter-languages/python-tags.scm +10 -0
  54. aider/queries/tree-sitter-languages/ruby-tags.scm +5 -0
  55. aider/queries/tree-sitter-languages/rust-tags.scm +3 -0
  56. aider/queries/tree-sitter-languages/scala-tags.scm +2 -3
  57. aider/queries/tree-sitter-languages/typescript-tags.scm +3 -0
  58. aider/queries/tree-sitter-languages/zig-tags.scm +20 -3
  59. aider/repomap.py +71 -11
  60. aider/resources/model-metadata.json +27335 -635
  61. aider/resources/model-settings.yml +190 -0
  62. aider/scrape.py +2 -0
  63. aider/tools/__init__.py +2 -0
  64. aider/tools/command.py +84 -94
  65. aider/tools/command_interactive.py +95 -110
  66. aider/tools/delete_block.py +131 -159
  67. aider/tools/delete_line.py +97 -132
  68. aider/tools/delete_lines.py +120 -160
  69. aider/tools/extract_lines.py +288 -312
  70. aider/tools/finished.py +30 -43
  71. aider/tools/git_branch.py +107 -109
  72. aider/tools/git_diff.py +44 -56
  73. aider/tools/git_log.py +39 -53
  74. aider/tools/git_remote.py +37 -51
  75. aider/tools/git_show.py +33 -47
  76. aider/tools/git_status.py +30 -44
  77. aider/tools/grep.py +214 -242
  78. aider/tools/indent_lines.py +175 -201
  79. aider/tools/insert_block.py +220 -253
  80. aider/tools/list_changes.py +65 -80
  81. aider/tools/ls.py +64 -80
  82. aider/tools/make_editable.py +57 -73
  83. aider/tools/make_readonly.py +50 -66
  84. aider/tools/remove.py +64 -80
  85. aider/tools/replace_all.py +96 -109
  86. aider/tools/replace_line.py +118 -156
  87. aider/tools/replace_lines.py +160 -197
  88. aider/tools/replace_text.py +159 -160
  89. aider/tools/show_numbered_context.py +115 -141
  90. aider/tools/thinking.py +52 -0
  91. aider/tools/undo_change.py +78 -91
  92. aider/tools/update_todo_list.py +130 -138
  93. aider/tools/utils/base_tool.py +64 -0
  94. aider/tools/utils/output.py +118 -0
  95. aider/tools/view.py +38 -54
  96. aider/tools/view_files_matching.py +131 -134
  97. aider/tools/view_files_with_symbol.py +108 -120
  98. aider/urls.py +1 -1
  99. aider/versioncheck.py +4 -3
  100. aider/website/docs/config/adv-model-settings.md +237 -0
  101. aider/website/docs/config/agent-mode.md +36 -3
  102. aider/website/docs/config/model-aliases.md +2 -1
  103. aider/website/docs/faq.md +6 -11
  104. aider/website/docs/languages.md +2 -2
  105. aider/website/docs/more/infinite-output.md +27 -0
  106. {aider_ce-0.88.20.dist-info → aider_ce-0.88.38.dist-info}/METADATA +112 -70
  107. {aider_ce-0.88.20.dist-info → aider_ce-0.88.38.dist-info}/RECORD +112 -107
  108. aider_ce-0.88.38.dist-info/entry_points.txt +6 -0
  109. aider_ce-0.88.20.dist-info/entry_points.txt +0 -2
  110. aider/tools/{tool_utils.py → utils/helpers.py} +0 -0
  111. {aider_ce-0.88.20.dist-info → aider_ce-0.88.38.dist-info}/WHEEL +0 -0
  112. {aider_ce-0.88.20.dist-info → aider_ce-0.88.38.dist-info}/licenses/LICENSE.txt +0 -0
  113. {aider_ce-0.88.20.dist-info → aider_ce-0.88.38.dist-info}/top_level.txt +0 -0
aider/coders/base_coder.py
@@ -30,13 +30,7 @@ from typing import List
 
 import httpx
 from litellm import experimental_mcp_client
-from litellm.types.utils import (
-    ChatCompletionMessageToolCall,
-    Choices,
-    Function,
-    Message,
-    ModelResponse,
-)
+from litellm.types.utils import ModelResponse
 from prompt_toolkit.patch_stdout import patch_stdout
 from rich.console import Console
 
@@ -44,6 +38,7 @@ from aider import __version__, models, prompts, urls, utils
 from aider.analytics import Analytics
 from aider.commands import Commands, SwitchCoder
 from aider.exceptions import LiteLLMExceptions
+from aider.helpers import coroutines
 from aider.history import ChatSummary
 from aider.io import ConfirmGroup, InputOutput
 from aider.linter import Linter
@@ -60,6 +55,7 @@ from aider.repo import ANY_GIT_ERROR, GitRepo
 from aider.repomap import RepoMap
 from aider.run_cmd import run_cmd
 from aider.sessions import SessionManager
+from aider.tools.utils.output import print_tool_response
 from aider.utils import format_tokens, is_image_file
 
 from ..dump import dump  # noqa: F401
@@ -125,6 +121,8 @@ class Coder:
     test_outcome = None
     multi_response_content = ""
     partial_response_content = ""
+    partial_response_reasoning_content = ""
+    partial_response_chunks = []
     partial_response_tool_calls = []
     commit_before_message = []
     message_cost = 0.0
@@ -143,6 +141,9 @@ class Coder:
     compact_context_completed = True
     suppress_announcements_for_next_prompt = False
     tool_reflection = False
+    # Task coordination state variables
+    input_running = False
+    output_running = False
 
     # Context management settings (for all modes)
     context_management_enabled = False  # Disabled by default except for agent mode
@@ -240,7 +241,7 @@ class Coder:
 
     def get_announcements(self):
        lines = []
-        lines.append(f"Aider v{__version__}")
+        lines.append(f"Aider-CE v{__version__}")
 
         # Model
         main_model = self.main_model
@@ -475,6 +476,7 @@ class Coder:
         self.dry_run = dry_run
         self.pretty = self.io.pretty
         self.linear_output = linear_output
+        self.io.linear = linear_output
         self.main_model = main_model
 
         # Set the reasoning tag name based on model settings or default
@@ -492,7 +494,10 @@ class Coder:
         self.commands = commands or Commands(self.io, self)
         self.commands.coder = self
 
-        self.data_cache = {"repo": {"last_key": ""}, "relative_files": None}
+        self.data_cache = {
+            "repo": {"last_key": "", "read_only_count": None},
+            "relative_files": None,
+        }
 
         self.repo = repo
         if use_git and self.repo is None:
@@ -582,6 +587,7 @@ class Coder:
             max_code_line_length=map_max_line_length,
             repo_root=self.root,
             use_memory_cache=repomap_in_memory,
+            use_enhanced_map=False if not self.args or self.args.use_enhanced_map else True,
         )
 
         self.summarizer = summarizer or ChatSummary(
@@ -678,7 +684,13 @@ class Coder:
         return True
 
     def get_abs_fnames_content(self):
-        for fname in list(self.abs_fnames):
+        # Sort files by last modified time (earliest first, latest last)
+        sorted_fnames = sorted(
+            list(filter(lambda f: os.path.exists(f), self.abs_fnames)),
+            key=lambda fname: os.path.getmtime(fname),
+        )
+
+        for fname in sorted_fnames:
             content = self.io.read_text(fname)
 
             if content is None:
@@ -724,57 +736,121 @@ class Coder:
         if not fnames:
             fnames = self.abs_fnames
 
-        prompt = ""
-        for fname, content in self.get_abs_fnames_content():
-            if not is_image_file(fname):
-                relative_fname = self.get_rel_fname(fname)
-                prompt += "\n"
-                prompt += relative_fname
-                prompt += f"\n{self.fence[0]}\n"
-
-                # Apply context management if enabled for large files
-                if self.context_management_enabled:
-                    # Calculate tokens for this file
-                    file_tokens = self.main_model.token_count(content)
-
-                    if file_tokens > self.large_file_token_threshold:
-                        # Truncate the file content
-                        lines = content.splitlines()
+        # If there are files, return a dictionary with chat_files and edit_files
+        if fnames:
+            # Get current time for comparison
+            current_time = time.time()
+            lookback = current_time - 30
 
-                        # Keep the first and last parts of the file with a marker in between
-                        keep_lines = (
-                            self.large_file_token_threshold // 40
-                        )  # Rough estimate of tokens per line
-                        first_chunk = lines[: keep_lines // 2]
-                        last_chunk = lines[-(keep_lines // 2) :]
+            # Get file modification times and sort by most recent first
+            file_times = []
+            for fname in fnames:
+                try:
+                    if os.path.exists(fname):
+                        mtime = os.path.getmtime(fname)
+                        file_times.append((fname, mtime))
+                except OSError:
+                    # Skip files that can't be accessed
+                    continue
 
-                        truncated_content = "\n".join(first_chunk)
-                        truncated_content += (
-                            f"\n\n... [File truncated due to size ({file_tokens} tokens). Use"
-                            " /context-management to toggle truncation off] ...\n\n"
-                        )
-                        truncated_content += "\n".join(last_chunk)
+            # Sort by modification time (most recent first)
+            file_times.sort(key=lambda x: x[1], reverse=True)
+
+            # Determine which files go to edit_files
+            edit_files = set()
+            if file_times:
+                # Always include the most recently edited file
+                most_recent_file, most_recent_time = file_times[0]
+                edit_files.add(most_recent_file)
+
+                # Include any files edited within the last minute
+                for fname, mtime in file_times:
+                    if mtime >= lookback:
+                        edit_files.add(fname)
+
+            # Build content for chat_files and edit_files
+            chat_files_prompt = ""
+            edit_files_prompt = ""
+            chat_file_names = set()
+            edit_file_names = set()
+
+            for fname, content in self.get_abs_fnames_content():
+                if not is_image_file(fname):
+                    relative_fname = self.get_rel_fname(fname)
+                    file_prompt = "\n"
+                    file_prompt += relative_fname
+                    file_prompt += f"\n{self.fence[0]}\n"
+
+                    # Apply context management if enabled for large files
+                    if self.context_management_enabled:
+                        # Calculate tokens for this file
+                        file_tokens = self.main_model.token_count(content)
+
+                        if file_tokens > self.large_file_token_threshold:
+                            # Truncate the file content
+                            lines = content.splitlines()
+
+                            # Keep the first and last parts of the file with a marker in between
+                            keep_lines = (
+                                self.large_file_token_threshold // 40
+                            )  # Rough estimate of tokens per line
+                            first_chunk = lines[: keep_lines // 2]
+                            last_chunk = lines[-(keep_lines // 2) :]
+
+                            truncated_content = "\n".join(first_chunk)
+                            truncated_content += (
+                                f"\n\n... [File truncated due to size ({file_tokens} tokens). Use"
+                                " /context-management to toggle truncation off] ...\n\n"
+                            )
+                            truncated_content += "\n".join(last_chunk)
 
-                        # Add message about truncation
-                        self.io.tool_output(
-                            f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
-                            "Use /context-management to toggle truncation off if needed."
-                        )
+                            # Add message about truncation
+                            self.io.tool_output(
+                                f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
+                                "Use /context-management to toggle truncation off if needed."
+                            )
 
-                        prompt += truncated_content
+                            file_prompt += truncated_content
+                        else:
+                            file_prompt += content
                     else:
-                        prompt += content
-                else:
-                    prompt += content
+                        file_prompt += content
 
-                prompt += f"{self.fence[1]}\n"
+                    file_prompt += f"{self.fence[1]}\n"
 
-        return prompt
+                    # Add to appropriate prompt based on edit time
+                    if fname in edit_files:
+                        edit_files_prompt += file_prompt
+                        edit_file_names.add(relative_fname)
+                    else:
+                        chat_files_prompt += file_prompt
+                        chat_file_names.add(relative_fname)
+
+            return {
+                "chat_files": chat_files_prompt,
+                "edit_files": edit_files_prompt,
+                "chat_file_names": chat_file_names,
+                "edit_file_names": edit_file_names,
+            }
+        else:
+            # Return empty dictionary when no files
+            return {
+                "chat_files": "",
+                "edit_files": "",
+                "chat_file_names": set(),
+                "edit_file_names": set(),
+            }
 
     def get_read_only_files_content(self):
         prompt = ""
+        # Sort read-only files by last modified time (earliest first, latest last)
+        sorted_fnames = sorted(
+            list(filter(lambda f: os.path.exists(f), self.abs_read_only_fnames)),
+            key=lambda fname: os.path.getmtime(fname),
+        )
+
         # Handle regular read-only files
-        for fname in self.abs_read_only_fnames:
+        for fname in sorted_fnames:
             content = self.io.read_text(fname)
             if content is not None and not is_image_file(fname):
                 relative_fname = self.get_rel_fname(fname)
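
The hunk above replaces the single `prompt` string with a chat/edit split keyed on file modification times. As a minimal sketch of just that partitioning rule, assuming plain file paths and the 30-second lookback used in the diff (`split_by_recency` is a hypothetical name, not part of the package):

```python
import os
import time

def split_by_recency(paths, window_secs=30):
    """The most recently modified file (plus anything touched within the
    lookback window) goes to 'edit'; everything else goes to 'chat'."""
    lookback = time.time() - window_secs
    timed = [(p, os.path.getmtime(p)) for p in paths if os.path.exists(p)]
    timed.sort(key=lambda t: t[1], reverse=True)  # most recent first
    edit = {timed[0][0]} if timed else set()
    edit.update(p for p, mtime in timed if mtime >= lookback)
    chat = {p for p, _ in timed} - edit
    return chat, edit
```
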
@@ -819,8 +895,14 @@ class Coder:
 
                 prompt += f"{self.fence[1]}\n"
 
+        # Sort stub files by last modified time (earliest first, latest last)
+        sorted_stub_fnames = sorted(
+            list(filter(lambda f: os.path.exists(f), self.abs_read_only_stubs_fnames)),
+            key=lambda fname: os.path.getmtime(fname),
+        )
+
         # Handle stub files
-        for fname in self.abs_read_only_stubs_fnames:
+        for fname in sorted_stub_fnames:
             if not is_image_file(fname):
                 relative_fname = self.get_rel_fname(fname)
                 prompt += "\n"
@@ -878,7 +960,16 @@ class Coder:
         self.io.update_spinner("Updating repo map")
 
         cur_msg_text = self.get_cur_message_text()
-        staged_files_hash = hash(str([item.a_path for item in self.repo.repo.index.diff("HEAD")]))
+        try:
+            staged_files_hash = hash(
+                str([item.a_path for item in self.repo.repo.index.diff("HEAD")])
+            )
+        except ANY_GIT_ERROR as err:
+            # Handle git errors gracefully - use a fallback hash
+            if self.verbose:
+                self.io.tool_warning(f"Git error while checking staged files for repo map: {err}")
+            staged_files_hash = hash(str(time.time()))  # Use timestamp as fallback
+
         read_only_count = len(set(self.abs_read_only_fnames)) + len(
             set(self.abs_read_only_stubs_fnames)
         )
@@ -889,7 +980,6 @@ class Coder:
             or read_only_count != self.data_cache["repo"]["read_only_count"]
         ):
             self.data_cache["repo"]["last_key"] = staged_files_hash
-
             mentioned_idents = self.data_cache["repo"]["mentioned_idents"]
             mentioned_fnames = self.get_file_mentions(cur_msg_text)
             mentioned_fnames.update(self.get_ident_filename_matches(mentioned_idents))
@@ -907,7 +997,7 @@ class Coder:
             return False
         if ".min." in parts[-1]:
             return False
-        if self.repo.git_ignored_file(abs_path):
+        if self.repo.ignored_file(abs_path):
             return False
         return True
 
@@ -1001,22 +1091,56 @@ class Coder:
 
     def get_chat_files_messages(self):
         chat_files_messages = []
+        edit_files_messages = []
+        chat_file_names = set()
+        edit_file_names = set()
+
         if self.abs_fnames:
-            files_content = self.gpt_prompts.files_content_prefix
-            files_content += self.get_files_content()
+            files_content_result = self.get_files_content()
+
+            # Get content and file names from dictionary
+            chat_files_content = files_content_result.get("chat_files", "")
+            edit_files_content = files_content_result.get("edit_files", "")
+            chat_file_names = files_content_result.get("chat_file_names", set())
+            edit_file_names = files_content_result.get("edit_file_names", set())
+
             files_reply = self.gpt_prompts.files_content_assistant_reply
+
+            if chat_files_content:
+                chat_files_messages += [
+                    dict(
+                        role="user",
+                        content=self.gpt_prompts.files_content_prefix + chat_files_content,
+                    ),
+                    dict(role="assistant", content=files_reply),
+                ]
+
+            if edit_files_content:
+                edit_files_messages += [
+                    dict(
+                        role="user",
+                        content=self.gpt_prompts.files_content_prefix + edit_files_content,
+                    ),
+                    dict(role="assistant", content=files_reply),
+                ]
         elif self.gpt_prompts.files_no_full_files_with_repo_map:
             files_content = self.gpt_prompts.files_no_full_files_with_repo_map
             files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply
+
+            if files_content:
+                chat_files_messages += [
+                    dict(role="user", content=files_content),
+                    dict(role="assistant", content=files_reply),
+                ]
         else:
             files_content = self.gpt_prompts.files_no_full_files
             files_reply = "Ok."
 
-        if files_content:
-            chat_files_messages += [
-                dict(role="user", content=files_content),
-                dict(role="assistant", content=files_reply),
-            ]
+            if files_content:
+                chat_files_messages += [
+                    dict(role="user", content=files_content),
+                    dict(role="assistant", content=files_reply),
+                ]
 
         images_message = self.get_images_message(self.abs_fnames)
         if images_message is not None:
@@ -1025,7 +1149,12 @@ class Coder:
                 dict(role="assistant", content="Ok."),
             ]
 
-        return chat_files_messages
+        return {
+            "chat_files": chat_files_messages,
+            "edit_files": edit_files_messages,
+            "chat_file_names": chat_file_names,
+            "edit_file_names": edit_file_names,
+        }
 
     def get_images_message(self, fnames):
         supports_images = self.main_model.info.get("supports_vision")
@@ -1096,9 +1225,9 @@ class Coder:
 
         if self.io.prompt_session:
             with patch_stdout(raw=True):
-                return await self._run_patched(with_message, preproc)
+                return await self._run_parallel(with_message, preproc)
         else:
-            return await self._run_patched(with_message, preproc)
+            return await self._run_parallel(with_message, preproc)
 
     async def _run_linear(self, with_message=None, preproc=True):
         try:
  try:
@@ -1108,8 +1237,7 @@ class Coder:
1108
1237
  return self.partial_response_content
1109
1238
 
1110
1239
  user_message = None
1111
- await self.io.cancel_input_task()
1112
- await self.io.cancel_output_task()
1240
+ await self.io.stop_task_streams()
1113
1241
 
1114
1242
  while True:
1115
1243
  try:
@@ -1124,186 +1252,218 @@ class Coder:
                     await self.io.recreate_input()
                     await self.io.input_task
                     user_message = self.io.input_task.result()
-
-                    self.io.output_task = asyncio.create_task(self._generate(user_message, preproc))
+                    self.io.tool_output("Processing...\n")
+                    self.io.output_task = asyncio.create_task(self.generate(user_message, preproc))
 
                     await self.io.output_task
-
+                    self.io.tool_output("Finished.")
                     self.io.ring_bell()
                     user_message = None
-                except KeyboardInterrupt:
-                    if self.io.input_task:
-                        self.io.set_placeholder("")
-                        await self.io.cancel_input_task()
-
-                    if self.io.output_task:
-                        await self.io.cancel_output_task()
-                        self.io.stop_spinner()
+                    await self.auto_save_session()
 
+                except KeyboardInterrupt:
+                    self.io.set_placeholder("")
+                    self.io.stop_spinner()
                     self.keyboard_interrupt()
+                    await self.io.stop_task_streams()
                 except (asyncio.CancelledError, IndexError):
                     pass
 
-                self.auto_save_session()
         except EOFError:
             return
         finally:
-            await self.io.cancel_input_task()
-            await self.io.cancel_output_task()
+            await self.io.stop_task_streams()
 
-    async def _run_patched(self, with_message=None, preproc=True):
+    async def _run_parallel(self, with_message=None, preproc=True):
         try:
             if with_message:
                 self.io.user_input(with_message)
                 await self.run_one(with_message, preproc)
                 return self.partial_response_content
 
-            user_message = None
+            # Initialize state for task coordination
+            self.input_running = True
+            self.output_running = True
             self.user_message = ""
-            await self.io.cancel_input_task()
-            await self.io.cancel_output_task()
 
-            while True:
-                try:
-                    if (
-                        not self.io.confirmation_in_progress
-                        and not user_message
-                        and (
-                            not self.io.input_task
-                            or self.io.input_task.done()
-                            or self.io.input_task.cancelled()
-                        )
-                        and (not self.io.output_task or not self.io.placeholder)
-                    ):
-                        if not self.suppress_announcements_for_next_prompt:
-                            self.show_announcements()
-                            self.suppress_announcements_for_next_prompt = True
-
-                        # Stop spinner before showing announcements or getting input
-                        self.io.stop_spinner()
-                        self.copy_context()
-                        await self.io.recreate_input()
+            # Cancel any existing tasks
+            await self.io.stop_task_streams()
 
-                    if self.user_message:
-                        self.io.output_task = asyncio.create_task(
-                            self._generate(self.user_message, preproc)
-                        )
+            # Start the input and output tasks
+            input_task = asyncio.create_task(self.input_task(preproc))
+            output_task = asyncio.create_task(self.output_task(preproc))
 
-                        self.user_message = ""
-                        # Start spinner for processing task
-                        self.io.start_spinner("Processing...")
-
-                    if self.commands.cmd_running:
-                        await asyncio.sleep(0.1)
-                        continue
-
-                    tasks = set()
+            try:
+                # Wait for both tasks to complete or for one to raise an exception
+                done, pending = await asyncio.wait(
+                    [input_task, output_task], return_when=asyncio.FIRST_EXCEPTION
+                )
 
-                    if self.io.output_task:
-                        if self.io.output_task.done():
-                            exception = self.io.output_task.exception()
-                            if exception:
-                                if isinstance(exception, SwitchCoder):
-                                    await self.io.output_task
-                        elif not self.io.output_task.done() and not self.io.output_task.cancelled():
-                            tasks.add(self.io.output_task)
+                # Check for exceptions
+                for task in done:
+                    if task.exception():
+                        raise task.exception()
 
-                    if (
-                        self.io.input_task
-                        and not self.io.input_task.done()
-                        and not self.io.input_task.cancelled()
-                    ):
-                        tasks.add(self.io.input_task)
+            except (SwitchCoder, SystemExit):
+                # Re-raise SwitchCoder to be handled by outer try block
+                raise
+            except KeyboardInterrupt:
+                # Handle keyboard interrupt gracefully
+                self.io.set_placeholder("")
+                self.io.stop_spinner()
+                self.keyboard_interrupt()
+            finally:
+                # Signal tasks to stop
+                self.input_running = False
+                self.output_running = False
+
+                # Cancel tasks
+                input_task.cancel()
+                output_task.cancel()
+
+                # Wait for tasks to finish
+                try:
+                    await asyncio.gather(input_task, output_task, return_exceptions=True)
+                except (asyncio.CancelledError, KeyboardInterrupt):
+                    pass
 
-                    if tasks:
-                        done, pending = await asyncio.wait(
-                            tasks, return_when=asyncio.FIRST_COMPLETED
-                        )
+                # Ensure IO tasks are properly cancelled
+                await self.io.stop_task_streams()
 
-                        if self.io.input_task and self.io.input_task in done:
-                            if self.io.output_task:
-                                if not self.io.confirmation_in_progress:
-                                    await self.io.cancel_output_task()
-                                    self.io.stop_spinner()
+            await self.auto_save_session()
+        except EOFError:
+            return
+        finally:
+            await self.io.stop_task_streams()
 
-                            try:
-                                if self.io.input_task:
-                                    user_message = self.io.input_task.result()
-                                    await self.io.cancel_input_task()
+    async def input_task(self, preproc):
+        """
+        Handles input creation/recreation and user message processing.
+        This task manages the input loop and coordinates with output_task.
+        """
+        while self.input_running:
+            try:
+                # Wait for commands to finish
+                if self.commands.cmd_running:
+                    await asyncio.sleep(0.1)
+                    continue
 
-                                    if self.commands.is_run_command(user_message):
-                                        self.commands.cmd_running = True
+                # Wait for input task completion
+                if self.io.input_task and self.io.input_task.done():
+                    try:
+                        user_message = self.io.input_task.result()
+
+                        # Set user message for output task
+                        if not self.io.acknowledge_confirmation():
+                            if user_message:
+                                self.user_message = user_message
+                                await self.auto_save_session()
+                            else:
+                                self.user_message = ""
+                                await self.io.stop_task_streams()
+
+                    except (asyncio.CancelledError, KeyboardInterrupt):
+                        self.user_message = ""
+                        await self.io.stop_task_streams()
 
-                            except (asyncio.CancelledError, KeyboardInterrupt):
-                                user_message = None
+                # Check if we should show announcements
+                if (
+                    not self.io.confirmation_in_progress
+                    and not self.user_message
+                    and not coroutines.is_active(self.io.input_task)
+                    and (not coroutines.is_active(self.io.output_task) or not self.io.placeholder)
+                ):
+                    if not self.suppress_announcements_for_next_prompt:
+                        self.show_announcements()
+                        self.suppress_announcements_for_next_prompt = True
 
-                            if not user_message:
-                                await self.io.cancel_input_task()
-                                continue
+                    # Stop spinner before showing announcements or getting input
+                    self.io.stop_spinner()
+                    self.copy_context()
 
-                        if self.io.output_task and self.io.output_task in pending:
-                            try:
-                                tasks = set()
-                                tasks.add(self.io.output_task)
+                # Check if we should recreate input
+                if not coroutines.is_active(self.io.input_task):
+                    self.io.ring_bell()
+                    await self.io.recreate_input()
 
-                                # We just did a confirmation so add a new input task
-                                if self.io.get_confirmation_acknowledgement():
-                                    await self.io.recreate_input()
-                                    tasks.add(self.io.input_task)
+                await asyncio.sleep(0.01)  # Small yield to prevent tight loop
 
-                                done, pending = await asyncio.wait(
-                                    tasks, return_when=asyncio.FIRST_COMPLETED
-                                )
+            except KeyboardInterrupt:
+                self.io.set_placeholder("")
+                self.keyboard_interrupt()
+                await self.io.stop_task_streams()
+            except (SwitchCoder, SystemExit):
+                raise
+            except Exception as e:
+                if self.verbose or self.args.debug:
+                    print(e)
 
-                                if (
-                                    self.io.input_task
-                                    and self.io.input_task in done
-                                    and not self.io.confirmation_in_progress
-                                ):
-                                    await self.io.cancel_output_task()
-                                    self.io.stop_spinner()
-                                    self.io.acknowledge_confirmation()
+    async def output_task(self, preproc):
+        """
+        Handles output task generation and monitoring.
+        This task manages the output loop and coordinates with input_task.
+        """
+        while self.output_running:
+            try:
+                # Wait for commands to finish
+                if self.commands.cmd_running:
+                    await asyncio.sleep(0.1)
+                    continue
 
-                                    try:
-                                        user_message = self.io.input_task.result()
-                                        await self.io.cancel_input_task()
-                                    except (asyncio.CancelledError, KeyboardInterrupt):
-                                        user_message = None
+                # Check if we have a user message to process
+                if self.user_message and not self.io.get_confirmation_acknowledgement():
+                    user_message = self.user_message
+                    self.user_message = ""
 
-                            except (asyncio.CancelledError, KeyboardInterrupt):
-                                pass
+                    # Create output task for processing
+                    self.io.output_task = asyncio.create_task(self.generate(user_message, preproc))
 
-                    # Stop spinner when processing task completes
-                    self.io.stop_spinner()
+                    # Start spinner for output task
+                    self.io.start_spinner("Processing...")
+                    await self.io.recreate_input()
 
-                    if user_message and not self.io.acknowledge_confirmation():
-                        self.user_message = user_message
+                # Monitor output task
+                if self.io.output_task:
+                    if self.io.output_task.done():
+                        exception = self.io.output_task.exception()
+                        if exception:
+                            if isinstance(exception, SwitchCoder):
+                                await self.io.output_task
+                                raise exception
+
+                            self.io.tool_error(f"Error during generation: {exception}")
+                            if self.verbose:
+                                traceback.print_exception(
+                                    type(exception), exception, exception.__traceback__
+                                )
 
-                        self.io.ring_bell()
-                        user_message = None
-            except KeyboardInterrupt:
-                self.io.set_placeholder("")
+                        # Stop spinner when processing task completes
+                        self.io.stop_spinner()
 
-                await self.io.cancel_input_task()
-                await self.io.cancel_output_task()
+                        # And stop monitoring the output task
+                        await self.io.stop_output_task()
 
-                self.io.stop_spinner()
-                self.keyboard_interrupt()
+                        await self.auto_save_session()
+                await asyncio.sleep(0.01)  # Small yield to prevent tight loop
 
-                self.auto_save_session()
-        except EOFError:
-            return
-        finally:
-            await self.io.cancel_input_task()
-            await self.io.cancel_output_task()
+            except KeyboardInterrupt:
+                self.io.stop_spinner()
+                self.keyboard_interrupt()
+                await self.io.stop_task_streams()
+            except (SwitchCoder, SystemExit):
+                raise
+            except Exception as e:
+                if self.verbose or self.args.debug:
+                    print(e)
 
-    async def _generate(self, user_message, preproc):
+    async def generate(self, user_message, preproc):
         await asyncio.sleep(0.1)
 
         try:
-            self.compact_context_completed = False
-            await self.compact_context_if_needed()
-            self.compact_context_completed = True
+            if not self.enable_context_compaction:
+                self.compact_context_completed = False
+                await self.compact_context_if_needed()
+                self.compact_context_completed = True
 
             self.run_one_completed = False
             await self.run_one(user_message, preproc)
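
The `_run_parallel` rewrite above follows a common asyncio pattern: two long-lived loops guarded by shared running flags, awaited together with `FIRST_EXCEPTION`, then cancelled and gathered in a `finally`. A stripped-down, runnable sketch of just that skeleton (hypothetical names, no aider internals):

```python
import asyncio

async def worker(name, state, iters=100):
    # Long-lived loop guarded by a shared running flag, as in input_task/output_task.
    while state["running"] and iters:
        iters -= 1
        await asyncio.sleep(0.01)  # small yield so the sibling task can run

async def run_parallel():
    state = {"running": True}
    t_in = asyncio.create_task(worker("input", state))
    t_out = asyncio.create_task(worker("output", state))
    try:
        # Returns as soon as either task raises (or both finish cleanly).
        done, _pending = await asyncio.wait([t_in, t_out], return_when=asyncio.FIRST_EXCEPTION)
        for task in done:
            if task.exception():
                raise task.exception()
    finally:
        state["running"] = False  # signal both loops to exit
        t_in.cancel()
        t_out.cancel()
        # Gather with return_exceptions=True so CancelledError is absorbed.
        await asyncio.gather(t_in, t_out, return_exceptions=True)

asyncio.run(run_parallel())
```
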
@@ -1393,6 +1553,9 @@ class Coder:
             else:
                 message = self.reflected_message
 
+        if self.enable_context_compaction:
+            await self.compact_context_if_needed()
+
     async def check_and_open_urls(self, exc, friendly_msg=None):
         """Check exception for URLs, offer to open in a browser, with user-friendly error msgs."""
         text = str(exc)
@@ -1485,38 +1648,81 @@ class Coder:
             self.summarize_start()
             return
 
-        if not self.summarizer.check_max_tokens(
-            self.done_messages, max_tokens=self.context_compaction_max_tokens
-        ):
+        # Check if combined messages exceed the token limit,
+        # Exclude first cur_message since that's the user's initial input
+        done_tokens = self.summarizer.count_tokens(self.done_messages)
+        cur_tokens = self.summarizer.count_tokens(self.cur_messages[1:])
+        combined_tokens = done_tokens + cur_tokens
+
+        if combined_tokens < self.context_compaction_max_tokens:
             return
 
         self.io.tool_output("Compacting chat history to make room for new messages...")
+        self.io.update_spinner("Compacting...")
 
         try:
-            # Create a summary of the conversation
-            summary_text = await self.summarizer.summarize_all_as_text(
-                self.done_messages,
-                self.gpt_prompts.compaction_prompt,
-                self.context_compaction_summary_tokens,
-            )
-            if not summary_text:
-                raise ValueError("Summarization returned an empty result.")
+            # Check if done_messages alone exceed the limit
+            if done_tokens > self.context_compaction_max_tokens or done_tokens > cur_tokens:
+                # Create a summary of the done_messages
+                summary_text = await self.summarizer.summarize_all_as_text(
+                    self.done_messages,
+                    self.gpt_prompts.compaction_prompt,
+                    self.context_compaction_summary_tokens,
+                )
+
+                if not summary_text:
+                    raise ValueError("Summarization returned an empty result.")
+
+                # Replace old messages with the summary
+                self.done_messages = [
+                    {
+                        "role": "user",
+                        "content": summary_text,
+                    },
+                    {
+                        "role": "assistant",
+                        "content": (
+                            "Ok, I will use this summary as the context for our conversation going"
+                            " forward."
+                        ),
+                    },
+                ]
+
+            # Check if cur_messages alone exceed the limit (after potentially compacting done_messages)
+            if cur_tokens > self.context_compaction_max_tokens or cur_tokens > done_tokens:
+                # Create a summary of the cur_messages
+                cur_summary_text = await self.summarizer.summarize_all_as_text(
+                    self.cur_messages,
+                    self.gpt_prompts.compaction_prompt,
+                    self.context_compaction_summary_tokens,
+                )
+
+                if not cur_summary_text:
+                    raise ValueError("Summarization of current messages returned an empty result.")
+
+                # Replace current messages with the summary
+                self.cur_messages = [
+                    self.cur_messages[0],
+                    {
+                        "role": "assistant",
+                        "content": "Ok. I am awaiting your summary of our goals to proceed.",
+                    },
+                    {
+                        "role": "user",
+                        "content": f"Here is a summary of our current goals:\n{cur_summary_text}",
+                    },
+                    {
+                        "role": "assistant",
+                        "content": (
+                            "Ok, I will use this summary and proceed with our task."
+                            " I will first apply any changes in the summary and then"
+                            " continue exploration as necessary."
+                        ),
+                    },
+                ]
 
-            # Replace old messages with the summary
-            self.done_messages = [
-                {
-                    "role": "user",
-                    "content": summary_text,
-                },
-                {
-                    "role": "assistant",
-                    "content": (
-                        "Ok, I will use this summary as the context for our conversation going"
-                        " forward."
-                    ),
-                },
-            ]
             self.io.tool_output("...chat history compacted.")
+            self.io.update_spinner(self.io.last_spinner_text)
         except Exception as e:
             self.io.tool_warning(f"Context compaction failed: {e}")
             self.io.tool_warning("Proceeding with full history for now.")
@@ -1781,7 +1987,11 @@ class Coder:
 
         chunks.repo = self.get_repo_messages()
         chunks.readonly_files = self.get_readonly_files_messages()
-        chunks.chat_files = self.get_chat_files_messages()
+
+        # Handle the dictionary structure from get_chat_files_messages()
+        chat_files_result = self.get_chat_files_messages()
+        chunks.chat_files = chat_files_result.get("chat_files", [])
+        chunks.edit_files = chat_files_result.get("edit_files", [])
 
         if self.gpt_prompts.system_reminder:
             reminder_message = [
@@ -1919,7 +2129,7 @@ class Coder:
                 " the context limit is exceeded."
             )
 
-            if not await self.io.confirm_ask("Try to proceed anyway?"):
+            if not await self.io.confirm_ask("Try to proceed anyway?", explicit_yes_required=True):
                 return False
         return True
 
@@ -2039,9 +2249,7 @@ class Coder:
         self.multi_response_content = ""
 
         self.io.tool_output()
-
         self.show_usage_report()
-
         self.add_assistant_reply_to_cur_messages()
 
         if exhausted:
@@ -2101,43 +2309,7 @@ class Coder:
         # Process any tools using MCP servers
         try:
             if self.partial_response_tool_calls:
-                tool_calls = []
-                tool_id_set = set()
-
-                for tool_call_dict in self.partial_response_tool_calls:
-                    # LLM APIs sometimes return duplicates and that's annoying
-                    if tool_call_dict.get("id") in tool_id_set:
-                        continue
-
-                    tool_id_set.add(tool_call_dict.get("id"))
-
-                    tool_calls.append(
-                        ChatCompletionMessageToolCall(
-                            id=tool_call_dict.get("id"),
-                            function=Function(
-                                name=tool_call_dict.get("function", {}).get("name"),
-                                arguments=tool_call_dict.get("function", {}).get(
-                                    "arguments", ""
-                                ),
-                            ),
-                            type=tool_call_dict.get("type"),
-                        )
-                    )
-
-                tool_call_response = ModelResponse(
-                    choices=[
-                        Choices(
-                            finish_reason="tool_calls",
-                            index=0,
-                            message=Message(
-                                content=None,
-                                role="assistant",
-                                tool_calls=tool_calls,
-                            ),
-                        )
-                    ]
-                )
-
+                tool_call_response, a, b = self.consolidate_chunks()
                 if await self.process_tool_calls(tool_call_response):
                     self.num_tool_calls += 1
                     self.reflected_message = True
@@ -2244,7 +2416,6 @@ class Coder:
             self._print_tool_call_info(server_tool_calls)
 
             if await self.io.confirm_ask("Run tools?", group_response="Run MCP Tools"):
-                await self.io.recreate_input()
                 tool_responses = await self._execute_tool_calls(server_tool_calls)
 
                 # Add all tool responses
@@ -2263,54 +2434,14 @@ class Coder:
 
         for server, tool_calls in server_tool_calls.items():
             for tool_call in tool_calls:
-                color_start = "[blue]" if self.pretty else ""
-                color_end = "[/blue]" if self.pretty else ""
-
-                self.io.tool_output(
-                    f"{color_start}Tool Call:{color_end} {server.name} • {tool_call.function.name}"
-                )
-                # Parse and format arguments as headers with values
-                if tool_call.function.arguments:
-                    # Only do JSON unwrapping for tools containing "replace" in their name
-                    if (
-                        "replace" in tool_call.function.name.lower()
-                        or "insert" in tool_call.function.name.lower()
-                        or "update" in tool_call.function.name.lower()
-                    ):
-                        try:
-                            args_dict = json.loads(tool_call.function.arguments)
-                            first_key = True
-                            for key, value in args_dict.items():
-                                # Convert explicit \\n sequences to actual newlines using regex
-                                # Only match \\n that is not preceded by any other backslashes
-                                if isinstance(value, str):
-                                    value = re.sub(r"(?<!\\)\\n", "\n", value)
-                                # Add extra newline before first key/header
-                                if first_key:
-                                    self.io.tool_output("\n")
-                                    first_key = False
-                                self.io.tool_output(f"{color_start}{key}:{color_end}")
-                                # Split the value by newlines and output each line separately
-                                if isinstance(value, str):
-                                    for line in value.split("\n"):
-                                        self.io.tool_output(f"{line}")
-                                else:
-                                    self.io.tool_output(f"{str(value)}")
-                                self.io.tool_output("")
-                        except json.JSONDecodeError:
-                            # If JSON parsing fails, show raw arguments
-                            raw_args = tool_call.function.arguments
-                            self.io.tool_output(f"{color_start}Arguments:{color_end} {raw_args}")
-                    else:
-                        # For non-replace tools, show raw arguments
-                        raw_args = tool_call.function.arguments
-                        self.io.tool_output(f"{color_start}Arguments:{color_end} {raw_args}")
-
-                if self.verbose:
-                    self.io.tool_output(f"Tool ID: {tool_call.id}")
-                    self.io.tool_output(f"Tool type: {tool_call.type}")
-
-                self.io.tool_output("\n")
+                if hasattr(self, "tool_registry") and self.tool_registry.get(
+                    tool_call.function.name.lower(), None
+                ):
+                    self.tool_registry.get(tool_call.function.name.lower()).format_output(
+                        coder=self, mcp_server=server, tool_response=tool_call
+                    )
+                else:
+                    print_tool_response(coder=self, mcp_server=server, tool_response=tool_call)
 
     def _gather_server_tool_calls(self, tool_calls):
         """Collect all tool calls grouped by server.
@@ -2662,18 +2793,26 @@ class Coder:
         to be `None` when `tool_calls` are present.
         """
         msg = dict(role="assistant")
-        has_tool_calls = self.partial_response_tool_calls or self.partial_response_function_call
+        response = (
+            self.partial_response_chunks[0]
+            if not self.stream
+            else litellm.stream_chunk_builder(self.partial_response_chunks)
+        )
 
-        # If we have tool calls and we're using a Deepseek model, force content to be None.
-        if has_tool_calls and self.main_model.is_deepseek():
-            msg["content"] = None
-        else:
-            # Otherwise, use logic similar to the base implementation.
-            content = self.partial_response_content
-            if content:
-                msg["content"] = content
-            elif has_tool_calls:
-                msg["content"] = None
+        try:
+            # Use response_dict as a regular dictionary
+            response_dict = response.model_dump()
+        except AttributeError:
+            # Option 2: Fall back to dict() or response.dict() (Pydantic V1 style)
+            try:
+                # Note: calling dict(response) works in both V1 and V2 for raw fields,
+                # but response.dict() is the Pydantic V1 method name.
+                response_dict = dict(response)
+            except TypeError:
+                print("Neither model_dump() nor dict() worked as expected.")
+                raise
+
+        msg = response_dict["choices"][0]["message"]
 
         if self.partial_response_tool_calls:
             msg["tool_calls"] = self.partial_response_tool_calls
@@ -2681,7 +2820,7 @@ class Coder:
             msg["function_call"] = self.partial_response_function_call
 
         # Only add a message if it's not empty.
-        if msg.get("content") is not None or msg.get("tool_calls") or msg.get("function_call"):
+        if msg is not None:
             self.cur_messages.append(msg)
 
     def get_file_mentions(self, content, ignore_current=False):
@@ -2746,7 +2885,6 @@ class Coder:
                 if await self.io.confirm_ask(
                     "Add file to the chat?", subject=rel_fname, group=group, allow_never=True
                 ):
-                    await self.io.recreate_input()
                     self.add_rel_fname(rel_fname)
                     added_fnames.append(rel_fname)
                 else:
@@ -2766,8 +2904,10 @@ class Coder:
         model = self.main_model
 
         self.partial_response_content = ""
-        self.partial_response_function_call = dict()
+        self.partial_response_reasoning_content = ""
+        self.partial_response_chunks = []
         self.partial_response_tool_calls = []
+        self.partial_response_function_call = dict()
 
         completion = None
@@ -2823,28 +2963,9 @@ class Coder:
             self.io.tool_error(str(completion))
             return
 
-        show_func_err = None
-        show_content_err = None
-        try:
-            if completion.choices[0].message.tool_calls:
-                self.partial_response_function_call = (
-                    completion.choices[0].message.tool_calls[0].function
-                )
-        except AttributeError as func_err:
-            show_func_err = func_err
-
-        try:
-            reasoning_content = completion.choices[0].message.reasoning_content
-        except AttributeError:
-            try:
-                reasoning_content = completion.choices[0].message.reasoning
-            except AttributeError:
-                reasoning_content = None
+        self.partial_response_chunks.append(completion)
 
-        try:
-            self.partial_response_content = completion.choices[0].message.content or ""
-        except AttributeError as content_err:
-            show_content_err = content_err
+        response, func_err, content_err = self.consolidate_chunks()
 
         resp_hash = dict(
            function_call=str(self.partial_response_function_call),
@@ -2853,16 +2974,16 @@ class Coder:
         resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())
         self.chat_completion_response_hashes.append(resp_hash.hexdigest())
 
-        if show_func_err and show_content_err:
-            self.io.tool_error(show_func_err)
-            self.io.tool_error(show_content_err)
+        if func_err and content_err:
+            self.io.tool_error(func_err)
+            self.io.tool_error(content_err)
             raise Exception("No data found in LLM response!")
 
         show_resp = self.render_incremental_response(True)
 
-        if reasoning_content:
+        if self.partial_response_reasoning_content:
             formatted_reasoning = format_reasoning_content(
-                reasoning_content, self.reasoning_tag_name
+                self.partial_response_reasoning_content, self.reasoning_tag_name
             )
             show_resp = formatted_reasoning + show_resp
 
@@ -2903,43 +3024,18 @@ class Coder:
                     for tool_call_chunk in chunk.choices[0].delta.tool_calls:
                         self.tool_reflection = True
 
-                        index = tool_call_chunk.index
-                        if len(self.partial_response_tool_calls) <= index:
-                            self.partial_response_tool_calls.extend(
-                                [{}] * (index - len(self.partial_response_tool_calls) + 1)
-                            )
-
-                        if tool_call_chunk.id:
-                            self.partial_response_tool_calls[index]["id"] = tool_call_chunk.id
                         if tool_call_chunk.type:
-                            self.partial_response_tool_calls[index][
-                                "type"
-                            ] = tool_call_chunk.type
+                            self.io.update_spinner_suffix(tool_call_chunk.type)
+
                         if tool_call_chunk.function:
-                            if "function" not in self.partial_response_tool_calls[index]:
-                                self.partial_response_tool_calls[index]["function"] = {}
                             if tool_call_chunk.function.name:
-                                if (
-                                    "name"
-                                    not in self.partial_response_tool_calls[index]["function"]
-                                ):
-                                    self.partial_response_tool_calls[index]["function"][
-                                        "name"
-                                    ] = ""
-                                self.partial_response_tool_calls[index]["function"][
-                                    "name"
-                                ] += tool_call_chunk.function.name
+                                self.io.update_spinner_suffix(tool_call_chunk.function.name)
+
                             if tool_call_chunk.function.arguments:
-                                if (
-                                    "arguments"
-                                    not in self.partial_response_tool_calls[index]["function"]
-                                ):
-                                    self.partial_response_tool_calls[index]["function"][
-                                        "arguments"
-                                    ] = ""
-                                self.partial_response_tool_calls[index]["function"][
-                                    "arguments"
-                                ] += tool_call_chunk.function.arguments
+                                self.io.update_spinner_suffix(
+                                    tool_call_chunk.function.arguments
+                                )
+
                 except (AttributeError, IndexError):
                     # Handle cases where the response structure doesn't match expectations
                     pass
@@ -2949,11 +3045,7 @@ class Coder:
             # dump(func)
             for k, v in func.items():
                 self.tool_reflection = True
-
-                if k in self.partial_response_function_call:
-                    self.partial_response_function_call[k] += v
-                else:
-                    self.partial_response_function_call[k] = v
+                self.io.update_spinner_suffix(v)
 
             received_content = True
         except AttributeError:
@@ -2975,6 +3067,8 @@ class Coder:
             text += reasoning_content
             self.got_reasoning_content = True
             received_content = True
+            self.io.update_spinner_suffix(reasoning_content)
+            self.partial_response_reasoning_content += reasoning_content
 
         try:
             content = chunk.choices[0].delta.content
@@ -2985,10 +3079,14 @@ class Coder:
 
             text += content
             received_content = True
+            self.io.update_spinner_suffix(content)
         except AttributeError:
             pass
 
         self.partial_response_content += text
+
+        self.partial_response_chunks.append(chunk)
+
         if self.show_pretty():
             # Use simplified streaming - just call the method with full content
             content_to_show = self.live_incremental_response(False)
@@ -3006,9 +3104,111 @@ class Coder:
             self.stream_wrapper(safe_text, final=False)
             yield text
 
+        # The Part Doing the Heavy Lifting Now
+        self.consolidate_chunks()
+
         if not received_content and len(self.partial_response_tool_calls) == 0:
             self.io.tool_warning("Empty response received from LLM. Check your provider account?")
 
+    def consolidate_chunks(self):
+        response = (
+            self.partial_response_chunks[0]
+            if not self.stream
+            else litellm.stream_chunk_builder(self.partial_response_chunks)
+        )
+        func_err = None
+        content_err = None
+
+        # Collect provider-specific fields from chunks to preserve them
+        # We need to track both by ID (primary) and index (fallback) since
+        # early chunks might not have IDs established yet
+        provider_specific_fields_by_id = {}
+        provider_specific_fields_by_index = {}
+
+        for chunk in self.partial_response_chunks:
+            try:
+                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.tool_calls:
+                    for tool_call in chunk.choices[0].delta.tool_calls:
+                        if (
+                            hasattr(tool_call, "provider_specific_fields")
+                            and tool_call.provider_specific_fields
+                        ):
+                            # Ensure provider_specific_fields is a dictionary
+                            psf = tool_call.provider_specific_fields
+                            if not isinstance(psf, dict):
+                                continue
+
+                            # Try to use ID first
+                            if hasattr(tool_call, "id") and tool_call.id:
+                                tool_id = tool_call.id
+                                if tool_id not in provider_specific_fields_by_id:
+                                    provider_specific_fields_by_id[tool_id] = {}
+                                # Merge provider-specific fields for this tool ID
+                                provider_specific_fields_by_id[tool_id].update(psf)
+                            # Also track by index as fallback
+                            elif hasattr(tool_call, "index"):
+                                tool_index = tool_call.index
+                                if tool_index not in provider_specific_fields_by_index:
+                                    provider_specific_fields_by_index[tool_index] = {}
+                                provider_specific_fields_by_index[tool_index].update(psf)
+            except (AttributeError, IndexError):
+                continue
+
+        try:
+            if response.choices[0].message.tool_calls:
+                for i, tool_call in enumerate(response.choices[0].message.tool_calls):
+                    # Add provider-specific fields if we collected any for this tool
+                    tool_id = tool_call.id
+
+                    # Try ID first
+                    if tool_id in provider_specific_fields_by_id:
+                        # Add provider-specific fields directly to the tool call object
+                        tool_call.provider_specific_fields = provider_specific_fields_by_id[tool_id]
+                    # Fall back to index
+                    elif i in provider_specific_fields_by_index:
+                        # Add provider-specific fields directly to the tool call object
+                        tool_call.provider_specific_fields = provider_specific_fields_by_index[i]
+
+                    # Create dictionary version with provider-specific fields
+                    tool_call_dict = tool_call.model_dump()
+
+                    # Add provider-specific fields to the dictionary too (in case model_dump() doesn't include them)
+                    if tool_id in provider_specific_fields_by_id:
+                        tool_call_dict["provider_specific_fields"] = provider_specific_fields_by_id[
+                            tool_id
+                        ]
+                    elif i in provider_specific_fields_by_index:
+                        tool_call_dict["provider_specific_fields"] = (
+                            provider_specific_fields_by_index[i]
+                        )
+
+                    # Only append to partial_response_tool_calls if it's empty
+                    if len(self.partial_response_tool_calls) == 0:
+                        self.partial_response_tool_calls.append(tool_call_dict)
+
+                self.partial_response_function_call = (
+                    response.choices[0].message.tool_calls[0].function
+                )
+        except AttributeError as e:
+            func_err = e
+
+        try:
+            reasoning_content = response.choices[0].message.reasoning_content
+        except AttributeError:
+            try:
+                reasoning_content = response.choices[0].message.reasoning
+            except AttributeError:
+                reasoning_content = None
+
+        self.partial_response_reasoning_content = reasoning_content or ""
+
+        try:
+            self.partial_response_content = response.choices[0].message.content or ""
+        except AttributeError as e:
+            content_err = e
+
+        return response, func_err, content_err
+
     def stream_wrapper(self, content, final):
         if not hasattr(self, "_streaming_buffer_length"):
             self._streaming_buffer_length = 0
3433
  return
3234
3434
 
3235
3435
  def get_all_relative_files(self):
3236
- staged_files_hash = hash(str([item.a_path for item in self.repo.repo.index.diff("HEAD")]))
3237
- if (
3238
- staged_files_hash == self.data_cache["repo"]["last_key"]
3239
- and self.data_cache["relative_files"]
3240
- ):
3241
- return self.data_cache["relative_files"]
3436
+ if self.repo_map and self.repo:
3437
+ try:
3438
+ staged_files_hash = hash(
3439
+ str([item.a_path for item in self.repo.repo.index.diff("HEAD")])
3440
+ )
3441
+ if (
3442
+ staged_files_hash == self.data_cache["repo"]["last_key"]
3443
+ and self.data_cache["relative_files"]
3444
+ ):
3445
+ return self.data_cache["relative_files"]
3446
+ except ANY_GIT_ERROR as err:
3447
+ # Handle git errors gracefully - fall back to getting tracked files
3448
+ if self.verbose:
3449
+ self.io.tool_warning(f"Git error while checking staged files: {err}")
3450
+ # Continue to get tracked files normally
3242
3451
 
3243
3452
  if self.repo:
3244
3453
  files = self.repo.get_tracked_files()
@@ -3388,6 +3597,16 @@ class Coder:
     async def apply_updates(self):
         edited = set()
         try:
+            if getattr(self.args, "tweak_responses", False):
+                confirmation = await self.io.confirm_ask("Tweak Response?", allow_tweak=True)
+
+                if confirmation or confirmation == "tweak":
+                    self.partial_response_content = self.io.edit_in_editor(
+                        self.partial_response_content
+                    )
+
+                await asyncio.sleep(0.1)
+
             edits = self.get_edits()
             edits = self.apply_edits_dry_run(edits)
             edits = await self.prepare_to_edit(edits)
@@ -3546,16 +3765,34 @@ class Coder:
     def apply_edits_dry_run(self, edits):
         return edits
 
-    def auto_save_session(self):
+    async def auto_save_session(self):
         """Automatically save the current session as 'auto-save'."""
         if not getattr(self.args, "auto_save", False):
             return
-        try:
-            session_manager = SessionManager(self, self.io)
-            session_manager.save_session("auto-save", False)
-        except Exception:
-            # Don't show errors for auto-save to avoid interrupting the user experience
-            pass
+
+        # Initialize last autosave time if not exists
+        if not hasattr(self, "_last_autosave_time"):
+            self._last_autosave_time = 0
+
+        if not hasattr(self, "_autosave_future"):
+            self._autosave_future = None
+
+        if self._autosave_future and not self._autosave_future.done():
+            return
+
+        # Throttle autosave to run at most once every 15 seconds
+        current_time = time.time()
+        if current_time - self._last_autosave_time >= 15.0:
+            try:
+                self._last_autosave_time = current_time
+                session_manager = SessionManager(self, self.io)
+                loop = asyncio.get_running_loop()
+                self._autosave_future = loop.run_in_executor(
+                    None, session_manager.save_session, "auto-save", False
+                )
+            except Exception:
+                # Don't show errors for auto-save to avoid interrupting the user experience
+                pass
 
     async def run_shell_commands(self):
         if not self.suggest_shell_commands:
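
The new `auto_save_session` combines two guards: skip if a save is already in flight, and allow at most one save per 15 seconds, pushing the blocking save off the event loop with `run_in_executor`. A generic sketch of that throttle pattern (the `ThrottledSaver` class and `save` callable are hypothetical, not part of the package):

```python
import asyncio
import time

class ThrottledSaver:
    def __init__(self, interval=15.0):
        self.interval = interval
        self.last = 0.0
        self.future = None

    async def maybe_save(self, save, *args):
        # Guard 1: a previous save is still running in the executor.
        if self.future and not self.future.done():
            return
        # Guard 2: throttle to at most one save per interval.
        now = time.time()
        if now - self.last < self.interval:
            return
        self.last = now
        loop = asyncio.get_running_loop()
        # Run the blocking save off the event loop so input stays responsive.
        self.future = loop.run_in_executor(None, save, *args)
```
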
@@ -3564,14 +3801,21 @@ class Coder:
         done = set()
         group = ConfirmGroup(set(self.shell_commands))
         accumulated_output = ""
-        for command in self.shell_commands:
-            if command in done:
-                continue
-            done.add(command)
-            output = await self.handle_shell_commands(command, group)
-            if output:
-                accumulated_output += output + "\n\n"
-        return accumulated_output
+
+        try:
+            self.commands.cmd_running = True
+
+            for command in self.shell_commands:
+                if command in done:
+                    continue
+                done.add(command)
+                output = await self.handle_shell_commands(command, group)
+                if output:
+                    accumulated_output += output + "\n\n"
+
+            return accumulated_output
+        finally:
+            self.commands.cmd_running = False
 
     async def handle_shell_commands(self, commands_str, group):
         commands = commands_str.strip().splitlines()
@@ -3594,6 +3838,10 @@ class Coder:
             if not command or command.startswith("#"):
                 continue
 
+            if command and getattr(self.args, "command_prefix", None):
+                command_prefix = getattr(self.args, "command_prefix", None)
+                command = f"{command_prefix} {command}"
+
             self.io.tool_output()
             self.io.tool_output(f"Running {command}")
             # Add the command to input history