auto-coder 0.1.331__py3-none-any.whl → 0.1.333__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- {auto_coder-0.1.331.dist-info → auto_coder-0.1.333.dist-info}/METADATA +1 -1
- {auto_coder-0.1.331.dist-info → auto_coder-0.1.333.dist-info}/RECORD +28 -26
- autocoder/agent/agentic_filter.py +929 -0
- autocoder/auto_coder.py +5 -23
- autocoder/auto_coder_runner.py +2 -0
- autocoder/commands/auto_command.py +1 -1
- autocoder/commands/tools.py +68 -3
- autocoder/common/__init__.py +2 -0
- autocoder/common/auto_coder_lang.py +9 -1
- autocoder/common/code_modification_ranker.py +6 -2
- autocoder/common/conf_utils.py +36 -0
- autocoder/common/stream_out_type.py +4 -2
- autocoder/common/types.py +2 -2
- autocoder/common/v2/code_auto_merge_editblock.py +1 -1
- autocoder/common/v2/code_diff_manager.py +73 -6
- autocoder/common/v2/code_editblock_manager.py +480 -163
- autocoder/compilers/provided_compiler.py +39 -0
- autocoder/helper/project_creator.py +282 -100
- autocoder/index/entry.py +35 -10
- autocoder/linters/reactjs_linter.py +55 -61
- autocoder/linters/shadow_linter.py +4 -0
- autocoder/shadows/shadow_manager.py +1 -1
- autocoder/utils/project_structure.py +2 -2
- autocoder/version.py +1 -1
- {auto_coder-0.1.331.dist-info → auto_coder-0.1.333.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.331.dist-info → auto_coder-0.1.333.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.331.dist-info → auto_coder-0.1.333.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.331.dist-info → auto_coder-0.1.333.dist-info}/top_level.txt +0 -0
autocoder/auto_coder.py
CHANGED
@@ -1197,21 +1197,7 @@ def main(input_args: Optional[List[str]] = None):
                border_style="blue",
                expand=False,
            )
-        )
-        if args.request_id:
-            request_queue.add_request(
-                args.request_id,
-                RequestValue(
-                    value=StreamValue(value=[chat_content]), status=RequestOption.RUNNING
-                ),
-            )
-            request_queue.add_request(
-                args.request_id,
-                RequestValue(
-                    value=StreamValue(value=[""]), status=RequestOption.COMPLETED
-                ),
-            )
-            return {}
+        )

        lines = []
        while True:
@@ -1244,14 +1230,7 @@ def main(input_args: Optional[List[str]] = None):
        )

        with open(memory_file, "w",encoding="utf-8") as f:
-            json.dump(chat_history, f, ensure_ascii=False)
-
-        request_queue.add_request(
-            args.request_id,
-            RequestValue(
-                value=DefaultValue(value=result), status=RequestOption.COMPLETED
-            ),
-        )
+            json.dump(chat_history, f, ensure_ascii=False)

        if "save" in commands_info:
            save_to_memory_file(ask_conversation=chat_history["ask_conversation"],
@@ -1311,6 +1290,9 @@ def main(input_args: Optional[List[str]] = None):
        printer.print_in_terminal("estimated_chat_input_tokens", style="yellow",
                                  estimated_input_tokens=estimated_input_tokens
                                  )
+
+        # with open("/tmp/output.txt", "w",encoding="utf-8") as f:
+        #     f.write(json.dumps(loaded_conversations, ensure_ascii=False, indent=4))

        v = stream_chat_with_continue(
            llm=chat_llm,
autocoder/auto_coder_runner.py
CHANGED
@@ -57,6 +57,8 @@ from autocoder import command_parser as CommandParser
 from loguru import logger
 from autocoder.utils.project_structure import EnhancedFileAnalyzer

+## 对外API,用于第三方集成 auto-coder 使用。
+
 class SymbolItem(BaseModel):
     symbol_name: str
     symbol_type: SymbolType
autocoder/commands/tools.py
CHANGED
@@ -8,7 +8,7 @@ from autocoder.common import AutoCoderArgs, SourceCode
 from autocoder.common.interpreter import Interpreter
 from autocoder.common import ExecuteSteps, ExecuteStep, detect_env
 from autocoder.common import code_auto_execute
-from typing import List, Tuple
+from typing import List, Tuple,Dict
 import os
 import byzerllm
 import json
@@ -147,8 +147,11 @@ class AutoCommandTools:

        return answer

-    def response_user(self, response: str):
+    def response_user(self, response: Union[str, Dict]):
        # 如果是在web模式下,则使用event_manager事件来询问用户
+        if isinstance(response, dict):
+            response = json.dumps(response, ensure_ascii=False,indent=4)
+
        if get_run_context().is_web():
            try:
                get_event_manager(
@@ -193,6 +196,56 @@ class AutoCommandTools:
                })

        return response
+
+    def output_result(self, response: Union[str, Dict]):
+        # 如果是在web模式下,则使用event_manager事件来询问用户
+        if isinstance(response, dict):
+            response = json.dumps(response, ensure_ascii=False,indent=4)
+
+        if get_run_context().is_web():
+            try:
+                get_event_manager(
+                    self.args.event_file).write_result(
+                    EventContentCreator.create_result(
+                        EventContentCreator.MarkdownContent(
+                            content=response
+                        )
+                    )
+                )
+                self.result_manager.append(content=response, meta={
+                    "action": "output_result",
+                    "input": {
+                        "response": response
+                    }
+                })
+            except Exception as e:
+                error_message = f"Error: {str(e)}\n\n完整异常堆栈信息:\n{traceback.format_exc()}"
+                self.result_manager.append(content=f"Error: {error_message}", meta={
+                    "action": "output_result",
+                    "input": {
+                        "response": response
+                    }
+                })
+            return response
+
+        console = Console()
+        answer_text = Text(response, style="italic")
+        answer_panel = Panel(
+            answer_text,
+            title="",
+            border_style="green",
+            expand=False
+        )
+        console.print(answer_panel)
+
+        self.result_manager.append(content=response, meta={
+            "action": "output_result",
+            "input": {
+                "response": response
+            }
+        })
+
+        return response

    def run_python_code(self, code: str) -> str:
        """
@@ -690,6 +743,18 @@ class AutoCommandTools:
                    matched_files.append(os.path.join(root, file))

        v = ",".join(matched_files)
+
+        tokens = count_tokens(v)
+        if tokens > self.args.conversation_prune_safe_zone_tokens / 2.0:
+            result = f"The result is too large to return. (tokens: {tokens}). Try to use another function or use another keyword to search."
+            self.result_manager.add_result(content=result, meta={
+                "action": "find_files_by_name",
+                "input": {
+                    "keyword": keyword
+                }
+            })
+            return result
+
        self.result_manager.add_result(content=v, meta={
            "action": "find_files_by_name",
            "input": {
@@ -731,7 +796,7 @@ class AutoCommandTools:
        excluded_dirs = [
            'node_modules', '.git', '.venv', 'venv', '__pycache__', 'dist', 'build',
            '.DS_Store', '.idea', '.vscode', 'tmp', 'temp', 'cache', 'coverage',
-            'htmlcov', '.mypy_cache', '.pytest_cache', '.hypothesis'
+            'htmlcov', '.mypy_cache', '.pytest_cache', '.hypothesis',".auto-coder"
        ]
        excluded_file_patterns = [
            '*.pyc', '*.pyo', '*.pyd', '*.egg-info', '*.log'
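Taken together, the tools.py changes follow two small patterns: tool responses may now be either a string or a dict (dicts are serialized to pretty-printed JSON before display), and find_files_by_name refuses to return a result whose token count exceeds half of the configured safe zone. Below is a minimal standalone sketch of both patterns; the SAFE_ZONE_TOKENS constant and the token_count argument are hypothetical stand-ins for args.conversation_prune_safe_zone_tokens and the package's count_tokens helper, which are not reproduced here.

```python
import json
from typing import Dict, Union

SAFE_ZONE_TOKENS = 8192  # hypothetical stand-in for args.conversation_prune_safe_zone_tokens


def normalize_response(response: Union[str, Dict]) -> str:
    # Mirrors response_user/output_result: dict payloads become pretty-printed JSON text.
    if isinstance(response, dict):
        return json.dumps(response, ensure_ascii=False, indent=4)
    return response


def guard_large_result(joined_files: str, token_count: int) -> str:
    # Mirrors the new early return in find_files_by_name: refuse oversized results.
    if token_count > SAFE_ZONE_TOKENS / 2.0:
        return (f"The result is too large to return. (tokens: {token_count}). "
                "Try to use another function or use another keyword to search.")
    return joined_files


print(normalize_response({"status": "ok"}))
print(guard_large_result("a.py,b.py", token_count=9000))
```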
autocoder/common/__init__.py
CHANGED
@@ -270,6 +270,8 @@ class AutoCoderArgs(pydantic.BaseModel):
     index_model_max_length: Optional[int] = 0
     index_model_max_input_length: Optional[int] = 0
     index_model_anti_quota_limit: Optional[int] = 0
+
+    enable_agentic_filter: Optional[bool] = False


     index_filter_level: Optional[int] = 0
autocoder/common/auto_coder_lang.py
CHANGED
@@ -785,7 +785,15 @@ MESSAGES = {
     "max_unmerged_blocks_attempts_reached": {
         "en": "Maximum unmerged blocks fix attempts reached",
         "zh": "已达到最大未合并代码块修复尝试次数"
-    }
+    },
+    "agenticFilterContext": {
+        "en": "Start to find context...",
+        "zh": "开始智能查找上下文...."
+    },
+    "agenticFilterContextFinished": {
+        "en": "End to find context...",
+        "zh": "结束智能查找上下文...."
+    }
 }


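The MESSAGES table keeps one entry per message key with parallel en/zh strings. The package's own lookup helper appears to be get_message_with_format (it is imported elsewhere in this release), but its signature is not shown in this diff, so the standalone sketch below only illustrates how such a bilingual table is typically consumed.

```python
# Standalone illustration only; the keys and strings are copied from this diff,
# while get_message is a simplified stand-in for the package's real lookup helper.
MESSAGES = {
    "agenticFilterContext": {
        "en": "Start to find context...",
        "zh": "开始智能查找上下文....",
    },
    "agenticFilterContextFinished": {
        "en": "End to find context...",
        "zh": "结束智能查找上下文....",
    },
}


def get_message(key: str, lang: str = "en") -> str:
    return MESSAGES[key][lang]


print(get_message("agenticFilterContext"))        # Start to find context...
print(get_message("agenticFilterContext", "zh"))  # 开始智能查找上下文....
```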
autocoder/common/code_modification_ranker.py
CHANGED
@@ -148,17 +148,20 @@ class CodeModificationRanker:
         input_tokens_count = 0
         generated_tokens_count = 0
         try:
+            import traceback
+            traceback.print_stack()
             # Create a thread pool with (number of models * generate_times) workers
             with ThreadPoolExecutor(max_workers=total_tasks) as executor:
                 # Submit tasks for each model and generate_times
                 futures = []
+                count = 0
                 for llm in self.llms:
                     model_name = ",".join(get_llm_names(llm))
                     self.printer.print_in_terminal(
                         "ranking_start", style="blue", count=len(generate_result.contents), model_name=model_name)

-                    for
-                    if
+                    for _ in range(rank_times):
+                        if count == 0:
                             futures.append(
                                 executor.submit(
                                     stream_chat_with_continue,
@@ -178,6 +181,7 @@ class CodeModificationRanker:
                                     self.args
                                 )
                             )
+                            count += 1

             # Collect all results
             results = []
autocoder/common/conf_utils.py
ADDED
@@ -0,0 +1,36 @@
+import os
+import json
+import pkg_resources
+from autocoder.common import AutoCoderArgs
+
+## 用于auto-coder 内部使用
+
+def load_tokenizer():
+    from autocoder.rag.variable_holder import VariableHolder
+    from tokenizers import Tokenizer
+    try:
+        tokenizer_path = pkg_resources.resource_filename(
+            "autocoder", "data/tokenizer.json"
+        )
+        VariableHolder.TOKENIZER_PATH = tokenizer_path
+        VariableHolder.TOKENIZER_MODEL = Tokenizer.from_file(tokenizer_path)
+    except FileNotFoundError:
+        tokenizer_path = None
+
+
+def save_memory(args: AutoCoderArgs,memory):
+    with open(os.path.join(args.source_dir, ".auto-coder", "plugins", "chat-auto-coder", "memory.json"), "w",encoding="utf-8") as f:
+        json.dump(memory, f, indent=2, ensure_ascii=False)
+
+def load_memory(args: AutoCoderArgs):
+    memory_path = os.path.join(args.source_dir, ".auto-coder", "plugins", "chat-auto-coder", "memory.json")
+    if os.path.exists(memory_path):
+        with open(memory_path, "r", encoding="utf-8") as f:
+            _memory = json.load(f)
+        memory = _memory
+    else:
+        memory = {}
+    return memory
+
+def get_memory(args: AutoCoderArgs):
+    return load_memory(args)
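A hedged usage sketch of the new conf_utils helpers follows. Assumptions: AutoCoderArgs accepts source_dir as a keyword with the remaining fields defaulting, and the .auto-coder/plugins/chat-auto-coder directory already exists under source_dir (save_memory opens the file directly and does not create parent directories).

```python
from autocoder.common import AutoCoderArgs
from autocoder.common.conf_utils import load_tokenizer, load_memory, save_memory

args = AutoCoderArgs(source_dir=".")   # assumption: other fields use their defaults

load_tokenizer()               # loads the bundled data/tokenizer.json into VariableHolder

memory = load_memory(args)     # returns {} if memory.json does not exist yet
memory["last_release"] = "0.1.333"   # arbitrary example key
save_memory(args, memory)      # writes .auto-coder/plugins/chat-auto-coder/memory.json
```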
autocoder/common/stream_out_type.py
CHANGED
@@ -1,11 +1,13 @@
 from enum import Enum

 class AutoCommandStreamOutType(Enum):
-    COMMAND_SUGGESTION = "command_suggestion"
-
+    COMMAND_SUGGESTION = "command_suggestion"
 class IndexFilterStreamOutType(Enum):
     FILE_NUMBER_LIST = "file_number_list"

+class AgenticFilterStreamOutType(Enum):
+    AGENTIC_FILTER = "agentic_filter"
+

 class CodeGenerateStreamOutType(Enum):
     CODE_GENERATE = "code_generate"
autocoder/common/types.py
CHANGED
@@ -1,6 +1,6 @@
 from enum import Enum
 import pydantic
-from typing import List, Dict, Tuple,Any
+from typing import List, Dict, Tuple,Any,Optional
 class Mode(Enum):
     MULTI_ROUND = "multi_round"
     SINGLE_ROUND = "single_round"
@@ -17,4 +17,4 @@ class CodeGenerateResult(pydantic.BaseModel):
 class MergeCodeWithoutEffect(pydantic.BaseModel):
     success_blocks: List[Tuple[str, str]]
     failed_blocks: List[Any]
-    merged_blocks:
+    merged_blocks: Optional[Any] = None
autocoder/common/v2/code_auto_merge_editblock.py
CHANGED
@@ -272,7 +272,7 @@ class CodeAutoMergeEditBlock:
         merged_blocks = []

         for block in codes:
-            file_path, head, update = block
+            file_path, head, update = block
             if not os.path.exists(file_path):
                 merged_blocks.append((file_path, "", update))
                 file_content_mapping[file_path] = update
autocoder/common/v2/code_diff_manager.py
CHANGED
@@ -1,19 +1,19 @@
 from typing import List, Dict, Tuple, Optional, Any
 import os
-import json
 import time
-from concurrent.futures import ThreadPoolExecutor

 import byzerllm
 from byzerllm.utils.client import code_utils

 from autocoder.common.types import Mode, CodeGenerateResult, MergeCodeWithoutEffect
-from autocoder.common import AutoCoderArgs, git_utils, SourceCodeList
+from autocoder.common import AutoCoderArgs, git_utils, SourceCodeList, SourceCode
+from autocoder.common.action_yml_file_manager import ActionYmlFileManager
 from autocoder.common import sys_prompt
+from autocoder.compilers.shadow_compiler import ShadowCompiler
 from autocoder.privacy.model_filter import ModelPathFilter
 from autocoder.common.utils_code_auto_generate import chat_with_continue, stream_chat_with_continue, ChatWithContinueResult
 from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
-from autocoder.common.stream_out_type import CodeGenerateStreamOutType
+from autocoder.common.stream_out_type import LintStreamOutType, CompileStreamOutType, UnmergedBlocksStreamOutType, CodeGenerateStreamOutType
 from autocoder.common.auto_coder_lang import get_message_with_format
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
@@ -28,6 +28,9 @@ from loguru import logger
 from autocoder.common.global_cancel import global_cancel
 from autocoder.linters.models import ProjectLintResult
 from autocoder.common.token_cost_caculate import TokenCostCalculator
+from autocoder.events.event_manager_singleton import get_event_manager
+from autocoder.events.event_types import Event, EventType, EventMetadata
+from autocoder.events import event_content as EventContentCreator


 class CodeDiffManager:
@@ -47,7 +50,8 @@ class CodeDiffManager:
         self.args = args
         self.action = action
         self.generate_times_same_model = args.generate_times_same_model
-        self.
+        self.auto_fix_lint_max_attempts = args.auto_fix_lint_max_attempts
+        self.auto_fix_compile_max_attempts = args.auto_fix_compile_max_attempts
         self.printer = Printer()

         # Initialize sub-components
@@ -55,8 +59,11 @@ class CodeDiffManager:
         self.code_merger = CodeAutoMergeDiff(llm, args)

         # Create shadow manager for linting
-        self.shadow_manager = ShadowManager(
+        self.shadow_manager = ShadowManager(
+            args.source_dir, args.event_file, args.ignore_clean_shadows)
         self.shadow_linter = ShadowLinter(self.shadow_manager, verbose=False)
+        self.shadow_compiler = ShadowCompiler(
+            self.shadow_manager, verbose=False)

     @byzerllm.prompt()
     def fix_linter_errors(self, query: str, lint_issues: str) -> str:
@@ -75,6 +82,66 @@ class CodeDiffManager:
         请使用 unified diff 格式输出修改。
         """

+    @byzerllm.prompt()
+    def fix_compile_errors(self, query: str, compile_errors: str) -> str:
+        """
+        编译错误:
+        <compile_errors>
+        {{ compile_errors }}
+        </compile_errors>
+
+        用户原始需求:
+        <user_query_wrapper>
+        {{ query }}
+        </user_query_wrapper>
+
+        修复上述问题,请确保代码质量问题被解决,同时保持代码的原有功能。
+        请使用 unified diff 格式输出修改。
+        """
+
+    @byzerllm.prompt()
+    def fix_missing_context(self, query: str, original_code: str, missing_files: str) -> str:
+        """
+        下面是你根据格式要求输出的一份修改代码:
+        <original_code>
+        {{ original_code }}
+        </original_code>
+
+        我发现你尝试修改以下文件,但这些文件没有在上下文中提供,所以你无法看到它们的内容:
+        <missing_files>
+        {{ missing_files }}
+        </missing_files>
+
+        下面是用户原始的需求:
+        <user_query_wrapper>
+        {{ query }}
+        </user_query_wrapper>
+
+        我已经将这些文件添加到上下文中,请重新生成代码,确保使用 unified diff 格式正确修改这些文件。
+        """
+
+    @byzerllm.prompt()
+    def fix_unmerged_blocks(self, query: str, original_code: str, unmerged_blocks: str) -> str:
+        """
+        下面是你根据格式要求输出的一份修改代码:
+        <original_code>
+        {{ original_code }}
+        </original_code>
+
+        但是我发现下面的代码块无法合并:
+        <unmerged_blocks>
+        {{ unmerged_blocks }}
+        </unmerged_blocks>
+
+        下面是用户原始的需求:
+        <user_query_wrapper>
+        {{ query }}
+        </user_query_wrapper>
+
+        请根据反馈,回顾之前的格式要求,重新生成一份修改代码,确保所有代码块都能够正确合并。
+        请使用 unified diff 格式输出修改。
+        """
+
     def _create_shadow_files_from_edits(self, generation_result: CodeGenerateResult) -> Dict[str, str]:
         """
         从编辑块内容中提取代码并创建临时影子文件用于检查。