auto-coder 0.1.330__py3-none-any.whl → 0.1.332__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic.
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/METADATA +1 -1
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/RECORD +47 -45
- autocoder/agent/agentic_filter.py +928 -0
- autocoder/agent/project_reader.py +1 -14
- autocoder/auto_coder.py +6 -47
- autocoder/auto_coder_runner.py +2 -0
- autocoder/command_args.py +1 -6
- autocoder/commands/auto_command.py +1 -1
- autocoder/commands/tools.py +68 -16
- autocoder/common/__init__.py +8 -3
- autocoder/common/auto_coder_lang.py +21 -1
- autocoder/common/code_auto_generate.py +6 -160
- autocoder/common/code_auto_generate_diff.py +5 -111
- autocoder/common/code_auto_generate_editblock.py +5 -95
- autocoder/common/code_auto_generate_strict_diff.py +6 -112
- autocoder/common/code_auto_merge_editblock.py +1 -45
- autocoder/common/code_modification_ranker.py +6 -2
- autocoder/common/command_templates.py +2 -9
- autocoder/common/conf_utils.py +36 -0
- autocoder/common/stream_out_type.py +7 -2
- autocoder/common/types.py +3 -2
- autocoder/common/v2/code_auto_generate.py +6 -4
- autocoder/common/v2/code_auto_generate_diff.py +4 -3
- autocoder/common/v2/code_auto_generate_editblock.py +9 -4
- autocoder/common/v2/code_auto_generate_strict_diff.py +182 -14
- autocoder/common/v2/code_auto_merge_diff.py +560 -306
- autocoder/common/v2/code_auto_merge_editblock.py +12 -45
- autocoder/common/v2/code_auto_merge_strict_diff.py +76 -7
- autocoder/common/v2/code_diff_manager.py +73 -6
- autocoder/common/v2/code_editblock_manager.py +534 -82
- autocoder/dispacher/actions/action.py +15 -28
- autocoder/dispacher/actions/plugins/action_regex_project.py +5 -9
- autocoder/helper/project_creator.py +0 -1
- autocoder/index/entry.py +35 -53
- autocoder/index/filter/normal_filter.py +0 -16
- autocoder/lang.py +2 -4
- autocoder/linters/shadow_linter.py +4 -0
- autocoder/pyproject/__init__.py +2 -19
- autocoder/rag/cache/simple_cache.py +31 -6
- autocoder/regexproject/__init__.py +4 -22
- autocoder/suffixproject/__init__.py +6 -24
- autocoder/tsproject/__init__.py +5 -22
- autocoder/version.py +1 -1
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/top_level.txt +0 -0
autocoder/agent/project_reader.py
CHANGED

@@ -185,20 +185,7 @@ def get_tools(args: AutoCoderArgs, llm: byzerllm.ByzerLLM):
     返回值是 用户对你问题的回答。
 
     注意,尽量不要询问用户,除非你感受到你无法回答用户的问题。
-    '''
-
-    if args.request_id and not args.silence and not args.skip_events:
-        event_data = {
-            "question": question
-        }
-        response_json = queue_communicate.send_event(
-            request_id=args.request_id,
-            event=CommunicateEvent(
-                event_type=CommunicateEventType.ASK_HUMAN.value,
-                data=json.dumps(event_data, ensure_ascii=False),
-            ),
-        )
-        return response_json
+    '''
 
     console = Console()
autocoder/auto_coder.py
CHANGED

@@ -475,30 +475,7 @@ def main(input_args: Optional[List[str]] = None):
                     border_style="blue",
                     expand=False,
                 )
-            )
-
-            if args.request_id and not args.silence and not args.skip_events:
-                event_data = {
-                    "instruction": final_ins,
-                    "model": model,
-                    "request_id": args.request_id,
-                }
-                response_json = queue_communicate.send_event(
-                    request_id=args.request_id,
-                    event=CommunicateEvent(
-                        event_type=CommunicateEventType.CODE_HUMAN_AS_MODEL.value,
-                        data=json.dumps(event_data, ensure_ascii=False),
-                    ),
-                )
-                response = json.loads(response_json)
-                v = [
-                    {
-                        "predict": response["value"],
-                        "input": input_value[0]["instruction"],
-                        "metadata": {},
-                    }
-                ]
-                return False, v
+            )
 
             lines = []
             while True:
@@ -1220,21 +1197,7 @@ def main(input_args: Optional[List[str]] = None):
                     border_style="blue",
                     expand=False,
                 )
-            )
-            if args.request_id:
-                request_queue.add_request(
-                    args.request_id,
-                    RequestValue(
-                        value=StreamValue(value=[chat_content]), status=RequestOption.RUNNING
-                    ),
-                )
-                request_queue.add_request(
-                    args.request_id,
-                    RequestValue(
-                        value=StreamValue(value=[""]), status=RequestOption.COMPLETED
-                    ),
-                )
-            return {}
+            )
 
             lines = []
             while True:
@@ -1267,14 +1230,7 @@ def main(input_args: Optional[List[str]] = None):
             )
 
             with open(memory_file, "w",encoding="utf-8") as f:
-                json.dump(chat_history, f, ensure_ascii=False)
-
-            request_queue.add_request(
-                args.request_id,
-                RequestValue(
-                    value=DefaultValue(value=result), status=RequestOption.COMPLETED
-                ),
-            )
+                json.dump(chat_history, f, ensure_ascii=False)
 
             if "save" in commands_info:
                 save_to_memory_file(ask_conversation=chat_history["ask_conversation"],
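After this cleanup, the chat path simply persists the conversation to the memory file and no longer pushes completion events into request_queue. A minimal sketch of the surviving persistence step, assuming a chat_history dict shaped like the one the surrounding context reads (the "ask_conversation" key is visible in the context lines; the file path here is illustrative):

```python
import json

# Minimal sketch of the surviving persistence step: chat_history is written
# straight to the memory file; the request_queue completion events are gone.
memory_file = "/tmp/memory.json"  # illustrative path, not the real location
chat_history = {"ask_conversation": [
    {"role": "user", "content": "解释这个函数"},
    {"role": "assistant", "content": "它会合并代码块。"},
]}

with open(memory_file, "w", encoding="utf-8") as f:
    # ensure_ascii=False keeps Chinese conversation text readable on disk
    json.dump(chat_history, f, ensure_ascii=False)
```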
@@ -1334,6 +1290,9 @@ def main(input_args: Optional[List[str]] = None):
             printer.print_in_terminal("estimated_chat_input_tokens", style="yellow",
                                       estimated_input_tokens=estimated_input_tokens
                                       )
+
+            # with open("/tmp/output.txt", "w",encoding="utf-8") as f:
+            #     f.write(json.dumps(loaded_conversations, ensure_ascii=False, indent=4))
 
             v = stream_chat_with_continue(
                 llm=chat_llm,
autocoder/auto_coder_runner.py
CHANGED

@@ -57,6 +57,8 @@ from autocoder import command_parser as CommandParser
 from loguru import logger
 from autocoder.utils.project_structure import EnhancedFileAnalyzer
 
+## 对外API,用于第三方集成 auto-coder 使用。
+
 class SymbolItem(BaseModel):
     symbol_name: str
     symbol_type: SymbolType
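The added comment marks this module as the public API surface for third-party integrations of auto-coder. The hunk context shows SymbolItem, a small pydantic model. A self-contained sketch of that shape follows; note that SymbolType's real definition is not part of this diff, so the Enum members below are purely illustrative stand-ins:

```python
# Sketch of the SymbolItem shape visible in the hunk context.
# SymbolType is not shown in this diff; the Enum here is a hypothetical
# stand-in used only to make the example self-contained.
from enum import Enum
from pydantic import BaseModel

class SymbolType(str, Enum):
    FUNCTION = "function"  # hypothetical member
    CLASS = "class"        # hypothetical member

class SymbolItem(BaseModel):
    symbol_name: str
    symbol_type: SymbolType

item = SymbolItem(symbol_name="get_tools", symbol_type=SymbolType.FUNCTION)
print(item)
```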
autocoder/command_args.py
CHANGED

@@ -195,12 +195,7 @@ def parse_args(input_args: Optional[List[str]] = None) -> AutoCoderArgs:
     parser.add_argument(
         "--image_max_iter", type=int, default=1, help=desc["image_max_iter"]
     )
-
-    parser.add_argument(
-        "--enable_multi_round_generate",
-        action="store_true",
-        help=desc["enable_multi_round_generate"],
-    )
+
     parser.add_argument(
         "--skip_confirm", action="store_true", help=desc["skip_confirm"]
     )
autocoder/commands/tools.py
CHANGED

@@ -8,7 +8,7 @@ from autocoder.common import AutoCoderArgs, SourceCode
 from autocoder.common.interpreter import Interpreter
 from autocoder.common import ExecuteSteps, ExecuteStep, detect_env
 from autocoder.common import code_auto_execute
-from typing import List, Tuple
+from typing import List, Tuple,Dict
 import os
 import byzerllm
 import json
@@ -105,19 +105,6 @@ class AutoCommandTools:
         注意,尽量不要询问用户,除非你感受到你无法回答用户的问题。
         '''
 
-        if self.args.request_id and not self.args.silence and not self.args.skip_events:
-            event_data = {
-                "question": question
-            }
-            response_json = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.ASK_HUMAN.value,
-                    data=json.dumps(event_data, ensure_ascii=False),
-                ),
-            )
-            return response_json
-
         # 如果是在web模式下,则使用event_manager事件来询问用户
         if get_run_context().is_web():
             answer = get_event_manager(
@@ -160,8 +147,11 @@ class AutoCommandTools:
 
         return answer
 
-    def response_user(self, response: str):
+    def response_user(self, response: Union[str, Dict]):
         # 如果是在web模式下,则使用event_manager事件来询问用户
+        if isinstance(response, dict):
+            response = json.dumps(response, ensure_ascii=False,indent=4)
+
         if get_run_context().is_web():
             try:
                 get_event_manager(
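response_user now accepts either a plain string or a dict; dicts are serialized to pretty-printed JSON before any further handling. A standalone sketch of exactly that normalization step, using nothing beyond the stdlib:

```python
import json
from typing import Dict, Union

def normalize_response(response: Union[str, Dict]) -> str:
    """Mirror of the new response_user preamble: dicts become
    pretty-printed JSON, strings pass through unchanged."""
    if isinstance(response, dict):
        # ensure_ascii=False keeps Chinese text readable instead of \uXXXX escapes
        return json.dumps(response, ensure_ascii=False, indent=4)
    return response

print(normalize_response({"status": "ok", "message": "完成"}))
print(normalize_response("plain text passes through"))
```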
@@ -206,6 +196,56 @@ class AutoCommandTools:
         })
 
         return response
+
+    def output_result(self, response: Union[str, Dict]):
+        # 如果是在web模式下,则使用event_manager事件来询问用户
+        if isinstance(response, dict):
+            response = json.dumps(response, ensure_ascii=False,indent=4)
+
+        if get_run_context().is_web():
+            try:
+                get_event_manager(
+                    self.args.event_file).write_result(
+                    EventContentCreator.create_result(
+                        EventContentCreator.MarkdownContent(
+                            content=response
+                        )
+                    )
+                )
+                self.result_manager.append(content=response, meta={
+                    "action": "output_result",
+                    "input": {
+                        "response": response
+                    }
+                })
+            except Exception as e:
+                error_message = f"Error: {str(e)}\n\n完整异常堆栈信息:\n{traceback.format_exc()}"
+                self.result_manager.append(content=f"Error: {error_message}", meta={
+                    "action": "output_result",
+                    "input": {
+                        "response": response
+                    }
+                })
+            return response
+
+        console = Console()
+        answer_text = Text(response, style="italic")
+        answer_panel = Panel(
+            answer_text,
+            title="",
+            border_style="green",
+            expand=False
+        )
+        console.print(answer_panel)
+
+        self.result_manager.append(content=response, meta={
+            "action": "output_result",
+            "input": {
+                "response": response
+            }
+        })
+
+        return response
 
     def run_python_code(self, code: str) -> str:
         """
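The new output_result follows the same dual path as response_user: in web mode it writes a Markdown result through the event manager, otherwise it prints the response in a green rich Panel; either way it records the call in result_manager and returns the (possibly JSON-serialized) response. A hedged sketch of just the terminal path, using the same rich primitives the method itself uses and omitting the event-manager and result_manager plumbing:

```python
# Terminal-path sketch of output_result's rendering, using the rich
# primitives (Console, Text, Panel) shown in the hunk. The web-mode
# event plumbing and result_manager bookkeeping are omitted here.
from rich.console import Console
from rich.panel import Panel
from rich.text import Text

def render_result(response: str) -> None:
    console = Console()
    answer_text = Text(response, style="italic")
    answer_panel = Panel(answer_text, title="", border_style="green", expand=False)
    console.print(answer_panel)

render_result("All 3 files merged successfully.")
```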
@@ -703,6 +743,18 @@ class AutoCommandTools:
                     matched_files.append(os.path.join(root, file))
 
         v = ",".join(matched_files)
+
+        tokens = count_tokens(v)
+        if tokens > self.args.conversation_prune_safe_zone_tokens / 2.0:
+            result = f"The result is too large to return. (tokens: {tokens}). Try to use another function or use another keyword to search."
+            self.result_manager.add_result(content=result, meta={
+                "action": "find_files_by_name",
+                "input": {
+                    "keyword": keyword
+                }
+            })
+            return result
+
         self.result_manager.add_result(content=v, meta={
             "action": "find_files_by_name",
             "input": {
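find_files_by_name now short-circuits when the joined file list would blow the conversation budget: if the result exceeds half of conversation_prune_safe_zone_tokens, the tool returns a "too large" hint instead of the raw listing. A standalone sketch of the guard, with a naive comma-count standing in for the project's real tokenizer-based count_tokens:

```python
# Sketch of the oversized-result guard added to find_files_by_name.
# count_tokens here is a crude stand-in (one token per comma-joined path);
# the real helper counts tokens with an actual tokenizer.
def count_tokens(text: str) -> int:
    return len(text.split(","))

def guard_result(v: str, safe_zone_tokens: int) -> str:
    tokens = count_tokens(v)
    if tokens > safe_zone_tokens / 2.0:
        return (f"The result is too large to return. (tokens: {tokens}). "
                "Try to use another function or use another keyword to search.")
    return v

paths = ",".join(f"/repo/src/module_{i}.py" for i in range(10_000))
print(guard_result(paths, safe_zone_tokens=4_000)[:80])
```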
@@ -744,7 +796,7 @@ class AutoCommandTools:
         excluded_dirs = [
             'node_modules', '.git', '.venv', 'venv', '__pycache__', 'dist', 'build',
             '.DS_Store', '.idea', '.vscode', 'tmp', 'temp', 'cache', 'coverage',
-            'htmlcov', '.mypy_cache', '.pytest_cache', '.hypothesis'
+            'htmlcov', '.mypy_cache', '.pytest_cache', '.hypothesis',".auto-coder"
         ]
         excluded_file_patterns = [
             '*.pyc', '*.pyo', '*.pyd', '*.egg-info', '*.log'
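The exclusion list now also skips .auto-coder, the tool's own metadata directory. The usual way such a list is applied is to prune os.walk in place so excluded trees are never descended into; a sketch under that assumption (the pruning loop itself is illustrative, only the directory list comes from the diff):

```python
import os

excluded_dirs = [
    'node_modules', '.git', '.venv', 'venv', '__pycache__', 'dist', 'build',
    '.DS_Store', '.idea', '.vscode', 'tmp', 'temp', 'cache', 'coverage',
    'htmlcov', '.mypy_cache', '.pytest_cache', '.hypothesis', '.auto-coder',
]

def walk_source_files(root_dir: str):
    for root, dirs, files in os.walk(root_dir):
        # Pruning dirs in place stops os.walk from descending into them.
        dirs[:] = [d for d in dirs if d not in excluded_dirs]
        for name in files:
            yield os.path.join(root, name)

for path in walk_source_files("."):
    print(path)
```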
autocoder/common/__init__.py
CHANGED

@@ -244,7 +244,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     execute: Optional[bool] = None
     package_name: Optional[str] = ""
     script_path: Optional[str] = ""
-
+
     model: Optional[str] = ""
     chat_model: Optional[str] = ""
     model_max_length: Optional[int] = 2000

@@ -270,6 +270,8 @@ class AutoCoderArgs(pydantic.BaseModel):
     index_model_max_length: Optional[int] = 0
     index_model_max_input_length: Optional[int] = 0
     index_model_anti_quota_limit: Optional[int] = 0
+
+    enable_agentic_filter: Optional[bool] = False
 
 
     index_filter_level: Optional[int] = 0
@@ -326,13 +328,14 @@ class AutoCoderArgs(pydantic.BaseModel):
     image_max_iter: Optional[int] = 1
 
     urls: Optional[Union[str, List[str]]] = ""
-    urls_use_model: Optional[bool] = False
-    enable_multi_round_generate: Optional[bool] = False
+    urls_use_model: Optional[bool] = False
     command: Optional[str] = None
     doc_command: Optional[str] = None
     required_exts: Optional[str] = None
     hybrid_index_max_output_tokens: Optional[int] = 1000000
 
+    enable_multi_round_generate: Optional[bool] = False
+
     monitor_mode: bool = False
     enable_hybrid_index: bool = False
     rag_build_name: Optional[str] = None
@@ -420,9 +423,11 @@ class AutoCoderArgs(pydantic.BaseModel):
 
     enable_auto_fix_lint: Optional[bool] = False
     enable_auto_fix_compile: Optional[bool] = False
+    enable_auto_fix_merge: Optional[bool] = False
 
     auto_fix_lint_max_attempts: Optional[int] = 5
     auto_fix_compile_max_attempts: Optional[int] = 5
+    auto_fix_merge_max_attempts: Optional[int] = 5
 
     ignore_clean_shadows: Optional[bool] = False
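Taken together, 0.1.332 adds three AutoCoderArgs fields (enable_agentic_filter, enable_auto_fix_merge, auto_fix_merge_max_attempts), and enable_multi_round_generate survives as a model field even though its CLI flag was removed from command_args.py. Because AutoCoderArgs is a pydantic model with defaults, the new switches can be set directly; a trimmed-down sketch with only the fields relevant here (the real model has dozens more):

```python
# Trimmed-down sketch of the new AutoCoderArgs switches; the real model
# has many more fields, all with defaults as shown in the diff.
from typing import Optional
import pydantic

class AutoCoderArgs(pydantic.BaseModel):
    enable_agentic_filter: Optional[bool] = False
    enable_auto_fix_merge: Optional[bool] = False
    auto_fix_merge_max_attempts: Optional[int] = 5
    enable_multi_round_generate: Optional[bool] = False  # field kept, CLI flag removed

args = AutoCoderArgs(enable_agentic_filter=True, enable_auto_fix_merge=True)
print(args.auto_fix_merge_max_attempts)  # 5 by default, mirroring the lint/compile caps
```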
autocoder/common/auto_coder_lang.py
CHANGED

@@ -773,7 +773,27 @@ MESSAGES = {
     "max_compile_attempts_reached": {
         "en": "Maximum compilation attempts reached",
         "zh": "已达到最大编译尝试次数"
-    }
+    },
+    "unmerged_blocks_fixed": {
+        "en": "Unmerged blocks fixed successfully",
+        "zh": "未合并代码块已成功修复"
+    },
+    "unmerged_blocks_attempt_status": {
+        "en": "Fixing unmerged blocks attempt {{attempt}}/{{max_correction_attempts}}",
+        "zh": "正在尝试修复未合并代码块 {{attempt}}/{{max_correction_attempts}}"
+    },
+    "max_unmerged_blocks_attempts_reached": {
+        "en": "Maximum unmerged blocks fix attempts reached",
+        "zh": "已达到最大未合并代码块修复尝试次数"
+    },
+    "agenticFilterContext": {
+        "en": "Start to find context...",
+        "zh": "开始智能查找上下文...."
+    },
+    "agenticFilterContextFinished": {
+        "en": "End to find context...",
+        "zh": "结束智能查找上下文...."
+    }
 }
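Each new MESSAGES entry pairs a key with en/zh variants, and the attempt-status message carries {{attempt}}/{{max_correction_attempts}} placeholders, so whatever renders these strings must substitute named parameters. A hedged sketch of such a lookup-and-render helper; the get_message name and the choice of Jinja2 are assumptions, only the dict layout and the {{...}} placeholder style come from the diff:

```python
# Hypothetical renderer for the MESSAGES table. get_message and the use of
# Jinja2 are assumptions; only the dict shape and the {{...}} placeholders
# are taken from the diff.
from jinja2 import Template

MESSAGES = {
    "unmerged_blocks_attempt_status": {
        "en": "Fixing unmerged blocks attempt {{attempt}}/{{max_correction_attempts}}",
        "zh": "正在尝试修复未合并代码块 {{attempt}}/{{max_correction_attempts}}",
    },
}

def get_message(key: str, lang: str = "en", **params) -> str:
    return Template(MESSAGES[key][lang]).render(**params)

print(get_message("unmerged_blocks_attempt_status", attempt=2, max_correction_attempts=5))
# Fixing unmerged blocks attempt 2/5
```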
autocoder/common/code_auto_generate.py
CHANGED

@@ -51,69 +51,7 @@ class CodeAutoGenerate:
     {{ instruction }}
 
     """
-
-    @byzerllm.prompt(llm=lambda self: self.llm)
-    def multi_round_instruction(
-        self, instruction: str, content: str, context: str = "", package_context: str = ""
-    ) -> str:
-        """
-        {%- if structure %}
-        {{ structure }}
-        {%- endif %}
-
-        {%- if content %}
-        下面是一些文件路径以及每个文件对应的源码:
-        <files>
-        {{ content }}
-        </files>
-        {%- endif %}
-
-        {%- if package_context %}
-        下面是上面文件的一些信息(包括最近的变更情况):
-        <package_context>
-        {{ package_context }}
-        </package_context>
-        {%- endif %}
-
-        {%- if context %}
-        <extra_context>
-        {{ context }}
-        </extra_context>
-        {%- endif %}
-
-        下面是用户的需求:
-
-        {{ instruction }}
-
-        如果你需要生成代码,你生成的代码要符合这个格式:
-
-        ```{lang}
-        ##File: {FILE_PATH}
-        {CODE}
-        ```
-
-        ```{lang}
-        ##File: {FILE_PATH}
-        {CODE}
-        ```
-
-        其中,{lang}是代码的语言,{CODE}是代码的内容, {FILE_PATH} 是文件的路径(请尽量使用绝对路径),他们都在代码块中,请严格按上面的格式进行内容生成。
-        每次生成一个文件的代码,然后询问我是否继续,当我回复继续,继续生成下一个文件的代码。当没有后续任务时,请回复 "__完成__" 或者 "__EOF__"。
-        请确保每份代码的完整性,而不要只生成修改部分。
-        """
-
-        if not self.args.include_project_structure:
-            return {
-                "structure": "",
-            }
-
-        return {
-            "structure": (
-                self.action.pp.get_tree_like_directory_structure()
-                if self.action
-                else ""
-            )
-        }
+
 
     @byzerllm.prompt(llm=lambda self: self.llm)
     def single_round_instruction(
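The removed multi_round_instruction prompt asked the model to emit code as fenced blocks in the form ```{lang} / ##File: {FILE_PATH} / {CODE}``` — the same layout the surviving single_round_instruction uses. Any consumer of such output needs to pull (path, code) pairs back out; a regex sketch of that parsing follows. The parser itself is illustrative, since auto-coder's real extraction code is not shown in this diff:

```python
import re

# Illustrative parser for the "##File: {FILE_PATH}" block format the prompt
# specifies; auto-coder's actual extraction logic is not part of this diff.
FENCE = "`" * 3  # built dynamically so this sample nests cleanly in documents

BLOCK_RE = re.compile(
    FENCE + r"[^\n]*\n##File:\s*(?P<path>[^\n]+)\n(?P<code>.*?)" + FENCE,
    re.DOTALL,
)

output = f"{FENCE}python\n##File: /abs/path/hello.py\nprint('hello')\n{FENCE}\n"

for match in BLOCK_RE.finditer(output):
    print(match.group("path").strip())  # /abs/path/hello.py
    print(match.group("code"), end="")  # print('hello')
```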
@@ -197,15 +135,6 @@ class CodeAutoGenerate:
         source_code_list = SourceCodeList(filtered_sources)
         source_content = source_code_list.to_str()
 
-        if self.args.request_id and not self.args.skip_events:
-            queue_communicate.send_event_no_wait(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=query,
-                ),
-            )
-
         # 获取包上下文信息
         package_context = ""
 
@@ -264,15 +193,16 @@ class CodeAutoGenerate:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
                     model_names_list = llm_utils.get_llm_names(llm)
                     model_name = None
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                    if
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -303,7 +233,7 @@ class CodeAutoGenerate:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
-
+                        count += 1
                 temp_results = [future.result() for future in futures]
                 for result in temp_results:
                     results.append(result.content)
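The rewritten dispatch submits generate_times_same_model jobs per model and introduces a count guard so only the first submission takes the count == 0 branch (the diff shows that branch building a streaming job; the other branch and the exact point where count increments relative to the loops are not fully visible in these hunks). A sketch of the fan-out shape under the assumption that count advances once per submission, with stand-in job bodies:

```python
# Shape of the rewritten fan-out in CodeAutoGenerate: every llm is asked
# generate_times_same_model times, and a count guard singles out the first
# submission. Both job bodies are stand-ins; the real count == 0 branch
# builds a streaming chat job.
from concurrent.futures import ThreadPoolExecutor

llms = ["model_a", "model_b"]  # stand-ins for llm objects
generate_times_same_model = 2

def run_job(llm, streaming: bool) -> str:
    return f"{llm}:{'stream' if streaming else 'batch'}"

with ThreadPoolExecutor(max_workers=len(llms) * generate_times_same_model) as executor:
    futures, count = [], 0
    for llm in llms:
        for _ in range(generate_times_same_model):
            futures.append(executor.submit(run_job, llm, count == 0))
            count += 1
    results = [f.result() for f in futures]

print(results)  # ['model_a:stream', 'model_a:batch', 'model_b:batch', 'model_b:batch']
```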
@@ -338,90 +268,6 @@ class CodeAutoGenerate:
             "generated_tokens_cost": generated_tokens_cost
         }
 
-        if self.args.request_id and not self.args.skip_events:
-            queue_communicate.send_event_no_wait(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=json.dumps(statistics, ensure_ascii=False),
-                ),
-            )
-
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
 
-
-        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
-    ) -> Tuple[List[str], List[Dict[str, str]]]:
-        llm_config = {"human_as_model": self.args.human_as_model}
-        result = []
-        source_content = source_code_list.to_str()
-
-        # 获取包上下文信息
-        package_context = ""
-
-        if self.args.enable_active_context:
-            # 初始化活动上下文管理器
-            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
-            # 获取活动上下文信息
-            result = active_context_manager.load_active_contexts_for_files(
-                [source.module_name for source in source_code_list.sources]
-            )
-            # 将活动上下文信息格式化为文本
-            if result.contexts:
-                package_context_parts = []
-                for dir_path, context in result.contexts.items():
-                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
-
-                package_context = "\n".join(package_context_parts)
-
-        if self.args.template == "common":
-            init_prompt = self.multi_round_instruction.prompt(
-                instruction=query, content=source_content, context=self.args.context,
-                package_context=package_context
-            )
-        elif self.args.template == "auto_implement":
-            init_prompt = self.auto_implement_function.prompt(
-                instruction=query, content=source_content
-            )
-
-        conversations = [{"role": "user", "content": init_prompt}]
-
-        with open(self.args.target_file, "w",encoding="utf-8") as file:
-            file.write(init_prompt)
-
-        t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
-
-        result.append(t[0].output)
-
-        conversations.append({"role": "assistant", "content": t[0].output})
-
-        if (
-            "__完成__" in t[0].output
-            or "/done" in t[0].output
-            or "__EOF__" in t[0].output
-        ):
-            return result, conversations
-
-        current_step = 0
-
-        while current_step < max_steps:
-
-            conversations.append({"role": "user", "content": "继续"})
-
-            with open(self.args.target_file, "w",encoding="utf-8") as file:
-                file.write("继续")
-
-            t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
-
-            result.append(t[0].output)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            current_step += 1
-
-            if (
-                "__完成__" in t[0].output
-                or "/done" in t[0].output
-                or "__EOF__" in t[0].output
-            ):
-                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+