auto-coder: 0.1.330-py3-none-any.whl → 0.1.332-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/METADATA +1 -1
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/RECORD +47 -45
- autocoder/agent/agentic_filter.py +928 -0
- autocoder/agent/project_reader.py +1 -14
- autocoder/auto_coder.py +6 -47
- autocoder/auto_coder_runner.py +2 -0
- autocoder/command_args.py +1 -6
- autocoder/commands/auto_command.py +1 -1
- autocoder/commands/tools.py +68 -16
- autocoder/common/__init__.py +8 -3
- autocoder/common/auto_coder_lang.py +21 -1
- autocoder/common/code_auto_generate.py +6 -160
- autocoder/common/code_auto_generate_diff.py +5 -111
- autocoder/common/code_auto_generate_editblock.py +5 -95
- autocoder/common/code_auto_generate_strict_diff.py +6 -112
- autocoder/common/code_auto_merge_editblock.py +1 -45
- autocoder/common/code_modification_ranker.py +6 -2
- autocoder/common/command_templates.py +2 -9
- autocoder/common/conf_utils.py +36 -0
- autocoder/common/stream_out_type.py +7 -2
- autocoder/common/types.py +3 -2
- autocoder/common/v2/code_auto_generate.py +6 -4
- autocoder/common/v2/code_auto_generate_diff.py +4 -3
- autocoder/common/v2/code_auto_generate_editblock.py +9 -4
- autocoder/common/v2/code_auto_generate_strict_diff.py +182 -14
- autocoder/common/v2/code_auto_merge_diff.py +560 -306
- autocoder/common/v2/code_auto_merge_editblock.py +12 -45
- autocoder/common/v2/code_auto_merge_strict_diff.py +76 -7
- autocoder/common/v2/code_diff_manager.py +73 -6
- autocoder/common/v2/code_editblock_manager.py +534 -82
- autocoder/dispacher/actions/action.py +15 -28
- autocoder/dispacher/actions/plugins/action_regex_project.py +5 -9
- autocoder/helper/project_creator.py +0 -1
- autocoder/index/entry.py +35 -53
- autocoder/index/filter/normal_filter.py +0 -16
- autocoder/lang.py +2 -4
- autocoder/linters/shadow_linter.py +4 -0
- autocoder/pyproject/__init__.py +2 -19
- autocoder/rag/cache/simple_cache.py +31 -6
- autocoder/regexproject/__init__.py +4 -22
- autocoder/suffixproject/__init__.py +6 -24
- autocoder/tsproject/__init__.py +5 -22
- autocoder/version.py +1 -1
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/top_level.txt +0 -0
autocoder/common/v2/code_auto_generate_diff.py

@@ -281,6 +281,7 @@ class CodeAutoGenerateDiff:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:

                     model_names_list = llm_utils.get_llm_names(llm)
@@ -288,9 +289,9 @@ class CodeAutoGenerateDiff:
                     if model_names_list:
                         model_name = model_names_list[0]

-                    for …
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                        if …
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -321,7 +322,7 @@ class CodeAutoGenerateDiff:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
-
+                        count += 1
             temp_results = [future.result() for future in futures]

             for result,model_name in zip(temp_results,model_names):
autocoder/common/v2/code_auto_generate_editblock.py

@@ -15,6 +15,7 @@ from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
 from autocoder.common import SourceCodeList
 from autocoder.memory.active_context_manager import ActiveContextManager
+from loguru import logger



@@ -298,6 +299,7 @@ class CodeAutoGenerateEditBlock:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:

                     model_names_list = llm_utils.get_llm_names(llm)
@@ -305,9 +307,10 @@ class CodeAutoGenerateEditBlock:
                     if model_names_list:
                         model_name = model_names_list[0]

-                    for …
-                    model_names.append(model_name)
-                    if …
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        if count==0:
+                            logger.info(f"code generation with model(Stream): {model_name}")
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -330,7 +333,8 @@ class CodeAutoGenerateEditBlock:
                                     generated_tokens_count=last_meta.generated_tokens_count
                                 )
                             futures.append(executor.submit(job))
-                        else:
+                        else:
+                            logger.info(f"code generation with model(Non-stream): {model_name}")
                             futures.append(executor.submit(
                                 chat_with_continue,
                                 llm=llm,
@@ -338,6 +342,7 @@ class CodeAutoGenerateEditBlock:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
+                        count += 1

             temp_results = [future.result() for future in futures]

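Taken together, the hunks above apply the same small pattern in both generators: a count flag so that only the first submitted generation job streams its output to the terminal (now with a logger notice naming the model), while every later job is submitted as a plain non-streaming call. Below is a minimal standalone sketch of that dispatch pattern, not the library's code; run_streaming and run_blocking are hypothetical stand-ins for stream_chat_with_continue and chat_with_continue.

# Sketch only: the first job streams, the rest run quietly.
# run_streaming / run_blocking are hypothetical placeholders for the real chat helpers.
from concurrent.futures import ThreadPoolExecutor

def dispatch(llms, generate_times_same_model, run_streaming, run_blocking):
    with ThreadPoolExecutor(max_workers=len(llms) * generate_times_same_model) as executor:
        futures = []
        count = 0
        for llm in llms:
            for _ in range(generate_times_same_model):
                if count == 0:
                    # first job: stream so the user sees progress
                    futures.append(executor.submit(run_streaming, llm))
                else:
                    # duplicate generations: run without streaming
                    futures.append(executor.submit(run_blocking, llm))
                count += 1
        return [f.result() for f in futures]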
autocoder/common/v2/code_auto_generate_strict_diff.py

@@ -2,17 +2,39 @@ from typing import List, Dict, Tuple
 from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
+from autocoder.utils.queue_communicate import queue_communicate, CommunicateEvent, CommunicateEventType
 from autocoder.common import sys_prompt
-from …
+from concurrent.futures import ThreadPoolExecutor
+import json
+from autocoder.common.utils_code_auto_generate import chat_with_continue,stream_chat_with_continue,ChatWithContinueResult
+from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
+from autocoder.common.stream_out_type import CodeGenerateStreamOutType
+from autocoder.common.auto_coder_lang import get_message_with_format
+from autocoder.common.printer import Printer
+from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils
 from autocoder.common import SourceCodeList
-
-
-
-
-
+from autocoder.privacy.model_filter import ModelPathFilter
+from autocoder.memory.active_context_manager import ActiveContextManager
+class CodeAutoGenerateStrictDiff:
+    def __init__(
+        self, llm: byzerllm.ByzerLLM, args: AutoCoderArgs, action=None
+    ) -> None:
+        self.llm = llm
+        self.args = args
+        self.action = action
+        self.llms = []
+        self.generate_times_same_model = args.generate_times_same_model
+        if not self.llm:
+            raise ValueError(
+                "Please provide a valid model instance to use for code generation."
+            )
+        self.llms = self.llm.get_sub_client("code_model") or [self.llm]
+        if not isinstance(self.llms, list):
+            self.llms = [self.llms]

     @byzerllm.prompt(llm=lambda self: self.llm)
-    def …
+    def multi_round_instruction(
         self, instruction: str, content: str, context: str = "", package_context: str = ""
     ) -> str:
         """
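The rewritten constructor above no longer inherits from CodeAutoGenerate: it resolves its model list itself, falling back to the top-level model when no "code_model" sub-clients are configured and always normalizing the result to a list. A small illustrative sketch of just that resolution logic follows; FakeLLM is a made-up stand-in for a configured byzerllm.ByzerLLM instance.

# Illustrative sketch of the sub-client fallback shown in __init__ above.
# FakeLLM is hypothetical; the real object is a byzerllm.ByzerLLM instance.
class FakeLLM:
    def __init__(self, sub_clients=None):
        self._subs = sub_clients or []

    def get_sub_client(self, name):
        # returns an empty list when nothing is registered under `name`
        return self._subs

def resolve_code_models(llm):
    llms = llm.get_sub_client("code_model") or [llm]
    if not isinstance(llms, list):
        llms = [llms]
    return llms

print(len(resolve_code_models(FakeLLM())))                         # 1 -> falls back to the main model
print(len(resolve_code_models(FakeLLM([FakeLLM(), FakeLLM()]))))   # 2 -> uses the sub-clients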
@@ -33,9 +55,6 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         Indentation matters in the diffs!

         To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
-        The code part of the diff content should not contains any line number.
-
-        The path start with `---` or `+++` should be the absolute path of the file or relative path from the project root.

         下面我们来看一个例子:

@@ -125,6 +144,8 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         下面是用户的需求:

         {{ instruction }}
+
+        每次生成一个文件的diff,然后询问我是否继续,当我回复继续,继续生成下一个文件的diff。当没有后续任务时,请回复 "__完成__" 或者 "__EOF__"。
         """

         if not self.args.include_project_structure:
@@ -141,7 +162,7 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         }

     @byzerllm.prompt(llm=lambda self: self.llm)
-    def …
+    def single_round_instruction(
         self, instruction: str, content: str, context: str = "", package_context: str = ""
     ) -> str:
         """
@@ -162,6 +183,9 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         Indentation matters in the diffs!

         To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
+        The code part of the diff content should not contains any line number.
+
+        The path start with `---` or `+++` should be the absolute path of the file or relative path from the project root.

         下面我们来看一个例子:

@@ -251,8 +275,6 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         下面是用户的需求:

         {{ instruction }}
-
-        每次生成一个文件的diff,然后询问我是否继续,当我回复继续,继续生成下一个文件的diff。当没有后续任务时,请回复 "__完成__" 或者 "__EOF__"。
         """

         if not self.args.include_project_structure:
@@ -266,4 +288,150 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
                 if self.action
                 else ""
             )
-        }
+        }
+
+    def single_round_run(
+        self, query: str, source_code_list: SourceCodeList
+    ) -> CodeGenerateResult:
+        llm_config = {"human_as_model": self.args.human_as_model}
+        source_content = source_code_list.to_str()
+
+        # 获取包上下文信息
+        package_context = ""
+
+        if self.args.enable_active_context:
+            # 初始化活动上下文管理器
+            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
+            # 获取活动上下文信息
+            result = active_context_manager.load_active_contexts_for_files(
+                [source.module_name for source in source_code_list.sources]
+            )
+            # 将活动上下文信息格式化为文本
+            if result.contexts:
+                package_context_parts = []
+                for dir_path, context in result.contexts.items():
+                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
+
+                package_context = "\n".join(package_context_parts)
+
+        if self.args.template == "common":
+            init_prompt = self.single_round_instruction.prompt(
+                instruction=query, content=source_content, context=self.args.context,
+                package_context=package_context
+            )
+        elif self.args.template == "auto_implement":
+            init_prompt = self.auto_implement_function.prompt(
+                instruction=query, content=source_content
+            )
+
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
+            file.write(init_prompt)
+
+        conversations = []
+
+        if self.args.system_prompt and self.args.system_prompt.strip() == "claude":
+            conversations.append(
+                {"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
+        elif self.args.system_prompt:
+            conversations.append(
+                {"role": "system", "content": self.args.system_prompt})
+
+        conversations.append({"role": "user", "content": init_prompt})
+
+        conversations_list = []
+        results = []
+        input_tokens_count = 0
+        generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []
+
+        printer = Printer()
+        estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
+        printer.print_in_terminal("estimated_input_tokens_in_generate", style="yellow",
+            estimated_input_tokens_in_generate=estimated_input_tokens,
+            generate_mode="strict_diff"
+        )
+
+        if not self.args.human_as_model:
+            with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
+                futures = []
+                count = 0
+                for llm in self.llms:
+                    for _ in range(self.generate_times_same_model):
+
+                        model_names_list = llm_utils.get_llm_names(llm)
+                        model_name = None
+                        if model_names_list:
+                            model_name = model_names_list[0]
+
+                        for _ in range(self.generate_times_same_model):
+                            model_names.append(model_name)
+                            if count == 0:
+                                def job():
+                                    stream_generator = stream_chat_with_continue(
+                                        llm=llm,
+                                        conversations=conversations,
+                                        llm_config=llm_config,
+                                        args=self.args
+                                    )
+                                    full_response, last_meta = stream_out(
+                                        stream_generator,
+                                        model_name=model_name,
+                                        title=get_message_with_format(
+                                            "code_generate_title", model_name=model_name),
+                                        args=self.args,
+                                        extra_meta={
+                                            "stream_out_type": CodeGenerateStreamOutType.CODE_GENERATE.value
+                                        })
+                                    return ChatWithContinueResult(
+                                        content=full_response,
+                                        input_tokens_count=last_meta.input_tokens_count,
+                                        generated_tokens_count=last_meta.generated_tokens_count
+                                    )
+                                futures.append(executor.submit(job))
+                            else:
+                                futures.append(executor.submit(
+                                    chat_with_continue,
+                                    llm=llm,
+                                    conversations=conversations,
+                                    llm_config=llm_config,
+                                    args=self.args
+                                ))
+                            count += 1
+
+                temp_results = [future.result() for future in futures]
+                for result in temp_results:
+                    results.append(result.content)
+                    input_tokens_count += result.input_tokens_count
+                    generated_tokens_count += result.generated_tokens_count
+                    model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                    input_cost = model_info.get("input_price", 0) if model_info else 0
+                    output_cost = model_info.get("output_price", 0) if model_info else 0
+                    input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                    generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
+                for result in results:
+                    conversations_list.append(
+                        conversations + [{"role": "assistant", "content": result}])
+        else:
+            for _ in range(self.args.human_model_num):
+                single_result = chat_with_continue(
+                    llm=self.llms[0],
+                    conversations=conversations,
+                    llm_config=llm_config,
+                    args=self.args
+                )
+                results.append(single_result.content)
+                input_tokens_count += single_result.input_tokens_count
+                generated_tokens_count += single_result.generated_tokens_count
+                conversations_list.append(conversations + [{"role": "assistant", "content": single_result.content}])
+
+        statistics = {
+            "input_tokens_count": input_tokens_count,
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
+        }
+
+        return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
+
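For orientation, the cost bookkeeping at the end of single_round_run multiplies a per-million-token price (input_price / output_price from get_model_info) by the corresponding token count. A worked example of that formula with made-up prices and counts; none of these numbers come from the package.

# Hypothetical prices and counts, only to illustrate the formula used above:
# cost = price_per_1M_tokens * token_count / 1000000
input_price = 2.0                 # assumed price per 1M input tokens
output_price = 8.0                # assumed price per 1M generated tokens
input_tokens_count = 12500
generated_tokens_count = 3200

input_tokens_cost = input_price * input_tokens_count / 1000000
generated_tokens_cost = output_price * generated_tokens_count / 1000000
print(input_tokens_cost, generated_tokens_cost)   # 0.025 0.0256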