auto_coder-0.1.329-py3-none-any.whl → auto_coder-0.1.331-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

Files changed (41)
  1. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/METADATA +1 -1
  2. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/RECORD +41 -41
  3. autocoder/agent/project_reader.py +1 -14
  4. autocoder/auto_coder.py +1 -24
  5. autocoder/command_args.py +1 -6
  6. autocoder/commands/tools.py +0 -13
  7. autocoder/common/__init__.py +6 -3
  8. autocoder/common/auto_coder_lang.py +12 -0
  9. autocoder/common/code_auto_generate.py +6 -160
  10. autocoder/common/code_auto_generate_diff.py +5 -111
  11. autocoder/common/code_auto_generate_editblock.py +5 -95
  12. autocoder/common/code_auto_generate_strict_diff.py +6 -112
  13. autocoder/common/code_auto_merge_editblock.py +1 -45
  14. autocoder/common/command_templates.py +2 -9
  15. autocoder/common/stream_out_type.py +3 -0
  16. autocoder/common/types.py +2 -1
  17. autocoder/common/v2/code_auto_generate.py +6 -4
  18. autocoder/common/v2/code_auto_generate_diff.py +4 -3
  19. autocoder/common/v2/code_auto_generate_editblock.py +9 -4
  20. autocoder/common/v2/code_auto_generate_strict_diff.py +182 -14
  21. autocoder/common/v2/code_auto_merge_diff.py +560 -306
  22. autocoder/common/v2/code_auto_merge_editblock.py +11 -44
  23. autocoder/common/v2/code_auto_merge_strict_diff.py +76 -7
  24. autocoder/common/v2/code_editblock_manager.py +141 -6
  25. autocoder/dispacher/actions/action.py +15 -28
  26. autocoder/dispacher/actions/plugins/action_regex_project.py +5 -9
  27. autocoder/helper/project_creator.py +0 -1
  28. autocoder/index/entry.py +0 -43
  29. autocoder/index/filter/normal_filter.py +0 -16
  30. autocoder/lang.py +2 -4
  31. autocoder/linters/python_linter.py +2 -0
  32. autocoder/pyproject/__init__.py +2 -19
  33. autocoder/rag/cache/simple_cache.py +31 -6
  34. autocoder/regexproject/__init__.py +4 -22
  35. autocoder/suffixproject/__init__.py +6 -24
  36. autocoder/tsproject/__init__.py +5 -22
  37. autocoder/version.py +1 -1
  38. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/LICENSE +0 -0
  39. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/WHEEL +0 -0
  40. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/entry_points.txt +0 -0
  41. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/top_level.txt +0 -0
autocoder/common/v2/code_auto_generate_strict_diff.py

@@ -2,17 +2,39 @@ from typing import List, Dict, Tuple
 from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
+from autocoder.utils.queue_communicate import queue_communicate, CommunicateEvent, CommunicateEventType
 from autocoder.common import sys_prompt
-from autocoder.common.v2.code_auto_generate import CodeAutoGenerate
+from concurrent.futures import ThreadPoolExecutor
+import json
+from autocoder.common.utils_code_auto_generate import chat_with_continue,stream_chat_with_continue,ChatWithContinueResult
+from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
+from autocoder.common.stream_out_type import CodeGenerateStreamOutType
+from autocoder.common.auto_coder_lang import get_message_with_format
+from autocoder.common.printer import Printer
+from autocoder.rag.token_counter import count_tokens
+from autocoder.utils import llms as llm_utils
 from autocoder.common import SourceCodeList
-
-class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
-    """
-    A class that handles code generation in strict diff format.
-    """
+from autocoder.privacy.model_filter import ModelPathFilter
+from autocoder.memory.active_context_manager import ActiveContextManager
+class CodeAutoGenerateStrictDiff:
+    def __init__(
+        self, llm: byzerllm.ByzerLLM, args: AutoCoderArgs, action=None
+    ) -> None:
+        self.llm = llm
+        self.args = args
+        self.action = action
+        self.llms = []
+        self.generate_times_same_model = args.generate_times_same_model
+        if not self.llm:
+            raise ValueError(
+                "Please provide a valid model instance to use for code generation."
+            )
+        self.llms = self.llm.get_sub_client("code_model") or [self.llm]
+        if not isinstance(self.llms, list):
+            self.llms = [self.llms]
 
     @byzerllm.prompt(llm=lambda self: self.llm)
-    def single_round_instruction(
+    def multi_round_instruction(
         self, instruction: str, content: str, context: str = "", package_context: str = ""
     ) -> str:
         """
@@ -33,9 +55,6 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         Indentation matters in the diffs!
 
         To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
-        The code part of the diff content should not contains any line number.
-
-        The path start with `---` or `+++` should be the absolute path of the file or relative path from the project root.
 
         下面我们来看一个例子:
 
@@ -125,6 +144,8 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         下面是用户的需求:
 
         {{ instruction }}
+
+        每次生成一个文件的diff,然后询问我是否继续,当我回复继续,继续生成下一个文件的diff。当没有后续任务时,请回复 "__完成__" 或者 "__EOF__"。
         """
 
         if not self.args.include_project_structure:
@@ -141,7 +162,7 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         }
 
     @byzerllm.prompt(llm=lambda self: self.llm)
-    def multi_round_instruction(
+    def single_round_instruction(
         self, instruction: str, content: str, context: str = "", package_context: str = ""
     ) -> str:
         """
@@ -162,6 +183,9 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         Indentation matters in the diffs!
 
         To make a new file, show a diff from `--- /dev/null` to `+++ path/to/new/file.ext`.
+        The code part of the diff content should not contains any line number.
+
+        The path start with `---` or `+++` should be the absolute path of the file or relative path from the project root.
 
         下面我们来看一个例子:
 
@@ -251,8 +275,6 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
         下面是用户的需求:
 
         {{ instruction }}
-
-        每次生成一个文件的diff,然后询问我是否继续,当我回复继续,继续生成下一个文件的diff。当没有后续任务时,请回复 "__完成__" 或者 "__EOF__"。
         """
 
         if not self.args.include_project_structure:
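The two docstring edits above move the per-file continuation instruction out of single_round_instruction and into multi_round_instruction: the model is asked to emit one file's diff per turn and to reply "__完成__" or "__EOF__" when nothing is left. A minimal driver loop for that protocol might look like the sketch below; `chat_once` is a hypothetical stand-in for a single model round-trip, not an API from this package:

    # Hypothetical driver for the multi-round protocol described in the prompt:
    # ask for one file diff per turn until the model signals completion.
    from typing import Callable, Dict, List

    def collect_diffs(chat_once: Callable[[List[Dict[str, str]]], str],
                      conversations: List[Dict[str, str]]) -> List[str]:
        diffs: List[str] = []
        while True:
            reply = chat_once(conversations)
            # The prompt tells the model to answer "__完成__" or "__EOF__" when done.
            if "__完成__" in reply or "__EOF__" in reply:
                break
            diffs.append(reply)
            conversations.append({"role": "assistant", "content": reply})
            conversations.append({"role": "user", "content": "继续"})  # "continue"
        return diffs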
@@ -266,4 +288,150 @@ class CodeAutoGenerateStrictDiff(CodeAutoGenerate):
             if self.action
             else ""
         )
-    }
+    }
+
+    def single_round_run(
+        self, query: str, source_code_list: SourceCodeList
+    ) -> CodeGenerateResult:
+        llm_config = {"human_as_model": self.args.human_as_model}
+        source_content = source_code_list.to_str()
+
+        # 获取包上下文信息
+        package_context = ""
+
+        if self.args.enable_active_context:
+            # 初始化活动上下文管理器
+            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
+            # 获取活动上下文信息
+            result = active_context_manager.load_active_contexts_for_files(
+                [source.module_name for source in source_code_list.sources]
+            )
+            # 将活动上下文信息格式化为文本
+            if result.contexts:
+                package_context_parts = []
+                for dir_path, context in result.contexts.items():
+                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
+
+                package_context = "\n".join(package_context_parts)
+
+        if self.args.template == "common":
+            init_prompt = self.single_round_instruction.prompt(
+                instruction=query, content=source_content, context=self.args.context,
+                package_context=package_context
+            )
+        elif self.args.template == "auto_implement":
+            init_prompt = self.auto_implement_function.prompt(
+                instruction=query, content=source_content
+            )
+
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
+            file.write(init_prompt)
+
+        conversations = []
+
+        if self.args.system_prompt and self.args.system_prompt.strip() == "claude":
+            conversations.append(
+                {"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
+        elif self.args.system_prompt:
+            conversations.append(
+                {"role": "system", "content": self.args.system_prompt})
+
+        conversations.append({"role": "user", "content": init_prompt})
+
+        conversations_list = []
+        results = []
+        input_tokens_count = 0
+        generated_tokens_count = 0
+        input_tokens_cost = 0
+        generated_tokens_cost = 0
+        model_names = []
+
+        printer = Printer()
+        estimated_input_tokens = count_tokens(json.dumps(conversations, ensure_ascii=False))
+        printer.print_in_terminal("estimated_input_tokens_in_generate", style="yellow",
+                                  estimated_input_tokens_in_generate=estimated_input_tokens,
+                                  generate_mode="strict_diff"
+                                  )
+
+        if not self.args.human_as_model:
+            with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
+                futures = []
+                count = 0
+                for llm in self.llms:
+                    for _ in range(self.generate_times_same_model):
+
+                        model_names_list = llm_utils.get_llm_names(llm)
+                        model_name = None
+                        if model_names_list:
+                            model_name = model_names_list[0]
+
+                        for _ in range(self.generate_times_same_model):
+                            model_names.append(model_name)
+                            if count == 0:
+                                def job():
+                                    stream_generator = stream_chat_with_continue(
+                                        llm=llm,
+                                        conversations=conversations,
+                                        llm_config=llm_config,
+                                        args=self.args
+                                    )
+                                    full_response, last_meta = stream_out(
+                                        stream_generator,
+                                        model_name=model_name,
+                                        title=get_message_with_format(
+                                            "code_generate_title", model_name=model_name),
+                                        args=self.args,
+                                        extra_meta={
+                                            "stream_out_type": CodeGenerateStreamOutType.CODE_GENERATE.value
+                                        })
+                                    return ChatWithContinueResult(
+                                        content=full_response,
+                                        input_tokens_count=last_meta.input_tokens_count,
+                                        generated_tokens_count=last_meta.generated_tokens_count
+                                    )
+                                futures.append(executor.submit(job))
+                            else:
+                                futures.append(executor.submit(
+                                    chat_with_continue,
+                                    llm=llm,
+                                    conversations=conversations,
+                                    llm_config=llm_config,
+                                    args=self.args
+                                ))
+                            count += 1
+
+                temp_results = [future.result() for future in futures]
+                for result in temp_results:
+                    results.append(result.content)
+                    input_tokens_count += result.input_tokens_count
+                    generated_tokens_count += result.generated_tokens_count
+                    model_info = llm_utils.get_model_info(model_name, self.args.product_mode)
+                    input_cost = model_info.get("input_price", 0) if model_info else 0
+                    output_cost = model_info.get("output_price", 0) if model_info else 0
+                    input_tokens_cost += input_cost * result.input_tokens_count / 1000000
+                    generated_tokens_cost += output_cost * result.generated_tokens_count / 1000000
+            for result in results:
+                conversations_list.append(
+                    conversations + [{"role": "assistant", "content": result}])
+        else:
+            for _ in range(self.args.human_model_num):
+                single_result = chat_with_continue(
+                    llm=self.llms[0],
+                    conversations=conversations,
+                    llm_config=llm_config,
+                    args=self.args
+                )
+                results.append(single_result.content)
+                input_tokens_count += single_result.input_tokens_count
+                generated_tokens_count += single_result.generated_tokens_count
+                conversations_list.append(conversations + [{"role": "assistant", "content": single_result.content}])
+
+        statistics = {
+            "input_tokens_count": input_tokens_count,
+            "generated_tokens_count": generated_tokens_count,
+            "input_tokens_cost": input_tokens_cost,
+            "generated_tokens_cost": generated_tokens_cost
+        }
+
+        return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
+
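Read end to end, the new single_round_run builds one prompt from the source list (plus any active package context), fans it out across every configured code model with generate_times_same_model candidates each (streaming the first candidate to the terminal), and returns all candidates with token and cost statistics. A minimal caller sketch follows, assuming only the constructor and method signatures visible in this diff; the AutoCoderArgs field values and the empty SourceCodeList are illustrative, not taken from the package:

    # Sketch of driving the refactored generator; the signatures come from the
    # diff above, the concrete argument values are assumptions for illustration.
    import byzerllm
    from autocoder.common import AutoCoderArgs, SourceCodeList
    from autocoder.common.v2.code_auto_generate_strict_diff import CodeAutoGenerateStrictDiff

    llm = byzerllm.ByzerLLM()  # assumed to be deployed/configured elsewhere
    args = AutoCoderArgs(source_dir=".", target_file="prompt.txt", template="common")

    generator = CodeAutoGenerateStrictDiff(llm=llm, args=args)
    result = generator.single_round_run(
        query="Add a --verbose flag to the CLI",
        source_code_list=SourceCodeList(sources=[]),
    )
    print(result.metadata)           # input/generated token counts and cost estimates
    print(result.contents[0][:200])  # first candidate, in strict diff format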