auto-coder 0.1.329__py3-none-any.whl → 0.1.331__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

Files changed (41)
  1. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/METADATA +1 -1
  2. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/RECORD +41 -41
  3. autocoder/agent/project_reader.py +1 -14
  4. autocoder/auto_coder.py +1 -24
  5. autocoder/command_args.py +1 -6
  6. autocoder/commands/tools.py +0 -13
  7. autocoder/common/__init__.py +6 -3
  8. autocoder/common/auto_coder_lang.py +12 -0
  9. autocoder/common/code_auto_generate.py +6 -160
  10. autocoder/common/code_auto_generate_diff.py +5 -111
  11. autocoder/common/code_auto_generate_editblock.py +5 -95
  12. autocoder/common/code_auto_generate_strict_diff.py +6 -112
  13. autocoder/common/code_auto_merge_editblock.py +1 -45
  14. autocoder/common/command_templates.py +2 -9
  15. autocoder/common/stream_out_type.py +3 -0
  16. autocoder/common/types.py +2 -1
  17. autocoder/common/v2/code_auto_generate.py +6 -4
  18. autocoder/common/v2/code_auto_generate_diff.py +4 -3
  19. autocoder/common/v2/code_auto_generate_editblock.py +9 -4
  20. autocoder/common/v2/code_auto_generate_strict_diff.py +182 -14
  21. autocoder/common/v2/code_auto_merge_diff.py +560 -306
  22. autocoder/common/v2/code_auto_merge_editblock.py +11 -44
  23. autocoder/common/v2/code_auto_merge_strict_diff.py +76 -7
  24. autocoder/common/v2/code_editblock_manager.py +141 -6
  25. autocoder/dispacher/actions/action.py +15 -28
  26. autocoder/dispacher/actions/plugins/action_regex_project.py +5 -9
  27. autocoder/helper/project_creator.py +0 -1
  28. autocoder/index/entry.py +0 -43
  29. autocoder/index/filter/normal_filter.py +0 -16
  30. autocoder/lang.py +2 -4
  31. autocoder/linters/python_linter.py +2 -0
  32. autocoder/pyproject/__init__.py +2 -19
  33. autocoder/rag/cache/simple_cache.py +31 -6
  34. autocoder/regexproject/__init__.py +4 -22
  35. autocoder/suffixproject/__init__.py +6 -24
  36. autocoder/tsproject/__init__.py +5 -22
  37. autocoder/version.py +1 -1
  38. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/LICENSE +0 -0
  39. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/WHEEL +0 -0
  40. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/entry_points.txt +0 -0
  41. {auto_coder-0.1.329.dist-info → auto_coder-0.1.331.dist-info}/top_level.txt +0 -0
autocoder/common/code_auto_generate_diff.py CHANGED
@@ -365,15 +365,6 @@ class CodeAutoGenerateDiff:
 
         conversations.append({"role": "user", "content": init_prompt})
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=json.dumps({}, ensure_ascii=False),
-                ),
-            )
-
         conversations_list = []
         results = []
         input_tokens_count = 0
@@ -392,6 +383,7 @@ class CodeAutoGenerateDiff:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -399,9 +391,9 @@ class CodeAutoGenerateDiff:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                        if i==0:
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -432,7 +424,7 @@ class CodeAutoGenerateDiff:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
-
+                        count += 1
                 temp_results = [future.result() for future in futures]
                 for result in temp_results:
                     results.append(result.content)
@@ -468,104 +460,6 @@ class CodeAutoGenerateDiff:
             "generated_tokens_cost": generated_tokens_cost
         }
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=json.dumps(statistics, ensure_ascii=False),
-                ),
-            )
 
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
-
-    def multi_round_run(
-        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
-    ) -> CodeGenerateResult:
-
-        # Apply model filter for code_llm
-        printer = Printer()
-        for llm in self.llms:
-            model_filter = ModelPathFilter.from_model_object(llm, self.args)
-            filtered_sources = []
-            for source in source_code_list.sources:
-                if model_filter.is_accessible(source.module_name):
-                    filtered_sources.append(source)
-                else:
-                    printer.print_in_terminal("index_file_filtered",
-                                              style="yellow",
-                                              file_path=source.path,
-                                              model_name=",".join(llm_utils.get_llm_names(llm)))
-
-            source_code_list = SourceCodeList(filtered_sources)
-
-        llm_config = {"human_as_model": self.args.human_as_model}
-        result = []
-        source_content = source_code_list.to_str()
-
-        # 获取包上下文信息
-        package_context = ""
-
-        if self.args.enable_active_context:
-            # 初始化活动上下文管理器
-            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
-            # 获取活动上下文信息
-            result = active_context_manager.load_active_contexts_for_files(
-                [source.module_name for source in source_code_list.sources]
-            )
-            # 将活动上下文信息格式化为文本
-            if result.contexts:
-                package_context_parts = []
-                for dir_path, context in result.contexts.items():
-                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
-
-                package_context = "\n".join(package_context_parts)
-
-        if self.args.template == "common":
-            init_prompt = self.multi_round_instruction.prompt(
-                instruction=query, content=source_content, context=self.args.context,
-                package_context=package_context
-            )
-        elif self.args.template == "auto_implement":
-            init_prompt = self.auto_implement_function.prompt(
-                instruction=query, content=source_content
-            )
-
-        conversations = []
-        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
-        conversations.append({"role": "user", "content": init_prompt})
-
-        with open(self.args.target_file, "w",encoding="utf-8") as file:
-            file.write(init_prompt)
-
-        code_llm = self.llms[0]
-        t = code_llm.chat_oai(conversations=conversations,
-                              llm_config=llm_config)
-
-        result.append(t[0].output)
-
-        conversations.append({"role": "assistant", "content": t[0].output})
-
-        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        current_step = 0
-
-        while current_step < max_steps:
-
-            conversations.append({"role": "user", "content": "继续"})
-
-            with open(self.args.target_file, "w",encoding="utf-8") as file:
-                file.write("继续")
-
-            t = code_llm.chat_oai(
-                conversations=conversations, llm_config=llm_config)
-
-            result.append(t[0].output)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            current_step += 1
-
-            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+
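A note on the recurring change above: the per-model loop index i is replaced by a single count shared across every model and every repeat, so that only the very first submitted job takes the streaming path (presumably so duplicate generations do not all stream output to the terminal at once). A minimal standalone sketch of the resulting fan-out pattern, with toy job bodies standing in for stream_chat_with_continue and chat_with_continue:

from concurrent.futures import ThreadPoolExecutor

llms = ["model-a", "model-b"]           # stand-ins for the configured LLM objects
generate_times_same_model = 2

def stream_job(llm):                    # placeholder for the streaming path
    return f"{llm}: streamed"

def plain_job(llm):                     # placeholder for the non-streaming path
    return f"{llm}: non-streamed"

futures = []
with ThreadPoolExecutor(max_workers=len(llms) * generate_times_same_model) as executor:
    count = 0                           # shared across models, unlike the old per-model i
    for llm in llms:
        for _ in range(generate_times_same_model):
            if count == 0:              # only the first job overall streams
                futures.append(executor.submit(stream_job, llm))
            else:
                futures.append(executor.submit(plain_job, llm))
            count += 1

results = [f.result() for f in futures]
# Old behaviour (if i == 0): the first job of every model streamed.
# New behaviour (if count == 0): only the first job of the first model streams.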
autocoder/common/code_auto_generate_editblock.py CHANGED
@@ -474,15 +474,6 @@ class CodeAutoGenerateEditBlock:
 
         conversations.append({"role": "user", "content": init_prompt})
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=json.dumps({}, ensure_ascii=False),
-                ),
-            )
-
         conversations_list = []
         results = []
         input_tokens_count = 0
@@ -505,6 +496,7 @@ class CodeAutoGenerateEditBlock:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -512,9 +504,9 @@ class CodeAutoGenerateEditBlock:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                        if i==0:
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -545,6 +537,7 @@ class CodeAutoGenerateEditBlock:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
+                        count += 1
 
                 temp_results = [future.result() for future in futures]
 
@@ -582,88 +575,5 @@ class CodeAutoGenerateEditBlock:
             "generated_tokens_cost": generated_tokens_cost
         }
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=json.dumps(statistics, ensure_ascii=False),
-                ),
-            )
-
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
-
-    def multi_round_run(
-        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
-    ) -> CodeGenerateResult:
-        llm_config = {"human_as_model": self.args.human_as_model}
-        result = []
-        source_content = source_code_list.to_str()
-
-        # 获取包上下文信息
-        package_context = ""
-
-        if self.args.enable_active_context:
-            # 初始化活动上下文管理器
-            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
-            # 获取活动上下文信息
-            result = active_context_manager.load_active_contexts_for_files(
-                [source.module_name for source in source_code_list.sources]
-            )
-            # 将活动上下文信息格式化为文本
-            if result.contexts:
-                package_context_parts = []
-                for dir_path, context in result.contexts.items():
-                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
-
-                package_context = "\n".join(package_context_parts)
-
-        if self.args.template == "common":
-            init_prompt = self.multi_round_instruction.prompt(
-                instruction=query, content=source_content, context=self.args.context,
-                package_context=package_context
-            )
-        elif self.args.template == "auto_implement":
-            init_prompt = self.auto_implement_function.prompt(
-                instruction=query, content=source_content
-            )
-
-        conversations = []
-        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
-        conversations.append({"role": "user", "content": init_prompt})
-
-        with open(self.args.target_file, "w",encoding="utf-8") as file:
-            file.write(init_prompt)
-
-        code_llm = self.llms[0]
-        t = code_llm.chat_oai(conversations=conversations,
-                              llm_config=llm_config)
-
-        result.append(t[0].output)
-
-        conversations.append({"role": "assistant", "content": t[0].output})
-
-        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        current_step = 0
-
-        while current_step < max_steps:
-
-            conversations.append({"role": "user", "content": "继续"})
-
-            with open(self.args.target_file, "w",encoding="utf-8") as file:
-                file.write("继续")
-
-            t = code_llm.chat_oai(
-                conversations=conversations, llm_config=llm_config)
-
-            result.append(t[0].output)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            current_step += 1
-
-            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-
-                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+
autocoder/common/code_auto_generate_strict_diff.py CHANGED
@@ -337,15 +337,7 @@ class CodeAutoGenerateStrictDiff:
                 {"role": "system", "content": self.args.system_prompt})
 
         conversations.append({"role": "user", "content": init_prompt})
-
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=json.dumps({}, ensure_ascii=False),
-                ),
-            )
+
 
         conversations_list = []
         results = []
@@ -365,6 +357,7 @@ class CodeAutoGenerateStrictDiff:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
                     for _ in range(self.generate_times_same_model):
 
@@ -373,9 +366,9 @@ class CodeAutoGenerateStrictDiff:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                        if i==0:
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -406,7 +399,7 @@ class CodeAutoGenerateStrictDiff:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
-
+                        count += 1
                 temp_results = [future.result() for future in futures]
                 for result in temp_results:
                     results.append(result.content)
@@ -439,105 +432,6 @@ class CodeAutoGenerateStrictDiff:
             "input_tokens_cost": input_tokens_cost,
             "generated_tokens_cost": generated_tokens_cost
         }
-
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=json.dumps(statistics, ensure_ascii=False),
-                ),
-            )
 
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
-
-    def multi_round_run(
-        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
-    ) -> CodeGenerateResult:
-
-        # Apply model filter for code_llm
-        printer = Printer()
-        for llm in self.llms:
-            model_filter = ModelPathFilter.from_model_object(llm, self.args)
-            filtered_sources = []
-            for source in source_code_list.sources:
-                if model_filter.is_accessible(source.module_name):
-                    filtered_sources.append(source)
-                else:
-                    printer.print_in_terminal("index_file_filtered",
-                                              style="yellow",
-                                              file_path=source.module_name,
-                                              model_name=",".join(llm_utils.get_llm_names(llm)))
-
-            source_code_list = SourceCodeList(filtered_sources)
-
-        llm_config = {"human_as_model": self.args.human_as_model}
-        result = []
-        source_content = source_code_list.to_str()
-
-        # 获取包上下文信息
-        package_context = ""
-
-        if self.args.enable_active_context:
-            # 初始化活动上下文管理器
-            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
-            # 获取活动上下文信息
-            result = active_context_manager.load_active_contexts_for_files(
-                [source.module_name for source in source_code_list.sources]
-            )
-            # 将活动上下文信息格式化为文本
-            if result.contexts:
-                package_context_parts = []
-                for dir_path, context in result.contexts.items():
-                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
-
-                package_context = "\n".join(package_context_parts)
-
-        if self.args.template == "common":
-            init_prompt = self.multi_round_instruction.prompt(
-                instruction=query, content=source_content, context=self.args.context,
-                package_context=package_context
-            )
-        elif self.args.template == "auto_implement":
-            init_prompt = self.auto_implement_function.prompt(
-                instruction=query, content=source_content
-            )
-
-        conversations = []
-        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
-        conversations.append({"role": "user", "content": init_prompt})
-
-        with open(self.args.target_file, "w",encoding="utf-8") as file:
-            file.write(init_prompt)
-
-        code_llm = self.llms[0]
-        t = code_llm.chat_oai(conversations=conversations,
-                              llm_config=llm_config)
-
-        result.append(t[0].output)
-
-        conversations.append({"role": "assistant", "content": t[0].output})
-
-        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        current_step = 0
-
-        while current_step < max_steps:
-
-            conversations.append({"role": "user", "content": "继续"})
-
-            with open(self.args.target_file, "w",encoding="utf-8") as file:
-                file.write("继续")
-
-            t = code_llm.chat_oai(
-                conversations=conversations, llm_config=llm_config)
-
-            result.append(t[0].output)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            current_step += 1
-
-            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+
autocoder/common/code_auto_merge_editblock.py CHANGED
@@ -316,29 +316,7 @@ class CodeAutoMergeEditBlock:
                 unmerged_blocks.append(
                     (file_path, head, update, similarity))
 
-        if unmerged_blocks:
-            if self.args.request_id and not self.args.skip_events:
-                # collect unmerged blocks
-                event_data = []
-                for file_path, head, update, similarity in unmerged_blocks:
-                    event_data.append(
-                        {
-                            "file_path": file_path,
-                            "head": head,
-                            "update": update,
-                            "similarity": similarity,
-                        }
-                    )
-
-                _ = queue_communicate.send_event(
-                    request_id=self.args.request_id,
-                    event=CommunicateEvent(
-                        event_type=CommunicateEventType.CODE_UNMERGE_RESULT.value,
-                        data=json.dumps(event_data, ensure_ascii=False),
-                    ),
-                )
-                return
-
+        if unmerged_blocks:
             self.printer.print_in_terminal("unmerged_blocks_warning", num_blocks=len(unmerged_blocks))
             self._print_unmerged_blocks(unmerged_blocks)
             return
@@ -361,28 +339,6 @@ class CodeAutoMergeEditBlock:
             with open(file_path, "w") as f:
                 f.write(new_content)
 
-        if self.args.request_id and not self.args.skip_events:
-            # collect modified files
-            event_data = []
-            for code in merged_blocks:
-                file_path, head, update, similarity = code
-                event_data.append(
-                    {
-                        "file_path": file_path,
-                        "head": head,
-                        "update": update,
-                        "similarity": similarity,
-                    }
-                )
-
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_MERGE_RESULT.value,
-                    data=json.dumps(event_data, ensure_ascii=False),
-                ),
-            )
-
         if changes_made:
             if not force_skip_git and not self.args.skip_commit:
                 try:
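With the queue_communicate branches removed, the unmerged-blocks path reduces to a terminal warning plus an early return, and the merge-result event is gone entirely. A minimal sketch of the surviving control flow, with print standing in for the class's Printer helper:

from typing import List, Tuple

# (file_path, head, update, similarity) tuples, as collected in the method above
def handle_unmerged_blocks(unmerged_blocks: List[Tuple[str, str, str, float]]) -> bool:
    """Return False (abort the merge) if any edit block failed to match."""
    if unmerged_blocks:
        print(f"warning: {len(unmerged_blocks)} block(s) could not be merged")
        for file_path, head, update, similarity in unmerged_blocks:
            print(f"  {file_path}: best similarity {similarity:.2f}")
        return False
    return True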
autocoder/common/command_templates.py CHANGED
@@ -98,13 +98,7 @@ def init_command_template(source_dir:str):
 ## The model will generate the code for you
 ## 模型将为您生成代码
 execute: true
-
-## If you want to generate multiple files, you can enable this option to generate the code in multiple rounds
-## to avoid exceeding the maximum token limit of the model
-## 如果您想生成多个文件,可以启用此选项,以便在多个回合中生成代码
-## 以避免超过模型的最大令牌限制
-enable_multi_round_generate: false
-
+
 ## AutoCoder will merge the generated code into your project
 ## AutoCoder将合并生成的代码到您的项目中
 auto_merge: true
@@ -175,8 +169,7 @@ def base_base(source_dir:str,project_type:str)->str:
 target_file: {{ target_file }}
 
 model: v3_chat
-model_max_input_length: 60000
-enable_multi_round_generate: false
+model_max_input_length: 60000
 index_filter_workers: 100
 index_build_workers: 100
 index_filter_level: 1
autocoder/common/stream_out_type.py CHANGED
@@ -16,6 +16,9 @@ class CodeRankStreamOutType(Enum):
 class LintStreamOutType(Enum):
     LINT = "lint"
 
+class UnmergedBlocksStreamOutType(Enum):
+    UNMERGED_BLOCKS = "unmerged_blocks"
+
 class CompileStreamOutType(Enum):
     COMPILE = "compile"
 
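The new enum follows the same one-member pattern as its neighbours. A small sketch of how a consumer might dispatch on its string value; the routing function is hypothetical, only the enum itself comes from this diff:

from enum import Enum

class UnmergedBlocksStreamOutType(Enum):
    UNMERGED_BLOCKS = "unmerged_blocks"

def route_stream_event(event_type, payload):
    # Stream-out types compare by their string value, matching the
    # "lint" and "compile" members defined alongside this one.
    if event_type == UnmergedBlocksStreamOutType.UNMERGED_BLOCKS.value:
        print(f"unmerged blocks event: {payload}")

route_stream_event("unmerged_blocks", "3 blocks failed to merge")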
autocoder/common/types.py CHANGED
@@ -16,4 +16,5 @@ class CodeGenerateResult(pydantic.BaseModel):
 
 class MergeCodeWithoutEffect(pydantic.BaseModel):
     success_blocks: List[Tuple[str, str]]
-    failed_blocks: List[Any]
+    failed_blocks: List[Any]
+    merged_blocks: List[Any]
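Pydantic fields are required by default, so adding merged_blocks means every site that constructs MergeCodeWithoutEffect must now supply it. A minimal sketch of the updated model in use (the sample data is illustrative):

from typing import Any, List, Tuple

import pydantic

class MergeCodeWithoutEffect(pydantic.BaseModel):
    success_blocks: List[Tuple[str, str]]
    failed_blocks: List[Any]
    merged_blocks: List[Any]

result = MergeCodeWithoutEffect(
    success_blocks=[("src/app.py", "merged file content")],
    failed_blocks=[],
    merged_blocks=[{"file_path": "src/app.py", "similarity": 0.97}],
)
print(result.merged_blocks)  # omitting merged_blocks would raise a ValidationError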
autocoder/common/v2/code_auto_generate.py CHANGED
@@ -15,7 +15,7 @@ from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
 from autocoder.common import SourceCodeList
 from autocoder.memory.active_context_manager import ActiveContextManager
-
+from loguru import logger
 
 class CodeAutoGenerate:
     def __init__(
@@ -119,6 +119,7 @@ class CodeAutoGenerate:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -126,9 +127,9 @@ class CodeAutoGenerate:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
-                        model_names.append(model_name)
-                        if i==0:
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -159,6 +160,7 @@ class CodeAutoGenerate:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
+                        count += 1
 
                 temp_results = [future.result() for future in futures]
 
autocoder/common/v2/code_auto_generate_diff.py CHANGED
@@ -281,6 +281,7 @@ class CodeAutoGenerateDiff:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -288,9 +289,9 @@ class CodeAutoGenerateDiff:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                        if i==0:
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -321,7 +322,7 @@ class CodeAutoGenerateDiff:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
-
+                        count += 1
                 temp_results = [future.result() for future in futures]
 
                 for result,model_name in zip(temp_results,model_names):
autocoder/common/v2/code_auto_generate_editblock.py CHANGED
@@ -15,6 +15,7 @@ from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
 from autocoder.common import SourceCodeList
 from autocoder.memory.active_context_manager import ActiveContextManager
+from loguru import logger
 
 
 
@@ -298,6 +299,7 @@ class CodeAutoGenerateEditBlock:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -305,9 +307,10 @@ class CodeAutoGenerateEditBlock:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
-                        model_names.append(model_name)
-                        if i==0:
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        if count==0:
+                            logger.info(f"code generation with model(Stream): {model_name}")
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -330,7 +333,8 @@ class CodeAutoGenerateEditBlock:
                                     generated_tokens_count=last_meta.generated_tokens_count
                                 )
                             futures.append(executor.submit(job))
-                        else:
+                        else:
+                            logger.info(f"code generation with model(Non-stream): {model_name}")
                             futures.append(executor.submit(
                                 chat_with_continue,
                                 llm=llm,
@@ -338,6 +342,7 @@ class CodeAutoGenerateEditBlock:
                                 llm_config=llm_config,
                                 args=self.args
                             ))
+                        count += 1
 
                 temp_results = [future.result() for future in futures]
 