auto-coder 0.1.330__py3-none-any.whl → 0.1.332__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of auto-coder might be problematic.

Files changed (47)
  1. {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/METADATA +1 -1
  2. {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/RECORD +47 -45
  3. autocoder/agent/agentic_filter.py +928 -0
  4. autocoder/agent/project_reader.py +1 -14
  5. autocoder/auto_coder.py +6 -47
  6. autocoder/auto_coder_runner.py +2 -0
  7. autocoder/command_args.py +1 -6
  8. autocoder/commands/auto_command.py +1 -1
  9. autocoder/commands/tools.py +68 -16
  10. autocoder/common/__init__.py +8 -3
  11. autocoder/common/auto_coder_lang.py +21 -1
  12. autocoder/common/code_auto_generate.py +6 -160
  13. autocoder/common/code_auto_generate_diff.py +5 -111
  14. autocoder/common/code_auto_generate_editblock.py +5 -95
  15. autocoder/common/code_auto_generate_strict_diff.py +6 -112
  16. autocoder/common/code_auto_merge_editblock.py +1 -45
  17. autocoder/common/code_modification_ranker.py +6 -2
  18. autocoder/common/command_templates.py +2 -9
  19. autocoder/common/conf_utils.py +36 -0
  20. autocoder/common/stream_out_type.py +7 -2
  21. autocoder/common/types.py +3 -2
  22. autocoder/common/v2/code_auto_generate.py +6 -4
  23. autocoder/common/v2/code_auto_generate_diff.py +4 -3
  24. autocoder/common/v2/code_auto_generate_editblock.py +9 -4
  25. autocoder/common/v2/code_auto_generate_strict_diff.py +182 -14
  26. autocoder/common/v2/code_auto_merge_diff.py +560 -306
  27. autocoder/common/v2/code_auto_merge_editblock.py +12 -45
  28. autocoder/common/v2/code_auto_merge_strict_diff.py +76 -7
  29. autocoder/common/v2/code_diff_manager.py +73 -6
  30. autocoder/common/v2/code_editblock_manager.py +534 -82
  31. autocoder/dispacher/actions/action.py +15 -28
  32. autocoder/dispacher/actions/plugins/action_regex_project.py +5 -9
  33. autocoder/helper/project_creator.py +0 -1
  34. autocoder/index/entry.py +35 -53
  35. autocoder/index/filter/normal_filter.py +0 -16
  36. autocoder/lang.py +2 -4
  37. autocoder/linters/shadow_linter.py +4 -0
  38. autocoder/pyproject/__init__.py +2 -19
  39. autocoder/rag/cache/simple_cache.py +31 -6
  40. autocoder/regexproject/__init__.py +4 -22
  41. autocoder/suffixproject/__init__.py +6 -24
  42. autocoder/tsproject/__init__.py +5 -22
  43. autocoder/version.py +1 -1
  44. {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/LICENSE +0 -0
  45. {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/WHEEL +0 -0
  46. {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/entry_points.txt +0 -0
  47. {auto_coder-0.1.330.dist-info → auto_coder-0.1.332.dist-info}/top_level.txt +0 -0
autocoder/common/code_auto_generate_diff.py CHANGED
@@ -365,15 +365,6 @@ class CodeAutoGenerateDiff:
 
         conversations.append({"role": "user", "content": init_prompt})
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=json.dumps({}, ensure_ascii=False),
-                ),
-            )
-
         conversations_list = []
         results = []
         input_tokens_count = 0
@@ -392,6 +383,7 @@ class CodeAutoGenerateDiff:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -399,9 +391,9 @@ class CodeAutoGenerateDiff:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                        if i==0:
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -432,7 +424,7 @@ class CodeAutoGenerateDiff:
                             llm_config=llm_config,
                            args=self.args
                        ))
-
+                        count += 1
                 temp_results = [future.result() for future in futures]
                 for result in temp_results:
                     results.append(result.content)
@@ -468,104 +460,6 @@ class CodeAutoGenerateDiff:
             "generated_tokens_cost": generated_tokens_cost
         }
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=json.dumps(statistics, ensure_ascii=False),
-                ),
-            )
 
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
-
-    def multi_round_run(
-        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
-    ) -> CodeGenerateResult:
-
-        # Apply model filter for code_llm
-        printer = Printer()
-        for llm in self.llms:
-            model_filter = ModelPathFilter.from_model_object(llm, self.args)
-            filtered_sources = []
-            for source in source_code_list.sources:
-                if model_filter.is_accessible(source.module_name):
-                    filtered_sources.append(source)
-                else:
-                    printer.print_in_terminal("index_file_filtered",
-                                              style="yellow",
-                                              file_path=source.path,
-                                              model_name=",".join(llm_utils.get_llm_names(llm)))
-
-            source_code_list = SourceCodeList(filtered_sources)
-
-        llm_config = {"human_as_model": self.args.human_as_model}
-        result = []
-        source_content = source_code_list.to_str()
-
-        # Fetch the package context information
-        package_context = ""
-
-        if self.args.enable_active_context:
-            # Initialize the active context manager
-            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
-            # Load the active context information
-            result = active_context_manager.load_active_contexts_for_files(
-                [source.module_name for source in source_code_list.sources]
-            )
-            # Format the active context information as text
-            if result.contexts:
-                package_context_parts = []
-                for dir_path, context in result.contexts.items():
-                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
-
-                package_context = "\n".join(package_context_parts)
-
-        if self.args.template == "common":
-            init_prompt = self.multi_round_instruction.prompt(
-                instruction=query, content=source_content, context=self.args.context,
-                package_context=package_context
-            )
-        elif self.args.template == "auto_implement":
-            init_prompt = self.auto_implement_function.prompt(
-                instruction=query, content=source_content
-            )
-
-        conversations = []
-        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
-        conversations.append({"role": "user", "content": init_prompt})
-
-        with open(self.args.target_file, "w",encoding="utf-8") as file:
-            file.write(init_prompt)
-
-        code_llm = self.llms[0]
-        t = code_llm.chat_oai(conversations=conversations,
-                              llm_config=llm_config)
-
-        result.append(t[0].output)
-
-        conversations.append({"role": "assistant", "content": t[0].output})
-
-        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        current_step = 0
-
-        while current_step < max_steps:
-
-            conversations.append({"role": "user", "content": "继续"})
-
-            with open(self.args.target_file, "w",encoding="utf-8") as file:
-                file.write("继续")
-
-            t = code_llm.chat_oai(
-                conversations=conversations, llm_config=llm_config)
-
-            result.append(t[0].output)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            current_step += 1
-
-            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+
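
The same refactor shows up in every generate class in this release: the per-model loop index `i` becomes a single `count` that increments once per submitted task, so the `count == 0` branch (which in the diff wraps the `stream_chat_with_continue` call in a local `job()`) fires only for the first task across all models, not once per model. A minimal sketch of the pattern, with a hypothetical stand-in worker instead of the real chat call:

```python
from concurrent.futures import ThreadPoolExecutor

# Hypothetical stand-ins; the real code iterates llm objects and submits
# stream_chat_with_continue jobs.
llms = ["model_a", "model_b"]
generate_times_same_model = 2

def run_generation(llm: str, first: bool) -> str:
    # `first` marks the one task that takes the count == 0 path.
    return f"{llm}:{'first' if first else 'repeat'}"

futures = []
with ThreadPoolExecutor(max_workers=len(llms) * generate_times_same_model) as executor:
    count = 0  # shared across all models, unlike the old per-model index `i`
    for llm in llms:
        for _ in range(generate_times_same_model):
            futures.append(executor.submit(run_generation, llm, count == 0))
            count += 1

print([f.result() for f in futures])
# ['model_a:first', 'model_a:repeat', 'model_b:repeat', 'model_b:repeat']
```
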
autocoder/common/code_auto_generate_editblock.py CHANGED
@@ -474,15 +474,6 @@ class CodeAutoGenerateEditBlock:
 
         conversations.append({"role": "user", "content": init_prompt})
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=json.dumps({}, ensure_ascii=False),
-                ),
-            )
-
         conversations_list = []
         results = []
         input_tokens_count = 0
@@ -505,6 +496,7 @@ class CodeAutoGenerateEditBlock:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -512,9 +504,9 @@ class CodeAutoGenerateEditBlock:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
+                    for _ in range(self.generate_times_same_model):
                         model_names.append(model_name)
-                        if i==0:
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -545,6 +537,7 @@ class CodeAutoGenerateEditBlock:
                             llm_config=llm_config,
                            args=self.args
                        ))
+                        count += 1
 
                 temp_results = [future.result() for future in futures]
 
@@ -582,88 +575,5 @@ class CodeAutoGenerateEditBlock:
             "generated_tokens_cost": generated_tokens_cost
         }
 
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=json.dumps(statistics, ensure_ascii=False),
-                ),
-            )
-
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
-
-    def multi_round_run(
-        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
-    ) -> CodeGenerateResult:
-        llm_config = {"human_as_model": self.args.human_as_model}
-        result = []
-        source_content = source_code_list.to_str()
-
-        # Fetch the package context information
-        package_context = ""
-
-        if self.args.enable_active_context:
-            # Initialize the active context manager
-            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
-            # Load the active context information
-            result = active_context_manager.load_active_contexts_for_files(
-                [source.module_name for source in source_code_list.sources]
-            )
-            # Format the active context information as text
-            if result.contexts:
-                package_context_parts = []
-                for dir_path, context in result.contexts.items():
-                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
-
-                package_context = "\n".join(package_context_parts)
-
-        if self.args.template == "common":
-            init_prompt = self.multi_round_instruction.prompt(
-                instruction=query, content=source_content, context=self.args.context,
-                package_context=package_context
-            )
-        elif self.args.template == "auto_implement":
-            init_prompt = self.auto_implement_function.prompt(
-                instruction=query, content=source_content
-            )
-
-        conversations = []
-        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
-        conversations.append({"role": "user", "content": init_prompt})
-
-        with open(self.args.target_file, "w",encoding="utf-8") as file:
-            file.write(init_prompt)
-
-        code_llm = self.llms[0]
-        t = code_llm.chat_oai(conversations=conversations,
-                              llm_config=llm_config)
-
-        result.append(t[0].output)
-
-        conversations.append({"role": "assistant", "content": t[0].output})
-
-        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        current_step = 0
-
-        while current_step < max_steps:
-
-            conversations.append({"role": "user", "content": "继续"})
-
-            with open(self.args.target_file, "w",encoding="utf-8") as file:
-                file.write("继续")
-
-            t = code_llm.chat_oai(
-                conversations=conversations, llm_config=llm_config)
-
-            result.append(t[0].output)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            current_step += 1
-
-            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-
-                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+
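
Both this class and `CodeAutoGenerateDiff` lose their `multi_round_run` method. Its control flow was: send the initial prompt, record the reply, then keep sending "继续" ("continue") until the output contains one of the termination markers (`__完成__`, `/done`, `__EOF__`) or `max_steps` rounds have run. A stripped-down sketch of that loop, with a stubbed chat function standing in for `chat_oai`:

```python
TERMINATORS = ("__完成__", "/done", "__EOF__")

def fake_chat(conversations) -> str:
    # Stand-in for code_llm.chat_oai(); finishes on the second round.
    return "partial output" if len(conversations) < 3 else "rest of output __EOF__"

def multi_round(init_prompt: str, max_steps: int = 10) -> str:
    conversations = [{"role": "user", "content": init_prompt}]
    result = [fake_chat(conversations)]
    conversations.append({"role": "assistant", "content": result[-1]})

    step = 0
    while step < max_steps and not any(t in result[-1] for t in TERMINATORS):
        conversations.append({"role": "user", "content": "继续"})
        output = fake_chat(conversations)
        result.append(output)
        conversations.append({"role": "assistant", "content": output})
        step += 1
    return "\n\n".join(result)

print(multi_round("implement the feature"))
```
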
autocoder/common/code_auto_generate_strict_diff.py CHANGED
@@ -337,15 +337,7 @@ class CodeAutoGenerateStrictDiff:
                 {"role": "system", "content": self.args.system_prompt})
 
         conversations.append({"role": "user", "content": init_prompt})
-
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=json.dumps({}, ensure_ascii=False),
-                ),
-            )
+
 
         conversations_list = []
         results = []
@@ -365,6 +357,7 @@ class CodeAutoGenerateStrictDiff:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
                     for _ in range(self.generate_times_same_model):
 
@@ -373,9 +366,9 @@ class CodeAutoGenerateStrictDiff:
                         if model_names_list:
                             model_name = model_names_list[0]
 
-                        for i in range(self.generate_times_same_model):
+                        for _ in range(self.generate_times_same_model):
                             model_names.append(model_name)
-                            if i==0:
+                            if count == 0:
                                 def job():
                                     stream_generator = stream_chat_with_continue(
                                         llm=llm,
@@ -406,7 +399,7 @@ class CodeAutoGenerateStrictDiff:
                                 llm_config=llm_config,
                                args=self.args
                            ))
-
+                            count += 1
                 temp_results = [future.result() for future in futures]
                 for result in temp_results:
                     results.append(result.content)
@@ -439,105 +432,6 @@ class CodeAutoGenerateStrictDiff:
             "input_tokens_cost": input_tokens_cost,
             "generated_tokens_cost": generated_tokens_cost
         }
-
-        if self.args.request_id and not self.args.skip_events:
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=json.dumps(statistics, ensure_ascii=False),
-                ),
-            )
 
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
-
-    def multi_round_run(
-        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
-    ) -> CodeGenerateResult:
-
-        # Apply model filter for code_llm
-        printer = Printer()
-        for llm in self.llms:
-            model_filter = ModelPathFilter.from_model_object(llm, self.args)
-            filtered_sources = []
-            for source in source_code_list.sources:
-                if model_filter.is_accessible(source.module_name):
-                    filtered_sources.append(source)
-                else:
-                    printer.print_in_terminal("index_file_filtered",
-                                              style="yellow",
-                                              file_path=source.module_name,
-                                              model_name=",".join(llm_utils.get_llm_names(llm)))
-
-            source_code_list = SourceCodeList(filtered_sources)
-
-        llm_config = {"human_as_model": self.args.human_as_model}
-        result = []
-        source_content = source_code_list.to_str()
-
-        # Fetch the package context information
-        package_context = ""
-
-        if self.args.enable_active_context:
-            # Initialize the active context manager
-            active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
-            # Load the active context information
-            result = active_context_manager.load_active_contexts_for_files(
-                [source.module_name for source in source_code_list.sources]
-            )
-            # Format the active context information as text
-            if result.contexts:
-                package_context_parts = []
-                for dir_path, context in result.contexts.items():
-                    package_context_parts.append(f"<package_info>{context.content}</package_info>")
-
-                package_context = "\n".join(package_context_parts)
-
-        if self.args.template == "common":
-            init_prompt = self.multi_round_instruction.prompt(
-                instruction=query, content=source_content, context=self.args.context,
-                package_context=package_context
-            )
-        elif self.args.template == "auto_implement":
-            init_prompt = self.auto_implement_function.prompt(
-                instruction=query, content=source_content
-            )
-
-        conversations = []
-        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
-        conversations.append({"role": "user", "content": init_prompt})
-
-        with open(self.args.target_file, "w",encoding="utf-8") as file:
-            file.write(init_prompt)
-
-        code_llm = self.llms[0]
-        t = code_llm.chat_oai(conversations=conversations,
-                              llm_config=llm_config)
-
-        result.append(t[0].output)
-
-        conversations.append({"role": "assistant", "content": t[0].output})
-
-        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        current_step = 0
-
-        while current_step < max_steps:
-
-            conversations.append({"role": "user", "content": "继续"})
-
-            with open(self.args.target_file, "w",encoding="utf-8") as file:
-                file.write("继续")
-
-            t = code_llm.chat_oai(
-                conversations=conversations, llm_config=llm_config)
-
-            result.append(t[0].output)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            current_step += 1
-
-            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
-
-        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+
autocoder/common/code_auto_merge_editblock.py CHANGED
@@ -316,29 +316,7 @@ class CodeAutoMergeEditBlock:
                 unmerged_blocks.append(
                     (file_path, head, update, similarity))
 
-        if unmerged_blocks:
-            if self.args.request_id and not self.args.skip_events:
-                # collect unmerged blocks
-                event_data = []
-                for file_path, head, update, similarity in unmerged_blocks:
-                    event_data.append(
-                        {
-                            "file_path": file_path,
-                            "head": head,
-                            "update": update,
-                            "similarity": similarity,
-                        }
-                    )
-
-                _ = queue_communicate.send_event(
-                    request_id=self.args.request_id,
-                    event=CommunicateEvent(
-                        event_type=CommunicateEventType.CODE_UNMERGE_RESULT.value,
-                        data=json.dumps(event_data, ensure_ascii=False),
-                    ),
-                )
-                return
-
+        if unmerged_blocks:
             self.printer.print_in_terminal("unmerged_blocks_warning", num_blocks=len(unmerged_blocks))
             self._print_unmerged_blocks(unmerged_blocks)
             return
@@ -361,28 +339,6 @@ class CodeAutoMergeEditBlock:
             with open(file_path, "w") as f:
                 f.write(new_content)
 
-        if self.args.request_id and not self.args.skip_events:
-            # collect modified files
-            event_data = []
-            for code in merged_blocks:
-                file_path, head, update, similarity = code
-                event_data.append(
-                    {
-                        "file_path": file_path,
-                        "head": head,
-                        "update": update,
-                        "similarity": similarity,
-                    }
-                )
-
-            _ = queue_communicate.send_event(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_MERGE_RESULT.value,
-                    data=json.dumps(event_data, ensure_ascii=False),
-                ),
-            )
-
         if changes_made:
             if not force_skip_git and not self.args.skip_commit:
                 try:
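
`CodeAutoMergeEditBlock` still tracks blocks as `(file_path, head, update, similarity)` tuples, but unmerged blocks are now reported only through the terminal printer; the `CODE_UNMERGE_RESULT` and `CODE_MERGE_RESULT` events disappear along with the rest of the `queue_communicate` plumbing. A rough sketch of similarity-gated block classification that keeps the same tuple shape (the threshold and matching below are illustrative assumptions, not the package's actual algorithm):

```python
from difflib import SequenceMatcher

SIMILARITY_THRESHOLD = 0.9  # illustrative cutoff, not a value from auto-coder

def classify_block(content: str, file_path: str, head: str, update: str):
    """Classify one edit block, keeping the (file_path, head, update, similarity) shape."""
    if head in content:
        similarity = 1.0  # exact match: the block can be merged
    else:
        similarity = SequenceMatcher(None, head, content).ratio()
    record = (file_path, head, update, similarity)
    return ("merged" if similarity >= SIMILARITY_THRESHOLD else "unmerged", record)

content = "def add(a, b):\n    return a + b\n"
status, record = classify_block(content, "calc.py", "def add(a, b):", "def add(a: int, b: int):")
print(status, record[3])  # merged 1.0
```
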
autocoder/common/code_modification_ranker.py CHANGED
@@ -148,17 +148,20 @@ class CodeModificationRanker:
         input_tokens_count = 0
         generated_tokens_count = 0
         try:
+            import traceback
+            traceback.print_stack()
             # Create a thread pool with (number of models * generate_times) workers
             with ThreadPoolExecutor(max_workers=total_tasks) as executor:
                 # Submit tasks for each model and generate_times
                 futures = []
+                count = 0
                 for llm in self.llms:
                     model_name = ",".join(get_llm_names(llm))
                     self.printer.print_in_terminal(
                         "ranking_start", style="blue", count=len(generate_result.contents), model_name=model_name)
 
-                    for i in range(rank_times):
-                        if i == 0:
+                    for _ in range(rank_times):
+                        if count == 0:
                             futures.append(
                                 executor.submit(
                                     stream_chat_with_continue,
@@ -178,6 +181,7 @@ class CodeModificationRanker:
                                     self.args
                                 )
                             )
+                        count += 1
 
                 # Collect all results
                 results = []
autocoder/common/command_templates.py CHANGED
@@ -98,13 +98,7 @@ def init_command_template(source_dir:str):
 ## The model will generate the code for you
 ## 模型将为您生成代码
 execute: true
-
-## If you want to generate multiple files, you can enable this option to generate the code in multiple rounds
-## to avoid exceeding the maximum token limit of the model
-## 如果您想生成多个文件,可以启用此选项,以便在多个回合中生成代码
-## 以避免超过模型的最大令牌限制
-enable_multi_round_generate: false
-
+
 ## AutoCoder will merge the generated code into your project
 ## AutoCoder将合并生成的代码到您的项目中
 auto_merge: true
@@ -175,8 +169,7 @@ def base_base(source_dir:str,project_type:str)->str:
 target_file: {{ target_file }}
 
 model: v3_chat
-model_max_input_length: 60000
-enable_multi_round_generate: false
+model_max_input_length: 60000
 index_filter_workers: 100
 index_build_workers: 100
 index_filter_level: 1
autocoder/common/conf_utils.py ADDED
@@ -0,0 +1,36 @@
+import os
+import json
+import pkg_resources
+from autocoder.common import AutoCoderArgs
+
+## For internal use by auto-coder
+
+def load_tokenizer():
+    from autocoder.rag.variable_holder import VariableHolder
+    from tokenizers import Tokenizer
+    try:
+        tokenizer_path = pkg_resources.resource_filename(
+            "autocoder", "data/tokenizer.json"
+        )
+        VariableHolder.TOKENIZER_PATH = tokenizer_path
+        VariableHolder.TOKENIZER_MODEL = Tokenizer.from_file(tokenizer_path)
+    except FileNotFoundError:
+        tokenizer_path = None
+
+
+def save_memory(args: AutoCoderArgs,memory):
+    with open(os.path.join(args.source_dir, ".auto-coder", "plugins", "chat-auto-coder", "memory.json"), "w",encoding="utf-8") as f:
+        json.dump(memory, f, indent=2, ensure_ascii=False)
+
+def load_memory(args: AutoCoderArgs):
+    memory_path = os.path.join(args.source_dir, ".auto-coder", "plugins", "chat-auto-coder", "memory.json")
+    if os.path.exists(memory_path):
+        with open(memory_path, "r", encoding="utf-8") as f:
+            _memory = json.load(f)
+        memory = _memory
+    else:
+        memory = {}
+    return memory
+
+def get_memory(args: AutoCoderArgs):
+    return load_memory(args)
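
The new `conf_utils.py` gives auto-coder one place to load the bundled tokenizer and to read and write the chat-auto-coder memory file at `.auto-coder/plugins/chat-auto-coder/memory.json`. Assuming `AutoCoderArgs` validates with just `source_dir` (only that field is used by these helpers), usage would look roughly like this; note that `save_memory` opens the path directly, so the plugin directory must already exist:

```python
import os
from autocoder.common import AutoCoderArgs
from autocoder.common.conf_utils import load_tokenizer, save_memory, load_memory

load_tokenizer()  # populates VariableHolder from the bundled tokenizer.json, if present

args = AutoCoderArgs(source_dir=".")  # assumption: source_dir alone is enough to construct args

# save_memory() does not create directories, so make sure the path exists first.
os.makedirs(os.path.join(args.source_dir, ".auto-coder", "plugins", "chat-auto-coder"), exist_ok=True)

memory = load_memory(args)  # returns {} while memory.json does not exist yet
memory.setdefault("conversation", []).append("hello")
save_memory(args, memory)
print(load_memory(args))  # {'conversation': ['hello']}
```
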
autocoder/common/stream_out_type.py CHANGED
@@ -1,11 +1,13 @@
 from enum import Enum
 
 class AutoCommandStreamOutType(Enum):
-    COMMAND_SUGGESTION = "command_suggestion"
-
+    COMMAND_SUGGESTION = "command_suggestion"
 class IndexFilterStreamOutType(Enum):
     FILE_NUMBER_LIST = "file_number_list"
 
+class AgenticFilterStreamOutType(Enum):
+    AGENTIC_FILTER = "agentic_filter"
+
 
 class CodeGenerateStreamOutType(Enum):
     CODE_GENERATE = "code_generate"
@@ -16,6 +18,9 @@ class CodeRankStreamOutType(Enum):
 class LintStreamOutType(Enum):
     LINT = "lint"
 
+class UnmergedBlocksStreamOutType(Enum):
+    UNMERGED_BLOCKS = "unmerged_blocks"
+
 class CompileStreamOutType(Enum):
     COMPILE = "compile"
 
autocoder/common/types.py CHANGED
@@ -1,6 +1,6 @@
 from enum import Enum
 import pydantic
-from typing import List, Dict, Tuple,Any
+from typing import List, Dict, Tuple,Any,Optional
 class Mode(Enum):
     MULTI_ROUND = "multi_round"
     SINGLE_ROUND = "single_round"
@@ -16,4 +16,5 @@ class CodeGenerateResult(pydantic.BaseModel):
 
 class MergeCodeWithoutEffect(pydantic.BaseModel):
     success_blocks: List[Tuple[str, str]]
-    failed_blocks: List[Any]
+    failed_blocks: List[Any]
+    merged_blocks: Optional[Any] = None
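
`MergeCodeWithoutEffect` gains an optional `merged_blocks` field defaulting to `None`, so existing call sites that never pass it keep validating. A quick sketch (the model is restated locally; the tuple shape passed to `merged_blocks` is illustrative):

```python
from typing import Any, List, Optional, Tuple
import pydantic

class MergeCodeWithoutEffect(pydantic.BaseModel):
    success_blocks: List[Tuple[str, str]]
    failed_blocks: List[Any]
    merged_blocks: Optional[Any] = None  # added in this version range; default keeps old callers working

# Old-style call site, no merged_blocks:
old_style = MergeCodeWithoutEffect(success_blocks=[("a.py", "new code")], failed_blocks=[])
print(old_style.merged_blocks)  # None

# New-style call site attaching merged-block details:
new_style = MergeCodeWithoutEffect(
    success_blocks=[("a.py", "new code")],
    failed_blocks=[],
    merged_blocks=[("a.py", "head", "update", 0.97)],  # illustrative shape
)
```
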
autocoder/common/v2/code_auto_generate.py CHANGED
@@ -15,7 +15,7 @@ from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
 from autocoder.common import SourceCodeList
 from autocoder.memory.active_context_manager import ActiveContextManager
-
+from loguru import logger
 
 class CodeAutoGenerate:
     def __init__(
@@ -119,6 +119,7 @@ class CodeAutoGenerate:
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
+                count = 0
                 for llm in self.llms:
 
                     model_names_list = llm_utils.get_llm_names(llm)
@@ -126,9 +127,9 @@ class CodeAutoGenerate:
                     if model_names_list:
                         model_name = model_names_list[0]
 
-                    for i in range(self.generate_times_same_model):
-                        model_names.append(model_name)
-                        if i==0:
+                    for _ in range(self.generate_times_same_model):
+                        model_names.append(model_name)
+                        if count == 0:
                             def job():
                                 stream_generator = stream_chat_with_continue(
                                     llm=llm,
@@ -159,6 +160,7 @@ class CodeAutoGenerate:
                             llm_config=llm_config,
                            args=self.args
                        ))
+                        count += 1
 
                 temp_results = [future.result() for future in futures]