auto_coder-0.1.207-py3-none-any.whl → auto_coder-0.1.208-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

Files changed (33)
  1. {auto_coder-0.1.207.dist-info → auto_coder-0.1.208.dist-info}/METADATA +2 -2
  2. {auto_coder-0.1.207.dist-info → auto_coder-0.1.208.dist-info}/RECORD +33 -30
  3. autocoder/agent/auto_demand_organizer.py +212 -0
  4. autocoder/agent/auto_guess_query.py +284 -0
  5. autocoder/auto_coder.py +64 -19
  6. autocoder/auto_coder_rag.py +6 -0
  7. autocoder/chat_auto_coder.py +119 -16
  8. autocoder/command_args.py +21 -5
  9. autocoder/common/__init__.py +7 -1
  10. autocoder/common/code_auto_generate.py +32 -10
  11. autocoder/common/code_auto_generate_diff.py +85 -47
  12. autocoder/common/code_auto_generate_editblock.py +50 -28
  13. autocoder/common/code_auto_generate_strict_diff.py +79 -45
  14. autocoder/common/code_auto_merge.py +51 -15
  15. autocoder/common/code_auto_merge_diff.py +55 -2
  16. autocoder/common/code_auto_merge_editblock.py +84 -14
  17. autocoder/common/code_auto_merge_strict_diff.py +69 -32
  18. autocoder/common/code_modification_ranker.py +100 -0
  19. autocoder/common/command_completer.py +6 -4
  20. autocoder/common/types.py +10 -2
  21. autocoder/dispacher/actions/action.py +141 -94
  22. autocoder/dispacher/actions/plugins/action_regex_project.py +35 -25
  23. autocoder/lang.py +9 -1
  24. autocoder/pyproject/__init__.py +4 -0
  25. autocoder/rag/long_context_rag.py +2 -0
  26. autocoder/rag/rag_entry.py +2 -2
  27. autocoder/suffixproject/__init__.py +2 -0
  28. autocoder/tsproject/__init__.py +4 -0
  29. autocoder/version.py +1 -1
  30. {auto_coder-0.1.207.dist-info → auto_coder-0.1.208.dist-info}/LICENSE +0 -0
  31. {auto_coder-0.1.207.dist-info → auto_coder-0.1.208.dist-info}/WHEEL +0 -0
  32. {auto_coder-0.1.207.dist-info → auto_coder-0.1.208.dist-info}/entry_points.txt +0 -0
  33. {auto_coder-0.1.207.dist-info → auto_coder-0.1.208.dist-info}/top_level.txt +0 -0
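
The three generator modules below (code_auto_generate_diff.py, code_auto_generate_editblock.py, code_auto_generate_strict_diff.py) all receive the same refactor: instead of returning a (contents, conversations) tuple produced by a single code model, each run now fans out over a list of code models and wraps the candidates in a CodeGenerateResult imported from autocoder.common.types. The diff only shows the import, not the type itself; a minimal sketch of what it plausibly holds, inferred from the constructor calls in the hunks (field names come from the diff; the pydantic BaseModel base is an assumption):

# Hypothetical reconstruction of CodeGenerateResult from autocoder/common/types.py.
from typing import Dict, List
from pydantic import BaseModel

class CodeGenerateResult(BaseModel):
    # One generated answer per sampled model run.
    contents: List[str]
    # For each answer, the full chat history that produced it.
    conversations: List[List[Dict[str, str]]]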
--- a/autocoder/common/code_auto_generate_diff.py
+++ b/autocoder/common/code_auto_generate_diff.py
@@ -1,9 +1,12 @@
 from typing import List, Dict, Tuple
-from autocoder.common.types import Mode
+from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
 from autocoder.utils.queue_communicate import queue_communicate, CommunicateEvent, CommunicateEventType
 from autocoder.common import sys_prompt
+from concurrent.futures import ThreadPoolExecutor
+import json
+
 
 class CodeAutoGenerateDiff:
     def __init__(
@@ -12,12 +15,15 @@ class CodeAutoGenerateDiff:
         self.llm = llm
         self.args = args
         self.action = action
+        self.llms = []
+        self.generate_times_same_model = args.generate_times_same_model
         if not self.llm:
             raise ValueError(
                 "Please provide a valid model instance to use for code diff generation."
             )
-        if self.llm.get_sub_client("code_model"):
-            self.llm = self.llm.get_sub_client("code_model")
+        self.llms = self.llm.get_sub_client("code_model") or [self.llm]
+        if not isinstance(self.llms, list):
+            self.llms = [self.llms]
 
     @byzerllm.prompt(llm=lambda self: self.llm)
     def multi_round_instruction(
@@ -139,12 +145,12 @@ class CodeAutoGenerateDiff:
 
         每次生成一个文件的diff,然后询问我是否继续,当我回复继续,继续生成下一个文件的diff。当没有后续任务时,请回复 "__完成__" 或者 "__EOF__"。
         """
-
+
         if not self.args.include_project_structure:
             return {
                 "structure": ""
             }
-
+
         return {
             "structure": (
                 self.action.pp.get_tree_like_directory_structure()
@@ -271,12 +277,12 @@ class CodeAutoGenerateDiff:
 
         {{ instruction }}
         """
-
+
         if not self.args.include_project_structure:
             return {
                 "structure": ""
             }
-
+
         return {
             "structure": (
                 self.action.pp.get_tree_like_directory_structure()
@@ -287,72 +293,107 @@ class CodeAutoGenerateDiff:
 
     def single_round_run(
         self, query: str, source_content: str
-    ) -> Tuple[str, Dict[str, str]]:
+    ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
 
-        if self.args.request_id and not self.args.skip_events:
-            queue_communicate.send_event_no_wait(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=query,
-                ),
+        if self.args.template == "common":
+            init_prompt = self.single_round_instruction.prompt(
+                instruction=query, content=source_content, context=self.args.context
+            )
+        elif self.args.template == "auto_implement":
+            init_prompt = self.auto_implement_function.prompt(
+                instruction=query, content=source_content
             )
-
-        init_prompt = self.single_round_instruction.prompt(
-            instruction=query, content=source_content, context=self.args.context
-        )
 
         with open(self.args.target_file, "w") as file:
             file.write(init_prompt)
 
         conversations = []
+
         if self.args.system_prompt and self.args.system_prompt.strip() == "claude":
-            conversations.append({"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
-        else:
-            conversations.append({"role": "user", "content": init_prompt})
+            conversations.append(
+                {"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
+        elif self.args.system_prompt:
+            conversations.append(
+                {"role": "system", "content": self.args.system_prompt})
 
-        t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
-        conversations.append({"role": "assistant", "content": t[0].output})
+        conversations.append({"role": "user", "content": init_prompt})
 
         if self.args.request_id and not self.args.skip_events:
-            queue_communicate.send_event_no_wait(
+            _ = queue_communicate.send_event(
+                request_id=self.args.request_id,
+                event=CommunicateEvent(
+                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
+                    data=json.dumps({}, ensure_ascii=False),
+                ),
+            )
+
+        conversations_list = []
+        results = []
+        if not self.args.human_as_model:
+            with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
+                futures = []
+                for llm in self.llms:
+                    for _ in range(self.generate_times_same_model):
+                        futures.append(executor.submit(
+                            llm.chat_oai, conversations=conversations, llm_config=llm_config))
+                results = [future.result()[0].output for future in futures]
+            for result in results:
+                conversations_list.append(
+                    conversations + [{"role": "assistant", "content": result}])
+        else:
+            results = []
+            conversations_list = []
+            for _ in range(self.args.human_model_num):
+                v = self.llms[0].chat_oai(
+                    conversations=conversations, llm_config=llm_config)
+                results.append(v[0].output)
+                conversations_list.append(
+                    conversations + [{"role": "assistant", "content": v[0].output}])
+
+        if self.args.request_id and not self.args.skip_events:
+            _ = queue_communicate.send_event(
                 request_id=self.args.request_id,
                 event=CommunicateEvent(
                     event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data="",
+                    data=json.dumps({}, ensure_ascii=False),
                 ),
             )
 
-        return [t[0].output], conversations
+        return CodeGenerateResult(contents=results, conversations=conversations_list)
 
     def multi_round_run(
         self, query: str, source_content: str, max_steps: int = 10
-    ) -> Tuple[List[str], List[Dict[str, str]]]:
+    ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
 
-        init_prompt = self.multi_round_instruction.prompt(
-            instruction=query, content=source_content, context=self.args.context
-        )
+        if self.args.template == "common":
+            init_prompt = self.multi_round_instruction.prompt(
+                instruction=query, content=source_content, context=self.args.context
+            )
+        elif self.args.template == "auto_implement":
+            init_prompt = self.auto_implement_function.prompt(
+                instruction=query, content=source_content
+            )
 
-        conversations = [{"role": "user", "content": init_prompt}]
+        conversations = []
+        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
+        conversations.append({"role": "user", "content": init_prompt})
 
         with open(self.args.target_file, "w") as file:
             file.write(init_prompt)
 
-        t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
+        code_llm = self.llms[0]
+        t = code_llm.chat_oai(conversations=conversations,
+                              llm_config=llm_config)
 
         result.append(t[0].output)
 
         conversations.append({"role": "assistant", "content": t[0].output})
 
-        if (
-            "__完成__" in t[0].output
-            or "/done" in t[0].output
-            or "__EOF__" in t[0].output
-        ):
-            return result, conversations
+        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
+            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
 
         current_step = 0
 
@@ -363,17 +404,14 @@ class CodeAutoGenerateDiff:
             with open(self.args.target_file, "w") as file:
                 file.write("继续")
 
-            t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
+            t = code_llm.chat_oai(
+                conversations=conversations, llm_config=llm_config)
 
             result.append(t[0].output)
             conversations.append({"role": "assistant", "content": t[0].output})
            current_step += 1
 
-            if (
-                "__完成__" in t[0].output
-                or "/done" in t[0].output
-                or "__EOF__" in t[0].output
-            ):
-                return result, conversations
+            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
+                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
 
-        return result, conversations
+        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
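
The single_round_run rewrite above is the heart of the release: when human_as_model is off, every configured code model is sampled generate_times_same_model times in parallel, and each candidate keeps its own copy of the conversation. A self-contained sketch of that fan-out pattern (FakeClient and its chat_oai signature are stand-ins for byzerllm clients, not the real API):

# Standalone sketch of the parallel fan-out used by single_round_run.
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from typing import Dict, List

@dataclass
class FakeResponse:
    output: str

class FakeClient:
    """Stand-in for a byzerllm client."""
    def __init__(self, name: str):
        self.name = name

    def chat_oai(self, conversations: List[Dict[str, str]], llm_config: dict):
        # A real client would call the model here.
        return [FakeResponse(output=f"candidate from {self.name}")]

llms = [FakeClient("model_a"), FakeClient("model_b")]
generate_times_same_model = 2
conversations = [{"role": "user", "content": "generate a diff"}]

with ThreadPoolExecutor(max_workers=len(llms) * generate_times_same_model) as executor:
    futures = [
        executor.submit(llm.chat_oai, conversations=conversations, llm_config={})
        for llm in llms
        for _ in range(generate_times_same_model)
    ]
    results = [future.result()[0].output for future in futures]

# Each candidate gets its own conversation history, mirroring conversations_list.
conversations_list = [
    conversations + [{"role": "assistant", "content": r}] for r in results
]
print(results)  # four candidates: two models x two samples each

Because futures are collected in submission order, results stays aligned with the (model, sample) pair that produced each candidate.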
--- a/autocoder/common/code_auto_generate_editblock.py
+++ b/autocoder/common/code_auto_generate_editblock.py
@@ -1,5 +1,5 @@
 from typing import List, Dict, Tuple
-from autocoder.common.types import Mode
+from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
 from autocoder.common import sys_prompt
@@ -9,6 +9,7 @@ from autocoder.utils.queue_communicate import (
     CommunicateEventType,
 )
 import json
+from concurrent.futures import ThreadPoolExecutor
 
 
 class CodeAutoGenerateEditBlock:
@@ -25,14 +26,16 @@ class CodeAutoGenerateEditBlock:
         self.action = action
         self.fence_0 = fence_0
         self.fence_1 = fence_1
+        self.generate_times_same_model = args.generate_times_same_model
         if not self.llm:
             raise ValueError(
                 "Please provide a valid model instance to use for code generation."
             )
-        if self.llm.get_sub_client("code_model"):
-            self.llm = self.llm.get_sub_client("code_model")
+        self.llms = self.llm.get_sub_client("code_model") or [self.llm]
+        if not isinstance(self.llms, list):
+            self.llms = [self.llms]
 
-    @byzerllm.prompt(llm=lambda self: self.llm)
+    @byzerllm.prompt()
     def auto_implement_function(self, instruction: str, content: str) -> str:
         """
         下面是一些文件路径以及每个文件对应的源码:
@@ -49,7 +52,7 @@ class CodeAutoGenerateEditBlock:
 
         """
 
-    @byzerllm.prompt(llm=lambda self: self.llm)
+    @byzerllm.prompt()
     def multi_round_instruction(self, instruction: str, content: str, context: str = "") -> str:
         """
         如果你需要生成代码,对于每个需要更改的文件,你需要按 *SEARCH/REPLACE block* 的格式进行生成。
@@ -211,7 +214,7 @@ class CodeAutoGenerateEditBlock:
             "fence_1": self.fence_1,
         }
 
-    @byzerllm.prompt(llm=lambda self: self.llm)
+    @byzerllm.prompt()
     def single_round_instruction(self, instruction: str, content: str, context: str = "") -> str:
         """
         如果你需要生成代码,对于每个需要更改的文件,你需要按 *SEARCH/REPLACE block* 的格式进行生成。
@@ -372,7 +375,7 @@ class CodeAutoGenerateEditBlock:
 
     def single_round_run(
         self, query: str, source_content: str
-    ) -> Tuple[str, Dict[str, str]]:
+    ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
 
         if self.args.template == "common":
@@ -388,12 +391,14 @@ class CodeAutoGenerateEditBlock:
             file.write(init_prompt)
 
         conversations = []
-
+
         if self.args.system_prompt and self.args.system_prompt.strip() == "claude":
-            conversations.append({"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
+            conversations.append(
+                {"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
         elif self.args.system_prompt:
-            conversations.append({"role": "system", "content": self.args.system_prompt})
-
+            conversations.append(
+                {"role": "system", "content": self.args.system_prompt})
+
         conversations.append({"role": "user", "content": init_prompt})
 
         if self.args.request_id and not self.args.skip_events:
@@ -404,11 +409,30 @@ class CodeAutoGenerateEditBlock:
                     data=json.dumps({}, ensure_ascii=False),
                 ),
             )
-
-        t = self.llm.chat_oai(
-            conversations=conversations, llm_config=llm_config)
-        conversations.append({"role": "assistant", "content": t[0].output})
-
+
+        conversations_list = []
+        results = []
+        if not self.args.human_as_model:
+            with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
+                futures = []
+                for llm in self.llms:
+                    for _ in range(self.generate_times_same_model):
+                        futures.append(executor.submit(
+                            llm.chat_oai, conversations=conversations, llm_config=llm_config))
+                results = [future.result()[0].output for future in futures]
+            for result in results:
+                conversations_list.append(
+                    conversations + [{"role": "assistant", "content": result}])
+        else:
+            results = []
+            conversations_list = []
+            for _ in range(self.args.human_model_num):
+                v = self.llms[0].chat_oai(
+                    conversations=conversations, llm_config=llm_config)
+                results.append(v[0].output)
+                conversations_list.append(conversations + [{"role": "assistant", "content": v[0].output}])
+
+        if self.args.request_id and not self.args.skip_events:
             _ = queue_communicate.send_event(
                 request_id=self.args.request_id,
                 event=CommunicateEvent(
@@ -416,16 +440,12 @@ class CodeAutoGenerateEditBlock:
                     data=json.dumps({}, ensure_ascii=False),
                 ),
             )
-            return [t[0].output], conversations
-        else:
-            t = self.llm.chat_oai(
-                conversations=conversations, llm_config=llm_config)
-            conversations.append({"role": "assistant", "content": t[0].output})
-            return [t[0].output], conversations
+
+        return CodeGenerateResult(contents=results, conversations=conversations_list)
 
     def multi_round_run(
         self, query: str, source_content: str, max_steps: int = 10
-    ) -> Tuple[List[str], List[Dict[str, str]]]:
+    ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
 
@@ -445,7 +465,8 @@ class CodeAutoGenerateEditBlock:
         with open(self.args.target_file, "w") as file:
             file.write(init_prompt)
 
-        t = self.llm.chat_oai(conversations=conversations,
+        code_llm = self.llms[0]
+        t = code_llm.chat_oai(conversations=conversations,
                               llm_config=llm_config)
 
         result.append(t[0].output)
@@ -453,7 +474,7 @@ class CodeAutoGenerateEditBlock:
         conversations.append({"role": "assistant", "content": t[0].output})
 
         if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-            return result, conversations
+            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
 
         current_step = 0
 
@@ -464,7 +485,7 @@ class CodeAutoGenerateEditBlock:
             with open(self.args.target_file, "w") as file:
                 file.write("继续")
 
-            t = self.llm.chat_oai(
+            t = code_llm.chat_oai(
                 conversations=conversations, llm_config=llm_config)
 
             result.append(t[0].output)
@@ -472,6 +493,7 @@ class CodeAutoGenerateEditBlock:
             current_step += 1
 
             if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
-                return result, conversations
 
-        return result, conversations
+                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
+
+        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
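
All three constructors replace the old single-model swap with a list: self.llms = self.llm.get_sub_client("code_model") or [self.llm]. The "or" covers a missing sub-client (a falsy return falls back to the main model), and the isinstance check normalizes a lone sub-client into a one-element list. A minimal illustration of that normalization (StubLLM and its get_sub_client are stubs, not the byzerllm implementation):

# Sketch of the sub-client normalization shared by all three generators.
from typing import List

class StubLLM:
    """Stand-in for a byzerllm client; get_sub_client is stubbed."""
    def __init__(self, sub_clients=None):
        self._subs = sub_clients or {}

    def get_sub_client(self, name: str):
        # May return None, a single client, or a list of clients.
        return self._subs.get(name)

def resolve_code_models(llm: StubLLM) -> List[StubLLM]:
    llms = llm.get_sub_client("code_model") or [llm]  # fall back to the main model
    if not isinstance(llms, list):
        llms = [llms]  # a lone sub-client becomes a one-element list
    return llms

assert len(resolve_code_models(StubLLM())) == 1                               # fallback
assert len(resolve_code_models(StubLLM({"code_model": StubLLM()}))) == 1      # single sub-client
assert len(resolve_code_models(StubLLM({"code_model": [StubLLM()] * 2}))) == 2  # list of sub-clients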
--- a/autocoder/common/code_auto_generate_strict_diff.py
+++ b/autocoder/common/code_auto_generate_strict_diff.py
@@ -1,9 +1,11 @@
 from typing import List, Dict, Tuple
-from autocoder.common.types import Mode
+from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
 from autocoder.utils.queue_communicate import queue_communicate, CommunicateEvent, CommunicateEventType
 from autocoder.common import sys_prompt
+from concurrent.futures import ThreadPoolExecutor
+import json
 
 class CodeAutoGenerateStrictDiff:
     def __init__(
@@ -12,12 +14,15 @@ class CodeAutoGenerateStrictDiff:
         self.llm = llm
         self.args = args
         self.action = action
+        self.llms = []
+        self.generate_times_same_model = args.generate_times_same_model
         if not self.llm:
             raise ValueError(
                 "Please provide a valid model instance to use for code generation."
-            )
-        if self.llm.get_sub_client("code_model"):
-            self.llm = self.llm.get_sub_client("code_model")
+            )
+        self.llms = self.llm.get_sub_client("code_model") or [self.llm]
+        if not isinstance(self.llms, list):
+            self.llms = [self.llms]
 
     @byzerllm.prompt(llm=lambda self: self.llm)
     def multi_round_instruction(
@@ -258,74 +263,106 @@ class CodeAutoGenerateStrictDiff:
 
     def single_round_run(
         self, query: str, source_content: str
-    ) -> Tuple[str, Dict[str, str]]:
+    ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
 
-        if self.args.request_id and not self.args.skip_events:
-            queue_communicate.send_event_no_wait(
-                request_id=self.args.request_id,
-                event=CommunicateEvent(
-                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
-                    data=query,
-                ),
+        if self.args.template == "common":
+            init_prompt = self.single_round_instruction.prompt(
+                instruction=query, content=source_content, context=self.args.context
+            )
+        elif self.args.template == "auto_implement":
+            init_prompt = self.auto_implement_function.prompt(
+                instruction=query, content=source_content
             )
-
-        init_prompt = self.single_round_instruction.prompt(
-            instruction=query, content=source_content, context=self.args.context
-        )
 
         with open(self.args.target_file, "w") as file:
             file.write(init_prompt)
 
         conversations = []
+
         if self.args.system_prompt and self.args.system_prompt.strip() == "claude":
-            conversations.append({"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
+            conversations.append(
+                {"role": "system", "content": sys_prompt.claude_sys_prompt.prompt()})
         elif self.args.system_prompt:
-            conversations.append({"role": "system", "content": self.args.system_prompt})
-
+            conversations.append(
+                {"role": "system", "content": self.args.system_prompt})
+
         conversations.append({"role": "user", "content": init_prompt})
 
-        t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
-        conversations.append({"role": "assistant", "content": t[0].output})
+        if self.args.request_id and not self.args.skip_events:
+            _ = queue_communicate.send_event(
+                request_id=self.args.request_id,
+                event=CommunicateEvent(
+                    event_type=CommunicateEventType.CODE_GENERATE_START.value,
+                    data=json.dumps({}, ensure_ascii=False),
+                ),
+            )
+
+        conversations_list = []
+        results = []
+        if not self.args.human_as_model:
+            with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
+                futures = []
+                for llm in self.llms:
+                    for _ in range(self.generate_times_same_model):
+                        futures.append(executor.submit(
+                            llm.chat_oai, conversations=conversations, llm_config=llm_config))
+                results = [future.result()[0].output for future in futures]
+            for result in results:
+                conversations_list.append(
+                    conversations + [{"role": "assistant", "content": result}])
+        else:
+            results = []
+            conversations_list = []
+            for _ in range(self.args.human_model_num):
+                v = self.llms[0].chat_oai(
+                    conversations=conversations, llm_config=llm_config)
+                results.append(v[0].output)
+                conversations_list.append(conversations + [{"role": "assistant", "content": v[0].output}])
 
         if self.args.request_id and not self.args.skip_events:
-            queue_communicate.send_event_no_wait(
+            _ = queue_communicate.send_event(
                 request_id=self.args.request_id,
                 event=CommunicateEvent(
                     event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data="",
+                    data=json.dumps({}, ensure_ascii=False),
                 ),
             )
 
-        return [t[0].output], conversations
+        return CodeGenerateResult(contents=results, conversations=conversations_list)
 
     def multi_round_run(
         self, query: str, source_content: str, max_steps: int = 10
-    ) -> Tuple[List[str], List[Dict[str, str]]]:
+    ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
 
-        init_prompt = self.multi_round_instruction.prompt(
-            instruction=query, content=source_content, context=self.args.context
-        )
+        if self.args.template == "common":
+            init_prompt = self.multi_round_instruction.prompt(
+                instruction=query, content=source_content, context=self.args.context
+            )
+        elif self.args.template == "auto_implement":
+            init_prompt = self.auto_implement_function.prompt(
+                instruction=query, content=source_content
+            )
 
-        conversations = [{"role": "user", "content": init_prompt}]
+        conversations = []
+        # conversations.append({"role": "system", "content": sys_prompt.prompt()})
+        conversations.append({"role": "user", "content": init_prompt})
 
         with open(self.args.target_file, "w") as file:
             file.write(init_prompt)
-
-        t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
+
+        code_llm = self.llms[0]
+        t = code_llm.chat_oai(conversations=conversations,
+                              llm_config=llm_config)
 
         result.append(t[0].output)
 
         conversations.append({"role": "assistant", "content": t[0].output})
 
-        if (
-            "__完成__" in t[0].output
-            or "/done" in t[0].output
-            or "__EOF__" in t[0].output
-        ):
-            return result, conversations
+        if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
+            return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
 
         current_step = 0
 
@@ -336,17 +373,14 @@ class CodeAutoGenerateStrictDiff:
             with open(self.args.target_file, "w") as file:
                 file.write("继续")
 
-            t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
+            t = code_llm.chat_oai(
+                conversations=conversations, llm_config=llm_config)
 
             result.append(t[0].output)
             conversations.append({"role": "assistant", "content": t[0].output})
             current_step += 1
 
-            if (
-                "__完成__" in t[0].output
-                or "/done" in t[0].output
-                or "__EOF__" in t[0].output
-            ):
-                return result, conversations
+            if "__完成__" in t[0].output or "/done" in t[0].output or "__EOF__" in t[0].output:
+                return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
 
-        return result, conversations
+        return CodeGenerateResult(contents=["\n\n".join(result)], conversations=[conversations])
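
Callers are thus insulated from how many candidates were produced: multi_round_run joins its steps with blank lines into exactly one candidate, while single_round_run may return several, one per (model, sample) pair. A small illustration of the two return shapes, reusing the hypothetical CodeGenerateResult sketch from the top of this diff (not the package's actual test code):

conversations = [{"role": "user", "content": "..."}]

# multi_round_run: all steps are joined into a single candidate
multi = CodeGenerateResult(
    contents=["\n\n".join(["diff for file A", "diff for file B"])],
    conversations=[conversations],
)

# single_round_run: one candidate per sampled model run
single = CodeGenerateResult(
    contents=["candidate 1", "candidate 2"],
    conversations=[conversations, conversations],
)

assert len(multi.contents) == 1
assert len(single.contents) == 2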