auto-coder 0.1.206__py3-none-any.whl → 0.1.208__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of auto-coder might be problematic.
Files changed (34)
  1. {auto_coder-0.1.206.dist-info → auto_coder-0.1.208.dist-info}/METADATA +2 -2
  2. {auto_coder-0.1.206.dist-info → auto_coder-0.1.208.dist-info}/RECORD +34 -31
  3. autocoder/agent/auto_demand_organizer.py +212 -0
  4. autocoder/agent/auto_guess_query.py +284 -0
  5. autocoder/auto_coder.py +64 -19
  6. autocoder/auto_coder_rag.py +11 -2
  7. autocoder/benchmark.py +50 -47
  8. autocoder/chat_auto_coder.py +125 -17
  9. autocoder/command_args.py +21 -5
  10. autocoder/common/__init__.py +7 -1
  11. autocoder/common/code_auto_generate.py +32 -10
  12. autocoder/common/code_auto_generate_diff.py +85 -47
  13. autocoder/common/code_auto_generate_editblock.py +50 -28
  14. autocoder/common/code_auto_generate_strict_diff.py +79 -45
  15. autocoder/common/code_auto_merge.py +51 -15
  16. autocoder/common/code_auto_merge_diff.py +55 -2
  17. autocoder/common/code_auto_merge_editblock.py +84 -14
  18. autocoder/common/code_auto_merge_strict_diff.py +69 -32
  19. autocoder/common/code_modification_ranker.py +100 -0
  20. autocoder/common/command_completer.py +6 -4
  21. autocoder/common/types.py +10 -2
  22. autocoder/dispacher/actions/action.py +141 -94
  23. autocoder/dispacher/actions/plugins/action_regex_project.py +35 -25
  24. autocoder/lang.py +9 -1
  25. autocoder/pyproject/__init__.py +4 -0
  26. autocoder/rag/long_context_rag.py +2 -0
  27. autocoder/rag/rag_entry.py +2 -2
  28. autocoder/suffixproject/__init__.py +2 -0
  29. autocoder/tsproject/__init__.py +4 -0
  30. autocoder/version.py +1 -1
  31. {auto_coder-0.1.206.dist-info → auto_coder-0.1.208.dist-info}/LICENSE +0 -0
  32. {auto_coder-0.1.206.dist-info → auto_coder-0.1.208.dist-info}/WHEEL +0 -0
  33. {auto_coder-0.1.206.dist-info → auto_coder-0.1.208.dist-info}/entry_points.txt +0 -0
  34. {auto_coder-0.1.206.dist-info → auto_coder-0.1.208.dist-info}/top_level.txt +0 -0
autocoder/common/code_modification_ranker.py ADDED
@@ -0,0 +1,100 @@
+import byzerllm
+from typing import List,Union
+from autocoder.common import AutoCoderArgs
+from autocoder.common.types import CodeGenerateResult
+from pydantic import BaseModel
+from loguru import logger
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import traceback
+
+class RankResult(BaseModel):
+    rank_result:List[int]
+
+class CodeModificationRanker:
+    def __init__(self, llm: byzerllm.ByzerLLM, args: AutoCoderArgs):
+        self.llm = llm
+        self.args = args
+        if self.llm.get_sub_client("generate_rerank_model"):
+            self.rerank_llm = self.llm.get_sub_client("generate_rerank_model")
+        else:
+            self.rerank_llm = self.llm
+
+    @byzerllm.prompt()
+    def _rank_modifications(self, s:CodeGenerateResult) -> str:
+        '''
+        对一组代码修改进行质量评估并排序。
+
+        下面是修改需求:
+
+        <edit_requirement>
+        {{ s.conversations[0][-2]["content"] }}
+        </edit_requirement>
+
+        下面是相应的代码修改:
+        {% for content in s.contents %}
+        <edit_block id="{{ loop.index0 }}">
+        {{content}}
+        </edit_block>
+        {% endfor %}
+
+        请输出如下格式的评估结果,只包含 JSON 数据:
+
+        ```json
+        {
+            "rank_result": [id1, id2, id3] // id 为 edit_block 的 id,按质量从高到低排序
+        }
+        ```
+
+        注意,只输出前面要求的 Json 格式就好,不要输出其他内容,Json 需要使用 ```json ```包裹。
+        '''
+
+
+    def rank_modifications(self, generate_result: CodeGenerateResult) -> CodeGenerateResult:
+        import time
+        start_time = time.time()
+
+        # 如果只有一个候选,直接返回
+        if len(generate_result.contents) == 1:
+            logger.info("Only 1 candidate, skip ranking")
+            return generate_result
+
+        logger.info(f"Start ranking {len(generate_result.contents)} candidates")
+        generate_times = self.args.generate_times_same_model
+
+        try:
+            # Create a thread pool with generate_times workers
+            with ThreadPoolExecutor(max_workers=generate_times) as executor:
+                # Submit tasks
+                futures = [
+                    executor.submit(
+                        self._rank_modifications.with_llm(self.rerank_llm).with_return_type(RankResult).run,
+                        generate_result
+                    ) for _ in range(generate_times)
+                ]
+
+                # Process results as they complete
+                for future in as_completed(futures):
+                    try:
+                        v = future.result()
+                        # If we get a valid result, use it and cancel other tasks
+                        for f in futures:
+                            f.cancel()
+
+                        elapsed = time.time() - start_time
+                        logger.info(f"Ranking completed in {elapsed:.2f}s, best candidate index: {v.rank_result[0]}")
+
+                        rerank_contents = [generate_result.contents[i] for i in v.rank_result]
+                        rerank_conversations = [generate_result.conversations[i] for i in v.rank_result]
+                        return CodeGenerateResult(contents=rerank_contents,conversations=rerank_conversations)
+                    except Exception as e:
+                        logger.warning(f"Ranking request failed: {str(e)}")
+                        logger.debug(traceback.format_exc())
+                        continue
+        except Exception as e:
+            logger.error(f"Ranking process failed: {str(e)}")
+            logger.debug(traceback.format_exc())
+
+        # If all requests failed, use the original codes
+        elapsed = time.time() - start_time
+        logger.warning(f"All ranking requests failed in {elapsed:.2f}s, using original order")
+        return generate_result
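For orientation, here is a minimal sketch of how the new ranker might be wired into a generation flow. The `llm`, `args`, and `generate_result` objects are assumed to already exist and be configured the way auto-coder configures them elsewhere; this is illustrative, not code from the package.

```python
# Hypothetical wiring, not part of the release; assumes `llm` (byzerllm.ByzerLLM)
# and `args` (AutoCoderArgs) are already set up.
from autocoder.common.code_modification_ranker import CodeModificationRanker

ranker = CodeModificationRanker(llm=llm, args=args)

# `generate_result` would normally come from one of the CodeAutoGenerate* classes.
ranked = ranker.rank_modifications(generate_result)

# Candidates come back reordered best-first; callers pick the first one.
best_content = ranked.contents[0]
```

If every ranking request fails, `rank_modifications` falls back to returning the candidates in their original order.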
autocoder/common/command_completer.py CHANGED
@@ -12,7 +12,7 @@ COMMANDS = {
         "/svg": {},
         "/sd": {},
     },
-    "/coding": {},
+    "/coding": {"/apply": {}, "/next": {}},
     "/chat": {"/new": {}, "/review": {}, "/no_context": {}},
     "/lib": {
         "/add": "",
@@ -147,7 +147,8 @@ class CommandTextParser:
                 current_word += v
                 self.is_extracted = True
                 self.current_word_end_pos = self.pos + 1
-                self.current_word_start_pos = self.current_word_end_pos - len(current_word)
+                self.current_word_start_pos = self.current_word_end_pos - \
+                    len(current_word)

     def previous(self) -> str:
         if self.pos > 1:
@@ -227,10 +228,11 @@ class CommandTextParser:
                 self.is_extracted = True

                 self.current_word_end_pos = self.pos + 1
-                self.current_word_start_pos = self.current_word_end_pos - len(current_word)
+                self.current_word_start_pos = self.current_word_end_pos - \
+                    len(current_word)

     def current_word(self) -> str:
-        return self.text[self.current_word_start_pos : self.current_word_end_pos]
+        return self.text[self.current_word_start_pos: self.current_word_end_pos]

     def get_current_word(self) -> str:
         return self.current_word()
autocoder/common/types.py CHANGED
@@ -1,10 +1,18 @@
 from enum import Enum
 import pydantic
-
+from typing import List, Dict, Tuple,Any
 class Mode(Enum):
     MULTI_ROUND = "multi_round"
     SINGLE_ROUND = "single_round"

 class StepNum(pydantic.BaseModel):
     step_num:int= pydantic.Field(1,description="总共步骤数")
-    content:int= pydantic.Field(1,description="详细的执行步骤,每个步骤需要包含一个shell/python 代码块")
+    content:int= pydantic.Field(1,description="详细的执行步骤,每个步骤需要包含一个shell/python 代码块")
+
+class CodeGenerateResult(pydantic.BaseModel):
+    contents:List[str]
+    conversations:List[List[Dict[str, Any]]]
+
+class MergeCodeWithoutEffect(pydantic.BaseModel):
+    success_blocks: List[Tuple[str, str]]
+    failed_blocks: List[Any]
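A quick illustration of the two new models. The values are invented, and the reading of `success_blocks` as (file path, merged content) pairs is an inference from the field types rather than something documented in the diff.

```python
from autocoder.common.types import CodeGenerateResult, MergeCodeWithoutEffect

# One generated candidate plus the conversation that produced it (made-up values).
result = CodeGenerateResult(
    contents=["print('hello')"],
    conversations=[[
        {"role": "user", "content": "write hello world"},
        {"role": "assistant", "content": "print('hello')"},
    ]],
)

# A merge report: blocks that applied cleanly and blocks that did not.
merge = MergeCodeWithoutEffect(
    success_blocks=[("src/app.py", "print('hello')")],  # assumed (path, content) pairs
    failed_blocks=[],
)
```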
autocoder/dispacher/actions/action.py CHANGED
@@ -97,38 +97,48 @@ class ActionTSProject:
         else:
             generate = CodeAutoGenerate(llm=self.llm, args=self.args, action=self)
         if self.args.enable_multi_round_generate:
-            result, conversations = generate.multi_round_run(
+            generate_result = generate.multi_round_run(
                 query=args.query, source_content=content
             )
         else:
-            result, conversations = generate.single_round_run(
+            generate_result = generate.single_round_run(
                 query=args.query, source_content=content
             )
-        content = "\n\n".join(result)
-
-        store_code_model_conversation(
-            args=self.args,
-            instruction=self.args.query,
-            conversations=conversations,
-            model=self.llm.default_model_name,
-        )
-        with open(args.target_file, "w") as file:
-            file.write(content)
-
-        if args.execute and args.auto_merge:
-            logger.info("Auto merge the code...")
-            if args.auto_merge == "diff":
-                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "strict_diff":
-                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "editblock":
-                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            else:
-                code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+        merge_result = None
+        if args.execute and args.auto_merge:
+            logger.info("Auto merge the code...")
+            if args.auto_merge == "diff":
+                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "strict_diff":
+                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "editblock":
+                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            else:
+                code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+
+        if merge_result is not None:
+            content = merge_result.contents[0]
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=merge_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
+        else:
+            content = generate_result.contents[0]
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=generate_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
+
+        with open(args.target_file, "w") as file:
+            file.write(content)


 class ActionPyScriptProject:
@@ -167,38 +177,49 @@ class ActionPyScriptProject:
         else:
             generate = CodeAutoGenerate(llm=self.llm, args=self.args, action=self)
         if self.args.enable_multi_round_generate:
-            result, conversations = generate.multi_round_run(
+            generate_result = generate.multi_round_run(
                 query=args.query, source_content=content
             )
         else:
-            result, conversations = generate.single_round_run(
+            generate_result = generate.single_round_run(
                 query=args.query, source_content=content
            )
-        content = "\n\n".join(result)
+        merge_result = None
+        if args.execute and args.auto_merge:
+            logger.info("Auto merge the code...")
+            if args.auto_merge == "diff":
+                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "strict_diff":
+                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "editblock":
+                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            else:
+                code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+
+            content = merge_result.contents[0]
+
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=merge_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
+        else:
+            content = generate_result.contents[0]

-        store_code_model_conversation(
-            args=self.args,
-            instruction=self.args.query,
-            conversations=conversations,
-            model=self.llm.default_model_name,
-        )
-        with open(self.args.target_file, "w") as file:
-            file.write(content)
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=generate_result.conversations[0],
+                model=self.llm.default_model_name,
+            )

-        if args.execute and args.auto_merge:
-            logger.info("Auto merge the code...")
-            if args.auto_merge == "diff":
-                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "strict_diff":
-                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "editblock":
-                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            else:
-                code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+        with open(self.args.target_file, "w") as file:
+            file.write(content)


 class ActionPyProject:
@@ -255,39 +276,50 @@ class ActionPyProject:


         if self.args.enable_multi_round_generate:
-            result, conversations = generate.multi_round_run(
+            generate_result = generate.multi_round_run(
                 query=args.query, source_content=content
             )
         else:
-            result, conversations = generate.single_round_run(
+            generate_result = generate.single_round_run(
                 query=args.query, source_content=content
             )

-        content = "\n\n".join(result)
+        merge_result = None
+        if args.execute and args.auto_merge:
+            logger.info("Auto merge the code...")
+            if args.auto_merge == "diff":
+                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "strict_diff":
+                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "editblock":
+                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            else:
+                code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+
+            content = merge_result.contents[0]
+
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=merge_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
+        else:
+            content = generate_result.contents[0]

-        store_code_model_conversation(
-            args=self.args,
-            instruction=self.args.query,
-            conversations=conversations,
-            model=self.llm.default_model_name,
-        )
-        with open(args.target_file, "w") as file:
-            file.write(content)
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=generate_result.conversations[0],
+                model=self.llm.default_model_name,
+            )

-        if args.execute and args.auto_merge:
-            logger.info("Auto merge the code...")
-            if args.auto_merge == "diff":
-                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "strict_diff":
-                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "editblock":
-                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            else:
-                code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+        with open(args.target_file, "w") as file:
+            file.write(content)


 class ActionSuffixProject:
@@ -337,36 +369,51 @@ class ActionSuffixProject:
         else:
             generate = CodeAutoGenerate(llm=self.llm, args=self.args, action=self)
         if self.args.enable_multi_round_generate:
-            result, conversations = generate.multi_round_run(
+            generate_result = generate.multi_round_run(
                 query=args.query, source_content=content
             )
         else:
-            result, conversations = generate.single_round_run(
+            generate_result = generate.single_round_run(
                 query=args.query, source_content=content
             )
-        content = "\n\n".join(result)
-
-        store_code_model_conversation(
-            args=self.args,
-            instruction=self.args.query,
-            conversations=conversations,
-            model=self.llm.default_model_name,
-        )
-
-        with open(args.target_file, "w") as file:
-            file.write(content)
+

+        merge_result = None
         if args.execute and args.auto_merge:
             logger.info("Auto merge the code...")
             if args.auto_merge == "diff":
                 code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
             elif args.auto_merge == "strict_diff":
                 code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
             elif args.auto_merge == "editblock":
                 code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
             else:
                 code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+
+        if merge_result is not None:
+            content = merge_result.contents[0]
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=merge_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
+            with open(args.target_file, "w") as file:
+                file.write(content)
+        else:
+            content = generate_result.contents[0]
+
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=generate_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
+
+            with open(args.target_file, "w") as file:
+                file.write(content)
+
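Across the Action* classes in action.py (and ActionRegexProject below) the refactor follows one pattern: the merger now receives the full CodeGenerateResult instead of a pre-joined string, and the first candidate of whichever result survives is what gets stored and written. A condensed sketch, not verbatim package code, with `generate`, `args`, and `self` standing in for the objects used in the classes above:

```python
# Condensed sketch of the refactored generate-then-merge flow (illustrative only).
generate_result = generate.single_round_run(query=args.query, source_content=content)

merge_result = None
if args.execute and args.auto_merge:
    # The merger sees every candidate, so it can rank and choose among them internally.
    code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
    merge_result = code_merge.merge_code(generate_result=generate_result)

chosen = merge_result if merge_result is not None else generate_result
content = chosen.contents[0]

store_code_model_conversation(
    args=self.args,
    instruction=self.args.query,
    conversations=chosen.conversations[0],
    model=self.llm.default_model_name,
)

with open(args.target_file, "w") as file:
    file.write(content)
```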
autocoder/dispacher/actions/plugins/action_regex_project.py CHANGED
@@ -68,35 +68,45 @@ class ActionRegexProject:
         else:
             generate = CodeAutoGenerate(llm=self.llm, args=self.args, action=self)
         if self.args.enable_multi_round_generate:
-            result, conversations = generate.multi_round_run(
+            generate_result = generate.multi_round_run(
                 query=args.query, source_content=content
             )
         else:
-            result, conversations = generate.single_round_run(
+            generate_result = generate.single_round_run(
                 query=args.query, source_content=content
             )
-        content = "\n\n".join(result)
+        merge_result = None
+        if args.execute and args.auto_merge:
+            logger.info("Auto merge the code...")
+            if args.auto_merge == "diff":
+                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "strict_diff":
+                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            elif args.auto_merge == "editblock":
+                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)
+            else:
+                code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
+                merge_result = code_merge.merge_code(generate_result=generate_result)

-        store_code_model_conversation(
-            args=self.args,
-            instruction=self.args.query,
-            conversations=conversations,
-            model=self.llm.default_model_name,
-        )
-        with open(args.target_file, "w") as file:
-            file.write(content)
-
-        if args.execute and args.auto_merge:
-            logger.info("Auto merge the code...")
-            if args.auto_merge == "diff":
-                code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "strict_diff":
-                code_merge = CodeAutoMergeStrictDiff(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
-            elif args.auto_merge == "editblock":
-                code_merge = CodeAutoMergeEditBlock(llm=self.llm, args=self.args)
-                code_merge.merge_code(content=content)
+        if merge_result is not None:
+            content = merge_result.contents[0]
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=merge_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
         else:
-            code_merge = CodeAutoMerge(llm=self.llm, args=self.args)
-            code_merge.merge_code(content=content)
+            content = generate_result.contents[0]
+            store_code_model_conversation(
+                args=self.args,
+                instruction=self.args.query,
+                conversations=generate_result.conversations[0],
+                model=self.llm.default_model_name,
+            )
+
+        with open(args.target_file, "w") as file:
+            file.write(content)
autocoder/lang.py CHANGED
@@ -21,6 +21,7 @@ lang_desc = {
         "cmd_args_title": "Command Line Arguments:",
         "py_packages": "The Python packages added to context, only works for py project type. Default is empty.",
         "human_as_model": "Use human as model or not. Default is False",
+        "human_model_num": "Number of human models to use. Default is 1",
         "urls": "The urls to crawl and extract text from, separated by comma",
         "search_engine": "The search engine to use. Supported engines: bing, google. Default is empty",
         "search_engine_token": "The token for the search engine API. Default is empty",
@@ -62,6 +63,8 @@ lang_desc = {
         "screenshot_url": "The URL of the webpage to capture",
         "screenshot_output": "The directory to save the screenshots",
         "code_model": "The name of the code model to use. Default is empty",
+        "generate_rerank_model": "The name of the generate rerank model to use. Default is empty",
+        "inference_model": "The name of the inference model to use. Default is empty",
         "system_prompt": "The system prompt for the model. Default is empty",
         "planner_model": "The name of the planner model to use. Default is empty",
         "designer_model": "The name of the designer model to use. Default is empty",
@@ -78,6 +81,7 @@ lang_desc = {
         "rag_token": "The token for the RAG service. Default is empty",
         "rag_type": "RAG type (simple/storage), default is storage",
         "rag_params_max_tokens": "The maximum number of tokens for RAG parameters. Default is 4096",
+        "generate_times_same_model": "Number of times to generate using the same model. Default is 1",
     },
     "zh": {
         "request_id": "Request ID",
@@ -101,6 +105,7 @@ lang_desc = {
         "cmd_args_title": "命令行参数:",
         "py_packages": "添加到上下文的Python包,仅适用于py项目类型。默认为空。",
         "human_as_model": "是否使用人工作为模型。默认为False",
+        "human_model_num": "使用的人工模型数量。默认为1",
         "urls": "要爬取并提取文本的URL,多个URL以逗号分隔",
         "search_engine": "要使用的搜索引擎。支持的引擎:bing、google。默认为空",
         "search_engine_token": "搜索引擎API的令牌。默认为空",
@@ -142,6 +147,8 @@ lang_desc = {
         "screenshot_url": "要捕获的网页的URL",
         "screenshot_output": "保存截图的目录",
         "code_model": "要使用的代码模型的名称。默认为空",
+        "generate_rerank_model": "要使用的生成重排序模型的名称。默认为空",
+        "inference_model": "要使用的推理模型的名称。默认为空",
         "system_prompt": "模型使用的系统提示词。默认为空",
         "next_desc": "基于上一个action文件创建一个新的action文件",
         "planner_model": "要使用的规划模型的名称。默认为空",
@@ -157,6 +164,7 @@ lang_desc = {
         "rag_url": "RAG服务的URL",
         "rag_token": "RAG服务的令牌",
         "rag_type": "RAG类型(simple/storage),默认是storage",
-        "rag_params_max_tokens": "RAG参数的最大token数。默认为4096",
+        "rag_params_max_tokens": "RAG参数的最大token数。默认为4096",
+        "generate_times_same_model": "使用相同模型生成的次数。默认为1",
     }
 }
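The new descriptions correspond to the options that feed the candidate-generation and reranking path added in this release. A hedged sketch of how they might be set; the constructor call below is a hypothetical minimal setup, and whether `generate_rerank_model` is passed through AutoCoderArgs or configured as a separate sub-client is not shown in this diff.

```python
from autocoder.common import AutoCoderArgs

# Illustrative values only.
args = AutoCoderArgs(
    source_dir=".",               # hypothetical project root
    generate_times_same_model=3,  # read by CodeModificationRanker as args.generate_times_same_model
)

# Inside CodeModificationRanker (see the new file above), the rerank model is resolved as:
#   rerank_llm = llm.get_sub_client("generate_rerank_model"), falling back to the default llm
```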
autocoder/pyproject/__init__.py CHANGED
@@ -114,6 +114,10 @@ class PyProject:
             "dist",
             "__pycache__",
             "node_modules",
+            ".auto-coder",
+            "actions",
+            ".vscode",
+            ".idea",
         ]

     @byzerllm.prompt()
autocoder/rag/long_context_rag.py CHANGED
@@ -395,6 +395,7 @@ class LongContextRAG:
             inference_deep_thought=self.args.inference_deep_thought,
             inference_slow_without_deep_thought=self.args.inference_slow_without_deep_thought,
             precision=self.args.inference_compute_precision,
+            data_cells_max_num=self.args.data_cells_max_num,
         )
         conversations = conversations[:-1]
         new_conversations = llm_compute_engine.process_conversation(
@@ -576,6 +577,7 @@ class LongContextRAG:
             inference_enhance=not self.args.disable_inference_enhance,
             inference_deep_thought=self.args.inference_deep_thought,
             precision=self.args.inference_compute_precision,
+            data_cells_max_num=self.args.data_cells_max_num,
             debug=False,
         )
         new_conversations = llm_compute_engine.process_conversation(
autocoder/rag/rag_entry.py CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional,Union
 from autocoder.common import AutoCoderArgs, SourceCode
 from byzerllm import ByzerLLM
 from .simple_rag import SimpleRAG
@@ -7,7 +7,7 @@ class RAGFactory:


     @staticmethod
-    def get_rag(llm: ByzerLLM, args: AutoCoderArgs, path: str,**kargs) -> SimpleRAG | LongContextRAG:
+    def get_rag(llm: ByzerLLM, args: AutoCoderArgs, path: str,**kargs) -> Union[SimpleRAG, LongContextRAG]:
         """
         Factory method to get the appropriate RAG implementation based on arguments.

autocoder/suffixproject/__init__.py CHANGED
@@ -55,6 +55,8 @@ class SuffixProject:
             "node_modules",
             ".auto-coder",
             ".vscode",
+            "actions",
+            ".idea",
         ]

     @byzerllm.prompt()
autocoder/tsproject/__init__.py CHANGED
@@ -43,6 +43,10 @@ class TSProject:
             "dist",
             "__pycache__",
             "node_modules",
+            ".auto-coder",
+            "actions",
+            ".vscode",
+            ".idea",
         ]

     @byzerllm.prompt()
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.206"
+__version__ = "0.1.208"