auto-coder 0.1.253__py3-none-any.whl → 0.1.254__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.253
+Version: 0.1.254
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -11,7 +11,7 @@ autocoder/chat_auto_coder_lang.py,sha256=OiwjnOqbiWwbaVuOhehEqdUFPOJbhfvQr3sIq3b
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
 autocoder/models.py,sha256=uo4mFWb-kdtd-8e2HPfCuXjhkDsE37n1dY6Nt1SU6kk,7625
-autocoder/version.py,sha256=r3svv_IwmWAh4ENi7e33jEo4y7TePaK1ugMkXwnow5w,23
+autocoder/version.py,sha256=LLYeCF3eLt_aBS3HsiF4z5DiLpgabod5UhddNdga9mk,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -80,21 +80,21 @@ autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=ckTbisMlvwMNHQbrt5WB7pBvf2XAhYQYGH8uyYrvGXU,6060
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/entry.py,sha256=hXSOi-jPgRBKQB55eqWkT95vxvWrbVHDuIMtDkqjNNw,12232
+autocoder/index/entry.py,sha256=1KIGPCtxQN0OdErAco9OmGTd5hB8WJTpWGrxsGLsTcE,12634
 autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
 autocoder/index/index.py,sha256=hVAIyF10N9hxKMWHA_ibYygGRZYJQZfZxRRrhQhrTvk,21225
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
 autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
 autocoder/index/filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/index/filter/normal_filter.py,sha256=APu34iSvWhtlLtWgkj8N3Vo4oW1TegtZQq2bwDX_cs4,8031
-autocoder/index/filter/quick_filter.py,sha256=8R54qtcAAAu0C6TQgJ4WkqkryGOke1mJXrv82vnvMAA,9342
+autocoder/index/filter/quick_filter.py,sha256=Dsm23Z_RrJ_UwCypGUPN1BlKUMibae_9_D8jWD1UDFw,10518
 autocoder/pyproject/__init__.py,sha256=dQ2_7YZ7guybT9BhfxSGn43eLQJGQN2zgeKa6--JlaQ,14403
 autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/rag/api_server.py,sha256=dRbhAZVRAOlZ64Cnxf4_rKb4iJwHnrWS9Zr67IVORw0,7288
-autocoder/rag/doc_filter.py,sha256=bGx3OEmSwmqAFOT12nGDE8saMpukSsHFMVRVbHBgXik,6516
+autocoder/rag/doc_filter.py,sha256=ZCixxUXNBbz6UiGbgXvbDWdn5moLac3HnZEphpasTDc,6579
 autocoder/rag/document_retriever.py,sha256=5oThtxukGuRFF96o3pHKsk306a8diXbhgSrbqyU2BvM,8894
 autocoder/rag/llm_wrapper.py,sha256=sbDxCANiZyWb_ocqNgqu2oy3c2t8orPNRGleEs-Uwl8,2649
-autocoder/rag/long_context_rag.py,sha256=aBp0fJ9JrlGb1KsJkQw0CSy7mX4kP52GPVKLX6sIXB4,25366
+autocoder/rag/long_context_rag.py,sha256=wmNmGsXN8RAFl6e9HaVzlwISXV9D-3bvf2qiaWjRy7w,24646
 autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
 autocoder/rag/rag_entry.py,sha256=6TKtErZ0Us9XSV6HgRKXA6yR3SiZGPHpynOKSaR1wgE,2463
 autocoder/rag/raw_rag.py,sha256=yS2Ur6kG0IRjhCj2_VonwxjY_xls_E62jO5Gz5j2nqE,2952
@@ -144,9 +144,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=xWXqICANbDOovH4wcFW1eSI7lB7TjXbk1mSU4bTKEW4,11434
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.253.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-auto_coder-0.1.253.dist-info/METADATA,sha256=NipnGMN1xPzRQLC0rvEbcWax5fPTf5T2pixSG_qVyYA,2616
-auto_coder-0.1.253.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-auto_coder-0.1.253.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
-auto_coder-0.1.253.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
-auto_coder-0.1.253.dist-info/RECORD,,
+auto_coder-0.1.254.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.254.dist-info/METADATA,sha256=a7eZ5YDrFy4_RSOLcTBzbdye35TnHukamLS-g3-jNkM,2616
+auto_coder-0.1.254.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.254.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.254.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.254.dist-info/RECORD,,
autocoder/index/entry.py CHANGED
@@ -107,7 +107,13 @@ def build_index_and_filter_files(
             model_name = "unknown(without default model name)"
         printer.print_in_terminal("quick_filter_start", style="blue", model_name=model_name)
         quick_filter = QuickFilter(index_manager,stats,sources)
-        final_files = quick_filter.filter(index_manager.read_index(),args.query)
+        quick_filter_result = quick_filter.filter(index_manager.read_index(),args.query)
+        if quick_filter_result.has_error:
+            raise KeyboardInterrupt(printer.get_message_from_key_with_format("quick_filter_failed",error=quick_filter_result.error_message))
+
+        # Merge quick filter results into final_files
+        final_files.update(quick_filter_result.files)
+

     if not args.skip_filter_index and not args.index_filter_model:
         model_name = getattr(index_manager.llm, 'default_model_name', None)
@@ -115,7 +121,8 @@ def build_index_and_filter_files(
             model_name = "unknown(without default model name)"
         printer.print_in_terminal("normal_filter_start", style="blue",model_name=model_name)
         normal_filter = NormalFilter(index_manager,stats,sources)
-        final_files = normal_filter.filter(index_manager.read_index(),args.query)
+        # Merge normal filter results into final_files
+        final_files.update(normal_filter.filter(index_manager.read_index(),args.query))


 def display_table_and_get_selections(data):
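The entry.py changes above replace the plain dict returned by the quick filter with a QuickFilterResult that is checked for errors before its files are merged. A rough sketch of that calling pattern follows; the quick_filter, printer, and final_files objects are stand-ins for the real ones in entry.py, not a verbatim excerpt:

    # Hedged sketch of the new control flow; names mirror the diff, objects are placeholders.
    def apply_quick_filter(quick_filter, index_items, query, final_files, printer):
        result = quick_filter.filter(index_items, query)  # now a QuickFilterResult, not a dict
        if result.has_error:
            # the diff aborts the run with a localized "quick_filter_failed" message
            raise KeyboardInterrupt(
                printer.get_message_from_key_with_format(
                    "quick_filter_failed", error=result.error_message
                )
            )
        final_files.update(result.files)  # merge rather than overwrite earlier selections
        return final_files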
autocoder/index/filter/quick_filter.py CHANGED
@@ -1,4 +1,5 @@
-from typing import List, Union, Dict, Any
+from typing import List, Union, Dict, Any, Optional
+from pydantic import BaseModel
 from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
 from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
 from byzerllm.utils.str2model import to_model
@@ -26,6 +27,11 @@ def get_file_path(file_path):
     return file_path


+class QuickFilterResult(BaseModel):
+    files: Dict[str, TargetFile]
+    has_error: bool
+    error_message: Optional[str] = None
+
 class QuickFilter():
     def __init__(self, index_manager: IndexManager,stats:Dict[str,Any],sources:List[SourceCode]):
         self.index_manager = index_manager
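The new QuickFilterResult is a small pydantic model. Below is a self-contained sketch of how two results might be combined; TargetFile is reduced here to a minimal stand-in for the real class defined elsewhere in autocoder:

    from typing import Dict, Optional
    from pydantic import BaseModel

    class TargetFile(BaseModel):          # simplified stand-in, not autocoder's real TargetFile
        file_path: str
        reason: str = ""

    class QuickFilterResult(BaseModel):   # mirrors the model added in this release
        files: Dict[str, TargetFile]
        has_error: bool
        error_message: Optional[str] = None

    a = QuickFilterResult(files={"a.py": TargetFile(file_path="a.py")}, has_error=False)
    b = QuickFilterResult(files={}, has_error=True, error_message="chunk 2 failed")

    merged = {**a.files, **b.files}       # later results win on duplicate paths
    print(sorted(merged), a.has_error or b.has_error)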
@@ -36,11 +42,7 @@ class QuickFilter():
         self.max_tokens = self.args.index_filter_model_max_input_length


-    def big_filter(self, index_items: List[IndexItem],) -> Dict[str, TargetFile]:
-
-        final_files: Dict[str, TargetFile] = {}
-        final_files_lock = threading.Lock()
-
+    def big_filter(self, index_items: List[IndexItem],) -> QuickFilterResult:
         chunks = []
         current_chunk = []

@@ -70,9 +72,10 @@
             split_size=len(chunks)
         )

-        def process_chunk(chunk_index: int, chunk: List[IndexItem]) -> None:
+        def process_chunk(chunk_index: int, chunk: List[IndexItem]) -> QuickFilterResult:
             try:
                 model_name = ",".join(get_llm_names(self.index_manager.index_filter_llm))
+                files: Dict[str, TargetFile] = {}

                 if chunk_index == 0:
                     # The first chunk uses streaming output
@@ -93,13 +96,16 @@
                     file_number_list = self.quick_filter_files.with_llm(self.index_manager.index_filter_llm).with_return_type(FileNumberList).run(chunk, self.args.query)

                 if file_number_list:
-                    with final_files_lock:
-                        for file_number in file_number_list.file_list:
-                            file_path = get_file_path(chunk[file_number].module_name)
-                            final_files[file_path] = TargetFile(
-                                file_path=chunk[file_number].module_name,
-                                reason=self.printer.get_message_from_key("quick_filter_reason")
-                            )
+                    for file_number in file_number_list.file_list:
+                        file_path = get_file_path(chunk[file_number].module_name)
+                        files[file_path] = TargetFile(
+                            file_path=chunk[file_number].module_name,
+                            reason=self.printer.get_message_from_key("quick_filter_reason")
+                        )
+                return QuickFilterResult(
+                    files=files,
+                    has_error=False
+                )

             except Exception as e:
                 self.printer.print_in_terminal(
@@ -107,18 +113,40 @@
                     style="red",
                     error=str(e)
                 )
+                return QuickFilterResult(
+                    files={},
+                    has_error=True,
+                    error_message=str(e)
+                )

+        results: List[QuickFilterResult] = []
         if chunks:
             with ThreadPoolExecutor() as executor:
-                # Submit all chunks to the thread pool
+                # Submit all chunks to the thread pool and collect the results
                 futures = [executor.submit(process_chunk, i, chunk)
                            for i, chunk in enumerate(chunks)]

-                # Wait for all tasks to finish
+                # Wait for all tasks to finish and collect the results
                 for future in futures:
-                    future.result()
-
-        return final_files
+                    results.append(future.result())
+
+        # Merge all results
+        final_files: Dict[str, TargetFile] = {}
+        has_error = False
+        error_messages: List[str] = []
+
+        for result in results:
+            if result.has_error:
+                has_error = True
+                if result.error_message:
+                    error_messages.append(result.error_message)
+            final_files.update(result.files)
+
+        return QuickFilterResult(
+            files=final_files,
+            has_error=has_error,
+            error_message="\n".join(error_messages) if error_messages else None
+        )

     @byzerllm.prompt()
     def quick_filter_files(self,file_meta_list:List[IndexItem],query:str) -> str:
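With this rework, big_filter no longer mutates a shared dict behind a lock; each worker returns its own QuickFilterResult and the results are merged after the pool finishes. A generic, self-contained sketch of that fan-out/merge pattern (the chunk contents and per-chunk work are placeholders, not the real LLM call):

    from concurrent.futures import ThreadPoolExecutor
    from typing import Dict, List

    def process_chunk(index: int, chunk: List[str]) -> Dict[str, str]:
        # placeholder for the per-chunk LLM call; returns the files this chunk selected
        return {name: f"selected by chunk {index}" for name in chunk}

    chunks = [["a.py", "b.py"], ["c.py"]]

    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(process_chunk, i, chunk) for i, chunk in enumerate(chunks)]
        results = [future.result() for future in futures]  # collect results instead of sharing state

    merged: Dict[str, str] = {}
    for result in results:
        merged.update(result)  # dict.update: later chunks win on duplicate keys
    print(merged)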
@@ -161,8 +189,8 @@
         }
         return context

-    def filter(self, index_items: List[IndexItem], query: str) -> Dict[str, TargetFile]:
-        final_files: Dict[str, TargetFile] = {}
+    def filter(self, index_items: List[IndexItem], query: str) -> QuickFilterResult:
+        final_files: Dict[str, TargetFile] = {}
         start_time = time.monotonic()

         prompt_str = self.quick_filter_files.prompt(index_items,query)
@@ -217,7 +245,11 @@
                 style="red",
                 error=str(e)
             )
-            return final_files
+            return QuickFilterResult(
+                files=final_files,
+                has_error=True,
+                error_message=str(e)
+            )

         if file_number_list:
             for file_number in file_number_list.file_list:
@@ -227,4 +259,7 @@
                 )
         end_time = time.monotonic()
         self.stats["timings"]["quick_filter"] = end_time - start_time
-        return final_files
+        return QuickFilterResult(
+            files=final_files,
+            has_error=False
+        )
autocoder/rag/doc_filter.py CHANGED
@@ -64,7 +64,7 @@ class DocFilter:
         self.recall_llm = self.llm

         self.args = args
-        self.relevant_score = self.args.rag_doc_filter_relevance or 5
+        self.relevant_score = self.args.rag_doc_filter_relevance
         self.on_ray = on_ray
         self.path = path

@@ -140,6 +140,7 @@ class DocFilter:
                 f" - File: {doc.module_name}\n"
                 f" - Relevance: {'Relevant' if relevance and relevance.is_relevant else 'Not Relevant'}\n"
                 f" - Score: {relevance.relevant_score if relevance else 'N/A'}\n"
+                f" - Score Threshold: {self.relevant_score}\n"
                 f" - Raw Response: {v}\n"
                 f" - Timing:\n"
                 f" * Total Duration: {task_timing.duration:.2f}s\n"
autocoder/rag/long_context_rag.py CHANGED
@@ -449,20 +449,7 @@ class LongContextRAG:

         if highly_relevant_docs:
             relevant_docs = highly_relevant_docs
-            logger.info(f"Found {len(relevant_docs)} highly relevant documents")
-        else:
-            if relevant_docs:
-                prefix_chunk = FilterDoc(
-                    source_code=SourceCode(
-                        module_name="Special note",
-                        source_code="No particularly relevant content was found; the following documents are only somewhat related. Before answering based on them, you need to tell the user this in advance.",
-                    ),
-                    relevance=DocRelevance(False, 0),
-                )
-                relevant_docs.insert(0, prefix_chunk)
-                logger.info(
-                    "No highly relevant documents found. Added a prefix chunk to indicate this."
-                )
+            logger.info(f"Found {len(relevant_docs)} highly relevant documents")

         logger.info(
             f"Filter time: {filter_time:.2f} seconds with {len(relevant_docs)} docs"
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.253"
+__version__ = "0.1.254"