auto-coder 0.1.259__py3-none-any.whl → 0.1.260__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder has been flagged as potentially problematic; review the change details below before upgrading.

@@ -0,0 +1,115 @@
1
+ import os
2
+ import json
3
+ import time
4
+ from typing import List, Dict, Any, Optional
5
+ from pydantic import BaseModel, Field
6
+
7
class ResultItem(BaseModel):
    """Data model for a single result record persisted to the JSONL file."""
    # The textual payload of the record.
    content: str = Field(..., description="结果内容")
    # Free-form metadata attached to the record; defaults to an empty dict.
    meta: Dict[str, Any] = Field(default_factory=dict, description="元数据信息")
    # Unix timestamp (seconds) captured at record-creation time.
    # NOTE: the field name shadows the imported `time` module inside the
    # class body; the lambda still resolves `time` correctly at call time.
    time: int = Field(default_factory=lambda: int(time.time()), description="记录时间戳")

    class Config:
        # Allow non-pydantic types inside `meta` values without validation errors.
        arbitrary_types_allowed = True
class ResultManager:
    """Result manager backed by an append-only JSONL file.

    Records are stored one JSON document per line under
    ``<source_dir>/.auto-coder/results/results.jsonl``.
    """

    def __init__(self, source_dir: Optional[str] = None):
        """
        Initialize the result manager.

        Args:
            source_dir: Optional source directory; defaults to the current
                working directory when not provided.
        """
        self.source_dir = source_dir or os.getcwd()
        self.result_dir = os.path.join(self.source_dir, ".auto-coder", "results")
        self.result_file = os.path.join(self.result_dir, "results.jsonl")
        # Create the storage directory up front so append() can open the
        # file without any extra existence checks.
        os.makedirs(self.result_dir, exist_ok=True)

    def append(self, content: str, meta: Optional[Dict[str, Any]] = None) -> ResultItem:
        """
        Append a new result record.

        Args:
            content: Result content.
            meta: Optional metadata for the record.

        Returns:
            ResultItem: The newly created record (timestamped at creation).
        """
        result_item = ResultItem(
            content=content,
            meta=meta or {},
        )

        with open(self.result_file, "a", encoding="utf-8") as f:
            f.write(result_item.model_dump_json() + "\n")

        return result_item

    def add_result(self, content: str, meta: Optional[Dict[str, Any]] = None) -> ResultItem:
        """Alias of :meth:`append`, kept for backward compatibility."""
        return self.append(content, meta)

    def get_last(self) -> Optional[ResultItem]:
        """
        Get the most recent record.

        Returns:
            Optional[ResultItem]: The last record, or None when the file does
            not exist or contains no non-empty line.
        """
        if not os.path.exists(self.result_file):
            return None

        with open(self.result_file, "r", encoding="utf-8") as f:
            lines = f.readlines()
        # Scan backwards past blank lines (consistent with get_all, which
        # skips them); the original parsed lines[-1] unconditionally and
        # raised a validation error on a trailing empty line.
        for raw in reversed(lines):
            stripped = raw.strip()
            if stripped:
                return ResultItem.model_validate_json(stripped)
        return None

    def get_all(self) -> List[ResultItem]:
        """
        Get every stored record.

        Returns:
            List[ResultItem]: All records, in file (insertion) order.
        """
        if not os.path.exists(self.result_file):
            return []

        results = []
        with open(self.result_file, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:  # skip blank lines
                    results.append(ResultItem.model_validate_json(line))
        return results

    def get_by_time_range(self,
                          start_time: Optional[int] = None,
                          end_time: Optional[int] = None) -> List[ResultItem]:
        """
        Get records whose timestamp falls within [start_time, end_time].

        Args:
            start_time: Inclusive lower bound (Unix seconds); None = unbounded.
            end_time: Inclusive upper bound (Unix seconds); None = unbounded.

        Returns:
            List[ResultItem]: Records matching the time range.
        """
        results = []
        for item in self.get_all():
            # Use explicit None checks: a bound of 0 is a valid timestamp
            # and must not be treated as "no bound" (truthiness bug in the
            # original).
            if start_time is not None and item.time < start_time:
                continue
            if end_time is not None and item.time > end_time:
                continue
            results.append(item)
        return results

    def clear(self) -> None:
        """Remove the results file, discarding all records."""
        if os.path.exists(self.result_file):
            os.remove(self.result_file)
@@ -1,5 +1,5 @@
1
1
  from byzerllm import ByzerLLM,SimpleByzerLLM
2
- from typing import Generator, List, Any, Union
2
+ from typing import Generator, List, Any, Union, Optional, Callable
3
3
  from pydantic import BaseModel
4
4
  from loguru import logger
5
5
 
@@ -41,7 +41,7 @@ def chat_with_continue(llm: Union[ByzerLLM,SimpleByzerLLM], conversations: List[
41
41
  def stream_chat_with_continue(
42
42
  llm: Union[ByzerLLM, SimpleByzerLLM],
43
43
  conversations: List[dict],
44
- llm_config: dict
44
+ llm_config: dict
45
45
  ) -> Generator[Any, None, None]:
46
46
  """
47
47
  流式处理并继续生成内容,直到完成。
@@ -145,7 +145,8 @@ class ActionTSProject(BaseAction):
145
145
  input_cost=input_tokens_cost,
146
146
  output_cost=generated_tokens_cost,
147
147
  speed=round(speed, 2),
148
- model_names=model_names
148
+ model_names=model_names,
149
+ sampling_count=len(generate_result.contents)
149
150
  )
150
151
  merge_result = None
151
152
  if args.execute and args.auto_merge:
@@ -245,7 +246,8 @@ class ActionPyScriptProject(BaseAction):
245
246
  input_cost=input_tokens_cost,
246
247
  output_cost=generated_tokens_cost,
247
248
  speed=round(speed, 2),
248
- model_names=model_names
249
+ model_names=model_names,
250
+ sampling_count=len(generate_result.contents)
249
251
  )
250
252
  merge_result = None
251
253
  if args.execute and args.auto_merge:
@@ -370,7 +372,8 @@ class ActionPyProject(BaseAction):
370
372
  input_cost=input_tokens_cost,
371
373
  output_cost=generated_tokens_cost,
372
374
  speed=round(speed, 2),
373
- model_names=model_names
375
+ model_names=model_names,
376
+ sampling_count=len(generate_result.contents)
374
377
  )
375
378
  merge_result = None
376
379
  if args.execute and args.auto_merge:
@@ -485,7 +488,8 @@ class ActionSuffixProject(BaseAction):
485
488
  input_cost=input_tokens_cost,
486
489
  output_cost=generated_tokens_cost,
487
490
  speed=round(speed, 2),
488
- model_names=model_names
491
+ model_names=model_names,
492
+ sampling_count=len(generate_result.contents)
489
493
  )
490
494
  merge_result = None
491
495
  if args.execute and args.auto_merge:
@@ -100,7 +100,8 @@ class ActionRegexProject:
100
100
  input_cost=input_tokens_cost,
101
101
  output_cost=generated_tokens_cost,
102
102
  speed=round(speed, 2),
103
- model_names=model_names
103
+ model_names=model_names,
104
+ sampling_count=len(generate_result.contents)
104
105
  )
105
106
  merge_result = None
106
107
  if args.execute and args.auto_merge:
@@ -19,6 +19,7 @@ from concurrent.futures import ThreadPoolExecutor
19
19
  from byzerllm import MetaHolder
20
20
 
21
21
  from autocoder.utils.llms import get_llm_names, get_model_info
22
+ from loguru import logger
22
23
 
23
24
 
24
25
  def get_file_path(file_path):
@@ -308,13 +309,22 @@ class QuickFilter():
308
309
  {}
309
310
  )
310
311
 
312
+ def extract_file_number_list(content: str) -> str:
313
+ try:
314
+ v = to_model(content, FileNumberList)
315
+ return "\n".join([index_items[file_number].module_name for file_number in v.file_list])
316
+ except Exception as e:
317
+ logger.error(f"Error extracting file number list: {e}")
318
+ return content
319
+
311
320
  # 获取完整响应
312
321
  full_response, last_meta = stream_out(
313
322
  stream_generator,
314
323
  model_name=model_name,
315
324
  title=self.printer.get_message_from_key_with_format(
316
325
  "quick_filter_title", model_name=model_name),
317
- args=self.args
326
+ args=self.args,
327
+ display_func=extract_file_number_list
318
328
  )
319
329
  # 解析结果
320
330
  file_number_list = to_model(full_response, FileNumberList)
@@ -335,6 +345,7 @@ class QuickFilter():
335
345
  # 四舍五入到4位小数
336
346
  total_input_cost = round(total_input_cost, 4)
337
347
  total_output_cost = round(total_output_cost, 4)
348
+ speed = last_meta.generated_tokens_count / (end_time - start_time)
338
349
 
339
350
  # 打印 token 统计信息和成本
340
351
  self.printer.print_in_terminal(
@@ -345,7 +356,8 @@ class QuickFilter():
345
356
  output_tokens=last_meta.generated_tokens_count,
346
357
  input_cost=total_input_cost,
347
358
  output_cost=total_output_cost,
348
- model_names=model_name
359
+ model_names=model_name,
360
+ speed=f"{speed:.2f}"
349
361
  )
350
362
 
351
363
  except Exception as e:
@@ -6,7 +6,7 @@ from rich.markdown import Markdown
6
6
  from rich.layout import Layout
7
7
  from threading import Thread, Lock
8
8
  from queue import Queue, Empty
9
- from typing import Generator, List, Dict, Any, Optional, Tuple, Literal
9
+ from typing import Generator, List, Dict, Any, Optional, Tuple, Callable
10
10
  from autocoder.utils.request_queue import RequestValue, RequestOption, StreamValue
11
11
  from autocoder.utils.request_queue import request_queue
12
12
  import time
@@ -148,7 +148,9 @@ def stream_out(
148
148
  console: Optional[Console] = None,
149
149
  model_name: Optional[str] = None,
150
150
  title: Optional[str] = None,
151
- args: Optional[AutoCoderArgs] = None
151
+ final_title: Optional[str] = None,
152
+ args: Optional[AutoCoderArgs] = None,
153
+ display_func: Optional[Callable] = None
152
154
  ) -> Tuple[str, Optional[SingleOutputMeta]]:
153
155
  """
154
156
  处理流式输出事件并在终端中展示
@@ -179,6 +181,7 @@ def stream_out(
179
181
  assistant_response = ""
180
182
  last_meta = None
181
183
  panel_title = title if title is not None else f"Response[ {model_name} ]"
184
+ final_panel_title = final_title if final_title is not None else title
182
185
  first_token_time = 0.0
183
186
  first_token_time_start = time.time()
184
187
  try:
@@ -250,8 +253,8 @@ def stream_out(
250
253
  value=StreamValue(value=[content]),
251
254
  status=RequestOption.RUNNING,
252
255
  ),
253
- )
254
-
256
+ )
257
+
255
258
  live.update(
256
259
  Panel(
257
260
  Markdown(display_content),
@@ -266,10 +269,14 @@ def stream_out(
266
269
  lines_buffer.append(current_line)
267
270
 
268
271
  # 最终显示结果
272
+ final_display_content = assistant_response
273
+ if display_func:
274
+ final_display_content = display_func(assistant_response)
275
+
269
276
  live.update(
270
277
  Panel(
271
- Markdown(assistant_response),
272
- title=f"Final {panel_title}",
278
+ Markdown(final_display_content),
279
+ title=f"{final_panel_title}",
273
280
  border_style="blue"
274
281
  )
275
282
  )
@@ -176,6 +176,7 @@ def run_in_raw_thread():
176
176
  exception = []
177
177
  def worker():
178
178
  try:
179
+ # global_cancel.reset()
179
180
  ret = func(*args, **kwargs)
180
181
  result.append(ret)
181
182
  global_cancel.reset()
@@ -196,6 +197,9 @@ def run_in_raw_thread():
196
197
  except KeyboardInterrupt:
197
198
  global_cancel.set()
198
199
  raise KeyboardInterrupt("Task was cancelled by user")
200
+ except Exception as e:
201
+ global_cancel.reset()
202
+ raise
199
203
 
200
204
  return wrapper
201
205
  return decorator
# autocoder/version.py — single source of truth for the package version
# (bumped from 0.1.259 to 0.1.260 in this release).
__version__ = "0.1.260"