auto-coder 0.1.278__py3-none-any.whl → 0.1.280__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic; see the registry's advisory page for more details.

@@ -34,6 +34,16 @@ class DocFilterResult(BaseModel):
34
34
  model_name: str = "unknown"
35
35
 
36
36
 
37
class ProgressUpdate:
    """A progress update emitted during document processing.

    Plain mutable record; attributes mirror the constructor arguments and are
    consumed by progress reporters.
    """

    def __init__(self, phase: str, completed: int, total: int, relevant_count: int, message: str):
        self.phase = phase  # current processing phase, e.g. "doc_filter", "token_check"
        self.completed = completed  # number of tasks finished so far
        self.total = total  # total number of tasks
        self.relevant_count = relevant_count  # relevant documents found so far
        self.message = message  # human-readable progress message

    def __repr__(self) -> str:
        # Added for debuggability; backward-compatible, no caller relies on it.
        return (f"{type(self).__name__}(phase={self.phase!r}, "
                f"completed={self.completed!r}, total={self.total!r}, "
                f"relevant_count={self.relevant_count!r}, message={self.message!r})")
45
+
46
+
37
47
  def parse_relevance(text: Optional[str]) -> Optional[DocRelevance]:
38
48
  if text is None:
39
49
  return None
@@ -0,0 +1,193 @@
1
+ import inspect
2
+
3
def stream_with_thinking(response):
    """
    Process an OpenAI streaming response that may contain regular content and
    reasoning_content ("thinking") deltas.

    Args:
        response: An OpenAI streaming response (iterable of chunks).

    Yields:
        str: Formatted output; thinking text is wrapped between a
        "<thinking>" start marker and a "</thinking>" end marker.
    """
    start_mark = "<thinking>\n"
    end_mark = "\n</thinking>\n"
    is_thinking = False  # True while we are inside an open thinking section

    for chunk in response:
        # Some providers emit chunks with an empty choices list (e.g.
        # usage-only chunks); skip them instead of raising IndexError.
        if not chunk.choices:
            continue
        delta = chunk.choices[0].delta

        # Regular content: close any open thinking section first.
        if getattr(delta, 'content', None):
            if is_thinking:
                yield end_mark
                is_thinking = False
            yield delta.content
        # Thinking content: open a thinking section on first occurrence.
        elif getattr(delta, 'reasoning_content', None):
            if not is_thinking:
                yield start_mark
                is_thinking = True
            yield delta.reasoning_content

    # Ensure the thinking section is closed when the stream ends mid-thought.
    if is_thinking:
        yield end_mark
41
async def stream_with_thinking_async(response):
    """
    Process an OpenAI async streaming response that may contain regular
    content and reasoning_content ("thinking") deltas.

    Args:
        response: An OpenAI async streaming response.

    Yields:
        str: Formatted output; thinking text is wrapped between a
        "<thinking>" start marker and a "</thinking>" end marker.
    """
    start_mark = "<thinking>\n"
    end_mark = "\n</thinking>\n"
    is_thinking = False  # True while we are inside an open thinking section

    async for chunk in response:
        # Some providers emit chunks with an empty choices list (e.g.
        # usage-only chunks); skip them instead of raising IndexError.
        if not chunk.choices:
            continue
        delta = chunk.choices[0].delta

        # Regular content: close any open thinking section first.
        if getattr(delta, 'content', None):
            if is_thinking:
                yield end_mark
                is_thinking = False
            yield delta.content
        # Thinking content: open a thinking section on first occurrence.
        elif getattr(delta, 'reasoning_content', None):
            if not is_thinking:
                yield start_mark
                is_thinking = True
            yield delta.reasoning_content

    # Ensure the thinking section is closed when the stream ends mid-thought.
    if is_thinking:
        yield end_mark
79
def process_streaming_response(response):
    """
    Dispatch a streaming response to the matching formatter.

    Detects whether *response* is an async generator and routes it to
    stream_with_thinking_async, otherwise to the synchronous
    stream_with_thinking. When the async variant is selected, the returned
    generator must be consumed in an async context.

    Args:
        response: An OpenAI streaming response.

    Returns:
        A generator or async generator that yields formatted output.
    """
    handler = stream_with_thinking_async if inspect.isasyncgen(response) else stream_with_thinking
    return handler(response)
94
+
95
def print_streaming_response(response):
    """
    Print a streaming response to stdout with thinking sections clearly
    marked, flushing after each piece so output appears incrementally.

    Args:
        response: An OpenAI streaming response.
    """
    for piece in stream_with_thinking(response):
        print(piece, end="", flush=True)
104
+
105
async def print_streaming_response_async(response):
    """
    Print an async streaming response to stdout with thinking sections
    clearly marked, flushing after each piece so output appears
    incrementally.

    Args:
        response: An OpenAI async streaming response.
    """
    async for piece in stream_with_thinking_async(response):
        print(piece, end="", flush=True)
114
+
115
def separate_stream_thinking(response):
    """
    Split an OpenAI streaming response into two generators: one yielding
    thinking (reasoning_content) text and one yielding regular content.

    The thinking generator must be exhausted before the content generator is
    consumed: it stops at the first regular-content chunk and hands that
    chunk over to the content generator via ``pending_content_chunk``.

    Args:
        response: An OpenAI streaming response (generator).

    Returns:
        tuple: (thinking_generator, content_generator)
    """
    pending_content_chunk = None  # first content chunk seen while reading thinking

    def thinking_generator():
        nonlocal pending_content_chunk

        for chunk in response:
            # Skip choiceless chunks (e.g. usage-only chunks) to avoid IndexError.
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta
            if getattr(delta, 'reasoning_content', None):
                yield delta.reasoning_content
            elif getattr(delta, 'content', None):
                # Regular content starts here: stash exactly one chunk and stop.
                pending_content_chunk = chunk
                break

    def content_generator():
        nonlocal pending_content_chunk

        # First emit the chunk the thinking generator stashed, if any.
        if pending_content_chunk is not None:
            yield pending_content_chunk.choices[0].delta.content
            pending_content_chunk = None

        # Drain the rest of the response. Use getattr: some deltas carry no
        # ``content`` attribute at all (bug fix: the original accessed
        # ``.content`` directly here and could raise AttributeError).
        for chunk in response:
            if not chunk.choices:
                continue
            if getattr(chunk.choices[0].delta, 'content', None):
                yield chunk.choices[0].delta.content

    return thinking_generator(), content_generator()
154
+
155
async def separate_stream_thinking_async(response):
    """
    Split an OpenAI async streaming response into two async generators: one
    yielding thinking (reasoning_content) text and one yielding regular
    content.

    The thinking generator must be exhausted before the content generator is
    consumed: it stops at the first regular-content chunk and hands that
    chunk over to the content generator via ``pending_content_chunk``.

    Args:
        response: An OpenAI async streaming response.

    Returns:
        tuple: (thinking_generator, content_generator)
    """
    pending_content_chunk = None  # first content chunk seen while reading thinking

    async def thinking_generator():
        nonlocal pending_content_chunk

        async for chunk in response:
            # Skip choiceless chunks (e.g. usage-only chunks) to avoid IndexError.
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta
            if getattr(delta, 'reasoning_content', None):
                yield delta.reasoning_content
            elif getattr(delta, 'content', None):
                # Regular content starts here: stash exactly one chunk and stop.
                pending_content_chunk = chunk
                break

    async def content_generator():
        nonlocal pending_content_chunk

        # First emit the chunk the thinking generator stashed, if any.
        if pending_content_chunk is not None:
            yield pending_content_chunk.choices[0].delta.content
            pending_content_chunk = None

        # Drain the rest of the response. Use getattr: some deltas carry no
        # ``content`` attribute at all (bug fix: the original accessed
        # ``.content`` directly here and could raise AttributeError).
        async for chunk in response:
            if not chunk.choices:
                continue
            if getattr(chunk.choices[0].delta, 'content', None):
                yield chunk.choices[0].delta.content

    return thinking_generator(), content_generator()
autocoder/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.1.278"
1
+ __version__ = "0.1.280"