pycoze 0.1.409__py3-none-any.whl → 0.1.410__py3-none-any.whl

This diff compares two package versions that were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in those public registries.
pycoze/bot/chat_base copy.py ADDED
@@ -0,0 +1,313 @@
+import json
+import os
+from .lib import get_formatted_filelist_str, read_local_file, resolve_relative_path
+from .message import info
+from pycoze.ai import chat_stream_async, extract
+from .tools import ToolExecutor
+from typing import List
+
+def guess_files_in_message(cwd: str, user_message: str) -> List[str]:
+    try:
+        value = extract(
+            {"includedFiles": ["relative path format", "relative path format", "..."]},
+            'Please find the files mentioned in the text. If none, return {"includedFiles": []}:\n'
+            + user_message,
+        )
+        return [resolve_relative_path(cwd, p) for p in value["includedFiles"]]
+    except:
+        print("Failed to guess files in message")
+        return []
+
+
+def format_content(cwd, potential_paths, conversation_history):
+    content = []
+    for file_path in potential_paths:
+        file_path = resolve_relative_path(cwd, file_path)
+        if os.path.isfile(file_path):
+            file_marker = f"[[{file_path}]]'"
+            file_content = read_local_file(file_path)
+            if not any(
+                file_marker in msg["content"] for msg in conversation_history
+            ):
+                content.append(f"{file_marker}\n{file_content}")
+    return (
+        "Partial contents of files are as follows:" + "\n".join(content)
+        if content
+        else ""
+    )
+
+def generate_user_task_prompt(conversation_history, cwd, user_input: str, bot_setting_file:str):
+    # Need to re-read openedFiles and activeFile
+    with open(bot_setting_file, encoding="utf-8") as f:
+        bot_setting = json.load(f)
+    folder_context = bot_setting["folderContext"]
+    opened_files = bot_setting["openedFiles"]
+    active_file = bot_setting["activeFile"]
+
+    if folder_context:
+        potential_paths = guess_files_in_message(cwd, user_input)
+
+        existing_files = get_formatted_filelist_str(cwd, True, 200)
+
+        active_file_str = f"Currently viewing: {active_file}" if active_file else ""
+
+        opened_files_str = (
+            f"Open tabs:\n{'\n'.join(opened_files)}" if opened_files else ""
+        )
+        existing_files_str = f"Files in directory:\n{existing_files}" if existing_files else ""
+        return f"""<task>
+{user_input}
+</task>
+
+<environment_details>
+Current working directory: {cwd}
+
+{active_file_str}
+
+{opened_files_str}
+
+{existing_files_str}
+
+{format_content(cwd, potential_paths, conversation_history)}
+
+</environment_details>
+"""
+
+    else:
+        return f"""<task>
+{user_input}
+</task>
+"""
+
+
+def dumps_markdown_json(data):
+    json_str = json.dumps(data, indent=4, ensure_ascii=False)
+    return f"\n```json\n{json_str}\n```\n"
+
+
+
+import re
+
+import re
+
+async def stream_openai_response(conversation_history, start_new_stream):
+    """
+    Asynchronously stream the OpenAI chat-completion response and handle structured output
+    """
+    stream = None
+    buffer = ""
+    in_json_block = False
+    json_block_content = ""
+    text_content = ""
+
+    while True:
+        # Check whether the stream needs to be recreated
+        if stream is None or start_new_stream["value"]:
+            if stream is not None:
+                await stream.aclose()  # Close the previous stream
+            stream = chat_stream_async(conversation_history)  # Get a new async generator
+            start_new_stream["value"] = False  # Reset the flag
+            buffer = ""
+            in_json_block = False
+            json_block_content = ""
+            text_content = ""
+
+        # Iterate over the async generator with async for
+        try:
+            async for chunk in stream:
+                print("chunk", chunk)
+                # Check whether the stream needs to be recreated
+                if start_new_stream["value"]:
+                    break  # Exit the current async for loop and start the next while iteration
+
+                info("assistant", chunk)
+                buffer += chunk
+
+                # Process the contents of the buffer
+                while buffer:
+                    print("while buffer", buffer)
+                    if not in_json_block:
+                        # Look for the start marker of a JSON code block
+                        json_start_match = re.search(r"```json", buffer, re.IGNORECASE)
+                        if json_start_match:
+                            # Extract the text before the JSON code block
+                            text_content += buffer[:json_start_match.start()]
+                            # If text_content is not empty, yield the text first
+                            if text_content.strip():
+                                print("yield text", text_content.strip())
+                                yield ("text", text_content.strip())
+                                text_content = ""
+                            # Enter JSON code-block mode
+                            in_json_block = True
+                            buffer = buffer[json_start_match.end():]
+                        else:
+                            # No JSON code block found; keep accumulating into text_content
+                            text_content += buffer
+                            buffer = ""
+                    else:
+                        # Look for the end marker of the JSON code block
+                        json_end_match = re.search(r"```", buffer)
+                        if json_end_match:
+                            # Extract the JSON code-block content
+                            json_block_content += buffer[:json_end_match.start()]
+                            # Yield the JSON code block
+                            print("yield json", json_block_content.strip())
+                            yield ("json", json_block_content.strip())
+                            json_block_content = ""
+                            in_json_block = False
+                            buffer = buffer[json_end_match.end():]
+                        else:
+                            # No end marker found; keep accumulating JSON content
+                            json_block_content += buffer
+                            buffer = ""
+
+            # If the stream ended normally, exit the while loop
+            break
+
+        except Exception as e:
+            # Catch other exceptions (e.g. network errors)
+            print(f"Error: {e}", style="bold red")
+            break
+
+    # Process whatever is left in the buffer
+    if buffer:
+        if in_json_block:
+            buffer = buffer.split("```")[0]
+            json_block_content += buffer
+        else:
+            text_content += buffer
+
+    # Handle the remaining content after the stream ends
+    if in_json_block:
+        # Still in JSON code-block mode: handle the remaining JSON content
+        if json_block_content.strip():
+            print("yield json", json_block_content.strip())
+            yield ("json", json_block_content.strip())
+    else:
+        # Not in JSON code-block mode: handle the remaining text content
+        if text_content.strip():
+            print("yield json", text_content.strip())
+            yield ("text", text_content.strip())
+
+
+async def handle_user_inputs(
+    conversation_history, user_input, cwd, abilities, has_any_tool, bot_setting, bot_setting_file:str
+):
+    no_exit_if_incomplete = bot_setting["systemAbility"]["no_exit_if_incomplete"]
+    show_tool_results = bot_setting["showToolResults"]
+
+    start_new_stream = {
+        "value": False
+    }  # When the AI is about to execute JSON, i.e. new information is needed, force-stop the current stream to avoid wasting further tokens
+
+    print("Processing user command", user_input)
+    if user_input.lower() in ["exit", "quit"]:
+        exit(0)
+    # Append the user message to the conversation history
+    conversation_history.append(
+        {
+            "role": "user",
+            "content": generate_user_task_prompt(
+                conversation_history, cwd, user_input, bot_setting_file
+            ),
+        }
+    )
+    need_break = False
+
+    if no_exit_if_incomplete:
+        okay_str = 'Okay, please continue. If the tasks within <task>...task content...</task> have been completed, execute the tool "complete_all_tasks". If you have a question, use "ask_follow_up_question".'
+    else:
+        okay_str = "Okay"
+    while True:
+        async for response in stream_openai_response(
+            conversation_history, start_new_stream
+        ):
+            if len(response) == 2:
+                if (
+                    response[0] == "text"
+                    and response[1].strip() != ""
+                    or (response[0] == "json" and not has_any_tool)
+                ):
+                    if response[0] == 'text':
+                        conversation_history.append(
+                            {"role": "assistant", "content": response[1]}
+                        )
+                    else:
+                        conversation_history.append(
+                            {"role": "assistant", "content": "\n```" + response[0] + "\n" + response[1] + "\n```\n"}
+                        )
+
+                    conversation_history.append(
+                        {
+                            "role": "user",
+                            "content": okay_str,
+                        }
+                    )
+                    continue
+                elif response[0] == "json":
+                    info("assistant", "\n")
+                    cleaned_content = response[1]
+                    try:
+                        tool_request = json.loads(cleaned_content)
+                        tool_name = list(tool_request.keys())[0]
+                    except json.JSONDecodeError as e:
+                        conversation_history.append(
+                            {
+                                "role": "assistant",
+                                "content": f"\n```json\n{cleaned_content}\n```\n",
+                            }
+                        )
+                        conversation_history.append(
+                            {
+                                "role": "user",
+                                "content": "Invalid JSON content:" + str(e),
+                            }
+                        )
+                        continue
+
+                    ok, is_json_dumps, result = ToolExecutor.execute_tool(
+                        cwd, tool_request, abilities
+                    )
+
+                    assistant_content = (
+                        "Executing tool: \n"
+                        + dumps_markdown_json(tool_request)
+                        + "\n\n[Tool Result Begin]\n"
+                        + result
+                        + "\n[Tool Result End]\n"
+                    )
+                    lang = "json" if is_json_dumps else "text"
+
+                    if show_tool_results:
+                        status_str = "✅\n" if ok else "❌\n"
+                        info("assistant", status_str + f"\n```{lang}\n" + result + "\n```\n\n")
+
+                    conversation_history.append(
+                        {"role": "assistant", "content": assistant_content}
+                    )
+                    if tool_name in ["complete_all_tasks", "ask_follow_up_question"]:
+                        need_break = True
+                        break
+                    else:
+                        conversation_history.append(
+                            {
+                                "role": "user",
+                                "content": okay_str,
+                            }
+                        )
+                        start_new_stream["value"] = True
+
+        if need_break:
+            break
+        if not no_exit_if_incomplete and not start_new_stream["value"]:
+            break
+    last_conversation = conversation_history[-1]
+    if last_conversation["role"] == 'user' and last_conversation["content"] == okay_str:
+        conversation_history.pop()
+
+# Example invocation
+# user_input_list = [
+#     "Visit https://api-docs.deepseek.com/zh-cn/guides/chat_prefix_completion, write some code based on it, and save it"
+# ]
+
+# asyncio.run(handle_user_inputs(user_input_list))
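
For orientation, here is a minimal sketch of the bot_setting JSON that the added module re-reads on every turn. The key names are taken from the reads in the code above (folderContext, openedFiles, activeFile, systemAbility.no_exit_if_incomplete, showToolResults); the example values, the temp-file handling, and the commented call shape are assumptions for illustration, not pycoze's documented schema.

```python
# Illustrative sketch only; keys mirror the lookups in chat_base copy.py above.
import json
import tempfile

bot_setting = {
    "folderContext": True,           # include directory/file context in the prompt
    "openedFiles": ["src/app.py"],   # rendered as "Open tabs"
    "activeFile": "src/app.py",      # rendered as "Currently viewing"
    "systemAbility": {"no_exit_if_incomplete": True},
    "showToolResults": True,
}

# generate_user_task_prompt re-reads this file for each user message,
# so editor state can change between turns without restarting the bot.
with tempfile.NamedTemporaryFile(
    "w", suffix=".json", delete=False, encoding="utf-8"
) as f:
    json.dump(bot_setting, f, ensure_ascii=False)
    bot_setting_file = f.name

# Hypothetical call shape (requires pycoze to be importable):
# prompt = generate_user_task_prompt(
#     conversation_history=[], cwd=".", user_input="Summarize app.py",
#     bot_setting_file=bot_setting_file,
# )
print(bot_setting_file)
```
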
pycoze/bot/chat_base.py CHANGED
@@ -86,10 +86,6 @@ def dumps_markdown_json(data):
 
 
 
-import re
-
-import re
-
 async def stream_openai_response(conversation_history, start_new_stream):
     """
     Asynchronously stream the OpenAI chat-completion response and handle structured output
@@ -115,51 +111,31 @@ async def stream_openai_response(conversation_history, start_new_stream):
         # Iterate over the async generator with async for
         try:
             async for chunk in stream:
-                print("chunk", chunk)
+                info("assistant", chunk)
+                buffer += chunk
+
                 # Check whether the stream needs to be recreated
                 if start_new_stream["value"]:
                     break  # Exit the current async for loop and start the next while iteration
-
-                info("assistant", chunk)
-                buffer += chunk
 
-                # Process the contents of the buffer
-                while buffer:
-                    print("while buffer", buffer)
+                # Process the buffer line by line
+                while "\n" in buffer:
+                    line, buffer = buffer.split("\n", 1)
                     if not in_json_block:
-                        # Look for the start marker of a JSON code block
-                        json_start_match = re.search(r"```json", buffer, re.IGNORECASE)
-                        if json_start_match:
-                            # Extract the text before the JSON code block
-                            text_content += buffer[:json_start_match.start()]
-                            # If text_content is not empty, yield the text first
-                            if text_content.strip():
-                                print("yield text", text_content.strip())
+                        if line.strip().lower().startswith("```json"):
+                            if text_content:
                                 yield ("text", text_content.strip())
                                 text_content = ""
-                            # Enter JSON code-block mode
                             in_json_block = True
-                            buffer = buffer[json_start_match.end():]
                         else:
-                            # No JSON code block found; keep accumulating into text_content
-                            text_content += buffer
-                            buffer = ""
+                            text_content += line + "\n"
                     else:
-                        # Look for the end marker of the JSON code block
-                        json_end_match = re.search(r"```", buffer)
-                        if json_end_match:
-                            # Extract the JSON code-block content
-                            json_block_content += buffer[:json_end_match.start()]
-                            # Yield the JSON code block
-                            print("yield json", json_block_content.strip())
+                        if line.strip().lower().startswith("```") and in_json_block:
                             yield ("json", json_block_content.strip())
                             json_block_content = ""
                             in_json_block = False
-                            buffer = buffer[json_end_match.end():]
                         else:
-                            # No end marker found; keep accumulating JSON content
-                            json_block_content += buffer
-                            buffer = ""
+                            json_block_content += line + "\n"
 
             # If the stream ended normally, exit the while loop
             break
@@ -174,20 +150,11 @@ async def stream_openai_response(conversation_history, start_new_stream):
         if in_json_block:
             buffer = buffer.split("```")[0]
             json_block_content += buffer
+            yield ("json", json_block_content.strip())
         else:
             text_content += buffer
-
-    # Handle the remaining content after the stream ends
-    if in_json_block:
-        # Still in JSON code-block mode: handle the remaining JSON content
-        if json_block_content.strip():
-            print("yield json", json_block_content.strip())
-            yield ("json", json_block_content.strip())
-    else:
-        # Not in JSON code-block mode: handle the remaining text content
-        if text_content.strip():
-            print("yield json", text_content.strip())
-            yield ("text", text_content.strip())
+            if text_content:
+                yield ("text", text_content.strip())
 
 
 async def handle_user_inputs(
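
The substantive change in this file is the parsing strategy of stream_openai_response: instead of regex-scanning the whole accumulated buffer for JSON code-fence markers, the new version consumes the buffer one complete line at a time, switches into and out of JSON mode when a line opens or closes a fence, and flushes whatever remains once the stream ends. A self-contained sketch of that line-based splitting follows; the function name and the sample chunk sequence are illustrative only, not part of pycoze.

```python
from typing import Iterable, Iterator, Tuple


def split_stream(chunks: Iterable[str]) -> Iterator[Tuple[str, str]]:
    """Rough re-implementation of the new line-based fence parsing."""
    buffer, text, json_block, in_json = "", "", "", False
    for chunk in chunks:
        buffer += chunk
        while "\n" in buffer:                # only complete lines are inspected
            line, buffer = buffer.split("\n", 1)
            if not in_json:
                if line.strip().lower().startswith("```json"):
                    if text:                 # flush pending prose before the block
                        yield ("text", text.strip())
                        text = ""
                    in_json = True
                else:
                    text += line + "\n"
            else:
                if line.strip().startswith("```"):   # closing fence
                    yield ("json", json_block.strip())
                    json_block, in_json = "", False
                else:
                    json_block += line + "\n"
    # Flush the tail once the stream is exhausted, as the diff does after its loop.
    if in_json:
        json_block += buffer.split("```")[0]
        yield ("json", json_block.strip())
    else:
        text += buffer
        if text:
            yield ("text", text.strip())


# Chunks arrive with arbitrary boundaries, as they would from an LLM stream.
chunks = [
    "Here is the plan.\n``",
    '`json\n{"write_file": {"pa',
    'th": "a.txt"}}\n```\nDone.\n',
]
print(list(split_stream(chunks)))
# [('text', 'Here is the plan.'), ('json', '{"write_file": {"path": "a.txt"}}'), ('text', 'Done.')]
```
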
pycoze-0.1.409.dist-info/METADATA → pycoze-0.1.410.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: pycoze
-Version: 0.1.409
+Version: 0.1.410
 Summary: Package for pycoze only!
 Author: Yuan Jie Xiong
 Author-email: aiqqqqqqq@qq.com
pycoze-0.1.409.dist-info/RECORD → pycoze-0.1.410.dist-info/RECORD RENAMED
@@ -11,7 +11,8 @@ pycoze/api/lib/web.py,sha256=GWgtiTJOolKOX2drXcwuyqTcbo5FQVxa1NuBGcNyjyc,223
 pycoze/api/lib/window.py,sha256=dkzWfLwn5pE_L0DfQ38K8nx9tQyT5KO-GYyXi0rytFc,2073
 pycoze/bot/__init__.py,sha256=rL3Q-ycczRpSFfKn84fg3QBl5k22WpyeIU5qOEjEby8,79
 pycoze/bot/chat.py,sha256=qEuMxH0cVFU9QSU36FrOsjhRAxtsvOv7CQtuXvM3F6Y,6446
-pycoze/bot/chat_base.py,sha256=WZRB08zN4OeYAjSxccgu69Jb-k8tfbNwskqi5LmBF20,11979
+pycoze/bot/chat_base copy.py,sha256=WZRB08zN4OeYAjSxccgu69Jb-k8tfbNwskqi5LmBF20,11979
+pycoze/bot/chat_base.py,sha256=u2TDvXwtxcboXq8zixck1A6HHgQ1kDEMXFMON--fBLM,10314
 pycoze/bot/lib.py,sha256=_bQ52mKsWgFGAogFHnmRBJbvK_tPOwsAJ8NqJNMR5K4,7210
 pycoze/bot/message.py,sha256=udnIi-h4QgGzkbr_5VcAsVGjoLp9wXJSfBCeuOz7_Bk,802
 pycoze/bot/prompt.md,sha256=t7NQdiiNe-jCDVfeVbvTPfq5WK5nF8CxFUQUFMyXJlo,13880
@@ -31,8 +32,8 @@ pycoze/utils/arg.py,sha256=jop1tBfe5hYkHW1NSpCeaZBEznkgguBscj_7M2dWfrs,503
 pycoze/utils/env.py,sha256=5pWlXfM1F5ZU9hhv1rHlDEanjEW5wf0nbyez9bNRqqA,559
 pycoze/utils/socket.py,sha256=bZbFFRH4mfThzRqt55BAAGQ6eICx_ja4x8UGGrUdAm8,2428
 pycoze/utils/text_or_file.py,sha256=gpxZVWt2DW6YiEg_MnMuwg36VNf3TX383QD_1oZNB0Y,551
-pycoze-0.1.409.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
-pycoze-0.1.409.dist-info/METADATA,sha256=Lz-_MF5byYWxCT0jGA_J4ydXL3szf4XlNhJQYFwHZnw,854
-pycoze-0.1.409.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-pycoze-0.1.409.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
-pycoze-0.1.409.dist-info/RECORD,,
+pycoze-0.1.410.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
+pycoze-0.1.410.dist-info/METADATA,sha256=yZLpsyYmHf2ajNHg1Wz6CaIocYvs91QLKYLE_5MdZDY,854
+pycoze-0.1.410.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+pycoze-0.1.410.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
+pycoze-0.1.410.dist-info/RECORD,,