pycoze 0.1.386__py3-none-any.whl → 0.1.387__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in that registry.
- pycoze/api/lib/window.py +7 -0
- pycoze/bot/chat_base.py +10 -8
- pycoze/bot/lib.py +0 -1
- pycoze/bot/prompt.md +6 -31
- pycoze/bot/tools.py +0 -1
- {pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/METADATA +1 -1
- {pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/RECORD +10 -11
- pycoze/ai/llm/think.py +0 -131
- {pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/LICENSE +0 -0
- {pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/WHEEL +0 -0
- {pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/top_level.txt +0 -0
pycoze/api/lib/window.py
CHANGED
@@ -30,9 +30,16 @@ class WindowCls:
             "input", {"title": title, "message": message}
         )
 
+    def minimize(self):
+        socket.post("minimize", {})
+
     def maximize(self):
         socket.post("maximize", {})
 
+    def close_window(self):
+        socket.post("closeWindow", {})
+
+
     def get_slected_text(self) -> str:
         result = socket.post_and_recv_result("get-selected-text", {})
         return result
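The new `minimize` and `close_window` methods follow the same pattern as the existing `maximize`: each is a thin wrapper that posts an event name with an empty payload over the internal socket. A minimal sketch of that pattern, using a stand-in socket instead of pycoze's real helper (the names below are illustrative, not part of the package):

```python
# Sketch only: FakeSocket and WindowDemo are stand-ins for pycoze's socket
# helper and WindowCls, showing the event names added in 0.1.387.

class FakeSocket:
    def post(self, event: str, payload: dict) -> None:
        # The real helper sends this to the host window; here we just log it.
        print(f"post({event!r}, {payload!r})")

socket = FakeSocket()

class WindowDemo:
    # Mirrors the methods in the diff: each posts an event with an empty payload.
    def minimize(self) -> None:
        socket.post("minimize", {})

    def maximize(self) -> None:
        socket.post("maximize", {})

    def close_window(self) -> None:
        socket.post("closeWindow", {})

if __name__ == "__main__":
    w = WindowDemo()
    w.minimize()      # -> post('minimize', {})
    w.close_window()  # -> post('closeWindow', {})
```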
pycoze/bot/chat_base.py
CHANGED
@@ -5,16 +5,18 @@ from .message import info
 from pycoze.ai import chat_stream_async, extract
 from .tools import ToolExecutor
 from typing import List
-import time
 
 def guess_files_in_message(cwd: str, user_message: str) -> List[str]:
-
-
-
-
-
-
-
+    try:
+        value = extract(
+            {"includedFiles": ["relative path format", "relative path format", "..."]},
+            'Please find the files mentioned in the text. If none, return {"includedFiles": []}:\n'
+            + user_message,
+        )
+        return [resolve_relative_path(cwd, p) for p in value["includedFiles"]]
+    except:
+        print("Failed to guess files in message")
+        return []
 
 
 def user_task_prompt(conversation_history, cwd, user_input: str, programmer_mode: bool):
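The net effect of this change is that `guess_files_in_message` now degrades gracefully: if the `extract` call raises for any reason, the function logs a message and returns an empty list instead of propagating the error. A self-contained sketch of that behaviour, with stub versions of `extract` and `resolve_relative_path` standing in for the real pycoze implementations:

```python
# Sketch of the fallback path added in 0.1.387. extract() and
# resolve_relative_path() below are stubs, not the pycoze implementations.
import os
from typing import List

def extract(schema: dict, prompt: str) -> dict:
    raise RuntimeError("LLM unavailable")  # simulate a failed extraction

def resolve_relative_path(cwd: str, p: str) -> str:
    return p if os.path.isabs(p) else os.path.join(cwd, p)

def guess_files_in_message(cwd: str, user_message: str) -> List[str]:
    try:
        value = extract(
            {"includedFiles": ["relative path format", "..."]},
            'Please find the files mentioned in the text. If none, return {"includedFiles": []}:\n'
            + user_message,
        )
        return [resolve_relative_path(cwd, p) for p in value["includedFiles"]]
    except Exception:  # the package uses a bare except; Exception is used here for clarity
        print("Failed to guess files in message")
        return []

print(guess_files_in_message("/tmp", "please fix src/main.js"))  # -> []
```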
pycoze/bot/lib.py
CHANGED
@@ -75,7 +75,6 @@ def get_system_prompt(abilities, bot_setting):
         "cd_prompt": cd_prompt,
         "abilities_str": abilities_str,
         "no_exit_if_incomplete": False,
-        "allow_read_file": False,
         "allow_read_multiple_files": False,
         "allow_execute_command": False,
         "allow_write_or_overwrite_file": False,
pycoze/bot/prompt.md
CHANGED
@@ -20,12 +20,12 @@ Here's the translation in English:
 ```
 {% endif %}
 
-{% if
+{% if allow_read_multiple_files %}
 For example:
 ```json
 {
-  "
-  "
+  "read_multiple_files": {
+    "paths_list": ["src/main.js", "src/app.svelte", "src/styles.css"]
   }
 }
 ```
@@ -36,33 +36,6 @@ Always follow this format to ensure that tool usage is correctly parsed and exec
 ### Tools
 
 
-{% if allow_read_file %}
-#### Read File
-**Function Signature:**
-```python
-def read_file(path: str) -> str:
-    """
-    Reads the content of a file at a specified path. Use this tool when you need to inspect the contents of existing files, such as analyzing code, viewing text files, or extracting information from configuration files. Automatically extracts raw text from PDF and DOCX files. May not work well with other types of binary files as it returns raw content as a string.
-
-    Parameters:
-        path (str): The file path to read.
-
-    Returns:
-        str: The content of the file.
-    """
-    pass
-```
-
-**Usage:**
-```json
-{
-  "read_file": {
-    "path": "file_path"
-  }
-}
-```
-{% endif %}
-
 {% if allow_read_multiple_files %}
 
 #### Read Multiple Files
@@ -132,7 +105,7 @@ def write_or_overwrite_file(path: str, content: str) -> None:
 
     Parameters:
         path (str): The file path to write.
-        content (str): The content to write or overwrite into the file. Always provide the full content of the file, do not truncate or omit parts. You must include all sections of the file, even those that haven't been modified
+        content (str): The content to write or overwrite into the file. Always provide the full content of the file, do not truncate or omit parts. You must include all sections of the file, even those that haven't been modified!
 
     Returns:
         None
@@ -357,6 +330,8 @@ def complete_all_tasks(result: str, command: str = None) -> None:
 12. Never assume any outcome of tool usage—always wait for user confirmation.
 13. Be direct and technical in responses, avoiding unnecessary conversational elements.
 14. Always consider the broader context of the project and environment when making decisions.
+15. Before thinking about how to modify an existing code file, it is necessary to first read and understand the content of the existing file.
+16. You can't truncate or omit parts of the file content when writing or overwriting a file.
 
 
 ==== Goals
pycoze/bot/tools.py
CHANGED
@@ -256,7 +256,6 @@ class ToolExecutor:
 
     TOOL_MAP = {
         "execute_command": ExecuteCommandTool,
-        "read_file": ReadFileTool,
         "read_multiple_files": ReadMultipleFilesTool,
        "write_or_overwrite_file": WriteFileTool,
         "replace_part_of_a_file": ReplaceInFileTool,
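With `read_file` dropped from `TOOL_MAP`, a tool call whose top-level key is `read_file` no longer resolves to a handler; `read_multiple_files` is the remaining file-reading entry. The dispatch sketch below is an assumption about how such a name-to-class map is typically used, not the actual `ToolExecutor` code:

```python
# Hypothetical dispatch sketch: look up the tool call's top-level key in a
# name-to-class map, mirroring the TOOL_MAP structure shown in the diff.

class ReadMultipleFilesTool: ...
class WriteFileTool: ...

TOOL_MAP = {
    "read_multiple_files": ReadMultipleFilesTool,
    "write_or_overwrite_file": WriteFileTool,
}

def lookup(tool_call: dict):
    name = next(iter(tool_call))      # e.g. "read_multiple_files"
    cls = TOOL_MAP.get(name)
    if cls is None:
        # "read_file" now lands here, since it was removed from the map.
        raise KeyError(f"unknown tool: {name}")
    return cls

print(lookup({"read_multiple_files": {"paths_list": ["a.py"]}}))
```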
{pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/RECORD
CHANGED
@@ -4,20 +4,19 @@ pycoze/ai/vram_reserve.py,sha256=DRxKzqf89fLAts0DzU8e19z9kecIF8OdMkQnJlCKZV0,424
 pycoze/ai/llm/__init__.py,sha256=7qmligvCSneLx5AFCjKYfGURIiI4KlB4hE19SxIr-Xk,342
 pycoze/ai/llm/chat.py,sha256=sQZT0ImvRW81fXdlKG0ZrHdDB8g5M4iudaWdG4Kpd6Q,6373
 pycoze/ai/llm/text_to_image_prompt.py,sha256=0bx2C_YRvjAo7iphHGp1-pmGKsKqwur7dM0t3SiA8kA,3398
-pycoze/ai/llm/think.py,sha256=sUgTBdGzcZtL3r-Wx8M3lDuVUmDVz8g3qC0VU8uiKAI,5143
 pycoze/api/__init__.py,sha256=TLKvaZlRzTTt0KiXijLjj9b_iCr7fU1siwsXqyd74b8,375
 pycoze/api/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pycoze/api/lib/tab.py,sha256=DWO8ElI-VOODtIxqUFWaDB8VRrrFYAZRWivuIeD1wG0,2619
 pycoze/api/lib/view.py,sha256=_PIpTfeuTPPlMDKshMGsqFQYMq7ZiO4Hg5XwHwDoU60,7357
 pycoze/api/lib/web.py,sha256=GWgtiTJOolKOX2drXcwuyqTcbo5FQVxa1NuBGcNyjyc,223
-pycoze/api/lib/window.py,sha256=
+pycoze/api/lib/window.py,sha256=3Qu8AzDwiFK2gBzuSoBkLBXfk0_34343FR3bqa0jjnE,2074
 pycoze/bot/__init__.py,sha256=rL3Q-ycczRpSFfKn84fg3QBl5k22WpyeIU5qOEjEby8,79
 pycoze/bot/chat.py,sha256=pLq8RXxXAT43xMPwXwZTSquiX6hlRWjB9PWT_nMxzUw,5807
-pycoze/bot/chat_base.py,sha256=
-pycoze/bot/lib.py,sha256=
+pycoze/bot/chat_base.py,sha256=BDwJnirYhd05et-XD5L6Sx0EtDnb0OZ7QojjFpbG-_k,9718
+pycoze/bot/lib.py,sha256=mlNDSO7LwEJYG36h2pagESiEp4_aofv4sdNIe2wyLlQ,7214
 pycoze/bot/message.py,sha256=udnIi-h4QgGzkbr_5VcAsVGjoLp9wXJSfBCeuOz7_Bk,802
-pycoze/bot/prompt.md,sha256=
-pycoze/bot/tools.py,sha256=
+pycoze/bot/prompt.md,sha256=Km8GzmmC44HN0rnZh8txZxY52_0KWgyKh6GkjGQ3bCI,15379
+pycoze/bot/tools.py,sha256=X5gpYbiRQZMvduhfXZy8VGYwjXGw5ndqbGHb-gXTQIE,10276
 pycoze/reference/__init__.py,sha256=zgqGqvmA9HaqytEM33B6vi0kQVk8IiCwJaXa22xsFz8,114
 pycoze/reference/bot.py,sha256=UZK24Qm8kpqpwXJy_zNZeTEEDee05luXdSBeUm0NCt0,2029
 pycoze/reference/lib.py,sha256=T-oBOKxkus5dTouc0oDgfRzUyi6aTyY-FF4yX7SzF5M,3755
@@ -33,8 +32,8 @@ pycoze/utils/arg.py,sha256=jop1tBfe5hYkHW1NSpCeaZBEznkgguBscj_7M2dWfrs,503
 pycoze/utils/env.py,sha256=5pWlXfM1F5ZU9hhv1rHlDEanjEW5wf0nbyez9bNRqqA,559
 pycoze/utils/socket.py,sha256=bZbFFRH4mfThzRqt55BAAGQ6eICx_ja4x8UGGrUdAm8,2428
 pycoze/utils/text_or_file.py,sha256=gpxZVWt2DW6YiEg_MnMuwg36VNf3TX383QD_1oZNB0Y,551
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
+pycoze-0.1.387.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
+pycoze-0.1.387.dist-info/METADATA,sha256=qP9IaJUFRo7H940M_k04Uedy-AGs2w8HZXxlwS0d7_U,854
+pycoze-0.1.387.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+pycoze-0.1.387.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
+pycoze-0.1.387.dist-info/RECORD,,
pycoze/ai/llm/think.py
DELETED
@@ -1,131 +0,0 @@
-from pycoze import utils
-import json5
-import re
-from retrying import retry
-from openai import OpenAI
-import openai
-import requests
-import os
-
-
-def never_retry_on_rate_limit_error(exception):
-    """Return True if we should retry (in this case when it's NOT a RateLimitError), False otherwise"""
-    return not isinstance(exception, openai.RateLimitError)
-
-@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
-def think(user_text, history, temperature=0.2, stop=None, **kwargs):
-    user_msg = {"role": "user", "content": user_text}
-    cfg = utils.read_json_file("llm.json")
-
-    base_url = cfg["baseURL"]
-    api_key = cfg["apiKey"]
-    model = cfg["model"]
-
-    base_url = base_url.replace("/chat/completions", "")
-
-    client = OpenAI(api_key=api_key, base_url=base_url)
-
-    response = client.chat.completions.create(model=model,
-                                              messages=simple_history(history) + [user_msg],
-                                              temperature=temperature,
-                                              stop=stop)
-    return response.choices[0].message.content
-
-
-@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
-def think_stream(user_text, history, temperature=0.2, stop=None, **kwargs):
-    user_msg = {"role": "user", "content": user_text}
-    cfg = utils.read_json_file("llm.json")
-
-    base_url = cfg["baseURL"]
-    api_key = cfg["apiKey"]
-    model = cfg["model"]
-
-    base_url = base_url.replace("/chat/completions", "")
-
-    client = OpenAI(api_key=api_key, base_url=base_url)
-
-
-    stream = client.chat.completions.create(model=model,
-                                            messages=simple_history(history) + [user_msg],
-                                            stream=True,
-                                            temperature=temperature,
-                                            stop=stop)
-    for chunk in stream:
-        yield chunk.choices[0].delta.content or ""
-
-
-def simple_history(history):
-    return [{"role": h["role"], "content": h["content"]} for h in history]
-
-
-@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
-def extract(response_data, text: str, temperature=0, **kwargs):
-    """print(extract({"name": "lowercase"}, "hello XiaoMing"))"""
-    if isinstance(response_data, dict):
-        response_items = [[res, response_data[res]] for res in response_data]
-    else:
-        response_items = response_data
-
-    json_text = ""
-    for i, res in enumerate(response_items):
-        comma = "," if i != len(response_items) - 1 else ""
-        json_text += f' "{res[0]}": {res[1]}{comma}\n'
-
-    # Combine the provided text with the formatted JSON schema
-    think_text = f"""
-The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```" tags:
-```json
-{{
-{json_text}
-}}
-```
-
-Request:
-{text}
-"""
-    # text放后面,当翻译等情况时,不会把"The output should"之类翻译了,导致错误
-    markdown = think(think_text, [], temperature=temperature, **kwargs)
-    pattern = r'```json(.*?)```'
-    matches = re.findall(pattern, markdown, re.DOTALL)
-    if matches:
-        json_str = matches[0].strip()
-        # lines = [line.split("//")[0] for line in json_str.split("\n")]//这样当json中有//时会出错,例如https://
-        json_dict = json5.loads(json_str)
-        for item in response_items:
-            if item[0] not in json_dict:
-                raise "item:" + item + " not exists"
-        return json_dict
-
-
-def yes_or_no(question, temperature=0, **kwargs):
-    result = extract([("Result", "Yes or No")], question,
-                     temperature=temperature, **kwargs)["Result"]
-    if isinstance(result, bool):
-        return result
-    return result.upper() == "YES"
-
-
-@retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
-def extract_code(text: str, temperature=0, language="python", markdown_word='python', **kwargs):
-    """print(extract_code("sum 1~100"))"""
-    think_text = text + f"""
-The output should be a complete and usable {language} code snippet, including the leading and trailing "```{markdown_word}" and "```":
-"""
-    markdown = think(think_text, [], temperature=temperature, **kwargs)
-    # 使用正则表达式匹配围绕在```{markdown_word} 和 ```之间的文本
-    pattern = rf'```{markdown_word}(.*?)```'
-    matches = re.findall(pattern, markdown, re.DOTALL)
-    if matches:
-        # 去除可能的前后空白字符
-        return matches[0].strip()
-    else:
-        raise Exception("The string is not a valid python code.")
-
-
-if __name__ == "__main__":
-    print(think("你好", []))
-    for chunk in think_stream("你好", []):
-        print(chunk)
-    print(extract({"name": "lowercase"}, "hello XiaoMing"))
-    print(extract_code("sum 1~100"))
{pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/LICENSE
File without changes

{pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/WHEEL
File without changes

{pycoze-0.1.386.dist-info → pycoze-0.1.387.dist-info}/top_level.txt
File without changes