hjxdl 0.2.80__py3-none-any.whl → 0.2.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +2 -2
- hdl/utils/database_tools/web.py +20 -10
- hdl/utils/general/glob.py +17 -0
- hdl/utils/general/runners.py +8 -5
- hdl/utils/llm/chat.py +58 -5
- hdl/utils/llm/chatgr.py +4 -4
- {hjxdl-0.2.80.dist-info → hjxdl-0.2.82.dist-info}/METADATA +1 -1
- {hjxdl-0.2.80.dist-info → hjxdl-0.2.82.dist-info}/RECORD +10 -10
- {hjxdl-0.2.80.dist-info → hjxdl-0.2.82.dist-info}/WHEEL +0 -0
- {hjxdl-0.2.80.dist-info → hjxdl-0.2.82.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
hdl/utils/database_tools/web.py
CHANGED
@@ -58,11 +58,20 @@ def web_search_text(
 
 def fetch_baidu_results(query, max_n_links=3):
     """
-
-
-
-
+    Fetches search results from Baidu for a given query and retrieves the text content of the top results.
+    Args:
+        query (str): The search query to be sent to Baidu.
+        max_n_links (int, optional): The maximum number of search result links to fetch. Defaults to 3.
+    Returns:
+        str: The concatenated text content of the fetched web pages.
+    Raises:
+        requests.RequestException: If there is an issue with the HTTP request.
     """
+    try:
+        max_n_links = int(max_n_links)
+    except Exception as e:
+        print(e)
+        max_n_links = 3
     headers = {
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
     }

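The new docstring and the added int() coercion define the call contract for fetch_baidu_results. A minimal usage sketch (illustrative, not part of the diff; it assumes hjxdl >= 0.2.82 is installed and that outbound HTTP requests to Baidu are permitted, and the query string and link count are arbitrary examples):

```python
from hdl.utils.database_tools.web import fetch_baidu_results

# max_n_links may now arrive as a string (e.g. from an LLM tool call);
# the new try/except coerces it to int and falls back to 3 on failure.
text = fetch_baidu_results("python dataclasses tutorial", max_n_links="2")
print(text[:500])  # concatenated text of the fetched result pages
```
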
@@ -100,13 +109,14 @@ def fetch_baidu_results(query, max_n_links=3):
 
 def wolfram_alpha_calculate(query):
     """
-
-
-
-        query (str): The query string to send to Wolfram Alpha.
-
+    Sends a query to the Wolfram Alpha API and returns the result.
+    Args:
+        query (str): The query string to be sent to Wolfram Alpha.
     Returns:
-
+        str: The result of the query in plaintext format, or an error message if the query was unsuccessful or if an error occurred.
+    Raises:
+        requests.Timeout: If the request to Wolfram Alpha times out.
+        Exception: If any other error occurs during the request.
     """
     # Get the Wolfram Alpha App ID from environment variables
     app_id = os.getenv('WOLFRAM_APP_ID', None)

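Because the function reads its credential from the WOLFRAM_APP_ID environment variable (visible in the context lines above), a hedged usage sketch looks like this; the App ID value is a placeholder, not a real key:

```python
import os

from hdl.utils.database_tools.web import wolfram_alpha_calculate

# Placeholder credential; a real Wolfram Alpha App ID must be supplied.
os.environ["WOLFRAM_APP_ID"] = "XXXX-XXXXXXXXXX"

# Returns the plaintext result, or an error message per the new docstring.
print(wolfram_alpha_calculate("integrate x^2 dx"))
```
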
hdl/utils/general/glob.py
CHANGED
@@ -2,6 +2,14 @@ import subprocess
 
 
 def get_num_lines(file):
+    """
+    Get the number of lines in a given file.
+    Args:
+        file (str): The path to the file.
+    Returns:
+        int: The number of lines in the file.
+    """
+
     num_lines = subprocess.check_output(
         ['wc', '-l', file]
     ).split()[0]

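A short usage sketch for get_num_lines (illustrative, not part of the diff). It assumes a POSIX environment, since the implementation shells out to wc -l, and the file path is a hypothetical example:

```python
from hdl.utils.general.glob import get_num_lines

# Counts lines by invoking `wc -l <file>` via subprocess.
n = get_num_lines("molecules.smi")  # hypothetical file path
print(f"molecules.smi has {n} lines")
```
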
@@ -9,6 +17,15 @@ def get_num_lines(file):
 
 
 def str_from_line(file, line, split=False):
+    """
+    Extracts a specific line from a file and optionally splits the line.
+    Args:
+        file (str): The path to the file from which to extract the line.
+        line (int): The line number to extract (0-based index).
+        split (bool, optional): If True, splits the line at the first space or tab and returns the first part. Defaults to False.
+    Returns:
+        str: The extracted line, optionally split at the first space or tab.
+    """
     smi = subprocess.check_output(
         # ['sed','-n', f'{str(i+1)}p', file]
         ["sed", f"{str(line + 1)}q;d", file]

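A companion sketch for str_from_line, again assuming a POSIX sed is available; the 0-based line argument documented above is converted to sed's 1-based addressing internally (line + 1):

```python
from hdl.utils.general.glob import str_from_line

first_line = str_from_line("molecules.smi", 0)               # full first line
first_token = str_from_line("molecules.smi", 0, split=True)  # text before the first space/tab
print(first_line, first_token)
```
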
hdl/utils/general/runners.py
CHANGED
@@ -61,14 +61,17 @@ def calculate(expression):
 
 def count_character_occurrences(text, char):
     """
-
+    Count the occurrences of a character in a text.
 
     Args:
-        text (str):
-        char (str):
+        text (str): The text in which to count character occurrences.
+        char (str): The character to count occurrences of.
 
     Returns:
-
+        str: A string indicating how many times the character appears in the text.
+
+    Raises:
+        ValueError: If the input text is not a string or if the character is not a single character string.
     """
     if not isinstance(text, str):
         raise ValueError("输入的文本必须是字符串类型")

@@ -77,4 +80,4 @@ def count_character_occurrences(text, char):
     text = text.lower()
     char = char.lower()
 
-    return f"{text} 中 {char} 共出现了 {text.count(char)} 次。"
+    return f"{text} 中 {char} 共出现了 {text.count(char)} 次。"

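A minimal sketch of the documented behaviour (illustrative, not from the diff): both arguments are lower-cased before counting, non-string input raises ValueError, and the return value is a formatted Chinese sentence rather than a bare integer:

```python
from hdl.utils.general.runners import count_character_occurrences

# Case-insensitive count; returns a sentence like "strawberry 中 r 共出现了 3 次。"
print(count_character_occurrences("Strawberry", "R"))
```
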
hdl/utils/llm/chat.py
CHANGED
@@ -14,6 +14,15 @@ import json
 # import traceback
 
 def parse_fn_markdown(markdown_text, params_key="params"):
+    """
+    Parses a markdown text to extract function name and parameters.
+    Args:
+        markdown_text (str): The markdown text containing the function name and parameters.
+        params_key (str, optional): The key under which the parameters will be nested in the result dictionary. Defaults to "params".
+    Returns:
+        dict: A dictionary containing the function name and parameters. The function name is stored with the key "function_name", and the parameters are nested under the specified params_key.
+    """
+
     lines = markdown_text.strip().split("\n")
     result = {}
     params = {}

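This hunk only shows the docstring and the first lines of the parser, so the exact markdown layout it accepts is not visible here. The sketch below guesses at a "heading plus bullet parameters" block purely to illustrate the documented return shape; the real input format may differ:

```python
from hdl.utils.llm.chat import parse_fn_markdown

# Hypothetical input; the accepted markdown layout is not shown in this hunk.
md = """## get_weather
- city: Beijing
- unit: celsius
"""
result = parse_fn_markdown(md)
# Per the new docstring, the function name lives under "function_name" and the
# remaining fields are nested under params_key (default "params").
print(result)
```
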
@@ -53,6 +62,23 @@ def parse_fn_markdown(markdown_text, params_key="params"):
     return result
 
 def parse_cot_markdown(markdown_text):
+    """
+    Parse a Markdown text formatted as 'COT' (Title, Tool, Content, Stop Thinking) and extract relevant information.
+
+    Args:
+        markdown_text (str): The Markdown text to parse.
+
+    Returns:
+        dict: A dictionary containing the parsed information with the following keys:
+            - 'title': Title extracted from the Markdown text.
+            - 'tool': Tool extracted from the Markdown text.
+            - 'content': Content extracted from the Markdown text.
+            - 'stop_thinking': Boolean indicating whether 'stop_thinking' is true or false.
+
+    Note:
+        - 'stop_thinking' value is considered True only if it is explicitly 'true' (case-insensitive).
+
+    """
     # 提取标题(支持跨行)
     title_match = re.search(r"##\s*(.+?)(?=\n-|\Z)", markdown_text, re.DOTALL)
     title = title_match.group(1).strip() if title_match else ""

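The title regex visible in the context lines (a "##" heading terminated by the next "-" line) suggests a heading followed by dash-prefixed fields. A plausible but unverified input, shown only to illustrate the documented return keys:

```python
from hdl.utils.llm.chat import parse_cot_markdown

# Field names follow the documented return keys; the exact bullet syntax is
# inferred from the title regex, not confirmed by this hunk.
md = """## Check today's weather in Beijing
- tool: web_search
- content: search Baidu for "Beijing weather today"
- stop_thinking: false
"""
step = parse_cot_markdown(md)
print(step["title"], step["tool"], step["stop_thinking"])
```
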
@@ -78,6 +104,7 @@ def parse_cot_markdown(markdown_text):
     }
 
 
+
 def run_tool_with_kwargs(tool, func_kwargs):
     """Run the specified tool with the provided keyword arguments.
 

@@ -108,6 +135,24 @@ class OpenAI_M():
         *args,
         **kwargs
     ):
+        """
+        Initialize an instance of the OpenAI_M class with configuration options.
+
+        Args:
+            model_path (str): Path to the model. Defaults to "default_model".
+            device (str): Device to use, either 'gpu' or 'cpu'. Defaults to 'gpu'.
+            generation_kwargs (dict, optional): Additional keyword arguments for generation.
+            server_ip (str): IP address of the server. Defaults to "172.28.1.2".
+            server_port (int): Port number of the server. Defaults to 8000.
+            api_key (str): API key for authentication. Defaults to "dummy_key".
+            use_groq (bool): Flag to use Groq client. Defaults to False.
+            groq_api_key (str, optional): API key for Groq client.
+            tools (list, optional): List of tools to be used.
+            tool_desc (dict, optional): Additional tool descriptions.
+            cot_desc (str, optional): Chain of Thought description.
+            *args: Additional positional arguments.
+            **kwargs: Additional keyword arguments.
+        """
         # self.model_path = model_path
         self.server_ip = server_ip
         self.server_port = server_port

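The new constructor docstring doubles as a configuration reference. A construction sketch under the documented parameter names (illustrative, not part of the diff; the server address is a placeholder for whatever OpenAI-compatible endpoint is actually running):

```python
from hdl.utils.llm.chat import OpenAI_M

llm = OpenAI_M(
    server_ip="127.0.0.1",   # placeholder; the documented default is "172.28.1.2"
    server_port=8000,
    api_key="dummy_key",     # documented default
    use_groq=False,
    tools=None,              # optional list of tool callables
)
```
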
@@ -155,14 +200,22 @@ class OpenAI_M():
         steps: list = None,
         **kwargs
     ):
-        """
+        """
+        Execute a Chain of Thought (COT) process to iteratively generate steps
+        towards solving a given prompt, utilizing tools if necessary.
 
         Args:
-            prompt (
-            max_step (int, optional):
+            prompt (str): The initial question or problem to solve.
+            max_step (int, optional): Maximum number of steps to attempt. Defaults to 30.
+            steps (list, optional): List to accumulate steps taken. Defaults to None.
+            **kwargs: Additional keyword arguments for tool invocation.
 
-
-
+        Yields:
+            tuple: A tuple containing the current step number, accumulated information,
+                and the list of steps taken.
+
+        Raises:
+            Exception: If an error occurs during the parsing or tool invocation process.
         """
         # 初始化当前信息为空字符串,用于累积后续的思考步骤和用户问题
         current_info = ""

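The docstring describes a generator, but the method's own name sits outside this hunk. Treating it as the COT entry point on OpenAI_M (named cot below purely as a stand-in), consuming the documented yields would look roughly like this:

```python
# `llm` is an OpenAI_M instance as constructed above; the method name `cot` is a
# stand-in, since the real name is not visible in this hunk.
for step_num, current_info, steps in llm.cot(
    "How many prime numbers are there below 50?",
    max_step=10,
):
    print(f"step {step_num}: {len(steps)} step(s) recorded")
    print(current_info)
```
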
hdl/utils/llm/chatgr.py
CHANGED
@@ -32,11 +32,11 @@ def chat_with_llm(user_input, chat_history=[]):
 def create_demo():
     """
     Creates a Gradio demo interface for a chatbot application.
-    The interface
+    The interface includes:
     - A chat history display at the top of the page.
-    - A user input textbox
-
-
+    - A user input textbox at the bottom of the page.
+    - A send button to submit messages.
+    The user can send messages either by clicking the send button or by pressing the Enter key.
     Returns:
         gr.Blocks: The Gradio Blocks object representing the demo interface.
     """

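Since create_demo returns a gr.Blocks object per its docstring, launching the interface follows the standard Gradio pattern (illustrative, not part of the diff):

```python
from hdl.utils.llm.chatgr import create_demo

demo = create_demo()   # builds the chat UI described above
demo.launch()          # serve locally; pass server_name/server_port to customize
```
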
{hjxdl-0.2.80.dist-info → hjxdl-0.2.82.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=e3IzU75tUXwJv1hSzAqotnR7QIz2cy3BvcTNm40NY9c,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -120,16 +120,16 @@ hdl/utils/chemical_tools/sdf.py,sha256=71PEqU0H885L6IeGHEa6n7ZLZThvMsZOVLuFG2wno
 hdl/utils/database_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/database_tools/connect.py,sha256=xCacGucKxlQUXs6AsNddpeECvdqT1180V1ZWqHrUdNA,875
 hdl/utils/database_tools/datetime.py,sha256=xqE2xNiOpADzX-R8_bM0bioJRF3Ay9Jp1CAG6dy6uVI,1202
-hdl/utils/database_tools/web.py,sha256=
+hdl/utils/database_tools/web.py,sha256=awJ8lafL-2KRjf3V1uuij8JIvX9U5fI8fLZKOkOvqtk,5771
 hdl/utils/desc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/desc/func_desc.py,sha256=VCqjvZs7qCwBq3NR3ZRknl4oiO5-JP7xm-Rx85W2exg,3365
 hdl/utils/desc/template.py,sha256=GJSXkVzdTAQoNT3j7YTLhz8-4CmvCMt2gLr7YBYPRWw,2192
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/general/glob.py,sha256=
-hdl/utils/general/runners.py,sha256=
+hdl/utils/general/glob.py,sha256=Zuf7WHU0UdUPOs9UrhxmrCiMC8GrHxQU6n3mTThv6yc,1120
+hdl/utils/general/runners.py,sha256=x7QBolp3MrqNV6L4rB6Ueybr26bqkRFZTuXhY0SwyLk,3061
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
-hdl/utils/llm/chatgr.py,sha256=
+hdl/utils/llm/chat.py,sha256=JnegglANgyp-avF4BdnhGji1w-x2wtonPPgjllsquV0,23283
+hdl/utils/llm/chatgr.py,sha256=5F5PJHe8vz3iCfi4TT54DCLRi1UeJshECdVtgvvvao0,3696
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387

@@ -139,7 +139,7 @@ hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.2.
-hjxdl-0.2.
-hjxdl-0.2.
-hjxdl-0.2.
+hjxdl-0.2.82.dist-info/METADATA,sha256=o5ShJ2TtbtQjRWiWt_laol2kYbOys3NYLJsJdvffplo,836
+hjxdl-0.2.82.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+hjxdl-0.2.82.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.2.82.dist-info/RECORD,,

{hjxdl-0.2.80.dist-info → hjxdl-0.2.82.dist-info}/WHEEL

File without changes

{hjxdl-0.2.80.dist-info → hjxdl-0.2.82.dist-info}/top_level.txt

File without changes