hjxdl 0.3.56__py3-none-any.whl → 0.3.58__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +3 -3
- hdl/utils/llm/chat.py +0 -65
- hdl/utils/llm/llm_wrapper.py +291 -71
- {hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/METADATA +1 -1
- {hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/RECORD +8 -8
- {hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/WHEEL +0 -0
- {hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/licenses/LICENSE +0 -0
- {hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.3.56'
-__version_tuple__ = version_tuple = (0, 3, 56)
+__version__ = version = '0.3.58'
+__version_tuple__ = version_tuple = (0, 3, 58)
 
-__commit_id__ = commit_id = '
+__commit_id__ = commit_id = 'ge579fcb03'
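The file above is a generated version module (it follows the setuptools-scm layout), so the bump is mechanical. A minimal sketch for confirming which build is installed, assuming only the attributes visible in the hunk:

from hdl import _version

print(_version.__version__)     # '0.3.58'
print(_version.version_tuple)   # (0, 3, 58)
print(_version.commit_id)       # 'ge579fcb03'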
hdl/utils/llm/chat.py
CHANGED
@@ -302,71 +302,6 @@ class OpenAI_M:
             continue
 
     def get_resp(
-        self,
-        prompt: str,
-        client_id: str = None,
-        sys_info: str = None,
-        assis_info: str = None,
-        images: list = None,
-        image_keys: tuple = ("image_url", "url"),
-        stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        model: str = None,
-        stream: bool = True,
-        **kwargs: t.Any,
-    ):
-        """Prepare and send a request to the model, and return the model's response."""
-        if not model:
-            model = self.client_conf[client_id]["model"]
-
-        # === 1️⃣ Prepare the input ===
-        content = [{"type": "text", "text": prompt}]
-        if isinstance(image_keys, str):
-            image_keys = (image_keys,) * 3
-        elif len(image_keys) == 2:
-            image_keys = (image_keys[0],) + tuple(image_keys)
-        elif len(image_keys) == 1:
-            image_keys = (image_keys[0],) * 3
-
-        if images:
-            if isinstance(images, str):
-                images = [images]
-            for img in images:
-                content.append({
-                    "type": image_keys[0],
-                    image_keys[1]: {image_keys[2]: img}
-                })
-        else:
-            content = prompt
-
-        # === 2️⃣ Build the messages (keeps the old logic) ===
-        messages = []
-        if sys_info:
-            messages.append({"role": "system", "content": sys_info})
-        messages.append({"role": "user", "content": content})
-        if assis_info:
-            messages.append({"role": "assistant", "content": assis_info})
-
-        # === 3️⃣ Switch to responses.create ===
-        client = self.client_conf[client_id]["client"]
-
-        if stream:
-            response = client.responses.create(
-                model=model,
-                input=messages,  # note: the new Responses API takes the conversation via "input"
-                stream=True,
-                **kwargs
-            )
-        else:
-            response = client.responses.create(
-                model=model,
-                input=messages,
-                stream=False,
-                **kwargs
-            )
-
-        return response
-
-    def get_resp_legacy(
         self,
         prompt: str,
         client_id: str = None,
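The net effect of this hunk is a rollback: the short-lived get_resp built on OpenAI's Responses API is deleted, and the former get_resp_legacy signature takes its place, so OpenAI_M.get_resp once again goes through Chat Completions. A minimal sketch of the two call shapes, assuming an already-configured client and a messages list built as above:

# Removed path: the Responses API takes the conversation via "input"
resp = client.responses.create(model=model, input=messages, stream=True)

# Retained path: Chat Completions keeps the familiar "messages" keyword
resp = client.chat.completions.create(model=model, messages=messages, stream=True)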
hdl/utils/llm/llm_wrapper.py
CHANGED
@@ -3,6 +3,7 @@ import typing as t
 
 from openai import OpenAI
 
+
 class OpenAIWrapper(object):
     def __init__(
         self,
@@ -14,6 +15,29 @@ class OpenAIWrapper(object):
     ):
         """
        Initializes the client configuration for the class.
+
+        Args:
+            client_conf (dict, optional): A dictionary containing client configuration. If None,
+                client configuration will be loaded from the specified directory.
+            client_conf_dir (str, optional): The directory from which to load client configuration
+                if `client_conf` is None. Must be provided in that case.
+            load_conf (bool, optional): A flag indicating whether to load the client
+                configuration from the directory. Defaults to True.
+            *args: Variable length argument list for client initialization.
+            **kwargs: Arbitrary keyword arguments for client initialization.
+
+        Raises:
+            AssertionError: If `client_conf` is None and `client_conf_dir` is also None.
+
+        Note:
+            The method will create a client for each configuration found in `client_conf`,
+            initializing the client with the specified `base_url` and `api_key`.
+        Examples:
+            >>> llm = OpenAIWrapper(
+            >>>     client_conf_dir="/some/path/model_conf.yaml",
+            >>>     # load_conf=False
+            >>> )
+        )
         """
         self.client_conf = {}
         if client_conf is None:
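For reference, a minimal construction sketch using an inline dict instead of a YAML file; the key names ("host", "api_key", "model") come from the initialization loop in the next hunk, while the endpoint and model name are placeholders:

from hdl.utils.llm.llm_wrapper import OpenAIWrapper

llm = OpenAIWrapper(
    client_conf={
        "local": {
            "host": "http://127.0.0.1:22299/v1",  # already a full URL, so no port rewriting is needed
            "api_key": "dummy_key",
            "model": "some-local-model",          # placeholder model name
        }
    },
)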
@@ -24,7 +48,8 @@ class OpenAIWrapper(object):
         else:
             self.client_conf = client_conf
 
-
+        # self.clients = {}
+        for _, conf in self.client_conf.items():
             conf["client"] = OpenAI(
                 base_url=conf["host"],
                 api_key=conf.get("api_key", "dummy_key"),
@@ -33,8 +58,6 @@ class OpenAIWrapper(object):
             )
             if "client_type" not in conf:
                 conf["client_type"] = "chat"
-            if "model" not in conf:
-                conf["model"] = None
 
     def add_client(
         self,
@@ -45,6 +68,31 @@ class OpenAIWrapper(object):
         api_key: str = "dummy_key",
         **kwargs
     ):
+        """
+        Add a new client configuration to the client manager.
+
+        This method stores the configuration details for a new client identified by the
+        provided client ID. It constructs the host URL based on the input parameters
+        and initializes an OpenAI client instance.
+
+        Args:
+            client_id (str): Unique identifier for the client.
+            host (str): Hostname or IP address of the client.
+            port (int, optional): Port number for the client connection. Defaults to None.
+            model (str, optional): Model to use for the client. Defaults to "default_model".
+            api_key (str, optional): API key for authentication. Defaults to "dummy_key".
+            **kwargs: Additional keyword arguments passed to the OpenAI client.
+
+        Raises:
+            ValueError: If both host and port are not valid for constructing a URL.
+        Examples:
+            >>> llm.add_client(
+            >>>     client_id="rone",
+            >>>     host="127.0.0.1",
+            >>>     port=22299,
+            >>>     model="ictrek/rone:1.5b32k",
+            >>> )
+        """
         self.client_conf[client_id] = {}
         if not host.startswith('http') and port:
             host = f"http://{host}:{port}/v1"
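Reusing the docstring's own example values, the URL-building rule above expands a bare host and port into an OpenAI-compatible base URL:

llm.add_client(client_id="rone", host="127.0.0.1", port=22299, model="ictrek/rone:1.5b32k")
# self.client_conf["rone"]["host"] is now "http://127.0.0.1:22299/v1"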
@@ -58,18 +106,36 @@ class OpenAIWrapper(object):
         )
 
     def load_clients(self):
+        """
+        Loads client configuration from a YAML file and updates the 'host' field
+        for each client entry, ensuring the correct URL format.
+
+        This method reads the client configuration from the specified path,
+        updates the 'host' field to include the appropriate port and the
+        'http' protocol if not already specified, and stores the updated
+        configuration in the `client_conf` attribute.
+
+        Attributes:
+            client_conf_path (str): The file path to the client configuration YAML file.
+            client_conf (dict): The updated client configuration after processing.
+
+        Returns:
+            None
+        """
         with open(self.client_conf_path, 'r') as file:
             data = yaml.safe_load(file)
+
+        # Update the host field
         for _, value in data.items():
             host = value.get('host', '')
             port = value.get('port', '')
-            if not host.startswith('http') and port:
+            if not host.startswith('http') and port:  # only rewrite when a port is present
                 value['host'] = f"http://{host}:{port}/v1"
         self.client_conf = data
 
     def get_resp(
         self,
-        prompt
+        prompt,
         client_id: str = None,
         history: list = None,
         sys_info: str = None,
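load_clients only inspects the host and port keys; anything else (such as model) is passed through and consumed later by get_resp and embedding. A sketch of the rewrite it performs, with placeholder values:

# what yaml.safe_load might return for one entry (hypothetical values)
data = {"glm_4_flash": {"host": "127.0.0.1", "port": 8000, "model": "glm-4-flash"}}

# after the loop above, the host carries the scheme, port, and /v1 suffix:
# {"glm_4_flash": {"host": "http://127.0.0.1:8000/v1", "port": 8000, "model": "glm-4-flash"}}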
@@ -78,7 +144,7 @@ class OpenAIWrapper(object):
         image_keys: tuple = ("image_url", "url"),
         videos: list = None,
         video_keys: tuple = ("video_url", "url"),
-        model: str
+        model: str=None,
         tools: list = None,
         tool_choice: str = "auto",
         stream: bool = True,
@@ -86,95 +152,234 @@ class OpenAIWrapper(object):
         **kwargs: t.Any,
     ):
         """
-        Generates a response from
-
-
+        Generates a response from a chat model based on the given prompt and additional context.
+
+        Args:
+            prompt (str): The main text prompt to send to the chat model.
+            client_id (str, optional): Identifier for the client configuration. Defaults to None.
+            history (list, optional): A list of previous messages to provide context for the conversation. Each message should be a dictionary with "role" and "content". Defaults to None.
+            sys_info (str, optional): System-level information to set the context of the chat. Defaults to None.
+            assis_info (str, optional): Information from the assistant to be included in the conversation. Defaults to None.
+            images (list, optional): A list of images to include in the message content. Defaults to None.
+            image_keys (tuple, optional): Keys to format the image data. Must be of length 1 or 2. Defaults to ("image_url", "url").
+            model (str, optional): The model to use for generating the response. If not provided, it defaults to the one in client configuration for the given client_id.
+            tools (list, optional): List of tools to be available during the chat. Defaults to None.
+            stream (bool, optional): Whether to stream the response. Defaults to True.
+            response_model (optional): Specifies the response model to use. Defaults to None.
+            **kwargs (Any): Additional configuration parameters.
+
+        Returns:
+            Response: The response object from the chat model.
         """
-        if
-
-
-        client =
-        if
-
-
-
-
-        # Build input list
-        input_items: list[t.Any] = []
+        if not model:
+            model = self.client_conf[client_id]['model']
+
+        client = self.client_conf[client_id]['client']
+        if response_model:
+            import instructor  # TODO: some models support instructor's structured output, but underneath it still calls the OpenAI API; remove or complete this when appropriate
+            client = instructor.from_openai(client)
+
+        messages = []
 
         if sys_info:
-
+            messages.append({
+                "role": "system",
+                "content": sys_info
+            })
+
         if history:
-
+            messages.extend(history)
+            # history must follow the format below; the system entry is optional
+            # history = [
+            #     {"role": "system", "content": "You are a helpful assistant."},
+            #     {"role": "user", "content": "message 1 content."},
+            #     {"role": "assistant", "content": "message 2 content"},
+            #     {"role": "user", "content": "message 3 content"},
+            #     {"role": "assistant", "content": "message 4 content."},
+            #     {"role": "user", "content": "message 5 content."}
+            # ]
+
+        if not model:
+            model = self.client_conf[client_id]["model"]
+        # Adjust the image_keys to be a tuple of length 3 based on its current length
+        if isinstance(image_keys, str):
+            image_keys = (image_keys,) * 3
+        elif len(image_keys) == 2:
+            image_keys = (image_keys[0],) + tuple(image_keys)
+        elif len(image_keys) == 1:
+            image_keys = (image_keys[0],) * 3
+
+        if isinstance(video_keys, str):
+            video_keys = (video_keys,) * 3
+        elif len(video_keys) == 2:
+            video_keys = (video_keys[0],) + tuple(video_keys)
+        elif len(video_keys) == 1:
+            video_keys = (video_keys[0],) * 3
+
+        content = [{
+            "type": "text",
+            "text": prompt
+        }]
+
+        if videos:
+            if isinstance(videos, str):
+                images = [videos]
+            for video in videos:
+                content.append({
+                    "type": video_keys[0],
+                    video_keys[1]: {
+                        video_keys[2]: video
+                    }
+                })
 
-        # Build user message
         if images:
             if isinstance(images, str):
                 images = [images]
-            multimodal_content = [
-                {"type": "input_text", "text": prompt}
-            ]
             for img in images:
-
-                "type":
-
+                content.append({
+                    "type": image_keys[0],
+                    image_keys[1]: {
+                        image_keys[2]: img
+                    }
                 })
-
-
-            user_item = {"role": "user", "content": prompt}
+        if (not images) and (not videos):
+            content = prompt
 
-
+        # Add the user's input as a message
+        messages.append({
+            "role": "user",
+            "content": content
+        })
 
         if assis_info:
-
+            messages.append({
+                "role": "assistant",
+                "content": assis_info
+            })
 
-        # Prepare call parameters
-        call_params = {
-            "model": model,
-            "input": input_items,
-            **kwargs
-        }
         if tools:
-
-
-
-
-
-
-            **call_params
+            resp = client.chat.completions.create(
+                model=model,
+                messages=messages,
+                tools=tools,
+                tool_choice=tool_choice,
+                stream=stream,
+                **kwargs
             )
-            return resp_stream
         else:
-            resp = client.
-
-
+            resp = client.chat.completions.create(
+                model=model,
+                messages=messages,
+                stream=stream,
+                **kwargs
             )
-
-        from types import SimpleNamespace
-        text_out = getattr(resp, "output_text", "")
-        message = SimpleNamespace(content=text_out, tool_calls=None)
-        choice = SimpleNamespace(message=message, finish_reason="stop")
-        fake_resp = SimpleNamespace(choices=[choice])
-        return fake_resp
+        return resp
 
     def invoke(
         self,
-        prompt
+        prompt,
         **kwargs
     ):
+        """
+        Invoke the API to get a response based on the provided prompt.
+
+        Args:
+            prompt (str): The input prompt to be processed.
+            **kwargs: Additional keyword arguments to customize the API request.
+
+        Returns:
+            dict: A dictionary containing the type of response and its contents.
+                The possible keys are:
+                - 'type' (str): Indicates the type of response ('text' or 'tool_calls').
+                - 'contents' (str, optional): The text content if the response type is 'text'.
+                - 'tool_params' (dict, optional): The parameters of the tool called if the response type is 'tool_calls'.
+
+        Examples:
+            >>> llm.invoke(
+            >>>     client_id="glm_4_flash",
+            >>>     prompt="What's the weather like in Shenzhen?",
+            >>>     tools=[TOOL_DICT['get_weather']],
+            >>> )
+            {'type': 'tool_calls',
+             'tool_params': Function(arguments='{"location": "Shenzhen"}', name='get_weather')}
+        """
         answer_dict = {}
-
-
-
+
+        resp = self.get_resp(
+            prompt,
+            stream=False,
+            **kwargs
+        )
+        if resp.choices[0].finish_reason == "stop":
+            answer_dict["type"] = "text"
+            answer_dict["contents"] = resp.choices[0].message.content
+        elif resp.choices[0].finish_reason == "tool_calls":
+            answer_dict["type"] = "tool_calls"
+            answer_dict["tool_params"] = resp.choices[0].message.tool_calls[0].function
+
         return answer_dict
 
-    def stream(self, prompt
-
-
-
-
+    def stream(self, prompt, **kwargs):
+        """
+        Streams responses based on the provided prompt, yielding chunks of data.
+
+        This function calls the `get_resp` method with the prompt and additional keyword arguments,
+        streaming the response in chunks. It processes each chunk to yield either tool call parameters
+        or textual content. If an error occurs while processing the chunks, it yields an error message.
+
+        Args:
+            prompt (str): The input prompt to generate responses for.
+            **kwargs: Additional keyword arguments to be passed to the `get_resp` method.
+
+        Yields:
+            dict: A dictionary with the following possible keys:
+                - type (str): Indicates the type of the response ('tool_calls', 'text', or 'error').
+                - tool_params (dict, optional): Parameters of the tool call if the type is 'tool_calls'.
+                - content (str, optional): The generated text content if the type is 'text'.
+                - message (str, optional): An error message if the type is 'error'.
+
+        Examples:
+            >>> resp = llm.stream(
+            >>>     client_id="r1",  # this model can produce a chain of thought
+            >>>     prompt=prompt,
+            >>>     # tools=[TOOL_DICT['get_weather']],
+            >>> )
+            >>> for i in resp:
+            >>>     if i['type'] == 'text' and i['content']:
+            >>>         print(i['content'], flush=True, end="")
+        """
+        resp = self.get_resp(prompt=prompt, stream=True, **kwargs)
+
+        for chunk in resp:
+            try:
+                choice = chunk.choices[0]
+
+                # If tool_calls were returned
+                if hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls:
+                    tool_calls = choice.delta.tool_calls
+                    if tool_calls:  # guard against an empty list
+                        yield {
+                            "type": "tool_calls",
+                            "tool_params": tool_calls[0].function
+                        }
+                        return  # end the stream right away
+
+                # Yield text content
+                elif hasattr(choice.delta, 'content'):
+                    yield {
+                        "type": "text",
+                        "content": choice.delta.content
+                    }
+
+            except (AttributeError, IndexError) as e:
+                # Guard against an unexpected chunk structure
+                yield {
+                    "type": "error",
+                    "message": f"Stream chunk error: {str(e)}"
+                }
+                return
+
         return
 
     def embedding(
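Two details of the rewritten get_resp are worth spelling out. First, image_keys and video_keys are normalized to a 3-tuple of (type, outer key, inner key), so the default 2-tuple expands as follows:

image_keys = ("image_url", "url")                  # default, length 2
image_keys = (image_keys[0],) + tuple(image_keys)  # -> ("image_url", "image_url", "url")
# each image entry in the content list then becomes:
# {"type": "image_url", "image_url": {"url": img}}

Second, the video branch appears to carry a typo over from the image branch: `images = [videos]` presumably should read `videos = [videos]`, otherwise a bare-string `videos` argument is never wrapped into a list before iteration.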
@@ -183,13 +388,28 @@ class OpenAIWrapper(object):
         texts: list[str],
         model: str = None,
         **kwargs
-    )
-
+    ):
+        """
+        Generates embeddings for a list of texts using a specified model.
+
+        Args:
+            client_id (str): The ID of the client to use for generating embeddings.
+            texts (list[str]): A list of texts for which to generate embeddings.
+            model (str, optional): The model to use for generating embeddings.
+                If not provided, the model specified in the client configuration will be used.
+            **kwargs: Additional keyword arguments to be passed to the client embedding creation method.
+
+        Returns:
+            list: A list of embeddings corresponding to the input texts.
+        """
+        if not model:
             model = self.client_conf[client_id]['model']
+
         client = self.client_conf[client_id]['client']
         response = client.embeddings.create(
             input=texts,
             model=model,
             **kwargs
         )
+
         return [i.embedding for i in response.data]
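A short usage sketch for the embedding helper, with a placeholder client id assumed to have been registered beforehand via add_client; the return value is a plain list of vectors:

vecs = llm.embedding(
    client_id="emb",            # placeholder client id
    texts=["hello", "world"],
)
print(len(vecs), len(vecs[0]))  # number of inputs, embedding dimensionality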
{hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=xjPDXkoC3XkceKjo-55ZXy9kGQL-5rAj-yOgDqhc8rE,714
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -131,12 +131,12 @@ hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
 hdl/utils/general/glob.py,sha256=Zuf7WHU0UdUPOs9UrhxmrCiMC8GrHxQU6n3mTThv6yc,1120
 hdl/utils/general/runners.py,sha256=R0lhqABIuT43jEyjFkeio84e_PFfvAkszOP1FBlAnQ8,4927
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
+hdl/utils/llm/chat.py,sha256=q0zQmf-6DeSYOrC2qDn_QFOlILjRHU69eVRUn0eIIbA,26526
 hdl/utils/llm/chatgr.py,sha256=5F5PJHe8vz3iCfi4TT54DCLRi1UeJshECdVtgvvvao0,3696
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
-hdl/utils/llm/llm_wrapper.py,sha256=
+hdl/utils/llm/llm_wrapper.py,sha256=90kuCEYMUAjJtjRSzwHTcQsprxaVHx8kyrtzl_aw1iY,15931
 hdl/utils/llm/ollama.py,sha256=uEdLsNAc6b56r37hNiE3nrd6oZ2lmQ0gYbVvOc9YVIM,1389
 hdl/utils/llm/vis.py,sha256=jRa5l1LHaWtohtdIKVpOH_I4yyXWTbyaLGglFHsV_0Q,28826
 hdl/utils/llm/visrag.py,sha256=0i-VrxqgiV-J7R3VPshu9oc7-rKjFJOldYik3HDXj6M,10176
@@ -146,8 +146,8 @@ hdl/utils/vis_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 hdl/utils/vis_tools/scene_detect.py,sha256=L6TFMT15QHJuOIFcLFVI_RSSSjyTVZhBEqbeUez2auU,6608
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.3.
-hjxdl-0.3.
-hjxdl-0.3.
-hjxdl-0.3.
-hjxdl-0.3.
+hjxdl-0.3.58.dist-info/licenses/LICENSE,sha256=lkMiSbeZHBQLB9LJEkS9-L3Z-LBC4yGnKrzHSG8RkPM,2599
+hjxdl-0.3.58.dist-info/METADATA,sha256=acyVOZm7COIyMnuI-HqyziPYFpyYMHhWdEQXR45QNDc,1332
+hjxdl-0.3.58.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+hjxdl-0.3.58.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.3.58.dist-info/RECORD,,
{hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/WHEEL
File without changes

{hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/licenses/LICENSE
File without changes

{hjxdl-0.3.56.dist-info → hjxdl-0.3.58.dist-info}/top_level.txt
File without changes