hjxdl 0.3.55__py3-none-any.whl → 0.3.56__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +3 -3
- hdl/utils/llm/llm_wrapper.py +63 -491
- {hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/METADATA +1 -1
- {hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/RECORD +7 -7
- {hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/WHEEL +0 -0
- {hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/licenses/LICENSE +0 -0
- {hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/top_level.txt +0 -0
hdl/_version.py CHANGED

@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.3.55'
-__version_tuple__ = version_tuple = (0, 3, 55)
+__version__ = version = '0.3.56'
+__version_tuple__ = version_tuple = (0, 3, 56)
 
-__commit_id__ = commit_id = '…'
+__commit_id__ = commit_id = 'g5d6407784'
hdl/utils/llm/llm_wrapper.py CHANGED

@@ -3,7 +3,6 @@ import typing as t
 
 from openai import OpenAI
 
-
 class OpenAIWrapper(object):
     def __init__(
         self,
@@ -15,29 +14,6 @@ class OpenAIWrapper(object):
     ):
         """
         Initializes the client configuration for the class.
-
-        Args:
-            client_conf (dict, optional): A dictionary containing client configuration. If None,
-                client configuration will be loaded from the specified directory.
-            client_conf_dir (str, optional): The directory from which to load client configuration
-                if `client_conf` is None. Must be provided in that case.
-            load_conf (bool, optional): A flag indicating whether to load the client
-                configuration from the directory. Defaults to True.
-            *args: Variable length argument list for client initialization.
-            **kwargs: Arbitrary keyword arguments for client initialization.
-
-        Raises:
-            AssertionError: If `client_conf` is None and `client_conf_dir` is also None.
-
-        Note:
-            The method will create a client for each configuration found in `client_conf`,
-            initializing the client with the specified `base_url` and `api_key`.
-        Examples:
-            >>> llm = OpenAIWrapper(
-            >>>     client_conf_dir="/some/path/model_conf.yaml",
-            >>>     # load_conf=False
-            >>> )
-            )
         """
         self.client_conf = {}
         if client_conf is None:
@@ -48,8 +24,7 @@
         else:
             self.client_conf = client_conf
 
-
-        for _, conf in self.client_conf.items():
+        for cid, conf in self.client_conf.items():
             conf["client"] = OpenAI(
                 base_url=conf["host"],
                 api_key=conf.get("api_key", "dummy_key"),
@@ -58,6 +33,8 @@
             )
             if "client_type" not in conf:
                 conf["client_type"] = "chat"
+            if "model" not in conf:
+                conf["model"] = None
 
     def add_client(
         self,
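With `conf["model"] = None` as the new fallback, every per-client entry now has a well-defined `model` slot even when the configuration omits it, and `get_resp` (below) can fail with a clear `ValueError` when neither the configuration nor the caller supplies a model. A minimal sketch of the dict shape `__init__` consumes; the client id, endpoint, and model name here are hypothetical:

```python
# Hypothetical configuration; keys mirror what __init__ reads:
# "host" is required, "api_key" falls back to "dummy_key",
# "client_type" falls back to "chat", and "model" now falls back to None.
client_conf = {
    "local_qwen": {                          # hypothetical client_id
        "host": "http://127.0.0.1:8000/v1",  # hypothetical endpoint
        "api_key": "dummy_key",
        "model": "qwen2.5:7b",               # hypothetical model name
    },
}
```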
@@ -68,31 +45,6 @@ class OpenAIWrapper(object):
         api_key: str = "dummy_key",
         **kwargs
     ):
-        """
-        Add a new client configuration to the client manager.
-
-        This method stores the configuration details for a new client identified by the
-        provided client ID. It constructs the host URL based on the input parameters
-        and initializes an OpenAI client instance.
-
-        Args:
-            client_id (str): Unique identifier for the client.
-            host (str): Hostname or IP address of the client.
-            port (int, optional): Port number for the client connection. Defaults to None.
-            model (str, optional): Model to use for the client. Defaults to "default_model".
-            api_key (str, optional): API key for authentication. Defaults to "dummy_key".
-            **kwargs: Additional keyword arguments passed to the OpenAI client.
-
-        Raises:
-            ValueError: If both host and port are not valid for constructing a URL.
-        Examples:
-            >>> llm.add_client(
-            >>>     client_id="rone",
-            >>>     host="127.0.0.1",
-            >>>     port=22299,
-            >>>     model="ictrek/rone:1.5b32k",
-            >>> )
-        """
         self.client_conf[client_id] = {}
         if not host.startswith('http') and port:
             host = f"http://{host}:{port}/v1"
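The removed docstring carried the only usage example for `add_client`. The same call still works against the slimmed-down method; this sketch reuses the values from that example (host, port, and model are illustrative):

```python
llm = OpenAIWrapper(client_conf={})  # start from an empty configuration

# A bare host plus port is expanded by the startswith('http') branch
# above into "http://127.0.0.1:22299/v1".
llm.add_client(
    client_id="rone",
    host="127.0.0.1",
    port=22299,
    model="ictrek/rone:1.5b32k",
)
```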
@@ -106,36 +58,18 @@
             )
 
     def load_clients(self):
-        """
-        Loads client configuration from a YAML file and updates the 'host' field
-        for each client entry, ensuring the correct URL format.
-
-        This method reads the client configuration from the specified path,
-        updates the 'host' field to include the appropriate port and the
-        'http' protocol if not already specified, and stores the updated
-        configuration in the `client_conf` attribute.
-
-        Attributes:
-            client_conf_path (str): The file path to the client configuration YAML file.
-            client_conf (dict): The updated client configuration after processing.
-
-        Returns:
-            None
-        """
         with open(self.client_conf_path, 'r') as file:
             data = yaml.safe_load(file)
-
-        # update the host field
         for _, value in data.items():
             host = value.get('host', '')
             port = value.get('port', '')
-            if not host.startswith('http') and port:
+            if not host.startswith('http') and port:
                 value['host'] = f"http://{host}:{port}/v1"
         self.client_conf = data
 
     def get_resp(
         self,
-        prompt,
+        prompt: str,
         client_id: str = None,
         history: list = None,
         sys_info: str = None,
@@ -144,7 +78,7 @@
         image_keys: tuple = ("image_url", "url"),
         videos: list = None,
         video_keys: tuple = ("video_url", "url"),
-        model: str=None,
+        model: str = None,
         tools: list = None,
         tool_choice: str = "auto",
         stream: bool = True,
@@ -152,442 +86,95 @@
         **kwargs: t.Any,
     ):
         """
-
-
+        Generates a response from the model using responses.create with input=.
+        Supports optional image input if `images` is provided.
+        Also supports external tools via `tools` + `tool_choice`.
         """
-        if …
-
+        if client_id is None:
+            raise ValueError("client_id must be provided")
+        conf = self.client_conf[client_id]
+        client = conf["client"]
+        if model is None:
+            model = conf.get("model")
+        if model is None:
+            raise ValueError("model must be specified either in client_conf or via parameter")
 
-
-
-            import instructor
-            client = instructor.from_openai(client)
+        # Build input list
+        input_items: list[t.Any] = []
 
-        # ===== build messages (keeping your original logic) =====
-        messages = []
         if sys_info:
-            …
+            input_items.append({"role": "system", "content": sys_info})
         if history:
-            …
-
-        # normalize media keys (into triples)
-        def _triple_keys(keys):
-            if isinstance(keys, str):
-                return (keys,)*3
-            if len(keys) == 2:
-                return (keys[0],) + tuple(keys)
-            if len(keys) == 1:
-                return (keys[0],)*3
-            return keys
-
-        image_keys = _triple_keys(image_keys)
-        video_keys = _triple_keys(video_keys)
-
-        content = [{"type": "text", "text": prompt}]
-        if videos:
-            if isinstance(videos, str):
-                videos = [videos]
-            for v in videos:
-                content.append({
-                    "type": video_keys[0],
-                    video_keys[1]: {video_keys[2]: v}
-                })
+            input_items.extend(history)
 
+        # Build user message
         if images:
             if isinstance(images, str):
                 images = [images]
+            multimodal_content = [
+                {"type": "input_text", "text": prompt}
+            ]
             for img in images:
-                …
-                    "type": …
-                    …
+                multimodal_content.append({
+                    "type": "input_image",
+                    "image_url": img
                 })
+            user_item = {"role": "user", "content": multimodal_content}
+        else:
+            user_item = {"role": "user", "content": prompt}
 
-        …
-            content = prompt
+        input_items.append(user_item)
 
-        messages.append({"role": "user", "content": content})
         if assis_info:
-            …
+            input_items.append({"role": "assistant", "content": assis_info})
+
+        # Prepare call parameters
+        call_params = {
+            "model": model,
+            "input": input_items,
+            **kwargs
+        }
+        if tools:
+            call_params["tools"] = tools
+            call_params["tool_choice"] = tool_choice
 
-        # …
-        # Note: Responses also accepts the messages shape; tools are passed straight through as tools / tool_choice.
+        # Call Responses API
         if stream:
-            # --- streaming: return a generator disguised as chat.completions chunk structures ---
-            # the caller's `for chunk in resp:` loop receives objects exposing
-            # chunk.choices[0].delta.content / .tool_calls
             resp_stream = client.responses.create(
-                model=model,
-                messages=messages,
-                tools=tools if tools else None,
-                tool_choice=tool_choice if tools else None,
                 stream=True,
-                **kwargs
+                **call_params
             )
-
-            # adapter layer: convert the Responses event stream into Chat Completions-style chunks
-            from types import SimpleNamespace
-            def _wrap_delta_text(text):
-                # -> chunk.choices[0].delta.content
-                delta = SimpleNamespace(content=text)
-                choice = SimpleNamespace(delta=delta)
-                return SimpleNamespace(choices=[choice])
-
-            def _wrap_delta_tool_call(name, arguments_fragment):
-                # -> chunk.choices[0].delta.tool_calls[0].function.{name, arguments}
-                func = SimpleNamespace(name=name, arguments=arguments_fragment)
-                tool_call = SimpleNamespace(function=func)
-                delta = SimpleNamespace(content=None, tool_calls=[tool_call])
-                choice = SimpleNamespace(delta=delta)
-                return SimpleNamespace(choices=[choice])
-
-            def _generator():
-                # each event in the SDK's Responses stream carries event.type
-                # cover the common event names; ignore unknown events
-                tool_args_acc = {}  # accumulate each tool's arguments (grouped by id)
-                tool_name_cache = {}
-
-                for event in resp_stream:
-                    et = getattr(event, "type", None)
-
-                    # text delta
-                    if et == "response.output_text.delta":
-                        delta_text = getattr(event, "delta", None)
-                        if delta_text:
-                            yield _wrap_delta_text(delta_text)
-
-                    # end of text (safe to ignore; the caller detects termination from the yields)
-                    elif et == "response.output_text.done":
-                        pass
-
-                    # tool-call argument delta
-                    elif et in ("response.tool_call.delta", "response.function_call.delta"):
-                        # common fields: event.id, event.name, event.delta / event.arguments_delta
-                        call_id = getattr(event, "id", None)
-                        name = getattr(event, "name", None) or tool_name_cache.get(call_id)
-                        args_delta = getattr(event, "arguments_delta", None) or getattr(event, "delta", "")
-
-                        if call_id:
-                            tool_name_cache.setdefault(call_id, name or "")
-                            tool_args_acc.setdefault(call_id, "")
-                            tool_args_acc[call_id] += (args_delta or "")
-
-                        # surface this small delta upward too (so the caller sees tool_calls as early as possible)
-                        yield _wrap_delta_tool_call(name or "", args_delta or "")
-
-                    # tool call finished (emit the full arguments once more so the caller gets them in one piece)
-                    elif et in ("response.tool_call.done", "response.function_call.done"):
-                        call_id = getattr(event, "id", None)
-                        full_name = tool_name_cache.get(call_id, "")
-                        full_args = tool_args_acc.get(call_id, "")
-                        yield _wrap_delta_tool_call(full_name, full_args)
-
-                    # other events (e.g. response.completed / response.error)
-                    else:
-                        # extend as needed; silently ignored here
-                        pass
-
-            return _generator()
-
+            return resp_stream
         else:
-            # --- non-streaming: adapt the synchronous Responses result into chat.completions style ---
             resp = client.responses.create(
-                model=model,
-                messages=messages,
-                tools=tools if tools else None,
-                tool_choice=tool_choice if tools else None,
                 stream=False,
-                **kwargs
+                **call_params
             )
-
-            # extract text & tool calls from the Responses result
-            # best-effort compatibility: prefer output_text, otherwise aggregate from the output list
-            text_out = getattr(resp, "output_text", None)
-            outputs = getattr(resp, "output", None)
-
-            if text_out is None and outputs:
-                # aggregate message/output_text
-                parts = []
-                for item in outputs:
-                    if getattr(item, "type", "") in ("message",):
-                        # item.content usually holds several more blocks (output_text etc.)
-                        content_parts = getattr(item, "content", []) or []
-                        for c in content_parts:
-                            if getattr(c, "type", "") in ("output_text",):
-                                parts.append(getattr(c, "text", ""))
-                text_out = "".join(parts) if parts else None
-
-            # extract tool calls (if any)
-            tool_calls_wrapped = []
-            if outputs:
-                for item in outputs:
-                    if getattr(item, "type", "") in ("tool_call", "function_call"):
-                        name = getattr(item, "name", "")
-                        arguments = getattr(item, "arguments", "")
-                        from types import SimpleNamespace
-                        func = SimpleNamespace(name=name, arguments=arguments)
-                        tool_calls_wrapped.append(SimpleNamespace(function=func))
-
-            # fake the chat.completions return structure
+            # Wrap to mimic chat.completions interface
             from types import SimpleNamespace
-
-            message = SimpleNamespace(
-                content=text_out,
-                tool_calls=tool_calls_wrapped if tool_calls_wrapped else None
-            )
-            choice = SimpleNamespace(
-                message=message,
-                finish_reason=finish_reason
-            )
+            text_out = getattr(resp, "output_text", "")
+            message = SimpleNamespace(content=text_out, tool_calls=None)
+            choice = SimpleNamespace(message=message, finish_reason="stop")
             fake_resp = SimpleNamespace(choices=[choice])
             return fake_resp
 
-    def get_resp_legacy(
-        self,
-        prompt,
-        client_id: str = None,
-        history: list = None,
-        sys_info: str = None,
-        assis_info: str = None,
-        images: list = None,
-        image_keys: tuple = ("image_url", "url"),
-        videos: list = None,
-        video_keys: tuple = ("video_url", "url"),
-        model: str=None,
-        tools: list = None,
-        tool_choice: str = "auto",
-        stream: bool = True,
-        response_model = None,
-        **kwargs: t.Any,
-    ):
-        """
-        Generates a response from a chat model based on the given prompt and additional context.
-
-        Args:
-            prompt (str): The main text prompt to send to the chat model.
-            client_id (str, optional): Identifier for the client configuration. Defaults to None.
-            history (list, optional): A list of previous messages to provide context for the conversation. Each message should be a dictionary with "role" and "content". Defaults to None.
-            sys_info (str, optional): System-level information to set the context of the chat. Defaults to None.
-            assis_info (str, optional): Information from the assistant to be included in the conversation. Defaults to None.
-            images (list, optional): A list of images to include in the message content. Defaults to None.
-            image_keys (tuple, optional): Keys to format the image data. Must be of length 1 or 2. Defaults to ("image_url", "url").
-            model (str, optional): The model to use for generating the response. If not provided, it defaults to the one in client configuration for the given client_id.
-            tools (list, optional): List of tools to be available during the chat. Defaults to None.
-            stream (bool, optional): Whether to stream the response. Defaults to True.
-            response_model (optional): Specifies the response model to use. Defaults to None.
-            **kwargs (Any): Additional configuration parameters.
-
-        Returns:
-            Response: The response object from the chat model.
-        """
-        if not model:
-            model = self.client_conf[client_id]['model']
-
-        client = self.client_conf[client_id]['client']
-        if response_model:
-            import instructor  # TODO: some models support instructor's structured output, but under the hood it still calls the openai api; remove or complete this later
-            client = instructor.from_openai(client)
-
-        messages = []
-
-        if sys_info:
-            messages.append({
-                "role": "system",
-                "content": sys_info
-            })
-
-        if history:
-            messages.extend(history)
-            # history must follow the format below; the system entry is optional
-            # history = [
-            #     {"role": "system", "content": "You are a helpful assistant."},
-            #     {"role": "user", "content": "message 1 content."},
-            #     {"role": "assistant", "content": "message 2 content"},
-            #     {"role": "user", "content": "message 3 content"},
-            #     {"role": "assistant", "content": "message 4 content."},
-            #     {"role": "user", "content": "message 5 content."}
-            # ]
-
-        if not model:
-            model = self.client_conf[client_id]["model"]
-        # Adjust the image_keys to be a tuple of length 3 based on its current length
-        if isinstance(image_keys, str):
-            image_keys = (image_keys,) * 3
-        elif len(image_keys) == 2:
-            image_keys = (image_keys[0],) + tuple(image_keys)
-        elif len(image_keys) == 1:
-            image_keys = (image_keys[0],) * 3
-
-        if isinstance(video_keys, str):
-            video_keys = (video_keys,) * 3
-        elif len(video_keys) == 2:
-            video_keys = (video_keys[0],) + tuple(video_keys)
-        elif len(video_keys) == 1:
-            video_keys = (video_keys[0],) * 3
-
-        content = [{
-            "type": "text",
-            "text": prompt
-        }]
-
-        if videos:
-            if isinstance(videos, str):
-                images = [videos]
-            for video in videos:
-                content.append({
-                    "type": video_keys[0],
-                    video_keys[1]: {
-                        video_keys[2]: video
-                    }
-                })
-
-
-        if images:
-            if isinstance(images, str):
-                images = [images]
-            for img in images:
-                content.append({
-                    "type": image_keys[0],
-                    image_keys[1]: {
-                        image_keys[2]: img
-                    }
-                })
-        if (not images) and (not videos):
-            content = prompt
-
-        # Add the user's input as a message
-        messages.append({
-            "role": "user",
-            "content": content
-        })
-
-        if assis_info:
-            messages.append({
-                "role": "assistant",
-                "content": assis_info
-            })
-
-        if tools:
-            resp = client.chat.completions.create(
-                model=model,
-                messages=messages,
-                tools=tools,
-                tool_choice=tool_choice,
-                stream=stream,
-                **kwargs
-            )
-        else:
-            resp = client.chat.completions.create(
-                model=model,
-                messages=messages,
-                stream=stream,
-                **kwargs
-            )
-        return resp
-
     def invoke(
         self,
-        prompt,
+        prompt: str,
         **kwargs
     ):
-        """
-        Invoke the API to get a response based on the provided prompt.
-
-        Args:
-            prompt (str): The input prompt to be processed.
-            **kwargs: Additional keyword arguments to customize the API request.
-
-        Returns:
-            dict: A dictionary containing the type of response and its contents.
-                The possible keys are:
-                - 'type' (str): Indicates the type of response ('text' or 'tool_calls').
-                - 'contents' (str, optional): The text content if the response type is 'text'.
-                - 'tool_params' (dict, optional): The parameters of the tool called if the response type is 'tool_calls'.
-
-        Examples:
-            >>> llm.invoke(
-            >>>     client_id="glm_4_flash",
-            >>>     prompt="What's the weather like in Shenzhen?",
-            >>>     tools=[TOOL_DICT['get_weather']],
-            >>> )
-            {'type': 'tool_calls',
-             'tool_params': Function(arguments='{"location": "Shenzhen"}', name='get_weather')}
-        """
         answer_dict = {}
-
-        resp = self.get_resp(
-            prompt=prompt,
-            stream=False,
-            **kwargs
-        )
-        if resp.choices[0].finish_reason == "stop":
-            answer_dict["type"] = "text"
-            answer_dict["contents"] = resp.choices[0].message.content
-        elif resp.choices[0].finish_reason == "tool_calls":
-            answer_dict["type"] = "tool_calls"
-            answer_dict["tool_params"] = resp.choices[0].message.tool_calls[0].function
-
+        resp = self.get_resp(prompt=prompt, stream=False, **kwargs)
+        answer_dict["type"] = "text"
+        answer_dict["contents"] = resp.choices[0].message.content
         return answer_dict
 
-    def stream(self, prompt, **kwargs):
-        """
-        …
-        …
-        …
-        …
-        or textual content. If an error occurs while processing the chunks, it yields an error message.
-
-        Args:
-            prompt (str): The input prompt to generate responses for.
-            **kwargs: Additional keyword arguments to be passed to the `get_resp` method.
-
-        Yields:
-            dict: A dictionary with the following possible keys:
-                - type (str): Indicates the type of the response ('tool_calls', 'text', or 'error').
-                - tool_params (dict, optional): Parameters of the tool call if the type is 'tool_calls'.
-                - content (str, optional): The generated text content if the type is 'text'.
-                - message (str, optional): An error message if the type is 'error'.
-
-        Examples:
-            >>> resp = llm.stream(
-            >>>     client_id="r1",  # this model can do CoT
-            >>>     prompt=prompt,
-            >>>     # tools=[TOOL_DICT['get_weather']],
-            >>> )
-            >>> for i in resp:
-            >>>     if i['type'] == 'text' and i['content']:
-            >>>         print(i['content'], flush=True, end="")
-        """
-        resp = self.get_resp(prompt=prompt, stream=True, **kwargs)
-
-        for chunk in resp:
-            try:
-                choice = chunk.choices[0]
-
-                # if tool_calls came back
-                if hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls:
-                    tool_calls = choice.delta.tool_calls
-                    if tool_calls:  # guard against an empty list
-                        yield {
-                            "type": "tool_calls",
-                            "tool_params": tool_calls[0].function
-                        }
-                        return  # return immediately, ending the stream
-
-                # yield text content
-                elif hasattr(choice.delta, 'content'):
-                    yield {
-                        "type": "text",
-                        "content": choice.delta.content
-                    }
-
-            except (AttributeError, IndexError) as e:
-                # guard against unexpected structure errors
-                yield {
-                    "type": "error",
-                    "message": f"Stream chunk error: {str(e)}"
-                }
-                return
-
+    def stream(self, prompt: str, **kwargs):
+        resp_stream = self.get_resp(prompt=prompt, stream=True, **kwargs)
+        for event in resp_stream:
+            delta = getattr(event, "delta", None)
+            if delta and hasattr(delta, "content"):
+                yield {"type": "text", "content": delta.content}
         return
 
     def embedding(
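The new `stream` drops the old `tool_calls` and `error` item types and yields only `text` dictionaries built from each event's `delta`. The consumer loop from the removed docstring still applies; a sketch, assuming a client registered under the hypothetical id from the earlier sketches:

```python
# Consume the simplified stream: only {"type": "text", ...} items are yielded now.
for piece in llm.stream(prompt="Hello!", client_id="local_qwen"):
    if piece["type"] == "text" and piece["content"]:
        print(piece["content"], flush=True, end="")
```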
@@ -596,28 +183,13 @@ class OpenAIWrapper(object):
         texts: list[str],
         model: str = None,
         **kwargs
-    ):
-        """
-        Generates embeddings for a list of texts using a specified model.
-
-        Args:
-            client_id (str): The ID of the client to use for generating embeddings.
-            texts (list[str]): A list of texts for which to generate embeddings.
-            model (str, optional): The model to use for generating embeddings.
-                If not provided, the model specified in the client configuration will be used.
-            **kwargs: Additional keyword arguments to be passed to the client embedding creation method.
-
-        Returns:
-            list: A list of embeddings corresponding to the input texts.
-        """
-        if not model:
+    ) -> list:
+        if model is None:
             model = self.client_conf[client_id]['model']
-
         client = self.client_conf[client_id]['client']
         response = client.embeddings.create(
             input=texts,
             model=model,
             **kwargs
         )
-
         return [i.embedding for i in response.data]
{hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/RECORD CHANGED

@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=…
+hdl/_version.py,sha256=2Ln2Sf-BSjJoFUL4Jj83SaeT5iusbHd-apGtpKFocvQ,714
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -136,7 +136,7 @@ hdl/utils/llm/chatgr.py,sha256=5F5PJHe8vz3iCfi4TT54DCLRi1UeJshECdVtgvvvao0,3696
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
-hdl/utils/llm/llm_wrapper.py,sha256=…
+hdl/utils/llm/llm_wrapper.py,sha256=qebQkq70nGwCozqVvCfPegqnKMO7lyhdFobB8CJHXyk,6186
 hdl/utils/llm/ollama.py,sha256=uEdLsNAc6b56r37hNiE3nrd6oZ2lmQ0gYbVvOc9YVIM,1389
 hdl/utils/llm/vis.py,sha256=jRa5l1LHaWtohtdIKVpOH_I4yyXWTbyaLGglFHsV_0Q,28826
 hdl/utils/llm/visrag.py,sha256=0i-VrxqgiV-J7R3VPshu9oc7-rKjFJOldYik3HDXj6M,10176
@@ -146,8 +146,8 @@ hdl/utils/vis_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 hdl/utils/vis_tools/scene_detect.py,sha256=L6TFMT15QHJuOIFcLFVI_RSSSjyTVZhBEqbeUez2auU,6608
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.3.…
-hjxdl-0.3.…
-hjxdl-0.3.…
-hjxdl-0.3.…
-hjxdl-0.3.…
+hjxdl-0.3.56.dist-info/licenses/LICENSE,sha256=lkMiSbeZHBQLB9LJEkS9-L3Z-LBC4yGnKrzHSG8RkPM,2599
+hjxdl-0.3.56.dist-info/METADATA,sha256=dYCo8TjnxT5zrnB_MyMXTkQ-T9ocPhpJmnXQuBBRtqg,1332
+hjxdl-0.3.56.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+hjxdl-0.3.56.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.3.56.dist-info/RECORD,,
{hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/WHEEL: file without changes

{hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/licenses/LICENSE: file without changes

{hjxdl-0.3.55.dist-info → hjxdl-0.3.56.dist-info}/top_level.txt: file without changes