hjxdl 0.3.53__py3-none-any.whl → 0.3.55__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +16 -3
- hdl/utils/llm/chat.py +65 -0
- hdl/utils/llm/llm_wrapper.py +208 -0
- {hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/METADATA +1 -1
- {hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/RECORD +8 -8
- {hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/WHEEL +1 -1
- {hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/licenses/LICENSE +0 -0
- {hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/top_level.txt +0 -0
hdl/_version.py CHANGED

@@ -1,7 +1,14 @@
 # file generated by setuptools-scm
 # don't change, don't track in version control

-__all__ = [
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]

 TYPE_CHECKING = False
 if TYPE_CHECKING:
@@ -9,13 +16,19 @@ if TYPE_CHECKING:
     from typing import Union

     VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
 else:
     VERSION_TUPLE = object
+    COMMIT_ID = object

 version: str
 __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID

-__version__ = version = '0.3.53'
-__version_tuple__ = version_tuple = (0, 3, 53)
+__version__ = version = '0.3.55'
+__version_tuple__ = version_tuple = (0, 3, 55)
+
+__commit_id__ = commit_id = 'g9915d44d3'
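Beyond the version bump, this release starts publishing the VCS commit id alongside the version. A minimal consumer-side sketch (hypothetical usage, not shipped in the package) of reading the metadata that setuptools-scm now writes into hdl/_version.py:

from hdl._version import __version__, __version_tuple__, __commit_id__

print(__version__)        # '0.3.55'
print(__version_tuple__)  # (0, 3, 55)
print(__commit_id__)      # 'g9915d44d3'; typed Union[str, None], so it may be None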
hdl/utils/llm/chat.py CHANGED

@@ -302,6 +302,71 @@ class OpenAI_M:
             continue

     def get_resp(
+        self,
+        prompt: str,
+        client_id: str = None,
+        sys_info: str = None,
+        assis_info: str = None,
+        images: list = None,
+        image_keys: tuple = ("image_url", "url"),
+        stop: list[str] | None = ["USER:", "ASSISTANT:"],
+        model: str = None,
+        stream: bool = True,
+        **kwargs: t.Any,
+    ):
+        """Prepare and send a request to the model, and return the model's response."""
+        if not model:
+            model = self.client_conf[client_id]["model"]
+
+        # === 1. Prepare the input ===
+        content = [{"type": "text", "text": prompt}]
+        if isinstance(image_keys, str):
+            image_keys = (image_keys,) * 3
+        elif len(image_keys) == 2:
+            image_keys = (image_keys[0],) + tuple(image_keys)
+        elif len(image_keys) == 1:
+            image_keys = (image_keys[0],) * 3
+
+        if images:
+            if isinstance(images, str):
+                images = [images]
+            for img in images:
+                content.append({
+                    "type": image_keys[0],
+                    image_keys[1]: {image_keys[2]: img}
+                })
+        else:
+            content = prompt
+
+        # === 2. Build the messages (compatible with the old logic) ===
+        messages = []
+        if sys_info:
+            messages.append({"role": "system", "content": sys_info})
+        messages.append({"role": "user", "content": content})
+        if assis_info:
+            messages.append({"role": "assistant", "content": assis_info})
+
+        # === 3. Switch to responses.create ===
+        client = self.client_conf[client_id]["client"]
+
+        if stream:
+            response = client.responses.create(
+                model=model,
+                input=messages,  # note: the new Responses API takes "input" directly
+                stream=True,
+                **kwargs
+            )
+        else:
+            response = client.responses.create(
+                model=model,
+                input=messages,
+                stream=False,
+                **kwargs
+            )
+
+        return response
+
+    def get_resp_legacy(
         self,
         prompt: str,
         client_id: str = None,
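The rewritten get_resp normalizes image_keys into a (type, outer, inner) triple before shaping each image into a content part, then sends the message list to the Responses endpoint. A standalone sketch of that normalization (hypothetical helper name, mirroring the logic in the diff above; not part of the package API):

# Mirrors the image_keys normalization in get_resp (illustration only).
def normalize_keys(keys):
    if isinstance(keys, str):
        return (keys,) * 3
    if len(keys) == 2:
        return (keys[0],) + tuple(keys)
    if len(keys) == 1:
        return (keys[0],) * 3
    return tuple(keys)

assert normalize_keys("image_url") == ("image_url", "image_url", "image_url")
assert normalize_keys(("image_url", "url")) == ("image_url", "image_url", "url")

# With the default keys, each image is appended to the user content as:
# {"type": "image_url", "image_url": {"url": img}}

Note that when no images are supplied, content collapses back to the bare prompt string.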
hdl/utils/llm/llm_wrapper.py CHANGED

@@ -150,6 +150,214 @@ class OpenAIWrapper(object):
         stream: bool = True,
         response_model = None,
         **kwargs: t.Any,
+    ):
+        """
+        Fully compatible with the callers above: internally this now uses responses.create,
+        but the return value / streaming chunks still masquerade as chat.completions structures.
+        """
+        if not model:
+            model = self.client_conf[client_id]['model']
+
+        client = self.client_conf[client_id]['client']
+        if response_model:
+            import instructor
+            client = instructor.from_openai(client)
+
+        # ===== Build the messages (keeps the original logic) =====
+        messages = []
+        if sys_info:
+            messages.append({"role": "system", "content": sys_info})
+        if history:
+            messages.extend(history)
+
+        # Normalize the media keys (into a 3-tuple)
+        def _triple_keys(keys):
+            if isinstance(keys, str):
+                return (keys,)*3
+            if len(keys) == 2:
+                return (keys[0],) + tuple(keys)
+            if len(keys) == 1:
+                return (keys[0],)*3
+            return keys
+
+        image_keys = _triple_keys(image_keys)
+        video_keys = _triple_keys(video_keys)
+
+        content = [{"type": "text", "text": prompt}]
+        if videos:
+            if isinstance(videos, str):
+                videos = [videos]
+            for v in videos:
+                content.append({
+                    "type": video_keys[0],
+                    video_keys[1]: {video_keys[2]: v}
+                })
+
+        if images:
+            if isinstance(images, str):
+                images = [images]
+            for img in images:
+                content.append({
+                    "type": image_keys[0],
+                    image_keys[1]: {image_keys[2]: img}
+                })
+
+        if (not images) and (not videos):
+            content = prompt
+
+        messages.append({"role": "user", "content": content})
+        if assis_info:
+            messages.append({"role": "assistant", "content": assis_info})
+
+        # ===== Responses API call =====
+        # Note: Responses also accepts the messages shape; tools / tool_choice are passed straight through.
+        if stream:
+            # --- Streaming: return a generator that masquerades as chat.completions chunk structures ---
+            # The caller's `for chunk in resp:` will receive objects exposing
+            # chunk.choices[0].delta.content / .tool_calls
+            resp_stream = client.responses.create(
+                model=model,
+                messages=messages,
+                tools=tools if tools else None,
+                tool_choice=tool_choice if tools else None,
+                stream=True,
+                **kwargs
+            )
+
+            # Adapter layer: convert the Responses event stream into Chat Completions-style chunks
+            from types import SimpleNamespace
+            def _wrap_delta_text(text):
+                # -> chunk.choices[0].delta.content
+                delta = SimpleNamespace(content=text)
+                choice = SimpleNamespace(delta=delta)
+                return SimpleNamespace(choices=[choice])
+
+            def _wrap_delta_tool_call(name, arguments_fragment):
+                # -> chunk.choices[0].delta.tool_calls[0].function.{name, arguments}
+                func = SimpleNamespace(name=name, arguments=arguments_fragment)
+                tool_call = SimpleNamespace(function=func)
+                delta = SimpleNamespace(content=None, tool_calls=[tool_call])
+                choice = SimpleNamespace(delta=delta)
+                return SimpleNamespace(choices=[choice])
+
+            def _generator():
+                # Each event in the SDK's Responses stream carries event.type.
+                # We try to cover the mainstream event names; unknown events are simply ignored.
+                tool_args_acc = {}  # accumulate each tool's arguments (aggregated by id)
+                tool_name_cache = {}
+
+                for event in resp_stream:
+                    et = getattr(event, "type", None)
+
+                    # Text delta
+                    if et == "response.output_text.delta":
+                        delta_text = getattr(event, "delta", None)
+                        if delta_text:
+                            yield _wrap_delta_text(delta_text)
+
+                    # End of text (safe to ignore; the caller judges termination from the yields)
+                    elif et == "response.output_text.done":
+                        pass
+
+                    # Tool-call argument delta
+                    elif et in ("response.tool_call.delta", "response.function_call.delta"):
+                        # Common fields: event.id, event.name, event.delta / event.arguments_delta
+                        call_id = getattr(event, "id", None)
+                        name = getattr(event, "name", None) or tool_name_cache.get(call_id)
+                        args_delta = getattr(event, "arguments_delta", None) or getattr(event, "delta", "")
+
+                        if call_id:
+                            tool_name_cache.setdefault(call_id, name or "")
+                            tool_args_acc.setdefault(call_id, "")
+                            tool_args_acc[call_id] += (args_delta or "")
+
+                        # Also bubble this small increment upward (so the caller sees tool_calls as early as possible)
+                        yield _wrap_delta_tool_call(name or "", args_delta or "")
+
+                    # Tool call finished (emit the full arguments once more so the caller can grab them in one go)
+                    elif et in ("response.tool_call.done", "response.function_call.done"):
+                        call_id = getattr(event, "id", None)
+                        full_name = tool_name_cache.get(call_id, "")
+                        full_args = tool_args_acc.get(call_id, "")
+                        yield _wrap_delta_tool_call(full_name, full_args)
+
+                    # Other events (e.g. response.completed / response.error)
+                    else:
+                        # Extend as needed; silently ignored here
+                        pass
+
+            return _generator()
+
+        else:
+            # --- Non-streaming: adapt the synchronous Responses result into chat.completions style ---
+            resp = client.responses.create(
+                model=model,
+                messages=messages,
+                tools=tools if tools else None,
+                tool_choice=tool_choice if tools else None,
+                stream=False,
+                **kwargs
+            )
+
+            # Extract text & tool calls from the Responses result.
+            # Best-effort compatibility: prefer output_text, otherwise aggregate from the output list.
+            text_out = getattr(resp, "output_text", None)
+            outputs = getattr(resp, "output", None)
+
+            if text_out is None and outputs:
+                # Aggregate message/output_text
+                parts = []
+                for item in outputs:
+                    if getattr(item, "type", "") in ("message",):
+                        # item.content usually holds several blocks (output_text etc.)
+                        content_parts = getattr(item, "content", []) or []
+                        for c in content_parts:
+                            if getattr(c, "type", "") in ("output_text",):
+                                parts.append(getattr(c, "text", ""))
+                text_out = "".join(parts) if parts else None
+
+            # Extract tool calls (if any)
+            tool_calls_wrapped = []
+            if outputs:
+                for item in outputs:
+                    if getattr(item, "type", "") in ("tool_call", "function_call"):
+                        name = getattr(item, "name", "")
+                        arguments = getattr(item, "arguments", "")
+                        from types import SimpleNamespace
+                        func = SimpleNamespace(name=name, arguments=arguments)
+                        tool_calls_wrapped.append(SimpleNamespace(function=func))
+
+            # Fake the chat.completions return structure
+            from types import SimpleNamespace
+            finish_reason = "tool_calls" if tool_calls_wrapped else "stop"
+            message = SimpleNamespace(
+                content=text_out or "",
+                tool_calls=tool_calls_wrapped if tool_calls_wrapped else None
+            )
+            choice = SimpleNamespace(
+                message=message,
+                finish_reason=finish_reason
+            )
+            fake_resp = SimpleNamespace(choices=[choice])
+            return fake_resp
+
+    def get_resp_legacy(
+        self,
+        prompt,
+        client_id: str = None,
+        history: list = None,
+        sys_info: str = None,
+        assis_info: str = None,
+        images: list = None,
+        image_keys: tuple = ("image_url", "url"),
+        videos: list = None,
+        video_keys: tuple = ("video_url", "url"),
+        model: str=None,
+        tools: list = None,
+        tool_choice: str = "auto",
+        stream: bool = True,
+        response_model = None,
+        **kwargs: t.Any,
     ):
         """
         Generates a response from a chat model based on the given prompt and additional context.
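The added method (presumably get_resp; its signature opens above this hunk) keeps the old chat.completions-shaped contract while calling the Responses endpoint underneath, so existing callers need no changes. One caveat worth flagging: chat.py passes the message list as input= (its own comment notes the new Responses API takes "input" directly), while this wrapper passes messages=, so whether the messages= form is accepted depends on the server/SDK in use. A hypothetical caller, assuming an OpenAIWrapper already configured with client id "default" (setup not shown in this diff):

wrapper = OpenAIWrapper(...)  # configuration elided; assumed to register client_id "default"

# Streaming: each yielded object mimics a chat.completions chunk
for chunk in wrapper.get_resp("Hello", client_id="default", stream=True):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)

# Non-streaming: a SimpleNamespace faking a ChatCompletion response
resp = wrapper.get_resp("Hello", client_id="default", stream=False)
print(resp.choices[0].message.content)
print(resp.choices[0].finish_reason)  # "stop", or "tool_calls" if tools fired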
{hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/RECORD CHANGED

@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=J32R-NeGJqzLh_7eUPBcs_wpve_PO9WxTbxII1hs3gE,714
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -131,12 +131,12 @@ hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
 hdl/utils/general/glob.py,sha256=Zuf7WHU0UdUPOs9UrhxmrCiMC8GrHxQU6n3mTThv6yc,1120
 hdl/utils/general/runners.py,sha256=R0lhqABIuT43jEyjFkeio84e_PFfvAkszOP1FBlAnQ8,4927
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
+hdl/utils/llm/chat.py,sha256=DUxvnYTPIEhhrdaXOapmSG_-o_gWe_PWla2cJ3MLHeE,28658
 hdl/utils/llm/chatgr.py,sha256=5F5PJHe8vz3iCfi4TT54DCLRi1UeJshECdVtgvvvao0,3696
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
-hdl/utils/llm/llm_wrapper.py,sha256=
+hdl/utils/llm/llm_wrapper.py,sha256=QTe27eJRNjoUdvQva6RQm5XSQEY5WNf3PkvJsEKa-yE,24817
 hdl/utils/llm/ollama.py,sha256=uEdLsNAc6b56r37hNiE3nrd6oZ2lmQ0gYbVvOc9YVIM,1389
 hdl/utils/llm/vis.py,sha256=jRa5l1LHaWtohtdIKVpOH_I4yyXWTbyaLGglFHsV_0Q,28826
 hdl/utils/llm/visrag.py,sha256=0i-VrxqgiV-J7R3VPshu9oc7-rKjFJOldYik3HDXj6M,10176
@@ -146,8 +146,8 @@ hdl/utils/vis_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 hdl/utils/vis_tools/scene_detect.py,sha256=L6TFMT15QHJuOIFcLFVI_RSSSjyTVZhBEqbeUez2auU,6608
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.3.
-hjxdl-0.3.
-hjxdl-0.3.
-hjxdl-0.3.
-hjxdl-0.3.
+hjxdl-0.3.55.dist-info/licenses/LICENSE,sha256=lkMiSbeZHBQLB9LJEkS9-L3Z-LBC4yGnKrzHSG8RkPM,2599
+hjxdl-0.3.55.dist-info/METADATA,sha256=RZpIrw445VbIpV_I1RWc5RFZR6mrC9g4LaQm0jEpMp4,1332
+hjxdl-0.3.55.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+hjxdl-0.3.55.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.3.55.dist-info/RECORD,,
{hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/licenses/LICENSE
File without changes

{hjxdl-0.3.53.dist-info → hjxdl-0.3.55.dist-info}/top_level.txt
File without changes