hjxdl 0.1.86__py3-none-any.whl → 0.1.88__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +2 -2
- hdl/utils/llm/chat.py +8 -116
- {hjxdl-0.1.86.dist-info → hjxdl-0.1.88.dist-info}/METADATA +1 -1
- {hjxdl-0.1.86.dist-info → hjxdl-0.1.88.dist-info}/RECORD +6 -6
- {hjxdl-0.1.86.dist-info → hjxdl-0.1.88.dist-info}/WHEEL +0 -0
- {hjxdl-0.1.86.dist-info → hjxdl-0.1.88.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
hdl/utils/llm/chat.py
CHANGED
@@ -148,7 +148,7 @@ class OpenAI_M():
         self,
         prompt : str,
         images: list = [],
-        image_keys: tuple = ("image", "image"),
+        image_keys: tuple = ("image", "image", "image"),
         stop: list[str] | None = ["USER:", "ASSISTANT:"],
         model="default_model",
         stream: bool = True,
@@ -171,14 +171,19 @@ class OpenAI_M():
         content = [
             {"type": "text", "text": prompt},
         ]
+        if isinstance(image_keys, str):
+            image_keys = (image_keys, image_keys, image_keys)
+        else:
+            if len(image_keys) == 2:
+                image_keys = (image_keys[0], ) + image_keys
         if images:
             if isinstance(images, str):
                 images = [images]
             for img in images:
                 content.append({
                     "type": image_keys[0],
-                    image_keys[0]: {
-                        image_keys[1]: img
+                    image_keys[1]: {
+                        image_keys[2]: img
                     }
                 })
         else:
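The new logic accepts a single string, a 2-tuple, or a 3-tuple for image_keys, so callers that passed the old two-key form keep working while three distinct key slots become expressible. A minimal standalone sketch of the added normalization (an illustrative reimplementation, not part of hjxdl's public API):

```python
# Standalone sketch of the image_keys normalization added above (illustrative only).
def build_content(prompt, images=(), image_keys=("image", "image", "image")):
    if isinstance(image_keys, str):
        image_keys = (image_keys, image_keys, image_keys)
    elif len(image_keys) == 2:
        # Old two-key callers: reuse the first key for the "type" slot.
        image_keys = (image_keys[0],) + tuple(image_keys)
    content = [{"type": "text", "text": prompt}]
    if isinstance(images, str):
        images = [images]
    for img in images:
        content.append({
            "type": image_keys[0],                # e.g. "image_url"
            image_keys[1]: {image_keys[2]: img},  # e.g. "image_url": {"url": img}
        })
    return content

# With OpenAI-style vision payloads the outer and inner key names differ,
# which the old two-key default could not express:
build_content("What is in this image?",
              images=["https://example.com/cat.png"],
              image_keys=("image_url", "image_url", "url"))
# -> [{'type': 'text', 'text': 'What is in this image?'},
#     {'type': 'image_url', 'image_url': {'url': 'https://example.com/cat.png'}}]
```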
@@ -249,119 +254,6 @@ class OpenAI_M():
         else:
             return self.invoke(*args, **kwargs)
 
-
-    def invoke_response(
-        self,
-        prompt : str,
-        images: list = [],
-        image_keys: tuple = ("image", "image"),
-        stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        model="default_model",
-        **kwargs: t.Any,
-    ):
-        """Get response from chat completion model.
-
-        Args:
-            prompt (str): The prompt text to generate a response for.
-            images (list, optional): List of image URLs to include in the prompt. Defaults to [].
-            image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
-            stop (list[str] | None, optional): List of strings to stop the conversation. Defaults to ["USER:", "ASSISTANT:"].
-            model (str, optional): The model to use for generating the response. Defaults to "default_model".
-            stream (bool, optional): Whether to stream the response or not. Defaults to True.
-            **kwargs: Additional keyword arguments to pass to the chat completion API.
-
-        Yields:
-            str: The generated response content.
-
-        Returns:
-            str: The generated response content if stream is False.
-        """
-        content = [
-            {"type": "text", "text": prompt},
-        ]
-        if images:
-            if isinstance(images, str):
-                images = [images]
-            for img in images:
-                content.append({
-                    "type": image_keys[0],
-                    image_keys[0]: {
-                        image_keys[1]: img
-                    }
-                })
-        else:
-            content = prompt
-
-        response = self.client.chat.completions.create(
-            messages=[{
-                "role": "user",
-                "content": content
-            }],
-            stream=False,
-            model=model,
-            **kwargs
-        )
-
-        return response.choices[0].message.content
-
-
-    def stream_response(
-        self,
-        prompt : str,
-        images: list = [],
-        image_keys: tuple = ("image", "image"),
-        stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        model="default_model",
-        **kwargs: t.Any,
-    ):
-        """Get response from chat completion model.
-
-        Args:
-            prompt (str): The prompt text to generate a response for.
-            images (list, optional): List of image URLs to include in the prompt. Defaults to [].
-            image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
-            stop (list[str] | None, optional): List of strings to stop the conversation. Defaults to ["USER:", "ASSISTANT:"].
-            model (str, optional): The model to use for generating the response. Defaults to "default_model".
-            stream (bool, optional): Whether to stream the response or not. Defaults to True.
-            **kwargs: Additional keyword arguments to pass to the chat completion API.
-
-        Yields:
-            str: The generated response content.
-
-        Returns:
-            str: The generated response content if stream is False.
-        """
-        content = [
-            {"type": "text", "text": prompt},
-        ]
-        if images:
-            if isinstance(images, str):
-                images = [images]
-            for img in images:
-                content.append({
-                    "type": image_keys[0],
-                    image_keys[0]: {
-                        image_keys[1]: img
-                    }
-                })
-        else:
-            content = prompt
-
-        response = self.client.chat.completions.create(
-            messages=[{
-                "role": "user",
-                "content": content
-            }],
-            stream=True,
-            model=model,
-            **kwargs
-        )
-
-        for chunk in response:
-            content = chunk.choices[0].delta.content
-            if content:
-                yield content
-
     def agent_response(
         self,
         prompt : str,
{hjxdl-0.1.86.dist-info → hjxdl-0.1.88.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=ihTSJ27GUS19zHSycBYFHvlgXp6beIKH-CsSY1r7LJ8,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
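The sha256= values in RECORD are the urlsafe-base64 SHA-256 digests of each installed file with the trailing padding stripped, per PEP 376 and the wheel spec. A short sketch of how such an entry can be recomputed for verification:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    # RECORD stores the urlsafe base64 of the SHA-256 digest, '=' padding stripped.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# e.g. record_hash("hdl/_version.py") should reproduce
# "sha256=ihTSJ27GUS19zHSycBYFHvlgXp6beIKH-CsSY1r7LJ8" for the 0.1.88 file.
```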
@@ -127,7 +127,7 @@ hdl/utils/desc/template.py,sha256=a0UAkkKctt_EHY9UECsIIAwVkGPcM1Hr01HSkRMeIuw,12
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
+hdl/utils/llm/chat.py,sha256=wGMHryafswF3xrkpMjthMQlx5ON8PHbqcbpKthjMxMY,12497
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
@@ -136,7 +136,7 @@ hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.1.86.dist-info/METADATA,sha256=
-hjxdl-0.1.86.dist-info/WHEEL,sha256=
-hjxdl-0.1.86.dist-info/top_level.txt,sha256=
-hjxdl-0.1.86.dist-info/RECORD,,
+hjxdl-0.1.88.dist-info/METADATA,sha256=wN8xFReeugvOrrZfjYImWUomKN_DrneKQPLf-akoPLQ,903
+hjxdl-0.1.88.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+hjxdl-0.1.88.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.1.88.dist-info/RECORD,,
{hjxdl-0.1.86.dist-info → hjxdl-0.1.88.dist-info}/WHEEL
File without changes
{hjxdl-0.1.86.dist-info → hjxdl-0.1.88.dist-info}/top_level.txt
File without changes