hjxdl 0.1.85__py3-none-any.whl → 0.1.87__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +2 -2
- hdl/utils/llm/chat.py +41 -108
- {hjxdl-0.1.85.dist-info → hjxdl-0.1.87.dist-info}/METADATA +1 -1
- {hjxdl-0.1.85.dist-info → hjxdl-0.1.87.dist-info}/RECORD +6 -6
- {hjxdl-0.1.85.dist-info → hjxdl-0.1.87.dist-info}/WHEEL +0 -0
- {hjxdl-0.1.85.dist-info → hjxdl-0.1.87.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
hdl/utils/llm/chat.py
CHANGED
@@ -154,22 +154,19 @@ class OpenAI_M():
         stream: bool = True,
         **kwargs: t.Any,
     ):
-        """Get response from
+        """Get response from chatbot based on the provided prompt and optional images.
 
         Args:
-            prompt (str): The prompt
-            images (list, optional): List of
+            prompt (str): The prompt to provide to the chatbot.
+            images (list, optional): List of images to include in the response. Defaults to [].
             image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
-            stop (list[str] | None, optional): List of strings
+            stop (list[str] | None, optional): List of strings that indicate the end of the conversation. Defaults to ["USER:", "ASSISTANT:"].
             model (str, optional): The model to use for generating the response. Defaults to "default_model".
-            stream (bool, optional): Whether to stream the response
-            **kwargs: Additional keyword arguments to pass to the
-
-        Yields:
-            str: The generated response content.
+            stream (bool, optional): Whether to stream the response. Defaults to True.
+            **kwargs: Additional keyword arguments to pass to the chatbot API.
 
         Returns:
-
+            dict: The response from the chatbot.
         """
         content = [
             {"type": "text", "text": prompt},
@@ -196,126 +193,62 @@ class OpenAI_M():
             model=model,
             **kwargs
         )
-
-            return response.choices[0].message.content
-        else:
-            for chunk in response:
-                content = chunk.choices[0].delta.content
-                if content:
-                    yield content
+        return response
 
     def invoke(
         self,
-
-
-        image_keys: tuple = ("image", "image"),
-        stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        model="default_model",
-        **kwargs: t.Any,
+        *args,
+        **kwargs
     ):
-        """
+        """Invoke the function with the given arguments and keyword arguments.
 
         Args:
-
-
-            image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
-            stop (list[str] | None, optional): List of strings to stop the conversation. Defaults to ["USER:", "ASSISTANT:"].
-            model (str, optional): The model to use for generating the response. Defaults to "default_model".
-            stream (bool, optional): Whether to stream the response or not. Defaults to True.
-            **kwargs: Additional keyword arguments to pass to the chat completion API.
-
-        Yields:
-            str: The generated response content.
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
 
         Returns:
-            str: The
+            str: The content of the first choice message in the response.
         """
-
-            {"type": "text", "text": prompt},
-        ]
-        if images:
-            if isinstance(images, str):
-                images = [images]
-            for img in images:
-                content.append({
-                    "type": image_keys[0],
-                    image_keys[0]: {
-                        image_keys[1]: img
-                    }
-                })
-        else:
-            content = prompt
-
-        response = self.client.chat.completions.create(
-            messages=[{
-                "role": "user",
-                "content": content
-            }],
-            stream=False,
-            model=model,
-            **kwargs
-        )
-
+        response = self.get_resp(*args, stream=False, **kwargs)
         return response.choices[0].message.content
 
-
     def stream(
         self,
-
-
-        image_keys: tuple = ("image", "image"),
-        stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        model="default_model",
-        **kwargs: t.Any,
+        *args,
+        **kwargs
     ):
-        """
+        """Stream content from the response in chunks.
 
-
-
-
-            image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
-            stop (list[str] | None, optional): List of strings to stop the conversation. Defaults to ["USER:", "ASSISTANT:"].
-            model (str, optional): The model to use for generating the response. Defaults to "default_model".
-            stream (bool, optional): Whether to stream the response or not. Defaults to True.
-            **kwargs: Additional keyword arguments to pass to the chat completion API.
-
-        Yields:
-            str: The generated response content.
+        Args:
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
 
-
-
+        Yields:
+            str: Content in chunks from the response.
         """
-
-            {"type": "text", "text": prompt},
-        ]
-        if images:
-            if isinstance(images, str):
-                images = [images]
-            for img in images:
-                content.append({
-                    "type": image_keys[0],
-                    image_keys[0]: {
-                        image_keys[1]: img
-                    }
-                })
-        else:
-            content = prompt
-
-        response = self.client.chat.completions.create(
-            messages=[{
-                "role": "user",
-                "content": content
-            }],
-            stream=True,
-            model=model,
-            **kwargs
-        )
-
+        response = self.get_resp(*args, stream=True, **kwargs)
         for chunk in response:
             content = chunk.choices[0].delta.content
             if content:
                 yield content
 
+
+    def chat(self, *args, stream=True, **kwargs):
+        """Call either the stream or invoke method based on the value of the stream parameter.
+
+        Args:
+            *args: Variable length argument list.
+            stream (bool): A flag to determine whether to call the stream method (default is True).
+            **kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            The result of calling either the stream or invoke method based on the value of the stream parameter.
+        """
+        if stream:
+            return self.stream(*args, **kwargs)
+        else:
+            return self.invoke(*args, **kwargs)
+
     def agent_response(
         self,
         prompt : str,
{hjxdl-0.1.85.dist-info → hjxdl-0.1.87.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=bUs_HL_pbw_y81ldgiSux8MIsv2flHylExoYoRPNJbY,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -127,7 +127,7 @@ hdl/utils/desc/template.py,sha256=a0UAkkKctt_EHY9UECsIIAwVkGPcM1Hr01HSkRMeIuw,12
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
+hdl/utils/llm/chat.py,sha256=3iW5FSfBjHbWY0X_mfuNM2YXSTHDTp_NMMyNovrhY_Y,12275
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
@@ -136,7 +136,7 @@ hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
+hjxdl-0.1.87.dist-info/METADATA,sha256=sbdyBdLiyNp_QsuY-1PRDw7_ZRPr_7KZCje6B8HaoIE,903
+hjxdl-0.1.87.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+hjxdl-0.1.87.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.1.87.dist-info/RECORD,,
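Each RECORD row has the form path,sha256=&lt;digest&gt;,size, where the digest is the unpadded urlsafe base64 SHA-256 of the file per the wheel spec; RECORD lists itself with empty hash and size fields. A sketch for checking a row against an unpacked wheel (this helper is hypothetical, not part of the package):

```python
# Hypothetical helper, not part of hjxdl: verify one RECORD row against
# files unpacked from the wheel. Row format: path,sha256=<digest>,size.
import base64
import hashlib
from pathlib import Path

def check_record_row(row: str, root: Path = Path(".")) -> bool:
    path, hash_field, size = row.rsplit(",", 2)
    if not hash_field and not size:
        return True  # RECORD's own row has empty hash and size fields
    data = (root / path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    digest = digest.rstrip(b"=").decode("ascii")
    return hash_field == f"sha256={digest}" and len(data) == int(size)

# e.g. the new chat.py row recorded above:
# check_record_row("hdl/utils/llm/chat.py,sha256=3iW5FSfBjHbWY0X_mfuNM2YXSTHDTp_NMMyNovrhY_Y,12275")
```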
{hjxdl-0.1.85.dist-info → hjxdl-0.1.87.dist-info}/WHEEL
File without changes
{hjxdl-0.1.85.dist-info → hjxdl-0.1.87.dist-info}/top_level.txt
File without changes