hjxdl 0.1.84__py3-none-any.whl → 0.1.85__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hdl/_version.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
12
12
  __version_tuple__: VERSION_TUPLE
13
13
  version_tuple: VERSION_TUPLE
14
14
 
15
- __version__ = version = '0.1.84'
16
- __version_tuple__ = version_tuple = (0, 1, 84)
15
+ __version__ = version = '0.1.85'
16
+ __version_tuple__ = version_tuple = (0, 1, 85)
hdl/utils/llm/chat.py CHANGED
@@ -206,35 +206,115 @@ class OpenAI_M():
206
206
 
207
207
  def invoke(
208
208
  self,
209
- *args,
210
- **kwargs
211
- ) -> str:
212
- """Invoke the function with the given arguments and keyword arguments.
209
+ prompt : str,
210
+ images: list = [],
211
+ image_keys: tuple = ("image", "image"),
212
+ stop: list[str] | None = ["USER:", "ASSISTANT:"],
213
+ model="default_model",
214
+ **kwargs: t.Any,
215
+ ):
216
+ """Get response from chat completion model.
213
217
 
214
218
  Args:
215
- *args: Variable length argument list.
216
- **kwargs: Arbitrary keyword arguments.
219
+ prompt (str): The prompt text to generate a response for.
220
+ images (list, optional): List of image URLs to include in the prompt. Defaults to [].
221
+ image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
222
+ stop (list[str] | None, optional): List of strings intended to stop the conversation. Defaults to ["USER:", "ASSISTANT:"]. Note: currently accepted but not forwarded to the chat completion call.
223
+ model (str, optional): The model to use for generating the response. Defaults to "default_model".
224
+ Note: this method always sends the request with stream=False; the complete response is returned at once.
225
+ **kwargs: Additional keyword arguments to pass to the chat completion API.
226
+
227
+ Note:
228
+ Unlike ``stream``, this method yields nothing; see Returns below.
217
229
 
218
230
  Returns:
219
- str: The response obtained by calling the get_resp method with the provided arguments and keyword arguments.
231
+ str: The generated response content.
220
232
  """
221
- return self.get_resp(*args, stream=False, **kwargs)
233
+ content = [
234
+ {"type": "text", "text": prompt},
235
+ ]
236
+ if images:
237
+ if isinstance(images, str):
238
+ images = [images]
239
+ for img in images:
240
+ content.append({
241
+ "type": image_keys[0],
242
+ image_keys[0]: {
243
+ image_keys[1]: img
244
+ }
245
+ })
246
+ else:
247
+ content = prompt
248
+
249
+ response = self.client.chat.completions.create(
250
+ messages=[{
251
+ "role": "user",
252
+ "content": content
253
+ }],
254
+ stream=False,
255
+ model=model,
256
+ **kwargs
257
+ )
258
+
259
+ return response.choices[0].message.content
260
+
222
261
 
223
262
  def stream(
224
263
  self,
225
- *args,
226
- **kwargs
264
+ prompt : str,
265
+ images: list = [],
266
+ image_keys: tuple = ("image", "image"),
267
+ stop: list[str] | None = ["USER:", "ASSISTANT:"],
268
+ model="default_model",
269
+ **kwargs: t.Any,
227
270
  ):
228
- """Stream data from the server.
271
+ """Get response from chat completion model.
229
272
 
230
273
  Args:
231
- *args: Variable length argument list.
232
- **kwargs: Arbitrary keyword arguments.
274
+ prompt (str): The prompt text to generate a response for.
275
+ images (list, optional): List of image URLs to include in the prompt. Defaults to [].
276
+ image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
277
+ stop (list[str] | None, optional): List of strings intended to stop the conversation. Defaults to ["USER:", "ASSISTANT:"]. Note: currently accepted but not forwarded to the chat completion call.
278
+ model (str, optional): The model to use for generating the response. Defaults to "default_model".
279
+ Note: the request is always sent with stream=True; there is no ``stream`` parameter on this method.
280
+ **kwargs: Additional keyword arguments to pass to the chat completion API.
281
+
282
+ Yields:
283
+ str: The generated response content.
233
284
 
234
285
  Returns:
235
- Response from the server with streaming enabled.
286
+ None: this method is a generator; all content is produced via the Yields section above.
236
287
  """
237
- return self.get_resp(*args, stream=True, **kwargs)
288
+ content = [
289
+ {"type": "text", "text": prompt},
290
+ ]
291
+ if images:
292
+ if isinstance(images, str):
293
+ images = [images]
294
+ for img in images:
295
+ content.append({
296
+ "type": image_keys[0],
297
+ image_keys[0]: {
298
+ image_keys[1]: img
299
+ }
300
+ })
301
+ else:
302
+ content = prompt
303
+
304
+ response = self.client.chat.completions.create(
305
+ messages=[{
306
+ "role": "user",
307
+ "content": content
308
+ }],
309
+ stream=True,
310
+ model=model,
311
+ **kwargs
312
+ )
313
+
314
+ for chunk in response:
315
+ content = chunk.choices[0].delta.content
316
+ if content:
317
+ yield content
238
318
 
239
319
  def agent_response(
240
320
  self,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: hjxdl
3
- Version: 0.1.84
3
+ Version: 0.1.85
4
4
  Summary: A collection of functions for Jupyter notebooks
5
5
  Home-page: https://github.com/huluxiaohuowa/hdl
6
6
  Author: Jianxing Hu
@@ -1,5 +1,5 @@
1
1
  hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
2
- hdl/_version.py,sha256=d0L59EfPQwDJVDuCyDuNn4UyYbrKwPsGT6cg6MNTnD0,413
2
+ hdl/_version.py,sha256=u6xmXDjp51Z7_S7NBNHDiPQDrERCBS10JMdkBL0D0x4,413
3
3
  hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
5
5
  hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -127,7 +127,7 @@ hdl/utils/desc/template.py,sha256=a0UAkkKctt_EHY9UECsIIAwVkGPcM1Hr01HSkRMeIuw,12
127
127
  hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
128
128
  hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
129
129
  hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
130
- hdl/utils/llm/chat.py,sha256=XU_tEwRGRDlg7KPdLetE3K6dbD3aPcmWGU7Vd56g0XQ,11748
130
+ hdl/utils/llm/chat.py,sha256=apKakrwtApXbVp-3iILeJSZHWLlMGGHyvcTZD8NnMqs,14806
131
131
  hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
132
132
  hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
133
133
  hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
@@ -136,7 +136,7 @@ hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
136
136
  hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
137
137
  hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
138
138
  hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
139
- hjxdl-0.1.84.dist-info/METADATA,sha256=NmWiAiAY4fIeJrJKSEs_FsD7wI76d8QafZW_N4zNWhI,903
140
- hjxdl-0.1.84.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
141
- hjxdl-0.1.84.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
142
- hjxdl-0.1.84.dist-info/RECORD,,
139
+ hjxdl-0.1.85.dist-info/METADATA,sha256=sQC_9QeM6FgZN7Pr5W6gUKlXX_qEpf4QyOho9Wi7rSo,903
140
+ hjxdl-0.1.85.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
141
+ hjxdl-0.1.85.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
142
+ hjxdl-0.1.85.dist-info/RECORD,,
File without changes