hjxdl 0.1.24__py3-none-any.whl → 0.1.26__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- hdl/_version.py +2 -2
- hdl/utils/llm/chat.py +31 -113
- {hjxdl-0.1.24.dist-info → hjxdl-0.1.26.dist-info}/METADATA +1 -1
- {hjxdl-0.1.24.dist-info → hjxdl-0.1.26.dist-info}/RECORD +6 -6
- {hjxdl-0.1.24.dist-info → hjxdl-0.1.26.dist-info}/WHEEL +0 -0
- {hjxdl-0.1.24.dist-info → hjxdl-0.1.26.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
hdl/utils/llm/chat.py
CHANGED
@@ -205,119 +205,6 @@ class GGUF_M(Llama):
         # self.resps[-1] = "".join(self.resps[-1])


-# class GGUF_M():
-#     def __init__(
-#         self,
-#         model_path :str,
-#         device: str='gpu',
-#         generation_kwargs: dict = {},
-#         server_ip: str = "127.0.0.1",
-#         server_port: int = 8000,
-#     ):
-#         """Initialize the model with the provided model path and optional parameters.
-
-#         Args:
-#             model_path (str): The path to the model.
-#             device (str, optional): The device to use for model initialization. Defaults to 'gpu'.
-#             generation_kwargs (dict, optional): Additional keyword arguments for model generation. Defaults to {}.
-#             server_ip (str, optional): The IP address of the server. Defaults to "127.0.0.1".
-#             server_port (int, optional): The port of the server. Defaults to 8000.
-#         """
-#         # 从本地初始化模型
-#         # super().__init__()
-#         self.generation_kwargs = generation_kwargs
-#         print("正在从本地加载模型...")
-#         if device == 'cpu':
-#             self.model = Llama(
-#                 model_path=model_path,
-#                 n_threads=self.generation_kwargs['num_threads'],
-#                 n_ctx=self.generation_kwargs['max_context_length'],
-#             )
-#         else:
-#             self.model = Llama(
-#                 model_path=model_path,
-#                 n_threads=self.generation_kwargs['num_threads'],
-#                 n_ctx=self.generation_kwargs['max_context_length'],
-#                 n_gpu_layers=-1,
-#                 flash_attn=True
-#             )
-
-#         print("完成本地模型的加载")
-
-#     def invoke(
-#         self,
-#         prompt : str,
-#         stop: list[str] | None = ["USER:", "ASSISTANT:"],
-#         # history: list = [],
-#         **kwargs: t.Any,
-#     ) -> str:
-#         """Invoke the model to generate a response based on the given prompt.
-
-#         Args:
-#             prompt (str): The prompt to be used for generating the response.
-#             stop (list[str], optional): List of strings that indicate when the model should stop generating the response. Defaults to ["USER:", "ASSISTANT:"].
-#             **kwargs: Additional keyword arguments to be passed to the model.
-
-#         Returns:
-#             str: The generated response based on the prompt.
-#         """
-#         prompt_final = f"USER:\n{prompt}\nASSISTANT:\n"
-
-#         result = self.model.create_completion(
-#             prompt_final,
-#             repeat_penalty=self.generation_kwargs["repetition_penalty"],
-#             max_tokens=self.generation_kwargs["max_new_tokens"],
-#             stop=stop,
-#             echo=False,
-#             temperature=self.generation_kwargs["temperature"],
-#             mirostat_mode = 2,
-#             mirostat_tau=4.0,
-#             mirostat_eta=1.1
-#         )
-#         resp = result['choices'][0]['text']
-#         # history.append(
-#         #     [prompt, resp]
-#         # )
-#         return resp
-
-#     def stream(
-#         self,
-#         prompt: str,
-#         stop: list[str] | None = ["USER:", "ASSISTANT:"],
-#         # history: list = [],
-#         **kwargs: t.Any,
-#     ):
-#         """Generate text responses based on the given prompt using the model.
-
-#         Args:
-#             prompt (str): The prompt to generate text responses.
-#             stop (list[str], optional): List of strings to stop the generation. Defaults to ["USER:", "ASSISTANT:"].
-#             **kwargs: Additional keyword arguments for the model.
-
-#         Yields:
-#             str: Text responses generated by the model based on the prompt.
-#         """
-#         prompt = f"USER:\n{prompt}\nASSISTANT:\n"
-#         output = self.model.create_completion(
-#             prompt,
-#             stream=True,
-#             repeat_penalty=self.generation_kwargs["repetition_penalty"],
-#             max_tokens=self.generation_kwargs["max_new_tokens"],
-#             stop=stop,
-#             echo=False,
-#             temperature=self.generation_kwargs["temperature"],
-#             mirostat_mode = 2,
-#             mirostat_tau=4.0,
-#             mirostat_eta=1.1
-#         )
-#         # history.append([])
-#         for chunk in output:
-#             item = chunk['choices'][0]['text']
-#             # self.resps[-1].append(item)
-#             yield chunk['choices'][0]['text']
-#         # self.resps[-1] = "".join(self.resps[-1])
-
-
 class OpenAI_M():
     def __init__(
         self,
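The hunk above removes a long commented-out wrapper that loaded a local GGUF model through llama-cpp-python and exposed invoke/stream helpers. For readers tracking what that dead code did, here is a minimal, self-contained sketch of the same local-loading and streaming pattern; the model path and the generation settings below are illustrative placeholders, not values taken from the hjxdl package.

```python
# Sketch of the local-loading/streaming pattern from the removed code.
# "model.gguf" and the generation_kwargs values are placeholders.
from llama_cpp import Llama

generation_kwargs = {
    "num_threads": 8,
    "max_context_length": 4096,
    "max_new_tokens": 512,
    "temperature": 0.7,
    "repetition_penalty": 1.1,
}

# GPU branch of the removed code: offload all layers, enable flash attention.
model = Llama(
    model_path="model.gguf",
    n_threads=generation_kwargs["num_threads"],
    n_ctx=generation_kwargs["max_context_length"],
    n_gpu_layers=-1,
    flash_attn=True,
)

prompt = "USER:\nHello\nASSISTANT:\n"
for chunk in model.create_completion(
    prompt,
    stream=True,
    max_tokens=generation_kwargs["max_new_tokens"],
    temperature=generation_kwargs["temperature"],
    repeat_penalty=generation_kwargs["repetition_penalty"],
    stop=["USER:", "ASSISTANT:"],
    echo=False,
    mirostat_mode=2,
    mirostat_tau=4.0,
    mirostat_eta=1.1,
):
    # Each streamed chunk carries the next text fragment.
    print(chunk["choices"][0]["text"], end="", flush=True)
```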
@@ -434,6 +321,18 @@ class OpenAI_M():
         prompt: str,
         **kwargs: t.Any
     ):
+        """Get decision based on the provided prompt and additional keyword arguments.
+
+        Args:
+            prompt (str): The prompt for decision making.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            dict: A dictionary containing the decision.
+
+        Example:
+            decision = get_decision("Should I buy this product?", option1="yes", option2="no")
+        """
         prompt_final = FN_TEMPLATE
         for tool in self.tools:
             prompt_final += self.tool_desc.get(tool.__name__, "")
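The docstring added above documents get_decision as returning a dict. A small hypothetical helper, assuming an already-constructed OpenAI_M instance (its constructor is not part of this diff); the check on the "function_name" key mirrors the code shown in the next hunk.

```python
# Hypothetical helper around the documented get_decision method.
# `llm` is assumed to be an existing OpenAI_M instance; construction is
# omitted because the constructor signature is not shown in this diff.
def choose_tool(llm, prompt: str):
    decision = llm.get_decision(prompt)          # dict, per the new docstring
    return decision.get("function_name", None)   # None means no tool selected
```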
@@ -448,6 +347,15 @@ class OpenAI_M():
         prompt: str,
         **kwargs: t.Any
     ):
+        """Get the result from a tool based on the provided prompt and keyword arguments.
+
+        Args:
+            prompt (str): The prompt to get the decision for.
+            **kwargs: Additional keyword arguments to pass to the decision function.
+
+        Returns:
+            str: The result from the selected tool based on the decision made.
+        """
         decision_dict = self.get_decision(prompt, **kwargs)
         if decision_dict.get("function_name", None) is None:
             return ""
@@ -466,7 +374,17 @@ class OpenAI_M():
         stream = True,
         **kwargs: t.Any
     ):
+        """'''Generate agent response based on the given prompt.

+        Args:
+            prompt (str): The prompt for which agent response is generated.
+            stream (bool, optional): Flag to determine if the response should be streamed. Defaults to True.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            str: The agent response based on the prompt.
+        '''
+        """
         decision_dict = self.get_decision(prompt, **kwargs)
         if decision_dict.get("function_name", None) is None:
             return self.stream(prompt, **kwargs)
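Taken together, the docstrings added in the three hunks above describe a simple tool-dispatch flow: get_decision picks a tool, get_tool_result runs it, and agent_response falls back to plain streaming when no tool applies (the `return self.stream(prompt, **kwargs)` branch shown above). A hedged sketch of that flow, again assuming an existing OpenAI_M instance and assuming the streaming fallback yields text chunks:

```python
# Illustrative dispatch flow based on the added docstrings; `llm` is an
# assumed, pre-built OpenAI_M instance, and the generator behaviour of the
# streaming fallback is inferred from the `self.stream(...)` branch in the diff.
def answer(llm, prompt: str) -> str:
    decision = llm.get_decision(prompt)
    if decision.get("function_name", None) is None:
        # No tool selected: agent_response defers to plain streamed generation.
        return "".join(llm.agent_response(prompt, stream=True))
    # A tool was selected: get_tool_result executes it and returns its result.
    return llm.get_tool_result(prompt)
```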
{hjxdl-0.1.24.dist-info → hjxdl-0.1.26.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=pHptU6h1OxA8-tsynXa5Rz3N6XELevZ_27Ye-N1R-ds,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -125,14 +125,14 @@ hdl/utils/desc/template.py,sha256=uKPGIgYFgSY4uBvfsbh5fZN7s39NdGQ2XSVH3pIpsuc,10
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
+hdl/utils/llm/chat.py,sha256=LH595PmR0MfFPD5XKff93FOxdL2aQQSt6VjR9nyksqU,13414
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
+hjxdl-0.1.26.dist-info/METADATA,sha256=mG6nvKosLF2q7cuIYTDmGvQ1PZMXTgJTzr9NB4PvpD0,627
+hjxdl-0.1.26.dist-info/WHEEL,sha256=UvcQYKBHoFqaQd6LKyqHw9fxEolWLQnlzP0h_LgJAfI,91
+hjxdl-0.1.26.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.1.26.dist-info/RECORD,,
{hjxdl-0.1.24.dist-info → hjxdl-0.1.26.dist-info}/WHEEL
File without changes
{hjxdl-0.1.24.dist-info → hjxdl-0.1.26.dist-info}/top_level.txt
File without changes