hjxdl 0.1.39__py3-none-any.whl → 0.1.41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +2 -2
- hdl/utils/llm/chat.py +33 -63
- {hjxdl-0.1.39.dist-info → hjxdl-0.1.41.dist-info}/METADATA +1 -1
- {hjxdl-0.1.39.dist-info → hjxdl-0.1.41.dist-info}/RECORD +6 -6
- {hjxdl-0.1.39.dist-info → hjxdl-0.1.41.dist-info}/WHEEL +0 -0
- {hjxdl-0.1.39.dist-info → hjxdl-0.1.41.dist-info}/top_level.txt +0 -0
hdl/_version.py CHANGED
hdl/utils/llm/chat.py CHANGED
@@ -198,47 +198,47 @@ class OpenAI_M():
             if content:
                 yield content
 
-    def get_decision(
+    def agent_response(
         self,
-        prompt: str,
+        prompt : str,
+        stream = True,
         **kwargs: t.Any
     ):
-        """
+        """'''Generate agent response based on the given prompt.
 
         Args:
-            prompt (str): The prompt for
+            prompt (str): The prompt for which agent response is generated.
+            stream (bool, optional): Flag to determine if the response should be streamed. Defaults to True.
             **kwargs: Additional keyword arguments.
 
         Returns:
-
-
-        Example:
-            decision = get_decision("Should I buy this product?", option1="yes", option2="no")
+            str: The agent response based on the prompt.
+        '''
         """
+        decision_dict = self.get_decision(prompt, **kwargs)
+        if decision_dict.get("function_name", None) is None:
+            return self.stream(prompt, **kwargs)
+        else:
+            tool_result = str(self.get_tool_result(prompt, **kwargs))
+            prompt_final = "根据上下文回答最后的用户问题:\n上下文信息:\n"
+            prompt_final += tool_result
+            prompt_final += f"\n用户的问题:\n{prompt}"
+            if stream:
+                return self.stream(prompt_final, **kwargs)
+            else:
+                return self.invoke(prompt_final, **kwargs)
+
+    def get_decision(self, prompt: str, **kwargs: t.Any):
+        # 该方法与之前一致...
         prompt_final = FN_TEMPLATE
         for tool in self.tools:
             prompt_final += self.tool_desc.get(tool.__name__, "")
         prompt_final += f"\n用户的问题:\n{prompt}"
-
-        decision_dict_str = self.invoke(prompt_final ,**kwargs)
-        print(decision_dict_str)
+        decision_dict_str = self.invoke(prompt_final, **kwargs)
         return decision_dict_str
-        # return json.loads(decision_dict)
-
-    def get_tool_result(
-        self,
-        prompt: str,
-        **kwargs: t.Any
-    ):
-        """Get the result from a tool based on the provided prompt and keyword arguments.
 
-
-
-            **kwargs: Additional keyword arguments to pass to the decision function.
-
-        Returns:
-            str: The result from the selected tool based on the decision made.
-        """
+    def get_tool_result(self, prompt: str, **kwargs: t.Any):
+        # 同步方法与之前一致...
         decision_dict_str = self.get_decision(prompt, **kwargs)
         try:
             decision_dict = json.loads(decision_dict_str)
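The new `agent_response` entry point decides, per prompt, whether a tool call is needed: `get_decision` asks the model to pick a tool by appending each registered tool's description from `self.tool_desc` to `FN_TEMPLATE`, and when no `function_name` comes back the prompt is answered by plain streaming. One detail visible in the hunk: `get_decision` returns the model's raw reply as a string, yet `agent_response` calls `.get(...)` on it directly; the `json.loads` parse only happens inside `get_tool_result`. The sketch below is a loose restatement of the dispatch flow, not the shipped code, with that parse made explicit; the `llm` argument and the `agent_response_sketch` name are stand-ins.

import json

def agent_response_sketch(llm, prompt: str, stream: bool = True, **kwargs):
    # get_decision() returns the model's raw reply, a JSON string in the
    # happy path; parse it before inspecting "function_name".
    decision = json.loads(llm.get_decision(prompt, **kwargs))
    if decision.get("function_name") is None:
        # No tool requested: answer the prompt directly.
        return llm.stream(prompt, **kwargs)
    # A tool was selected: run it, then fold its output into a final prompt.
    tool_result = str(llm.get_tool_result(prompt, **kwargs))
    prompt_final = (
        "根据上下文回答最后的用户问题:\n上下文信息:\n"  # "Answer the user's final question from this context:"
        + tool_result
        + f"\n用户的问题:\n{prompt}"  # "The user's question:"
    )
    return llm.stream(prompt_final, **kwargs) if stream else llm.invoke(prompt_final, **kwargs)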
@@ -259,11 +259,7 @@ class OpenAI_M():
             print(e)
             return ""
 
-    async def get_tool_result_async(
-        self,
-        prompt: str,
-        **kwargs: t.Any
-    ):
+    async def get_tool_result_async(self, prompt: str, **kwargs: t.Any):
         """
         Asynchronous version of the get_tool_result function that can run in parallel using multiprocessing.
 
@@ -274,6 +270,10 @@ class OpenAI_M():
         Returns:
             str: The result from the selected tool based on the decision made.
         """
+
+        def run_tool_with_kwargs(tool, func_kwargs):
+            return tool(**func_kwargs)
+
         decision_dict_str = await asyncio.to_thread(self.get_decision, prompt, **kwargs)
         try:
             decision_dict = json.loads(decision_dict_str)
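The `run_tool_with_kwargs` helper added above exists because `loop.run_in_executor()` forwards positional arguments only, so keyword arguments must be bound into the callable before submission. A caveat worth flagging: `ProcessPoolExecutor` pickles the callable it receives, and a function defined inside another function is not picklable by the default pickler, so a module-level wrapper, or `functools.partial` as in the sketch below, is the more robust shape. The `search_web` tool here is hypothetical.

import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial

def search_web(query: str, max_hits: int = 5) -> str:
    # Hypothetical tool; module-level so ProcessPoolExecutor can pickle it.
    return f"{max_hits} results for {query!r}"

async def main() -> None:
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as pool:
        # run_in_executor() accepts positional args only; partial() binds
        # the keyword argument up front and remains picklable.
        result = await loop.run_in_executor(
            pool, partial(search_web, "python wheels", max_hits=3)
        )
    print(result)

if __name__ == "__main__":
    asyncio.run(main())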
@@ -292,38 +292,8 @@ class OpenAI_M():
 
             loop = asyncio.get_running_loop()
             with ProcessPoolExecutor() as pool:
-                result = await loop.run_in_executor(pool, tool_final,
+                result = await loop.run_in_executor(pool, run_tool_with_kwargs, tool_final, func_kwargs)
                 return result
         except Exception as e:
             print(e)
-            return ""
-
-    def agent_response(
-        self,
-        prompt : str,
-        stream = True,
-        **kwargs: t.Any
-    ):
-        """'''Generate agent response based on the given prompt.
-
-        Args:
-            prompt (str): The prompt for which agent response is generated.
-            stream (bool, optional): Flag to determine if the response should be streamed. Defaults to True.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            str: The agent response based on the prompt.
-        '''
-        """
-        decision_dict = self.get_decision(prompt, **kwargs)
-        if decision_dict.get("function_name", None) is None:
-            return self.stream(prompt, **kwargs)
-        else:
-            tool_result = str(self.get_tool_result(prompt, **kwargs))
-            prompt_final = "根据上下文回答最后的用户问题:\n上下文信息:\n"
-            prompt_final += tool_result
-            prompt_final += f"\n用户的问题:\n{prompt}"
-            if stream:
-                return self.stream(prompt_final, **kwargs)
-            else:
-                return self.invoke(prompt_final, **kwargs)
+            return ""
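With these changes the async path mirrors the sync one: the decision step runs in a thread via `asyncio.to_thread`, and the chosen tool runs in a process pool. A minimal usage sketch; the diff does not show `OpenAI_M`'s constructor, so the bare instantiation below is an assumption (real deployments likely pass a server URL and model name):

import asyncio
from hdl.utils.llm.chat import OpenAI_M

async def demo() -> None:
    llm = OpenAI_M()  # assumed constructor arguments; see note above
    # Returns "" on any failure, per the except branch in the diff.
    result = await llm.get_tool_result_async("What's the weather in Beijing today?")
    print(result)

if __name__ == "__main__":
    asyncio.run(demo())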
{hjxdl-0.1.39.dist-info → hjxdl-0.1.41.dist-info}/RECORD CHANGED
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=65hEflmRlvVz3zHSlCs2w2jXSKc7YICoUmNE8oYJni4,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -127,7 +127,7 @@ hdl/utils/desc/template.py,sha256=a3NcSihzZMm9Bk76iDVe54_xBDceGmLebS0XMONE3nk,11
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256
+hdl/utils/llm/chat.py,sha256=-k5fnxzOciSHh7nKF7EnEOF1D-wrIu1mupgg5REPxmY,9974
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
@@ -135,7 +135,7 @@ hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
+hjxdl-0.1.41.dist-info/METADATA,sha256=BSH-xT-DtotFaVKeyIoPIjU0Y23K4vgb-FJoQQg4pv0,737
+hjxdl-0.1.41.dist-info/WHEEL,sha256=UvcQYKBHoFqaQd6LKyqHw9fxEolWLQnlzP0h_LgJAfI,91
+hjxdl-0.1.41.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.1.41.dist-info/RECORD,,
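For reference, each RECORD entry has the form `path,sha256=<digest>,<size>`, where the digest is the unpadded urlsafe base64 encoding of the file's SHA-256 hash (the RECORD file itself carries no hash, hence the trailing `,,`). A small checker for the updated `hdl/_version.py` entry, assuming the wheel has been unpacked into the current directory:

import base64
import hashlib
from pathlib import Path

def verify_record_entry(path: str, expected_digest: str, expected_size: int) -> bool:
    # RECORD digests: urlsafe base64 of the SHA-256 hash, '=' padding stripped.
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return digest == expected_digest and len(data) == expected_size

# The updated entry from the RECORD diff above:
print(verify_record_entry(
    "hdl/_version.py",
    "65hEflmRlvVz3zHSlCs2w2jXSKc7YICoUmNE8oYJni4",
    413,
))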
{hjxdl-0.1.39.dist-info → hjxdl-0.1.41.dist-info}/WHEEL
File without changes
{hjxdl-0.1.39.dist-info → hjxdl-0.1.41.dist-info}/top_level.txt
File without changes