praisonaiagents 0.0.61__py3-none-any.whl → 0.0.62__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +69 -3
- praisonaiagents/llm/llm.py +311 -1
- {praisonaiagents-0.0.61.dist-info → praisonaiagents-0.0.62.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.61.dist-info → praisonaiagents-0.0.62.dist-info}/RECORD +6 -6
- {praisonaiagents-0.0.61.dist-info → praisonaiagents-0.0.62.dist-info}/WHEEL +1 -1
- {praisonaiagents-0.0.61.dist-info → praisonaiagents-0.0.62.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -714,6 +714,22 @@ Your Goal: {self.goal}
         return None

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+        # Log all parameter values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "reasoning_steps": reasoning_steps,
+                "agent_name": self.name,
+                "agent_role": self.role,
+                "agent_goal": self.goal
+            }
+            logging.debug(f"Agent.chat parameters: {json.dumps(param_info, indent=2, default=str)}")
+
+        start_time = time.time()
         reasoning_steps = reasoning_steps or self.reasoning_steps
         # Search for existing knowledge if any knowledge is provided
         if self.knowledge:
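The parameter dump and the timing lines added in this release only fire when the root logger is at DEBUG. A minimal sketch of how that would look from a user script; the `from praisonaiagents import Agent` import path and the constructor keywords are assumptions, not part of this diff:

import logging

# Agent.chat checks logging.getLogger().getEffectiveLevel(),
# so the root logger itself has to be set to DEBUG.
logging.basicConfig(level=logging.DEBUG)

from praisonaiagents import Agent  # assumed export path

agent = Agent(
    name="Summarizer",
    role="Assistant",
    goal="Summarize text",
    backstory="You write concise summaries.",
)

# With DEBUG enabled, this call now logs "Agent.chat parameters: {...}"
# on entry and "Agent.chat completed in N.NN seconds" on exit.
agent.chat("Summarize the plot of Hamlet in one sentence.")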
@@ -738,7 +754,7 @@ Your Goal: {self.goal}
                     system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
                     chat_history=self.chat_history,
                     temperature=temperature,
-                    tools=tools,
+                    tools=self.tools if tools is None else tools,
                     output_json=output_json,
                     output_pydantic=output_pydantic,
                     verbose=self.verbose,
@@ -749,7 +765,7 @@ Your Goal: {self.goal}
                     console=self.console,
                     agent_name=self.name,
                     agent_role=self.role,
-                    agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                    agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
                     execute_tool_fn=self.execute_tool, # Pass tool execution function
                     reasoning_steps=reasoning_steps
                 )
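Because the fallback is written as `tools=self.tools if tools is None else tools`, only an omitted or `None` argument pulls in the agent-level tools; an explicit empty list disables them for that call. A small sketch of the distinction, using a hypothetical `get_weather` tool (the tool and the agent arguments are illustrative, not from this diff):

from praisonaiagents import Agent  # assumed export path, as above

def get_weather(city: str) -> str:
    """Get the current weather for a city.

    Args:
        city: Name of the city.
    """
    return f"Sunny in {city}"  # stub body for illustration

agent = Agent(
    name="WeatherBot",
    role="Assistant",
    goal="Answer weather questions",
    tools=[get_weather],  # agent-level tools
)

agent.chat("What's the weather in Paris?")            # tools is None -> falls back to [get_weather]
agent.chat("What's the weather in Paris?", tools=[])  # explicit [] -> no tools for this call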
@@ -757,6 +773,11 @@ Your Goal: {self.goal}
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})

+                # Log completion time if in debug mode
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+
                 return response_text
             except Exception as e:
                 display_error(f"Error in LLM chat: {e}")
@@ -944,6 +965,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             display_error(f"Error in chat: {e}", console=self.console)
             return None

+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+
+        return response_text
+
     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
         cleaned = output.strip()
@@ -958,6 +986,22 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

     async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
         """Async version of chat method. TODO: Requires Syncing with chat method."""
+        # Log all parameter values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "reasoning_steps": reasoning_steps,
+                "agent_name": self.name,
+                "agent_role": self.role,
+                "agent_goal": self.goal
+            }
+            logging.debug(f"Agent.achat parameters: {json.dumps(param_info, indent=2, default=str)}")
+
+        start_time = time.time()
         reasoning_steps = reasoning_steps or self.reasoning_steps
         try:
             # Search for existing knowledge if any knowledge is provided
@@ -996,9 +1040,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             self.chat_history.append({"role": "user", "content": prompt})
             self.chat_history.append({"role": "assistant", "content": response_text})

+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                total_time = time.time() - start_time
+                logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
             return response_text
         except Exception as e:
             display_error(f"Error in LLM chat: {e}")
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                total_time = time.time() - start_time
+                logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
             return None

         # For OpenAI client
@@ -1081,7 +1131,11 @@ Your Goal: {self.goal}
                     temperature=temperature,
                     tools=formatted_tools
                 )
-
+                result = await self._achat_completion(response, tools)
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                return result
             elif output_json or output_pydantic:
                 response = await async_client.chat.completions.create(
                     model=self.llm,
@@ -1090,6 +1144,9 @@ Your Goal: {self.goal}
                     response_format={"type": "json_object"}
                 )
                 # Return the raw response
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return response.choices[0].message.content
             else:
                 response = await async_client.chat.completions.create(
@@ -1097,12 +1154,21 @@ Your Goal: {self.goal}
                     messages=messages,
                     temperature=temperature
                 )
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return response.choices[0].message.content
             except Exception as e:
                 display_error(f"Error in chat completion: {e}")
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
                 return None
         except Exception as e:
             display_error(f"Error in achat: {e}")
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                total_time = time.time() - start_time
+                logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
             return None

     async def _achat_completion(self, response, tools, reasoning_steps=False):
praisonaiagents/llm/llm.py
CHANGED
@@ -172,6 +172,36 @@ class LLM:
         # Enable error dropping for cleaner output
         litellm.drop_params = True
         self._setup_event_tracking(events)
+
+        # Log all initialization parameters when in debug mode
+        if not isinstance(verbose, bool) and verbose >= 10:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": self.temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "logit_bias": self.logit_bias,
+                "response_format": self.response_format,
+                "seed": self.seed,
+                "logprobs": self.logprobs,
+                "top_logprobs": self.top_logprobs,
+                "api_version": self.api_version,
+                "stop_phrases": self.stop_phrases,
+                "api_key": "***" if self.api_key else None, # Mask API key for security
+                "base_url": self.base_url,
+                "verbose": self.verbose,
+                "markdown": self.markdown,
+                "self_reflect": self.self_reflect,
+                "max_reflect": self.max_reflect,
+                "min_reflect": self.min_reflect,
+                "reasoning_steps": self.reasoning_steps,
+                "extra_settings": {k: v for k, v in self.extra_settings.items() if k not in ["api_key"]}
+            }
+            logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")

     def get_response(
         self,
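Note that this initialization dump is gated on the `verbose` value rather than the logger level: it only runs when `verbose` is a non-boolean value of at least 10, and the message still goes through `logging.debug`, so the root logger must also be at DEBUG for it to appear. A sketch, where the `from praisonaiagents.llm import LLM` path and the constructor keywords are assumptions:

import logging

logging.basicConfig(level=logging.DEBUG)

from praisonaiagents.llm import LLM  # assumed import path

# verbose=10 (an int, not a bool) satisfies the
# `not isinstance(verbose, bool) and verbose >= 10` check,
# so the masked configuration dump is emitted at construction time.
llm = LLM(model="gpt-4o-mini", verbose=10)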
@@ -195,6 +225,56 @@ class LLM:
         **kwargs
     ) -> str:
         """Enhanced get_response with all OpenAI-like features"""
+        logging.info(f"Getting response from {self.model}")
+        # Log all self values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": self.temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "logit_bias": self.logit_bias,
+                "response_format": self.response_format,
+                "seed": self.seed,
+                "logprobs": self.logprobs,
+                "top_logprobs": self.top_logprobs,
+                "api_version": self.api_version,
+                "stop_phrases": self.stop_phrases,
+                "api_key": "***" if self.api_key else None, # Mask API key for security
+                "base_url": self.base_url,
+                "verbose": self.verbose,
+                "markdown": self.markdown,
+                "self_reflect": self.self_reflect,
+                "max_reflect": self.max_reflect,
+                "min_reflect": self.min_reflect,
+                "reasoning_steps": self.reasoning_steps
+            }
+            logging.debug(f"LLM instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
+            # Log the parameter values passed to get_response
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "verbose": verbose,
+                "markdown": markdown,
+                "self_reflect": self_reflect,
+                "max_reflect": max_reflect,
+                "min_reflect": min_reflect,
+                "agent_name": agent_name,
+                "agent_role": agent_role,
+                "agent_tools": agent_tools,
+                "kwargs": str(kwargs)
+            }
+            logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
         try:
             import litellm
             # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -202,6 +282,23 @@ class LLM:
             # Disable litellm debug messages
             litellm.set_verbose = False

+            # Format tools if provided
+            formatted_tools = None
+            if tools:
+                formatted_tools = []
+                for tool in tools:
+                    if callable(tool):
+                        tool_def = self._generate_tool_definition(tool.__name__)
+                    elif isinstance(tool, str):
+                        tool_def = self._generate_tool_definition(tool)
+                    else:
+                        continue
+
+                    if tool_def:
+                        formatted_tools.append(tool_def)
+                if not formatted_tools:
+                    formatted_tools = None
+
             # Build messages list
             messages = []
             if system_prompt:
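The formatting loop accepts either plain callables (looked up by `__name__`) or function names as strings; anything else is silently skipped, and an empty result collapses back to `None`. A sketch of passing a tool straight to `LLM.get_response`; the `add` function and the `llm` instance are illustrative, not from this diff:

def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: First number.
        b: Second number.
    """
    return a + b

llm = LLM(model="gpt-4o-mini")  # assumed constructor, as above

# The callable is converted via _generate_tool_definition(add.__name__);
# a bare string like "add" would be resolved the same way, and any
# unsupported entry is dropped by the `else: continue` branch. The function
# must be resolvable from __main__ or the module globals to be found.
response = llm.get_response(
    prompt="What is 17 + 25? Use the add tool if needed.",
    tools=[add],
)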
@@ -260,6 +357,7 @@ class LLM:
                     messages=messages,
                     temperature=temperature,
                     stream=False, # force non-streaming
+                    tools=formatted_tools,
                     **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                 )
                 reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -291,6 +389,7 @@ class LLM:
                     for chunk in litellm.completion(
                         model=self.model,
                         messages=messages,
+                        tools=formatted_tools,
                         temperature=temperature,
                         stream=True,
                         **kwargs
@@ -305,6 +404,7 @@ class LLM:
                     for chunk in litellm.completion(
                         model=self.model,
                         messages=messages,
+                        tools=formatted_tools,
                         temperature=temperature,
                         stream=True,
                         **kwargs
@@ -318,6 +418,7 @@ class LLM:
             final_response = litellm.completion(
                 model=self.model,
                 messages=messages,
+                tools=formatted_tools,
                 temperature=temperature,
                 stream=False, # No streaming for tool call check
                 **kwargs
@@ -552,6 +653,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         except Exception as error:
             display_error(f"Error in get_response: {str(error)}")
             raise
+
+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"get_response completed in {total_time:.2f} seconds")

     async def get_response_async(
         self,
@@ -577,6 +683,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         """Async version of get_response with identical functionality."""
         try:
             import litellm
+            logging.info(f"Getting async response from {self.model}")
+            # Log all self values when in debug mode
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                debug_info = {
+                    "model": self.model,
+                    "timeout": self.timeout,
+                    "temperature": self.temperature,
+                    "top_p": self.top_p,
+                    "n": self.n,
+                    "max_tokens": self.max_tokens,
+                    "presence_penalty": self.presence_penalty,
+                    "frequency_penalty": self.frequency_penalty,
+                    "logit_bias": self.logit_bias,
+                    "response_format": self.response_format,
+                    "seed": self.seed,
+                    "logprobs": self.logprobs,
+                    "top_logprobs": self.top_logprobs,
+                    "api_version": self.api_version,
+                    "stop_phrases": self.stop_phrases,
+                    "api_key": "***" if self.api_key else None, # Mask API key for security
+                    "base_url": self.base_url,
+                    "verbose": self.verbose,
+                    "markdown": self.markdown,
+                    "self_reflect": self.self_reflect,
+                    "max_reflect": self.max_reflect,
+                    "min_reflect": self.min_reflect,
+                    "reasoning_steps": self.reasoning_steps
+                }
+                logging.debug(f"LLM async instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
+                # Log the parameter values passed to get_response_async
+                param_info = {
+                    "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                    "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                    "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                    "temperature": temperature,
+                    "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                    "output_json": str(output_json.__class__.__name__) if output_json else None,
+                    "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                    "verbose": verbose,
+                    "markdown": markdown,
+                    "self_reflect": self_reflect,
+                    "max_reflect": max_reflect,
+                    "min_reflect": min_reflect,
+                    "agent_name": agent_name,
+                    "agent_role": agent_role,
+                    "agent_tools": agent_tools,
+                    "kwargs": str(kwargs)
+                }
+                logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
             reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
             litellm.set_verbose = False

@@ -983,6 +1139,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 raise LLMContextLengthExceededException(str(error))
             display_error(f"Error in get_response_async: {str(error)}")
             raise
+
+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"get_response_async completed in {total_time:.2f} seconds")

     def can_use_tools(self) -> bool:
         """Check if this model can use tool functions"""
@@ -1065,6 +1226,24 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         logger.debug("Using synchronous response function")

+        # Log all self values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "stream": stream,
+                "verbose": verbose,
+                "markdown": markdown,
+                "kwargs": str(kwargs)
+            }
+            logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
         # Build messages list
         messages = []
         if system_prompt:
@@ -1150,6 +1329,24 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         logger.debug("Using asynchronous response function")

+        # Log all self values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "stream": stream,
+                "verbose": verbose,
+                "markdown": markdown,
+                "kwargs": str(kwargs)
+            }
+            logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
         # Build messages list
         messages = []
         if system_prompt:
@@ -1210,4 +1407,117 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         except Exception as error:
             display_error(f"Error in response_async: {str(error)}")
-            raise
+            raise
+
+    def _generate_tool_definition(self, function_name: str) -> Optional[Dict]:
+        """Generate a tool definition from a function name."""
+        logging.debug(f"Attempting to generate tool definition for: {function_name}")
+
+        # First try to get the tool definition if it exists
+        tool_def_name = f"{function_name}_definition"
+        tool_def = globals().get(tool_def_name)
+        logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")
+
+        if not tool_def:
+            import __main__
+            tool_def = getattr(__main__, tool_def_name, None)
+            logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")
+
+        if tool_def:
+            logging.debug(f"Found tool definition: {tool_def}")
+            return tool_def
+
+        # Try to find the function
+        func = globals().get(function_name)
+        logging.debug(f"Looking for {function_name} in globals: {func is not None}")
+
+        if not func:
+            import __main__
+            func = getattr(__main__, function_name, None)
+            logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
+
+        if not func or not callable(func):
+            logging.debug(f"Function {function_name} not found or not callable")
+            return None
+
+        import inspect
+        # Handle Langchain and CrewAI tools
+        if inspect.isclass(func) and hasattr(func, 'run') and not hasattr(func, '_run'):
+            original_func = func
+            func = func.run
+            function_name = original_func.__name__
+        elif inspect.isclass(func) and hasattr(func, '_run'):
+            original_func = func
+            func = func._run
+            function_name = original_func.__name__
+
+        sig = inspect.signature(func)
+        logging.debug(f"Function signature: {sig}")
+
+        # Skip self, *args, **kwargs
+        parameters_list = []
+        for name, param in sig.parameters.items():
+            if name == "self":
+                continue
+            if param.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
+                continue
+            parameters_list.append((name, param))
+
+        parameters = {
+            "type": "object",
+            "properties": {},
+            "required": []
+        }
+
+        # Parse docstring for parameter descriptions
+        docstring = inspect.getdoc(func)
+        logging.debug(f"Function docstring: {docstring}")
+
+        param_descriptions = {}
+        if docstring:
+            import re
+            param_section = re.split(r'\s*Args:\s*', docstring)
+            logging.debug(f"Param section split: {param_section}")
+            if len(param_section) > 1:
+                param_lines = param_section[1].split('\n')
+                for line in param_lines:
+                    line = line.strip()
+                    if line and ':' in line:
+                        param_name, param_desc = line.split(':', 1)
+                        param_descriptions[param_name.strip()] = param_desc.strip()
+
+        logging.debug(f"Parameter descriptions: {param_descriptions}")
+
+        for name, param in parameters_list:
+            param_type = "string" # Default type
+            if param.annotation != inspect.Parameter.empty:
+                if param.annotation == int:
+                    param_type = "integer"
+                elif param.annotation == float:
+                    param_type = "number"
+                elif param.annotation == bool:
+                    param_type = "boolean"
+                elif param.annotation == list:
+                    param_type = "array"
+                elif param.annotation == dict:
+                    param_type = "object"
+
+            parameters["properties"][name] = {
+                "type": param_type,
+                "description": param_descriptions.get(name, "Parameter description not available")
+            }
+
+            if param.default == inspect.Parameter.empty:
+                parameters["required"].append(name)
+
+        logging.debug(f"Generated parameters: {parameters}")
+        tool_def = {
+            "type": "function",
+            "function": {
+                "name": function_name,
+                "description": docstring.split('\n\n')[0] if docstring else "No description available",
+                "parameters": parameters
+            }
+        }
+        logging.debug(f"Generated tool definition: {tool_def}")
+        return tool_def
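Given the docstring parsing above (first paragraph as the description, an `Args:` section for per-parameter text, annotation-based type mapping, and required/optional taken from defaults), a function like the hypothetical one below would be converted into roughly the following OpenAI-style definition:

def search_web(query: str, max_results: int = 5) -> list:
    """Search the web for a query.

    Args:
        query: The search query string.
        max_results: Maximum number of results to return.
    """
    ...

# _generate_tool_definition("search_web") would return approximately:
# {
#     "type": "function",
#     "function": {
#         "name": "search_web",
#         "description": "Search the web for a query.",
#         "parameters": {
#             "type": "object",
#             "properties": {
#                 "query": {"type": "string",
#                           "description": "The search query string."},
#                 "max_results": {"type": "integer",
#                                 "description": "Maximum number of results to return."}
#             },
#             "required": ["query"]   # max_results has a default, so it is optional
#         }
#     }
# }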
{praisonaiagents-0.0.61.dist-info → praisonaiagents-0.0.62.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=frdIvimDY-kU9j-9yXV1z4NtXypfPvyvlnac5mgBCuQ,1288
 praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=h3s0-1M88zujllDHnKijHmYeVihD75d-K9s2Y3IHLY4,61850
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
@@ -10,7 +10,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=pYXKXuvJgbqItO8MDmAZVYZwb5es1HDfn10refHz0Ck,73025
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
@@ -37,7 +37,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.62.dist-info/METADATA,sha256=w9bbiQEKBjIErkraz8jMhhN1qkeA4WBPf2ylGn9Skz4,830
+praisonaiagents-0.0.62.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+praisonaiagents-0.0.62.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.62.dist-info/RECORD,,
{praisonaiagents-0.0.61.dist-info → praisonaiagents-0.0.62.dist-info}/top_level.txt
File without changes