praisonaiagents 0.0.61__py3-none-any.whl → 0.0.63__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/agent/agent.py CHANGED
@@ -714,6 +714,22 @@ Your Goal: {self.goal}
         return None

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+        # Log all parameter values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "reasoning_steps": reasoning_steps,
+                "agent_name": self.name,
+                "agent_role": self.role,
+                "agent_goal": self.goal
+            }
+            logging.debug(f"Agent.chat parameters: {json.dumps(param_info, indent=2, default=str)}")
+
+        start_time = time.time()
         reasoning_steps = reasoning_steps or self.reasoning_steps
         # Search for existing knowledge if any knowledge is provided
         if self.knowledge:
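Note: the new instrumentation is gated on the root logger's effective level, so surfacing it requires no package-specific switches. A minimal sketch of how a caller could enable it (the `Agent` constructor arguments here are illustrative assumptions, not the package's documented signature):

    import logging
    from praisonaiagents import Agent

    # Raising the root logger to DEBUG activates the parameter dump and the
    # "Agent.chat completed in N.NN seconds" timing lines added in this hunk.
    logging.basicConfig(level=logging.DEBUG)

    agent = Agent(name="Researcher", role="Analyst", goal="Summarise findings")  # hypothetical arguments
    agent.chat("What changed in 0.0.63?")

The dump truncates prompts to 100 characters, so enabling it should not flood logs with long inputs.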
@@ -738,7 +754,7 @@ Your Goal: {self.goal}
                 system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
                 chat_history=self.chat_history,
                 temperature=temperature,
-                tools=tools,
+                tools=self.tools if tools is None else tools,
                 output_json=output_json,
                 output_pydantic=output_pydantic,
                 verbose=self.verbose,
@@ -749,7 +765,7 @@ Your Goal: {self.goal}
                 console=self.console,
                 agent_name=self.name,
                 agent_role=self.role,
-                agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
                 execute_tool_fn=self.execute_tool,  # Pass tool execution function
                 reasoning_steps=reasoning_steps
             )
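Taken together, the two hunks above fix a gap in `chat()`: tools registered on the agent were previously ignored unless the caller re-passed them on every call. Now `tools=None` falls back to `self.tools`, and the `agent_tools` display list follows the same precedence. A usage sketch (constructor arguments again illustrative):

    def get_weather(city: str) -> str:
        """Get the current weather for a city."""
        return f"Sunny in {city}"

    agent = Agent(name="WeatherBot", role="Assistant", goal="Answer weather questions",
                  tools=[get_weather])  # hypothetical arguments

    agent.chat("Weather in Paris?")                       # now uses agent.tools by default
    agent.chat("Weather in Paris?", tools=[get_weather])  # explicit tools still take precedence

Since the check is `is None`, passing an explicit empty list should disable tools for that one call rather than fall back.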
@@ -757,6 +773,11 @@ Your Goal: {self.goal}
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})

+                # Log completion time if in debug mode
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+
                 return response_text
             except Exception as e:
                 display_error(f"Error in LLM chat: {e}")
@@ -944,6 +965,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 display_error(f"Error in chat: {e}", console=self.console)
                 return None

+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+
+        return response_text
+
     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
         cleaned = output.strip()
@@ -958,6 +986,22 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

     async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
         """Async version of chat method. TODO: Requires Syncing with chat method."""
+        # Log all parameter values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "reasoning_steps": reasoning_steps,
+                "agent_name": self.name,
+                "agent_role": self.role,
+                "agent_goal": self.goal
+            }
+            logging.debug(f"Agent.achat parameters: {json.dumps(param_info, indent=2, default=str)}")
+
+        start_time = time.time()
         reasoning_steps = reasoning_steps or self.reasoning_steps
         try:
             # Search for existing knowledge if any knowledge is provided
@@ -996,9 +1040,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "user", "content": prompt})
                     self.chat_history.append({"role": "assistant", "content": response_text})

+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                     return response_text
                 except Exception as e:
                     display_error(f"Error in LLM chat: {e}")
+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
                     return None

             # For OpenAI client
@@ -1081,7 +1131,11 @@ Your Goal: {self.goal}
                     temperature=temperature,
                     tools=formatted_tools
                 )
-                return await self._achat_completion(response, tools)
+                result = await self._achat_completion(response, tools)
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                return result
             elif output_json or output_pydantic:
                 response = await async_client.chat.completions.create(
                     model=self.llm,
@@ -1090,6 +1144,9 @@ Your Goal: {self.goal}
                     response_format={"type": "json_object"}
                 )
                 # Return the raw response
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return response.choices[0].message.content
             else:
                 response = await async_client.chat.completions.create(
@@ -1097,12 +1154,21 @@ Your Goal: {self.goal}
                     messages=messages,
                     temperature=temperature
                 )
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return response.choices[0].message.content
             except Exception as e:
                 display_error(f"Error in chat completion: {e}")
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
                 return None
         except Exception as e:
             display_error(f"Error in achat: {e}")
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                total_time = time.time() - start_time
+                logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
             return None

     async def _achat_completion(self, response, tools, reasoning_steps=False):
praisonaiagents/llm/llm.py CHANGED
@@ -172,6 +172,36 @@ class LLM:
         # Enable error dropping for cleaner output
         litellm.drop_params = True
         self._setup_event_tracking(events)
+
+        # Log all initialization parameters when in debug mode
+        if not isinstance(verbose, bool) and verbose >= 10:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": self.temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "logit_bias": self.logit_bias,
+                "response_format": self.response_format,
+                "seed": self.seed,
+                "logprobs": self.logprobs,
+                "top_logprobs": self.top_logprobs,
+                "api_version": self.api_version,
+                "stop_phrases": self.stop_phrases,
+                "api_key": "***" if self.api_key else None,  # Mask API key for security
+                "base_url": self.base_url,
+                "verbose": self.verbose,
+                "markdown": self.markdown,
+                "self_reflect": self.self_reflect,
+                "max_reflect": self.max_reflect,
+                "min_reflect": self.min_reflect,
+                "reasoning_steps": self.reasoning_steps,
+                "extra_settings": {k: v for k, v in self.extra_settings.items() if k not in ["api_key"]}
+            }
+            logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")

     def get_response(
         self,
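Two details in this hunk are worth flagging. First, the gate is `not isinstance(verbose, bool) and verbose >= 10`, so an integer `verbose` doubles as a log-level switch (10 is the numeric value of `logging.DEBUG`). Second, the API key is masked before the configuration is serialised, and `extra_settings` is filtered the same way. A small self-contained sketch of that masking idiom:

    import json
    import logging

    def dump_config(config: dict) -> str:
        # Mirror the hunk's idiom: record that a secret was present, never its value.
        safe = {k: ("***" if k == "api_key" and v else v) for k, v in config.items()}
        return json.dumps(safe, indent=2, default=str)

    logging.debug("LLM config: %s", dump_config(
        {"model": "gpt-4o-mini", "api_key": "sk-...", "temperature": 0.2}  # placeholder values
    ))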
@@ -195,6 +225,56 @@ class LLM:
         **kwargs
     ) -> str:
         """Enhanced get_response with all OpenAI-like features"""
+        logging.info(f"Getting response from {self.model}")
+        # Log all self values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": self.temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "logit_bias": self.logit_bias,
+                "response_format": self.response_format,
+                "seed": self.seed,
+                "logprobs": self.logprobs,
+                "top_logprobs": self.top_logprobs,
+                "api_version": self.api_version,
+                "stop_phrases": self.stop_phrases,
+                "api_key": "***" if self.api_key else None,  # Mask API key for security
+                "base_url": self.base_url,
+                "verbose": self.verbose,
+                "markdown": self.markdown,
+                "self_reflect": self.self_reflect,
+                "max_reflect": self.max_reflect,
+                "min_reflect": self.min_reflect,
+                "reasoning_steps": self.reasoning_steps
+            }
+            logging.debug(f"LLM instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
+            # Log the parameter values passed to get_response
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "verbose": verbose,
+                "markdown": markdown,
+                "self_reflect": self_reflect,
+                "max_reflect": max_reflect,
+                "min_reflect": min_reflect,
+                "agent_name": agent_name,
+                "agent_role": agent_role,
+                "agent_tools": agent_tools,
+                "kwargs": str(kwargs)
+            }
+            logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
         try:
             import litellm
             # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -202,6 +282,23 @@
             # Disable litellm debug messages
             litellm.set_verbose = False

+            # Format tools if provided
+            formatted_tools = None
+            if tools:
+                formatted_tools = []
+                for tool in tools:
+                    if callable(tool):
+                        tool_def = self._generate_tool_definition(tool.__name__)
+                    elif isinstance(tool, str):
+                        tool_def = self._generate_tool_definition(tool)
+                    else:
+                        continue
+
+                    if tool_def:
+                        formatted_tools.append(tool_def)
+                if not formatted_tools:
+                    formatted_tools = None
+
             # Build messages list
             messages = []
             if system_prompt:
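The dispatch above accepts tools either as callables or as string names; any other shape, including an already-formatted OpenAI tool dict, is silently skipped in this version. String names are resolved by `_generate_tool_definition` (added at the bottom of this diff), which searches the llm module's `globals()` and then `__main__`. A sketch of the accepted shapes, assuming an existing `llm` instance and that `get_response` takes the prompt as its first argument:

    def get_weather(city: str) -> str:
        """Get the current weather for a city.

        Args:
            city: Name of the city to look up.
        """
        return f"Sunny in {city}"

    llm.get_response("Weather in Paris?", tools=[get_weather])           # callable: accepted
    llm.get_response("Weather in Paris?", tools=["get_weather"])         # name: resolved via globals()/__main__
    llm.get_response("Weather in Paris?", tools=[{"type": "function"}])  # plain dict: skipped by this loop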
@@ -260,6 +357,7 @@
                     messages=messages,
                     temperature=temperature,
                     stream=False,  # force non-streaming
+                    tools=formatted_tools,
                     **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                 )
                 reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -291,6 +389,7 @@
                     for chunk in litellm.completion(
                         model=self.model,
                         messages=messages,
+                        tools=formatted_tools,
                         temperature=temperature,
                         stream=True,
                         **kwargs
@@ -305,6 +404,7 @@
                     for chunk in litellm.completion(
                         model=self.model,
                         messages=messages,
+                        tools=formatted_tools,
                         temperature=temperature,
                         stream=True,
                         **kwargs
@@ -318,6 +418,7 @@
                 final_response = litellm.completion(
                     model=self.model,
                     messages=messages,
+                    tools=formatted_tools,
                     temperature=temperature,
                     stream=False,  # No streaming for tool call check
                     **kwargs
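With `tools=formatted_tools` now threaded through each `litellm.completion` call above, the generated definitions reach the provider in litellm's OpenAI-compatible shape. A self-contained sketch of an equivalent direct call (the model name is illustrative):

    import litellm

    tools = [{
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string", "description": "Name of the city to look up."}},
                "required": ["city"],
            },
        },
    }]

    resp = litellm.completion(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "Weather in Paris?"}],
        tools=tools,
        stream=False,
    )
    tool_calls = resp["choices"][0]["message"].get("tool_calls")  # same dict-style access the code above uses

When nothing could be formatted, `formatted_tools` stays `None`; since this file sets `litellm.drop_params = True`, a `tools=None` argument should simply be dropped from the request.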
@@ -337,20 +438,29 @@
                     function_name = tool_call["function"]["name"]
                     arguments = json.loads(tool_call["function"]["arguments"])

-                    if verbose:
-                        display_tool_call(f"Agent {agent_name} is calling function '{function_name}' with arguments: {arguments}", console=console)
-
+                    logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                     tool_result = execute_tool_fn(function_name, arguments)
+                    logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")

-                    if tool_result:
-                        if verbose:
-                            display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=console)
+                    if verbose:
+                        display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
+                        if tool_result:
+                            display_message += f"Function returned: {tool_result}"
+                            logging.debug(f"[TOOL_EXEC_DEBUG] Display message with result: {display_message}")
+                        else:
+                            display_message += "Function returned no output"
+                            logging.debug("[TOOL_EXEC_DEBUG] Tool returned no output")
+
+                        logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}")
+                        display_tool_call(display_message, console=console)
+
                         messages.append({
                             "role": "tool",
                             "tool_call_id": tool_call["id"],
                             "content": json.dumps(tool_result)
                         })
                     else:
+                        logging.debug("[TOOL_EXEC_DEBUG] Verbose mode off, not displaying tool call")
                         messages.append({
                             "role": "tool",
                             "tool_call_id": tool_call["id"],
@@ -552,6 +662,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         except Exception as error:
             display_error(f"Error in get_response: {str(error)}")
             raise
+
+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"get_response completed in {total_time:.2f} seconds")

     async def get_response_async(
         self,
@@ -577,6 +692,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         """Async version of get_response with identical functionality."""
         try:
             import litellm
+            logging.info(f"Getting async response from {self.model}")
+            # Log all self values when in debug mode
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                debug_info = {
+                    "model": self.model,
+                    "timeout": self.timeout,
+                    "temperature": self.temperature,
+                    "top_p": self.top_p,
+                    "n": self.n,
+                    "max_tokens": self.max_tokens,
+                    "presence_penalty": self.presence_penalty,
+                    "frequency_penalty": self.frequency_penalty,
+                    "logit_bias": self.logit_bias,
+                    "response_format": self.response_format,
+                    "seed": self.seed,
+                    "logprobs": self.logprobs,
+                    "top_logprobs": self.top_logprobs,
+                    "api_version": self.api_version,
+                    "stop_phrases": self.stop_phrases,
+                    "api_key": "***" if self.api_key else None,  # Mask API key for security
+                    "base_url": self.base_url,
+                    "verbose": self.verbose,
+                    "markdown": self.markdown,
+                    "self_reflect": self.self_reflect,
+                    "max_reflect": self.max_reflect,
+                    "min_reflect": self.min_reflect,
+                    "reasoning_steps": self.reasoning_steps
+                }
+                logging.debug(f"LLM async instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
+                # Log the parameter values passed to get_response_async
+                param_info = {
+                    "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                    "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                    "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                    "temperature": temperature,
+                    "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                    "output_json": str(output_json.__class__.__name__) if output_json else None,
+                    "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                    "verbose": verbose,
+                    "markdown": markdown,
+                    "self_reflect": self_reflect,
+                    "max_reflect": max_reflect,
+                    "min_reflect": min_reflect,
+                    "agent_name": agent_name,
+                    "agent_role": agent_role,
+                    "agent_tools": agent_tools,
+                    "kwargs": str(kwargs)
+                }
+                logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
             reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
             litellm.set_verbose = False

@@ -767,14 +932,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     function_name = tool_call.function.name
                     arguments = json.loads(tool_call.function.arguments)

-                    if verbose:
-                        display_tool_call(f"Agent {agent_name} is calling function '{function_name}' with arguments: {arguments}", console=console)
-
                     tool_result = await execute_tool_fn(function_name, arguments)

-                    if tool_result:
-                        if verbose:
-                            display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=console)
+                    if verbose:
+                        display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
+                        if tool_result:
+                            display_message += f"Function returned: {tool_result}"
+                        else:
+                            display_message += "Function returned no output"
+                        display_tool_call(display_message, console=console)
                     messages.append({
                         "role": "tool",
                         "tool_call_id": tool_call.id,
@@ -983,6 +1149,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 raise LLMContextLengthExceededException(str(error))
             display_error(f"Error in get_response_async: {str(error)}")
             raise
+
+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"get_response_async completed in {total_time:.2f} seconds")

     def can_use_tools(self) -> bool:
         """Check if this model can use tool functions"""
@@ -1065,6 +1236,24 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

             logger.debug("Using synchronous response function")

+            # Log all self values when in debug mode
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                debug_info = {
+                    "model": self.model,
+                    "timeout": self.timeout,
+                    "temperature": temperature,
+                    "top_p": self.top_p,
+                    "n": self.n,
+                    "max_tokens": self.max_tokens,
+                    "presence_penalty": self.presence_penalty,
+                    "frequency_penalty": self.frequency_penalty,
+                    "stream": stream,
+                    "verbose": verbose,
+                    "markdown": markdown,
+                    "kwargs": str(kwargs)
+                }
+                logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
             # Build messages list
             messages = []
             if system_prompt:
@@ -1150,6 +1339,24 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

             logger.debug("Using asynchronous response function")

+            # Log all self values when in debug mode
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                debug_info = {
+                    "model": self.model,
+                    "timeout": self.timeout,
+                    "temperature": temperature,
+                    "top_p": self.top_p,
+                    "n": self.n,
+                    "max_tokens": self.max_tokens,
+                    "presence_penalty": self.presence_penalty,
+                    "frequency_penalty": self.frequency_penalty,
+                    "stream": stream,
+                    "verbose": verbose,
+                    "markdown": markdown,
+                    "kwargs": str(kwargs)
+                }
+                logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
             # Build messages list
             messages = []
             if system_prompt:
@@ -1210,4 +1417,117 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         except Exception as error:
             display_error(f"Error in response_async: {str(error)}")
-            raise
+            raise
+
+    def _generate_tool_definition(self, function_name: str) -> Optional[Dict]:
+        """Generate a tool definition from a function name."""
+        logging.debug(f"Attempting to generate tool definition for: {function_name}")
+
+        # First try to get the tool definition if it exists
+        tool_def_name = f"{function_name}_definition"
+        tool_def = globals().get(tool_def_name)
+        logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")
+
+        if not tool_def:
+            import __main__
+            tool_def = getattr(__main__, tool_def_name, None)
+            logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")
+
+        if tool_def:
+            logging.debug(f"Found tool definition: {tool_def}")
+            return tool_def
+
+        # Try to find the function
+        func = globals().get(function_name)
+        logging.debug(f"Looking for {function_name} in globals: {func is not None}")
+
+        if not func:
+            import __main__
+            func = getattr(__main__, function_name, None)
+            logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
+
+        if not func or not callable(func):
+            logging.debug(f"Function {function_name} not found or not callable")
+            return None
+
+        import inspect
+        # Handle Langchain and CrewAI tools
+        if inspect.isclass(func) and hasattr(func, 'run') and not hasattr(func, '_run'):
+            original_func = func
+            func = func.run
+            function_name = original_func.__name__
+        elif inspect.isclass(func) and hasattr(func, '_run'):
+            original_func = func
+            func = func._run
+            function_name = original_func.__name__
+
+        sig = inspect.signature(func)
+        logging.debug(f"Function signature: {sig}")
+
+        # Skip self, *args, **kwargs
+        parameters_list = []
+        for name, param in sig.parameters.items():
+            if name == "self":
+                continue
+            if param.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
+                continue
+            parameters_list.append((name, param))
+
+        parameters = {
+            "type": "object",
+            "properties": {},
+            "required": []
+        }
+
+        # Parse docstring for parameter descriptions
+        docstring = inspect.getdoc(func)
+        logging.debug(f"Function docstring: {docstring}")
+
+        param_descriptions = {}
+        if docstring:
+            import re
+            param_section = re.split(r'\s*Args:\s*', docstring)
+            logging.debug(f"Param section split: {param_section}")
+            if len(param_section) > 1:
+                param_lines = param_section[1].split('\n')
+                for line in param_lines:
+                    line = line.strip()
+                    if line and ':' in line:
+                        param_name, param_desc = line.split(':', 1)
+                        param_descriptions[param_name.strip()] = param_desc.strip()
+
+        logging.debug(f"Parameter descriptions: {param_descriptions}")
+
+        for name, param in parameters_list:
+            param_type = "string"  # Default type
+            if param.annotation != inspect.Parameter.empty:
+                if param.annotation == int:
+                    param_type = "integer"
+                elif param.annotation == float:
+                    param_type = "number"
+                elif param.annotation == bool:
+                    param_type = "boolean"
+                elif param.annotation == list:
+                    param_type = "array"
+                elif param.annotation == dict:
+                    param_type = "object"
+
+            parameters["properties"][name] = {
+                "type": param_type,
+                "description": param_descriptions.get(name, "Parameter description not available")
+            }
+
+            if param.default == inspect.Parameter.empty:
+                parameters["required"].append(name)
+
+        logging.debug(f"Generated parameters: {parameters}")
+        tool_def = {
+            "type": "function",
+            "function": {
+                "name": function_name,
+                "description": docstring.split('\n\n')[0] if docstring else "No description available",
+                "parameters": parameters
+            }
+        }
+        logging.debug(f"Generated tool definition: {tool_def}")
+        return tool_def
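Reviewer note on the helper just added: it first looks for a hand-written `<function_name>_definition` dict, then falls back to introspection, mapping Python annotations to JSON Schema types, marking parameters without defaults as required, and pulling per-parameter descriptions from a Google-style `Args:` docstring section. Roughly, given a function like this:

    def get_stock_price(symbol: str, days: int = 7) -> str:
        """Fetch a recent closing price.

        Args:
            symbol: Ticker symbol to look up.
            days: How many days of history to fetch.
        """
        ...

    # _generate_tool_definition("get_stock_price") should yield approximately:
    # {
    #     "type": "function",
    #     "function": {
    #         "name": "get_stock_price",
    #         "description": "Fetch a recent closing price.",
    #         "parameters": {
    #             "type": "object",
    #             "properties": {
    #                 "symbol": {"type": "string", "description": "Ticker symbol to look up."},
    #                 "days": {"type": "integer", "description": "How many days of history to fetch."}
    #             },
    #             "required": ["symbol"]
    #         }
    #     }
    # }

One limitation worth noting: the lookup only searches this module's `globals()` and `__main__`, so tools defined in other modules will not be resolved even when passed as callables, since the formatting loop earlier in this diff passes only `tool.__name__`.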
praisonaiagents/main.py CHANGED
@@ -86,6 +86,7 @@ async def execute_callback(display_type: str, **kwargs):
 def _clean_display_content(content: str, max_length: int = 20000) -> str:
     """Helper function to clean and truncate content for display."""
     if not content or not str(content).strip():
+        logging.debug(f"Empty content received in _clean_display_content: {repr(content)}")
         return ""

     content = str(content)
@@ -174,11 +175,14 @@ def display_instruction(message: str, console=None, agent_name: str = None, agen
     console.print(Panel.fit(Text(message, style="bold blue"), title="Instruction", border_style="cyan"))

 def display_tool_call(message: str, console=None):
+    logging.debug(f"display_tool_call called with message: {repr(message)}")
     if not message or not message.strip():
+        logging.debug("Empty message in display_tool_call, returning early")
         return
     if console is None:
         console = Console()
     message = _clean_display_content(str(message))
+    logging.debug(f"Cleaned message in display_tool_call: {repr(message)}")

     # Execute callback if registered
     if 'tool_call' in sync_display_callbacks:
@@ -202,7 +206,8 @@ def display_error(message: str, console=None):

 def display_generating(content: str = "", start_time: Optional[float] = None):
     if not content or not str(content).strip():
-        return Panel("", title="", border_style="green")
+        logging.debug("Empty content in display_generating, returning early")
+        return None

     elapsed_str = ""
     if start_time is not None:
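Behavioural note: `display_generating` (and `adisplay_generating` below) now returns `None` for empty content instead of an empty green `Panel`, so callers that feed the result straight into a Rich `Live` display need a guard. A defensive caller sketch:

    from rich.console import Console
    from rich.live import Live

    console = Console()
    with Live(console=console, refresh_per_second=4) as live:
        for chunk in ["", "Hello", "Hello world"]:
            panel = display_generating(chunk, start_time=None)
            if panel is not None:  # empty chunks now yield None rather than a blank Panel
                live.update(panel)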
@@ -293,11 +298,14 @@ async def adisplay_instruction(message: str, console=None, agent_name: str = Non

 async def adisplay_tool_call(message: str, console=None):
     """Async version of display_tool_call."""
+    logging.debug(f"adisplay_tool_call called with message: {repr(message)}")
     if not message or not message.strip():
+        logging.debug("Empty message in adisplay_tool_call, returning early")
         return
     if console is None:
         console = Console()
     message = _clean_display_content(str(message))
+    logging.debug(f"Cleaned message in adisplay_tool_call: {repr(message)}")

     if 'tool_call' in async_display_callbacks:
         await async_display_callbacks['tool_call'](message=message)
@@ -321,7 +329,8 @@ async def adisplay_error(message: str, console=None):
 async def adisplay_generating(content: str = "", start_time: Optional[float] = None):
     """Async version of display_generating."""
     if not content or not str(content).strip():
-        return Panel("", title="", border_style="green")
+        logging.debug("Empty content in adisplay_generating, returning early")
+        return None

     elapsed_str = ""
     if start_time is not None:
praisonaiagents-0.0.61.dist-info/METADATA → praisonaiagents-0.0.63.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.61
+Version: 0.0.63
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents-0.0.61.dist-info/RECORD → praisonaiagents-0.0.63.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=frdIvimDY-kU9j-9yXV1z4NtXypfPvyvlnac5mgBCuQ,1288
-praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
+praisonaiagents/main.py,sha256=l29nGEbV2ReBi4szURbnH0Fk0w2F_QZTmECysyZjYcA,15066
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=_v8WrWK1oP4OpPgp30nH4xbPyREnjOnRT1cyHUa2T9Q,57582
+praisonaiagents/agent/agent.py,sha256=h3s0-1M88zujllDHnKijHmYeVihD75d-K9s2Y3IHLY4,61850
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
@@ -10,7 +10,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=SYfiMOmduEOhwraewmXSydu6tNBb9n5uKRxnO9moGYM,58151
+praisonaiagents/llm/llm.py,sha256=hByGXJFHC4BPcsu2b1RZN239mfNp4sz5WuTJDallfaQ,73982
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
@@ -37,7 +37,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.61.dist-info/METADATA,sha256=I_883Gdgeer-wm8RVmBk-kAdkHQloL0ewZ96wqwW26c,830
-praisonaiagents-0.0.61.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.61.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.61.dist-info/RECORD,,
+praisonaiagents-0.0.63.dist-info/METADATA,sha256=oZNwr62vIILdar4v3DrKgAuVnYEzvNNUH_o9Ndy7NMI,830
+praisonaiagents-0.0.63.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+praisonaiagents-0.0.63.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.63.dist-info/RECORD,,
praisonaiagents-0.0.61.dist-info/WHEEL → praisonaiagents-0.0.63.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.8.0)
+Generator: setuptools (75.8.2)
 Root-Is-Purelib: true
 Tag: py3-none-any