praisonaiagents 0.0.60__py3-none-any.whl → 0.0.62__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
--- praisonaiagents/__init__.py
+++ praisonaiagents/__init__.py
@@ -3,6 +3,7 @@ Praison AI Agents - A package for hierarchical AI agent task execution
  """
  
  from .agent.agent import Agent
+ from .agent.image_agent import ImageAgent
  from .agents.agents import PraisonAIAgents
  from .task.task import Task
  from .tools.tools import Tools
@@ -30,6 +31,7 @@ Agents = PraisonAIAgents
  
  __all__ = [
      'Agent',
+     'ImageAgent',
      'PraisonAIAgents',
      'Agents',
      'Tools',
--- praisonaiagents/agent/__init__.py
+++ praisonaiagents/agent/__init__.py
@@ -1,4 +1,5 @@
  """Agent module for AI agents"""
  from .agent import Agent
+ from .image_agent import ImageAgent
  
- __all__ = ['Agent']
+ __all__ = ['Agent', 'ImageAgent']
--- praisonaiagents/agent/agent.py
+++ praisonaiagents/agent/agent.py
@@ -16,7 +16,6 @@ from ..main import (
      display_self_reflection,
      ReflectionOutput,
      client,
-     error_logs,
      adisplay_instruction
  )
  import inspect
@@ -715,6 +714,22 @@ Your Goal: {self.goal}
              return None
  
      def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+         # Log all parameter values when in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             param_info = {
+                 "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                 "temperature": temperature,
+                 "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                 "output_json": str(output_json.__class__.__name__) if output_json else None,
+                 "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                 "reasoning_steps": reasoning_steps,
+                 "agent_name": self.name,
+                 "agent_role": self.role,
+                 "agent_goal": self.goal
+             }
+             logging.debug(f"Agent.chat parameters: {json.dumps(param_info, indent=2, default=str)}")
+ 
+         start_time = time.time()
          reasoning_steps = reasoning_steps or self.reasoning_steps
          # Search for existing knowledge if any knowledge is provided
          if self.knowledge:
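
Note: the new logging is gated on the root logger's effective level, so a caller opts in with standard library logging. A minimal sketch (the agent arguments here are illustrative):

    import logging
    from praisonaiagents import Agent

    logging.basicConfig(level=logging.DEBUG)  # root level DEBUG satisfies the gate above

    agent = Agent(name="Helper", role="Assistant", goal="Answer questions")
    agent.chat("Hello")  # now also emits the JSON parameter dump and a timing line
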
@@ -739,7 +754,7 @@ Your Goal: {self.goal}
                  system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
                  chat_history=self.chat_history,
                  temperature=temperature,
-                 tools=tools,
+                 tools=self.tools if tools is None else tools,
                  output_json=output_json,
                  output_pydantic=output_pydantic,
                  verbose=self.verbose,
@@ -750,7 +765,7 @@ Your Goal: {self.goal}
                  console=self.console,
                  agent_name=self.name,
                  agent_role=self.role,
-                 agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                 agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
                  execute_tool_fn=self.execute_tool,  # Pass tool execution function
                  reasoning_steps=reasoning_steps
              )
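
Note: these two hunks change the default tool wiring: a per-call tools argument now overrides the agent's own tools, and omitting it (None) falls back to self.tools. A sketch of the resulting semantics (search_web is a hypothetical tool and the constructor arguments are illustrative):

    def search_web(query: str) -> str:
        """Search the web for a query."""
        ...

    agent = Agent(name="Researcher", role="Researcher", goal="Find facts", tools=[search_web])

    agent.chat("Find recent agent papers")      # tools=None -> falls back to [search_web]
    agent.chat("Answer from memory", tools=[])  # an explicit empty list overrides the default
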
@@ -758,6 +773,11 @@ Your Goal: {self.goal}
              self.chat_history.append({"role": "user", "content": prompt})
              self.chat_history.append({"role": "assistant", "content": response_text})
  
+             # Log completion time if in debug mode
+             if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                 total_time = time.time() - start_time
+                 logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+ 
              return response_text
          except Exception as e:
              display_error(f"Error in LLM chat: {e}")
@@ -945,6 +965,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                  display_error(f"Error in chat: {e}", console=self.console)
                  return None
  
+         # Log completion time if in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             total_time = time.time() - start_time
+             logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+ 
+         return response_text
+ 
      def clean_json_output(self, output: str) -> str:
          """Clean and extract JSON from response text."""
          cleaned = output.strip()
@@ -959,6 +986,22 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  
      async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
          """Async version of chat method. TODO: Requires Syncing with chat method."""
+         # Log all parameter values when in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             param_info = {
+                 "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                 "temperature": temperature,
+                 "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                 "output_json": str(output_json.__class__.__name__) if output_json else None,
+                 "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                 "reasoning_steps": reasoning_steps,
+                 "agent_name": self.name,
+                 "agent_role": self.role,
+                 "agent_goal": self.goal
+             }
+             logging.debug(f"Agent.achat parameters: {json.dumps(param_info, indent=2, default=str)}")
+ 
+         start_time = time.time()
          reasoning_steps = reasoning_steps or self.reasoning_steps
          try:
              # Search for existing knowledge if any knowledge is provided
@@ -997,9 +1040,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
              self.chat_history.append({"role": "user", "content": prompt})
              self.chat_history.append({"role": "assistant", "content": response_text})
  
+             if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                 total_time = time.time() - start_time
+                 logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
              return response_text
          except Exception as e:
              display_error(f"Error in LLM chat: {e}")
+             if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                 total_time = time.time() - start_time
+                 logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
              return None
  
      # For OpenAI client
@@ -1082,7 +1131,11 @@ Your Goal: {self.goal}
                          temperature=temperature,
                          tools=formatted_tools
                      )
-                     return await self._achat_completion(response, tools)
+                     result = await self._achat_completion(response, tools)
+                     if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                         total_time = time.time() - start_time
+                         logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                     return result
                  elif output_json or output_pydantic:
                      response = await async_client.chat.completions.create(
                          model=self.llm,
@@ -1091,6 +1144,9 @@ Your Goal: {self.goal}
                          response_format={"type": "json_object"}
                      )
                      # Return the raw response
+                     if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                         total_time = time.time() - start_time
+                         logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                      return response.choices[0].message.content
                  else:
                      response = await async_client.chat.completions.create(
@@ -1098,12 +1154,21 @@ Your Goal: {self.goal}
                          messages=messages,
                          temperature=temperature
                      )
+                     if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                         total_time = time.time() - start_time
+                         logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                      return response.choices[0].message.content
              except Exception as e:
                  display_error(f"Error in chat completion: {e}")
+                 if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                     total_time = time.time() - start_time
+                     logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
                  return None
          except Exception as e:
              display_error(f"Error in achat: {e}")
+             if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                 total_time = time.time() - start_time
+                 logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
              return None
  
      async def _achat_completion(self, response, tools, reasoning_steps=False):
--- /dev/null
+++ praisonaiagents/agent/image_agent.py
@@ -0,0 +1,213 @@
+ """
+ ImageAgent - A specialized agent class for generating images using AI models.
+ 
+ This class extends the base Agent class to provide specific functionality for image generation,
+ including support for different image models, sizes, and quality settings.
+ """
+ 
+ from typing import Optional, Any, Dict, Union, List
+ from ..agent.agent import Agent
+ from pydantic import BaseModel, Field
+ import logging
+ import warnings
+ 
+ # Filter out Pydantic warning about fields
+ warnings.filterwarnings("ignore", "Valid config keys have changed in V2", UserWarning)
+ 
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.progress import Progress, SpinnerColumn, TextColumn
+ 
+ class ImageGenerationConfig(BaseModel):
+     """Configuration for image generation settings."""
+     style: str = Field(default="natural", description="Style of the generated image")
+     response_format: str = Field(default="url", description="Format of the response (url or b64_json)")
+     timeout: int = Field(default=600, description="Timeout in seconds for the API call")
+     api_base: Optional[str] = Field(default=None, description="Optional API base URL")
+     api_key: Optional[str] = Field(default=None, description="Optional API key")
+     api_version: Optional[str] = Field(default=None, description="Optional API version (required for Azure dall-e-3)")
+ 
+ class ImageAgent(Agent):
+     """
+     A specialized agent for generating images using AI models.
+ 
+     This agent extends the base Agent class with specific functionality for image generation,
+     including support for different models, sizes, and quality settings.
+     """
+ 
+     def __init__(
+         self,
+         name: Optional[str] = None,
+         role: Optional[str] = None,
+         goal: Optional[str] = None,
+         backstory: Optional[str] = None,
+         instructions: Optional[str] = None,
+         llm: Optional[Union[str, Any]] = None,
+         style: str = "natural",
+         response_format: str = "url",
+         timeout: int = 600,
+         api_base: Optional[str] = None,
+         api_key: Optional[str] = None,
+         api_version: Optional[str] = None,
+         verbose: Union[bool, int] = True,
+         **kwargs
+     ):
+         """Initialize ImageAgent with parameters."""
+         # Set default role and goal if not provided
+         role = role or "Image Generation Assistant"
+         goal = goal or "Generate high-quality images based on text descriptions"
+         backstory = backstory or "I am an AI assistant specialized in generating images from textual descriptions"
+ 
+         # Initialize the base agent
+         super().__init__(
+             name=name,
+             role=role,
+             goal=goal,
+             backstory=backstory,
+             instructions=instructions,
+             llm=llm,
+             verbose=verbose,
+             **kwargs
+         )
+ 
+         # Store image generation configuration
+         self.image_config = ImageGenerationConfig(
+             style=style,
+             response_format=response_format,
+             timeout=timeout,
+             api_base=api_base,
+             api_key=api_key,
+             api_version=api_version
+         )
+ 
+         # Lazy load litellm
+         self._litellm = None
+ 
+         # Configure logging based on verbose level
+         self._configure_logging(verbose)
+ 
+     def _configure_logging(self, verbose: Union[bool, int]) -> None:
+         """Configure logging levels based on verbose setting."""
+         # Only suppress logs if not in debug mode
+         if not isinstance(verbose, bool) and verbose >= 10:
+             # Enable detailed debug logging
+             logging.getLogger("asyncio").setLevel(logging.DEBUG)
+             logging.getLogger("selector_events").setLevel(logging.DEBUG)
+             logging.getLogger("litellm.utils").setLevel(logging.DEBUG)
+             logging.getLogger("litellm.main").setLevel(logging.DEBUG)
+             if hasattr(self, 'litellm'):
+                 self.litellm.suppress_debug_messages = False
+                 self.litellm.set_verbose = True
+             # Don't filter warnings in debug mode
+             warnings.resetwarnings()
+         else:
+             # Suppress debug logging for normal operation
+             logging.getLogger("asyncio").setLevel(logging.WARNING)
+             logging.getLogger("selector_events").setLevel(logging.WARNING)
+             logging.getLogger("litellm.utils").setLevel(logging.WARNING)
+             logging.getLogger("litellm.main").setLevel(logging.WARNING)
+             logging.getLogger("httpx").setLevel(logging.WARNING)
+             logging.getLogger("httpcore").setLevel(logging.WARNING)
+             if hasattr(self, 'litellm'):
+                 self.litellm.suppress_debug_messages = True
+                 self.litellm._logging._disable_debugging()
+             # Suppress all warnings including Pydantic's
+             warnings.filterwarnings("ignore", category=RuntimeWarning)
+             warnings.filterwarnings("ignore", category=UserWarning)
+             warnings.filterwarnings("ignore", category=DeprecationWarning)
+ 
+     @property
+     def litellm(self):
+         """Lazy load litellm module when needed."""
+         if self._litellm is None:
+             try:
+                 import litellm
+                 from litellm import image_generation
+ 
+                 # Configure litellm to disable success handler logs
+                 litellm.success_callback = []
+                 litellm._logging._disable_debugging()
+ 
+                 self._litellm = image_generation
+                 # Configure logging after litellm is loaded
+                 self._configure_logging(self.verbose)
+             except ImportError:
+                 raise ImportError(
+                     "litellm is required for image generation. "
+                     "Please install it with: pip install litellm"
+                 )
+         return self._litellm
+ 
+     def generate_image(self, prompt: str, **kwargs) -> Dict[str, Any]:
+         """Generate an image based on the provided prompt."""
+         # Merge default config with any provided kwargs
+         config = self.image_config.dict(exclude_none=True)
+         config.update(kwargs)
+ 
+         # Use llm parameter as the model
+         config['model'] = self.llm
+ 
+         with Progress(
+             SpinnerColumn(),
+             TextColumn("[progress.description]{task.description}"),
+             transient=True
+         ) as progress:
+             try:
+                 # Add a task for image generation
+                 task = progress.add_task(f"[cyan]Generating image with {self.llm}...", total=None)
+ 
+                 # Use litellm's image generation
+                 response = self.litellm(
+                     prompt=prompt,
+                     **config
+                 )
+ 
+                 # Mark task as complete
+                 progress.update(task, completed=True)
+                 return response
+ 
+             except Exception as e:
+                 error_msg = f"Error generating image: {str(e)}"
+                 if self.verbose:
+                     self.console.print(f"[red]{error_msg}[/red]")
+                 logging.error(error_msg)
+                 raise
+ 
+     async def agenerate_image(self, prompt: str, **kwargs) -> Dict[str, Any]:
+         """Async wrapper for generate_image."""
+         return self.generate_image(prompt, **kwargs)
+ 
+     def chat(self, prompt: str, **kwargs) -> Dict[str, Any]:
+         """Generate an image from the prompt."""
+         try:
+             result = self.generate_image(prompt, **kwargs)
+             if self.verbose:
+                 self.console.print(f"[green]Successfully generated image from prompt[/green]")
+             return result
+         except Exception as e:
+             error_msg = f"Failed to generate image: {str(e)}"
+             if self.verbose:
+                 self.console.print(f"[red]{error_msg}[/red]")
+             return {"error": str(e)}
+ 
+     async def achat(
+         self,
+         prompt: str,
+         temperature: float = 0.2,
+         tools: Optional[List[Any]] = None,
+         output_json: Optional[str] = None,
+         output_pydantic: Optional[Any] = None,
+         reasoning_steps: bool = False,
+         **kwargs
+     ) -> Union[str, Dict[str, Any]]:
+         """Async chat method for image generation."""
+         try:
+             image_result = await self.agenerate_image(prompt, **kwargs)
+             if self.verbose:
+                 self.console.print(f"[green]Successfully generated image from prompt[/green]")
+             return image_result
+         except Exception as e:
+             error_msg = f"Failed to generate image: {str(e)}"
+             if self.verbose:
+                 self.console.print(f"[red]{error_msg}[/red]")
+             return {"error": str(e)}
--- praisonaiagents/agents/autoagents.py
+++ praisonaiagents/agents/autoagents.py
@@ -8,10 +8,10 @@ It automatically handles agent creation, task setup, and execution flow.
  from .agents import PraisonAIAgents
  from ..agent.agent import Agent
  from ..task.task import Task
- from typing import List, Any, Optional, Dict, Union
+ from typing import List, Any, Optional, Dict
  import logging
  import os
- from pydantic import BaseModel, Field
+ from pydantic import BaseModel
  from ..main import display_instruction, display_tool_call, display_interaction, client
  
  # Define Pydantic models for structured output
--- praisonaiagents/llm/llm.py
+++ praisonaiagents/llm/llm.py
@@ -172,6 +172,36 @@ class LLM:
          # Enable error dropping for cleaner output
          litellm.drop_params = True
          self._setup_event_tracking(events)
+ 
+         # Log all initialization parameters when in debug mode
+         if not isinstance(verbose, bool) and verbose >= 10:
+             debug_info = {
+                 "model": self.model,
+                 "timeout": self.timeout,
+                 "temperature": self.temperature,
+                 "top_p": self.top_p,
+                 "n": self.n,
+                 "max_tokens": self.max_tokens,
+                 "presence_penalty": self.presence_penalty,
+                 "frequency_penalty": self.frequency_penalty,
+                 "logit_bias": self.logit_bias,
+                 "response_format": self.response_format,
+                 "seed": self.seed,
+                 "logprobs": self.logprobs,
+                 "top_logprobs": self.top_logprobs,
+                 "api_version": self.api_version,
+                 "stop_phrases": self.stop_phrases,
+                 "api_key": "***" if self.api_key else None,  # Mask API key for security
+                 "base_url": self.base_url,
+                 "verbose": self.verbose,
+                 "markdown": self.markdown,
+                 "self_reflect": self.self_reflect,
+                 "max_reflect": self.max_reflect,
+                 "min_reflect": self.min_reflect,
+                 "reasoning_steps": self.reasoning_steps,
+                 "extra_settings": {k: v for k, v in self.extra_settings.items() if k not in ["api_key"]}
+             }
+             logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
  
      def get_response(
          self,
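
Note: unlike the other logging additions, this init-time dump is gated on an integer verbose of 10 or more (the value of logging.DEBUG), not on the root logger's level. A sketch, assuming the constructor accepts model and verbose as the dump above suggests (the model id is illustrative):

    llm = LLM(model="openai/gpt-4o-mini", verbose=10)  # int >= 10, not True, triggers the dump
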
@@ -195,6 +225,56 @@ class LLM:
          **kwargs
      ) -> str:
          """Enhanced get_response with all OpenAI-like features"""
+         logging.info(f"Getting response from {self.model}")
+         # Log all self values when in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             debug_info = {
+                 "model": self.model,
+                 "timeout": self.timeout,
+                 "temperature": self.temperature,
+                 "top_p": self.top_p,
+                 "n": self.n,
+                 "max_tokens": self.max_tokens,
+                 "presence_penalty": self.presence_penalty,
+                 "frequency_penalty": self.frequency_penalty,
+                 "logit_bias": self.logit_bias,
+                 "response_format": self.response_format,
+                 "seed": self.seed,
+                 "logprobs": self.logprobs,
+                 "top_logprobs": self.top_logprobs,
+                 "api_version": self.api_version,
+                 "stop_phrases": self.stop_phrases,
+                 "api_key": "***" if self.api_key else None,  # Mask API key for security
+                 "base_url": self.base_url,
+                 "verbose": self.verbose,
+                 "markdown": self.markdown,
+                 "self_reflect": self.self_reflect,
+                 "max_reflect": self.max_reflect,
+                 "min_reflect": self.min_reflect,
+                 "reasoning_steps": self.reasoning_steps
+             }
+             logging.debug(f"LLM instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ 
+             # Log the parameter values passed to get_response
+             param_info = {
+                 "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                 "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                 "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                 "temperature": temperature,
+                 "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                 "output_json": str(output_json.__class__.__name__) if output_json else None,
+                 "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                 "verbose": verbose,
+                 "markdown": markdown,
+                 "self_reflect": self_reflect,
+                 "max_reflect": max_reflect,
+                 "min_reflect": min_reflect,
+                 "agent_name": agent_name,
+                 "agent_role": agent_role,
+                 "agent_tools": agent_tools,
+                 "kwargs": str(kwargs)
+             }
+             logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
          try:
              import litellm
              # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -202,6 +282,23 @@ class LLM:
              # Disable litellm debug messages
              litellm.set_verbose = False
  
+             # Format tools if provided
+             formatted_tools = None
+             if tools:
+                 formatted_tools = []
+                 for tool in tools:
+                     if callable(tool):
+                         tool_def = self._generate_tool_definition(tool.__name__)
+                     elif isinstance(tool, str):
+                         tool_def = self._generate_tool_definition(tool)
+                     else:
+                         continue
+ 
+                     if tool_def:
+                         formatted_tools.append(tool_def)
+                 if not formatted_tools:
+                     formatted_tools = None
+ 
              # Build messages list
              messages = []
              if system_prompt:
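
Note: with this block, get_response accepts tools either as callables or as function names; each is turned into an OpenAI-style definition by _generate_tool_definition (added at the end of this file) and forwarded to the litellm.completion calls below. A sketch of both accepted forms (get_weather is hypothetical, the model id and call shape illustrative):

    llm = LLM(model="openai/gpt-4o-mini")

    llm.get_response("Weather in Paris?", tools=[get_weather])    # callable: looked up by __name__
    llm.get_response("Weather in Paris?", tools=["get_weather"])  # name resolved via globals()/__main__
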
@@ -260,6 +357,7 @@ class LLM:
                          messages=messages,
                          temperature=temperature,
                          stream=False,  # force non-streaming
+                         tools=formatted_tools,
                          **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                      )
                      reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -291,6 +389,7 @@ class LLM:
                  for chunk in litellm.completion(
                      model=self.model,
                      messages=messages,
+                     tools=formatted_tools,
                      temperature=temperature,
                      stream=True,
                      **kwargs
@@ -305,6 +404,7 @@ class LLM:
                  for chunk in litellm.completion(
                      model=self.model,
                      messages=messages,
+                     tools=formatted_tools,
                      temperature=temperature,
                      stream=True,
                      **kwargs
@@ -318,6 +418,7 @@ class LLM:
                  final_response = litellm.completion(
                      model=self.model,
                      messages=messages,
+                     tools=formatted_tools,
                      temperature=temperature,
                      stream=False,  # No streaming for tool call check
                      **kwargs
@@ -552,6 +653,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
          except Exception as error:
              display_error(f"Error in get_response: {str(error)}")
              raise
+ 
+         # Log completion time if in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             total_time = time.time() - start_time
+             logging.debug(f"get_response completed in {total_time:.2f} seconds")
  
      async def get_response_async(
          self,
@@ -577,6 +683,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
          """Async version of get_response with identical functionality."""
          try:
              import litellm
+             logging.info(f"Getting async response from {self.model}")
+             # Log all self values when in debug mode
+             if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                 debug_info = {
+                     "model": self.model,
+                     "timeout": self.timeout,
+                     "temperature": self.temperature,
+                     "top_p": self.top_p,
+                     "n": self.n,
+                     "max_tokens": self.max_tokens,
+                     "presence_penalty": self.presence_penalty,
+                     "frequency_penalty": self.frequency_penalty,
+                     "logit_bias": self.logit_bias,
+                     "response_format": self.response_format,
+                     "seed": self.seed,
+                     "logprobs": self.logprobs,
+                     "top_logprobs": self.top_logprobs,
+                     "api_version": self.api_version,
+                     "stop_phrases": self.stop_phrases,
+                     "api_key": "***" if self.api_key else None,  # Mask API key for security
+                     "base_url": self.base_url,
+                     "verbose": self.verbose,
+                     "markdown": self.markdown,
+                     "self_reflect": self.self_reflect,
+                     "max_reflect": self.max_reflect,
+                     "min_reflect": self.min_reflect,
+                     "reasoning_steps": self.reasoning_steps
+                 }
+                 logging.debug(f"LLM async instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ 
+                 # Log the parameter values passed to get_response_async
+                 param_info = {
+                     "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                     "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                     "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                     "temperature": temperature,
+                     "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                     "output_json": str(output_json.__class__.__name__) if output_json else None,
+                     "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                     "verbose": verbose,
+                     "markdown": markdown,
+                     "self_reflect": self_reflect,
+                     "max_reflect": max_reflect,
+                     "min_reflect": min_reflect,
+                     "agent_name": agent_name,
+                     "agent_role": agent_role,
+                     "agent_tools": agent_tools,
+                     "kwargs": str(kwargs)
+                 }
+                 logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
              reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
              litellm.set_verbose = False
  
@@ -983,6 +1139,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                  raise LLMContextLengthExceededException(str(error))
              display_error(f"Error in get_response_async: {str(error)}")
              raise
+ 
+         # Log completion time if in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             total_time = time.time() - start_time
+             logging.debug(f"get_response_async completed in {total_time:.2f} seconds")
  
      def can_use_tools(self) -> bool:
          """Check if this model can use tool functions"""
@@ -1065,6 +1226,24 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  
          logger.debug("Using synchronous response function")
  
+         # Log all self values when in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             debug_info = {
+                 "model": self.model,
+                 "timeout": self.timeout,
+                 "temperature": temperature,
+                 "top_p": self.top_p,
+                 "n": self.n,
+                 "max_tokens": self.max_tokens,
+                 "presence_penalty": self.presence_penalty,
+                 "frequency_penalty": self.frequency_penalty,
+                 "stream": stream,
+                 "verbose": verbose,
+                 "markdown": markdown,
+                 "kwargs": str(kwargs)
+             }
+             logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ 
          # Build messages list
          messages = []
          if system_prompt:
@@ -1128,7 +1307,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
              raise
  
      # Async version of response function. Response without tool calls
-     async def response_async(
+     async def aresponse(
          self,
          prompt: Union[str, List[Dict]],
          system_prompt: Optional[str] = None,
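
Note: this rename is a breaking change for any caller of the old name; no alias appears in this diff, so call sites need a one-line update:

    # 0.0.60
    text = await llm.response_async("Summarize the report")
    # 0.0.62
    text = await llm.aresponse("Summarize the report")
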
@@ -1150,6 +1329,24 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  
          logger.debug("Using asynchronous response function")
  
+         # Log all self values when in debug mode
+         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+             debug_info = {
+                 "model": self.model,
+                 "timeout": self.timeout,
+                 "temperature": temperature,
+                 "top_p": self.top_p,
+                 "n": self.n,
+                 "max_tokens": self.max_tokens,
+                 "presence_penalty": self.presence_penalty,
+                 "frequency_penalty": self.frequency_penalty,
+                 "stream": stream,
+                 "verbose": verbose,
+                 "markdown": markdown,
+                 "kwargs": str(kwargs)
+             }
+             logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ 
          # Build messages list
          messages = []
          if system_prompt:
@@ -1210,4 +1407,117 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  
          except Exception as error:
              display_error(f"Error in response_async: {str(error)}")
-             raise
+             raise
+ 
+     def _generate_tool_definition(self, function_name: str) -> Optional[Dict]:
+         """Generate a tool definition from a function name."""
+         logging.debug(f"Attempting to generate tool definition for: {function_name}")
+ 
+         # First try to get the tool definition if it exists
+         tool_def_name = f"{function_name}_definition"
+         tool_def = globals().get(tool_def_name)
+         logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")
+ 
+         if not tool_def:
+             import __main__
+             tool_def = getattr(__main__, tool_def_name, None)
+             logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")
+ 
+         if tool_def:
+             logging.debug(f"Found tool definition: {tool_def}")
+             return tool_def
+ 
+         # Try to find the function
+         func = globals().get(function_name)
+         logging.debug(f"Looking for {function_name} in globals: {func is not None}")
+ 
+         if not func:
+             import __main__
+             func = getattr(__main__, function_name, None)
+             logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
+ 
+         if not func or not callable(func):
+             logging.debug(f"Function {function_name} not found or not callable")
+             return None
+ 
+         import inspect
+         # Handle Langchain and CrewAI tools
+         if inspect.isclass(func) and hasattr(func, 'run') and not hasattr(func, '_run'):
+             original_func = func
+             func = func.run
+             function_name = original_func.__name__
+         elif inspect.isclass(func) and hasattr(func, '_run'):
+             original_func = func
+             func = func._run
+             function_name = original_func.__name__
+ 
+         sig = inspect.signature(func)
+         logging.debug(f"Function signature: {sig}")
+ 
+         # Skip self, *args, **kwargs
+         parameters_list = []
+         for name, param in sig.parameters.items():
+             if name == "self":
+                 continue
+             if param.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
+                 continue
+             parameters_list.append((name, param))
+ 
+         parameters = {
+             "type": "object",
+             "properties": {},
+             "required": []
+         }
+ 
+         # Parse docstring for parameter descriptions
+         docstring = inspect.getdoc(func)
+         logging.debug(f"Function docstring: {docstring}")
+ 
+         param_descriptions = {}
+         if docstring:
+             import re
+             param_section = re.split(r'\s*Args:\s*', docstring)
+             logging.debug(f"Param section split: {param_section}")
+             if len(param_section) > 1:
+                 param_lines = param_section[1].split('\n')
+                 for line in param_lines:
+                     line = line.strip()
+                     if line and ':' in line:
+                         param_name, param_desc = line.split(':', 1)
+                         param_descriptions[param_name.strip()] = param_desc.strip()
+ 
+         logging.debug(f"Parameter descriptions: {param_descriptions}")
+ 
+         for name, param in parameters_list:
+             param_type = "string"  # Default type
+             if param.annotation != inspect.Parameter.empty:
+                 if param.annotation == int:
+                     param_type = "integer"
+                 elif param.annotation == float:
+                     param_type = "number"
+                 elif param.annotation == bool:
+                     param_type = "boolean"
+                 elif param.annotation == list:
+                     param_type = "array"
+                 elif param.annotation == dict:
+                     param_type = "object"
+ 
+             parameters["properties"][name] = {
+                 "type": param_type,
+                 "description": param_descriptions.get(name, "Parameter description not available")
+             }
+ 
+             if param.default == inspect.Parameter.empty:
+                 parameters["required"].append(name)
+ 
+         logging.debug(f"Generated parameters: {parameters}")
+         tool_def = {
+             "type": "function",
+             "function": {
+                 "name": function_name,
+                 "description": docstring.split('\n\n')[0] if docstring else "No description available",
+                 "parameters": parameters
+             }
+         }
+         logging.debug(f"Generated tool definition: {tool_def}")
+         return tool_def
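
Note: to make the generation rules concrete, here is a hypothetical tool and, roughly, the definition the method would derive from it (assuming the function is resolvable from globals() or __main__): typed parameters map to JSON-schema types, a default value drops a parameter from required, and the first docstring paragraph becomes the description.

    def get_weather(city: str, days: int = 1) -> str:
        """Get a short weather forecast for a city.

        Args:
            city: Name of the city to look up.
            days: Number of days to forecast.
        """
        ...

    # _generate_tool_definition("get_weather") would yield roughly:
    # {"type": "function",
    #  "function": {
    #      "name": "get_weather",
    #      "description": "Get a short weather forecast for a city.",
    #      "parameters": {
    #          "type": "object",
    #          "properties": {
    #              "city": {"type": "string", "description": "Name of the city to look up."},
    #              "days": {"type": "integer", "description": "Number of days to forecast."}},
    #          "required": ["city"]}}}
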
--- praisonaiagents/tools/train/data/generatecot.py
+++ praisonaiagents/tools/train/data/generatecot.py
@@ -1,8 +1,6 @@
  from typing import Dict, Optional, Union, Any
  import json
  from datetime import datetime
- from openai import OpenAI
- from pydantic import BaseModel
  import os
  import logging
  
@@ -124,7 +122,7 @@ class GenerateCOT:
          try:
              score = float(self._ask_ai(prompt))
              return min(max(score, 0), 1)
-         except:
+         except ValueError:
              return 0.0
  
      def cot_run(self, question: str) -> str:
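
Note: narrowing the bare except matters because float() raises ValueError on unparsable model output; a bare except also swallowed unrelated failures, such as exceptions raised inside _ask_ai itself. The intended fallback behavior:

    try:
        score = float("mostly correct")  # model replies are not always clean numbers
    except ValueError:
        score = 0.0                      # graceful fallback; other bugs now surface
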
@@ -336,6 +334,7 @@ class GenerateCOT:
      ) -> Optional[str]:
          """Export solutions in CSV format."""
          try:
+             import csv
              self._is_qa_pairs(qa_pairs)  # Validate format
              if qa_pairs:
                  self.qa_pairs.update(qa_pairs)
@@ -425,7 +424,6 @@ class GenerateCOT:
          logger.debug(f"Attempting to upload {filepath} to HuggingFace as {dataset_name}")
          try:
              from datasets import Dataset
-             from huggingface_hub import HfApi, login
              import pandas as pd
  
              logger.debug(f"Loading data from {filepath}")
--- praisonaiagents-0.0.60.dist-info/METADATA
+++ praisonaiagents-0.0.62.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: praisonaiagents
- Version: 0.0.60
+ Version: 0.0.62
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Dist: pydantic
--- praisonaiagents-0.0.60.dist-info/RECORD
+++ praisonaiagents-0.0.62.dist-info/RECORD
@@ -1,15 +1,16 @@
- praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
+ praisonaiagents/__init__.py,sha256=frdIvimDY-kU9j-9yXV1z4NtXypfPvyvlnac5mgBCuQ,1288
  praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
- praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
- praisonaiagents/agent/agent.py,sha256=VZPci7g_LAvTz7_rWvWAZ1JgCGKQc1vSfI6x0fqi2os,57598
+ praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
+ praisonaiagents/agent/agent.py,sha256=h3s0-1M88zujllDHnKijHmYeVihD75d-K9s2Y3IHLY4,61850
+ praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
  praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
- praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
+ praisonaiagents/agents/autoagents.py,sha256=olYDn--rlJp-SckxILqmREkkgNlzCgEEcAUzfMj-54E,13518
  praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
  praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
  praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
  praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
- praisonaiagents/llm/llm.py,sha256=t47T80kL2QEUSAGfNYHQG130d-CRUDxXi0AwStW0zkk,58156
+ praisonaiagents/llm/llm.py,sha256=pYXKXuvJgbqItO8MDmAZVYZwb5es1HDfn10refHz0Ck,73025
  praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
  praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
  praisonaiagents/process/process.py,sha256=HPw84OhnKQW3EyrDkpoQu0DcpxThbrzR2hWUgwQh9Pw,59955
@@ -35,8 +36,8 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
  praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
- praisonaiagents/tools/train/data/generatecot.py,sha256=EwrF6p-AWQQJktJeZu2O52ipbHGPd5y1IEmTLw-YSCs,19479
- praisonaiagents-0.0.60.dist-info/METADATA,sha256=I7mcUMdieRsRLzjdMO2Qkzt6dEY8Tm2X1XJIkydeIAg,830
- praisonaiagents-0.0.60.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- praisonaiagents-0.0.60.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.60.dist-info/RECORD,,
+ praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
+ praisonaiagents-0.0.62.dist-info/METADATA,sha256=w9bbiQEKBjIErkraz8jMhhN1qkeA4WBPf2ylGn9Skz4,830
+ praisonaiagents-0.0.62.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+ praisonaiagents-0.0.62.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.62.dist-info/RECORD,,
--- praisonaiagents-0.0.60.dist-info/WHEEL
+++ praisonaiagents-0.0.62.dist-info/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.0)
+ Generator: setuptools (75.8.2)
  Root-Is-Purelib: true
  Tag: py3-none-any
  