praisonaiagents 0.0.118__tar.gz → 0.0.120__tar.gz

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
Files changed (70)
  1. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/agent.py +14 -5
  3. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agents/autoagents.py +65 -23
  4. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/llm/llm.py +256 -219
  5. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/PKG-INFO +1 -1
  6. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/pyproject.toml +1 -1
  7. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/README.md +0 -0
  8. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/__init__.py +0 -0
  9. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/__init__.py +0 -0
  10. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/handoff.py +0 -0
  11. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/image_agent.py +0 -0
  12. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agents/__init__.py +0 -0
  13. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agents/agents.py +0 -0
  14. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/approval.py +0 -0
  15. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/__init__.py +0 -0
  16. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  17. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  18. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/__init__.py +0 -0
  19. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/chunking.py +0 -0
  20. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/knowledge.py +0 -0
  21. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/llm/__init__.py +0 -0
  22. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/llm/openai_client.py +0 -0
  23. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/main.py +0 -0
  24. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/mcp/__init__.py +0 -0
  25. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/mcp/mcp.py +0 -0
  26. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/mcp/mcp_sse.py +0 -0
  27. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/memory/__init__.py +0 -0
  28. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/memory/memory.py +0 -0
  29. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/process/__init__.py +0 -0
  30. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/process/process.py +0 -0
  31. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/session.py +0 -0
  32. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/task/__init__.py +0 -0
  33. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/task/task.py +0 -0
  34. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/__init__.py +0 -0
  35. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/integration.py +0 -0
  36. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/telemetry.py +0 -0
  37. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/README.md +0 -0
  38. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/__init__.py +0 -0
  39. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/arxiv_tools.py +0 -0
  40. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/calculator_tools.py +0 -0
  41. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/csv_tools.py +0 -0
  42. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/duckdb_tools.py +0 -0
  43. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  44. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/excel_tools.py +0 -0
  45. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/file_tools.py +0 -0
  46. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/json_tools.py +0 -0
  47. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/newspaper_tools.py +0 -0
  48. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/pandas_tools.py +0 -0
  49. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/python_tools.py +0 -0
  50. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/searxng_tools.py +0 -0
  51. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/shell_tools.py +0 -0
  52. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/spider_tools.py +0 -0
  53. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/test.py +0 -0
  54. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/tools.py +0 -0
  55. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  56. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  57. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/xml_tools.py +0 -0
  58. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/yaml_tools.py +0 -0
  59. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/yfinance_tools.py +0 -0
  60. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  61. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  62. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/requires.txt +0 -0
  63. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/top_level.txt +0 -0
  64. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/setup.cfg +0 -0
  65. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test-graph-memory.py +0 -0
  66. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test.py +0 -0
  67. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_handoff_compatibility.py +0 -0
  68. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_ollama_async_fix.py +0 -0
  69. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_ollama_fix.py +0 -0
  70. {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_posthog_fixed.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.118
+ Version: 0.0.120
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -1233,7 +1233,8 @@ Your Goal: {self.goal}
  # Add to chat history and return raw response
  self.chat_history.append({"role": "user", "content": original_prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- if self.verbose:
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
  display_interaction(original_prompt, response_text, markdown=self.markdown,
  generation_time=time.time() - start_time, console=self.console)
  return response_text
@@ -1243,7 +1244,9 @@ Your Goal: {self.goal}
  self.chat_history.append({"role": "assistant", "content": response_text})
  if self.verbose:
  logging.debug(f"Agent {self.name} final response: {response_text}")
- display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Return only reasoning content if reasoning_steps is True
  if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
  # Apply guardrail to reasoning content
@@ -1279,7 +1282,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Return the original response without reflection
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  return response_text

  reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
@@ -1302,7 +1307,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after satisfactory reflection
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
@@ -1317,7 +1324,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after max reflections
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
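The agent.py hunks above all apply the same fix: display_interaction is called only when the agent is verbose and is not delegating to a custom LLM wrapper, since that wrapper already renders the exchange and the old code printed it twice. A minimal standalone sketch of the gating pattern, using stand-in names rather than the library's real classes:

    # Illustrative sketch only: MiniAgent and this display_interaction stub are
    # stand-ins, not praisonaiagents' actual classes.
    import time

    def display_interaction(prompt: str, response: str, generation_time: float = 0.0) -> None:
        # Stand-in for the library's console renderer.
        print(f"[{generation_time:.2f}s] {prompt} -> {response}")

    class MiniAgent:
        def __init__(self, verbose: bool = True, using_custom_llm: bool = False):
            self.verbose = verbose
            # True when a custom LLM wrapper already renders its own output
            self._using_custom_llm = using_custom_llm

        def chat(self, prompt: str) -> str:
            start = time.time()
            response = f"echo: {prompt}"  # placeholder for the real completion call
            # The 0.0.120 change: skip the agent-level display when the LLM path shows it already.
            if self.verbose and not self._using_custom_llm:
                display_interaction(prompt, response, generation_time=time.time() - start)
            return response

    MiniAgent(using_custom_llm=True).chat("hi")   # nothing printed here; the LLM path would print it
    MiniAgent(using_custom_llm=False).chat("hi")  # printed exactly once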
@@ -13,7 +13,8 @@ import logging
  import os
  from pydantic import BaseModel, ConfigDict
  from ..main import display_instruction, display_tool_call, display_interaction
- from ..llm import get_openai_client
+ from ..llm import get_openai_client, LLM
+ import json

  # Define Pydantic models for structured output
  class TaskConfig(BaseModel):
@@ -238,33 +239,74 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
  """

  try:
- # Get OpenAI client
+ # Try to use OpenAI's structured output if available
+ use_openai_structured = False
+ client = None
+
  try:
- client = get_openai_client()
- except ValueError as e:
- # AutoAgents requires OpenAI for structured output generation
- raise ValueError(
- "AutoAgents requires OpenAI API for automatic agent generation. "
- "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
- "with manually configured agents for non-OpenAI providers."
- ) from e
+ # Check if we have OpenAI API and the model supports structured output
+ if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
+ client = get_openai_client()
+ use_openai_structured = True
+ except:
+ # If OpenAI client is not available, we'll use the LLM class
+ pass
+
+ if use_openai_structured and client:
+ # Use OpenAI's structured output for OpenAI models (backward compatibility)
+ response = client.beta.chat.completions.parse(
+ model=self.llm,
+ response_format=AutoAgentsConfig,
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+ {"role": "user", "content": prompt}
+ ]
+ )
+ config = response.choices[0].message.parsed
+ else:
+ # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+ llm_instance = LLM(
+ model=self.llm,
+ base_url=self.base_url,
+ api_key=self.api_key
+ )

- response = client.beta.chat.completions.parse(
- model=self.llm,
- response_format=AutoAgentsConfig,
- messages=[
- {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
- {"role": "user", "content": prompt}
- ]
- )
+ response_text = llm_instance.response(
+ prompt=prompt,
+ system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+ output_pydantic=AutoAgentsConfig,
+ temperature=0.7,
+ stream=False,
+ verbose=False
+ )
+
+ # Parse the JSON response
+ try:
+ # First try to parse as is
+ config_dict = json.loads(response_text)
+ config = AutoAgentsConfig(**config_dict)
+ except json.JSONDecodeError:
+ # If that fails, try to extract JSON from the response
+ # Handle cases where the model might wrap JSON in markdown blocks
+ cleaned_response = response_text.strip()
+ if cleaned_response.startswith("```json"):
+ cleaned_response = cleaned_response[7:]
+ if cleaned_response.startswith("```"):
+ cleaned_response = cleaned_response[3:]
+ if cleaned_response.endswith("```"):
+ cleaned_response = cleaned_response[:-3]
+ cleaned_response = cleaned_response.strip()
+
+ config_dict = json.loads(cleaned_response)
+ config = AutoAgentsConfig(**config_dict)

  # Ensure we have exactly max_agents number of agents
- if len(response.choices[0].message.parsed.agents) > self.max_agents:
- response.choices[0].message.parsed.agents = response.choices[0].message.parsed.agents[:self.max_agents]
- elif len(response.choices[0].message.parsed.agents) < self.max_agents:
- logging.warning(f"Generated {len(response.choices[0].message.parsed.agents)} agents, expected {self.max_agents}")
+ if len(config.agents) > self.max_agents:
+ config.agents = config.agents[:self.max_agents]
+ elif len(config.agents) < self.max_agents:
+ logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")

- return response.choices[0].message.parsed
+ return config
  except Exception as e:
  logging.error(f"Error generating configuration: {e}")
  raise
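In the fallback branch above, AutoAgents now asks the LiteLLM-backed LLM class for a plain JSON answer and validates it against the AutoAgentsConfig Pydantic model, stripping markdown code fences if the model wrapped its JSON in them. A self-contained sketch of that parse-and-validate step, with a toy DemoConfig model standing in for AutoAgentsConfig (parse_structured_response mirrors the cleanup logic in the diff and is not the library's API):

    import json
    from typing import List
    from pydantic import BaseModel

    class DemoConfig(BaseModel):
        main_instruction: str
        agents: List[str]

    def parse_structured_response(response_text: str) -> DemoConfig:
        try:
            # First try to parse the response as-is
            return DemoConfig(**json.loads(response_text))
        except json.JSONDecodeError:
            # Otherwise strip markdown code fences before parsing
            cleaned = response_text.strip()
            if cleaned.startswith("```json"):
                cleaned = cleaned[7:]
            if cleaned.startswith("```"):
                cleaned = cleaned[3:]
            if cleaned.endswith("```"):
                cleaned = cleaned[:-3]
            return DemoConfig(**json.loads(cleaned.strip()))

    fence = "`" * 3
    raw = fence + 'json\n{"main_instruction": "Research topic X", "agents": ["Researcher", "Writer"]}\n' + fence
    print(parse_structured_response(raw))  # parses despite the code fences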
@@ -87,6 +87,65 @@ class LLM:
  "llama-3.2-90b-text-preview": 6144 # 8,192 actual
  }

+ def _log_llm_config(self, method_name: str, **config):
+ """Centralized debug logging for LLM configuration and parameters.
+
+ Args:
+ method_name: The name of the method calling this logger (e.g., '__init__', 'get_response')
+ **config: Configuration parameters to log
+ """
+ # Check for debug logging - either global debug level OR explicit verbose mode
+ verbose = config.get('verbose', self.verbose if hasattr(self, 'verbose') else False)
+ should_log = logging.getLogger().getEffectiveLevel() == logging.DEBUG or (not isinstance(verbose, bool) and verbose >= 10)
+
+ if should_log:
+ # Mask sensitive information
+ safe_config = config.copy()
+ if 'api_key' in safe_config:
+ safe_config['api_key'] = "***" if safe_config['api_key'] is not None else None
+ if 'extra_settings' in safe_config and isinstance(safe_config['extra_settings'], dict):
+ safe_config['extra_settings'] = {k: v for k, v in safe_config['extra_settings'].items() if k not in ["api_key"]}
+
+ # Handle special formatting for certain fields
+ if 'prompt' in safe_config:
+ prompt = safe_config['prompt']
+ # Convert to string first for consistent logging behavior
+ prompt_str = str(prompt) if not isinstance(prompt, str) else prompt
+ if len(prompt_str) > 100:
+ safe_config['prompt'] = prompt_str[:100] + "..."
+ else:
+ safe_config['prompt'] = prompt_str
+ if 'system_prompt' in safe_config:
+ sp = safe_config['system_prompt']
+ if sp and isinstance(sp, str) and len(sp) > 100:
+ safe_config['system_prompt'] = sp[:100] + "..."
+ if 'chat_history' in safe_config:
+ ch = safe_config['chat_history']
+ safe_config['chat_history'] = f"[{len(ch)} messages]" if ch else None
+ if 'tools' in safe_config:
+ tools = safe_config['tools']
+ # Check if tools is iterable before processing
+ if tools and hasattr(tools, '__iter__') and not isinstance(tools, str):
+ safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
+ else:
+ safe_config['tools'] = None
+ if 'output_json' in safe_config:
+ oj = safe_config['output_json']
+ safe_config['output_json'] = str(oj.__class__.__name__) if oj else None
+ if 'output_pydantic' in safe_config:
+ op = safe_config['output_pydantic']
+ safe_config['output_pydantic'] = str(op.__class__.__name__) if op else None
+
+ # Log based on method name - check more specific conditions first
+ if method_name == '__init__':
+ logging.debug(f"LLM instance initialized with: {json.dumps(safe_config, indent=2, default=str)}")
+ elif "parameters" in method_name:
+ logging.debug(f"{method_name}: {json.dumps(safe_config, indent=2, default=str)}")
+ elif "_async" in method_name:
+ logging.debug(f"LLM async instance configuration: {json.dumps(safe_config, indent=2, default=str)}")
+ else:
+ logging.debug(f"{method_name} configuration: {json.dumps(safe_config, indent=2, default=str)}")
+
  def __init__(
  self,
  model: str,
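The new _log_llm_config helper above only emits when the root logger is at DEBUG level (or a numeric verbose >= 10 is passed), and it masks the API key and truncates long prompts before dumping the configuration as JSON. A small standalone sketch of that redact-then-log idea (illustrative only; the function name and fields here are not the library's API):

    import json
    import logging

    logging.basicConfig(level=logging.DEBUG)

    def log_llm_config(method_name: str, **config) -> None:
        # Skip entirely unless debug logging is enabled.
        if logging.getLogger().getEffectiveLevel() != logging.DEBUG:
            return
        safe = dict(config)
        if safe.get("api_key") is not None:
            safe["api_key"] = "***"                # never write credentials to the log
        prompt = safe.get("prompt")
        if isinstance(prompt, str) and len(prompt) > 100:
            safe["prompt"] = prompt[:100] + "..."  # keep log lines readable
        logging.debug(f"{method_name} configuration: {json.dumps(safe, indent=2, default=str)}")

    log_llm_config("get_response", model="gpt-4o-mini", api_key="sk-secret", prompt="Summarise " * 30)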
@@ -181,35 +240,34 @@ class LLM:
  litellm.modify_params = True
  self._setup_event_tracking(events)

- # Log all initialization parameters when in debug mode
- if not isinstance(verbose, bool) and verbose >= 10:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps,
- "extra_settings": {k: v for k, v in self.extra_settings.items() if k not in ["api_key"]}
- }
- logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
+ # Log all initialization parameters when in debug mode or verbose >= 10
+ self._log_llm_config(
+ '__init__',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps,
+ extra_settings=self.extra_settings
+ )

  def _is_ollama_provider(self) -> bool:
  """Detect if this is an Ollama provider regardless of naming convention"""
@@ -229,6 +287,39 @@ class LLM:
  return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)

+ def _process_stream_delta(self, delta, response_text: str, tool_calls: List[Dict], formatted_tools: Optional[List] = None) -> tuple:
+ """
+ Process a streaming delta chunk to extract content and tool calls.
+
+ Args:
+ delta: The delta object from a streaming chunk
+ response_text: The accumulated response text so far
+ tool_calls: The accumulated tool calls list so far
+ formatted_tools: Optional list of formatted tools for tool call support check
+
+ Returns:
+ tuple: (updated_response_text, updated_tool_calls)
+ """
+ # Process content
+ if delta.content:
+ response_text += delta.content
+
+ # Capture tool calls from streaming chunks if provider supports it
+ if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
+ for tc in delta.tool_calls:
+ if tc.index >= len(tool_calls):
+ tool_calls.append({
+ "id": tc.id,
+ "type": "function",
+ "function": {"name": "", "arguments": ""}
+ })
+ if tc.function.name:
+ tool_calls[tc.index]["function"]["name"] = tc.function.name
+ if tc.function.arguments:
+ tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+
+ return response_text, tool_calls
+
  def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
  """
  Safely parse tool call arguments with proper error handling
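_process_stream_delta above centralizes what the streaming loops in the later hunks previously repeated: it appends streamed content and reassembles tool calls keyed by tc.index, because providers deliver a function's name and its argument string in fragments spread across many chunks. A self-contained sketch of that index-keyed accumulation, using toy delta objects in place of the chunk objects litellm yields (not real litellm classes):

    from dataclasses import dataclass, field
    from typing import Dict, List, Optional

    @dataclass
    class FunctionDelta:
        name: Optional[str] = None
        arguments: Optional[str] = None

    @dataclass
    class ToolCallDelta:
        index: int
        id: Optional[str] = None
        function: FunctionDelta = field(default_factory=FunctionDelta)

    def accumulate(tool_calls: List[Dict], deltas: List[ToolCallDelta]) -> List[Dict]:
        for tc in deltas:
            if tc.index >= len(tool_calls):
                # First fragment for this index: create the skeleton entry.
                tool_calls.append({"id": tc.id, "type": "function",
                                   "function": {"name": "", "arguments": ""}})
            if tc.function.name:
                tool_calls[tc.index]["function"]["name"] = tc.function.name
            if tc.function.arguments:
                # Argument JSON arrives in pieces; concatenate them in order.
                tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
        return tool_calls

    calls: List[Dict] = []
    accumulate(calls, [ToolCallDelta(0, "call_1", FunctionDelta(name="get_weather"))])
    accumulate(calls, [ToolCallDelta(0, function=FunctionDelta(arguments='{"city": "Par'))])
    accumulate(calls, [ToolCallDelta(0, function=FunctionDelta(arguments='is"}'))])
    print(calls)  # one fully assembled get_weather call with arguments {"city": "Paris"}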
@@ -497,54 +588,53 @@ class LLM:
  """Enhanced get_response with all OpenAI-like features"""
  logging.info(f"Getting response from {self.model}")
  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps
- }
- logging.debug(f"LLM instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
-
- # Log the parameter values passed to get_response
- param_info = {
- "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
- "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
- "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
- "temperature": temperature,
- "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
- "output_json": str(output_json.__class__.__name__) if output_json else None,
- "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
- "verbose": verbose,
- "markdown": markdown,
- "self_reflect": self_reflect,
- "max_reflect": max_reflect,
- "min_reflect": min_reflect,
- "agent_name": agent_name,
- "agent_role": agent_role,
- "agent_tools": agent_tools,
- "kwargs": str(kwargs)
- }
- logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'LLM instance',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps
+ )
+
+ # Log the parameter values passed to get_response
+ self._log_llm_config(
+ 'get_response parameters',
+ prompt=prompt,
+ system_prompt=system_prompt,
+ chat_history=chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=verbose,
+ markdown=markdown,
+ self_reflect=self_reflect,
+ max_reflect=max_reflect,
+ min_reflect=min_reflect,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ kwargs=str(kwargs)
+ )
  try:
  import litellm
  # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -651,23 +741,11 @@ class LLM:
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )
  if delta.content:
- response_text += delta.content
  live.update(display_generating(response_text, current_time))
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
  else:
  # Non-verbose streaming
  for chunk in litellm.completion(
@@ -681,22 +759,9 @@ class LLM:
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
- if delta.content:
- response_text += delta.content
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )

  response_text = response_text.strip()
@@ -1178,54 +1243,53 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  import litellm
  logging.info(f"Getting async response from {self.model}")
  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "logit_bias": self.logit_bias,
- "response_format": self.response_format,
- "seed": self.seed,
- "logprobs": self.logprobs,
- "top_logprobs": self.top_logprobs,
- "api_version": self.api_version,
- "stop_phrases": self.stop_phrases,
- "api_key": "***" if self.api_key else None, # Mask API key for security
- "base_url": self.base_url,
- "verbose": self.verbose,
- "markdown": self.markdown,
- "self_reflect": self.self_reflect,
- "max_reflect": self.max_reflect,
- "min_reflect": self.min_reflect,
- "reasoning_steps": self.reasoning_steps
- }
- logging.debug(f"LLM async instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
-
- # Log the parameter values passed to get_response_async
- param_info = {
- "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
- "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
- "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
- "temperature": temperature,
- "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
- "output_json": str(output_json.__class__.__name__) if output_json else None,
- "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
- "verbose": verbose,
- "markdown": markdown,
- "self_reflect": self_reflect,
- "max_reflect": max_reflect,
- "min_reflect": min_reflect,
- "agent_name": agent_name,
- "agent_role": agent_role,
- "agent_tools": agent_tools,
- "kwargs": str(kwargs)
- }
- logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'get_response_async',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ logit_bias=self.logit_bias,
+ response_format=self.response_format,
+ seed=self.seed,
+ logprobs=self.logprobs,
+ top_logprobs=self.top_logprobs,
+ api_version=self.api_version,
+ stop_phrases=self.stop_phrases,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ verbose=self.verbose,
+ markdown=self.markdown,
+ self_reflect=self.self_reflect,
+ max_reflect=self.max_reflect,
+ min_reflect=self.min_reflect,
+ reasoning_steps=self.reasoning_steps
+ )
+
+ # Log the parameter values passed to get_response_async
+ self._log_llm_config(
+ 'get_response_async parameters',
+ prompt=prompt,
+ system_prompt=system_prompt,
+ chat_history=chat_history,
+ temperature=temperature,
+ tools=tools,
+ output_json=output_json,
+ output_pydantic=output_pydantic,
+ verbose=verbose,
+ markdown=markdown,
+ self_reflect=self_reflect,
+ max_reflect=max_reflect,
+ min_reflect=min_reflect,
+ agent_name=agent_name,
+ agent_role=agent_role,
+ agent_tools=agent_tools,
+ kwargs=str(kwargs)
+ )
  reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
  litellm.set_verbose = False
@@ -1297,24 +1361,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )
  if delta.content:
- response_text += delta.content
  print("\033[K", end="\r")
  print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
  else:
  # Non-verbose streaming
  async for chunk in await litellm.acompletion(
@@ -1328,22 +1380,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
- if delta.content:
- response_text += delta.content
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )

  response_text = response_text.strip()
@@ -1884,22 +1923,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  logger.debug("Using synchronous response function")

  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "stream": stream,
- "verbose": verbose,
- "markdown": markdown,
- "kwargs": str(kwargs)
- }
- logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'Response method',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ stream=stream,
+ verbose=verbose,
+ markdown=markdown,
+ kwargs=str(kwargs)
+ )

  # Build messages list using shared helper (simplified version without JSON output)
  messages, _ = self._build_messages(
@@ -1985,22 +2023,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  logger.debug("Using asynchronous response function")

  # Log all self values when in debug mode
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
- debug_info = {
- "model": self.model,
- "timeout": self.timeout,
- "temperature": temperature,
- "top_p": self.top_p,
- "n": self.n,
- "max_tokens": self.max_tokens,
- "presence_penalty": self.presence_penalty,
- "frequency_penalty": self.frequency_penalty,
- "stream": stream,
- "verbose": verbose,
- "markdown": markdown,
- "kwargs": str(kwargs)
- }
- logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+ self._log_llm_config(
+ 'Async response method',
+ model=self.model,
+ timeout=self.timeout,
+ temperature=temperature,
+ top_p=self.top_p,
+ n=self.n,
+ max_tokens=self.max_tokens,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ stream=stream,
+ verbose=verbose,
+ markdown=markdown,
+ kwargs=str(kwargs)
+ )

  # Build messages list using shared helper (simplified version without JSON output)
  messages, _ = self._build_messages(
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.118
+ Version: 0.0.120
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
  [project]
  name = "praisonaiagents"
- version = "0.0.118"
+ version = "0.0.120"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [