praisonaiagents 0.0.146__py3-none-any.whl → 0.0.148__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
praisonaiagents/__init__.py CHANGED
@@ -2,67 +2,16 @@
 Praison AI Agents - A package for hierarchical AI agent task execution
 """
 
-# Configure logging before any other imports
-import os
-import logging
-import warnings
-import re
-from rich.logging import RichHandler
+# Apply warning patch BEFORE any imports to intercept warnings at the source
+from . import _warning_patch
 
-# Set environment variables to suppress warnings at the source
-os.environ["LITELLM_TELEMETRY"] = "False"
-os.environ["LITELLM_DROP_PARAMS"] = "True"
-# Disable httpx warnings
-os.environ["HTTPX_DISABLE_WARNINGS"] = "True"
+# Import centralized logging configuration FIRST
+from . import _logging
 
-# Get log level from environment variable
-LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
-
-# Determine if warnings should be suppressed (not in DEBUG mode and not in tests)
-def _should_suppress_warnings():
-    import sys
-    return (LOGLEVEL != 'DEBUG' and
-            not hasattr(sys, '_called_from_test') and
-            'pytest' not in sys.modules and
-            os.environ.get('PYTEST_CURRENT_TEST') is None)
-
-# Configure root logger
-logging.basicConfig(
-    level=getattr(logging, LOGLEVEL, logging.INFO),
-    format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
-    datefmt="[%X]",
-    handlers=[RichHandler(rich_tracebacks=True)]
-)
-
-# Suppress specific noisy loggers - more aggressive suppression (only when not in DEBUG mode)
-if _should_suppress_warnings():
-    logging.getLogger("litellm").setLevel(logging.CRITICAL)
-    logging.getLogger("litellm_logging").setLevel(logging.CRITICAL)
-    logging.getLogger("httpx").setLevel(logging.CRITICAL)
-    logging.getLogger("httpcore").setLevel(logging.CRITICAL)
-    logging.getLogger("pydantic").setLevel(logging.WARNING)
-    logging.getLogger("markdown_it").setLevel(logging.WARNING)
-    logging.getLogger("rich.markdown").setLevel(logging.WARNING)
-
-# Note: litellm child loggers (litellm.utils, litellm.proxy, etc.) automatically inherit
-# the CRITICAL level from the parent litellm logger due to Python's hierarchical logging
-
-# Comprehensive warning suppression for litellm and dependencies (issue #1033)
-# These warnings clutter output and are not actionable for users
-
-# Set warning filter to suppress all warnings from problematic modules at import time
-if _should_suppress_warnings():
-    # Module-specific warning suppression - applied before imports (only when not in DEBUG mode)
-    for module in ['litellm', 'httpx', 'httpcore', 'pydantic']:
-        warnings.filterwarnings("ignore", category=DeprecationWarning, module=module)
-        warnings.filterwarnings("ignore", category=UserWarning, module=module)
-
-    # Specific filters for known problematic warnings
-    warnings.filterwarnings("ignore", message="There is no current event loop")
-    warnings.filterwarnings("ignore", message=".*Use 'content=<...>' to upload raw bytes/text content.*")
-    warnings.filterwarnings("ignore", message=".*The `dict` method is deprecated; use `model_dump` instead.*")
-    warnings.filterwarnings("ignore", message=".*model_dump.*deprecated.*")
+# Configure root logger after logging is initialized
+_logging.configure_root_logger()
 
+# Now import everything else
 from .agent.agent import Agent
 from .agent.image_agent import ImageAgent
 from .agent.context_agent import ContextAgent, create_context_agent
@@ -131,30 +80,6 @@ except ImportError:
 # Add Agents as an alias for PraisonAIAgents
 Agents = PraisonAIAgents
 
-# Additional warning suppression after all imports (runtime suppression)
-if _should_suppress_warnings():
-    # Try to import and configure litellm to suppress its warnings
-    try:
-        import litellm
-        # Disable all litellm logging and telemetry
-        litellm.telemetry = False
-        litellm.drop_params = True
-        # Set litellm to suppress warnings
-        litellm.suppress_debug_info = True
-        if hasattr(litellm, '_logging_obj'):
-            litellm._logging_obj.setLevel(logging.CRITICAL)
-    except (ImportError, AttributeError):
-        pass
-
-    # Suppress pydantic warnings that might occur at runtime (safer approach)
-    try:
-        warnings.filterwarnings("ignore", category=UserWarning, module="pydantic",
-                                message=".*model_dump.*deprecated.*")
-        warnings.filterwarnings("ignore", category=UserWarning, module="pydantic",
-                                message=".*dict.*method.*deprecated.*")
-    except Exception:
-        pass
-
 # Apply telemetry auto-instrumentation after all imports
 if _telemetry_available:
     try:
@@ -210,4 +135,4 @@ __all__ = [
 
 # Add MCP to __all__ if available
 if _mcp_available:
-    __all__.append('MCP')
+    __all__.append('MCP')
praisonaiagents/_logging.py ADDED
@@ -0,0 +1,134 @@
+"""
+Centralized logging configuration for PraisonAI Agents.
+This module consolidates all logging configuration in one place to avoid duplication.
+"""
+
+import os
+import logging
+from typing import List
+
+# ========================================================================
+# ENVIRONMENT CONFIGURATION
+# ========================================================================
+def _configure_environment():
+    """Set environment variables to suppress debug messages at the source."""
+    env_vars = {
+        # LiteLLM configuration
+        "LITELLM_TELEMETRY": "False",
+        "LITELLM_DROP_PARAMS": "True",
+        "LITELLM_LOG": "ERROR",
+        "LITELLM_DEBUG": "False",
+        "LITELLM_SUPPRESS_DEBUG_INFO": "True",
+        "LITELLM_VERBOSE": "False",
+        "LITELLM_SET_VERBOSE": "False",
+        # HTTPX configuration
+        "HTTPX_DISABLE_WARNINGS": "True",
+        "HTTPX_LOG_LEVEL": "ERROR",
+        # Pydantic configuration
+        "PYDANTIC_WARNINGS_ENABLED": "False",
+    }
+
+    for key, value in env_vars.items():
+        os.environ[key] = value
+
+
+# ========================================================================
+# LOGGER CONFIGURATION
+# ========================================================================
+def _get_all_noisy_loggers() -> List[str]:
+    """Get list of all loggers that should be suppressed."""
+    return [
+        # LiteLLM and variants
+        "litellm", "LiteLLM", "LiteLLM Router", "LiteLLM Proxy",
+        # HTTP libraries
+        "httpx", "httpx._trace", "httpx._client",
+        "httpcore", "httpcore._trace",
+        # OpenAI
+        "openai._base_client", "openai._client",
+        # Markdown
+        "markdown_it", "rich.markdown",
+        # System
+        "asyncio", "selector_events", "pydantic",
+        "praisonaiagents.telemetry.telemetry",
+    ]
+
+
+def _configure_loggers():
+    """Configure all loggers based on LOGLEVEL environment variable."""
+    loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+    # When DEBUG is set, allow some HTTP logging for API endpoints
+    if loglevel == 'DEBUG':
+        allowed_debug_loggers = {"httpx", "httpx._client", "openai._client"}
+
+        for logger_name in _get_all_noisy_loggers():
+            if logger_name not in allowed_debug_loggers:
+                logger = logging.getLogger(logger_name)
+                logger.setLevel(logging.CRITICAL)
+                logger.handlers = []
+                logger.propagate = False
+
+        # Ensure allowed loggers are at INFO level to show API calls
+        for logger_name in allowed_debug_loggers:
+            logger = logging.getLogger(logger_name)
+            logger.setLevel(logging.INFO)
+    else:
+        # Suppress all noisy loggers when not in DEBUG mode
+        for logger_name in _get_all_noisy_loggers():
+            logger = logging.getLogger(logger_name)
+            logger.setLevel(logging.CRITICAL)
+            logger.handlers = []
+            logger.propagate = False
+
+
+# ========================================================================
+# LITELLM CONFIGURATION
+# ========================================================================
+def _configure_litellm():
+    """Configure litellm after it's imported."""
+    try:
+        import litellm
+        litellm.telemetry = False
+        litellm.drop_params = True
+        litellm.suppress_debug_info = True
+
+        if hasattr(litellm, '_logging_obj'):
+            litellm._logging_obj.setLevel(logging.CRITICAL)
+
+        if hasattr(litellm, 'set_verbose'):
+            litellm.set_verbose = False
+
+    except (ImportError, AttributeError):
+        pass
+
+
+# ========================================================================
+# ROOT LOGGER CONFIGURATION
+# ========================================================================
+def configure_root_logger():
+    """Configure the root logger with RichHandler."""
+    from rich.logging import RichHandler
+
+    loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+    logging.basicConfig(
+        level=getattr(logging, loglevel, logging.INFO),
+        format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
+        datefmt="[%X]",
+        handlers=[RichHandler(rich_tracebacks=True)],
+        force=True
+    )
+
+
+# ========================================================================
+# INITIALIZATION
+# ========================================================================
+def initialize_logging():
+    """Initialize all logging configuration."""
+    _configure_environment()
+    _configure_loggers()
+    _configure_litellm()
+
+
+# Auto-initialize on import
+initialize_logging()
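For orientation, a minimal usage sketch (not part of the package) of how this configuration is consumed: LOGLEVEL is read by _configure_loggers() and configure_root_logger(), and importing the package runs initialize_logging() automatically. It assumes praisonaiagents 0.0.148 and its dependencies are installed.

import os

# Set the level before the import so both the root logger and the
# allow-listed httpx/openai loggers pick it up.
os.environ["LOGLEVEL"] = "DEBUG"   # default is "INFO", which silences litellm/httpx noise

import logging
import praisonaiagents  # import applies _warning_patch and _logging.initialize_logging()

logging.getLogger(__name__).debug("debug logging active; httpx/openai request logs stay visible")
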
praisonaiagents/_warning_patch.py ADDED
@@ -0,0 +1,73 @@
+"""
+Minimal warning patch to suppress specific third-party warnings.
+This module patches the warnings module to intercept specific messages.
+"""
+
+import warnings
+import functools
+import sys
+
+# Apply aggressive warning filters first
+warnings.filterwarnings("ignore", message=".*Pydantic serializer warnings.*")
+warnings.filterwarnings("ignore", message=".*PydanticSerializationUnexpectedValue.*")
+warnings.filterwarnings("ignore", message=".*Expected 9 fields but got.*")
+warnings.filterwarnings("ignore", message=".*Expected `StreamingChoices` but got.*")
+warnings.filterwarnings("ignore", message=".*serialized value may not be as expected.*")
+warnings.filterwarnings("ignore", message=".*Use 'content=<...>' to upload raw bytes/text content.*")
+warnings.filterwarnings("ignore", message=".*The `dict` method is deprecated.*")
+warnings.filterwarnings("ignore", category=UserWarning, module="pydantic.*")
+
+# Store the original warn function
+_original_warn = warnings.warn
+_original_warn_explicit = warnings.warn_explicit
+
+# Messages to suppress (partial matches)
+SUPPRESSED_PATTERNS = [
+    "Use 'content=<...>' to upload raw bytes/text content",
+    "The `dict` method is deprecated; use `model_dump` instead",
+    "Pydantic serializer warnings",
+    "PydanticSerializationUnexpectedValue",
+    "Expected 9 fields but got 5 for type `Message`",
+    "Expected `StreamingChoices` but got `Choices`",
+    "serialized value may not be as expected"
+]
+
+@functools.wraps(_original_warn)
+def _patched_warn(message, category=None, stacklevel=1, source=None):
+    """Patched warn function that suppresses specific messages."""
+    msg_str = str(message)
+
+    for pattern in SUPPRESSED_PATTERNS:
+        if pattern in msg_str:
+            return
+
+    if category == UserWarning and "pydantic" in msg_str.lower():
+        return
+
+    _original_warn(message, category, stacklevel, source)
+
+@functools.wraps(_original_warn_explicit)
+def _patched_warn_explicit(message, category, filename, lineno, module=None, registry=None, module_globals=None, source=None):
+    """Patched warn_explicit function that suppresses specific messages."""
+    msg_str = str(message)
+
+    for pattern in SUPPRESSED_PATTERNS:
+        if pattern in msg_str:
+            return
+
+    if category == UserWarning and "pydantic" in msg_str.lower():
+        return
+
+    if module and "pydantic" in str(module):
+        return
+
+    _original_warn_explicit(message, category, filename, lineno, module, registry, module_globals, source)
+
+# Apply the patches
+warnings.warn = _patched_warn
+warnings.warn_explicit = _patched_warn_explicit
+
+# Also patch sys.modules warnings if it exists
+if 'warnings' in sys.modules:
+    sys.modules['warnings'].warn = _patched_warn
+    sys.modules['warnings'].warn_explicit = _patched_warn_explicit
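The module above relies on nothing more than substring matching inside wrapped warnings.warn / warnings.warn_explicit functions. A self-contained sketch of the same pattern, with illustrative names that are not part of the package:

import warnings

_orig_warn = warnings.warn
NOISY_SUBSTRINGS = ("The `dict` method is deprecated",)

def _quiet_warn(message, category=None, stacklevel=1, source=None):
    # Drop messages containing a known-noisy substring, otherwise delegate.
    if any(s in str(message) for s in NOISY_SUBSTRINGS):
        return
    _orig_warn(message, category or UserWarning, stacklevel + 1, source)

warnings.warn = _quiet_warn
warnings.warn("The `dict` method is deprecated; use `model_dump` instead")  # suppressed
warnings.warn("unrelated warning")  # still emitted as a UserWarning
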
praisonaiagents/agent/agent.py CHANGED
@@ -331,8 +331,15 @@ class Agent:
 
         # Configure logging to suppress unwanted outputs
         logging.getLogger("litellm").setLevel(logging.WARNING)
-        logging.getLogger("httpx").setLevel(logging.WARNING)
-        logging.getLogger("httpcore").setLevel(logging.WARNING)
+
+        # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
+        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+        if loglevel == 'DEBUG':
+            logging.getLogger("httpx").setLevel(logging.INFO)
+            logging.getLogger("httpcore").setLevel(logging.INFO)
+        else:
+            logging.getLogger("httpx").setLevel(logging.WARNING)
+            logging.getLogger("httpcore").setLevel(logging.WARNING)
 
         # If instructions are provided, use them to set role, goal, and backstory
         if instructions:
@@ -1229,14 +1236,14 @@ Your Goal: {self.goal}"""
                     border_style="green",
                     expand=False
                 )
-            else:
-                # No content yet: show generating message
-                return Panel(
-                    f"[bold cyan]Generating response...[/bold cyan]",
-                    title=f"[bold]{self.name}[/bold] - {elapsed:.1f}s",
-                    border_style="cyan",
-                    expand=False
-                )
+            # else:
+            #     # No content yet: show generating message
+            #     return Panel(
+            #         f"[bold cyan]Generating response...[/bold cyan]",
+            #         title=f"[bold]{self.name}[/bold] - {elapsed:.1f}s",
+            #         border_style="cyan",
+            #         expand=False
+            #     )
 
     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None, task_name=None, task_description=None, task_id=None):
         # Reset the final display flag for each new conversation
praisonaiagents/llm/__init__.py CHANGED
@@ -1,38 +1,9 @@
-import logging
-import warnings
 import os
-import re
 
-# Disable litellm telemetry before any imports
+# Ensure litellm telemetry is disabled before imports
 os.environ["LITELLM_TELEMETRY"] = "False"
 
-# Check if warnings should be suppressed (consistent with main __init__.py)
-def _should_suppress_warnings():
-    import sys
-    LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
-    return (LOGLEVEL != 'DEBUG' and
-            not hasattr(sys, '_called_from_test') and
-            'pytest' not in sys.modules and
-            os.environ.get('PYTEST_CURRENT_TEST') is None)
-
-# Suppress all relevant logs at module level - more aggressive suppression consistent with main __init__.py (only when not in DEBUG mode)
-if _should_suppress_warnings():
-    logging.getLogger("litellm").setLevel(logging.CRITICAL)
-    logging.getLogger("openai").setLevel(logging.WARNING)
-    logging.getLogger("httpx").setLevel(logging.CRITICAL)
-    logging.getLogger("httpcore").setLevel(logging.CRITICAL)
-    logging.getLogger("pydantic").setLevel(logging.WARNING)
-
-# Note: litellm child loggers automatically inherit the CRITICAL level from the parent logger
-
-# Warning filters are centrally managed in the main __init__.py file
-# Apply additional local suppression for safety during LLM imports (only when not in DEBUG mode)
-if _should_suppress_warnings():
-    for module in ['litellm', 'httpx', 'httpcore', 'pydantic']:
-        warnings.filterwarnings("ignore", category=DeprecationWarning, module=module)
-        warnings.filterwarnings("ignore", category=UserWarning, module=module)
-
-# Import after suppressing warnings
+# Import modules
 from .llm import LLM, LLMContextLengthExceededException
 from .openai_client import (
     OpenAIClient,
@@ -57,22 +28,6 @@ from .model_router import (
     create_routing_agent
 )
 
-# Ensure comprehensive litellm configuration after import (only when not in DEBUG mode)
-if _should_suppress_warnings():
-    try:
-        import litellm
-        # Disable all litellm logging and telemetry features
-        litellm.telemetry = False
-        litellm.drop_params = True
-        if hasattr(litellm, 'suppress_debug_info'):
-            litellm.suppress_debug_info = True
-        # Set all litellm loggers to CRITICAL level
-        if hasattr(litellm, '_logging_obj'):
-            litellm._logging_obj.setLevel(logging.CRITICAL)
-        # Note: Child loggers inherit from parent, no need to iterate over all loggers
-    except ImportError:
-        pass
-
 __all__ = [
     "LLM",
     "LLMContextLengthExceededException",
@@ -92,4 +47,4 @@ __all__ = [
     "ModelProfile",
     "TaskComplexity",
     "create_routing_agent"
-]
+]
praisonaiagents/llm/llm.py CHANGED
@@ -20,8 +20,7 @@ from ..main import (
 from rich.console import Console
 from rich.live import Live
 
-# Disable litellm telemetry before any imports
-os.environ["LITELLM_TELEMETRY"] = "False"
+# Logging is already configured in _logging.py via __init__.py
 
 # TODO: Include in-build tool calling in LLM class
 # TODO: Restructure so that duplicate calls are not made (Sync with agent.py)
@@ -95,7 +94,7 @@ class LLM:
     OLLAMA_FINAL_ANSWER_PROMPT = "Based on the tool results above, please provide the final answer to the original question."
 
     # Ollama iteration threshold for summary generation
-    OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3
+    OLLAMA_SUMMARY_ITERATION_THRESHOLD = 1
 
     def _log_llm_config(self, method_name: str, **config):
         """Centralized debug logging for LLM configuration and parameters.
@@ -191,26 +190,35 @@ class LLM:
            litellm._async_success_callback = []
            litellm.callbacks = []
 
+           # Suppress all litellm debug info
+           litellm.suppress_debug_info = True
+           if hasattr(litellm, '_logging'):
+               litellm._logging._disable_debugging()
+
            verbose = extra_settings.get('verbose', True)
 
-           # Only suppress logs if not in debug mode
-           if not isinstance(verbose, bool) and verbose >= 10:
-               # Enable detailed debug logging
-               logging.getLogger("asyncio").setLevel(logging.DEBUG)
-               logging.getLogger("selector_events").setLevel(logging.DEBUG)
-               logging.getLogger("litellm.utils").setLevel(logging.DEBUG)
-               logging.getLogger("litellm.main").setLevel(logging.DEBUG)
-               litellm.suppress_debug_messages = False
-               litellm.set_verbose = True
+           # Always suppress litellm's internal debug messages
+           # These are from external libraries and not useful for debugging user code
+           logging.getLogger("litellm.utils").setLevel(logging.WARNING)
+           logging.getLogger("litellm.main").setLevel(logging.WARNING)
+
+           # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
+           loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+           if loglevel == 'DEBUG':
+               logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.INFO)
            else:
-               # Suppress debug logging for normal operation
-               logging.getLogger("asyncio").setLevel(logging.WARNING)
-               logging.getLogger("selector_events").setLevel(logging.WARNING)
-               logging.getLogger("litellm.utils").setLevel(logging.WARNING)
-               logging.getLogger("litellm.main").setLevel(logging.WARNING)
-               litellm.suppress_debug_messages = True
+               logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.WARNING)
+
+           logging.getLogger("litellm.litellm_logging").setLevel(logging.WARNING)
+           logging.getLogger("litellm.transformation").setLevel(logging.WARNING)
+           litellm.suppress_debug_messages = True
+           if hasattr(litellm, '_logging'):
                litellm._logging._disable_debugging()
-               warnings.filterwarnings("ignore", category=RuntimeWarning)
+           warnings.filterwarnings("ignore", category=RuntimeWarning)
+
+           # Keep asyncio at WARNING unless explicitly in high debug mode
+           logging.getLogger("asyncio").setLevel(logging.WARNING)
+           logging.getLogger("selector_events").setLevel(logging.WARNING)
 
        except ImportError:
            raise ImportError(
@@ -329,29 +337,47 @@ class LLM:
        # For Ollama, always generate summary when we have tool results
        # This prevents infinite loops caused by empty/minimal responses
 
-       # Build tool summary more naturally to match OpenAI-style responses
-       if len(tool_results) == 1:
+       # Filter out error results first
+       valid_results = []
+       for result in tool_results:
+           # Skip error responses
+           if isinstance(result, dict) and 'error' in result:
+               continue
+           valid_results.append(result)
+
+       # If no valid results, return None to continue
+       if not valid_results:
+           return None
+
+       # Generate a natural summary based on the tool results
+       if len(valid_results) == 1:
            # Single tool result - create natural response
-           result = tool_results[0]
-           if isinstance(result, dict) and 'result' in result:
-               return str(result['result'])
-           else:
-               return str(result)
+           result = valid_results[0]
+           # For simple numeric results, create a more natural response
+           if isinstance(result, (int, float)):
+               return f"The result is {result}."
+           return str(result)
        else:
            # Multiple tool results - create coherent summary
-           summary_lines = []
-           for i, result in enumerate(tool_results):
-               if isinstance(result, dict) and 'result' in result:
-                   function_name = result.get('function_name', 'Tool')
-                   summary_lines.append(f"{function_name}: {result['result']}")
-               else:
-                   summary_lines.append(f"Tool {i+1}: {result}")
+           summary_parts = []
 
-           # Create more natural summary text
-           if len(summary_lines) == 2:
-               return f"{summary_lines[0]}. {summary_lines[1]}."
-           else:
-               return "Based on the tool execution: " + ". ".join(summary_lines) + "."
+           for result in valid_results:
+               result_str = str(result)
+               # Clean up the result string
+               result_str = result_str.strip()
+
+               # If result is just a number, keep it simple
+               if isinstance(result, (int, float)):
+                   # Don't add extra context, let the LLM's response provide that
+                   pass
+               # Ensure string results end with proper punctuation
+               elif result_str and not result_str[-1] in '.!?':
+                   result_str += '.'
+
+               summary_parts.append(result_str)
+
+           # Join the parts naturally
+           return " ".join(summary_parts)
 
    def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
        """
@@ -474,7 +500,22 @@ class LLM:
 
        for param_name, param_value in arguments.items():
            if param_name in valid_params:
-               filtered_args[param_name] = param_value
+               # Cast parameter value to the expected type
+               param = sig.parameters[param_name]
+               if param.annotation != inspect.Parameter.empty:
+                   try:
+                       if param.annotation == int and isinstance(param_value, str):
+                           filtered_args[param_name] = int(param_value)
+                       elif param.annotation == float and isinstance(param_value, str):
+                           filtered_args[param_name] = float(param_value)
+                       elif param.annotation == bool and isinstance(param_value, str):
+                           filtered_args[param_name] = param_value.lower() in ('true', '1', 'yes')
+                       else:
+                           filtered_args[param_name] = param_value
+                   except (ValueError, TypeError):
+                       filtered_args[param_name] = param_value
+               else:
+                   filtered_args[param_name] = param_value
            else:
                invalid_params.append(param_name)
 
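The casting added in this hunk can be exercised in isolation. A small sketch with a hypothetical tool function; cast_str_args and add are illustrative names, not package APIs:

import inspect

def cast_str_args(func, arguments: dict) -> dict:
    sig = inspect.signature(func)
    cast = {}
    for name, value in arguments.items():
        if name not in sig.parameters:
            continue  # drop arguments the tool does not accept
        ann = sig.parameters[name].annotation
        try:
            if ann is int and isinstance(value, str):
                cast[name] = int(value)
            elif ann is float and isinstance(value, str):
                cast[name] = float(value)
            elif ann is bool and isinstance(value, str):
                cast[name] = value.lower() in ("true", "1", "yes")
            else:
                cast[name] = value
        except (ValueError, TypeError):
            cast[name] = value  # keep the raw value if casting fails
    return cast

def add(a: int, b: int) -> int:
    return a + b

print(cast_str_args(add, {"a": "2", "b": "3", "unit": "km"}))  # {'a': 2, 'b': 3}
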
@@ -514,19 +555,10 @@ class LLM:
        if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
            return False, None, iteration_count
 
-       # For Ollama: if we have meaningful tool results but empty responses,
-       # give LLM one final chance with explicit prompt for final answer
-       if accumulated_tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
-           # Add explicit prompt asking for final answer
-           messages.append({
-               "role": "user",
-               "content": self.OLLAMA_FINAL_ANSWER_PROMPT
-           })
-           # Continue to next iteration to get the final response
-           iteration_count += 1
-           return False, None, iteration_count
-       else:
-           # If still no response after final answer prompt, generate summary
+       # For Ollama: if we have meaningful tool results, generate summary immediately
+       # Don't wait for more iterations as Ollama tends to repeat tool calls
+       if accumulated_tool_results and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+           # Generate summary from tool results
            tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
            if tool_summary:
                return True, tool_summary, iteration_count
@@ -874,7 +906,7 @@ class LLM:
                if display_text and str(display_text).strip():
                    display_instruction(
                        f"Agent {agent_name} is processing prompt: {display_text}",
-                       console=console,
+                       console=self.console,
                        agent_name=agent_name,
                        agent_role=agent_role,
                        agent_tools=agent_tools
@@ -920,7 +952,7 @@ class LLM:
                            f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                            markdown=markdown,
                            generation_time=generation_time_val,
-                           console=console,
+                           console=self.console,
                            agent_name=agent_name,
                            agent_role=agent_role,
                            agent_tools=agent_tools,
@@ -936,7 +968,7 @@ class LLM:
                            response_text,
                            markdown=markdown,
                            generation_time=generation_time_val,
-                           console=console,
+                           console=self.console,
                            agent_name=agent_name,
                            agent_role=agent_role,
                            agent_tools=agent_tools,
@@ -971,6 +1003,11 @@ class LLM:
                # Provider doesn't support streaming with tools, use non-streaming
                use_streaming = False
 
+           # Gemini has issues with streaming + tools, disable streaming for Gemini when tools are present
+           if use_streaming and formatted_tools and self._is_gemini_model():
+               logging.debug("Disabling streaming for Gemini model with tools due to JSON parsing issues")
+               use_streaming = False
+
            # Track whether fallback was successful to avoid duplicate API calls
            fallback_completed = False
 
@@ -984,7 +1021,7 @@ class LLM:
                    try:
                        if verbose:
                            # Verbose streaming: show display_generating during streaming
-                           with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
+                           with Live(display_generating("", current_time), console=self.console, refresh_per_second=4) as live:
                                for chunk in litellm.completion(
                                    **self._build_completion_params(
                                        messages=messages,
@@ -1031,8 +1068,9 @@ class LLM:
                    try:
                        if verbose:
                            # When verbose=True, always use streaming for better UX
-                           with Live(display_generating("", current_time), console=console, refresh_per_second=4, transient=True) as live:
+                           with Live(display_generating("", current_time), console=self.console, refresh_per_second=4, transient=True) as live:
                                response_text = ""
+                               tool_calls = []
                                # Use streaming when verbose for progressive display
                                for chunk in litellm.completion(
                                    **self._build_completion_params(
@@ -1047,19 +1085,20 @@ class LLM:
                                ):
                                    if chunk and chunk.choices and chunk.choices[0].delta:
                                        delta = chunk.choices[0].delta
-                                       if hasattr(delta, "content") and delta.content:
-                                           response_text += delta.content
-                                           live.update(display_generating(response_text, current_time))
+                                       response_text, tool_calls = self._process_stream_delta(
+                                           delta, response_text, tool_calls, formatted_tools
+                                       )
+                                       live.update(display_generating(response_text, current_time))
 
                            # Clear the live display after completion
-                           console.print()
+                           self.console.print()
 
                            # Create final response structure
                            final_response = {
                                "choices": [{
                                    "message": {
                                        "content": response_text,
-                                       "tool_calls": None
+                                       "tool_calls": tool_calls if tool_calls else None
                                    }
                                }]
                            }
@@ -1076,7 +1115,9 @@ class LLM:
                                    **kwargs
                                )
                            )
-                           response_text = final_response["choices"][0]["message"]["content"]
+                           # Handle None content from Gemini
+                           response_content = final_response["choices"][0]["message"].get("content")
+                           response_text = response_content if response_content is not None else ""
 
                        # Execute callbacks and display based on verbose setting
                        if verbose and not interaction_displayed:
@@ -1086,7 +1127,7 @@ class LLM:
                                response_text,
                                markdown=markdown,
                                generation_time=time.time() - current_time,
-                               console=console,
+                               console=self.console,
                                agent_name=agent_name,
                                agent_role=agent_role,
                                agent_tools=agent_tools,
@@ -1173,8 +1214,9 @@ class LLM:
                    # Non-streaming approach (when tools require it, streaming is disabled, or streaming fallback)
                    if verbose:
                        # When verbose=True, always use streaming for better UX
-                       with Live(display_generating("", current_time), console=console, refresh_per_second=4, transient=True) as live:
+                       with Live(display_generating("", current_time), console=self.console, refresh_per_second=4, transient=True) as live:
                            response_text = ""
+                           tool_calls = []
                            # Use streaming when verbose for progressive display
                            for chunk in litellm.completion(
                                **self._build_completion_params(
@@ -1189,19 +1231,20 @@ class LLM:
                            ):
                                if chunk and chunk.choices and chunk.choices[0].delta:
                                    delta = chunk.choices[0].delta
-                                   if hasattr(delta, "content") and delta.content:
-                                       response_text += delta.content
-                                       live.update(display_generating(response_text, current_time))
+                                   response_text, tool_calls = self._process_stream_delta(
+                                       delta, response_text, tool_calls, formatted_tools
+                                   )
+                                   live.update(display_generating(response_text, current_time))
 
                        # Clear the live display after completion
-                       console.print()
+                       self.console.print()
 
                        # Create final response structure
                        final_response = {
                            "choices": [{
                                "message": {
                                    "content": response_text,
-                                   "tool_calls": None
+                                   "tool_calls": tool_calls if tool_calls else None
                                }
                            }]
                        }
@@ -1218,7 +1261,9 @@ class LLM:
                                **kwargs
                            )
                        )
-                       response_text = final_response["choices"][0]["message"]["content"]
+                       # Handle None content from Gemini
+                       response_content = final_response["choices"][0]["message"].get("content")
+                       response_text = response_content if response_content is not None else ""
 
                    # Execute callbacks and display based on verbose setting
                    if verbose and not interaction_displayed:
@@ -1228,7 +1273,7 @@ class LLM:
                            response_text,
                            markdown=markdown,
                            generation_time=time.time() - current_time,
-                           console=console,
+                           console=self.console,
                            agent_name=agent_name,
                            agent_role=agent_role,
                            agent_tools=agent_tools,
@@ -1257,6 +1302,41 @@ class LLM:
 
                tool_calls = final_response["choices"][0]["message"].get("tool_calls")
 
+
+               # For Ollama, parse tool calls from response text if not in tool_calls field
+               if self._is_ollama_provider() and not tool_calls and response_text and formatted_tools:
+                   # Try to parse JSON tool call from response text
+                   try:
+                       response_json = json.loads(response_text.strip())
+                       if isinstance(response_json, dict) and "name" in response_json:
+                           # Convert Ollama format to standard tool_calls format
+                           tool_calls = [{
+                               "id": f"tool_{iteration_count}",
+                               "type": "function",
+                               "function": {
+                                   "name": response_json["name"],
+                                   "arguments": json.dumps(response_json.get("arguments", {}))
+                               }
+                           }]
+                           logging.debug(f"Parsed Ollama tool call from response: {tool_calls}")
+                       elif isinstance(response_json, list):
+                           # Handle multiple tool calls
+                           tool_calls = []
+                           for idx, tool_json in enumerate(response_json):
+                               if isinstance(tool_json, dict) and "name" in tool_json:
+                                   tool_calls.append({
+                                       "id": f"tool_{iteration_count}_{idx}",
+                                       "type": "function",
+                                       "function": {
+                                           "name": tool_json["name"],
+                                           "arguments": json.dumps(tool_json.get("arguments", {}))
+                                       }
+                                   })
+                           if tool_calls:
+                               logging.debug(f"Parsed multiple Ollama tool calls from response: {tool_calls}")
+                   except (json.JSONDecodeError, KeyError) as e:
+                       logging.debug(f"Could not parse Ollama tool call from response: {e}")
+
                # For Ollama, if response is empty but we have tools, prompt for tool usage
                if self._is_ollama_provider() and (not response_text or response_text.strip() == "") and formatted_tools and iteration_count == 0:
@@ -1287,6 +1367,8 @@ class LLM:
1287
1367
 
1288
1368
  should_continue = False
1289
1369
  tool_results = [] # Store current iteration tool results
1370
+ tool_result_mapping = {} # Store function results by name for Ollama chaining
1371
+
1290
1372
  for tool_call in tool_calls:
1291
1373
  # Handle both object and dict access patterns
1292
1374
  is_ollama = self._is_ollama_provider()
@@ -1294,6 +1376,15 @@ class LLM:
 
                        # Validate and filter arguments for Ollama provider
                        if is_ollama and tools:
+                           # First check if any argument references a previous tool result
+                           if is_ollama and tool_result_mapping:
+                               # Replace function names with their results in arguments
+                               for arg_name, arg_value in list(arguments.items()):
+                                   if isinstance(arg_value, str) and arg_value in tool_result_mapping:
+                                       # Replace function name with its result
+                                       arguments[arg_name] = tool_result_mapping[arg_value]
+                                       logging.debug(f"[OLLAMA_FIX] Replaced {arg_value} with {tool_result_mapping[arg_value]} in {function_name} arguments")
+
                            arguments = self._validate_and_filter_ollama_arguments(function_name, arguments, tools)
 
  logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
@@ -1301,6 +1392,19 @@ class LLM:
                        logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
                        tool_results.append(tool_result)  # Store the result
                        accumulated_tool_results.append(tool_result)  # Accumulate across iterations
+
+                       # For Ollama, store the result for potential chaining
+                       if is_ollama:
+                           # Extract numeric value from result if it contains one
+                           if isinstance(tool_result, (int, float)):
+                               tool_result_mapping[function_name] = tool_result
+                           elif isinstance(tool_result, str):
+                               import re
+                               match = re.search(r'\b(\d+)\b', tool_result)
+                               if match:
+                                   tool_result_mapping[function_name] = int(match.group(1))
+                               else:
+                                   tool_result_mapping[function_name] = tool_result
 
                        if verbose:
  display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
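The chaining fix stores each tool's result under its function name, so a later call that passes that name as an argument value receives the concrete result instead. A tiny illustration with made-up names and values:

# Result of an earlier tool call, keyed by function name.
tool_result_mapping = {"get_stock_price": 175}
arguments = {"multiplier": 2, "amount": "get_stock_price"}

for arg_name, arg_value in list(arguments.items()):
    if isinstance(arg_value, str) and arg_value in tool_result_mapping:
        arguments[arg_name] = tool_result_mapping[arg_value]

print(arguments)  # {'multiplier': 2, 'amount': 175}
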
@@ -1312,7 +1416,7 @@ class LLM:
                            logging.debug("[TOOL_EXEC_DEBUG] Tool returned no output")
 
                        logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}")
-                       display_tool_call(display_message, console=console)
+                       display_tool_call(display_message, console=self.console)
 
                        # Check if this is Ollama provider
  if self._is_ollama_provider():
@@ -1336,10 +1440,11 @@ class LLM:
                            iteration_count += 1
                            continue
 
-                   # Check if the LLM provided a final answer alongside the tool calls
-                   # If response_text contains substantive content, treat it as the final answer
-                   if response_text and len(response_text.strip()) > 10:
-                       # LLM provided a final answer after tool execution, don't continue
+                   # For most providers (including Gemini), we need to continue the loop
+                   # to get a final response that incorporates the tool results
+                   # Only break if the response explicitly indicates completion
+                   if response_text and len(response_text.strip()) > 50 and "final answer" in response_text.lower():
+                       # LLM provided an explicit final answer, don't continue
                        final_response_text = response_text.strip()
                        break
 
@@ -1364,8 +1469,10 @@ class LLM:
                        final_response_text = response_text.strip() if response_text else "Task completed."
                        break
 
-                   # Otherwise, continue the loop to check if more tools are needed
+                   # Otherwise, continue the loop to get final response with tool results
                    iteration_count += 1
+                   # Clear response_text so we don't accidentally use the initial response
+                   response_text = ""
                    continue
1371
1478
  # No tool calls, we're done with this iteration
@@ -1381,8 +1488,12 @@ class LLM:
                        break
 
                    # If we've executed tools in previous iterations, this response contains the final answer
-                   if iteration_count > 0 and not final_response_text:
+                   if iteration_count > 0:
                        final_response_text = response_text.strip() if response_text else ""
+                       break
+
+                   # First iteration with no tool calls - just return the response
+                   final_response_text = response_text.strip() if response_text else ""
                    break
1499
  except Exception as e:
@@ -1391,6 +1502,40 @@ class LLM:
 
            # End of while loop - return final response
            if final_response_text:
+               # Display the final response if verbose mode is enabled
+               if verbose and not interaction_displayed:
+                   generation_time_val = time.time() - start_time
+                   display_interaction(
+                       original_prompt,
+                       final_response_text,
+                       markdown=markdown,
+                       generation_time=generation_time_val,
+                       console=self.console,
+                       agent_name=agent_name,
+                       agent_role=agent_role,
+                       agent_tools=agent_tools,
+                       task_name=task_name,
+                       task_description=task_description,
+                       task_id=task_id
+                   )
+                   interaction_displayed = True
+                   callback_executed = True
+               elif not callback_executed:
+                   # Execute callback if not already done
+                   execute_sync_callback(
+                       'interaction',
+                       message=original_prompt,
+                       response=final_response_text,
+                       markdown=markdown,
+                       generation_time=time.time() - start_time,
+                       agent_name=agent_name,
+                       agent_role=agent_role,
+                       agent_tools=agent_tools,
+                       task_name=task_name,
+                       task_description=task_description,
+                       task_id=task_id
+                   )
+                   callback_executed = True
                return final_response_text
1540
 
1396
1541
  # No tool calls were made in this iteration, return the response
@@ -1405,7 +1550,7 @@ class LLM:
1405
1550
  f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}",
1406
1551
  markdown=markdown,
1407
1552
  generation_time=generation_time_val,
1408
- console=console,
1553
+ console=self.console,
1409
1554
  agent_name=agent_name,
1410
1555
  agent_role=agent_role,
1411
1556
  agent_tools=agent_tools,
@@ -1419,7 +1564,7 @@ class LLM:
1419
1564
  response_text,
1420
1565
  markdown=markdown,
1421
1566
  generation_time=generation_time_val,
1422
- console=console,
1567
+ console=self.console,
1423
1568
  agent_name=agent_name,
1424
1569
  agent_role=agent_role,
1425
1570
  agent_tools=agent_tools,
@@ -1459,7 +1604,7 @@ class LLM:
1459
1604
 
1460
1605
  if verbose and not interaction_displayed:
1461
1606
  display_interaction(original_prompt, response_text, markdown=markdown,
1462
- generation_time=time.time() - start_time, console=console,
1607
+ generation_time=time.time() - start_time, console=self.console,
1463
1608
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1464
1609
  task_name=task_name, task_description=task_description, task_id=task_id)
1465
1610
  interaction_displayed = True
@@ -1485,7 +1630,7 @@ class LLM:
1485
1630
  if not self_reflect:
1486
1631
  if verbose and not interaction_displayed:
1487
1632
  display_interaction(original_prompt, response_text, markdown=markdown,
1488
- generation_time=time.time() - start_time, console=console,
1633
+ generation_time=time.time() - start_time, console=self.console,
1489
1634
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1490
1635
  task_name=task_name, task_description=task_description, task_id=task_id)
1491
1636
  interaction_displayed = True
@@ -1551,7 +1696,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1551
1696
  f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
1552
1697
  markdown=markdown,
1553
1698
  generation_time=time.time() - start_time,
1554
- console=console,
1699
+ console=self.console,
1555
1700
  agent_name=agent_name,
1556
1701
  agent_role=agent_role,
1557
1702
  agent_tools=agent_tools,
@@ -1565,7 +1710,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1565
1710
  reflection_text,
1566
1711
  markdown=markdown,
1567
1712
  generation_time=time.time() - start_time,
1568
- console=console,
1713
+ console=self.console,
1569
1714
  agent_name=agent_name,
1570
1715
  agent_role=agent_role,
1571
1716
  agent_tools=agent_tools,
@@ -1576,7 +1721,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1576
1721
  else:
1577
1722
  # Existing streaming approach
1578
1723
  if verbose:
1579
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
1724
+ with Live(display_generating("", start_time), console=self.console, refresh_per_second=4) as live:
1580
1725
  reflection_text = ""
1581
1726
  for chunk in litellm.completion(
1582
1727
  **self._build_completion_params(
@@ -1616,13 +1761,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1616
1761
  if verbose:
1617
1762
  display_self_reflection(
1618
1763
  f"Agent {agent_name} self reflection: reflection='{reflection_data['reflection']}' satisfactory='{reflection_data['satisfactory']}'",
1619
- console=console
1764
+ console=self.console
1620
1765
  )
1621
1766
 
1622
1767
  if satisfactory and reflection_count >= min_reflect - 1:
1623
1768
  if verbose and not interaction_displayed:
1624
1769
  display_interaction(prompt, response_text, markdown=markdown,
1625
- generation_time=time.time() - start_time, console=console,
1770
+ generation_time=time.time() - start_time, console=self.console,
1626
1771
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1627
1772
  task_name=task_name, task_description=task_description, task_id=task_id)
1628
1773
  interaction_displayed = True
@@ -1631,7 +1776,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1631
1776
  if reflection_count >= max_reflect - 1:
1632
1777
  if verbose and not interaction_displayed:
1633
1778
  display_interaction(prompt, response_text, markdown=markdown,
1634
- generation_time=time.time() - start_time, console=console,
1779
+ generation_time=time.time() - start_time, console=self.console,
1635
1780
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1636
1781
  task_name=task_name, task_description=task_description, task_id=task_id)
1637
1782
  interaction_displayed = True
@@ -1647,7 +1792,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1647
1792
 
1648
1793
  # Get new response after reflection
1649
1794
  if verbose:
1650
- with Live(display_generating("", time.time()), console=console, refresh_per_second=4) as live:
1795
+ with Live(display_generating("", time.time()), console=self.console, refresh_per_second=4) as live:
1651
1796
  response_text = ""
1652
1797
  for chunk in litellm.completion(
1653
1798
  **self._build_completion_params(
@@ -1688,7 +1833,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1688
1833
  if reflection_count >= max_reflect:
1689
1834
  if verbose and not interaction_displayed:
1690
1835
  display_interaction(prompt, response_text, markdown=markdown,
1691
- generation_time=time.time() - start_time, console=console,
1836
+ generation_time=time.time() - start_time, console=self.console,
1692
1837
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1693
1838
  task_name=task_name, task_description=task_description, task_id=task_id)
1694
1839
  interaction_displayed = True
@@ -1701,7 +1846,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1701
1846
  # If we've exhausted reflection attempts
1702
1847
  if verbose and not interaction_displayed:
1703
1848
  display_interaction(prompt, response_text, markdown=markdown,
1704
- generation_time=time.time() - start_time, console=console)
1849
+ generation_time=time.time() - start_time, console=self.console)
1705
1850
  interaction_displayed = True
1706
1851
  return response_text
1707
1852
 
@@ -2119,7 +2264,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2119
2264
  f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
2120
2265
  markdown=markdown,
2121
2266
  generation_time=time.time() - start_time,
2122
- console=console,
2267
+ console=self.console,
2123
2268
  agent_name=agent_name,
2124
2269
  agent_role=agent_role,
2125
2270
  agent_tools=agent_tools,
@@ -2134,7 +2279,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2134
2279
  response_text,
2135
2280
  markdown=markdown,
2136
2281
  generation_time=time.time() - start_time,
2137
- console=console,
2282
+ console=self.console,
2138
2283
  agent_name=agent_name,
2139
2284
  agent_role=agent_role,
2140
2285
  agent_tools=agent_tools,
@@ -2214,9 +2359,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                    )
                )
-               response_text = tool_response.choices[0].message.get("content", "")
+               # Handle None content from Gemini
+               response_content = tool_response.choices[0].message.get("content")
+               response_text = response_content if response_content is not None else ""
                tool_calls = tool_response.choices[0].message.get("tool_calls", [])
 
+               # Debug logging for Gemini responses
+               if self._is_gemini_model():
+                   logging.debug(f"Gemini response content: {response_content} -> {response_text}")
+                   logging.debug(f"Gemini tool calls: {tool_calls}")
+
                if verbose and not interaction_displayed:
                    # Display the complete response at once
                    display_interaction(
@@ -2224,7 +2376,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2224
2376
  response_text,
2225
2377
  markdown=markdown,
2226
2378
  generation_time=time.time() - start_time,
2227
- console=console,
2379
+ console=self.console,
2228
2380
  agent_name=agent_name,
2229
2381
  agent_role=agent_role,
2230
2382
  agent_tools=agent_tools,
@@ -2282,7 +2434,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2282
2434
  display_message += f"Function returned: {tool_result}"
2283
2435
  else:
2284
2436
  display_message += "Function returned no output"
2285
- display_tool_call(display_message, console=console)
2437
+ display_tool_call(display_message, console=self.console)
2286
2438
  # Check if it's Ollama provider
2287
2439
  if self._is_ollama_provider():
2288
2440
  # For Ollama, use user role and format as natural language
@@ -2329,7 +2481,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2329
2481
  f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
2330
2482
  markdown=markdown,
2331
2483
  generation_time=time.time() - start_time,
2332
- console=console,
2484
+ console=self.console,
2333
2485
  agent_name=agent_name,
2334
2486
  agent_role=agent_role,
2335
2487
  agent_tools=agent_tools,
@@ -2344,7 +2496,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2344
2496
  response_text,
2345
2497
  markdown=markdown,
2346
2498
  generation_time=time.time() - start_time,
2347
- console=console,
2499
+ console=self.console,
2348
2500
  agent_name=agent_name,
2349
2501
  agent_role=agent_role,
2350
2502
  agent_tools=agent_tools,
@@ -2455,7 +2607,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2455
2607
  self.chat_history.append({"role": "assistant", "content": response_text})
2456
2608
  if verbose and not interaction_displayed:
2457
2609
  display_interaction(original_prompt, response_text, markdown=markdown,
2458
- generation_time=time.time() - start_time, console=console,
2610
+ generation_time=time.time() - start_time, console=self.console,
2459
2611
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2460
2612
  task_name=task_name, task_description=task_description, task_id=task_id)
2461
2613
  interaction_displayed = True
@@ -2473,7 +2625,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2473
2625
  f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{display_text}",
2474
2626
  markdown=markdown,
2475
2627
  generation_time=time.time() - start_time,
2476
- console=console,
2628
+ console=self.console,
2477
2629
  agent_name=agent_name,
2478
2630
  agent_role=agent_role,
2479
2631
  agent_tools=agent_tools,
@@ -2483,7 +2635,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2483
2635
  )
2484
2636
  else:
2485
2637
  display_interaction(original_prompt, display_text, markdown=markdown,
2486
- generation_time=time.time() - start_time, console=console,
2638
+ generation_time=time.time() - start_time, console=self.console,
2487
2639
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2488
2640
  task_name=task_name, task_description=task_description, task_id=task_id)
2489
2641
  interaction_displayed = True
@@ -2530,7 +2682,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2530
2682
  f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
2531
2683
  markdown=markdown,
2532
2684
  generation_time=time.time() - start_time,
2533
- console=console,
2685
+ console=self.console,
2534
2686
  agent_name=agent_name,
2535
2687
  agent_role=agent_role,
2536
2688
  agent_tools=agent_tools,
@@ -2544,7 +2696,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2544
2696
  reflection_text,
2545
2697
  markdown=markdown,
2546
2698
  generation_time=time.time() - start_time,
2547
- console=console,
2699
+ console=self.console,
2548
2700
  agent_name=agent_name,
2549
2701
  agent_role=agent_role,
2550
2702
  agent_tools=agent_tools,
@@ -2555,7 +2707,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2555
2707
  else:
2556
2708
  # Existing streaming approach
2557
2709
  if verbose:
2558
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
2710
+ with Live(display_generating("", start_time), console=self.console, refresh_per_second=4) as live:
2559
2711
  reflection_text = ""
2560
2712
  async for chunk in await litellm.acompletion(
2561
2713
  **self._build_completion_params(
@@ -2596,13 +2748,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2596
2748
  if verbose:
2597
2749
  display_self_reflection(
2598
2750
  f"Agent {agent_name} self reflection: reflection='{reflection_data['reflection']}' satisfactory='{reflection_data['satisfactory']}'",
2599
- console=console
2751
+ console=self.console
2600
2752
  )
2601
2753
 
2602
2754
  if satisfactory and reflection_count >= min_reflect - 1:
2603
2755
  if verbose and not interaction_displayed:
2604
2756
  display_interaction(prompt, response_text, markdown=markdown,
2605
- generation_time=time.time() - start_time, console=console,
2757
+ generation_time=time.time() - start_time, console=self.console,
2606
2758
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2607
2759
  task_name=task_name, task_description=task_description, task_id=task_id)
2608
2760
  interaction_displayed = True
@@ -2611,7 +2763,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2611
2763
  if reflection_count >= max_reflect - 1:
2612
2764
  if verbose and not interaction_displayed:
2613
2765
  display_interaction(prompt, response_text, markdown=markdown,
2614
- generation_time=time.time() - start_time, console=console,
2766
+ generation_time=time.time() - start_time, console=self.console,
2615
2767
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2616
2768
  task_name=task_name, task_description=task_description, task_id=task_id)
2617
2769
  interaction_displayed = True
praisonaiagents/main.py CHANGED
@@ -12,11 +12,7 @@ from rich.markdown import Markdown
 from rich.live import Live
 import asyncio
 
-# Logging is already configured in __init__.py, just clean up handlers for litellm
-logging.getLogger("litellm").handlers = []
-logging.getLogger("litellm.utils").handlers = []
-logging.getLogger("litellm").propagate = False
-logging.getLogger("litellm.utils").propagate = False
+# Logging is already configured in _logging.py via __init__.py
 
 # Global list to store error logs
 error_logs = []
praisonaiagents-0.0.148.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.146
+Version: 0.0.148
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
praisonaiagents-0.0.148.dist-info/RECORD CHANGED
@@ -1,9 +1,11 @@
-praisonaiagents/__init__.py,sha256=2UYF3ZisjF3gH9UTmUcMEDCnV4at3GjsKC8quewQdHI,7125
+praisonaiagents/__init__.py,sha256=pmoUafSn5f-ubjgY0rBBTw7N1wsRtg7X8-UGe477OH4,3619
+praisonaiagents/_logging.py,sha256=WfgUX6jo9hClpgHVKSGz8gqkna9DDNhPJBv-wjhcJoM,4648
+praisonaiagents/_warning_patch.py,sha256=FSLdw1SnA9b1PSxHWaRIcuG9IiIwO5JT6uo_m3CM0NI,2816
 praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
-praisonaiagents/main.py,sha256=b5dKlkf6NMeumSzixreHB9ui90f8YMAi5r1fCbTpQVw,17225
+praisonaiagents/main.py,sha256=jT6ur_GWYZRZk0YC8xHm80Vy86y4EGk-zOv4_fc-thU,17013
 praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
 praisonaiagents/agent/__init__.py,sha256=KBqW_augD-HcaV3FL88gUmhDCpwnSTavGENi7RqneTo,505
-praisonaiagents/agent/agent.py,sha256=bl46q5Jl_cnR_teWdbRus7lo6-bEDwGDU9RzN5GgUNo,143159
+praisonaiagents/agent/agent.py,sha256=SISsqrK_IUzjnnits4N9tOwtB66TmcmLVSc_t_7TmUI,143500
 praisonaiagents/agent/context_agent.py,sha256=zNI2Waghn5eo8g3QM1Dc7ZNSr2xw41D87GIK81FjW-Y,107489
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=xKDhW8T1Y3e15lQpY6N2pdvBNJmAoWDibJa4BYa-Njs,10205
@@ -17,8 +19,8 @@ praisonaiagents/guardrails/llm_guardrail.py,sha256=czdOIoY-3PZOchX317tz4O2h2WYE4
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OzK81oA6sjk9nAUWphS7AkXxvalrv2AHB4FtHjzYgxI,30115
-praisonaiagents/llm/__init__.py,sha256=M6peks8Yy-mnYQmbGkam2iTukn5iJAgsmPhyYAeQmR4,3197
-praisonaiagents/llm/llm.py,sha256=eBRtEcMOMMrPP3eePZ5QAU7AVgr9-ZR_dJvQnqW7JSA,163163
+praisonaiagents/llm/__init__.py,sha256=SqdU1pRqPrR6jZeWYyDeTvmZKCACywk0v4P0k5Fuowk,1107
+praisonaiagents/llm/llm.py,sha256=UQmnKyPQSnaxCIlUwJmHC4vjQfFvlZIg2w883qQTkTM,172825
 praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
 praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
 praisonaiagents/llm/openai_client.py,sha256=3EVjIs3tnBNFDy_4ZxX9DJVq54kS0FMm38m5Gkpun7U,57234
@@ -62,7 +64,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.146.dist-info/METADATA,sha256=z97omUooPCk4RMFwbwLv5Qpgkw_bRJRbSDpn0rpxnv8,2146
-praisonaiagents-0.0.146.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.146.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.146.dist-info/RECORD,,
+praisonaiagents-0.0.148.dist-info/METADATA,sha256=N2tVsvw141roESJw1oS87WT1DbDM0Rg-jiFO0OxQfCA,2146
+praisonaiagents-0.0.148.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.148.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.148.dist-info/RECORD,,