praisonaiagents 0.0.146__py3-none-any.whl → 0.0.147__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
praisonaiagents/__init__.py CHANGED
@@ -2,67 +2,16 @@
  Praison AI Agents - A package for hierarchical AI agent task execution
  """
 
- # Configure logging before any other imports
- import os
- import logging
- import warnings
- import re
- from rich.logging import RichHandler
+ # Apply warning patch BEFORE any imports to intercept warnings at the source
+ from . import _warning_patch
 
- # Set environment variables to suppress warnings at the source
- os.environ["LITELLM_TELEMETRY"] = "False"
- os.environ["LITELLM_DROP_PARAMS"] = "True"
- # Disable httpx warnings
- os.environ["HTTPX_DISABLE_WARNINGS"] = "True"
+ # Import centralized logging configuration FIRST
+ from . import _logging
 
- # Get log level from environment variable
- LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
-
- # Determine if warnings should be suppressed (not in DEBUG mode and not in tests)
- def _should_suppress_warnings():
-     import sys
-     return (LOGLEVEL != 'DEBUG' and
-             not hasattr(sys, '_called_from_test') and
-             'pytest' not in sys.modules and
-             os.environ.get('PYTEST_CURRENT_TEST') is None)
-
- # Configure root logger
- logging.basicConfig(
-     level=getattr(logging, LOGLEVEL, logging.INFO),
-     format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
-     datefmt="[%X]",
-     handlers=[RichHandler(rich_tracebacks=True)]
- )
-
- # Suppress specific noisy loggers - more aggressive suppression (only when not in DEBUG mode)
- if _should_suppress_warnings():
-     logging.getLogger("litellm").setLevel(logging.CRITICAL)
-     logging.getLogger("litellm_logging").setLevel(logging.CRITICAL)
-     logging.getLogger("httpx").setLevel(logging.CRITICAL)
-     logging.getLogger("httpcore").setLevel(logging.CRITICAL)
-     logging.getLogger("pydantic").setLevel(logging.WARNING)
-     logging.getLogger("markdown_it").setLevel(logging.WARNING)
-     logging.getLogger("rich.markdown").setLevel(logging.WARNING)
-
- # Note: litellm child loggers (litellm.utils, litellm.proxy, etc.) automatically inherit
- # the CRITICAL level from the parent litellm logger due to Python's hierarchical logging
-
- # Comprehensive warning suppression for litellm and dependencies (issue #1033)
- # These warnings clutter output and are not actionable for users
-
- # Set warning filter to suppress all warnings from problematic modules at import time
- if _should_suppress_warnings():
-     # Module-specific warning suppression - applied before imports (only when not in DEBUG mode)
-     for module in ['litellm', 'httpx', 'httpcore', 'pydantic']:
-         warnings.filterwarnings("ignore", category=DeprecationWarning, module=module)
-         warnings.filterwarnings("ignore", category=UserWarning, module=module)
-
-     # Specific filters for known problematic warnings
-     warnings.filterwarnings("ignore", message="There is no current event loop")
-     warnings.filterwarnings("ignore", message=".*Use 'content=<...>' to upload raw bytes/text content.*")
-     warnings.filterwarnings("ignore", message=".*The `dict` method is deprecated; use `model_dump` instead.*")
-     warnings.filterwarnings("ignore", message=".*model_dump.*deprecated.*")
+ # Configure root logger after logging is initialized
+ _logging.configure_root_logger()
 
+ # Now import everything else
  from .agent.agent import Agent
  from .agent.image_agent import ImageAgent
  from .agent.context_agent import ContextAgent, create_context_agent
@@ -131,30 +80,6 @@ except ImportError:
  # Add Agents as an alias for PraisonAIAgents
  Agents = PraisonAIAgents
 
- # Additional warning suppression after all imports (runtime suppression)
- if _should_suppress_warnings():
-     # Try to import and configure litellm to suppress its warnings
-     try:
-         import litellm
-         # Disable all litellm logging and telemetry
-         litellm.telemetry = False
-         litellm.drop_params = True
-         # Set litellm to suppress warnings
-         litellm.suppress_debug_info = True
-         if hasattr(litellm, '_logging_obj'):
-             litellm._logging_obj.setLevel(logging.CRITICAL)
-     except (ImportError, AttributeError):
-         pass
-
-     # Suppress pydantic warnings that might occur at runtime (safer approach)
-     try:
-         warnings.filterwarnings("ignore", category=UserWarning, module="pydantic",
-                                 message=".*model_dump.*deprecated.*")
-         warnings.filterwarnings("ignore", category=UserWarning, module="pydantic",
-                                 message=".*dict.*method.*deprecated.*")
-     except Exception:
-         pass
-
  # Apply telemetry auto-instrumentation after all imports
  if _telemetry_available:
      try:
@@ -210,4 +135,4 @@ __all__ = [
 
  # Add MCP to __all__ if available
  if _mcp_available:
-     __all__.append('MCP')
+     __all__.append('MCP')
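Taken together, the `__init__.py` hunks replace roughly sixty lines of inline logging and warning setup with two imports and one call. A minimal sketch (not part of the package) of the behaviour a consumer can rely on, assuming the import order shown above:

```python
# Sketch: LOGLEVEL must be exported before the package import, because
# praisonaiagents reads it at import time when configuring loggers.
import os

os.environ["LOGLEVEL"] = "DEBUG"   # read by _logging at import time

import praisonaiagents  # runs _warning_patch first, then _logging

# _logging._configure_environment() has already disabled litellm telemetry:
assert os.environ["LITELLM_TELEMETRY"] == "False"
```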
praisonaiagents/_logging.py ADDED
@@ -0,0 +1,134 @@
+ """
+ Centralized logging configuration for PraisonAI Agents.
+ This module consolidates all logging configuration in one place to avoid duplication.
+ """
+
+ import os
+ import logging
+ from typing import List
+
+ # ========================================================================
+ # ENVIRONMENT CONFIGURATION
+ # ========================================================================
+ def _configure_environment():
+     """Set environment variables to suppress debug messages at the source."""
+     env_vars = {
+         # LiteLLM configuration
+         "LITELLM_TELEMETRY": "False",
+         "LITELLM_DROP_PARAMS": "True",
+         "LITELLM_LOG": "ERROR",
+         "LITELLM_DEBUG": "False",
+         "LITELLM_SUPPRESS_DEBUG_INFO": "True",
+         "LITELLM_VERBOSE": "False",
+         "LITELLM_SET_VERBOSE": "False",
+         # HTTPX configuration
+         "HTTPX_DISABLE_WARNINGS": "True",
+         "HTTPX_LOG_LEVEL": "ERROR",
+         # Pydantic configuration
+         "PYDANTIC_WARNINGS_ENABLED": "False",
+     }
+
+     for key, value in env_vars.items():
+         os.environ[key] = value
+
+
+ # ========================================================================
+ # LOGGER CONFIGURATION
+ # ========================================================================
+ def _get_all_noisy_loggers() -> List[str]:
+     """Get list of all loggers that should be suppressed."""
+     return [
+         # LiteLLM and variants
+         "litellm", "LiteLLM", "LiteLLM Router", "LiteLLM Proxy",
+         # HTTP libraries
+         "httpx", "httpx._trace", "httpx._client",
+         "httpcore", "httpcore._trace",
+         # OpenAI
+         "openai._base_client", "openai._client",
+         # Markdown
+         "markdown_it", "rich.markdown",
+         # System
+         "asyncio", "selector_events", "pydantic",
+         "praisonaiagents.telemetry.telemetry",
+     ]
+
+
+ def _configure_loggers():
+     """Configure all loggers based on LOGLEVEL environment variable."""
+     loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+     # When DEBUG is set, allow some HTTP logging for API endpoints
+     if loglevel == 'DEBUG':
+         allowed_debug_loggers = {"httpx._client", "openai._client"}
+
+         for logger_name in _get_all_noisy_loggers():
+             if logger_name not in allowed_debug_loggers:
+                 logger = logging.getLogger(logger_name)
+                 logger.setLevel(logging.CRITICAL)
+                 logger.handlers = []
+                 logger.propagate = False
+
+         # Ensure allowed loggers are at INFO level to show API calls
+         for logger_name in allowed_debug_loggers:
+             logger = logging.getLogger(logger_name)
+             logger.setLevel(logging.INFO)
+     else:
+         # Suppress all noisy loggers when not in DEBUG mode
+         for logger_name in _get_all_noisy_loggers():
+             logger = logging.getLogger(logger_name)
+             logger.setLevel(logging.CRITICAL)
+             logger.handlers = []
+             logger.propagate = False
+
+
+ # ========================================================================
+ # LITELLM CONFIGURATION
+ # ========================================================================
+ def _configure_litellm():
+     """Configure litellm after it's imported."""
+     try:
+         import litellm
+         litellm.telemetry = False
+         litellm.drop_params = True
+         litellm.suppress_debug_info = True
+
+         if hasattr(litellm, '_logging_obj'):
+             litellm._logging_obj.setLevel(logging.CRITICAL)
+
+         if hasattr(litellm, 'set_verbose'):
+             litellm.set_verbose = False
+
+     except (ImportError, AttributeError):
+         pass
+
+
+ # ========================================================================
+ # ROOT LOGGER CONFIGURATION
+ # ========================================================================
+ def configure_root_logger():
+     """Configure the root logger with RichHandler."""
+     from rich.logging import RichHandler
+
+     loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+     logging.basicConfig(
+         level=getattr(logging, loglevel, logging.INFO),
+         format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
+         datefmt="[%X]",
+         handlers=[RichHandler(rich_tracebacks=True)],
+         force=True
+     )
+
+
+ # ========================================================================
+ # INITIALIZATION
+ # ========================================================================
+ def initialize_logging():
+     """Initialize all logging configuration."""
+     _configure_environment()
+     _configure_loggers()
+     _configure_litellm()
+
+
+ # Auto-initialize on import
+ initialize_logging()
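`_logging.py` auto-initialises on import, so downstream modules never call `initialize_logging()` themselves; only `configure_root_logger()` is invoked explicitly from `__init__.py`. A hedged sketch of what the module should leave behind when `LOGLEVEL` is unset:

```python
import logging
import os

os.environ.pop("LOGLEVEL", None)        # default branch: not DEBUG

from praisonaiagents import _logging    # initialize_logging() runs on import

# Noisy loggers are silenced and detached from the root logger:
assert logging.getLogger("litellm").level == logging.CRITICAL
assert logging.getLogger("httpx._trace").propagate is False

# Safe to call again: basicConfig(force=True) replaces existing handlers.
_logging.configure_root_logger()
```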
praisonaiagents/_warning_patch.py ADDED
@@ -0,0 +1,73 @@
+ """
+ Minimal warning patch to suppress specific third-party warnings.
+ This module patches the warnings module to intercept specific messages.
+ """
+
+ import warnings
+ import functools
+ import sys
+
+ # Apply aggressive warning filters first
+ warnings.filterwarnings("ignore", message=".*Pydantic serializer warnings.*")
+ warnings.filterwarnings("ignore", message=".*PydanticSerializationUnexpectedValue.*")
+ warnings.filterwarnings("ignore", message=".*Expected 9 fields but got.*")
+ warnings.filterwarnings("ignore", message=".*Expected `StreamingChoices` but got.*")
+ warnings.filterwarnings("ignore", message=".*serialized value may not be as expected.*")
+ warnings.filterwarnings("ignore", message=".*Use 'content=<...>' to upload raw bytes/text content.*")
+ warnings.filterwarnings("ignore", message=".*The `dict` method is deprecated.*")
+ warnings.filterwarnings("ignore", category=UserWarning, module="pydantic.*")
+
+ # Store the original warn function
+ _original_warn = warnings.warn
+ _original_warn_explicit = warnings.warn_explicit
+
+ # Messages to suppress (partial matches)
+ SUPPRESSED_PATTERNS = [
+     "Use 'content=<...>' to upload raw bytes/text content",
+     "The `dict` method is deprecated; use `model_dump` instead",
+     "Pydantic serializer warnings",
+     "PydanticSerializationUnexpectedValue",
+     "Expected 9 fields but got 5 for type `Message`",
+     "Expected `StreamingChoices` but got `Choices`",
+     "serialized value may not be as expected"
+ ]
+
+ @functools.wraps(_original_warn)
+ def _patched_warn(message, category=None, stacklevel=1, source=None):
+     """Patched warn function that suppresses specific messages."""
+     msg_str = str(message)
+
+     for pattern in SUPPRESSED_PATTERNS:
+         if pattern in msg_str:
+             return
+
+     if category == UserWarning and "pydantic" in msg_str.lower():
+         return
+
+     _original_warn(message, category, stacklevel, source)
+
+ @functools.wraps(_original_warn_explicit)
+ def _patched_warn_explicit(message, category, filename, lineno, module=None, registry=None, module_globals=None, source=None):
+     """Patched warn_explicit function that suppresses specific messages."""
+     msg_str = str(message)
+
+     for pattern in SUPPRESSED_PATTERNS:
+         if pattern in msg_str:
+             return
+
+     if category == UserWarning and "pydantic" in msg_str.lower():
+         return
+
+     if module and "pydantic" in str(module):
+         return
+
+     _original_warn_explicit(message, category, filename, lineno, module, registry, module_globals, source)
+
+ # Apply the patches
+ warnings.warn = _patched_warn
+ warnings.warn_explicit = _patched_warn_explicit
+
+ # Also patch sys.modules warnings if it exists
+ if 'warnings' in sys.modules:
+     sys.modules['warnings'].warn = _patched_warn
+     sys.modules['warnings'].warn_explicit = _patched_warn_explicit
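Because the patch replaces `warnings.warn` itself, matching messages are dropped before the filter machinery ever sees them. A small demonstration (assuming a fresh interpreter in which the package imports cleanly):

```python
import warnings

import praisonaiagents._warning_patch  # noqa: F401  (applies the patch on import)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn("Expected `StreamingChoices` but got `Choices`")  # swallowed
    warnings.warn("an unrelated warning")                           # passes through

assert len(caught) == 1
assert "unrelated" in str(caught[0].message)
```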
praisonaiagents/llm/__init__.py CHANGED
@@ -1,38 +1,9 @@
- import logging
- import warnings
  import os
- import re
 
- # Disable litellm telemetry before any imports
+ # Ensure litellm telemetry is disabled before imports
  os.environ["LITELLM_TELEMETRY"] = "False"
 
- # Check if warnings should be suppressed (consistent with main __init__.py)
- def _should_suppress_warnings():
-     import sys
-     LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
-     return (LOGLEVEL != 'DEBUG' and
-             not hasattr(sys, '_called_from_test') and
-             'pytest' not in sys.modules and
-             os.environ.get('PYTEST_CURRENT_TEST') is None)
-
- # Suppress all relevant logs at module level - more aggressive suppression consistent with main __init__.py (only when not in DEBUG mode)
- if _should_suppress_warnings():
-     logging.getLogger("litellm").setLevel(logging.CRITICAL)
-     logging.getLogger("openai").setLevel(logging.WARNING)
-     logging.getLogger("httpx").setLevel(logging.CRITICAL)
-     logging.getLogger("httpcore").setLevel(logging.CRITICAL)
-     logging.getLogger("pydantic").setLevel(logging.WARNING)
-
- # Note: litellm child loggers automatically inherit the CRITICAL level from the parent logger
-
- # Warning filters are centrally managed in the main __init__.py file
- # Apply additional local suppression for safety during LLM imports (only when not in DEBUG mode)
- if _should_suppress_warnings():
-     for module in ['litellm', 'httpx', 'httpcore', 'pydantic']:
-         warnings.filterwarnings("ignore", category=DeprecationWarning, module=module)
-         warnings.filterwarnings("ignore", category=UserWarning, module=module)
-
- # Import after suppressing warnings
+ # Import modules
  from .llm import LLM, LLMContextLengthExceededException
  from .openai_client import (
      OpenAIClient,
@@ -57,22 +28,6 @@ from .model_router import (
      create_routing_agent
  )
 
- # Ensure comprehensive litellm configuration after import (only when not in DEBUG mode)
- if _should_suppress_warnings():
-     try:
-         import litellm
-         # Disable all litellm logging and telemetry features
-         litellm.telemetry = False
-         litellm.drop_params = True
-         if hasattr(litellm, 'suppress_debug_info'):
-             litellm.suppress_debug_info = True
-         # Set all litellm loggers to CRITICAL level
-         if hasattr(litellm, '_logging_obj'):
-             litellm._logging_obj.setLevel(logging.CRITICAL)
-         # Note: Child loggers inherit from parent, no need to iterate over all loggers
-     except ImportError:
-         pass
-
  __all__ = [
      "LLM",
      "LLMContextLengthExceededException",
@@ -92,4 +47,4 @@ __all__ = [
      "ModelProfile",
      "TaskComplexity",
      "create_routing_agent"
- ]
+ ]
praisonaiagents/llm/llm.py CHANGED
@@ -20,8 +20,7 @@ from ..main import (
  from rich.console import Console
  from rich.live import Live
 
- # Disable litellm telemetry before any imports
- os.environ["LITELLM_TELEMETRY"] = "False"
+ # Logging is already configured in _logging.py via __init__.py
 
  # TODO: Include in-build tool calling in LLM class
  # TODO: Restructure so that duplicate calls are not made (Sync with agent.py)
@@ -95,7 +94,7 @@ class LLM:
      OLLAMA_FINAL_ANSWER_PROMPT = "Based on the tool results above, please provide the final answer to the original question."
 
      # Ollama iteration threshold for summary generation
-     OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3
+     OLLAMA_SUMMARY_ITERATION_THRESHOLD = 1
 
      def _log_llm_config(self, method_name: str, **config):
          """Centralized debug logging for LLM configuration and parameters.
@@ -191,26 +190,28 @@ class LLM:
              litellm._async_success_callback = []
              litellm.callbacks = []
 
+             # Suppress all litellm debug info
+             litellm.suppress_debug_info = True
+             if hasattr(litellm, '_logging'):
+                 litellm._logging._disable_debugging()
+
              verbose = extra_settings.get('verbose', True)
 
-             # Only suppress logs if not in debug mode
-             if not isinstance(verbose, bool) and verbose >= 10:
-                 # Enable detailed debug logging
-                 logging.getLogger("asyncio").setLevel(logging.DEBUG)
-                 logging.getLogger("selector_events").setLevel(logging.DEBUG)
-                 logging.getLogger("litellm.utils").setLevel(logging.DEBUG)
-                 logging.getLogger("litellm.main").setLevel(logging.DEBUG)
-                 litellm.suppress_debug_messages = False
-                 litellm.set_verbose = True
-             else:
-                 # Suppress debug logging for normal operation
-                 logging.getLogger("asyncio").setLevel(logging.WARNING)
-                 logging.getLogger("selector_events").setLevel(logging.WARNING)
-                 logging.getLogger("litellm.utils").setLevel(logging.WARNING)
-                 logging.getLogger("litellm.main").setLevel(logging.WARNING)
-                 litellm.suppress_debug_messages = True
+             # Always suppress litellm's internal debug messages
+             # These are from external libraries and not useful for debugging user code
+             logging.getLogger("litellm.utils").setLevel(logging.WARNING)
+             logging.getLogger("litellm.main").setLevel(logging.WARNING)
+             logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.WARNING)
+             logging.getLogger("litellm.litellm_logging").setLevel(logging.WARNING)
+             logging.getLogger("litellm.transformation").setLevel(logging.WARNING)
+             litellm.suppress_debug_messages = True
+             if hasattr(litellm, '_logging'):
                  litellm._logging._disable_debugging()
-                 warnings.filterwarnings("ignore", category=RuntimeWarning)
+             warnings.filterwarnings("ignore", category=RuntimeWarning)
+
+             # Keep asyncio at WARNING unless explicitly in high debug mode
+             logging.getLogger("asyncio").setLevel(logging.WARNING)
+             logging.getLogger("selector_events").setLevel(logging.WARNING)
 
          except ImportError:
              raise ImportError(
@@ -329,29 +330,47 @@ class LLM:
          # For Ollama, always generate summary when we have tool results
          # This prevents infinite loops caused by empty/minimal responses
 
-         # Build tool summary more naturally to match OpenAI-style responses
-         if len(tool_results) == 1:
+         # Filter out error results first
+         valid_results = []
+         for result in tool_results:
+             # Skip error responses
+             if isinstance(result, dict) and 'error' in result:
+                 continue
+             valid_results.append(result)
+
+         # If no valid results, return None to continue
+         if not valid_results:
+             return None
+
+         # Generate a natural summary based on the tool results
+         if len(valid_results) == 1:
              # Single tool result - create natural response
-             result = tool_results[0]
-             if isinstance(result, dict) and 'result' in result:
-                 return str(result['result'])
-             else:
-                 return str(result)
+             result = valid_results[0]
+             # For simple numeric results, create a more natural response
+             if isinstance(result, (int, float)):
+                 return f"The result is {result}."
+             return str(result)
          else:
              # Multiple tool results - create coherent summary
-             summary_lines = []
-             for i, result in enumerate(tool_results):
-                 if isinstance(result, dict) and 'result' in result:
-                     function_name = result.get('function_name', 'Tool')
-                     summary_lines.append(f"{function_name}: {result['result']}")
-                 else:
-                     summary_lines.append(f"Tool {i+1}: {result}")
+             summary_parts = []
 
-             # Create more natural summary text
-             if len(summary_lines) == 2:
-                 return f"{summary_lines[0]}. {summary_lines[1]}."
-             else:
-                 return "Based on the tool execution: " + ". ".join(summary_lines) + "."
+             for result in valid_results:
+                 result_str = str(result)
+                 # Clean up the result string
+                 result_str = result_str.strip()
+
+                 # If result is just a number, keep it simple
+                 if isinstance(result, (int, float)):
+                     # Don't add extra context, let the LLM's response provide that
+                     pass
+                 # Ensure string results end with proper punctuation
+                 elif result_str and not result_str[-1] in '.!?':
+                     result_str += '.'
+
+                 summary_parts.append(result_str)
+
+             # Join the parts naturally
+             return " ".join(summary_parts)
 
      def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
          """
@@ -474,7 +493,22 @@ class LLM:
 
          for param_name, param_value in arguments.items():
              if param_name in valid_params:
-                 filtered_args[param_name] = param_value
+                 # Cast parameter value to the expected type
+                 param = sig.parameters[param_name]
+                 if param.annotation != inspect.Parameter.empty:
+                     try:
+                         if param.annotation == int and isinstance(param_value, str):
+                             filtered_args[param_name] = int(param_value)
+                         elif param.annotation == float and isinstance(param_value, str):
+                             filtered_args[param_name] = float(param_value)
+                         elif param.annotation == bool and isinstance(param_value, str):
+                             filtered_args[param_name] = param_value.lower() in ('true', '1', 'yes')
+                         else:
+                             filtered_args[param_name] = param_value
+                     except (ValueError, TypeError):
+                         filtered_args[param_name] = param_value
+                 else:
+                     filtered_args[param_name] = param_value
              else:
                  invalid_params.append(param_name)
 
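This hunk addresses tool calls where Ollama serialises every argument as a string: values are now cast to the annotated parameter type before the tool is invoked. The same coercion as an isolated sketch (`coerce_args` is a hypothetical name, not the package API):

```python
import inspect

def coerce_args(func, arguments: dict) -> dict:
    sig = inspect.signature(func)
    out = {}
    for name, value in arguments.items():
        if name not in sig.parameters:
            continue                      # invalid params are reported separately
        ann = sig.parameters[name].annotation
        try:
            if ann == int and isinstance(value, str):
                value = int(value)
            elif ann == float and isinstance(value, str):
                value = float(value)
            elif ann == bool and isinstance(value, str):
                value = value.lower() in ('true', '1', 'yes')
        except (ValueError, TypeError):
            pass                          # keep the raw value on failure
        out[name] = value
    return out

def add(a: int, b: int) -> int:
    return a + b

assert coerce_args(add, {"a": "2", "b": "3"}) == {"a": 2, "b": 3}
assert add(**coerce_args(add, {"a": "2", "b": "3"})) == 5
```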
@@ -514,19 +548,10 @@ class LLM:
          if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
              return False, None, iteration_count
 
-         # For Ollama: if we have meaningful tool results but empty responses,
-         # give LLM one final chance with explicit prompt for final answer
-         if accumulated_tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
-             # Add explicit prompt asking for final answer
-             messages.append({
-                 "role": "user",
-                 "content": self.OLLAMA_FINAL_ANSWER_PROMPT
-             })
-             # Continue to next iteration to get the final response
-             iteration_count += 1
-             return False, None, iteration_count
-         else:
-             # If still no response after final answer prompt, generate summary
+         # For Ollama: if we have meaningful tool results, generate summary immediately
+         # Don't wait for more iterations as Ollama tends to repeat tool calls
+         if accumulated_tool_results and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+             # Generate summary from tool results
              tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
              if tool_summary:
                  return True, tool_summary, iteration_count
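Combined with the threshold change from 3 to 1 earlier in the file, this gate now fires on the first iteration that already holds tool results, instead of first injecting the final-answer prompt. A condensed sketch of the new condition (recast as a hypothetical free function):

```python
OLLAMA_SUMMARY_ITERATION_THRESHOLD = 1   # was 3 in 0.0.146

def should_summarize(is_ollama: bool, iteration_count: int, accumulated: list) -> bool:
    if not (is_ollama and iteration_count >= OLLAMA_SUMMARY_ITERATION_THRESHOLD):
        return False
    return bool(accumulated)             # summarize as soon as results exist

assert should_summarize(True, 1, [{"result": 8}]) is True
assert should_summarize(True, 0, [{"result": 8}]) is False   # first pass still runs
assert should_summarize(False, 5, [{"result": 8}]) is False  # non-Ollama unaffected
```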
@@ -874,7 +899,7 @@ class LLM:
          if display_text and str(display_text).strip():
              display_instruction(
                  f"Agent {agent_name} is processing prompt: {display_text}",
-                 console=console,
+                 console=self.console,
                  agent_name=agent_name,
                  agent_role=agent_role,
                  agent_tools=agent_tools
@@ -920,7 +945,7 @@ class LLM:
                  f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                  markdown=markdown,
                  generation_time=generation_time_val,
-                 console=console,
+                 console=self.console,
                  agent_name=agent_name,
                  agent_role=agent_role,
                  agent_tools=agent_tools,
@@ -936,7 +961,7 @@ class LLM:
                  response_text,
                  markdown=markdown,
                  generation_time=generation_time_val,
-                 console=console,
+                 console=self.console,
                  agent_name=agent_name,
                  agent_role=agent_role,
                  agent_tools=agent_tools,
@@ -971,6 +996,11 @@ class LLM:
              # Provider doesn't support streaming with tools, use non-streaming
              use_streaming = False
 
+         # Gemini has issues with streaming + tools, disable streaming for Gemini when tools are present
+         if use_streaming and formatted_tools and self._is_gemini_model():
+             logging.debug("Disabling streaming for Gemini model with tools due to JSON parsing issues")
+             use_streaming = False
+
          # Track whether fallback was successful to avoid duplicate API calls
          fallback_completed = False
 
@@ -984,7 +1014,7 @@ class LLM:
          try:
              if verbose:
                  # Verbose streaming: show display_generating during streaming
-                 with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
+                 with Live(display_generating("", current_time), console=self.console, refresh_per_second=4) as live:
                      for chunk in litellm.completion(
                          **self._build_completion_params(
                              messages=messages,
@@ -1031,8 +1061,9 @@ class LLM:
              try:
                  if verbose:
                      # When verbose=True, always use streaming for better UX
-                     with Live(display_generating("", current_time), console=console, refresh_per_second=4, transient=True) as live:
+                     with Live(display_generating("", current_time), console=self.console, refresh_per_second=4, transient=True) as live:
                          response_text = ""
+                         tool_calls = []
                          # Use streaming when verbose for progressive display
                          for chunk in litellm.completion(
                              **self._build_completion_params(
@@ -1047,19 +1078,20 @@ class LLM:
                          ):
                              if chunk and chunk.choices and chunk.choices[0].delta:
                                  delta = chunk.choices[0].delta
-                                 if hasattr(delta, "content") and delta.content:
-                                     response_text += delta.content
-                                     live.update(display_generating(response_text, current_time))
+                                 response_text, tool_calls = self._process_stream_delta(
+                                     delta, response_text, tool_calls, formatted_tools
+                                 )
+                                 live.update(display_generating(response_text, current_time))
 
                      # Clear the live display after completion
-                     console.print()
+                     self.console.print()
 
                      # Create final response structure
                      final_response = {
                          "choices": [{
                              "message": {
                                  "content": response_text,
-                                 "tool_calls": None
+                                 "tool_calls": tool_calls if tool_calls else None
                              }
                          }]
                      }
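The inline content-only handling is replaced by `self._process_stream_delta(...)`, whose body is not part of this diff. In spirit it must both grow the text and collect streamed tool-call fragments; a hypothetical stand-in, not the package's actual implementation:

```python
# Hypothetical reduction of _process_stream_delta (the real implementation is
# not shown in this diff): accumulate text and any streamed tool-call pieces.
def process_stream_delta(delta, response_text, tool_calls, formatted_tools):
    if getattr(delta, "content", None):
        response_text += delta.content
    if formatted_tools and getattr(delta, "tool_calls", None):
        # The real method presumably merges fragments by index; we just append.
        tool_calls.extend(delta.tool_calls)
    return response_text, tool_calls
```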
@@ -1076,7 +1108,9 @@ class LLM:
                          **kwargs
                      )
                  )
-                 response_text = final_response["choices"][0]["message"]["content"]
+                 # Handle None content from Gemini
+                 response_content = final_response["choices"][0]["message"].get("content")
+                 response_text = response_content if response_content is not None else ""
 
                  # Execute callbacks and display based on verbose setting
                  if verbose and not interaction_displayed:
@@ -1086,7 +1120,7 @@ class LLM:
                          response_text,
                          markdown=markdown,
                          generation_time=time.time() - current_time,
-                         console=console,
+                         console=self.console,
                          agent_name=agent_name,
                          agent_role=agent_role,
                          agent_tools=agent_tools,
@@ -1173,8 +1207,9 @@ class LLM:
              # Non-streaming approach (when tools require it, streaming is disabled, or streaming fallback)
              if verbose:
                  # When verbose=True, always use streaming for better UX
-                 with Live(display_generating("", current_time), console=console, refresh_per_second=4, transient=True) as live:
+                 with Live(display_generating("", current_time), console=self.console, refresh_per_second=4, transient=True) as live:
                      response_text = ""
+                     tool_calls = []
                      # Use streaming when verbose for progressive display
                      for chunk in litellm.completion(
                          **self._build_completion_params(
@@ -1189,19 +1224,20 @@ class LLM:
                      ):
                          if chunk and chunk.choices and chunk.choices[0].delta:
                              delta = chunk.choices[0].delta
-                             if hasattr(delta, "content") and delta.content:
-                                 response_text += delta.content
-                                 live.update(display_generating(response_text, current_time))
+                             response_text, tool_calls = self._process_stream_delta(
+                                 delta, response_text, tool_calls, formatted_tools
+                             )
+                             live.update(display_generating(response_text, current_time))
 
                  # Clear the live display after completion
-                 console.print()
+                 self.console.print()
 
                  # Create final response structure
                  final_response = {
                      "choices": [{
                          "message": {
                              "content": response_text,
-                             "tool_calls": None
+                             "tool_calls": tool_calls if tool_calls else None
                          }
                      }]
                  }
@@ -1218,7 +1254,9 @@ class LLM:
                          **kwargs
                      )
                  )
-                 response_text = final_response["choices"][0]["message"]["content"]
+                 # Handle None content from Gemini
+                 response_content = final_response["choices"][0]["message"].get("content")
+                 response_text = response_content if response_content is not None else ""
 
                  # Execute callbacks and display based on verbose setting
                  if verbose and not interaction_displayed:
@@ -1228,7 +1266,7 @@ class LLM:
                          response_text,
                          markdown=markdown,
                          generation_time=time.time() - current_time,
-                         console=console,
+                         console=self.console,
                          agent_name=agent_name,
                          agent_role=agent_role,
                          agent_tools=agent_tools,
@@ -1257,6 +1295,41 @@ class LLM:
 
              tool_calls = final_response["choices"][0]["message"].get("tool_calls")
 
+
+             # For Ollama, parse tool calls from response text if not in tool_calls field
+             if self._is_ollama_provider() and not tool_calls and response_text and formatted_tools:
+                 # Try to parse JSON tool call from response text
+                 try:
+                     response_json = json.loads(response_text.strip())
+                     if isinstance(response_json, dict) and "name" in response_json:
+                         # Convert Ollama format to standard tool_calls format
+                         tool_calls = [{
+                             "id": f"tool_{iteration_count}",
+                             "type": "function",
+                             "function": {
+                                 "name": response_json["name"],
+                                 "arguments": json.dumps(response_json.get("arguments", {}))
+                             }
+                         }]
+                         logging.debug(f"Parsed Ollama tool call from response: {tool_calls}")
+                     elif isinstance(response_json, list):
+                         # Handle multiple tool calls
+                         tool_calls = []
+                         for idx, tool_json in enumerate(response_json):
+                             if isinstance(tool_json, dict) and "name" in tool_json:
+                                 tool_calls.append({
+                                     "id": f"tool_{iteration_count}_{idx}",
+                                     "type": "function",
+                                     "function": {
+                                         "name": tool_json["name"],
+                                         "arguments": json.dumps(tool_json.get("arguments", {}))
+                                     }
+                                 })
+                         if tool_calls:
+                             logging.debug(f"Parsed multiple Ollama tool calls from response: {tool_calls}")
+                 except (json.JSONDecodeError, KeyError) as e:
+                     logging.debug(f"Could not parse Ollama tool call from response: {e}")
+
              # For Ollama, if response is empty but we have tools, prompt for tool usage
              if self._is_ollama_provider() and (not response_text or response_text.strip() == "") and formatted_tools and iteration_count == 0:
                  messages.append({
1287
1360
 
1288
1361
  should_continue = False
1289
1362
  tool_results = [] # Store current iteration tool results
1363
+ tool_result_mapping = {} # Store function results by name for Ollama chaining
1364
+
1290
1365
  for tool_call in tool_calls:
1291
1366
  # Handle both object and dict access patterns
1292
1367
  is_ollama = self._is_ollama_provider()
@@ -1294,6 +1369,15 @@ class LLM:
1294
1369
 
1295
1370
  # Validate and filter arguments for Ollama provider
1296
1371
  if is_ollama and tools:
1372
+ # First check if any argument references a previous tool result
1373
+ if is_ollama and tool_result_mapping:
1374
+ # Replace function names with their results in arguments
1375
+ for arg_name, arg_value in list(arguments.items()):
1376
+ if isinstance(arg_value, str) and arg_value in tool_result_mapping:
1377
+ # Replace function name with its result
1378
+ arguments[arg_name] = tool_result_mapping[arg_value]
1379
+ logging.debug(f"[OLLAMA_FIX] Replaced {arg_value} with {tool_result_mapping[arg_value]} in {function_name} arguments")
1380
+
1297
1381
  arguments = self._validate_and_filter_ollama_arguments(function_name, arguments, tools)
1298
1382
 
1299
1383
  logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
@@ -1301,6 +1385,19 @@ class LLM:
1301
1385
  logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
1302
1386
  tool_results.append(tool_result) # Store the result
1303
1387
  accumulated_tool_results.append(tool_result) # Accumulate across iterations
1388
+
1389
+ # For Ollama, store the result for potential chaining
1390
+ if is_ollama:
1391
+ # Extract numeric value from result if it contains one
1392
+ if isinstance(tool_result, (int, float)):
1393
+ tool_result_mapping[function_name] = tool_result
1394
+ elif isinstance(tool_result, str):
1395
+ import re
1396
+ match = re.search(r'\b(\d+)\b', tool_result)
1397
+ if match:
1398
+ tool_result_mapping[function_name] = int(match.group(1))
1399
+ else:
1400
+ tool_result_mapping[function_name] = tool_result
1304
1401
 
1305
1402
  if verbose:
1306
1403
  display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1312,7 +1409,7 @@ class LLM:
1312
1409
  logging.debug("[TOOL_EXEC_DEBUG] Tool returned no output")
1313
1410
 
1314
1411
  logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}")
1315
- display_tool_call(display_message, console=console)
1412
+ display_tool_call(display_message, console=self.console)
1316
1413
 
1317
1414
  # Check if this is Ollama provider
1318
1415
  if self._is_ollama_provider():
@@ -1336,10 +1433,11 @@ class LLM:
1336
1433
  iteration_count += 1
1337
1434
  continue
1338
1435
 
1339
- # Check if the LLM provided a final answer alongside the tool calls
1340
- # If response_text contains substantive content, treat it as the final answer
1341
- if response_text and len(response_text.strip()) > 10:
1342
- # LLM provided a final answer after tool execution, don't continue
1436
+ # For most providers (including Gemini), we need to continue the loop
1437
+ # to get a final response that incorporates the tool results
1438
+ # Only break if the response explicitly indicates completion
1439
+ if response_text and len(response_text.strip()) > 50 and "final answer" in response_text.lower():
1440
+ # LLM provided an explicit final answer, don't continue
1343
1441
  final_response_text = response_text.strip()
1344
1442
  break
1345
1443
 
@@ -1364,8 +1462,10 @@ class LLM:
1364
1462
  final_response_text = response_text.strip() if response_text else "Task completed."
1365
1463
  break
1366
1464
 
1367
- # Otherwise, continue the loop to check if more tools are needed
1465
+ # Otherwise, continue the loop to get final response with tool results
1368
1466
  iteration_count += 1
1467
+ # Clear response_text so we don't accidentally use the initial response
1468
+ response_text = ""
1369
1469
  continue
1370
1470
  else:
1371
1471
  # No tool calls, we're done with this iteration
@@ -1381,8 +1481,12 @@ class LLM:
1381
1481
  break
1382
1482
 
1383
1483
  # If we've executed tools in previous iterations, this response contains the final answer
1384
- if iteration_count > 0 and not final_response_text:
1484
+ if iteration_count > 0:
1385
1485
  final_response_text = response_text.strip() if response_text else ""
1486
+ break
1487
+
1488
+ # First iteration with no tool calls - just return the response
1489
+ final_response_text = response_text.strip() if response_text else ""
1386
1490
  break
1387
1491
 
1388
1492
  except Exception as e:
@@ -1391,6 +1495,40 @@ class LLM:
1391
1495
 
1392
1496
  # End of while loop - return final response
1393
1497
  if final_response_text:
1498
+ # Display the final response if verbose mode is enabled
1499
+ if verbose and not interaction_displayed:
1500
+ generation_time_val = time.time() - start_time
1501
+ display_interaction(
1502
+ original_prompt,
1503
+ final_response_text,
1504
+ markdown=markdown,
1505
+ generation_time=generation_time_val,
1506
+ console=self.console,
1507
+ agent_name=agent_name,
1508
+ agent_role=agent_role,
1509
+ agent_tools=agent_tools,
1510
+ task_name=task_name,
1511
+ task_description=task_description,
1512
+ task_id=task_id
1513
+ )
1514
+ interaction_displayed = True
1515
+ callback_executed = True
1516
+ elif not callback_executed:
1517
+ # Execute callback if not already done
1518
+ execute_sync_callback(
1519
+ 'interaction',
1520
+ message=original_prompt,
1521
+ response=final_response_text,
1522
+ markdown=markdown,
1523
+ generation_time=time.time() - start_time,
1524
+ agent_name=agent_name,
1525
+ agent_role=agent_role,
1526
+ agent_tools=agent_tools,
1527
+ task_name=task_name,
1528
+ task_description=task_description,
1529
+ task_id=task_id
1530
+ )
1531
+ callback_executed = True
1394
1532
  return final_response_text
1395
1533
 
1396
1534
  # No tool calls were made in this iteration, return the response
@@ -1405,7 +1543,7 @@ class LLM:
1405
1543
  f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}",
1406
1544
  markdown=markdown,
1407
1545
  generation_time=generation_time_val,
1408
- console=console,
1546
+ console=self.console,
1409
1547
  agent_name=agent_name,
1410
1548
  agent_role=agent_role,
1411
1549
  agent_tools=agent_tools,
@@ -1419,7 +1557,7 @@ class LLM:
1419
1557
  response_text,
1420
1558
  markdown=markdown,
1421
1559
  generation_time=generation_time_val,
1422
- console=console,
1560
+ console=self.console,
1423
1561
  agent_name=agent_name,
1424
1562
  agent_role=agent_role,
1425
1563
  agent_tools=agent_tools,
@@ -1459,7 +1597,7 @@ class LLM:
1459
1597
 
1460
1598
  if verbose and not interaction_displayed:
1461
1599
  display_interaction(original_prompt, response_text, markdown=markdown,
1462
- generation_time=time.time() - start_time, console=console,
1600
+ generation_time=time.time() - start_time, console=self.console,
1463
1601
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1464
1602
  task_name=task_name, task_description=task_description, task_id=task_id)
1465
1603
  interaction_displayed = True
@@ -1485,7 +1623,7 @@ class LLM:
1485
1623
  if not self_reflect:
1486
1624
  if verbose and not interaction_displayed:
1487
1625
  display_interaction(original_prompt, response_text, markdown=markdown,
1488
- generation_time=time.time() - start_time, console=console,
1626
+ generation_time=time.time() - start_time, console=self.console,
1489
1627
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1490
1628
  task_name=task_name, task_description=task_description, task_id=task_id)
1491
1629
  interaction_displayed = True
@@ -1551,7 +1689,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1551
1689
  f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
1552
1690
  markdown=markdown,
1553
1691
  generation_time=time.time() - start_time,
1554
- console=console,
1692
+ console=self.console,
1555
1693
  agent_name=agent_name,
1556
1694
  agent_role=agent_role,
1557
1695
  agent_tools=agent_tools,
@@ -1565,7 +1703,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1565
1703
  reflection_text,
1566
1704
  markdown=markdown,
1567
1705
  generation_time=time.time() - start_time,
1568
- console=console,
1706
+ console=self.console,
1569
1707
  agent_name=agent_name,
1570
1708
  agent_role=agent_role,
1571
1709
  agent_tools=agent_tools,
@@ -1576,7 +1714,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1576
1714
  else:
1577
1715
  # Existing streaming approach
1578
1716
  if verbose:
1579
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
1717
+ with Live(display_generating("", start_time), console=self.console, refresh_per_second=4) as live:
1580
1718
  reflection_text = ""
1581
1719
  for chunk in litellm.completion(
1582
1720
  **self._build_completion_params(
@@ -1616,13 +1754,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1616
1754
  if verbose:
1617
1755
  display_self_reflection(
1618
1756
  f"Agent {agent_name} self reflection: reflection='{reflection_data['reflection']}' satisfactory='{reflection_data['satisfactory']}'",
1619
- console=console
1757
+ console=self.console
1620
1758
  )
1621
1759
 
1622
1760
  if satisfactory and reflection_count >= min_reflect - 1:
1623
1761
  if verbose and not interaction_displayed:
1624
1762
  display_interaction(prompt, response_text, markdown=markdown,
1625
- generation_time=time.time() - start_time, console=console,
1763
+ generation_time=time.time() - start_time, console=self.console,
1626
1764
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1627
1765
  task_name=task_name, task_description=task_description, task_id=task_id)
1628
1766
  interaction_displayed = True
@@ -1631,7 +1769,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1631
1769
  if reflection_count >= max_reflect - 1:
1632
1770
  if verbose and not interaction_displayed:
1633
1771
  display_interaction(prompt, response_text, markdown=markdown,
1634
- generation_time=time.time() - start_time, console=console,
1772
+ generation_time=time.time() - start_time, console=self.console,
1635
1773
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1636
1774
  task_name=task_name, task_description=task_description, task_id=task_id)
1637
1775
  interaction_displayed = True
@@ -1647,7 +1785,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1647
1785
 
1648
1786
  # Get new response after reflection
1649
1787
  if verbose:
1650
- with Live(display_generating("", time.time()), console=console, refresh_per_second=4) as live:
1788
+ with Live(display_generating("", time.time()), console=self.console, refresh_per_second=4) as live:
1651
1789
  response_text = ""
1652
1790
  for chunk in litellm.completion(
1653
1791
  **self._build_completion_params(
@@ -1688,7 +1826,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1688
1826
  if reflection_count >= max_reflect:
1689
1827
  if verbose and not interaction_displayed:
1690
1828
  display_interaction(prompt, response_text, markdown=markdown,
1691
- generation_time=time.time() - start_time, console=console,
1829
+ generation_time=time.time() - start_time, console=self.console,
1692
1830
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
1693
1831
  task_name=task_name, task_description=task_description, task_id=task_id)
1694
1832
  interaction_displayed = True
@@ -1701,7 +1839,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1701
1839
  # If we've exhausted reflection attempts
1702
1840
  if verbose and not interaction_displayed:
1703
1841
  display_interaction(prompt, response_text, markdown=markdown,
1704
- generation_time=time.time() - start_time, console=console)
1842
+ generation_time=time.time() - start_time, console=self.console)
1705
1843
  interaction_displayed = True
1706
1844
  return response_text
1707
1845
 
@@ -2119,7 +2257,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2119
2257
  f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
2120
2258
  markdown=markdown,
2121
2259
  generation_time=time.time() - start_time,
2122
- console=console,
2260
+ console=self.console,
2123
2261
  agent_name=agent_name,
2124
2262
  agent_role=agent_role,
2125
2263
  agent_tools=agent_tools,
@@ -2134,7 +2272,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2134
2272
  response_text,
2135
2273
  markdown=markdown,
2136
2274
  generation_time=time.time() - start_time,
2137
- console=console,
2275
+ console=self.console,
2138
2276
  agent_name=agent_name,
2139
2277
  agent_role=agent_role,
2140
2278
  agent_tools=agent_tools,
@@ -2214,9 +2352,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2214
2352
  **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
2215
2353
  )
2216
2354
  )
2217
- response_text = tool_response.choices[0].message.get("content", "")
2355
+ # Handle None content from Gemini
2356
+ response_content = tool_response.choices[0].message.get("content")
2357
+ response_text = response_content if response_content is not None else ""
2218
2358
  tool_calls = tool_response.choices[0].message.get("tool_calls", [])
2219
2359
 
2360
+ # Debug logging for Gemini responses
2361
+ if self._is_gemini_model():
2362
+ logging.debug(f"Gemini response content: {response_content} -> {response_text}")
2363
+ logging.debug(f"Gemini tool calls: {tool_calls}")
2364
+
2220
2365
  if verbose and not interaction_displayed:
2221
2366
  # Display the complete response at once
2222
2367
  display_interaction(
@@ -2224,7 +2369,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2224
2369
  response_text,
2225
2370
  markdown=markdown,
2226
2371
  generation_time=time.time() - start_time,
2227
- console=console,
2372
+ console=self.console,
2228
2373
  agent_name=agent_name,
2229
2374
  agent_role=agent_role,
2230
2375
  agent_tools=agent_tools,
@@ -2282,7 +2427,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2282
2427
  display_message += f"Function returned: {tool_result}"
2283
2428
  else:
2284
2429
  display_message += "Function returned no output"
2285
- display_tool_call(display_message, console=console)
2430
+ display_tool_call(display_message, console=self.console)
2286
2431
  # Check if it's Ollama provider
2287
2432
  if self._is_ollama_provider():
2288
2433
  # For Ollama, use user role and format as natural language
@@ -2329,7 +2474,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2329
2474
  f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
2330
2475
  markdown=markdown,
2331
2476
  generation_time=time.time() - start_time,
2332
- console=console,
2477
+ console=self.console,
2333
2478
  agent_name=agent_name,
2334
2479
  agent_role=agent_role,
2335
2480
  agent_tools=agent_tools,
@@ -2344,7 +2489,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2344
2489
  response_text,
2345
2490
  markdown=markdown,
2346
2491
  generation_time=time.time() - start_time,
2347
- console=console,
2492
+ console=self.console,
2348
2493
  agent_name=agent_name,
2349
2494
  agent_role=agent_role,
2350
2495
  agent_tools=agent_tools,
@@ -2455,7 +2600,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2455
2600
  self.chat_history.append({"role": "assistant", "content": response_text})
2456
2601
  if verbose and not interaction_displayed:
2457
2602
  display_interaction(original_prompt, response_text, markdown=markdown,
2458
- generation_time=time.time() - start_time, console=console,
2603
+ generation_time=time.time() - start_time, console=self.console,
2459
2604
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2460
2605
  task_name=task_name, task_description=task_description, task_id=task_id)
2461
2606
  interaction_displayed = True
@@ -2473,7 +2618,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2473
2618
  f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{display_text}",
2474
2619
  markdown=markdown,
2475
2620
  generation_time=time.time() - start_time,
2476
- console=console,
2621
+ console=self.console,
2477
2622
  agent_name=agent_name,
2478
2623
  agent_role=agent_role,
2479
2624
  agent_tools=agent_tools,
@@ -2483,7 +2628,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2483
2628
  )
2484
2629
  else:
2485
2630
  display_interaction(original_prompt, display_text, markdown=markdown,
2486
- generation_time=time.time() - start_time, console=console,
2631
+ generation_time=time.time() - start_time, console=self.console,
2487
2632
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2488
2633
  task_name=task_name, task_description=task_description, task_id=task_id)
2489
2634
  interaction_displayed = True
@@ -2530,7 +2675,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2530
2675
  f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
2531
2676
  markdown=markdown,
2532
2677
  generation_time=time.time() - start_time,
2533
- console=console,
2678
+ console=self.console,
2534
2679
  agent_name=agent_name,
2535
2680
  agent_role=agent_role,
2536
2681
  agent_tools=agent_tools,
@@ -2544,7 +2689,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2544
2689
  reflection_text,
2545
2690
  markdown=markdown,
2546
2691
  generation_time=time.time() - start_time,
2547
- console=console,
2692
+ console=self.console,
2548
2693
  agent_name=agent_name,
2549
2694
  agent_role=agent_role,
2550
2695
  agent_tools=agent_tools,
@@ -2555,7 +2700,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2555
2700
  else:
2556
2701
  # Existing streaming approach
2557
2702
  if verbose:
2558
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
2703
+ with Live(display_generating("", start_time), console=self.console, refresh_per_second=4) as live:
2559
2704
  reflection_text = ""
2560
2705
  async for chunk in await litellm.acompletion(
2561
2706
  **self._build_completion_params(
@@ -2596,13 +2741,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2596
2741
  if verbose:
2597
2742
  display_self_reflection(
2598
2743
  f"Agent {agent_name} self reflection: reflection='{reflection_data['reflection']}' satisfactory='{reflection_data['satisfactory']}'",
2599
- console=console
2744
+ console=self.console
2600
2745
  )
2601
2746
 
2602
2747
  if satisfactory and reflection_count >= min_reflect - 1:
2603
2748
  if verbose and not interaction_displayed:
2604
2749
  display_interaction(prompt, response_text, markdown=markdown,
2605
- generation_time=time.time() - start_time, console=console,
2750
+ generation_time=time.time() - start_time, console=self.console,
2606
2751
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2607
2752
  task_name=task_name, task_description=task_description, task_id=task_id)
2608
2753
  interaction_displayed = True
@@ -2611,7 +2756,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
2611
2756
  if reflection_count >= max_reflect - 1:
2612
2757
  if verbose and not interaction_displayed:
2613
2758
  display_interaction(prompt, response_text, markdown=markdown,
2614
- generation_time=time.time() - start_time, console=console,
2759
+ generation_time=time.time() - start_time, console=self.console,
2615
2760
  agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
2616
2761
  task_name=task_name, task_description=task_description, task_id=task_id)
2617
2762
  interaction_displayed = True
praisonaiagents/main.py CHANGED
@@ -12,11 +12,7 @@ from rich.markdown import Markdown
  from rich.live import Live
  import asyncio
 
- # Logging is already configured in __init__.py, just clean up handlers for litellm
- logging.getLogger("litellm").handlers = []
- logging.getLogger("litellm.utils").handlers = []
- logging.getLogger("litellm").propagate = False
- logging.getLogger("litellm.utils").propagate = False
+ # Logging is already configured in _logging.py via __init__.py
 
  # Global list to store error logs
  error_logs = []
{praisonaiagents-0.0.146.dist-info → praisonaiagents-0.0.147.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.146
+ Version: 0.0.147
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
{praisonaiagents-0.0.146.dist-info → praisonaiagents-0.0.147.dist-info}/RECORD CHANGED
@@ -1,6 +1,8 @@
- praisonaiagents/__init__.py,sha256=2UYF3ZisjF3gH9UTmUcMEDCnV4at3GjsKC8quewQdHI,7125
+ praisonaiagents/__init__.py,sha256=pmoUafSn5f-ubjgY0rBBTw7N1wsRtg7X8-UGe477OH4,3619
+ praisonaiagents/_logging.py,sha256=XvqJwqWY-W3HmYDXW_ZziZscFt98e5uyNDEPI_Hmukk,4639
+ praisonaiagents/_warning_patch.py,sha256=FSLdw1SnA9b1PSxHWaRIcuG9IiIwO5JT6uo_m3CM0NI,2816
  praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
- praisonaiagents/main.py,sha256=b5dKlkf6NMeumSzixreHB9ui90f8YMAi5r1fCbTpQVw,17225
+ praisonaiagents/main.py,sha256=jT6ur_GWYZRZk0YC8xHm80Vy86y4EGk-zOv4_fc-thU,17013
  praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
  praisonaiagents/agent/__init__.py,sha256=KBqW_augD-HcaV3FL88gUmhDCpwnSTavGENi7RqneTo,505
  praisonaiagents/agent/agent.py,sha256=bl46q5Jl_cnR_teWdbRus7lo6-bEDwGDU9RzN5GgUNo,143159
@@ -17,8 +19,8 @@ praisonaiagents/guardrails/llm_guardrail.py,sha256=czdOIoY-3PZOchX317tz4O2h2WYE4
  praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
  praisonaiagents/knowledge/knowledge.py,sha256=OzK81oA6sjk9nAUWphS7AkXxvalrv2AHB4FtHjzYgxI,30115
- praisonaiagents/llm/__init__.py,sha256=M6peks8Yy-mnYQmbGkam2iTukn5iJAgsmPhyYAeQmR4,3197
- praisonaiagents/llm/llm.py,sha256=eBRtEcMOMMrPP3eePZ5QAU7AVgr9-ZR_dJvQnqW7JSA,163163
+ praisonaiagents/llm/__init__.py,sha256=SqdU1pRqPrR6jZeWYyDeTvmZKCACywk0v4P0k5Fuowk,1107
+ praisonaiagents/llm/llm.py,sha256=YeCSyohRcRbOODDW_hfvnNjRDmccA2TO9FmlFv3ohMc,172499
  praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
  praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
  praisonaiagents/llm/openai_client.py,sha256=3EVjIs3tnBNFDy_4ZxX9DJVq54kS0FMm38m5Gkpun7U,57234
@@ -62,7 +64,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.146.dist-info/METADATA,sha256=z97omUooPCk4RMFwbwLv5Qpgkw_bRJRbSDpn0rpxnv8,2146
- praisonaiagents-0.0.146.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.146.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.146.dist-info/RECORD,,
+ praisonaiagents-0.0.147.dist-info/METADATA,sha256=f-ieQHTStmnO_SYd1AveX7hGERBTPYElLsRAAXRjxkc,2146
+ praisonaiagents-0.0.147.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.147.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.147.dist-info/RECORD,,