praisonaiagents 0.0.155__tar.gz → 0.0.156__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/llm/llm.py +109 -44
  3. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents.egg-info/PKG-INFO +1 -1
  4. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/pyproject.toml +1 -1
  5. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/README.md +0 -0
  6. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/__init__.py +0 -0
  7. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/_logging.py +0 -0
  8. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/_warning_patch.py +0 -0
  9. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agent/__init__.py +0 -0
  10. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agent/agent.py +0 -0
  11. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agent/context_agent.py +0 -0
  12. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agent/handoff.py +0 -0
  13. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agent/image_agent.py +0 -0
  14. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agent/router_agent.py +0 -0
  15. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agents/__init__.py +0 -0
  16. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agents/agents.py +0 -0
  17. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/agents/autoagents.py +0 -0
  18. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/approval.py +0 -0
  19. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/flow_display.py +0 -0
  20. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/guardrails/__init__.py +0 -0
  21. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  22. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  23. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/knowledge/__init__.py +0 -0
  24. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/knowledge/chunking.py +0 -0
  25. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/knowledge/knowledge.py +0 -0
  26. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/llm/__init__.py +0 -0
  27. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/llm/model_capabilities.py +0 -0
  28. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/llm/model_router.py +0 -0
  29. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/llm/openai_client.py +0 -0
  30. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/main.py +0 -0
  31. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/mcp/__init__.py +0 -0
  32. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/mcp/mcp.py +0 -0
  33. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  34. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/mcp/mcp_sse.py +0 -0
  35. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/memory/__init__.py +0 -0
  36. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/memory/memory.py +0 -0
  37. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/process/__init__.py +0 -0
  38. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/process/process.py +0 -0
  39. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/session.py +0 -0
  40. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/task/__init__.py +0 -0
  41. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/task/task.py +0 -0
  42. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/__init__.py +0 -0
  43. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/integration.py +0 -0
  44. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/performance_cli.py +0 -0
  45. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/performance_monitor.py +0 -0
  46. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/performance_utils.py +0 -0
  47. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/telemetry.py +0 -0
  48. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/token_collector.py +0 -0
  49. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/telemetry/token_telemetry.py +0 -0
  50. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/README.md +0 -0
  51. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/__init__.py +0 -0
  52. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/arxiv_tools.py +0 -0
  53. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/calculator_tools.py +0 -0
  54. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/csv_tools.py +0 -0
  55. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/duckdb_tools.py +0 -0
  56. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  57. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/excel_tools.py +0 -0
  58. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/file_tools.py +0 -0
  59. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/json_tools.py +0 -0
  60. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/mongodb_tools.py +0 -0
  61. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/newspaper_tools.py +0 -0
  62. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/pandas_tools.py +0 -0
  63. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/python_tools.py +0 -0
  64. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/searxng_tools.py +0 -0
  65. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/shell_tools.py +0 -0
  66. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/spider_tools.py +0 -0
  67. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/test.py +0 -0
  68. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/tools.py +0 -0
  69. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  70. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  71. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/xml_tools.py +0 -0
  72. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/yaml_tools.py +0 -0
  73. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents/tools/yfinance_tools.py +0 -0
  74. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  75. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  76. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents.egg-info/requires.txt +0 -0
  77. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/praisonaiagents.egg-info/top_level.txt +0 -0
  78. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/setup.cfg +0 -0
  79. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test-graph-memory.py +0 -0
  80. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test.py +0 -0
  81. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_basic_agents_demo.py +0 -0
  82. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_context_agent.py +0 -0
  83. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_embedding_logging.py +0 -0
  84. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_fix_comprehensive.py +0 -0
  85. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_gemini_streaming_fix.py +0 -0
  86. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_handoff_compatibility.py +0 -0
  87. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_http_stream_basic.py +0 -0
  88. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_llm_self_reflection_direct.py +0 -0
  89. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_ollama_async_fix.py +0 -0
  90. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_ollama_fix.py +0 -0
  91. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_ollama_sequential_fix.py +0 -0
  92. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_posthog_fixed.py +0 -0
  93. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_self_reflection_comprehensive.py +0 -0
  94. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_self_reflection_fix_simple.py +0 -0
  95. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_self_reflection_fix_verification.py +0 -0
  96. {praisonaiagents-0.0.155 → praisonaiagents-0.0.156}/tests/test_validation_feedback.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: praisonaiagents
3
- Version: 0.0.155
3
+ Version: 0.0.156
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -53,6 +53,9 @@ class LLM:
53
53
  Anthropic, and others through LiteLLM.
54
54
  """
55
55
 
56
+ # Class-level flag for one-time logging configuration
57
+ _logging_configured = False
58
+
56
59
  # Default window sizes for different models (75% of actual to be safe)
57
60
  MODEL_WINDOWS = {
58
61
  # OpenAI
@@ -103,6 +106,57 @@ class LLM:
103
106
  # Ollama iteration threshold for summary generation
104
107
  OLLAMA_SUMMARY_ITERATION_THRESHOLD = 1
105
108
 
109
+ @classmethod
110
+ def _configure_logging(cls):
111
+ """Configure logging settings once for all LLM instances."""
112
+ try:
113
+ import litellm
114
+ # Disable telemetry
115
+ litellm.telemetry = False
116
+
117
+ # Set litellm options globally
118
+ litellm.set_verbose = False
119
+ litellm.success_callback = []
120
+ litellm._async_success_callback = []
121
+ litellm.callbacks = []
122
+
123
+ # Suppress all litellm debug info
124
+ litellm.suppress_debug_info = True
125
+ if hasattr(litellm, '_logging'):
126
+ litellm._logging._disable_debugging()
127
+
128
+ # Always suppress litellm's internal debug messages
129
+ logging.getLogger("litellm.utils").setLevel(logging.WARNING)
130
+ logging.getLogger("litellm.main").setLevel(logging.WARNING)
131
+ logging.getLogger("litellm.litellm_logging").setLevel(logging.WARNING)
132
+ logging.getLogger("litellm.transformation").setLevel(logging.WARNING)
133
+
134
+ # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
135
+ loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
136
+ if loglevel == 'DEBUG':
137
+ logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.INFO)
138
+ else:
139
+ logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.WARNING)
140
+
141
+ # Keep asyncio at WARNING unless explicitly in high debug mode
142
+ logging.getLogger("asyncio").setLevel(logging.WARNING)
143
+ logging.getLogger("selector_events").setLevel(logging.WARNING)
144
+
145
+ # Enable error dropping for cleaner output
146
+ litellm.drop_params = True
147
+ # Enable parameter modification for providers like Anthropic
148
+ litellm.modify_params = True
149
+
150
+ if hasattr(litellm, '_logging'):
151
+ litellm._logging._disable_debugging()
152
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
153
+
154
+ cls._logging_configured = True
155
+
156
+ except ImportError:
157
+ # If litellm not installed, we'll handle it in __init__
158
+ pass
159
+
106
160
  def _log_llm_config(self, method_name: str, **config):
107
161
  """Centralized debug logging for LLM configuration and parameters.
108
162
 
@@ -186,47 +240,13 @@ class LLM:
186
240
  events: List[Any] = [],
187
241
  **extra_settings
188
242
  ):
243
+ # Configure logging only once at the class level
244
+ if not LLM._logging_configured:
245
+ LLM._configure_logging()
246
+
247
+ # Import litellm after logging is configured
189
248
  try:
190
249
  import litellm
191
- # Disable telemetry
192
- litellm.telemetry = False
193
-
194
- # Set litellm options globally
195
- litellm.set_verbose = False
196
- litellm.success_callback = []
197
- litellm._async_success_callback = []
198
- litellm.callbacks = []
199
-
200
- # Suppress all litellm debug info
201
- litellm.suppress_debug_info = True
202
- if hasattr(litellm, '_logging'):
203
- litellm._logging._disable_debugging()
204
-
205
- verbose = extra_settings.get('verbose', True)
206
-
207
- # Always suppress litellm's internal debug messages
208
- # These are from external libraries and not useful for debugging user code
209
- logging.getLogger("litellm.utils").setLevel(logging.WARNING)
210
- logging.getLogger("litellm.main").setLevel(logging.WARNING)
211
-
212
- # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
213
- loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
214
- if loglevel == 'DEBUG':
215
- logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.INFO)
216
- else:
217
- logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.WARNING)
218
-
219
- logging.getLogger("litellm.litellm_logging").setLevel(logging.WARNING)
220
- logging.getLogger("litellm.transformation").setLevel(logging.WARNING)
221
- litellm.suppress_debug_messages = True
222
- if hasattr(litellm, '_logging'):
223
- litellm._logging._disable_debugging()
224
- warnings.filterwarnings("ignore", category=RuntimeWarning)
225
-
226
- # Keep asyncio at WARNING unless explicitly in high debug mode
227
- logging.getLogger("asyncio").setLevel(logging.WARNING)
228
- logging.getLogger("selector_events").setLevel(logging.WARNING)
229
-
230
250
  except ImportError:
231
251
  raise ImportError(
232
252
  "LiteLLM is required but not installed. "
@@ -252,9 +272,9 @@ class LLM:
252
272
  self.base_url = base_url
253
273
  self.events = events
254
274
  self.extra_settings = extra_settings
255
- self.console = Console()
275
+ self._console = None # Lazy load console when needed
256
276
  self.chat_history = []
257
- self.verbose = verbose
277
+ self.verbose = extra_settings.get('verbose', True)
258
278
  self.markdown = extra_settings.get('markdown', True)
259
279
  self.self_reflect = extra_settings.get('self_reflect', False)
260
280
  self.max_reflect = extra_settings.get('max_reflect', 3)
@@ -267,7 +287,12 @@ class LLM:
267
287
  self.session_token_metrics: Optional[TokenMetrics] = None
268
288
  self.current_agent_name: Optional[str] = None
269
289
 
290
+ # Cache for formatted tools and messages
291
+ self._formatted_tools_cache = {}
292
+ self._max_cache_size = 100
293
+
270
294
  # Enable error dropping for cleaner output
295
+ import litellm
271
296
  litellm.drop_params = True
272
297
  # Enable parameter modification for providers like Anthropic
273
298
  litellm.modify_params = True
@@ -301,6 +326,14 @@ class LLM:
301
326
  reasoning_steps=self.reasoning_steps,
302
327
  extra_settings=self.extra_settings
303
328
  )
329
+
330
+ @property
331
+ def console(self):
332
+ """Lazily initialize Rich Console only when needed."""
333
+ if self._console is None:
334
+ from rich.console import Console
335
+ self._console = Console()
336
+ return self._console
304
337
 
305
338
  def _is_ollama_provider(self) -> bool:
306
339
  """Detect if this is an Ollama provider regardless of naming convention"""
@@ -733,6 +766,29 @@ class LLM:
733
766
 
734
767
  return fixed_schema
735
768
 
769
+ def _get_tools_cache_key(self, tools):
770
+ """Generate a cache key for tools list."""
771
+ if tools is None:
772
+ return "none"
773
+ if not tools:
774
+ return "empty"
775
+ # Create a simple hash based on tool names/content
776
+ tool_parts = []
777
+ for tool in tools:
778
+ if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
779
+ if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
780
+ tool_parts.append(f"openai:{tool['function']['name']}")
781
+ elif callable(tool) and hasattr(tool, '__name__'):
782
+ tool_parts.append(f"callable:{tool.__name__}")
783
+ elif isinstance(tool, str):
784
+ tool_parts.append(f"string:{tool}")
785
+ elif isinstance(tool, dict) and len(tool) == 1:
786
+ tool_name = next(iter(tool.keys()))
787
+ tool_parts.append(f"gemini:{tool_name}")
788
+ else:
789
+ tool_parts.append(f"other:{id(tool)}")
790
+ return "|".join(sorted(tool_parts))
791
+
736
792
  def _format_tools_for_litellm(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
737
793
  """Format tools for LiteLLM - handles all tool formats.
738
794
 
@@ -751,6 +807,11 @@ class LLM:
751
807
  """
752
808
  if not tools:
753
809
  return None
810
+
811
+ # Check cache first
812
+ tools_key = self._get_tools_cache_key(tools)
813
+ if tools_key in self._formatted_tools_cache:
814
+ return self._formatted_tools_cache[tools_key]
754
815
 
755
816
  formatted_tools = []
756
817
  for tool in tools:
@@ -808,8 +869,12 @@ class LLM:
808
869
  except (TypeError, ValueError) as e:
809
870
  logging.error(f"Tools are not JSON serializable: {e}")
810
871
  return None
811
-
812
- return formatted_tools if formatted_tools else None
872
+
873
+ # Cache the formatted tools
874
+ result = formatted_tools if formatted_tools else None
875
+ if len(self._formatted_tools_cache) < self._max_cache_size:
876
+ self._formatted_tools_cache[tools_key] = result
877
+ return result
813
878
 
814
879
  def get_response(
815
880
  self,
@@ -956,7 +1021,7 @@ class LLM:
956
1021
 
957
1022
  # Track token usage
958
1023
  if self.metrics:
959
- self._track_token_usage(final_response, model)
1024
+ self._track_token_usage(final_response, self.model)
960
1025
 
961
1026
  # Execute callbacks and display based on verbose setting
962
1027
  generation_time_val = time.time() - current_time
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: praisonaiagents
3
- Version: 0.0.155
3
+ Version: 0.0.156
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "praisonaiagents"
7
- version = "0.0.155"
7
+ version = "0.0.156"
8
8
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
9
9
  requires-python = ">=3.10"
10
10
  authors = [