praisonaiagents 0.0.154__py3-none-any.whl → 0.0.156__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

praisonaiagents/agent/agent.py:

@@ -46,6 +46,21 @@ if TYPE_CHECKING:
     from ..handoff import Handoff
 
 class Agent:
+    @classmethod
+    def _configure_logging(cls):
+        """Configure logging settings once for all agent instances."""
+        # Configure logging to suppress unwanted outputs
+        logging.getLogger("litellm").setLevel(logging.WARNING)
+
+        # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
+        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+        if loglevel == 'DEBUG':
+            logging.getLogger("httpx").setLevel(logging.INFO)
+            logging.getLogger("httpcore").setLevel(logging.INFO)
+        else:
+            logging.getLogger("httpx").setLevel(logging.WARNING)
+            logging.getLogger("httpcore").setLevel(logging.WARNING)
+
     def _generate_tool_definition(self, function_name):
         """
         Generate a tool definition from a function name by inspecting the function.
@@ -332,17 +347,10 @@ class Agent:
         if all(x is None for x in [name, role, goal, backstory, instructions]):
             raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")
 
-        # Configure logging to suppress unwanted outputs
-        logging.getLogger("litellm").setLevel(logging.WARNING)
-
-        # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
-        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
-        if loglevel == 'DEBUG':
-            logging.getLogger("httpx").setLevel(logging.INFO)
-            logging.getLogger("httpcore").setLevel(logging.INFO)
-        else:
-            logging.getLogger("httpx").setLevel(logging.WARNING)
-            logging.getLogger("httpcore").setLevel(logging.WARNING)
+        # Configure logging only once at the class level
+        if not hasattr(Agent, '_logging_configured'):
+            Agent._configure_logging()
+            Agent._logging_configured = True
 
         # If instructions are provided, use them to set role, goal, and backstory
         if instructions:
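
Taken together, these two hunks replace per-instance logging setup with run-once, class-level configuration. A minimal self-contained sketch of the pattern (the `Agent` name here is illustrative, not the package's full class):

```python
import logging
import os

class Agent:
    @classmethod
    def _configure_logging(cls):
        """Quiet noisy third-party loggers; allow httpx logs only when LOGLEVEL=DEBUG."""
        logging.getLogger("litellm").setLevel(logging.WARNING)
        debug = os.environ.get("LOGLEVEL", "INFO").upper() == "DEBUG"
        for name in ("httpx", "httpcore"):
            logging.getLogger(name).setLevel(logging.INFO if debug else logging.WARNING)

    def __init__(self):
        # The hasattr guard means only the first instantiation runs the setup.
        if not hasattr(Agent, "_logging_configured"):
            Agent._configure_logging()
            Agent._logging_configured = True

Agent(); Agent()  # _configure_logging executes exactly once
```
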
@@ -480,7 +488,7 @@ class Agent:
         self.reflect_prompt = reflect_prompt
         # Use the same model selection logic for reflect_llm
         self.reflect_llm = reflect_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
-        self.console = Console()  # Create a single console instance for the agent
+        self._console = None  # Lazy load console when needed
 
         # Initialize system prompt
         self.system_prompt = f"""{self.backstory}\n
@@ -488,8 +496,8 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """
 
-        # Generate unique IDs
-        self.agent_id = str(uuid.uuid4())
+        # Lazy generate unique ID when needed
+        self._agent_id = None
 
         # Store user_id
         self.user_id = user_id or "praison"
@@ -500,6 +508,14 @@ Your Goal: {self.goal}
         self.max_guardrail_retries = max_guardrail_retries
         self._guardrail_fn = None
         self._setup_guardrail()
+
+        # Cache for system prompts and formatted tools
+        # Note: In single-threaded usage (common case), these are safe
+        # For multi-threaded usage, consider using threading.Lock
+        self._system_prompt_cache = {}
+        self._formatted_tools_cache = {}
+        # Limit cache size to prevent unbounded growth
+        self._max_cache_size = 100
 
         # Process handoffs and convert them to tools
         self.handoffs = handoffs if handoffs else []
@@ -508,16 +524,23 @@ Your Goal: {self.goal}
         # Check if knowledge parameter has any values
         if not knowledge:
             self.knowledge = None
+            self._knowledge_sources = None
+            self._knowledge_processed = True  # No knowledge to process
         else:
-            # Initialize Knowledge with provided or default config
-            from praisonaiagents.knowledge import Knowledge
-            self.knowledge = Knowledge(knowledge_config or None)
-
-            # Handle knowledge
-            if knowledge:
-                for source in knowledge:
-                    self._process_knowledge(source)
+            # Store knowledge sources for lazy processing
+            self._knowledge_sources = knowledge
+            self._knowledge_processed = False
+            self._knowledge_config = knowledge_config
+            self.knowledge = None  # Will be initialized on first use
 
+    @property
+    def console(self):
+        """Lazily initialize Rich Console only when needed."""
+        if self._console is None:
+            from rich.console import Console
+            self._console = Console()
+        return self._console
+
     @property
     def _openai_client(self):
         """Lazily initialize OpenAI client only when needed."""
@@ -536,6 +559,14 @@ Your Goal: {self.goal}
                 raise e
         return self.__openai_client
 
+    @property
+    def agent_id(self):
+        """Lazily generate agent ID when first accessed."""
+        if self._agent_id is None:
+            import uuid
+            self._agent_id = str(uuid.uuid4())
+        return self._agent_id
+
     @property
     def llm_model(self):
         """Unified property to get the LLM model regardless of configuration type.
@@ -554,6 +585,19 @@ Your Goal: {self.goal}
         # Default fallback
         return "gpt-4o"
 
+    def _ensure_knowledge_processed(self):
+        """Ensure knowledge is initialized and processed when first accessed."""
+        if not self._knowledge_processed and self._knowledge_sources:
+            # Initialize Knowledge with provided or default config
+            from praisonaiagents.knowledge import Knowledge
+            self.knowledge = Knowledge(self._knowledge_config or None)
+
+            # Process all knowledge sources
+            for source in self._knowledge_sources:
+                self._process_knowledge(source)
+
+            self._knowledge_processed = True
+
     def _process_knowledge(self, knowledge_item):
         """Process and store knowledge from a file path, URL, or string."""
         try:
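
`_ensure_knowledge_processed` moves knowledge ingestion out of `__init__` and onto the first call that needs it. A stripped-down sketch of the deferral, with a hypothetical `ingest()` standing in for `Knowledge` and `_process_knowledge`:

```python
from typing import List, Optional

def ingest(source: str) -> str:
    """Hypothetical stand-in for chunking/embedding a knowledge source."""
    return source.upper()

class KnowledgeHolder:
    def __init__(self, sources: Optional[List[str]]):
        self._sources = sources or None
        self._processed = not sources  # nothing to process when no sources given
        self.store: Optional[List[str]] = None

    def _ensure_processed(self) -> None:
        # Runs at most once, on first use instead of at construction time
        if not self._processed and self._sources:
            self.store = [ingest(s) for s in self._sources]
            self._processed = True

    def search(self, query: str) -> List[str]:
        self._ensure_processed()
        return [doc for doc in (self.store or []) if query.upper() in doc]

assert KnowledgeHolder(["alpha notes"]).search("alpha") == ["ALPHA NOTES"]
assert KnowledgeHolder(None).search("alpha") == []
```
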
@@ -740,6 +784,23 @@ Your Goal: {self.goal}
 
         return current_response
 
+    def _get_tools_cache_key(self, tools):
+        """Generate a cache key for tools list."""
+        if tools is None:
+            return "none"
+        if not tools:
+            return "empty"
+        # Create a simple hash based on tool names
+        tool_names = []
+        for tool in tools:
+            if callable(tool) and hasattr(tool, '__name__'):
+                tool_names.append(tool.__name__)
+            elif isinstance(tool, dict) and 'function' in tool and 'name' in tool['function']:
+                tool_names.append(tool['function']['name'])
+            elif isinstance(tool, str):
+                tool_names.append(tool)
+        return "|".join(sorted(tool_names))
+
     def _build_system_prompt(self, tools=None):
         """Build the system prompt with tool information.
 
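
The cache key is just the sorted, `|`-joined list of tool names, so the same tools passed in a different order map to the same cache entry; note that tools sharing a name but differing in schema would also share an entry. A sketch mirroring the helper above (the `get_weather` tool is hypothetical):

```python
def tools_cache_key(tools):
    # Mirrors Agent._get_tools_cache_key from the hunk above
    if tools is None:
        return "none"
    if not tools:
        return "empty"
    names = []
    for tool in tools:
        if callable(tool) and hasattr(tool, "__name__"):
            names.append(tool.__name__)
        elif isinstance(tool, dict) and "function" in tool and "name" in tool["function"]:
            names.append(tool["function"]["name"])
        elif isinstance(tool, str):
            names.append(tool)
    return "|".join(sorted(names))

def get_weather(city: str) -> str:  # hypothetical callable tool
    return f"sunny in {city}"

a = tools_cache_key([get_weather, {"function": {"name": "search_web"}}, "calculator"])
b = tools_cache_key(["calculator", get_weather, {"function": {"name": "search_web"}}])
assert a == b == "calculator|get_weather|search_web"
```
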
@@ -751,6 +812,13 @@
         """
         if not self.use_system_prompt:
             return None
+
+        # Check cache first
+        tools_key = self._get_tools_cache_key(tools)
+        cache_key = f"{self.role}:{self.goal}:{tools_key}"
+
+        if cache_key in self._system_prompt_cache:
+            return self._system_prompt_cache[cache_key]
 
         system_prompt = f"""{self.backstory}\n
 Your Role: {self.role}\n
@@ -785,6 +853,10 @@ Your Goal: {self.goal}"""
         if tool_names:
             system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."
 
+        # Cache the generated system prompt
+        # Simple cache size limit to prevent unbounded growth
+        if len(self._system_prompt_cache) < self._max_cache_size:
+            self._system_prompt_cache[cache_key] = system_prompt
         return system_prompt
 
     def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None, tools=None):
@@ -860,6 +932,11 @@ Your Goal: {self.goal}"""
 
         if not tools:
             return []
+
+        # Check cache first
+        tools_key = self._get_tools_cache_key(tools)
+        if tools_key in self._formatted_tools_cache:
+            return self._formatted_tools_cache[tools_key]
 
         formatted_tools = []
         for tool in tools:
@@ -909,7 +986,11 @@ Your Goal: {self.goal}"""
             except (TypeError, ValueError) as e:
                 logging.error(f"Tools are not JSON serializable: {e}")
                 return []
-
+
+        # Cache the formatted tools
+        # Simple cache size limit to prevent unbounded growth
+        if len(self._formatted_tools_cache) < self._max_cache_size:
+            self._formatted_tools_cache[tools_key] = formatted_tools
         return formatted_tools
 
     def generate_task(self) -> 'Task':
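
Both caches share the same write policy: keep inserting until `_max_cache_size` entries exist, then stop caching instead of evicting. That keeps memory bounded with no LRU bookkeeping, at the cost of recomputing entries once the cap is hit. The policy in miniature (class name is illustrative):

```python
class FillThenFreezeCache:
    """Bounded cache that stops accepting entries at capacity (no eviction)."""
    def __init__(self, max_size: int = 100):
        self._data: dict = {}
        self._max_size = max_size

    def get(self, key):
        return self._data.get(key)

    def put(self, key, value) -> None:
        # Past capacity, new keys are silently dropped and recomputed by callers
        if len(self._data) < self._max_size:
            self._data[key] = value

cache = FillThenFreezeCache(max_size=2)
for k, v in [("a", 1), ("b", 2), ("c", 3)]:
    cache.put(k, v)
assert cache.get("b") == 2 and cache.get("c") is None  # "c" was never cached
```
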
@@ -1280,6 +1361,9 @@ Your Goal: {self.goal}"""
         if stream is None:
             stream = self.stream
         # Search for existing knowledge if any knowledge is provided
+        if self._knowledge_sources and not self._knowledge_processed:
+            self._ensure_knowledge_processed()
+
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
             if search_results:
@@ -1638,6 +1722,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             tools = self.tools
 
         # Search for existing knowledge if any knowledge is provided
+        if self._knowledge_sources and not self._knowledge_processed:
+            self._ensure_knowledge_processed()
+
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
             if search_results:
@@ -2031,6 +2118,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         if self._using_custom_llm:
             # Handle knowledge search
             actual_prompt = prompt
+            if self._knowledge_sources and not self._knowledge_processed:
+                self._ensure_knowledge_processed()
+
             if self.knowledge:
                 search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
                 if search_results:
@@ -2110,6 +2200,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             # For OpenAI-style models, implement proper streaming without display
             # Handle knowledge search
             actual_prompt = prompt
+            if self._knowledge_sources and not self._knowledge_processed:
+                self._ensure_knowledge_processed()
+
             if self.knowledge:
                 search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
                 if search_results:

praisonaiagents/knowledge/knowledge.py (class MongoDBMemory):

@@ -59,11 +59,11 @@ class MongoDBMemory:
         self.collection_name = self.vector_store_config.get("collection", "knowledge_base")
         self.use_vector_search = self.vector_store_config.get("use_vector_search", True)
 
+        # Initialize embedding model before MongoDB to ensure embedding_model_name is available
+        self._init_embedding_model()
+
         # Initialize MongoDB client
         self._init_mongodb()
-
-        # Initialize embedding model
-        self._init_embedding_model()
 
     def _init_mongodb(self):
         """Initialize MongoDB client and collection."""
@@ -159,7 +159,14 @@ class MongoDBMemory:
                 }
             }
 
-            self.collection.create_search_index(vector_index_def, "vector_index")
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=vector_index_def, name="vector_index")
+                self.collection.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                self.collection.create_search_index(vector_index_def, "vector_index")
 
         except Exception as e:
             logging.warning(f"Could not create vector search index: {e}")

praisonaiagents/llm/llm.py:

@@ -53,6 +53,9 @@ class LLM:
     Anthropic, and others through LiteLLM.
     """
 
+    # Class-level flag for one-time logging configuration
+    _logging_configured = False
+
     # Default window sizes for different models (75% of actual to be safe)
     MODEL_WINDOWS = {
         # OpenAI
@@ -103,6 +106,57 @@
     # Ollama iteration threshold for summary generation
     OLLAMA_SUMMARY_ITERATION_THRESHOLD = 1
 
+    @classmethod
+    def _configure_logging(cls):
+        """Configure logging settings once for all LLM instances."""
+        try:
+            import litellm
+            # Disable telemetry
+            litellm.telemetry = False
+
+            # Set litellm options globally
+            litellm.set_verbose = False
+            litellm.success_callback = []
+            litellm._async_success_callback = []
+            litellm.callbacks = []
+
+            # Suppress all litellm debug info
+            litellm.suppress_debug_info = True
+            if hasattr(litellm, '_logging'):
+                litellm._logging._disable_debugging()
+
+            # Always suppress litellm's internal debug messages
+            logging.getLogger("litellm.utils").setLevel(logging.WARNING)
+            logging.getLogger("litellm.main").setLevel(logging.WARNING)
+            logging.getLogger("litellm.litellm_logging").setLevel(logging.WARNING)
+            logging.getLogger("litellm.transformation").setLevel(logging.WARNING)
+
+            # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
+            loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+            if loglevel == 'DEBUG':
+                logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.INFO)
+            else:
+                logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.WARNING)
+
+            # Keep asyncio at WARNING unless explicitly in high debug mode
+            logging.getLogger("asyncio").setLevel(logging.WARNING)
+            logging.getLogger("selector_events").setLevel(logging.WARNING)
+
+            # Enable error dropping for cleaner output
+            litellm.drop_params = True
+            # Enable parameter modification for providers like Anthropic
+            litellm.modify_params = True
+
+            if hasattr(litellm, '_logging'):
+                litellm._logging._disable_debugging()
+            warnings.filterwarnings("ignore", category=RuntimeWarning)
+
+            cls._logging_configured = True
+
+        except ImportError:
+            # If litellm not installed, we'll handle it in __init__
+            pass
+
     def _log_llm_config(self, method_name: str, **config):
         """Centralized debug logging for LLM configuration and parameters.
 
@@ -186,47 +240,13 @@
         events: List[Any] = [],
         **extra_settings
     ):
+        # Configure logging only once at the class level
+        if not LLM._logging_configured:
+            LLM._configure_logging()
+
+        # Import litellm after logging is configured
         try:
             import litellm
-            # Disable telemetry
-            litellm.telemetry = False
-
-            # Set litellm options globally
-            litellm.set_verbose = False
-            litellm.success_callback = []
-            litellm._async_success_callback = []
-            litellm.callbacks = []
-
-            # Suppress all litellm debug info
-            litellm.suppress_debug_info = True
-            if hasattr(litellm, '_logging'):
-                litellm._logging._disable_debugging()
-
-            verbose = extra_settings.get('verbose', True)
-
-            # Always suppress litellm's internal debug messages
-            # These are from external libraries and not useful for debugging user code
-            logging.getLogger("litellm.utils").setLevel(logging.WARNING)
-            logging.getLogger("litellm.main").setLevel(logging.WARNING)
-
-            # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
-            loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
-            if loglevel == 'DEBUG':
-                logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.INFO)
-            else:
-                logging.getLogger("litellm.llms.custom_httpx.http_handler").setLevel(logging.WARNING)
-
-            logging.getLogger("litellm.litellm_logging").setLevel(logging.WARNING)
-            logging.getLogger("litellm.transformation").setLevel(logging.WARNING)
-            litellm.suppress_debug_messages = True
-            if hasattr(litellm, '_logging'):
-                litellm._logging._disable_debugging()
-            warnings.filterwarnings("ignore", category=RuntimeWarning)
-
-            # Keep asyncio at WARNING unless explicitly in high debug mode
-            logging.getLogger("asyncio").setLevel(logging.WARNING)
-            logging.getLogger("selector_events").setLevel(logging.WARNING)
-
         except ImportError:
             raise ImportError(
                 "LiteLLM is required but not installed. "
@@ -252,9 +272,9 @@
         self.base_url = base_url
         self.events = events
         self.extra_settings = extra_settings
-        self.console = Console()
+        self._console = None  # Lazy load console when needed
         self.chat_history = []
-        self.verbose = verbose
+        self.verbose = extra_settings.get('verbose', True)
         self.markdown = extra_settings.get('markdown', True)
         self.self_reflect = extra_settings.get('self_reflect', False)
         self.max_reflect = extra_settings.get('max_reflect', 3)
@@ -267,7 +287,12 @@
         self.session_token_metrics: Optional[TokenMetrics] = None
         self.current_agent_name: Optional[str] = None
 
+        # Cache for formatted tools and messages
+        self._formatted_tools_cache = {}
+        self._max_cache_size = 100
+
         # Enable error dropping for cleaner output
+        import litellm
         litellm.drop_params = True
         # Enable parameter modification for providers like Anthropic
         litellm.modify_params = True
@@ -301,6 +326,14 @@
             reasoning_steps=self.reasoning_steps,
             extra_settings=self.extra_settings
         )
+
+    @property
+    def console(self):
+        """Lazily initialize Rich Console only when needed."""
+        if self._console is None:
+            from rich.console import Console
+            self._console = Console()
+        return self._console
 
     def _is_ollama_provider(self) -> bool:
         """Detect if this is an Ollama provider regardless of naming convention"""
@@ -733,6 +766,29 @@
 
         return fixed_schema
 
+    def _get_tools_cache_key(self, tools):
+        """Generate a cache key for tools list."""
+        if tools is None:
+            return "none"
+        if not tools:
+            return "empty"
+        # Create a simple hash based on tool names/content
+        tool_parts = []
+        for tool in tools:
+            if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
+                if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
+                    tool_parts.append(f"openai:{tool['function']['name']}")
+            elif callable(tool) and hasattr(tool, '__name__'):
+                tool_parts.append(f"callable:{tool.__name__}")
+            elif isinstance(tool, str):
+                tool_parts.append(f"string:{tool}")
+            elif isinstance(tool, dict) and len(tool) == 1:
+                tool_name = next(iter(tool.keys()))
+                tool_parts.append(f"gemini:{tool_name}")
+            else:
+                tool_parts.append(f"other:{id(tool)}")
+        return "|".join(sorted(tool_parts))
+
     def _format_tools_for_litellm(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
         """Format tools for LiteLLM - handles all tool formats.
 
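
Unlike the `Agent` version, this key prefixes each entry with the detected tool format, and unknown objects fall back to `other:{id(tool)}`. The practical effect: re-passing the same object hits the cache, while a fresh, structurally identical object gets a new key and is reformatted. A small sketch of just that fallback branch (the `OpaqueTool` class is hypothetical):

```python
class OpaqueTool:
    pass  # not callable-with-__name__, not a dict, not a string -> "other:" branch

t = OpaqueTool()
key_reused = f"other:{id(t)}"            # reusing t later yields this same key
key_fresh = f"other:{id(OpaqueTool())}"  # a distinct object yields a different key
assert key_reused != key_fresh
```
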
@@ -751,6 +807,11 @@
         """
         if not tools:
             return None
+
+        # Check cache first
+        tools_key = self._get_tools_cache_key(tools)
+        if tools_key in self._formatted_tools_cache:
+            return self._formatted_tools_cache[tools_key]
 
         formatted_tools = []
         for tool in tools:
@@ -808,8 +869,12 @@
             except (TypeError, ValueError) as e:
                 logging.error(f"Tools are not JSON serializable: {e}")
                 return None
-
-        return formatted_tools if formatted_tools else None
+
+        # Cache the formatted tools
+        result = formatted_tools if formatted_tools else None
+        if len(self._formatted_tools_cache) < self._max_cache_size:
+            self._formatted_tools_cache[tools_key] = result
+        return result
 
     def get_response(
         self,
@@ -956,7 +1021,7 @@
 
             # Track token usage
             if self.metrics:
-                self._track_token_usage(final_response, model)
+                self._track_token_usage(final_response, self.model)
 
             # Execute callbacks and display based on verbose setting
             generation_time_val = time.time() - current_time

praisonaiagents/memory/memory.py:

@@ -377,8 +377,16 @@ class Memory:
 
         # Create vector indexes for both short and long term collections
         try:
-            self.mongo_short_term.create_search_index(vector_index_def, "vector_index")
-            self.mongo_long_term.create_search_index(vector_index_def, "vector_index")
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=vector_index_def, name="vector_index")
+                self.mongo_short_term.create_search_index(search_index_model)
+                self.mongo_long_term.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                self.mongo_short_term.create_search_index(vector_index_def, "vector_index")
+                self.mongo_long_term.create_search_index(vector_index_def, "vector_index")
             self._log_verbose("Vector search indexes created successfully")
         except Exception as e:
             self._log_verbose(f"Could not create vector search indexes: {e}", logging.WARNING)

praisonaiagents/tools/mongodb_tools.py:

@@ -335,7 +335,14 @@ class MongoDBTools:
             }
         }
 
         try:
-            collection.create_search_index(index_definition, index_name)
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=index_definition, name=index_name)
+                collection.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                collection.create_search_index(index_definition, index_name)
             return {
                 "success": True,
                 "message": f"Vector search index '{index_name}' created successfully"

praisonaiagents-0.0.156.dist-info/METADATA:

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.154
+Version: 0.0.156
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10

praisonaiagents-0.0.156.dist-info/RECORD:

@@ -6,7 +6,7 @@ praisonaiagents/flow_display.py,sha256=E84J_H3h8L-AqL_F1JzEUInQYdjmIEuNL1LZr4__H
 praisonaiagents/main.py,sha256=NuAmE-ZrH4X0O9ysNA2AfxEQ8APPssO_ZR_f7h97QOo,17370
 praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
 praisonaiagents/agent/__init__.py,sha256=KBqW_augD-HcaV3FL88gUmhDCpwnSTavGENi7RqneTo,505
-praisonaiagents/agent/agent.py,sha256=pecp8Bt7_vXCB4MfUuMTZ3no4WipKOzFPGhFF5ADC5Y,144243
+praisonaiagents/agent/agent.py,sha256=sJ_mMp2v8ZCok32G-wzO71HEtjuBqlrEPttUXQgiui8,148240
 praisonaiagents/agent/context_agent.py,sha256=zNI2Waghn5eo8g3QM1Dc7ZNSr2xw41D87GIK81FjW-Y,107489
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=xKDhW8T1Y3e15lQpY6N2pdvBNJmAoWDibJa4BYa-Njs,10205
@@ -19,9 +19,9 @@ praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXC
 praisonaiagents/guardrails/llm_guardrail.py,sha256=czdOIoY-3PZOchX317tz4O2h2WYE42Ua4tqVzyuoNlI,4859
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
-praisonaiagents/knowledge/knowledge.py,sha256=OzK81oA6sjk9nAUWphS7AkXxvalrv2AHB4FtHjzYgxI,30115
+praisonaiagents/knowledge/knowledge.py,sha256=tog38b0SjFMoLuFBo0M1zHl9Dzzxa9YRv9FO7OZSpns,30587
 praisonaiagents/llm/__init__.py,sha256=SqdU1pRqPrR6jZeWYyDeTvmZKCACywk0v4P0k5Fuowk,1107
-praisonaiagents/llm/llm.py,sha256=155R1XHZLSDZsq67Hmglwc4N_SE2gKgid0KCFYNX3ww,176594
+praisonaiagents/llm/llm.py,sha256=4bFBazXH6TSHKt1-V6UNivf22_uVJXEwovWMpvPmYYg,179165
 praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
 praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
 praisonaiagents/llm/openai_client.py,sha256=3EVjIs3tnBNFDy_4ZxX9DJVq54kS0FMm38m5Gkpun7U,57234
@@ -30,7 +30,7 @@ praisonaiagents/mcp/mcp.py,sha256=ChaSwLCcFBB9b8eNuj0DoKbK1EqpyF1T_7xz0FX-5-A,23
 praisonaiagents/mcp/mcp_http_stream.py,sha256=TDFWMJMo8VqLXtXCW73REpmkU3t9n7CAGMa9b4dhI-c,23366
 praisonaiagents/mcp/mcp_sse.py,sha256=KO10tAgZ5vSKeRhkJIZcdJ0ZmhRybS39i1KybWt4D7M,9128
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
-praisonaiagents/memory/memory.py,sha256=HjanP8sSi91wifvPkQDH40uGYdDZPOeir29fCu6y-b8,64584
+praisonaiagents/memory/memory.py,sha256=B2DMuvvr4W_EnrpoN16K73qSqYdduqhMcV8ASzyh2L8,65116
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=wXKZ2Z26vB9osmVbD5xqkUlUQRvWEpvL8j9hiuiHrQ0,78246
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
@@ -53,7 +53,7 @@ praisonaiagents/tools/duckduckgo_tools.py,sha256=ynlB5ZyWfHYjUq0JZXH12TganqTihgD
 praisonaiagents/tools/excel_tools.py,sha256=e2HqcwnyBueOyss0xEKxff3zB4w4sNWCOMXvZfbDYlE,11309
 praisonaiagents/tools/file_tools.py,sha256=N0fjTxxi89UupAvtEUwXjPrBvbppf8bwaNLfnjZ05q4,10824
 praisonaiagents/tools/json_tools.py,sha256=ApUYNuQ1qnbmYNCxSlx6Tth_H1yo8mhWtZ7Rr2WS6C4,16507
-praisonaiagents/tools/mongodb_tools.py,sha256=Y1n0X58nJkRYMb_ZjlXeH2PENbJlN-OLcfo44N647Dc,21073
+praisonaiagents/tools/mongodb_tools.py,sha256=gmRxV4In0aFVFYWNEOLzg9yP-jCuvFp4Lm_6vWjtlnQ,21505
 praisonaiagents/tools/newspaper_tools.py,sha256=NyhojNPeyULBGcAWGOT1X70qVkh3FgZrpH-S7PEmrwI,12667
 praisonaiagents/tools/pandas_tools.py,sha256=yzCeY4jetKrFIRA15Tr5OQ5d94T8DaSpzglx2UiWfPs,11092
 praisonaiagents/tools/python_tools.py,sha256=4dWJddySR0snCEcQudemg5qvbuNrUYxO-jXnzuWixqM,16461
@@ -67,7 +67,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.154.dist-info/METADATA,sha256=55cpHdKamPJ_tSTDYyXli-REfXLqafPKW7GH-Gf_Sdo,2146
-praisonaiagents-0.0.154.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.154.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.154.dist-info/RECORD,,
+praisonaiagents-0.0.156.dist-info/METADATA,sha256=I3PDqNvozu1_arRs4qEUh72poDF_Zi5yoUtZVPGwPB0,2146
+praisonaiagents-0.0.156.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.156.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.156.dist-info/RECORD,,