praisonaiagents 0.0.154__py3-none-any.whl → 0.0.155__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in that public registry.
@@ -46,6 +46,21 @@ if TYPE_CHECKING:
     from ..handoff import Handoff
 
 class Agent:
+    @classmethod
+    def _configure_logging(cls):
+        """Configure logging settings once for all agent instances."""
+        # Configure logging to suppress unwanted outputs
+        logging.getLogger("litellm").setLevel(logging.WARNING)
+
+        # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
+        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+        if loglevel == 'DEBUG':
+            logging.getLogger("httpx").setLevel(logging.INFO)
+            logging.getLogger("httpcore").setLevel(logging.INFO)
+        else:
+            logging.getLogger("httpx").setLevel(logging.WARNING)
+            logging.getLogger("httpcore").setLevel(logging.WARNING)
+
     def _generate_tool_definition(self, function_name):
         """
         Generate a tool definition from a function name by inspecting the function.
@@ -332,17 +347,10 @@ class Agent:
         if all(x is None for x in [name, role, goal, backstory, instructions]):
             raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")
 
-        # Configure logging to suppress unwanted outputs
-        logging.getLogger("litellm").setLevel(logging.WARNING)
-
-        # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
-        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
-        if loglevel == 'DEBUG':
-            logging.getLogger("httpx").setLevel(logging.INFO)
-            logging.getLogger("httpcore").setLevel(logging.INFO)
-        else:
-            logging.getLogger("httpx").setLevel(logging.WARNING)
-            logging.getLogger("httpcore").setLevel(logging.WARNING)
+        # Configure logging only once at the class level
+        if not hasattr(Agent, '_logging_configured'):
+            Agent._configure_logging()
+            Agent._logging_configured = True
 
         # If instructions are provided, use them to set role, goal, and backstory
         if instructions:
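Taken together, the two hunks above move logger configuration out of `__init__` and behind a class-level guard, so the setup runs once per process instead of once per `Agent` instance. A minimal sketch of the same idempotent pattern, using a hypothetical `Service` class rather than the real `Agent`:

```python
import logging
import os


class Service:
    """Illustrative stand-in for Agent; not part of praisonaiagents."""

    @classmethod
    def _configure_logging(cls):
        # Third-party loggers are noisy by default; raise their thresholds.
        logging.getLogger("litellm").setLevel(logging.WARNING)
        # Allow httpx/httpcore output only when LOGLEVEL=debug.
        debug = os.environ.get("LOGLEVEL", "INFO").upper() == "DEBUG"
        level = logging.INFO if debug else logging.WARNING
        for name in ("httpx", "httpcore"):
            logging.getLogger(name).setLevel(level)

    def __init__(self):
        # The guard lives on the class, so a second instance skips the setup.
        if not hasattr(Service, "_logging_configured"):
            Service._configure_logging()
            Service._logging_configured = True


Service()
Service()  # logging is configured only on the first construction
```

A module-level flag would work equally well; keeping the flag on the class mirrors the diff and avoids an extra global.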
@@ -480,7 +488,7 @@ class Agent:
         self.reflect_prompt = reflect_prompt
         # Use the same model selection logic for reflect_llm
         self.reflect_llm = reflect_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
-        self.console = Console()  # Create a single console instance for the agent
+        self._console = None  # Lazy load console when needed
 
         # Initialize system prompt
         self.system_prompt = f"""{self.backstory}\n
@@ -488,8 +496,8 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """
 
-        # Generate unique IDs
-        self.agent_id = str(uuid.uuid4())
+        # Lazy generate unique ID when needed
+        self._agent_id = None
 
         # Store user_id
         self.user_id = user_id or "praison"
@@ -500,6 +508,14 @@ Your Goal: {self.goal}
         self.max_guardrail_retries = max_guardrail_retries
         self._guardrail_fn = None
         self._setup_guardrail()
+
+        # Cache for system prompts and formatted tools
+        # Note: In single-threaded usage (common case), these are safe
+        # For multi-threaded usage, consider using threading.Lock
+        self._system_prompt_cache = {}
+        self._formatted_tools_cache = {}
+        # Limit cache size to prevent unbounded growth
+        self._max_cache_size = 100
 
         # Process handoffs and convert them to tools
         self.handoffs = handoffs if handoffs else []
@@ -508,16 +524,23 @@ Your Goal: {self.goal}
         # Check if knowledge parameter has any values
         if not knowledge:
             self.knowledge = None
+            self._knowledge_sources = None
+            self._knowledge_processed = True  # No knowledge to process
         else:
-            # Initialize Knowledge with provided or default config
-            from praisonaiagents.knowledge import Knowledge
-            self.knowledge = Knowledge(knowledge_config or None)
-
-            # Handle knowledge
-            if knowledge:
-                for source in knowledge:
-                    self._process_knowledge(source)
+            # Store knowledge sources for lazy processing
+            self._knowledge_sources = knowledge
+            self._knowledge_processed = False
+            self._knowledge_config = knowledge_config
+            self.knowledge = None  # Will be initialized on first use
 
+    @property
+    def console(self):
+        """Lazily initialize Rich Console only when needed."""
+        if self._console is None:
+            from rich.console import Console
+            self._console = Console()
+        return self._console
+
     @property
     def _openai_client(self):
         """Lazily initialize OpenAI client only when needed."""
@@ -536,6 +559,14 @@ Your Goal: {self.goal}
                 raise e
         return self.__openai_client
 
+    @property
+    def agent_id(self):
+        """Lazily generate agent ID when first accessed."""
+        if self._agent_id is None:
+            import uuid
+            self._agent_id = str(uuid.uuid4())
+        return self._agent_id
+
     @property
     def llm_model(self):
         """Unified property to get the LLM model regardless of configuration type.
@@ -554,6 +585,19 @@ Your Goal: {self.goal}
         # Default fallback
         return "gpt-4o"
 
+    def _ensure_knowledge_processed(self):
+        """Ensure knowledge is initialized and processed when first accessed."""
+        if not self._knowledge_processed and self._knowledge_sources:
+            # Initialize Knowledge with provided or default config
+            from praisonaiagents.knowledge import Knowledge
+            self.knowledge = Knowledge(self._knowledge_config or None)
+
+            # Process all knowledge sources
+            for source in self._knowledge_sources:
+                self._process_knowledge(source)
+
+            self._knowledge_processed = True
+
     def _process_knowledge(self, knowledge_item):
         """Process and store knowledge from a file path, URL, or string."""
         try:
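The `console`, `agent_id`, and knowledge changes above all use the same lazy-initialization pattern: `__init__` stores `None` (or the raw inputs) and a read-only property or helper materializes the value on first access, deferring the `rich`, `uuid`, and `Knowledge` imports. A small self-contained sketch of the pattern; the class name is illustrative, not part of the package:

```python
import uuid


class LazyExample:
    """Illustrative class; mirrors the pattern, not the Agent API."""

    def __init__(self):
        self._console = None   # deferred: rich Console is only built on demand
        self._agent_id = None  # deferred: UUID only generated when read

    @property
    def console(self):
        if self._console is None:
            from rich.console import Console  # import deferred as well
            self._console = Console()
        return self._console

    @property
    def agent_id(self):
        if self._agent_id is None:
            self._agent_id = str(uuid.uuid4())
        return self._agent_id


obj = LazyExample()                   # cheap: nothing imported or generated yet
print(obj.agent_id)                   # UUID created on first read
print(obj.agent_id == obj.agent_id)   # True: cached after the first access
```

Callers that already read `agent.console` or `agent.agent_id` see no behavioral difference; only the cost moves from construction time to first use.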
@@ -740,6 +784,23 @@ Your Goal: {self.goal}
 
         return current_response
 
+    def _get_tools_cache_key(self, tools):
+        """Generate a cache key for tools list."""
+        if tools is None:
+            return "none"
+        if not tools:
+            return "empty"
+        # Create a simple hash based on tool names
+        tool_names = []
+        for tool in tools:
+            if callable(tool) and hasattr(tool, '__name__'):
+                tool_names.append(tool.__name__)
+            elif isinstance(tool, dict) and 'function' in tool and 'name' in tool['function']:
+                tool_names.append(tool['function']['name'])
+            elif isinstance(tool, str):
+                tool_names.append(tool)
+        return "|".join(sorted(tool_names))
+
     def _build_system_prompt(self, tools=None):
         """Build the system prompt with tool information.
 
@@ -751,6 +812,13 @@ Your Goal: {self.goal}
         """
         if not self.use_system_prompt:
             return None
+
+        # Check cache first
+        tools_key = self._get_tools_cache_key(tools)
+        cache_key = f"{self.role}:{self.goal}:{tools_key}"
+
+        if cache_key in self._system_prompt_cache:
+            return self._system_prompt_cache[cache_key]
 
         system_prompt = f"""{self.backstory}\n
 Your Role: {self.role}\n
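`_get_tools_cache_key` builds an order-insensitive key from tool names only, and `_build_system_prompt` combines it with the agent's role and goal. A rough illustration of the key shapes this produces, re-implementing the same logic standalone (the `web_search` stub and the "Researcher:Find facts" role/goal are made up for the example):

```python
def get_tools_cache_key(tools):
    """Re-implementation of the key logic above, for illustration only."""
    if tools is None:
        return "none"
    if not tools:
        return "empty"
    tool_names = []
    for tool in tools:
        if callable(tool) and hasattr(tool, "__name__"):
            tool_names.append(tool.__name__)
        elif isinstance(tool, dict) and "function" in tool and "name" in tool["function"]:
            tool_names.append(tool["function"]["name"])
        elif isinstance(tool, str):
            tool_names.append(tool)
    return "|".join(sorted(tool_names))


def web_search(query: str) -> str:
    return "stub result"


print(get_tools_cache_key(None))                        # none
print(get_tools_cache_key([]))                          # empty
print(get_tools_cache_key(["calculator", web_search]))  # calculator|web_search
print(f"Researcher:Find facts:{get_tools_cache_key([web_search])}")  # full cache-key shape
```

Because only names feed the key, two tools that share a name but differ in schema would reuse the same cache entry; the caches are per-agent and capped, so this is a simplicity/performance trade-off rather than a correctness guarantee.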
@@ -785,6 +853,10 @@ Your Goal: {self.goal}"""
         if tool_names:
             system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."
 
+        # Cache the generated system prompt
+        # Simple cache size limit to prevent unbounded growth
+        if len(self._system_prompt_cache) < self._max_cache_size:
+            self._system_prompt_cache[cache_key] = system_prompt
         return system_prompt
 
     def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None, tools=None):
@@ -860,6 +932,11 @@ Your Goal: {self.goal}"""
 
         if not tools:
             return []
+
+        # Check cache first
+        tools_key = self._get_tools_cache_key(tools)
+        if tools_key in self._formatted_tools_cache:
+            return self._formatted_tools_cache[tools_key]
 
         formatted_tools = []
         for tool in tools:
@@ -909,7 +986,11 @@ Your Goal: {self.goal}"""
         except (TypeError, ValueError) as e:
             logging.error(f"Tools are not JSON serializable: {e}")
             return []
-
+
+        # Cache the formatted tools
+        # Simple cache size limit to prevent unbounded growth
+        if len(self._formatted_tools_cache) < self._max_cache_size:
+            self._formatted_tools_cache[tools_key] = formatted_tools
         return formatted_tools
 
     def generate_task(self) -> 'Task':
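Both the system-prompt cache and the formatted-tools cache are plain dicts capped at `_max_cache_size` (100) entries: once the cap is reached, new results are simply not cached, and nothing is evicted. A minimal sketch of that insert-if-below-cap policy in isolation; the module-level names here are hypothetical, since the diff keeps these on the `Agent` instance:

```python
MAX_CACHE_SIZE = 100
formatted_tools_cache: dict = {}


def cache_result(key, value):
    # Same policy as the diff: keep the first MAX_CACHE_SIZE entries,
    # then silently skip caching -- there is no eviction.
    if len(formatted_tools_cache) < MAX_CACHE_SIZE:
        formatted_tools_cache[key] = value


def lookup(key):
    return formatted_tools_cache.get(key)  # None means "recompute and try to cache"
```

An LRU structure would evict old entries instead of skipping new ones, but for a small per-agent cache the simpler policy avoids eviction bookkeeping.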
@@ -1280,6 +1361,9 @@ Your Goal: {self.goal}"""
         if stream is None:
             stream = self.stream
         # Search for existing knowledge if any knowledge is provided
+        if self._knowledge_sources and not self._knowledge_processed:
+            self._ensure_knowledge_processed()
+
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
             if search_results:
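With the lazy path, knowledge sources passed to the constructor are no longer chunked and embedded at construction time; the first call that needs them triggers `_ensure_knowledge_processed()` right before `self.knowledge.search(...)`. A hedged usage sketch of the intended behavior, assuming the public `Agent(...)` constructor and `chat()` entry point behave as in prior releases and that an LLM key is configured; the file path and prompt are placeholders:

```python
from praisonaiagents import Agent

agent = Agent(
    instructions="Answer questions using the attached document.",
    knowledge=["docs/handbook.pdf"],  # hypothetical file; stored but not processed yet
)

# Constructing the agent is now fast. Chunking/embedding of the knowledge
# source happens inside the first call that needs it, just before the
# knowledge search shown in the hunk above.
response = agent.chat("What does the handbook say about vacation policy?")
print(response)
```

The same guard is repeated in the other response paths below so that every entry point sees processed knowledge.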
@@ -1638,6 +1722,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             tools = self.tools
 
         # Search for existing knowledge if any knowledge is provided
+        if self._knowledge_sources and not self._knowledge_processed:
+            self._ensure_knowledge_processed()
+
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
             if search_results:
@@ -2031,6 +2118,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         if self._using_custom_llm:
             # Handle knowledge search
             actual_prompt = prompt
+            if self._knowledge_sources and not self._knowledge_processed:
+                self._ensure_knowledge_processed()
+
             if self.knowledge:
                 search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
                 if search_results:
@@ -2110,6 +2200,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             # For OpenAI-style models, implement proper streaming without display
             # Handle knowledge search
             actual_prompt = prompt
+            if self._knowledge_sources and not self._knowledge_processed:
+                self._ensure_knowledge_processed()
+
             if self.knowledge:
                 search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
                 if search_results:
@@ -59,11 +59,11 @@ class MongoDBMemory:
         self.collection_name = self.vector_store_config.get("collection", "knowledge_base")
         self.use_vector_search = self.vector_store_config.get("use_vector_search", True)
 
+        # Initialize embedding model before MongoDB to ensure embedding_model_name is available
+        self._init_embedding_model()
+
         # Initialize MongoDB client
         self._init_mongodb()
-
-        # Initialize embedding model
-        self._init_embedding_model()
 
     def _init_mongodb(self):
         """Initialize MongoDB client and collection."""
@@ -159,7 +159,14 @@ class MongoDBMemory:
                 }
             }
 
-            self.collection.create_search_index(vector_index_def, "vector_index")
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=vector_index_def, name="vector_index")
+                self.collection.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                self.collection.create_search_index(vector_index_def, "vector_index")
 
         except Exception as e:
             logging.warning(f"Could not create vector search index: {e}")
@@ -377,8 +377,16 @@ class Memory:
 
             # Create vector indexes for both short and long term collections
            try:
-                self.mongo_short_term.create_search_index(vector_index_def, "vector_index")
-                self.mongo_long_term.create_search_index(vector_index_def, "vector_index")
+                # Use SearchIndexModel for PyMongo 4.6+ compatibility
+                try:
+                    from pymongo.operations import SearchIndexModel
+                    search_index_model = SearchIndexModel(definition=vector_index_def, name="vector_index")
+                    self.mongo_short_term.create_search_index(search_index_model)
+                    self.mongo_long_term.create_search_index(search_index_model)
+                except ImportError:
+                    # Fallback for older PyMongo versions
+                    self.mongo_short_term.create_search_index(vector_index_def, "vector_index")
+                    self.mongo_long_term.create_search_index(vector_index_def, "vector_index")
                 self._log_verbose("Vector search indexes created successfully")
             except Exception as e:
                 self._log_verbose(f"Could not create vector search indexes: {e}", logging.WARNING)
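This hunk and its neighbours apply the same PyMongo compatibility shim in three places: prefer `pymongo.operations.SearchIndexModel` when it is importable, and fall back to the previous positional call otherwise. A standalone sketch of the shim, assuming a reachable Atlas cluster with Atlas Search enabled; the connection string, database/collection names, and index definition are placeholders, not the package's actual values:

```python
from pymongo import MongoClient

client = MongoClient("mongodb+srv://user:pass@cluster.example.mongodb.net")
collection = client["praisonai"]["knowledge_base"]

vector_index_def = {
    "mappings": {
        "dynamic": True,
        "fields": {
            "embedding": {"type": "knnVector", "dimensions": 1536, "similarity": "cosine"}
        },
    }
}

try:
    # PyMongo 4.6+: wrap the definition and index name in a SearchIndexModel.
    from pymongo.operations import SearchIndexModel

    model = SearchIndexModel(definition=vector_index_def, name="vector_index")
    collection.create_search_index(model)
except ImportError:
    # Older PyMongo: the diff falls back to the previous positional call.
    collection.create_search_index(vector_index_def, "vector_index")
```

Wrapping the name in the model object avoids passing it as a bare positional argument to `create_search_index`, which newer PyMongo versions interpret differently.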
@@ -335,7 +335,14 @@ class MongoDBTools:
        }
 
        try:
-            collection.create_search_index(index_definition, index_name)
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=index_definition, name=index_name)
+                collection.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                collection.create_search_index(index_definition, index_name)
             return {
                 "success": True,
                 "message": f"Vector search index '{index_name}' created successfully"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.154
+Version: 0.0.155
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -6,7 +6,7 @@ praisonaiagents/flow_display.py,sha256=E84J_H3h8L-AqL_F1JzEUInQYdjmIEuNL1LZr4__H
 praisonaiagents/main.py,sha256=NuAmE-ZrH4X0O9ysNA2AfxEQ8APPssO_ZR_f7h97QOo,17370
 praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
 praisonaiagents/agent/__init__.py,sha256=KBqW_augD-HcaV3FL88gUmhDCpwnSTavGENi7RqneTo,505
-praisonaiagents/agent/agent.py,sha256=pecp8Bt7_vXCB4MfUuMTZ3no4WipKOzFPGhFF5ADC5Y,144243
+praisonaiagents/agent/agent.py,sha256=sJ_mMp2v8ZCok32G-wzO71HEtjuBqlrEPttUXQgiui8,148240
 praisonaiagents/agent/context_agent.py,sha256=zNI2Waghn5eo8g3QM1Dc7ZNSr2xw41D87GIK81FjW-Y,107489
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=xKDhW8T1Y3e15lQpY6N2pdvBNJmAoWDibJa4BYa-Njs,10205
@@ -19,7 +19,7 @@ praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXC
 praisonaiagents/guardrails/llm_guardrail.py,sha256=czdOIoY-3PZOchX317tz4O2h2WYE42Ua4tqVzyuoNlI,4859
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
-praisonaiagents/knowledge/knowledge.py,sha256=OzK81oA6sjk9nAUWphS7AkXxvalrv2AHB4FtHjzYgxI,30115
+praisonaiagents/knowledge/knowledge.py,sha256=tog38b0SjFMoLuFBo0M1zHl9Dzzxa9YRv9FO7OZSpns,30587
 praisonaiagents/llm/__init__.py,sha256=SqdU1pRqPrR6jZeWYyDeTvmZKCACywk0v4P0k5Fuowk,1107
 praisonaiagents/llm/llm.py,sha256=155R1XHZLSDZsq67Hmglwc4N_SE2gKgid0KCFYNX3ww,176594
 praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
@@ -30,7 +30,7 @@ praisonaiagents/mcp/mcp.py,sha256=ChaSwLCcFBB9b8eNuj0DoKbK1EqpyF1T_7xz0FX-5-A,23
 praisonaiagents/mcp/mcp_http_stream.py,sha256=TDFWMJMo8VqLXtXCW73REpmkU3t9n7CAGMa9b4dhI-c,23366
 praisonaiagents/mcp/mcp_sse.py,sha256=KO10tAgZ5vSKeRhkJIZcdJ0ZmhRybS39i1KybWt4D7M,9128
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
-praisonaiagents/memory/memory.py,sha256=HjanP8sSi91wifvPkQDH40uGYdDZPOeir29fCu6y-b8,64584
+praisonaiagents/memory/memory.py,sha256=B2DMuvvr4W_EnrpoN16K73qSqYdduqhMcV8ASzyh2L8,65116
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=wXKZ2Z26vB9osmVbD5xqkUlUQRvWEpvL8j9hiuiHrQ0,78246
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
@@ -53,7 +53,7 @@ praisonaiagents/tools/duckduckgo_tools.py,sha256=ynlB5ZyWfHYjUq0JZXH12TganqTihgD
 praisonaiagents/tools/excel_tools.py,sha256=e2HqcwnyBueOyss0xEKxff3zB4w4sNWCOMXvZfbDYlE,11309
 praisonaiagents/tools/file_tools.py,sha256=N0fjTxxi89UupAvtEUwXjPrBvbppf8bwaNLfnjZ05q4,10824
 praisonaiagents/tools/json_tools.py,sha256=ApUYNuQ1qnbmYNCxSlx6Tth_H1yo8mhWtZ7Rr2WS6C4,16507
-praisonaiagents/tools/mongodb_tools.py,sha256=Y1n0X58nJkRYMb_ZjlXeH2PENbJlN-OLcfo44N647Dc,21073
+praisonaiagents/tools/mongodb_tools.py,sha256=gmRxV4In0aFVFYWNEOLzg9yP-jCuvFp4Lm_6vWjtlnQ,21505
 praisonaiagents/tools/newspaper_tools.py,sha256=NyhojNPeyULBGcAWGOT1X70qVkh3FgZrpH-S7PEmrwI,12667
 praisonaiagents/tools/pandas_tools.py,sha256=yzCeY4jetKrFIRA15Tr5OQ5d94T8DaSpzglx2UiWfPs,11092
 praisonaiagents/tools/python_tools.py,sha256=4dWJddySR0snCEcQudemg5qvbuNrUYxO-jXnzuWixqM,16461
@@ -67,7 +67,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.154.dist-info/METADATA,sha256=55cpHdKamPJ_tSTDYyXli-REfXLqafPKW7GH-Gf_Sdo,2146
-praisonaiagents-0.0.154.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.154.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.154.dist-info/RECORD,,
+praisonaiagents-0.0.155.dist-info/METADATA,sha256=mabCM-bwfCTmlaJuAUCxrdNXS0OWg8hUcIf70c60PC4,2146
+praisonaiagents-0.0.155.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.155.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.155.dist-info/RECORD,,