praisonaiagents 0.0.153__py3-none-any.whl → 0.0.155__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +123 -24
- praisonaiagents/knowledge/knowledge.py +11 -4
- praisonaiagents/llm/llm.py +10 -3
- praisonaiagents/mcp/mcp_http_stream.py +124 -17
- praisonaiagents/memory/memory.py +10 -2
- praisonaiagents/tools/mongodb_tools.py +8 -1
- {praisonaiagents-0.0.153.dist-info → praisonaiagents-0.0.155.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.153.dist-info → praisonaiagents-0.0.155.dist-info}/RECORD +10 -10
- {praisonaiagents-0.0.153.dist-info → praisonaiagents-0.0.155.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.153.dist-info → praisonaiagents-0.0.155.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -46,6 +46,21 @@ if TYPE_CHECKING:
     from ..handoff import Handoff
 
 class Agent:
+    @classmethod
+    def _configure_logging(cls):
+        """Configure logging settings once for all agent instances."""
+        # Configure logging to suppress unwanted outputs
+        logging.getLogger("litellm").setLevel(logging.WARNING)
+
+        # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
+        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+        if loglevel == 'DEBUG':
+            logging.getLogger("httpx").setLevel(logging.INFO)
+            logging.getLogger("httpcore").setLevel(logging.INFO)
+        else:
+            logging.getLogger("httpx").setLevel(logging.WARNING)
+            logging.getLogger("httpcore").setLevel(logging.WARNING)
+
     def _generate_tool_definition(self, function_name):
         """
         Generate a tool definition from a function name by inspecting the function.
@@ -332,17 +347,10 @@ class Agent:
         if all(x is None for x in [name, role, goal, backstory, instructions]):
             raise ValueError("At least one of name, role, goal, backstory, or instructions must be provided")
 
-        # Configure logging
-
-
-
-        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
-        if loglevel == 'DEBUG':
-            logging.getLogger("httpx").setLevel(logging.INFO)
-            logging.getLogger("httpcore").setLevel(logging.INFO)
-        else:
-            logging.getLogger("httpx").setLevel(logging.WARNING)
-            logging.getLogger("httpcore").setLevel(logging.WARNING)
+        # Configure logging only once at the class level
+        if not hasattr(Agent, '_logging_configured'):
+            Agent._configure_logging()
+            Agent._logging_configured = True
 
         # If instructions are provided, use them to set role, goal, and backstory
         if instructions:
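The two hunks above move logging setup from per-instance code into a one-time, class-level configuration guarded by a flag. A minimal standalone sketch of the same guard pattern (the `Service` class and names here are illustrative, not the package's API):

```python
import logging
import os

class Service:
    _logging_configured = False  # shared across all instances

    @classmethod
    def _configure_logging(cls):
        # Silence noisy HTTP client loggers unless LOGLEVEL=DEBUG is set
        debug = os.environ.get("LOGLEVEL", "INFO").upper() == "DEBUG"
        level = logging.INFO if debug else logging.WARNING
        for name in ("httpx", "httpcore"):
            logging.getLogger(name).setLevel(level)

    def __init__(self):
        # Only the first instance pays the configuration cost
        if not Service._logging_configured:
            Service._configure_logging()
            Service._logging_configured = True
```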
@@ -383,6 +391,7 @@
                     llm_config['base_url'] = base_url
                 if api_key:
                     llm_config['api_key'] = api_key
+                llm_config['metrics'] = metrics
                 self.llm_instance = LLM(**llm_config)
             else:
                 # Create LLM with model string and base_url
@@ -390,7 +399,8 @@
                 self.llm_instance = LLM(
                     model=model_name,
                     base_url=base_url,
-                    api_key=api_key
+                    api_key=api_key,
+                    metrics=metrics
                 )
             self._using_custom_llm = True
         except ImportError as e:
@@ -406,6 +416,9 @@
             if api_key and 'api_key' not in llm:
                 llm = llm.copy()
                 llm['api_key'] = api_key
+            # Add metrics parameter
+            llm = llm.copy()
+            llm['metrics'] = metrics
             self.llm_instance = LLM(**llm)  # Pass all dict items as kwargs
             self._using_custom_llm = True
         except ImportError as e:
@@ -421,6 +434,7 @@
             llm_params = {'model': llm}
             if api_key:
                 llm_params['api_key'] = api_key
+            llm_params['metrics'] = metrics
             self.llm_instance = LLM(**llm_params)
             self._using_custom_llm = True
 
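The four hunks above thread a new `metrics` flag from the `Agent` constructor into every path that builds an `LLM` instance. Assuming `metrics` is accepted as an `Agent` keyword argument, which the plumbing above implies but the diff does not show directly, enabling it would look roughly like:

```python
from praisonaiagents import Agent

# Hypothetical usage: the metrics flag is forwarded to the underlying LLM,
# which then records token usage for each completion.
agent = Agent(
    instructions="Summarize the latest AI news",
    llm="gpt-4o-mini",
    metrics=True,
)
```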
@@ -474,7 +488,7 @@
         self.reflect_prompt = reflect_prompt
         # Use the same model selection logic for reflect_llm
         self.reflect_llm = reflect_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
-        self.
+        self._console = None  # Lazy load console when needed
 
         # Initialize system prompt
         self.system_prompt = f"""{self.backstory}\n
@@ -482,8 +496,8 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """
 
-        #
-        self.
+        # Lazy generate unique ID when needed
+        self._agent_id = None
 
         # Store user_id
         self.user_id = user_id or "praison"
@@ -494,6 +508,14 @@ Your Goal: {self.goal}
         self.max_guardrail_retries = max_guardrail_retries
         self._guardrail_fn = None
         self._setup_guardrail()
+
+        # Cache for system prompts and formatted tools
+        # Note: In single-threaded usage (common case), these are safe
+        # For multi-threaded usage, consider using threading.Lock
+        self._system_prompt_cache = {}
+        self._formatted_tools_cache = {}
+        # Limit cache size to prevent unbounded growth
+        self._max_cache_size = 100
 
         # Process handoffs and convert them to tools
        self.handoffs = handoffs if handoffs else []
@@ -502,16 +524,23 @@ Your Goal: {self.goal}
         # Check if knowledge parameter has any values
         if not knowledge:
             self.knowledge = None
+            self._knowledge_sources = None
+            self._knowledge_processed = True  # No knowledge to process
         else:
-            #
-
-            self.
-
-            #
-            if knowledge:
-                for source in knowledge:
-                    self._process_knowledge(source)
+            # Store knowledge sources for lazy processing
+            self._knowledge_sources = knowledge
+            self._knowledge_processed = False
+            self._knowledge_config = knowledge_config
+            self.knowledge = None  # Will be initialized on first use
 
+    @property
+    def console(self):
+        """Lazily initialize Rich Console only when needed."""
+        if self._console is None:
+            from rich.console import Console
+            self._console = Console()
+        return self._console
+
     @property
     def _openai_client(self):
         """Lazily initialize OpenAI client only when needed."""
@@ -530,6 +559,14 @@ Your Goal: {self.goal}
                 raise e
         return self.__openai_client
 
+    @property
+    def agent_id(self):
+        """Lazily generate agent ID when first accessed."""
+        if self._agent_id is None:
+            import uuid
+            self._agent_id = str(uuid.uuid4())
+        return self._agent_id
+
     @property
     def llm_model(self):
         """Unified property to get the LLM model regardless of configuration type.
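Both `console` and `agent_id` follow the same lazy-property idiom: pay the import or generation cost on first access, then reuse the stored value. The standard library's `functools.cached_property` captures the identical behavior in one decorator, as this sketch shows:

```python
from functools import cached_property
import uuid

class LazyExample:
    @cached_property
    def agent_id(self) -> str:
        # Computed once on first access, then stored on the instance
        return str(uuid.uuid4())

obj = LazyExample()
assert obj.agent_id == obj.agent_id  # second access reuses the cached value
```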
@@ -548,6 +585,19 @@ Your Goal: {self.goal}
         # Default fallback
         return "gpt-4o"
 
+    def _ensure_knowledge_processed(self):
+        """Ensure knowledge is initialized and processed when first accessed."""
+        if not self._knowledge_processed and self._knowledge_sources:
+            # Initialize Knowledge with provided or default config
+            from praisonaiagents.knowledge import Knowledge
+            self.knowledge = Knowledge(self._knowledge_config or None)
+
+            # Process all knowledge sources
+            for source in self._knowledge_sources:
+                self._process_knowledge(source)
+
+            self._knowledge_processed = True
+
     def _process_knowledge(self, knowledge_item):
         """Process and store knowledge from a file path, URL, or string."""
         try:
@@ -734,6 +784,23 @@ Your Goal: {self.goal}
 
         return current_response
 
+    def _get_tools_cache_key(self, tools):
+        """Generate a cache key for tools list."""
+        if tools is None:
+            return "none"
+        if not tools:
+            return "empty"
+        # Create a simple hash based on tool names
+        tool_names = []
+        for tool in tools:
+            if callable(tool) and hasattr(tool, '__name__'):
+                tool_names.append(tool.__name__)
+            elif isinstance(tool, dict) and 'function' in tool and 'name' in tool['function']:
+                tool_names.append(tool['function']['name'])
+            elif isinstance(tool, str):
+                tool_names.append(tool)
+        return "|".join(sorted(tool_names))
+
     def _build_system_prompt(self, tools=None):
         """Build the system prompt with tool information.
 
@@ -745,6 +812,13 @@ Your Goal: {self.goal}
         """
         if not self.use_system_prompt:
             return None
+
+        # Check cache first
+        tools_key = self._get_tools_cache_key(tools)
+        cache_key = f"{self.role}:{self.goal}:{tools_key}"
+
+        if cache_key in self._system_prompt_cache:
+            return self._system_prompt_cache[cache_key]
 
         system_prompt = f"""{self.backstory}\n
 Your Role: {self.role}\n
@@ -779,6 +853,10 @@ Your Goal: {self.goal}"""
         if tool_names:
             system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."
 
+        # Cache the generated system prompt
+        # Simple cache size limit to prevent unbounded growth
+        if len(self._system_prompt_cache) < self._max_cache_size:
+            self._system_prompt_cache[cache_key] = system_prompt
         return system_prompt
 
     def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None, tools=None):
@@ -854,6 +932,11 @@ Your Goal: {self.goal}"""
 
         if not tools:
             return []
+
+        # Check cache first
+        tools_key = self._get_tools_cache_key(tools)
+        if tools_key in self._formatted_tools_cache:
+            return self._formatted_tools_cache[tools_key]
 
         formatted_tools = []
         for tool in tools:
@@ -903,7 +986,11 @@ Your Goal: {self.goal}"""
         except (TypeError, ValueError) as e:
             logging.error(f"Tools are not JSON serializable: {e}")
             return []
-
+
+        # Cache the formatted tools
+        # Simple cache size limit to prevent unbounded growth
+        if len(self._formatted_tools_cache) < self._max_cache_size:
+            self._formatted_tools_cache[tools_key] = formatted_tools
         return formatted_tools
 
     def generate_task(self) -> 'Task':
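Both caches key on a stable, order-insensitive string derived from tool names and stop inserting once `_max_cache_size` entries exist. A condensed sketch of that bounded-cache pattern (illustrative names, not the package's API):

```python
class BoundedCache:
    def __init__(self, max_size: int = 100):
        self._cache = {}
        self._max_size = max_size

    @staticmethod
    def key_for(tools) -> str:
        # Stable key: sorted tool names joined with a separator
        if tools is None:
            return "none"
        if not tools:
            return "empty"
        names = [t.__name__ if callable(t) else str(t) for t in tools]
        return "|".join(sorted(names))

    def get_or_build(self, tools, build):
        key = self.key_for(tools)
        if key in self._cache:
            return self._cache[key]
        value = build(tools)
        # Bounded insert: once full, results are recomputed but never cached
        if len(self._cache) < self._max_size:
            self._cache[key] = value
        return value
```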
@@ -1274,6 +1361,9 @@ Your Goal: {self.goal}"""
         if stream is None:
             stream = self.stream
         # Search for existing knowledge if any knowledge is provided
+        if self._knowledge_sources and not self._knowledge_processed:
+            self._ensure_knowledge_processed()
+
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
             if search_results:
@@ -1632,6 +1722,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             tools = self.tools
 
         # Search for existing knowledge if any knowledge is provided
+        if self._knowledge_sources and not self._knowledge_processed:
+            self._ensure_knowledge_processed()
+
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
             if search_results:
@@ -2025,6 +2118,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         if self._using_custom_llm:
             # Handle knowledge search
             actual_prompt = prompt
+            if self._knowledge_sources and not self._knowledge_processed:
+                self._ensure_knowledge_processed()
+
             if self.knowledge:
                 search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
                 if search_results:
@@ -2104,6 +2200,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         # For OpenAI-style models, implement proper streaming without display
         # Handle knowledge search
         actual_prompt = prompt
+        if self._knowledge_sources and not self._knowledge_processed:
+            self._ensure_knowledge_processed()
+
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
             if search_results:
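Each of the four entry points above runs the same cheap guard before consulting knowledge, so source processing happens at most once and only when a caller actually needs it. A compressed sketch of the deferral idiom:

```python
class KnowledgeHolder:
    def __init__(self, sources=None):
        self._sources = sources
        self._processed = not sources  # nothing to do when no sources given
        self.knowledge = None

    def _ensure_processed(self):
        if not self._processed and self._sources:
            # Stand-in for real indexing work (embedding, chunking, storage)
            self.knowledge = {src: f"indexed:{src}" for src in self._sources}
            self._processed = True

    def query(self, prompt: str):
        # The guard is cheap on every call; the work runs at most once
        self._ensure_processed()
        return self.knowledge
```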
praisonaiagents/knowledge/knowledge.py
CHANGED
@@ -59,11 +59,11 @@ class MongoDBMemory:
         self.collection_name = self.vector_store_config.get("collection", "knowledge_base")
         self.use_vector_search = self.vector_store_config.get("use_vector_search", True)
 
+        # Initialize embedding model before MongoDB to ensure embedding_model_name is available
+        self._init_embedding_model()
+
         # Initialize MongoDB client
         self._init_mongodb()
-
-        # Initialize embedding model
-        self._init_embedding_model()
 
     def _init_mongodb(self):
         """Initialize MongoDB client and collection."""
@@ -159,7 +159,14 @@ class MongoDBMemory:
                 }
             }
 
-
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=vector_index_def, name="vector_index")
+                self.collection.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                self.collection.create_search_index(vector_index_def, "vector_index")
 
         except Exception as e:
             logging.warning(f"Could not create vector search index: {e}")
praisonaiagents/llm/llm.py
CHANGED
@@ -260,6 +260,7 @@ class LLM:
         self.max_reflect = extra_settings.get('max_reflect', 3)
         self.min_reflect = extra_settings.get('min_reflect', 1)
         self.reasoning_steps = extra_settings.get('reasoning_steps', False)
+        self.metrics = extra_settings.get('metrics', False)
 
         # Token tracking
         self.last_token_metrics: Optional[TokenMetrics] = None
@@ -954,7 +955,8 @@ class LLM:
                 final_response = resp
 
             # Track token usage
-            self.
+            if self.metrics:
+                self._track_token_usage(final_response, model)
 
             # Execute callbacks and display based on verbose setting
             generation_time_val = time.time() - current_time
@@ -1135,7 +1137,8 @@ class LLM:
                 response_text = response_content if response_content is not None else ""
 
                 # Track token usage
-
+                if self.metrics:
+                    self._track_token_usage(final_response, self.model)
 
                 # Execute callbacks and display based on verbose setting
                 if verbose and not interaction_displayed:
@@ -1284,7 +1287,8 @@ class LLM:
                 response_text = response_content if response_content is not None else ""
 
                 # Track token usage
-
+                if self.metrics:
+                    self._track_token_usage(final_response, self.model)
 
                 # Execute callbacks and display based on verbose setting
                 if verbose and not interaction_displayed:
@@ -2887,6 +2891,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         if not TokenMetrics or not _token_collector:
             return None
 
+        # Note: metrics check moved to call sites for performance
+        # This method should only be called when self.metrics=True
+
         try:
             usage = response.get("usage", {})
             if not usage:
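With `metrics` defaulting to False, token tracking becomes opt-in and the hot path skips `_track_token_usage` entirely. Assuming `LLM` is importable from `praisonaiagents.llm` (as the RECORD below lists) and accepts the flag through its settings (as the hunk at line 260 shows), enabling it might look like:

```python
from praisonaiagents.llm import LLM

# Hypothetical usage: with metrics=True, each completion records token
# usage; with the default False, the bookkeeping is skipped entirely.
llm = LLM(model="gpt-4o-mini", metrics=True)
```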
praisonaiagents/mcp/mcp_http_stream.py
CHANGED
@@ -5,12 +5,14 @@ over HTTP Stream transport, implementing the Streamable HTTP transport protocol.
 """
 
 import asyncio
+import atexit
 import logging
 import threading
 import inspect
 import json
 import time
 import uuid
+import weakref
 from typing import List, Dict, Any, Optional, Callable, Iterable, Union
 from urllib.parse import urlparse, urljoin
 
@@ -25,6 +27,10 @@ logger = logging.getLogger("mcp-http-stream")
 # Global event loop for async operations
 _event_loop = None
 
+# Global registry of active clients for cleanup
+_active_clients = weakref.WeakSet()
+_cleanup_registered = False
+
 def get_event_loop():
     """Get or create a global event loop."""
     global _event_loop
@@ -34,6 +40,31 @@ def get_event_loop():
     return _event_loop
 
 
+def _cleanup_all_clients():
+    """Clean up all active clients at program exit."""
+    if not _active_clients:
+        return
+
+    # Create a copy to avoid modification during iteration
+    clients_to_cleanup = list(_active_clients)
+
+    for client in clients_to_cleanup:
+        try:
+            if hasattr(client, '_force_cleanup'):
+                client._force_cleanup()
+        except Exception:
+            # Ignore exceptions during cleanup
+            pass
+
+
+def _register_cleanup():
+    """Register the cleanup function to run at program exit."""
+    global _cleanup_registered
+    if not _cleanup_registered:
+        atexit.register(_cleanup_all_clients)
+        _cleanup_registered = True
+
+
 class HTTPStreamMCPTool:
     """A wrapper for an MCP tool that can be used with praisonaiagents."""
 
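The module-level registry uses a `weakref.WeakSet` so the atexit hook never keeps clients alive or touches already-collected ones, and the hook is registered exactly once. A compact sketch of the pattern:

```python
import atexit
import weakref

_active = weakref.WeakSet()  # entries disappear when clients are garbage collected
_hook_registered = False

def _register(client):
    global _hook_registered
    _active.add(client)
    if not _hook_registered:
        # One process-wide hook, installed when the first client appears
        atexit.register(lambda: [c.close() for c in list(_active)])
        _hook_registered = True

class Client:
    def __init__(self):
        _register(self)

    def close(self):
        print(f"closing {self!r}")
```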
@@ -388,6 +419,7 @@ class HTTPStreamMCPClient:
         self.session = None
         self.tools = []
         self.transport = None
+        self._closed = False
 
         # Set up logging
         if debug:
@@ -396,6 +428,10 @@ class HTTPStreamMCPClient:
             # Set to WARNING by default to hide INFO messages
             logger.setLevel(logging.WARNING)
 
+        # Register this client for cleanup and setup exit handler
+        _active_clients.add(self)
+        _register_cleanup()
+
         self._initialize()
 
     def _initialize(self):
@@ -456,6 +492,10 @@ class HTTPStreamMCPClient:
                 timeout=self.timeout
             )
             tools.append(wrapper)
+
+        # Set up cleanup finalizer now that transport and session are created
+        self._finalizer = weakref.finalize(self, self._static_cleanup,
+                                           self.transport, self._session_context)
 
         return tools
 
@@ -477,30 +517,97 @@ class HTTPStreamMCPClient:
 
     async def aclose(self):
         """Async cleanup method to close all resources."""
-        if self.
-
-
+        if self._closed:
+            return
+
+        self._closed = True
+
+        try:
+            if hasattr(self, '_session_context') and self._session_context:
+                await self._session_context.__aexit__(None, None, None)
+        except Exception:
+            pass
+
+        try:
+            if self.transport:
+                await self.transport.__aexit__(None, None, None)
+        except Exception:
+            pass
 
     def close(self):
         """Synchronous cleanup method to close all resources."""
-        if
-
-
-
-
-
-
-
-
+        if self._closed:
+            return
+
+        try:
+            # Use the global event loop for non-blocking cleanup
+            loop = get_event_loop()
+            if not loop.is_closed():
+                # Schedule cleanup without blocking - add callback for fallback
+                future = asyncio.run_coroutine_threadsafe(self.aclose(), loop)
+
+                # Add a completion callback for fallback cleanup if async fails
+                def _cleanup_callback(fut):
+                    try:
+                        fut.result()  # This will raise if aclose() failed
+                    except Exception:
+                        # If async cleanup failed, try force cleanup
+                        try:
+                            self._force_cleanup()
+                        except Exception:
+                            pass
+
+                future.add_done_callback(_cleanup_callback)
+            else:
+                # Event loop is closed, use force cleanup immediately
+                self._force_cleanup()
+        except Exception:
+            # If async scheduling fails, try force cleanup
+            self._force_cleanup()
+
+    def _force_cleanup(self):
+        """Force cleanup of resources synchronously (for emergencies)."""
+        if self._closed:
+            return
+
+        self._closed = True
+
+        # Force close transport session if it exists
+        try:
+            if self.transport and hasattr(self.transport, '_session') and self.transport._session:
+                session = self.transport._session
+                if not session.closed:
+                    # Force close the aiohttp session
+                    if hasattr(session, '_connector') and session._connector:
+                        try:
+                            # Close connector directly
+                            session._connector.close()
+                        except Exception:
+                            pass
+        except Exception:
+            pass
+
+    @staticmethod
+    def _static_cleanup(transport, session_context):
+        """Static cleanup method for weakref finalizer."""
+        try:
+            # This is called by weakref finalizer, so we can't do async operations
+            # Just ensure any session is closed if possible
+            if transport and hasattr(transport, '_session') and transport._session:
+                session = transport._session
+                if not session.closed and hasattr(session, '_connector'):
+                    try:
+                        session._connector.close()
+                    except Exception:
+                        pass
        except Exception:
            pass
 
     def __del__(self):
         """Cleanup when object is garbage collected."""
         try:
-
-
-            self.close()
+            if not self._closed:
+                self._force_cleanup()
         except Exception:
             # Never raise exceptions in __del__
             pass
praisonaiagents/memory/memory.py
CHANGED
@@ -377,8 +377,16 @@ class Memory:
 
         # Create vector indexes for both short and long term collections
         try:
-
-
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=vector_index_def, name="vector_index")
+                self.mongo_short_term.create_search_index(search_index_model)
+                self.mongo_long_term.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                self.mongo_short_term.create_search_index(vector_index_def, "vector_index")
+                self.mongo_long_term.create_search_index(vector_index_def, "vector_index")
             self._log_verbose("Vector search indexes created successfully")
         except Exception as e:
             self._log_verbose(f"Could not create vector search indexes: {e}", logging.WARNING)
praisonaiagents/tools/mongodb_tools.py
CHANGED
@@ -335,7 +335,14 @@ class MongoDBTools:
             }
 
         try:
-
+            # Use SearchIndexModel for PyMongo 4.6+ compatibility
+            try:
+                from pymongo.operations import SearchIndexModel
+                search_index_model = SearchIndexModel(definition=index_definition, name=index_name)
+                collection.create_search_index(search_index_model)
+            except ImportError:
+                # Fallback for older PyMongo versions
+                collection.create_search_index(index_definition, index_name)
             return {
                 "success": True,
                 "message": f"Vector search index '{index_name}' created successfully"
{praisonaiagents-0.0.153.dist-info → praisonaiagents-0.0.155.dist-info}/RECORD
CHANGED
@@ -6,7 +6,7 @@ praisonaiagents/flow_display.py,sha256=E84J_H3h8L-AqL_F1JzEUInQYdjmIEuNL1LZr4__H
 praisonaiagents/main.py,sha256=NuAmE-ZrH4X0O9ysNA2AfxEQ8APPssO_ZR_f7h97QOo,17370
 praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
 praisonaiagents/agent/__init__.py,sha256=KBqW_augD-HcaV3FL88gUmhDCpwnSTavGENi7RqneTo,505
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=sJ_mMp2v8ZCok32G-wzO71HEtjuBqlrEPttUXQgiui8,148240
 praisonaiagents/agent/context_agent.py,sha256=zNI2Waghn5eo8g3QM1Dc7ZNSr2xw41D87GIK81FjW-Y,107489
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=xKDhW8T1Y3e15lQpY6N2pdvBNJmAoWDibJa4BYa-Njs,10205
@@ -19,18 +19,18 @@ praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXC
 praisonaiagents/guardrails/llm_guardrail.py,sha256=czdOIoY-3PZOchX317tz4O2h2WYE42Ua4tqVzyuoNlI,4859
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
-praisonaiagents/knowledge/knowledge.py,sha256=
+praisonaiagents/knowledge/knowledge.py,sha256=tog38b0SjFMoLuFBo0M1zHl9Dzzxa9YRv9FO7OZSpns,30587
 praisonaiagents/llm/__init__.py,sha256=SqdU1pRqPrR6jZeWYyDeTvmZKCACywk0v4P0k5Fuowk,1107
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=155R1XHZLSDZsq67Hmglwc4N_SE2gKgid0KCFYNX3ww,176594
 praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
 praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
 praisonaiagents/llm/openai_client.py,sha256=3EVjIs3tnBNFDy_4ZxX9DJVq54kS0FMm38m5Gkpun7U,57234
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=ChaSwLCcFBB9b8eNuj0DoKbK1EqpyF1T_7xz0FX-5-A,23264
-praisonaiagents/mcp/mcp_http_stream.py,sha256=
+praisonaiagents/mcp/mcp_http_stream.py,sha256=TDFWMJMo8VqLXtXCW73REpmkU3t9n7CAGMa9b4dhI-c,23366
 praisonaiagents/mcp/mcp_sse.py,sha256=KO10tAgZ5vSKeRhkJIZcdJ0ZmhRybS39i1KybWt4D7M,9128
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
-praisonaiagents/memory/memory.py,sha256=
+praisonaiagents/memory/memory.py,sha256=B2DMuvvr4W_EnrpoN16K73qSqYdduqhMcV8ASzyh2L8,65116
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=wXKZ2Z26vB9osmVbD5xqkUlUQRvWEpvL8j9hiuiHrQ0,78246
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
@@ -53,7 +53,7 @@ praisonaiagents/tools/duckduckgo_tools.py,sha256=ynlB5ZyWfHYjUq0JZXH12TganqTihgD
 praisonaiagents/tools/excel_tools.py,sha256=e2HqcwnyBueOyss0xEKxff3zB4w4sNWCOMXvZfbDYlE,11309
 praisonaiagents/tools/file_tools.py,sha256=N0fjTxxi89UupAvtEUwXjPrBvbppf8bwaNLfnjZ05q4,10824
 praisonaiagents/tools/json_tools.py,sha256=ApUYNuQ1qnbmYNCxSlx6Tth_H1yo8mhWtZ7Rr2WS6C4,16507
-praisonaiagents/tools/mongodb_tools.py,sha256=
+praisonaiagents/tools/mongodb_tools.py,sha256=gmRxV4In0aFVFYWNEOLzg9yP-jCuvFp4Lm_6vWjtlnQ,21505
 praisonaiagents/tools/newspaper_tools.py,sha256=NyhojNPeyULBGcAWGOT1X70qVkh3FgZrpH-S7PEmrwI,12667
 praisonaiagents/tools/pandas_tools.py,sha256=yzCeY4jetKrFIRA15Tr5OQ5d94T8DaSpzglx2UiWfPs,11092
 praisonaiagents/tools/python_tools.py,sha256=4dWJddySR0snCEcQudemg5qvbuNrUYxO-jXnzuWixqM,16461
@@ -67,7 +67,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.155.dist-info/METADATA,sha256=mabCM-bwfCTmlaJuAUCxrdNXS0OWg8hUcIf70c60PC4,2146
+praisonaiagents-0.0.155.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.155.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.155.dist-info/RECORD,,
{praisonaiagents-0.0.153.dist-info → praisonaiagents-0.0.155.dist-info}/WHEEL
File without changes

{praisonaiagents-0.0.153.dist-info → praisonaiagents-0.0.155.dist-info}/top_level.txt
File without changes