praisonaiagents 0.0.142__tar.gz → 0.0.143__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/PKG-INFO +5 -1
  2. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/__init__.py +12 -3
  3. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agent/agent.py +17 -0
  4. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agent/image_agent.py +19 -4
  5. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/knowledge.py +360 -1
  6. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/llm/openai_client.py +144 -0
  7. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/mcp/mcp.py +54 -14
  8. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/memory/memory.py +390 -12
  9. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/telemetry/__init__.py +7 -0
  10. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/telemetry/telemetry.py +253 -23
  11. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/__init__.py +17 -1
  12. praisonaiagents-0.0.143/praisonaiagents/tools/mongodb_tools.py +610 -0
  13. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/PKG-INFO +5 -1
  14. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/SOURCES.txt +1 -0
  15. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/requires.txt +5 -0
  16. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/pyproject.toml +9 -2
  17. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/README.md +0 -0
  18. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agent/__init__.py +0 -0
  19. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agent/handoff.py +0 -0
  20. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agent/router_agent.py +0 -0
  21. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agents/__init__.py +0 -0
  22. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agents/agents.py +0 -0
  23. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agents/autoagents.py +0 -0
  24. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/approval.py +0 -0
  25. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/guardrails/__init__.py +0 -0
  26. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  27. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  28. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/__init__.py +0 -0
  29. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/chunking.py +0 -0
  30. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/llm/__init__.py +0 -0
  31. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/llm/llm.py +0 -0
  32. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/llm/model_capabilities.py +0 -0
  33. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/llm/model_router.py +0 -0
  34. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/main.py +0 -0
  35. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/mcp/__init__.py +0 -0
  36. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  37. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/mcp/mcp_sse.py +0 -0
  38. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/memory/__init__.py +0 -0
  39. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/process/__init__.py +0 -0
  40. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/process/process.py +0 -0
  41. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/session.py +0 -0
  42. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/task/__init__.py +0 -0
  43. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/task/task.py +0 -0
  44. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/telemetry/integration.py +0 -0
  45. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/README.md +0 -0
  46. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/arxiv_tools.py +0 -0
  47. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/calculator_tools.py +0 -0
  48. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/csv_tools.py +0 -0
  49. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/duckdb_tools.py +0 -0
  50. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  51. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/excel_tools.py +0 -0
  52. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/file_tools.py +0 -0
  53. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/json_tools.py +0 -0
  54. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/newspaper_tools.py +0 -0
  55. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/pandas_tools.py +0 -0
  56. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/python_tools.py +0 -0
  57. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/searxng_tools.py +0 -0
  58. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/shell_tools.py +0 -0
  59. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/spider_tools.py +0 -0
  60. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/test.py +0 -0
  61. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/tools.py +0 -0
  62. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  63. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  64. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/xml_tools.py +0 -0
  65. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/yaml_tools.py +0 -0
  66. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/tools/yfinance_tools.py +0 -0
  67. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  68. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/top_level.txt +0 -0
  69. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/setup.cfg +0 -0
  70. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test-graph-memory.py +0 -0
  71. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test.py +0 -0
  72. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_fix_comprehensive.py +0 -0
  73. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_handoff_compatibility.py +0 -0
  74. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_http_stream_basic.py +0 -0
  75. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_llm_self_reflection_direct.py +0 -0
  76. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_ollama_async_fix.py +0 -0
  77. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_ollama_fix.py +0 -0
  78. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_ollama_sequential_fix.py +0 -0
  79. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_posthog_fixed.py +0 -0
  80. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_self_reflection_comprehensive.py +0 -0
  81. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_self_reflection_fix_simple.py +0 -0
  82. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_self_reflection_fix_verification.py +0 -0
  83. {praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/tests/test_validation_feedback.py +0 -0
{praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.142
+ Version: 0.0.143
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -32,6 +32,9 @@ Requires-Dist: fastapi>=0.115.0; extra == "api"
  Requires-Dist: uvicorn>=0.34.0; extra == "api"
  Provides-Extra: telemetry
  Requires-Dist: posthog>=3.0.0; extra == "telemetry"
+ Provides-Extra: mongodb
+ Requires-Dist: pymongo>=4.6.3; extra == "mongodb"
+ Requires-Dist: motor>=3.4.0; extra == "mongodb"
  Provides-Extra: all
  Requires-Dist: praisonaiagents[memory]; extra == "all"
  Requires-Dist: praisonaiagents[knowledge]; extra == "all"
@@ -40,3 +43,4 @@ Requires-Dist: praisonaiagents[llm]; extra == "all"
  Requires-Dist: praisonaiagents[mcp]; extra == "all"
  Requires-Dist: praisonaiagents[api]; extra == "all"
  Requires-Dist: praisonaiagents[telemetry]; extra == "all"
+ Requires-Dist: praisonaiagents[mongodb]; extra == "all"
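
The new mongodb extra pulls in both the sync and async MongoDB drivers. A minimal sketch of a runtime availability check before enabling the MongoDB-backed stores (the flag pattern is an assumption, mirroring the optional-import style this release uses for MCP):

# Hedged sketch: confirm the drivers declared by the "mongodb" extra
# (pymongo>=4.6.3, motor>=3.4.0) are importable in this environment.
try:
    import pymongo  # sync driver pulled in by praisonaiagents[mongodb]
    import motor    # async driver pulled in by praisonaiagents[mongodb]
    MONGODB_AVAILABLE = True
except ImportError:
    MONGODB_AVAILABLE = False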
{praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/__init__.py

@@ -34,7 +34,13 @@ from .tools.tools import Tools
  from .agents.autoagents import AutoAgents
  from .knowledge.knowledge import Knowledge
  from .knowledge.chunking import Chunking
- from .mcp.mcp import MCP
+ # MCP support (optional)
+ try:
+     from .mcp.mcp import MCP
+     _mcp_available = True
+ except ImportError:
+     _mcp_available = False
+     MCP = None
  from .session import Session
  from .memory.memory import Memory
  from .guardrails import GuardrailResult, LLMGuardrail
@@ -124,7 +130,6 @@ __all__ = [
      'async_display_callbacks',
      'Knowledge',
      'Chunking',
-     'MCP',
      'GuardrailResult',
      'LLMGuardrail',
      'Handoff',
@@ -137,4 +142,8 @@ __all__ = [
      'disable_telemetry',
      'MinimalTelemetry',
      'TelemetryCollector'
- ]
+ ]
+ 
+ # Add MCP to __all__ if available
+ if _mcp_available:
+     __all__.append('MCP')
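
Since MCP is now bound to None when its dependencies are missing, callers should guard before constructing it. A minimal sketch (the server URL is a hypothetical placeholder, not from this diff):

from praisonaiagents import MCP

if MCP is not None:
    # MCP extra is importable; safe to construct (URL is a placeholder).
    mcp_tools = MCP("http://localhost:8080/sse")
else:
    mcp_tools = None  # degrade gracefully without MCP support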
{praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agent/agent.py

@@ -1383,6 +1383,23 @@ Your Goal: {self.goal}"""
  # Rollback chat history on guardrail failure
  self.chat_history = self.chat_history[:chat_history_length]
  return None
+ # Only consider satisfactory after minimum reflections
+ if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+     if self.verbose:
+         display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
+     # User message already added before LLM call via _build_messages
+     self.chat_history.append({"role": "assistant", "content": response_text})
+     # Apply guardrail validation after satisfactory reflection
+     try:
+         validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
+         # Execute callback after validation
+         self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
+         return validated_response
+     except Exception as e:
+         logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
+         # Rollback chat history on guardrail failure
+         self.chat_history = self.chat_history[:chat_history_length]
+         return None
  
  if not self.self_reflect:
      # User message already added before LLM call via _build_messages
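
The new branch means a "yes" verdict from self-reflection is only honored once at least min_reflect reflections have run. A self-contained toy of that gating (the names reflection_count and min_reflect come from the diff; everything else is illustrative):

from dataclasses import dataclass

@dataclass
class Reflection:
    satisfactory: str  # "yes" or "no", as in reflection_output

min_reflect, max_reflect = 2, 5
reflection_count = 0
while reflection_count < max_reflect:
    verdict = Reflection(satisfactory="yes")  # stand-in for the LLM reflection call
    # Mirrors the new check: "yes" counts only after the minimum reflections.
    if verdict.satisfactory == "yes" and reflection_count >= min_reflect - 1:
        break
    reflection_count += 1
print(f"accepted after {reflection_count + 1} reflections")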
{praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/agent/image_agent.py

@@ -151,9 +151,23 @@ class ImageAgent(Agent):
  # Use the model name in config
  config['model'] = model_name
  
- # Check if we're using a Gemini model and remove unsupported parameters
- if 'gemini' in model_name.lower():
-     # Gemini models don't support response_format parameter
+ # Filter parameters based on the provider to avoid unsupported parameter errors
+ custom_llm_provider = None
+ try:
+     import litellm
+     _, custom_llm_provider, _, _ = litellm.get_llm_provider(model=model_name)
+ except (ImportError, AttributeError, ValueError, TypeError, Exception) as e:
+     # Log the specific error for debugging but continue with string-based fallback
+     # Include generic Exception to catch provider-specific errors like BadRequestError
+     logging.debug(f"Provider detection failed for model '{model_name}': {e}")
+ 
+ if custom_llm_provider == "vertex_ai":
+     # Vertex AI only supports 'n' and 'size' parameters for image generation
+     supported_params = ['n', 'size', 'model']
+     config = {k: v for k, v in config.items() if k in supported_params}
+ elif custom_llm_provider == "gemini" or (custom_llm_provider is None and 'gemini' in model_name.lower()):
+     # Gemini provider doesn't support response_format parameter
+     # Apply this filter if provider is explicitly 'gemini' or as fallback for gemini models
      config.pop('response_format', None)
  
  with Progress(
@@ -165,9 +179,10 @@ class ImageAgent(Agent):
  # Add a task for image generation
  task = progress.add_task(f"[cyan]Generating image with {model_name}...", total=None)
  
- # Use litellm's image generation
+ # Use litellm's image generation with parameter dropping enabled as safety net
  response = self.litellm(
      prompt=prompt,
+     drop_params=True,
      **config
  )
  
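
The provider lookup and the drop_params safety net can be exercised in isolation. A hedged sketch (the model id is a placeholder, and the image_generation call is commented out because it needs cloud credentials):

import litellm

model_name = "vertex_ai/imagegeneration@006"  # placeholder model id
try:
    _, provider, _, _ = litellm.get_llm_provider(model=model_name)
except Exception:
    provider = None  # fall back to substring matching, as ImageAgent does

config = {"model": model_name, "n": 1, "size": "1024x1024", "response_format": "url"}
if provider == "vertex_ai":
    config = {k: v for k, v in config.items() if k in ("n", "size", "model")}
elif provider == "gemini" or (provider is None and "gemini" in model_name.lower()):
    config.pop("response_format", None)

# drop_params=True asks litellm to silently discard any remaining
# unsupported kwargs instead of raising:
# response = litellm.image_generation(prompt="a red bicycle", drop_params=True, **config)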
{praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/knowledge.py

@@ -2,6 +2,7 @@ import os
  import logging
  import uuid
  import time
+ from datetime import datetime
  from .chunking import Chunking
  from functools import cached_property
  from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn
@@ -47,6 +48,342 @@ class CustomMemory:
              "event": "ADD"
          }]
  
+ class MongoDBMemory:
+     """MongoDB-based memory store for knowledge management."""
+ 
+     def __init__(self, config):
+         self.config = config
+         self.vector_store_config = config.get("vector_store", {}).get("config", {})
+         self.connection_string = self.vector_store_config.get("connection_string", "mongodb://localhost:27017/")
+         self.database_name = self.vector_store_config.get("database", "praisonai")
+         self.collection_name = self.vector_store_config.get("collection", "knowledge_base")
+         self.use_vector_search = self.vector_store_config.get("use_vector_search", True)
+ 
+         # Initialize MongoDB client
+         self._init_mongodb()
+ 
+         # Initialize embedding model
+         self._init_embedding_model()
+ 
+     def _init_mongodb(self):
+         """Initialize MongoDB client and collection."""
+         try:
+             from pymongo import MongoClient
+ 
+             self.client = MongoClient(
+                 self.connection_string,
+                 maxPoolSize=50,
+                 retryWrites=True,
+                 retryReads=True
+             )
+ 
+             # Test connection
+             self.client.admin.command('ping')
+ 
+             # Setup database and collection
+             self.db = self.client[self.database_name]
+             self.collection = self.db[self.collection_name]
+ 
+             # Create indexes
+             self._create_indexes()
+ 
+         except Exception as e:
+             raise Exception(f"Failed to initialize MongoDB: {e}")
+ 
+     def _init_embedding_model(self):
+         """Initialize embedding model from config."""
+         try:
+             # Set up embedding model based on config
+             embedder_config = self.config.get("embedder", {})
+             if embedder_config.get("provider") == "openai":
+                 import openai
+                 self.embedding_model = openai.OpenAI()
+                 self.embedding_model_name = embedder_config.get("config", {}).get("model", "text-embedding-3-small")
+             else:
+                 # Default to OpenAI
+                 import openai
+                 self.embedding_model = openai.OpenAI()
+                 self.embedding_model_name = "text-embedding-3-small"
+         except Exception as e:
+             raise Exception(f"Failed to initialize embedding model: {e}")
+ 
+     def _get_embedding_dimensions(self, model_name: str) -> int:
+         """Get embedding dimensions based on model name."""
+         # Common embedding model dimensions
+         model_dimensions = {
+             "text-embedding-3-small": 1536,
+             "text-embedding-3-large": 3072,
+             "text-embedding-ada-002": 1536,
+             "text-embedding-002": 1536,
+             # Add more models as needed
+         }
+ 
+         # Check if model name contains known model identifiers
+         for model_key, dimensions in model_dimensions.items():
+             if model_key in model_name.lower():
+                 return dimensions
+ 
+         # Default to 1536 for unknown models (OpenAI standard)
+         return 1536
+ 
+     def _create_indexes(self):
+         """Create necessary indexes for MongoDB."""
+         try:
+             # Text search index
+             self.collection.create_index([("content", "text")])
+ 
+             # Metadata indexes
+             self.collection.create_index([("metadata.filename", 1)])
+             self.collection.create_index([("created_at", -1)])
+ 
+             # Vector search index for Atlas (if enabled)
+             if self.use_vector_search:
+                 self._create_vector_index()
+ 
+         except Exception as e:
+             logging.warning(f"Could not create MongoDB indexes: {e}")
+ 
+     def _create_vector_index(self):
+         """Create vector search index for Atlas Vector Search."""
+         try:
+             vector_index_def = {
+                 "mappings": {
+                     "dynamic": True,
+                     "fields": {
+                         "embedding": {
+                             "type": "knnVector",
+                             "dimensions": self._get_embedding_dimensions(self.embedding_model_name),
+                             "similarity": "cosine"
+                         }
+                     }
+                 }
+             }
+ 
+             self.collection.create_search_index(vector_index_def, "vector_index")
+ 
+         except Exception as e:
+             logging.warning(f"Could not create vector search index: {e}")
+ 
+     def _get_embedding(self, text):
+         """Get embedding for text."""
+         try:
+             response = self.embedding_model.embeddings.create(
+                 input=text,
+                 model=self.embedding_model_name
+             )
+             return response.data[0].embedding
+         except Exception as e:
+             logging.error(f"Error getting embedding: {e}")
+             return None
+ 
+     def add(self, messages, user_id=None, agent_id=None, run_id=None, metadata=None):
+         """Add memory to MongoDB."""
+         try:
+             # Handle different message formats
+             if isinstance(messages, list):
+                 content = "\n".join([msg.get("content", str(msg)) if isinstance(msg, dict) else str(msg) for msg in messages])
+             else:
+                 content = str(messages)
+ 
+             # Generate embedding
+             embedding = self._get_embedding(content) if self.use_vector_search else None
+ 
+             # Create document
+             doc = {
+                 "content": content,
+                 "metadata": metadata or {},
+                 "user_id": user_id,
+                 "agent_id": agent_id,
+                 "run_id": run_id,
+                 "created_at": datetime.utcnow(),
+                 "memory_type": "knowledge"
+             }
+ 
+             if embedding:
+                 doc["embedding"] = embedding
+ 
+             # Insert document
+             result = self.collection.insert_one(doc)
+ 
+             return [{
+                 "id": str(result.inserted_id),
+                 "memory": content,
+                 "event": "ADD"
+             }]
+ 
+         except Exception as e:
+             logging.error(f"Error adding memory to MongoDB: {e}")
+             return []
+ 
+     def search(self, query, user_id=None, agent_id=None, run_id=None, rerank=False, **kwargs):
+         """Search memories in MongoDB."""
+         try:
+             results = []
+ 
+             # Vector search if enabled
+             if self.use_vector_search:
+                 embedding = self._get_embedding(query)
+                 if embedding:
+                     pipeline = [
+                         {
+                             "$vectorSearch": {
+                                 "index": "vector_index",
+                                 "path": "embedding",
+                                 "queryVector": embedding,
+                                 "numCandidates": kwargs.get("limit", 10) * 10,
+                                 "limit": kwargs.get("limit", 10)
+                             }
+                         },
+                         {
+                             "$addFields": {
+                                 "score": {"$meta": "vectorSearchScore"}
+                             }
+                         }
+                     ]
+ 
+                     # Add filters if provided
+                     if user_id or agent_id or run_id:
+                         match_filter = {}
+                         if user_id:
+                             match_filter["user_id"] = user_id
+                         if agent_id:
+                             match_filter["agent_id"] = agent_id
+                         if run_id:
+                             match_filter["run_id"] = run_id
+ 
+                         pipeline.append({"$match": match_filter})
+ 
+                     for doc in self.collection.aggregate(pipeline):
+                         results.append({
+                             "id": str(doc["_id"]),
+                             "memory": doc["content"],
+                             "metadata": doc.get("metadata", {}),
+                             "score": doc.get("score", 1.0)
+                         })
+ 
+             # Fallback to text search
+             if not results:
+                 search_filter = {"$text": {"$search": query}}
+ 
+                 # Add additional filters
+                 if user_id:
+                     search_filter["user_id"] = user_id
+                 if agent_id:
+                     search_filter["agent_id"] = agent_id
+                 if run_id:
+                     search_filter["run_id"] = run_id
+ 
+                 for doc in self.collection.find(search_filter).limit(kwargs.get("limit", 10)):
+                     results.append({
+                         "id": str(doc["_id"]),
+                         "memory": doc["content"],
+                         "metadata": doc.get("metadata", {}),
+                         "score": 1.0
+                     })
+ 
+             return results
+ 
+         except Exception as e:
+             logging.error(f"Error searching MongoDB: {e}")
+             return []
+ 
+     def get_all(self, user_id=None, agent_id=None, run_id=None):
+         """Get all memories from MongoDB."""
+         try:
+             search_filter = {}
+             if user_id:
+                 search_filter["user_id"] = user_id
+             if agent_id:
+                 search_filter["agent_id"] = agent_id
+             if run_id:
+                 search_filter["run_id"] = run_id
+ 
+             results = []
+             for doc in self.collection.find(search_filter):
+                 results.append({
+                     "id": str(doc["_id"]),
+                     "memory": doc["content"],
+                     "metadata": doc.get("metadata", {}),
+                     "created_at": doc.get("created_at")
+                 })
+ 
+             return results
+ 
+         except Exception as e:
+             logging.error(f"Error getting all memories from MongoDB: {e}")
+             return []
+ 
+     def get(self, memory_id):
+         """Get a specific memory by ID."""
+         try:
+             from bson import ObjectId
+             doc = self.collection.find_one({"_id": ObjectId(memory_id)})
+             if doc:
+                 return {
+                     "id": str(doc["_id"]),
+                     "memory": doc["content"],
+                     "metadata": doc.get("metadata", {}),
+                     "created_at": doc.get("created_at")
+                 }
+             return None
+ 
+         except Exception as e:
+             logging.error(f"Error getting memory from MongoDB: {e}")
+             return None
+ 
+     def update(self, memory_id, data):
+         """Update a memory."""
+         try:
+             from bson import ObjectId
+             result = self.collection.update_one(
+                 {"_id": ObjectId(memory_id)},
+                 {"$set": {"content": data, "updated_at": datetime.utcnow()}}
+             )
+             return result.modified_count > 0
+ 
+         except Exception as e:
+             logging.error(f"Error updating memory in MongoDB: {e}")
+             return False
+ 
+     def delete(self, memory_id):
+         """Delete a memory."""
+         try:
+             from bson import ObjectId
+             result = self.collection.delete_one({"_id": ObjectId(memory_id)})
+             return result.deleted_count > 0
+ 
+         except Exception as e:
+             logging.error(f"Error deleting memory from MongoDB: {e}")
+             return False
+ 
+     def delete_all(self, user_id=None, agent_id=None, run_id=None):
+         """Delete all memories."""
+         try:
+             search_filter = {}
+             if user_id:
+                 search_filter["user_id"] = user_id
+             if agent_id:
+                 search_filter["agent_id"] = agent_id
+             if run_id:
+                 search_filter["run_id"] = run_id
+ 
+             result = self.collection.delete_many(search_filter)
+             return result.deleted_count
+ 
+         except Exception as e:
+             logging.error(f"Error deleting all memories from MongoDB: {e}")
+             return 0
+ 
+     def reset(self):
+         """Reset all memories."""
+         try:
+             result = self.collection.delete_many({})
+             return result.deleted_count
+ 
+         except Exception as e:
+             logging.error(f"Error resetting MongoDB memories: {e}")
+             return 0
+ 
  class Knowledge:
      def __init__(self, config=None, verbose=None):
          self._config = config
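
A hedged sketch of driving the new store directly, with config keys mirroring those read in MongoDBMemory.__init__ (the class is module-internal rather than exported, and a reachable mongod plus an OPENAI_API_KEY for the embedder are assumed):

config = {
    "vector_store": {
        "provider": "mongodb",
        "config": {
            "connection_string": "mongodb://localhost:27017/",
            "database": "praisonai",
            "collection": "knowledge_base",
            "use_vector_search": False,  # True requires a MongoDB Atlas vector index
        },
    },
    "embedder": {
        "provider": "openai",
        "config": {"model": "text-embedding-3-small"},  # 1536 dimensions
    },
}

mem = MongoDBMemory(config)
mem.add("MongoDB now backs the knowledge store.", user_id="u1")
for hit in mem.search("knowledge store", user_id="u1", limit=3):
    print(hit["score"], hit["memory"])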
@@ -119,8 +456,20 @@ class Knowledge:
  if "vector_store" in self._config:
      if "provider" in self._config["vector_store"]:
          base_config["vector_store"]["provider"] = self._config["vector_store"]["provider"]
+ 
+         # Special handling for MongoDB vector store
+         if self._config["vector_store"]["provider"] == "mongodb":
+             base_config["vector_store"] = {
+                 "provider": "mongodb",
+                 "config": {
+                     "connection_string": self._config["vector_store"]["config"].get("connection_string", "mongodb://localhost:27017/"),
+                     "database": self._config["vector_store"]["config"].get("database", "praisonai"),
+                     "collection": self._config["vector_store"]["config"].get("collection", "knowledge_base"),
+                     "use_vector_search": self._config["vector_store"]["config"].get("use_vector_search", True)
+                 }
+             }
  
-     if "config" in self._config["vector_store"]:
+     if "config" in self._config["vector_store"] and self._config["vector_store"]["provider"] != "mongodb":
          config_copy = self._config["vector_store"]["config"].copy()
          # Only exclude client as it's managed internally
          if "client" in config_copy:
@@ -146,6 +495,16 @@ class Knowledge:
  
      @cached_property
      def memory(self):
+         # Check if MongoDB provider is specified
+         if (self.config.get("vector_store", {}).get("provider") == "mongodb"):
+             try:
+                 return MongoDBMemory(self.config)
+             except Exception as e:
+                 logger.error(f"Failed to initialize MongoDB memory: {e}")
+                 # Fall back to default memory
+                 pass
+ 
+         # Default Mem0 memory
          try:
              return CustomMemory.from_config(self.config)
          except (NotImplementedError, ValueError) as e:
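
End to end, selecting the MongoDB backend is just a matter of the provider key. A short hedged sketch (the property falls back to the Mem0-based CustomMemory if MongoDB initialization fails):

from praisonaiagents import Knowledge

kb = Knowledge(config=config)  # config as in the earlier sketch
store = kb.memory              # MongoDBMemory here, CustomMemory on fallback
print(type(store).__name__)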
{praisonaiagents-0.0.142 → praisonaiagents-0.0.143}/praisonaiagents/llm/openai_client.py

@@ -1049,6 +1049,150 @@ class OpenAIClient:
              break
  
          return final_response
+ 
+     def chat_completion_with_tools_stream(
+         self,
+         messages: List[Dict[str, Any]],
+         model: str = "gpt-4o",
+         temperature: float = 0.7,
+         tools: Optional[List[Any]] = None,
+         execute_tool_fn: Optional[Callable] = None,
+         reasoning_steps: bool = False,
+         verbose: bool = True,
+         max_iterations: int = 10,
+         **kwargs
+     ):
+         """
+         Create a streaming chat completion with tool support.
+ 
+         This method yields chunks of the response as they are generated,
+         enabling real-time streaming to the user.
+ 
+         Args:
+             messages: List of message dictionaries
+             model: Model to use
+             temperature: Temperature for generation
+             tools: List of tools (can be callables, dicts, or strings)
+             execute_tool_fn: Function to execute tools
+             reasoning_steps: Whether to show reasoning
+             verbose: Whether to show verbose output
+             max_iterations: Maximum tool calling iterations
+             **kwargs: Additional API parameters
+ 
+         Yields:
+             String chunks of the response as they are generated
+         """
+         # Format tools for OpenAI API
+         formatted_tools = self.format_tools(tools)
+ 
+         # Continue tool execution loop until no more tool calls are needed
+         iteration_count = 0
+ 
+         while iteration_count < max_iterations:
+             try:
+                 # Create streaming response
+                 response_stream = self._sync_client.chat.completions.create(
+                     model=model,
+                     messages=messages,
+                     temperature=temperature,
+                     tools=formatted_tools if formatted_tools else None,
+                     stream=True,
+                     **kwargs
+                 )
+ 
+                 full_response_text = ""
+                 reasoning_content = ""
+                 chunks = []
+ 
+                 # Stream the response chunk by chunk
+                 for chunk in response_stream:
+                     chunks.append(chunk)
+                     if chunk.choices and chunk.choices[0].delta.content:
+                         content = chunk.choices[0].delta.content
+                         full_response_text += content
+                         yield content
+ 
+                     # Handle reasoning content if enabled
+                     if reasoning_steps and chunk.choices and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                         rc = chunk.choices[0].delta.reasoning_content
+                         if rc:
+                             reasoning_content += rc
+                             yield f"[Reasoning: {rc}]"
+ 
+                 # Process the complete response to check for tool calls
+                 final_response = process_stream_chunks(chunks)
+ 
+                 if not final_response:
+                     return
+ 
+                 # Check for tool calls
+                 tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
+ 
+                 if tool_calls and execute_tool_fn:
+                     # Convert ToolCall dataclass objects to dict for JSON serialization
+                     serializable_tool_calls = []
+                     for tc in tool_calls:
+                         if isinstance(tc, ToolCall):
+                             # Convert dataclass to dict
+                             serializable_tool_calls.append({
+                                 "id": tc.id,
+                                 "type": tc.type,
+                                 "function": tc.function
+                             })
+                         else:
+                             # Already an OpenAI object, keep as is
+                             serializable_tool_calls.append(tc)
+ 
+                     messages.append({
+                         "role": "assistant",
+                         "content": final_response.choices[0].message.content,
+                         "tool_calls": serializable_tool_calls
+                     })
+ 
+                     for tool_call in tool_calls:
+                         # Handle both ToolCall dataclass and OpenAI object
+                         try:
+                             if isinstance(tool_call, ToolCall):
+                                 function_name = tool_call.function["name"]
+                                 arguments = json.loads(tool_call.function["arguments"])
+                             else:
+                                 function_name = tool_call.function.name
+                                 arguments = json.loads(tool_call.function.arguments)
+                         except json.JSONDecodeError as e:
+                             if verbose:
+                                 yield f"\n[Error parsing arguments for {function_name if 'function_name' in locals() else 'unknown function'}: {str(e)}]"
+                             continue
+ 
+                         if verbose:
+                             yield f"\n[Calling function: {function_name}]"
+ 
+                         # Execute the tool with error handling
+                         try:
+                             tool_result = execute_tool_fn(function_name, arguments)
+                             results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+                         except Exception as e:
+                             results_str = f"Error executing function: {str(e)}"
+                             if verbose:
+                                 yield f"\n[Function error: {str(e)}]"
+ 
+                         if verbose:
+                             yield f"\n[Function result: {results_str}]"
+ 
+                         messages.append({
+                             "role": "tool",
+                             "tool_call_id": tool_call.id if hasattr(tool_call, 'id') else tool_call['id'],
+                             "content": results_str
+                         })
+ 
+                     # Continue the loop to allow more tool calls
+                     iteration_count += 1
+                 else:
+                     # No tool calls, we're done
+                     break
+ 
+             except Exception as e:
+                 yield f"Error: {str(e)}"
+                 break
  
      def parse_structured_output(
          self,
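
A hedged consumption sketch for the new generator (the default client construction and the tool-execution shim are assumptions, not from this diff; the method's own parameter names are used as shown above):

from praisonaiagents.llm.openai_client import OpenAIClient

def get_time(city: str) -> str:
    """Toy tool for demonstration."""
    return f"12:00 in {city}"

client = OpenAIClient()  # assumed default construction
for chunk in client.chat_completion_with_tools_stream(
    messages=[{"role": "user", "content": "What time is it in Paris?"}],
    model="gpt-4o",
    tools=[get_time],
    execute_tool_fn=lambda name, args: get_time(**args),
    verbose=False,
):
    print(chunk, end="", flush=True)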