praisonaiagents 0.0.141__tar.gz → 0.0.143__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/PKG-INFO +5 -1
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/__init__.py +12 -3
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agent/agent.py +32 -5
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agent/image_agent.py +19 -4
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/knowledge.py +360 -1
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/llm/llm.py +45 -8
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/llm/openai_client.py +144 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/mcp/mcp.py +54 -14
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/memory/memory.py +390 -12
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/telemetry/__init__.py +9 -2
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/telemetry/telemetry.py +255 -25
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/__init__.py +17 -1
- praisonaiagents-0.0.143/praisonaiagents/tools/mongodb_tools.py +610 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/PKG-INFO +5 -1
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/SOURCES.txt +1 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/requires.txt +5 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/pyproject.toml +9 -2
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/README.md +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agent/router_agent.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/llm/model_capabilities.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/llm/model_router.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/setup.cfg +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_fix_comprehensive.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_http_stream_basic.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_llm_self_reflection_direct.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_ollama_sequential_fix.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_posthog_fixed.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_self_reflection_comprehensive.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_self_reflection_fix_simple.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_self_reflection_fix_verification.py +0 -0
- {praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/tests/test_validation_feedback.py +0 -0
{praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.141
+Version: 0.0.143
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -32,6 +32,9 @@ Requires-Dist: fastapi>=0.115.0; extra == "api"
 Requires-Dist: uvicorn>=0.34.0; extra == "api"
 Provides-Extra: telemetry
 Requires-Dist: posthog>=3.0.0; extra == "telemetry"
+Provides-Extra: mongodb
+Requires-Dist: pymongo>=4.6.3; extra == "mongodb"
+Requires-Dist: motor>=3.4.0; extra == "mongodb"
 Provides-Extra: all
 Requires-Dist: praisonaiagents[memory]; extra == "all"
 Requires-Dist: praisonaiagents[knowledge]; extra == "all"
@@ -40,3 +43,4 @@ Requires-Dist: praisonaiagents[llm]; extra == "all"
 Requires-Dist: praisonaiagents[mcp]; extra == "all"
 Requires-Dist: praisonaiagents[api]; extra == "all"
 Requires-Dist: praisonaiagents[telemetry]; extra == "all"
+Requires-Dist: praisonaiagents[mongodb]; extra == "all"
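The new `mongodb` extra pulls in `pymongo` and `motor` only on request (e.g. `pip install "praisonaiagents[mongodb]"`). A minimal sketch of how downstream code can treat that dependency as optional, mirroring what the metadata change implies; the `MONGODB_AVAILABLE` flag name is illustrative, not part of the package:

```python
# Hedged sketch: guard the optional MongoDB dependency provided by the
# "mongodb" extra (pymongo is absent unless the extra is installed).
try:
    from pymongo import MongoClient
    MONGODB_AVAILABLE = True
except ImportError:
    MongoClient = None
    MONGODB_AVAILABLE = False

if MONGODB_AVAILABLE:
    client = MongoClient("mongodb://localhost:27017/")
    client.admin.command("ping")  # cheap connectivity check, same as the package uses
```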
{praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/__init__.py

@@ -34,7 +34,13 @@ from .tools.tools import Tools
 from .agents.autoagents import AutoAgents
 from .knowledge.knowledge import Knowledge
 from .knowledge.chunking import Chunking
-
+# MCP support (optional)
+try:
+    from .mcp.mcp import MCP
+    _mcp_available = True
+except ImportError:
+    _mcp_available = False
+    MCP = None
 from .session import Session
 from .memory.memory import Memory
 from .guardrails import GuardrailResult, LLMGuardrail
@@ -124,7 +130,6 @@ __all__ = [
     'async_display_callbacks',
     'Knowledge',
     'Chunking',
-    'MCP',
     'GuardrailResult',
     'LLMGuardrail',
     'Handoff',
@@ -137,4 +142,8 @@ __all__ = [
     'disable_telemetry',
     'MinimalTelemetry',
     'TelemetryCollector'
-]
+]
+
+# Add MCP to __all__ if available
+if _mcp_available:
+    __all__.append('MCP')
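With the import now guarded, a missing `mcp` extra no longer breaks `import praisonaiagents`; instead `praisonaiagents.MCP` is `None` and the name is absent from `__all__`. A short sketch of how calling code might check for it, following the same guard shape:

```python
# Hedged sketch: MCP is None when the optional dependency is unavailable,
# so callers should check before constructing one.
import praisonaiagents

if getattr(praisonaiagents, "MCP", None) is not None:
    print("MCP support available:", "MCP" in praisonaiagents.__all__)
else:
    print("MCP extra not installed; falling back to plain tools.")
```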
{praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agent/agent.py

@@ -354,6 +354,8 @@ class Agent:
         self.instructions = instructions
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False
+        # Flag to track if final result has been displayed to prevent duplicates
+        self._final_display_shown = False

         # Store OpenAI client parameters for lazy initialization
         self._openai_api_key = api_key
@@ -1173,18 +1175,23 @@ Your Goal: {self.goal}"""
             task_description=task_description,
             task_id=task_id
         )
-        #
-
+        # Always display final interaction when verbose is True to ensure consistent formatting
+        # This ensures both OpenAI and custom LLM providers (like Gemini) show formatted output
+        if self.verbose and not self._final_display_shown:
             display_interaction(prompt, response, markdown=self.markdown,
                                 generation_time=generation_time, console=self.console,
                                 agent_name=self.name,
                                 agent_role=self.role,
                                 agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-                                task_name=
-                                task_description=
-                                task_id=
+                                task_name=None,  # Not available in this context
+                                task_description=None,  # Not available in this context
+                                task_id=None)  # Not available in this context
+            self._final_display_shown = True

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
+        # Reset the final display flag for each new conversation
+        self._final_display_shown = False
+
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             param_info = {
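The `_final_display_shown` flag is a plain once-per-conversation latch: set after the first verbose display, reset at the top of `chat()` (and `achat()` below). A self-contained sketch of the same pattern; the class and method names here are illustrative, not the package's API:

```python
# Illustrative sketch of the display-deduplication latch added above.
class Chatty:
    def __init__(self, verbose=True):
        self.verbose = verbose
        self._final_display_shown = False  # latch: display at most once per chat

    def _display_final(self, text):
        if self.verbose and not self._final_display_shown:
            print(f"[final] {text}")
            self._final_display_shown = True  # later calls become no-ops

    def chat(self, prompt):
        self._final_display_shown = False  # reset for each new conversation
        response = prompt.upper()          # stand-in for the LLM call
        self._display_final(response)      # shown
        self._display_final(response)      # suppressed duplicate
        return response

Chatty().chat("hello")
```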
@@ -1376,6 +1383,23 @@ Your Goal: {self.goal}"""
                 # Rollback chat history on guardrail failure
                 self.chat_history = self.chat_history[:chat_history_length]
                 return None
+            # Only consider satisfactory after minimum reflections
+            if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+                if self.verbose:
+                    display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
+                # User message already added before LLM call via _build_messages
+                self.chat_history.append({"role": "assistant", "content": response_text})
+                # Apply guardrail validation after satisfactory reflection
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
+                    # Rollback chat history on guardrail failure
+                    self.chat_history = self.chat_history[:chat_history_length]
+                    return None

         if not self.self_reflect:
             # User message already added before LLM call via _build_messages
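The new branch accepts a "satisfactory" self-reflection only once at least `min_reflect` rounds have happened; `reflection_count` is zero-based, hence the `- 1`. The gate in isolation, as a small hedged sketch:

```python
# Illustrative sketch of the minimum-reflection gate: a "yes" verdict is
# ignored until min_reflect rounds have completed (reflection_count is 0-based).
def accept_reflection(satisfactory: str, reflection_count: int, min_reflect: int) -> bool:
    return satisfactory == "yes" and reflection_count >= min_reflect - 1

assert accept_reflection("yes", reflection_count=0, min_reflect=2) is False  # too early
assert accept_reflection("yes", reflection_count=1, min_reflect=2) is True
assert accept_reflection("no", reflection_count=5, min_reflect=2) is False
```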
@@ -1533,6 +1557,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

     async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         """Async version of chat method with self-reflection support."""
+        # Reset the final display flag for each new conversation
+        self._final_display_shown = False
+
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             param_info = {
{praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/agent/image_agent.py

@@ -151,9 +151,23 @@ class ImageAgent(Agent):
         # Use the model name in config
         config['model'] = model_name

-        #
-
-
+        # Filter parameters based on the provider to avoid unsupported parameter errors
+        custom_llm_provider = None
+        try:
+            import litellm
+            _, custom_llm_provider, _, _ = litellm.get_llm_provider(model=model_name)
+        except (ImportError, AttributeError, ValueError, TypeError, Exception) as e:
+            # Log the specific error for debugging but continue with string-based fallback
+            # Include generic Exception to catch provider-specific errors like BadRequestError
+            logging.debug(f"Provider detection failed for model '{model_name}': {e}")
+
+        if custom_llm_provider == "vertex_ai":
+            # Vertex AI only supports 'n' and 'size' parameters for image generation
+            supported_params = ['n', 'size', 'model']
+            config = {k: v for k, v in config.items() if k in supported_params}
+        elif custom_llm_provider == "gemini" or (custom_llm_provider is None and 'gemini' in model_name.lower()):
+            # Gemini provider doesn't support response_format parameter
+            # Apply this filter if provider is explicitly 'gemini' or as fallback for gemini models
             config.pop('response_format', None)

         with Progress(
@@ -165,9 +179,10 @@ class ImageAgent(Agent):
             # Add a task for image generation
             task = progress.add_task(f"[cyan]Generating image with {model_name}...", total=None)

-            # Use litellm's image generation
+            # Use litellm's image generation with parameter dropping enabled as safety net
             response = self.litellm(
                 prompt=prompt,
+                drop_params=True,
                 **config
             )
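Both hunks lean on litellm: `get_llm_provider()` resolves a model string to its provider so unsupported parameters can be stripped up front, and `drop_params=True` tells litellm to drop anything the provider still rejects. A hedged standalone sketch; the provider names and string fallback mirror the diff above, but treat the helper itself as illustrative:

```python
import logging
import litellm

def filter_image_params(model_name: str, config: dict) -> dict:
    """Drop image-generation params the resolved provider can't accept (sketch)."""
    provider = None
    try:
        # get_llm_provider returns (model, custom_llm_provider, api_key, api_base)
        _, provider, _, _ = litellm.get_llm_provider(model=model_name)
    except Exception as e:  # provider detection is best-effort, as in the diff
        logging.debug(f"Provider detection failed for '{model_name}': {e}")

    if provider == "vertex_ai":
        config = {k: v for k, v in config.items() if k in ("n", "size", "model")}
    elif provider == "gemini" or (provider is None and "gemini" in model_name.lower()):
        config.pop("response_format", None)
    return config

# drop_params=True remains the safety net for anything the filter misses, e.g.:
# litellm.image_generation(prompt="a red fox", model="dall-e-3", drop_params=True,
#                          **filter_image_params("dall-e-3", {"n": 1, "size": "1024x1024"}))
```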
{praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/knowledge/knowledge.py

@@ -2,6 +2,7 @@ import os
 import logging
 import uuid
 import time
+from datetime import datetime
 from .chunking import Chunking
 from functools import cached_property
 from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn
@@ -47,6 +48,342 @@ class CustomMemory:
             "event": "ADD"
         }]

+class MongoDBMemory:
+    """MongoDB-based memory store for knowledge management."""
+
+    def __init__(self, config):
+        self.config = config
+        self.vector_store_config = config.get("vector_store", {}).get("config", {})
+        self.connection_string = self.vector_store_config.get("connection_string", "mongodb://localhost:27017/")
+        self.database_name = self.vector_store_config.get("database", "praisonai")
+        self.collection_name = self.vector_store_config.get("collection", "knowledge_base")
+        self.use_vector_search = self.vector_store_config.get("use_vector_search", True)
+
+        # Initialize MongoDB client
+        self._init_mongodb()
+
+        # Initialize embedding model
+        self._init_embedding_model()
+
+    def _init_mongodb(self):
+        """Initialize MongoDB client and collection."""
+        try:
+            from pymongo import MongoClient
+
+            self.client = MongoClient(
+                self.connection_string,
+                maxPoolSize=50,
+                retryWrites=True,
+                retryReads=True
+            )
+
+            # Test connection
+            self.client.admin.command('ping')
+
+            # Setup database and collection
+            self.db = self.client[self.database_name]
+            self.collection = self.db[self.collection_name]
+
+            # Create indexes
+            self._create_indexes()
+
+        except Exception as e:
+            raise Exception(f"Failed to initialize MongoDB: {e}")
+
+    def _init_embedding_model(self):
+        """Initialize embedding model from config."""
+        try:
+            # Set up embedding model based on config
+            embedder_config = self.config.get("embedder", {})
+            if embedder_config.get("provider") == "openai":
+                import openai
+                self.embedding_model = openai.OpenAI()
+                self.embedding_model_name = embedder_config.get("config", {}).get("model", "text-embedding-3-small")
+            else:
+                # Default to OpenAI
+                import openai
+                self.embedding_model = openai.OpenAI()
+                self.embedding_model_name = "text-embedding-3-small"
+        except Exception as e:
+            raise Exception(f"Failed to initialize embedding model: {e}")
+
+    def _get_embedding_dimensions(self, model_name: str) -> int:
+        """Get embedding dimensions based on model name."""
+        # Common embedding model dimensions
+        model_dimensions = {
+            "text-embedding-3-small": 1536,
+            "text-embedding-3-large": 3072,
+            "text-embedding-ada-002": 1536,
+            "text-embedding-002": 1536,
+            # Add more models as needed
+        }
+
+        # Check if model name contains known model identifiers
+        for model_key, dimensions in model_dimensions.items():
+            if model_key in model_name.lower():
+                return dimensions
+
+        # Default to 1536 for unknown models (OpenAI standard)
+        return 1536
+
+    def _create_indexes(self):
+        """Create necessary indexes for MongoDB."""
+        try:
+            # Text search index
+            self.collection.create_index([("content", "text")])
+
+            # Metadata indexes
+            self.collection.create_index([("metadata.filename", 1)])
+            self.collection.create_index([("created_at", -1)])
+
+            # Vector search index for Atlas (if enabled)
+            if self.use_vector_search:
+                self._create_vector_index()
+
+        except Exception as e:
+            logging.warning(f"Could not create MongoDB indexes: {e}")
+
+    def _create_vector_index(self):
+        """Create vector search index for Atlas Vector Search."""
+        try:
+            vector_index_def = {
+                "mappings": {
+                    "dynamic": True,
+                    "fields": {
+                        "embedding": {
+                            "type": "knnVector",
+                            "dimensions": self._get_embedding_dimensions(self.embedding_model_name),
+                            "similarity": "cosine"
+                        }
+                    }
+                }
+            }
+
+            self.collection.create_search_index(vector_index_def, "vector_index")
+
+        except Exception as e:
+            logging.warning(f"Could not create vector search index: {e}")
+
+    def _get_embedding(self, text):
+        """Get embedding for text."""
+        try:
+            response = self.embedding_model.embeddings.create(
+                input=text,
+                model=self.embedding_model_name
+            )
+            return response.data[0].embedding
+        except Exception as e:
+            logging.error(f"Error getting embedding: {e}")
+            return None
+
+    def add(self, messages, user_id=None, agent_id=None, run_id=None, metadata=None):
+        """Add memory to MongoDB."""
+        try:
+            # Handle different message formats
+            if isinstance(messages, list):
+                content = "\n".join([msg.get("content", str(msg)) if isinstance(msg, dict) else str(msg) for msg in messages])
+            else:
+                content = str(messages)
+
+            # Generate embedding
+            embedding = self._get_embedding(content) if self.use_vector_search else None
+
+            # Create document
+            doc = {
+                "content": content,
+                "metadata": metadata or {},
+                "user_id": user_id,
+                "agent_id": agent_id,
+                "run_id": run_id,
+                "created_at": datetime.utcnow(),
+                "memory_type": "knowledge"
+            }
+
+            if embedding:
+                doc["embedding"] = embedding
+
+            # Insert document
+            result = self.collection.insert_one(doc)
+
+            return [{
+                "id": str(result.inserted_id),
+                "memory": content,
+                "event": "ADD"
+            }]
+
+        except Exception as e:
+            logging.error(f"Error adding memory to MongoDB: {e}")
+            return []
+
+    def search(self, query, user_id=None, agent_id=None, run_id=None, rerank=False, **kwargs):
+        """Search memories in MongoDB."""
+        try:
+            results = []
+
+            # Vector search if enabled
+            if self.use_vector_search:
+                embedding = self._get_embedding(query)
+                if embedding:
+                    pipeline = [
+                        {
+                            "$vectorSearch": {
+                                "index": "vector_index",
+                                "path": "embedding",
+                                "queryVector": embedding,
+                                "numCandidates": kwargs.get("limit", 10) * 10,
+                                "limit": kwargs.get("limit", 10)
+                            }
+                        },
+                        {
+                            "$addFields": {
+                                "score": {"$meta": "vectorSearchScore"}
+                            }
+                        }
+                    ]
+
+                    # Add filters if provided
+                    if user_id or agent_id or run_id:
+                        match_filter = {}
+                        if user_id:
+                            match_filter["user_id"] = user_id
+                        if agent_id:
+                            match_filter["agent_id"] = agent_id
+                        if run_id:
+                            match_filter["run_id"] = run_id
+
+                        pipeline.append({"$match": match_filter})
+
+                    for doc in self.collection.aggregate(pipeline):
+                        results.append({
+                            "id": str(doc["_id"]),
+                            "memory": doc["content"],
+                            "metadata": doc.get("metadata", {}),
+                            "score": doc.get("score", 1.0)
+                        })
+
+            # Fallback to text search
+            if not results:
+                search_filter = {"$text": {"$search": query}}
+
+                # Add additional filters
+                if user_id:
+                    search_filter["user_id"] = user_id
+                if agent_id:
+                    search_filter["agent_id"] = agent_id
+                if run_id:
+                    search_filter["run_id"] = run_id
+
+                for doc in self.collection.find(search_filter).limit(kwargs.get("limit", 10)):
+                    results.append({
+                        "id": str(doc["_id"]),
+                        "memory": doc["content"],
+                        "metadata": doc.get("metadata", {}),
+                        "score": 1.0
+                    })
+
+            return results
+
+        except Exception as e:
+            logging.error(f"Error searching MongoDB: {e}")
+            return []
+
+    def get_all(self, user_id=None, agent_id=None, run_id=None):
+        """Get all memories from MongoDB."""
+        try:
+            search_filter = {}
+            if user_id:
+                search_filter["user_id"] = user_id
+            if agent_id:
+                search_filter["agent_id"] = agent_id
+            if run_id:
+                search_filter["run_id"] = run_id
+
+            results = []
+            for doc in self.collection.find(search_filter):
+                results.append({
+                    "id": str(doc["_id"]),
+                    "memory": doc["content"],
+                    "metadata": doc.get("metadata", {}),
+                    "created_at": doc.get("created_at")
+                })
+
+            return results
+
+        except Exception as e:
+            logging.error(f"Error getting all memories from MongoDB: {e}")
+            return []
+
+    def get(self, memory_id):
+        """Get a specific memory by ID."""
+        try:
+            from bson import ObjectId
+            doc = self.collection.find_one({"_id": ObjectId(memory_id)})
+            if doc:
+                return {
+                    "id": str(doc["_id"]),
+                    "memory": doc["content"],
+                    "metadata": doc.get("metadata", {}),
+                    "created_at": doc.get("created_at")
+                }
+            return None
+
+        except Exception as e:
+            logging.error(f"Error getting memory from MongoDB: {e}")
+            return None
+
+    def update(self, memory_id, data):
+        """Update a memory."""
+        try:
+            from bson import ObjectId
+            result = self.collection.update_one(
+                {"_id": ObjectId(memory_id)},
+                {"$set": {"content": data, "updated_at": datetime.utcnow()}}
+            )
+            return result.modified_count > 0
+
+        except Exception as e:
+            logging.error(f"Error updating memory in MongoDB: {e}")
+            return False
+
+    def delete(self, memory_id):
+        """Delete a memory."""
+        try:
+            from bson import ObjectId
+            result = self.collection.delete_one({"_id": ObjectId(memory_id)})
+            return result.deleted_count > 0
+
+        except Exception as e:
+            logging.error(f"Error deleting memory from MongoDB: {e}")
+            return False
+
+    def delete_all(self, user_id=None, agent_id=None, run_id=None):
+        """Delete all memories."""
+        try:
+            search_filter = {}
+            if user_id:
+                search_filter["user_id"] = user_id
+            if agent_id:
+                search_filter["agent_id"] = agent_id
+            if run_id:
+                search_filter["run_id"] = run_id
+
+            result = self.collection.delete_many(search_filter)
+            return result.deleted_count
+
+        except Exception as e:
+            logging.error(f"Error deleting all memories from MongoDB: {e}")
+            return 0
+
+    def reset(self):
+        """Reset all memories."""
+        try:
+            result = self.collection.delete_many({})
+            return result.deleted_count
+
+        except Exception as e:
+            logging.error(f"Error resetting MongoDB memories: {e}")
+            return 0
+
 class Knowledge:
     def __init__(self, config=None, verbose=None):
         self._config = config
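Taken together, the class gives mem0-compatible add/search/get/update/delete semantics over a single MongoDB collection, using Atlas `$vectorSearch` when available and a `$text` index as the fallback. A hedged usage sketch, assuming the class is imported from its defining module, a local MongoDB is running, and `OPENAI_API_KEY` is set for the embedding client; `use_vector_search` is disabled here because a plain local server has no Atlas vector index:

```python
from praisonaiagents.knowledge.knowledge import MongoDBMemory

config = {
    "vector_store": {
        "provider": "mongodb",
        "config": {
            "connection_string": "mongodb://localhost:27017/",
            "database": "praisonai",
            "collection": "knowledge_base",
            "use_vector_search": False,  # no Atlas vector index on a local server
        },
    },
    "embedder": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
}

memory = MongoDBMemory(config)
memory.add("MongoDB support landed in 0.0.143", user_id="u1")
for hit in memory.search("MongoDB support", user_id="u1", limit=5):
    print(hit["id"], hit["memory"], hit["score"])
```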
@@ -119,8 +456,20 @@ class Knowledge:
         if "vector_store" in self._config:
             if "provider" in self._config["vector_store"]:
                 base_config["vector_store"]["provider"] = self._config["vector_store"]["provider"]
+
+                # Special handling for MongoDB vector store
+                if self._config["vector_store"]["provider"] == "mongodb":
+                    base_config["vector_store"] = {
+                        "provider": "mongodb",
+                        "config": {
+                            "connection_string": self._config["vector_store"]["config"].get("connection_string", "mongodb://localhost:27017/"),
+                            "database": self._config["vector_store"]["config"].get("database", "praisonai"),
+                            "collection": self._config["vector_store"]["config"].get("collection", "knowledge_base"),
+                            "use_vector_search": self._config["vector_store"]["config"].get("use_vector_search", True)
+                        }
+                    }

-            if "config" in self._config["vector_store"]:
+            if "config" in self._config["vector_store"] and self._config["vector_store"]["provider"] != "mongodb":
                 config_copy = self._config["vector_store"]["config"].copy()
                 # Only exclude client as it's managed internally
                 if "client" in config_copy:
@@ -146,6 +495,16 @@ class Knowledge:

     @cached_property
     def memory(self):
+        # Check if MongoDB provider is specified
+        if (self.config.get("vector_store", {}).get("provider") == "mongodb"):
+            try:
+                return MongoDBMemory(self.config)
+            except Exception as e:
+                logger.error(f"Failed to initialize MongoDB memory: {e}")
+                # Fall back to default memory
+                pass
+
+        # Default Mem0 memory
         try:
             return CustomMemory.from_config(self.config)
         except (NotImplementedError, ValueError) as e:
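End to end, pointing `Knowledge` at MongoDB is a config change: set the vector-store provider to `mongodb` and `Knowledge.memory` resolves to `MongoDBMemory`, falling back to the default Mem0-based store if initialization fails. A hedged sketch of that wiring; `use_vector_search: True` assumes a MongoDB Atlas deployment with vector search:

```python
from praisonaiagents import Knowledge

knowledge = Knowledge(config={
    "vector_store": {
        "provider": "mongodb",
        "config": {
            "connection_string": "mongodb://localhost:27017/",
            "database": "praisonai",
            "collection": "knowledge_base",
            "use_vector_search": True,  # requires Atlas Vector Search
        },
    },
})

# Resolves to MongoDBMemory, or the Mem0-based fallback on failure.
print(type(knowledge.memory).__name__)
```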
{praisonaiagents-0.0.141 → praisonaiagents-0.0.143}/praisonaiagents/llm/llm.py

@@ -329,16 +329,29 @@ class LLM:
         # For Ollama, always generate summary when we have tool results
         # This prevents infinite loops caused by empty/minimal responses

-        # Build tool summary
-
-
+        # Build tool summary more naturally to match OpenAI-style responses
+        if len(tool_results) == 1:
+            # Single tool result - create natural response
+            result = tool_results[0]
             if isinstance(result, dict) and 'result' in result:
-
-                summary_lines.append(f"- {function_name}: {result['result']}")
+                return str(result['result'])
             else:
-
-
-
+                return str(result)
+        else:
+            # Multiple tool results - create coherent summary
+            summary_lines = []
+            for i, result in enumerate(tool_results):
+                if isinstance(result, dict) and 'result' in result:
+                    function_name = result.get('function_name', 'Tool')
+                    summary_lines.append(f"{function_name}: {result['result']}")
+                else:
+                    summary_lines.append(f"Tool {i+1}: {result}")
+
+            # Create more natural summary text
+            if len(summary_lines) == 2:
+                return f"{summary_lines[0]}. {summary_lines[1]}."
+            else:
+                return "Based on the tool execution: " + ". ".join(summary_lines) + "."

     def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
         """
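The rewritten summary builder now returns the raw result for a single tool call and a stitched sentence for several, instead of the old bulleted list. A standalone replica of that logic showing the output shapes; the function names and sample results are made up:

```python
# Standalone replica of the summary logic above, with illustrative data.
def summarize(tool_results):
    if len(tool_results) == 1:
        result = tool_results[0]
        if isinstance(result, dict) and "result" in result:
            return str(result["result"])
        return str(result)
    summary_lines = []
    for i, result in enumerate(tool_results):
        if isinstance(result, dict) and "result" in result:
            summary_lines.append(f"{result.get('function_name', 'Tool')}: {result['result']}")
        else:
            summary_lines.append(f"Tool {i+1}: {result}")
    if len(summary_lines) == 2:
        return f"{summary_lines[0]}. {summary_lines[1]}."
    return "Based on the tool execution: " + ". ".join(summary_lines) + "."

print(summarize([{"function_name": "get_weather", "result": "22C and sunny"}]))
# -> 22C and sunny
print(summarize([{"function_name": "get_weather", "result": "22C"},
                 {"function_name": "get_time", "result": "14:05"}]))
# -> get_weather: 22C. get_time: 14:05.
```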
@@ -1173,6 +1186,7 @@ class LLM:
                 final_response_text = response_text.strip()
                 break

+
             # Special handling for Ollama to prevent infinite loops
             # Only generate summary after multiple iterations to allow sequential execution
             should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
@@ -1198,6 +1212,17 @@ class LLM:
                 continue
             else:
                 # No tool calls, we're done with this iteration
+
+                # Special early stopping logic for Ollama when tool results are available
+                # Ollama often provides empty responses after successful tool execution
+                if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
+                    (not response_text or response_text.strip() == "")):
+                    # Generate coherent response from tool results
+                    tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+                    if tool_summary:
+                        final_response_text = tool_summary
+                        break
+
                 # If we've executed tools in previous iterations, this response contains the final answer
                 if iteration_count > 0 and not final_response_text:
                     final_response_text = response_text.strip() if response_text else ""
@@ -1956,6 +1981,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 final_response_text = response_text.strip()
                 break

+
             # Special handling for Ollama to prevent infinite loops
             # Only generate summary after multiple iterations to allow sequential execution
             should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
@@ -1981,6 +2007,17 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 continue
             else:
                 # No tool calls, we're done with this iteration
+
+                # Special early stopping logic for Ollama when tool results are available
+                # Ollama often provides empty responses after successful tool execution
+                if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
+                    (not response_text or response_text.strip() == "")):
+                    # Generate coherent response from tool results
+                    tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+                    if tool_summary:
+                        final_response_text = tool_summary
+                        break
+
                 # If we've executed tools in previous iterations, this response contains the final answer
                 if iteration_count > 0 and not final_response_text:
                     final_response_text = response_text.strip()
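Both the sync and async loops now share the same early exit: when Ollama returns an empty message after at least one tool iteration, the accumulated tool results are summarized instead of looping again. The predicate in isolation, as a hedged sketch whose names mirror the diff:

```python
# Hedged sketch of the Ollama early-stop predicate used in both loops above.
def should_stop_early(is_ollama: bool, accumulated_tool_results: list,
                      iteration_count: int, response_text: str | None) -> bool:
    return (is_ollama and bool(accumulated_tool_results) and iteration_count >= 1
            and (not response_text or response_text.strip() == ""))

assert should_stop_early(True, [{"result": "42"}], 1, "")          # empty reply -> summarize
assert not should_stop_early(True, [{"result": "42"}], 1, "done")  # real reply -> keep it
assert not should_stop_early(False, [{"result": "42"}], 1, "")     # non-Ollama unaffected
```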