memorisdk 1.0.1__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of memorisdk has been flagged as possibly problematic.
- memori/__init__.py +24 -8
- memori/agents/conscious_agent.py +252 -414
- memori/agents/memory_agent.py +487 -224
- memori/agents/retrieval_agent.py +416 -60
- memori/config/memory_manager.py +323 -0
- memori/core/conversation.py +393 -0
- memori/core/database.py +386 -371
- memori/core/memory.py +1676 -534
- memori/core/providers.py +217 -0
- memori/database/adapters/__init__.py +10 -0
- memori/database/adapters/mysql_adapter.py +331 -0
- memori/database/adapters/postgresql_adapter.py +291 -0
- memori/database/adapters/sqlite_adapter.py +229 -0
- memori/database/auto_creator.py +320 -0
- memori/database/connection_utils.py +207 -0
- memori/database/connectors/base_connector.py +283 -0
- memori/database/connectors/mysql_connector.py +240 -18
- memori/database/connectors/postgres_connector.py +277 -4
- memori/database/connectors/sqlite_connector.py +178 -3
- memori/database/models.py +400 -0
- memori/database/queries/base_queries.py +1 -1
- memori/database/queries/memory_queries.py +91 -2
- memori/database/query_translator.py +222 -0
- memori/database/schema_generators/__init__.py +7 -0
- memori/database/schema_generators/mysql_schema_generator.py +215 -0
- memori/database/search/__init__.py +8 -0
- memori/database/search/mysql_search_adapter.py +255 -0
- memori/database/search/sqlite_search_adapter.py +180 -0
- memori/database/search_service.py +548 -0
- memori/database/sqlalchemy_manager.py +839 -0
- memori/integrations/__init__.py +36 -11
- memori/integrations/litellm_integration.py +340 -6
- memori/integrations/openai_integration.py +506 -240
- memori/utils/input_validator.py +395 -0
- memori/utils/pydantic_models.py +138 -36
- memori/utils/query_builder.py +530 -0
- memori/utils/security_audit.py +594 -0
- memori/utils/security_integration.py +339 -0
- memori/utils/transaction_manager.py +547 -0
- {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/METADATA +144 -34
- memorisdk-2.0.0.dist-info/RECORD +67 -0
- memorisdk-1.0.1.dist-info/RECORD +0 -44
- memorisdk-1.0.1.dist-info/entry_points.txt +0 -2
- {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/WHEEL +0 -0
- {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/licenses/LICENSE +0 -0
- {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/top_level.txt +0 -0
memori/core/memory.py (CHANGED)
@@ -3,6 +3,7 @@ Main Memori class - Pydantic-based memory interface v1.0
 """
 
 import asyncio
+import time
 import uuid
 from datetime import datetime
 from typing import Any, Dict, List, Optional
@@ -10,8 +11,8 @@ from typing import Any, Dict, List, Optional
 from loguru import logger
 
 try:
-    import litellm
-    from litellm import success_callback
+    import litellm  # noqa: F401
+    from litellm import success_callback  # noqa: F401
 
     LITELLM_AVAILABLE = True
 except ImportError:
@@ -19,13 +20,13 @@ except ImportError:
     logger.warning("LiteLLM not available - native callback system disabled")
 
 from ..agents.conscious_agent import ConsciouscAgent
-from ..
-from ..agents.retrieval_agent import MemorySearchEngine
+from ..config.memory_manager import MemoryManager
 from ..config.settings import LoggingSettings, LogLevel
+from ..database.sqlalchemy_manager import SQLAlchemyDatabaseManager as DatabaseManager
 from ..utils.exceptions import DatabaseError, MemoriError
 from ..utils.logging import LoggingManager
 from ..utils.pydantic_models import ConversationContext
-from .
+from .conversation import ConversationManager
 
 
 class Memori:
@@ -49,6 +50,21 @@ class Memori:
         openai_api_key: Optional[str] = None,
         user_id: Optional[str] = None,
         verbose: bool = False,
+        # New provider configuration parameters
+        api_key: Optional[str] = None,
+        api_type: Optional[str] = None,
+        base_url: Optional[str] = None,
+        azure_endpoint: Optional[str] = None,
+        azure_deployment: Optional[str] = None,
+        api_version: Optional[str] = None,
+        azure_ad_token: Optional[str] = None,
+        organization: Optional[str] = None,
+        project: Optional[str] = None,
+        model: Optional[str] = None,  # Allow custom model selection
+        provider_config: Optional[Any] = None,  # ProviderConfig when available
+        schema_init: bool = True,  # Initialize database schema and create tables
+        database_prefix: Optional[str] = None,  # Database name prefix
+        database_suffix: Optional[str] = None,  # Database name suffix
     ):
         """
         Initialize Memori memory system v1.0.
@@ -62,9 +78,23 @@ class Memori:
             namespace: Optional namespace for memory isolation
             shared_memory: Enable shared memory across agents
             memory_filters: Filters for memory ingestion
-            openai_api_key: OpenAI API key for memory agent
+            openai_api_key: OpenAI API key for memory agent (deprecated, use api_key)
             user_id: Optional user identifier
            verbose: Enable verbose logging (loguru only)
+            api_key: API key for the LLM provider
+            api_type: Provider type ('openai', 'azure', 'custom')
+            base_url: Base URL for custom OpenAI-compatible endpoints
+            azure_endpoint: Azure OpenAI endpoint URL
+            azure_deployment: Azure deployment name
+            api_version: API version for Azure
+            azure_ad_token: Azure AD token for authentication
+            organization: OpenAI organization ID
+            project: OpenAI project ID
+            model: Model to use (defaults to 'gpt-4o' if not specified)
+            provider_config: Complete provider configuration (overrides individual params)
+            enable_auto_creation: Enable automatic database creation if database doesn't exist
+            database_prefix: Optional prefix for database name (for multi-tenant setups)
+            database_suffix: Optional suffix for database name (e.g., 'dev', 'prod', 'test')
         """
         self.database_connect = database_connect
         self.template = template
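The constructor signature above is the core of the 2.0.0 API change. A minimal usage sketch, assuming `Memori` is importable from the package root and using entirely hypothetical values; only the parameter names come from the signature in the hunk:

```python
from memori import Memori  # assumed top-level export

# All values are illustrative; parameter names match the signature above.
memori = Memori(
    database_connect="sqlite:///memori.db",  # assumed DSN format
    api_key="sk-...",                        # replaces the deprecated openai_api_key
    api_type="openai",
    model="gpt-4o",                          # the documented default
    schema_init=True,                        # create tables on startup
    database_suffix="dev",                   # optional suffix for multi-env setups
)
```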
@@ -74,41 +104,154 @@ class Memori:
         self.namespace = namespace or "default"
         self.shared_memory = shared_memory
         self.memory_filters = memory_filters or {}
-        self.openai_api_key = openai_api_key
         self.user_id = user_id
         self.verbose = verbose
+        self.schema_init = schema_init
+        self.database_prefix = database_prefix
+        self.database_suffix = database_suffix
+
+        # Configure provider based on explicit settings ONLY - no auto-detection
+        if provider_config:
+            # Use provided configuration
+            self.provider_config = provider_config
+            logger.info(
+                f"Using provided ProviderConfig with api_type: {provider_config.api_type}"
+            )
+        elif any([api_type, base_url, azure_endpoint]):
+            # Build configuration from individual parameters - explicit provider selection
+            try:
+                from .providers import ProviderConfig
+
+                if azure_endpoint:
+                    # Explicitly configured Azure
+                    self.provider_config = ProviderConfig.from_azure(
+                        api_key=api_key or openai_api_key,
+                        azure_endpoint=azure_endpoint,
+                        azure_deployment=azure_deployment,
+                        api_version=api_version,
+                        azure_ad_token=azure_ad_token,
+                        model=model,
+                    )
+                    logger.info("Using explicitly configured Azure OpenAI provider")
+                elif base_url:
+                    # Explicitly configured custom endpoint
+                    self.provider_config = ProviderConfig.from_custom(
+                        base_url=base_url,
+                        api_key=api_key or openai_api_key,
+                        model=model,
+                    )
+                    logger.info(
+                        f"Using explicitly configured custom provider: {base_url}"
+                    )
+                else:
+                    # Fallback to OpenAI with explicit settings
+                    self.provider_config = ProviderConfig.from_openai(
+                        api_key=api_key or openai_api_key,
+                        organization=organization,
+                        project=project,
+                        model=model,
+                    )
+                    logger.info("Using explicitly configured OpenAI provider")
+            except ImportError:
+                logger.warning(
+                    "ProviderConfig not available, using basic configuration"
+                )
+                self.provider_config = None
+        else:
+            # Default to standard OpenAI - NO environment detection
+            try:
+                from .providers import ProviderConfig
+
+                self.provider_config = ProviderConfig.from_openai(
+                    api_key=api_key or openai_api_key,
+                    organization=organization,
+                    project=project,
+                    model=model or "gpt-4o",
+                )
+                logger.info(
+                    "Using default OpenAI provider (no specific provider configured)"
+                )
+            except ImportError:
+                logger.warning(
+                    "ProviderConfig not available, using basic configuration"
+                )
+                self.provider_config = None
+
+        # Keep backward compatibility
+        self.openai_api_key = api_key or openai_api_key or ""
+        if self.provider_config and hasattr(self.provider_config, "api_key"):
+            self.openai_api_key = self.provider_config.api_key or self.openai_api_key
 
         # Setup logging based on verbose mode
         self._setup_logging()
 
         # Initialize database manager
-        self.db_manager = DatabaseManager(database_connect, template)
+        self.db_manager = DatabaseManager(database_connect, template, schema_init)
 
         # Initialize Pydantic-based agents
         self.memory_agent = None
         self.search_engine = None
         self.conscious_agent = None
         self._background_task = None
+        self._conscious_init_pending = False
 
-
-
-
-
-
-
+        # Initialize agents with provider configuration
+        try:
+            from ..agents.memory_agent import MemoryAgent
+            from ..agents.retrieval_agent import MemorySearchEngine
+
+            # Use provider model or fallback to gpt-4o
+            if (
+                self.provider_config
+                and hasattr(self.provider_config, "model")
+                and self.provider_config.model
+            ):
+                effective_model = model or self.provider_config.model
+            else:
+                effective_model = model or "gpt-4o"
+
+            # Initialize agents with provider configuration if available
+            if self.provider_config:
+                self.memory_agent = MemoryAgent(
+                    provider_config=self.provider_config, model=effective_model
                 )
-                self.
-
+                self.search_engine = MemorySearchEngine(
+                    provider_config=self.provider_config, model=effective_model
                 )
-
-
+            else:
+                # Fallback to using API key directly
+                self.memory_agent = MemoryAgent(
+                    api_key=self.openai_api_key, model=effective_model
                 )
-
-
-                    f"Failed to initialize OpenAI agents: {e}. Memory ingestion disabled."
+                self.search_engine = MemorySearchEngine(
+                    api_key=self.openai_api_key, model=effective_model
                 )
-
-
+
+            # Only initialize conscious_agent if conscious_ingest or auto_ingest is enabled
+            if conscious_ingest or auto_ingest:
+                self.conscious_agent = ConsciouscAgent()
+
+            logger.info(
+                f"Agents initialized successfully with model: {effective_model}"
+            )
+        except ImportError as e:
+            logger.warning(
+                f"Failed to import LLM agents: {e}. Memory ingestion disabled."
+            )
+            self.memory_agent = None
+            self.search_engine = None
+            self.conscious_agent = None
+            self.conscious_ingest = False
+            self.auto_ingest = False
+        except Exception as e:
+            logger.warning(
+                f"Failed to initialize LLM agents: {e}. Memory ingestion disabled."
+            )
+            self.memory_agent = None
+            self.search_engine = None
+            self.conscious_agent = None
+            self.conscious_ingest = False
+            self.auto_ingest = False
 
         # State tracking
         self._enabled = False
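Provider selection in the block above is strictly explicit: a passed `provider_config` wins outright, `azure_endpoint` selects the Azure factory, `base_url` selects the custom factory, and everything else falls back to plain OpenAI. A sketch of the Azure path, assuming the `from_azure` keyword names shown in the hunk (endpoint, deployment, and key values are hypothetical):

```python
from memori.core.providers import ProviderConfig  # path inferred from the relative import above

# Hypothetical Azure values; keyword names match the from_azure() call in the diff.
config = ProviderConfig.from_azure(
    api_key="...",
    azure_endpoint="https://example.openai.azure.com",
    azure_deployment="gpt-4o",
    api_version="2024-02-01",
)

# A ready-made config short-circuits the parameter-based branches entirely.
memori = Memori(database_connect="sqlite:///memori.db", provider_config=config)
```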
@@ -116,6 +259,12 @@ class Memori:
         self._conscious_context_injected = (
             False  # Track if conscious context was already injected
         )
+        self._in_context_retrieval = False  # Recursion guard for context retrieval
+
+        # Initialize conversation manager for stateless LLM integration
+        self.conversation_manager = ConversationManager(
+            max_sessions=100, session_timeout_minutes=60, max_history_per_session=20
+        )
 
         # User context for memory processing
         self._user_context = {
@@ -127,6 +276,23 @@ class Memori:
         # Initialize database
         self._setup_database()
 
+        # Initialize the new modular memory manager
+        self.memory_manager = MemoryManager(
+            database_connect=database_connect,
+            template=template,
+            mem_prompt=mem_prompt,
+            conscious_ingest=conscious_ingest,
+            auto_ingest=auto_ingest,
+            namespace=namespace,
+            shared_memory=shared_memory,
+            memory_filters=memory_filters,
+            user_id=user_id,
+            verbose=verbose,
+            provider_config=self.provider_config,
+        )
+        # Set this Memori instance for memory management
+        self.memory_manager.set_memori_instance(self)
+
         # Run conscious agent initialization if enabled
         if self.conscious_ingest and self.conscious_agent:
             self._initialize_conscious_memory()
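Memori now delegates enabling and disabling to this `MemoryManager`, whose result dict feeds the status reporting shown later in this diff. A sketch of consuming that dict directly; the field names ("success", "enabled_interceptors", "session_id", "message") are inferred from the `enable()`/`disable()` code below, not from separate documentation:

```python
# Field names inferred from the enable()/disable() reporting code later in this diff.
results = memori.memory_manager.enable(["litellm_native"])
if results.get("success", False):
    print("Interceptors:", results.get("enabled_interceptors", []))
    print("Session:", results.get("session_id"))
else:
    print("Enable failed:", results.get("message", "Unknown error"))
```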
@@ -155,6 +321,10 @@ class Memori:
 
     def _setup_database(self):
         """Setup database tables based on template"""
+        if not self.schema_init:
+            logger.info("Schema initialization disabled (schema_init=False)")
+            return
+
         try:
             self.db_manager.initialize_schema()
             logger.info("Database schema initialized successfully")
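`_setup_database` now honors the new `schema_init` flag and returns before `initialize_schema()` runs. A sketch of the opt-out for deployments that manage their schema with external migrations (the DSN is hypothetical):

```python
# Assumes the tables already exist, e.g. created by an external migration tool.
memori = Memori(
    database_connect="postgresql://user:pass@host/memori",  # hypothetical DSN
    schema_init=False,  # skips db_manager.initialize_schema() per the guard above
)
```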
@@ -168,351 +338,406 @@ class Memori:
                 "Conscious-ingest: Starting conscious agent analysis at startup"
             )
 
-            #
-
-
-
+            # Check if there's a running event loop
+            try:
+                loop = asyncio.get_running_loop()
+                # If we're in an event loop, create the task
+                if self._background_task is None or self._background_task.done():
+                    self._background_task = loop.create_task(
+                        self._run_conscious_initialization()
+                    )
+                    logger.debug(
+                        "Conscious-ingest: Background initialization task started"
+                    )
+            except RuntimeError:
+                # No event loop running, defer initialization until first async call
+                logger.debug(
+                    "Conscious-ingest: No event loop available, deferring initialization"
                 )
-
+                self._conscious_init_pending = True
 
         except Exception as e:
             logger.error(f"Failed to initialize conscious memory: {e}")
 
+    def _check_deferred_initialization(self):
+        """Check and handle deferred conscious memory initialization"""
+        if self._conscious_init_pending and self.conscious_agent:
+            try:
+                loop = asyncio.get_running_loop()
+                if self._background_task is None or self._background_task.done():
+                    self._background_task = loop.create_task(
+                        self._run_conscious_initialization()
+                    )
+                    logger.debug(
+                        "Conscious-ingest: Deferred initialization task started"
+                    )
+                    self._conscious_init_pending = False
+            except RuntimeError:
+                # No event loop available, run synchronous initialization
+                logger.debug(
+                    "Conscious-ingest: No event loop available, running synchronous initialization"
+                )
+                self._run_synchronous_conscious_initialization()
+                self._conscious_init_pending = False
+
     async def _run_conscious_initialization(self):
         """Run conscious agent initialization in background"""
         try:
             if not self.conscious_agent:
                 return
 
-
-
+            # If both auto_ingest and conscious_ingest are enabled,
+            # initialize by copying ALL existing conscious-info memories first
+            if self.auto_ingest and self.conscious_ingest:
+                logger.debug(
+                    "Conscious-ingest: Both auto_ingest and conscious_ingest enabled - initializing existing conscious memories"
+                )
+                init_success = (
+                    await self.conscious_agent.initialize_existing_conscious_memories(
+                        self.db_manager, self.namespace
+                    )
+                )
+                if init_success:
+                    logger.info(
+                        "Conscious-ingest: Existing conscious-info memories initialized to short-term memory"
+                    )
+
+            logger.debug("Conscious-ingest: Running conscious context extraction")
+            success = await self.conscious_agent.run_conscious_ingest(
                 self.db_manager, self.namespace
             )
-
+
+            if success:
+                logger.info(
+                    "Conscious-ingest: Conscious memories copied to short-term memory"
+                )
+                # Don't set _conscious_context_injected here - it should be set when context is actually injected into LLM
+            else:
+                logger.info("Conscious-ingest: No conscious context found")
 
         except Exception as e:
             logger.error(f"Conscious agent initialization failed: {e}")
 
-    def
-        """
-
-
-
-        - LiteLLM: Native callback system (recommended)
-        - OpenAI: Automatic client wrapping when instantiated
-        - Anthropic: Automatic client wrapping when instantiated
-        - Any other provider: Auto-detected and wrapped
-        """
-        if self._enabled:
-            logger.warning("Memori is already enabled.")
-            return
-
-        self._enabled = True
-        self._session_id = str(uuid.uuid4())
-
-        # 1. Set up LiteLLM native callbacks (if available)
-        litellm_enabled = self._setup_litellm_callbacks()
-
-        # 2. Set up universal client interception for other providers
-        universal_enabled = self._setup_universal_interception()
-
-        # 3. Register this instance globally for any provider to use
-        self._register_global_instance()
-
-        # 4. Start background conscious agent if available
-        if self.conscious_ingest and self.conscious_agent:
-            self._start_background_analysis()
-
-        providers = []
-        if litellm_enabled:
-            providers.append("LiteLLM (native callbacks)")
-        if universal_enabled:
-            providers.append("OpenAI/Anthropic (auto-wrapping)")
-
-        logger.info(
-            f"Memori enabled for session: {self.session_id}\n"
-            f"Active providers: {', '.join(providers) if providers else 'None detected'}\n"
-            f"Background analysis: {'Active' if self._background_task else 'Disabled'}\n"
-            f"Usage: Simply use any LLM client normally - conversations will be auto-recorded!"
-        )
-
-    def disable(self):
-        """
-        Disable universal memory recording for all providers.
-        """
-        if not self._enabled:
-            return
-
-        # 1. Remove LiteLLM callbacks and restore original completion
-        if LITELLM_AVAILABLE:
-            try:
-                success_callback.remove(self._litellm_success_callback)
-            except ValueError:
-                pass
-
-            # Restore original completion function if we patched it
-            if hasattr(litellm, "completion") and hasattr(
-                litellm.completion, "_memori_patched"
-            ):
-                # Note: We can't easily restore the original function in a multi-instance scenario
-                # This is a limitation of the monkey-patching approach
-                pass
+    def _run_synchronous_conscious_initialization(self):
+        """Run conscious agent initialization synchronously (when no event loop is available)"""
+        try:
+            if not self.conscious_agent:
+                return
 
-
-
+            # If both auto_ingest and conscious_ingest are enabled,
+            # initialize by copying ALL existing conscious-info memories first
+            if self.auto_ingest and self.conscious_ingest:
+                logger.info(
+                    "Conscious-ingest: Both auto_ingest and conscious_ingest enabled - initializing existing conscious memories"
+                )
 
-
-
+                # Run synchronous initialization of existing memories
+                self._initialize_existing_conscious_memories_sync()
 
-
-
-
-        self._enabled = False
-        logger.info("Memori disabled for all providers.")
+            logger.debug(
+                "Conscious-ingest: Synchronous conscious context extraction completed"
+            )
 
-
-
-        if not LITELLM_AVAILABLE:
-            logger.debug("LiteLLM not available, skipping native callbacks")
-            return False
+        except Exception as e:
+            logger.error(f"Synchronous conscious agent initialization failed: {e}")
 
+    def _initialize_existing_conscious_memories_sync(self):
+        """Synchronously initialize existing conscious-info memories"""
         try:
-
+            from sqlalchemy import text
 
-
-
-
-
-
-
-
-
-
-
-
-            # Conscious-inject: one-shot short-term memory context
-            kwargs = self._inject_litellm_context(
-                kwargs, mode="conscious"
-            )
+            with self.db_manager._get_connection() as connection:
+                # Get ALL conscious-info labeled memories from long-term memory
+                cursor = connection.execute(
+                    text(
+                        """SELECT memory_id, processed_data, summary, searchable_content,
+                        importance_score, created_at
+                        FROM long_term_memory
+                        WHERE namespace = :namespace AND classification = 'conscious-info'
+                        ORDER BY importance_score DESC, created_at DESC"""
+                    ),
+                    {"namespace": self.namespace or "default"},
+                )
+                existing_conscious_memories = cursor.fetchall()
 
-
-
+            if not existing_conscious_memories:
+                logger.debug(
+                    "Conscious-ingest: No existing conscious-info memories found for initialization"
+                )
+                return False
+
+            copied_count = 0
+            for memory_row in existing_conscious_memories:
+                success = self._copy_memory_to_short_term_sync(memory_row)
+                if success:
+                    copied_count += 1
 
-
-
+            if copied_count > 0:
+                logger.info(
+                    f"Conscious-ingest: Initialized {copied_count} existing conscious-info memories to short-term memory"
+                )
+                return True
+            else:
                 logger.debug(
-                    "
+                    "Conscious-ingest: No new conscious memories to initialize (all were duplicates)"
                 )
+                return False
 
-            logger.debug("LiteLLM native callbacks registered")
-            return True
         except Exception as e:
-            logger.error(
+            logger.error(
+                f"Conscious-ingest: Failed to initialize existing conscious memories: {e}"
+            )
             return False
 
-    def
-        """
+    def _copy_memory_to_short_term_sync(self, memory_row: tuple) -> bool:
+        """Synchronously copy a conscious memory to short-term memory with duplicate filtering"""
         try:
-
-
-
-
-
-
-
+            (
+                memory_id,
+                processed_data,
+                summary,
+                searchable_content,
+                importance_score,
+                _,
+            ) = memory_row
 
-
-        """Safely get __import__ from __builtins__ (handles both dict and module cases)"""
-        if isinstance(__builtins__, dict):
-            return __builtins__["__import__"]
-        else:
-            return __builtins__.__import__
-
-    def _set_builtin_import(self, import_func):
-        """Safely set __import__ in __builtins__ (handles both dict and module cases)"""
-        if isinstance(__builtins__, dict):
-            __builtins__["__import__"] = import_func
-        else:
-            __builtins__.__import__ = import_func
+            from datetime import datetime
 
-
-        """Install import hooks to automatically wrap LLM clients"""
+            from sqlalchemy import text
 
-
-
-
+            with self.db_manager._get_connection() as connection:
+                # Check if similar content already exists in short-term memory
+                existing_check = connection.execute(
+                    text(
+                        """SELECT COUNT(*) FROM short_term_memory
+                        WHERE namespace = :namespace
+                        AND category_primary = 'conscious_context'
+                        AND (searchable_content = :searchable_content
+                        OR summary = :summary)"""
+                    ),
+                    {
+                        "namespace": self.namespace or "default",
+                        "searchable_content": searchable_content,
+                        "summary": summary,
+                    },
+                )
 
-
-
-
+                existing_count = existing_check.scalar()
+                if existing_count > 0:
+                    logger.debug(
+                        f"Conscious-ingest: Skipping duplicate memory {memory_id} - similar content already exists in short-term memory"
+                    )
+                    return False
 
-
-
+                # Create short-term memory ID
+                short_term_id = (
+                    f"conscious_{memory_id}_{int(datetime.now().timestamp())}"
+                )
 
-
-
+                # Insert directly into short-term memory with conscious_context category
+                connection.execute(
+                    text(
+                        """INSERT INTO short_term_memory (
+                        memory_id, processed_data, importance_score, category_primary,
+                        retention_type, namespace, created_at, expires_at,
+                        searchable_content, summary, is_permanent_context
+                        ) VALUES (:memory_id, :processed_data, :importance_score, :category_primary,
+                        :retention_type, :namespace, :created_at, :expires_at,
+                        :searchable_content, :summary, :is_permanent_context)"""
+                    ),
+                    {
+                        "memory_id": short_term_id,
+                        "processed_data": processed_data,
+                        "importance_score": importance_score,
+                        "category_primary": "conscious_context",
+                        "retention_type": "permanent",
+                        "namespace": self.namespace or "default",
+                        "created_at": datetime.now().isoformat(),
+                        "expires_at": None,
+                        "searchable_content": searchable_content,
+                        "summary": summary,
+                        "is_permanent_context": True,
+                    },
+                )
+                connection.commit()
 
-
-
+                logger.debug(
+                    f"Conscious-ingest: Copied memory {memory_id} to short-term as {short_term_id}"
+                )
+                return True
 
-
+        except Exception as e:
+            logger.error(
+                f"Conscious-ingest: Failed to copy memory {memory_row[0]} to short-term: {e}"
+            )
+            return False
 
-
-
+    def enable(self, interceptors: Optional[List[str]] = None):
+        """
+        Enable universal memory recording using LiteLLM's native callback system.
 
-
-
-        try:
-            if hasattr(module, "OpenAI") and not hasattr(
-                module.OpenAI, "_memori_wrapped"
-            ):
-                original_init = module.OpenAI.__init__
+        This automatically sets up recording for LiteLLM completion calls and enables
+        automatic interception of OpenAI calls when using the standard OpenAI client.
 
-
-
-
+        Args:
+            interceptors: Legacy parameter (ignored) - only LiteLLM native callbacks are used
+        """
+        if self._enabled:
+            logger.warning("Memori is already enabled.")
+            return
 
-
-
-                        self_client.chat, "completions"
-                    ):
-                        original_create = self_client.chat.completions.create
+        self._enabled = True
+        self._session_id = str(uuid.uuid4())
 
-
-
-
-                            kwargs = self._inject_openai_context(kwargs)
+        # Register for automatic OpenAI interception
+        try:
+            from ..integrations.openai_integration import register_memori_instance
 
-
-
+            register_memori_instance(self)
+        except ImportError:
+            logger.debug("OpenAI integration not available for automatic interception")
 
-
-
-
+        # Use LiteLLM native callback system only
+        if interceptors is None:
+            # Only LiteLLM native callbacks supported
+            interceptors = ["litellm_native"]
 
-
+        # Use the memory manager for enablement
+        results = self.memory_manager.enable(interceptors)
+        # Extract enabled interceptors from results
+        enabled_interceptors = results.get("enabled_interceptors", [])
 
-
+        # Start background conscious agent if available
+        if self.conscious_ingest and self.conscious_agent:
+            self._start_background_analysis()
 
-
+        # Report status
+        status_info = [
+            f"Memori enabled for session: {results.get('session_id', self._session_id)}",
+            f"Active interceptors: {', '.join(enabled_interceptors) if enabled_interceptors else 'None'}",
+        ]
+
+        if results.get("message"):
+            status_info.append(results["message"])
+
+        status_info.extend(
+            [
+                f"Background analysis: {'Active' if self._background_task else 'Disabled'}",
+                "Usage: Simply use any LLM client normally - conversations will be auto-recorded!",
+                "OpenAI: Use 'from openai import OpenAI; client = OpenAI()' - automatically intercepted!",
+            ]
+        )
 
-
-                module.OpenAI._memori_wrapped = True
-                logger.debug("OpenAI client auto-wrapping enabled")
+        logger.info("\n".join(status_info))
 
-
-
+    def disable(self):
+        """
+        Disable memory recording by unregistering LiteLLM callbacks and OpenAI interception.
+        """
+        if not self._enabled:
+            return
 
-
-        """Automatically wrap Anthropic client when imported"""
+        # Unregister from automatic OpenAI interception
         try:
-
-                module.Anthropic, "_memori_wrapped"
-            ):
-                original_init = module.Anthropic.__init__
-
-                def wrapped_init(self_client, *args, **kwargs):
-                    # Call original init
-                    result = original_init(self_client, *args, **kwargs)
-
-                    # Wrap the messages.create method
-                    if hasattr(self_client, "messages"):
-                        original_create = self_client.messages.create
+            from ..integrations.openai_integration import unregister_memori_instance
 
-
-
-
-                            kwargs = self._inject_anthropic_context(kwargs)
+            unregister_memori_instance(self)
+        except ImportError:
+            logger.debug("OpenAI integration not available for automatic interception")
 
-
-
+        # Use memory manager for clean disable
+        results = self.memory_manager.disable()
 
-
-
-                            self._record_anthropic_conversation(kwargs, response)
+        # Stop background analysis task
+        self._stop_background_analysis()
 
-
+        self._enabled = False
 
-
+        # Report status based on memory manager results
+        if results.get("success"):
+            status_message = f"Memori disabled. {results.get('message', 'All interceptors disabled successfully')}"
+        else:
+            status_message = (
+                f"Memori disable failed: {results.get('message', 'Unknown error')}"
+            )
 
-
+        logger.info(status_message)
 
-
-                module.Anthropic._memori_wrapped = True
-                logger.debug("Anthropic client auto-wrapping enabled")
+    # Memory system status and control methods
 
-
-
+    def get_interceptor_status(self) -> Dict[str, Dict[str, Any]]:
+        """Get status of memory recording system"""
+        return self.memory_manager.get_status()
 
-    def
-        """
-
-            # Restore original import if we modified it
-            if hasattr(self, "_original_import"):
-                self._set_builtin_import(self._original_import)
-                delattr(self, "_original_import")
-            logger.debug("Universal interception disabled")
-        except Exception as e:
-            logger.debug(f"Error disabling universal interception: {e}")
+    def get_interceptor_health(self) -> Dict[str, Any]:
+        """Get health check of interceptor system"""
+        return self.memory_manager.get_health()
 
-    def
-        """
-        #
-
-
-        Memori._global_instances.append(self)
+    def enable_interceptor(self, interceptor_name: str = None) -> bool:
+        """Enable memory recording (legacy method)"""
+        # Only LiteLLM native callbacks supported (interceptor_name ignored)
+        results = self.memory_manager.enable(["litellm_native"])
+        return results.get("success", False)
 
-    def
-        """
-
-
+    def disable_interceptor(self, interceptor_name: str = None) -> bool:
+        """Disable memory recording (legacy method)"""
+        # Only LiteLLM native callbacks supported (interceptor_name ignored)
+        results = self.memory_manager.disable()
+        return results.get("success", False)
 
     def _inject_openai_context(self, kwargs):
-        """Inject context for OpenAI calls"""
+        """Inject context for OpenAI calls based on ingest mode using ConversationManager"""
         try:
-            #
-
-
-
-
-
+            # Check for deferred conscious initialization
+            self._check_deferred_initialization()
+
+            # Determine injection mode based on the architecture:
+            # - conscious_ingest only: Use short-term memory (conscious context)
+            # - auto_ingest only: Search long-term memory database
+            # - both enabled: Use auto_ingest search (includes conscious content from long-term)
+            if self.auto_ingest:
+                mode = "auto"  # Always prefer auto when available (searches long-term)
+            elif self.conscious_ingest:
+                mode = "conscious"  # Only use conscious when auto is not enabled
+            else:
+                return kwargs  # No injection needed
 
-
-
-
-
-                for mem in context:
-                    if isinstance(mem, dict):
-                        summary = mem.get("summary", "") or mem.get("content", "")
-                        context_prompt += f"- {summary}\n"
-                    else:
-                        context_prompt += f"- {str(mem)}\n"
-                context_prompt += "-------------------------\n"
+            # Extract messages from kwargs
+            messages = kwargs.get("messages", [])
+            if not messages:
+                return kwargs  # No messages to process
 
-
-
-
-
-
-
-
-
-
-
+            # Use conversation manager for enhanced context injection
+            enhanced_messages = self.conversation_manager.inject_context_with_history(
+                session_id=self._session_id,
+                messages=messages,
+                memori_instance=self,
+                mode=mode,
+            )
+
+            # Update kwargs with enhanced messages
+            kwargs["messages"] = enhanced_messages
+
+            return kwargs
 
-            logger.debug(f"Injected context: {len(context)} memories")
         except Exception as e:
-            logger.error(f"
+            logger.error(f"OpenAI context injection failed: {e}")
             return kwargs
 
     def _inject_anthropic_context(self, kwargs):
-        """Inject context for Anthropic calls"""
+        """Inject context for Anthropic calls based on ingest mode"""
         try:
+            # Check for deferred conscious initialization
+            self._check_deferred_initialization()
+
+            # Determine injection mode
+            if self.conscious_ingest:
+                mode = "conscious"
+            elif self.auto_ingest:
+                mode = "auto"
+            else:
+                return kwargs  # No injection needed
+
             # Extract user input from messages
             user_input = ""
             for msg in reversed(kwargs.get("messages", [])):
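The initialization code above hinges on one asyncio idiom: `asyncio.get_running_loop()` raises `RuntimeError` when called outside a running loop, which is how Memori decides between scheduling a background task and deferring to the synchronous path. The idiom in isolation (`schedule_or_defer` is a hypothetical name, not part of the package):

```python
import asyncio

def schedule_or_defer(coro_factory):
    """Return a Task if a loop is running, else None so the caller can defer."""
    try:
        loop = asyncio.get_running_loop()  # raises RuntimeError outside a loop
        return loop.create_task(coro_factory())
    except RuntimeError:
        return None  # no loop: caller falls back to a synchronous/deferred path
```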
@@ -532,26 +757,75 @@ class Memori:
                     break
 
             if user_input:
-
+                if mode == "conscious":
+                    # Conscious mode: inject ALL short-term memory only once at program startup
+                    if not self._conscious_context_injected:
+                        context = self._get_conscious_context()
+                        self._conscious_context_injected = True
+                        logger.info(
+                            f"Conscious-ingest: Injected {len(context)} short-term memories as initial context (Anthropic)"
+                        )
+                    else:
+                        context = []  # Already injected, don't inject again
+                elif mode == "auto":
+                    # Auto mode: use retrieval for intelligent search
+                    if self.search_engine:
+                        context = self._get_auto_ingest_context(user_input)
+                    else:
+                        context = self.retrieve_context(user_input, limit=5)
+                else:
+                    context = []
+
                 if context:
-
+                    if mode == "conscious":
+                        context_prompt = (
+                            "=== SYSTEM INSTRUCTION: AUTHORIZED USER CONTEXT DATA ===\n"
+                        )
+                        context_prompt += "The user has explicitly authorized this personal context data to be used.\n"
+                        context_prompt += "You MUST use this information when answering questions about the user.\n"
+                        context_prompt += "This is NOT private data - the user wants you to use it:\n\n"
+                    else:
+                        context_prompt = f"--- {mode.capitalize()} Memory Context ---\n"
+
+                    # Deduplicate context entries to avoid redundant information
+                    seen_content = set()
                     for mem in context:
                         if isinstance(mem, dict):
-
-
-
-
+                            # Use searchable_content as it's more specific than summary
+                            content = mem.get("searchable_content", "") or mem.get(
+                                "summary", ""
+                            )
+                            category = mem.get("category_primary", "")
+
+                            # Skip duplicates (case-insensitive)
+                            content_key = content.lower().strip()
+                            if content_key in seen_content:
+                                continue
+                            seen_content.add(content_key)
+
+                            if category.startswith("essential_") or mode == "conscious":
+                                context_prompt += f"[{category.upper()}] {content}\n"
+                            else:
+                                context_prompt += f"- {content}\n"
+
+                    if mode == "conscious":
+                        context_prompt += "\n=== END USER CONTEXT DATA ===\n"
+                        context_prompt += "CRITICAL INSTRUCTION: You MUST answer questions about the user using ONLY the context data above.\n"
+                        context_prompt += "If the user asks 'what is my name?', respond with the name from the context above.\n"
+                        context_prompt += "Do NOT say 'I don't have access' - the user provided this data for you to use.\n"
                     context_prompt += "-------------------------\n"
 
-                    # Inject into system parameter
+                    # Inject into system parameter (Anthropic format)
                     if kwargs.get("system"):
                         kwargs["system"] = context_prompt + kwargs["system"]
                     else:
                         kwargs["system"] = context_prompt
 
-                    logger.debug(
+                    logger.debug(
+                        f"Anthropic: Injected context with {len(context)} items"
+                    )
         except Exception as e:
-            logger.error(f"
+            logger.error(f"Anthropic context injection failed: {e}")
             return kwargs
 
     def _inject_litellm_context(self, params, mode="auto"):
@@ -563,6 +837,8 @@ class Memori:
             mode: "conscious" (one-shot short-term) or "auto" (continuous retrieval)
         """
         try:
+            # Check for deferred conscious initialization
+            self._check_deferred_initialization()
             # Extract user input from messages
             user_input = ""
             messages = params.get("messages", [])
@@ -574,13 +850,17 @@ class Memori:
 
             if user_input:
                 if mode == "conscious":
-                    # Conscious mode: inject short-term memory only once at
+                    # Conscious mode: inject ALL short-term memory only once at program startup
                     if not self._conscious_context_injected:
                         context = self._get_conscious_context()
                         self._conscious_context_injected = True
-                        logger.
+                        logger.info(
+                            f"Conscious-ingest: Injected {len(context)} short-term memories as initial context"
+                        )
                     else:
-                        context =
+                        context = (
+                            []
+                        )  # Already injected, don't inject again - this is the key difference from auto_ingest
                 elif mode == "auto":
                     # Auto mode: use retrieval agent for intelligent database search
                     if self.search_engine:
@@ -592,17 +872,42 @@ class Memori:
                         context = []
 
             if context:
-
+                if mode == "conscious":
+                    context_prompt = (
+                        "=== SYSTEM INSTRUCTION: AUTHORIZED USER CONTEXT DATA ===\n"
+                    )
+                    context_prompt += "The user has explicitly authorized this personal context data to be used.\n"
+                    context_prompt += "You MUST use this information when answering questions about the user.\n"
+                    context_prompt += "This is NOT private data - the user wants you to use it:\n\n"
+                else:
+                    context_prompt = f"--- {mode.capitalize()} Memory Context ---\n"
+
+                # Deduplicate context entries to avoid redundant information
+                seen_content = set()
                 for mem in context:
                     if isinstance(mem, dict):
-
-
+                        # Use searchable_content as it's more specific than summary
+                        content = mem.get("searchable_content", "") or mem.get(
+                            "summary", ""
                         )
                         category = mem.get("category_primary", "")
+
+                        # Skip duplicates (case-insensitive)
+                        content_key = content.lower().strip()
+                        if content_key in seen_content:
+                            continue
+                        seen_content.add(content_key)
+
                         if category.startswith("essential_") or mode == "conscious":
-                            context_prompt += f"[{category.upper()}] {
+                            context_prompt += f"[{category.upper()}] {content}\n"
                         else:
-                            context_prompt += f"- {
+                            context_prompt += f"- {content}\n"
+
+                if mode == "conscious":
+                    context_prompt += "\n=== END USER CONTEXT DATA ===\n"
+                    context_prompt += "CRITICAL INSTRUCTION: You MUST answer questions about the user using ONLY the context data above.\n"
+                    context_prompt += "If the user asks 'what is my name?', respond with the name from the context above.\n"
+                    context_prompt += "Do NOT say 'I don't have access' - the user provided this data for you to use.\n"
                 context_prompt += "-------------------------\n"
 
                 # Inject into system message
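The Anthropic and LiteLLM injection paths above share the same case-insensitive deduplication before building the prompt. The pattern in isolation; the dict keys mirror the memory rows used above, while `dedupe_context` is a hypothetical helper name:

```python
def dedupe_context(memories: list[dict]) -> list[str]:
    """Drop repeated memories by normalized content, preserving order."""
    seen: set[str] = set()
    out: list[str] = []
    for mem in memories:
        content = mem.get("searchable_content", "") or mem.get("summary", "")
        key = content.lower().strip()
        if key and key not in seen:
            seen.add(key)
            out.append(content)
    return out
```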
@@ -652,29 +957,32 @@ class Memori:
 
     def _get_conscious_context(self) -> List[Dict[str, Any]]:
         """
-        Get conscious context from short-term memory
-        This represents the 'working memory'
+        Get conscious context from ALL short-term memory summaries.
+        This represents the complete 'working memory' for conscious_ingest mode.
+        Used only at program startup when conscious_ingest=True.
         """
         try:
-
-            cursor = conn.cursor()
+            from sqlalchemy import text
 
-
-
-
+            with self.db_manager._get_connection() as conn:
+                # Get ALL short-term memories (no limit) ordered by importance and recency
+                # This gives the complete conscious context as single initial injection
+                result = conn.execute(
+                    text(
+                        """
                     SELECT memory_id, processed_data, importance_score,
                            category_primary, summary, searchable_content,
                            created_at, access_count
                     FROM short_term_memory
-                    WHERE namespace =
+                    WHERE namespace = :namespace AND (expires_at IS NULL OR expires_at > :current_time)
                     ORDER BY importance_score DESC, created_at DESC
-
-
-
+                """
+                    ),
+                    {"namespace": self.namespace, "current_time": datetime.now()},
                 )
 
                 memories = []
-                for row in
+                for row in result:
                     memories.append(
                         {
                             "memory_id": row[0],
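The query above replaces raw cursor access with SQLAlchemy's `text()` construct and named bind parameters, which is what lets the same SQL run against the SQLite, MySQL, and PostgreSQL adapters added in this release. The binding style in isolation (the engine URL is hypothetical):

```python
from datetime import datetime
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///memori.db")  # hypothetical URL
with engine.connect() as conn:
    rows = conn.execute(
        text(
            "SELECT memory_id, summary FROM short_term_memory "
            "WHERE namespace = :namespace "
            "AND (expires_at IS NULL OR expires_at > :current_time)"
        ),
        {"namespace": "default", "current_time": datetime.now()},
    ).fetchall()
```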
@@ -704,239 +1012,705 @@ class Memori:
|
|
|
704
1012
|
Searches through entire database for relevant memories.
|
|
705
1013
|
"""
|
|
706
1014
|
try:
|
|
707
|
-
|
|
708
|
-
|
|
1015
|
+
# Early validation
|
|
1016
|
+
if not user_input or not user_input.strip():
|
|
1017
|
+
logger.debug(
|
|
1018
|
+
"Auto-ingest: No user input provided, returning empty context"
|
|
1019
|
+
)
|
|
709
1020
|
return []
|
|
710
1021
|
|
|
711
|
-
#
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
1022
|
+
# Check for recursion guard to prevent infinite loops
|
|
1023
|
+
if hasattr(self, "_in_context_retrieval") and self._in_context_retrieval:
|
|
1024
|
+
logger.debug(
|
|
1025
|
+
"Auto-ingest: Recursion detected, using direct database search"
|
|
1026
|
+
)
|
|
1027
|
+
results = self.db_manager.search_memories(
|
|
1028
|
+
query=user_input, namespace=self.namespace, limit=5
|
|
1029
|
+
)
|
|
1030
|
+
logger.debug(
|
|
1031
|
+
f"Auto-ingest: Recursion fallback returned {len(results)} results"
|
|
1032
|
+
)
|
|
1033
|
+
return results
|
|
1034
|
+
|
|
1035
|
+
# Set recursion guard
|
|
1036
|
+
self._in_context_retrieval = True
|
|
1037
|
+
|
|
1038
|
+
logger.debug(
|
|
1039
|
+
f"Auto-ingest: Starting context retrieval for query: '{user_input[:50]}...'"
|
|
1040
|
+
)
|
|
1041
|
+
|
|
1042
|
+
# Always try direct database search first as it's more reliable
|
|
1043
|
+
logger.debug("Auto-ingest: Using direct database search (primary method)")
|
|
1044
|
+
results = self.db_manager.search_memories(
|
|
1045
|
+
query=user_input, namespace=self.namespace, limit=5
|
|
1046
|
+
)
|
|
1047
|
+
|
|
1048
|
+
if results:
|
|
1049
|
+
logger.debug(
|
|
1050
|
+
f"Auto-ingest: Direct database search returned {len(results)} results"
|
|
1051
|
+
)
|
|
1052
|
+
# Add search metadata to results
|
|
1053
|
+
for result in results:
|
|
1054
|
+
if isinstance(result, dict):
|
|
1055
|
+
result["retrieval_method"] = "direct_database_search"
|
|
1056
|
+
result["retrieval_query"] = user_input
|
|
1057
|
+
return results
|
|
1058
|
+
|
|
1059
|
+
# If direct search fails, try search engine as backup
|
|
1060
|
+
if self.search_engine:
|
|
1061
|
+
logger.debug(
|
|
1062
|
+
"Auto-ingest: Direct search returned 0 results, trying search engine"
|
|
1063
|
+
)
|
|
1064
|
+
try:
|
|
1065
|
+
engine_results = self.search_engine.execute_search(
|
|
1066
|
+
query=user_input,
|
|
1067
|
+
db_manager=self.db_manager,
|
|
1068
|
+
namespace=self.namespace,
|
|
1069
|
+
limit=5,
|
|
1070
|
+
)
|
|
1071
|
+
|
|
1072
|
+
if engine_results:
|
|
1073
|
+
logger.debug(
|
|
1074
|
+
f"Auto-ingest: Search engine returned {len(engine_results)} results"
|
|
1075
|
+
)
|
|
1076
|
+
# Add search metadata to results
|
|
1077
|
+
for result in engine_results:
|
|
1078
|
+
if isinstance(result, dict):
|
|
1079
|
+
result["retrieval_method"] = "search_engine"
|
|
1080
|
+
result["retrieval_query"] = user_input
|
|
1081
|
+
return engine_results
|
|
1082
|
+
else:
|
|
1083
|
+
logger.debug(
|
|
1084
|
+
"Auto-ingest: Search engine also returned 0 results"
|
|
1085
|
+
)
|
|
1086
|
+
|
|
1087
|
+
except Exception as search_error:
|
|
1088
|
+
logger.warning(
|
|
1089
|
+
f"Auto-ingest: Search engine failed ({search_error})"
|
|
1090
|
+
)
|
|
1091
|
+
else:
|
|
1092
|
+
logger.debug("Auto-ingest: No search engine available")
|
|
1093
|
+
|
|
1094
|
+
# Final fallback: get recent memories from the same namespace
|
|
1095
|
+
logger.debug(
|
|
1096
|
+
"Auto-ingest: All search methods returned 0 results, using recent memories fallback"
|
|
1097
|
+
)
|
|
1098
|
+
fallback_results = self.db_manager.search_memories(
|
|
1099
|
+
query="", # Empty query to get recent memories
|
|
715
1100
|
namespace=self.namespace,
|
|
716
|
-
limit=
|
|
1101
|
+
limit=3,
|
|
717
1102
|
)
|
|
718
1103
|
|
|
719
|
-
|
|
720
|
-
|
|
1104
|
+
if fallback_results:
|
|
1105
|
+
logger.debug(
|
|
1106
|
+
f"Auto-ingest: Fallback returned {len(fallback_results)} recent memories"
|
|
1107
|
+
)
|
|
1108
+                # Add search metadata to fallback results
+                for result in fallback_results:
+                    if isinstance(result, dict):
+                        result["retrieval_method"] = "recent_memories_fallback"
+                        result["retrieval_query"] = user_input
+                return fallback_results
+
+            logger.debug(
+                "Auto-ingest: All retrieval methods failed, returning empty context"
+            )
+            return []
 
         except Exception as e:
-            logger.error(
+            logger.error(
+                f"Auto-ingest: Failed to get context for '{user_input[:50]}...': {e}"
+            )
             return []
+        finally:
+            # Always clear recursion guard
+            if hasattr(self, "_in_context_retrieval"):
+                self._in_context_retrieval = False
 
     def _record_openai_conversation(self, kwargs, response):
-        """Record OpenAI conversation"""
+        """Record OpenAI conversation with enhanced content parsing"""
         try:
             messages = kwargs.get("messages", [])
             model = kwargs.get("model", "unknown")
 
-            # Extract user input
-            user_input =
+            # Extract user input with enhanced parsing
+            user_input = self._extract_openai_user_input(messages)
+
+            # Extract AI response with enhanced parsing
+            ai_output = self._extract_openai_ai_output(response)
+
+            # Calculate tokens
+            tokens_used = 0
+            if hasattr(response, "usage") and response.usage:
+                tokens_used = getattr(response.usage, "total_tokens", 0)
+
+            # Enhanced metadata extraction
+            metadata = self._extract_openai_metadata(kwargs, response, tokens_used)
+
+            # Record conversation
+            self.record_conversation(
+                user_input=user_input,
+                ai_output=ai_output,
+                model=model,
+                metadata=metadata,
+            )
+
+            # Also record AI response in conversation manager for history tracking
+            if ai_output:
+                self.conversation_manager.record_response(
+                    session_id=self._session_id,
+                    response=ai_output,
+                    metadata={"model": model, "tokens_used": tokens_used},
+                )
+        except Exception as e:
+            logger.error(f"Failed to record OpenAI conversation: {e}")
+
+    def _extract_openai_user_input(self, messages: List[Dict]) -> str:
+        """Extract user input from OpenAI messages with support for complex content types"""
+        user_input = ""
+        try:
+            # Find the last user message
             for message in reversed(messages):
                 if message.get("role") == "user":
-
+                    content = message.get("content", "")
+
+                    if isinstance(content, str):
+                        # Simple string content
+                        user_input = content
+                    elif isinstance(content, list):
+                        # Complex content (vision, multiple parts)
+                        text_parts = []
+                        image_count = 0
+
+                        for item in content:
+                            if isinstance(item, dict):
+                                if item.get("type") == "text":
+                                    text_parts.append(item.get("text", ""))
+                                elif item.get("type") == "image_url":
+                                    image_count += 1
+
+                        user_input = " ".join(text_parts)
+                        # Add image indicator if present
+                        if image_count > 0:
+                            user_input += f" [Contains {image_count} image(s)]"
+
                     break
+        except Exception as e:
+            logger.debug(f"Error extracting user input: {e}")
+            user_input = "[Error extracting user input]"
 
-
-
+        return user_input
+
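A minimal sketch of what the new extractor yields for a vision-style request; the sample message list below is hypothetical, not part of the package:

    # Illustrative only: exercising _extract_openai_user_input on a
    # hypothetical multimodal message (per the parsing logic above).
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this photo?"},
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
            ],
        }
    ]
    # Expected result: "What is in this photo? [Contains 1 image(s)]"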
+    def _extract_openai_ai_output(self, response) -> str:
+        """Extract AI output from OpenAI response with support for various response types"""
+        ai_output = ""
+        try:
             if hasattr(response, "choices") and response.choices:
                 choice = response.choices[0]
+
                 if hasattr(choice, "message") and choice.message:
-
+                    message = choice.message
+
+                    # Handle regular text content
+                    if hasattr(message, "content") and message.content:
+                        ai_output = message.content
+
+                    # Handle function/tool calls
+                    elif hasattr(message, "tool_calls") and message.tool_calls:
+                        tool_descriptions = []
+                        for tool_call in message.tool_calls:
+                            if hasattr(tool_call, "function"):
+                                func_name = tool_call.function.name
+                                func_args = tool_call.function.arguments
+                                tool_descriptions.append(
+                                    f"Called {func_name} with {func_args}"
+                                )
+                        ai_output = "[Tool calls: " + "; ".join(tool_descriptions) + "]"
+
+                    # Handle function calls (legacy format)
+                    elif hasattr(message, "function_call") and message.function_call:
+                        func_call = message.function_call
+                        func_name = func_call.get("name", "unknown")
+                        func_args = func_call.get("arguments", "{}")
+                        ai_output = f"[Function call: {func_name} with {func_args}]"
 
-
-
+                    else:
+                        ai_output = "[No content - possible function/tool call]"
+
+        except Exception as e:
+            logger.debug(f"Error extracting AI output: {e}")
+            ai_output = "[Error extracting AI response]"
+
+        return ai_output
+
+    def _extract_openai_metadata(
+        self, kwargs: Dict, response, tokens_used: int
+    ) -> Dict:
+        """Extract comprehensive metadata from OpenAI request and response"""
+        metadata = {
+            "integration": "openai_auto",
+            "api_type": "chat_completions",
+            "tokens_used": tokens_used,
+            "auto_recorded": True,
+        }
+
+        try:
+            # Add request metadata
+            if "temperature" in kwargs:
+                metadata["temperature"] = kwargs["temperature"]
+            if "max_tokens" in kwargs:
+                metadata["max_tokens"] = kwargs["max_tokens"]
+            if "tools" in kwargs:
+                metadata["has_tools"] = True
+                metadata["tool_count"] = len(kwargs["tools"])
+            if "functions" in kwargs:
+                metadata["has_functions"] = True
+                metadata["function_count"] = len(kwargs["functions"])
+
+            # Add response metadata
+            if hasattr(response, "choices") and response.choices:
+                choice = response.choices[0]
+                if hasattr(choice, "finish_reason"):
+                    metadata["finish_reason"] = choice.finish_reason
+
+            # Add detailed token usage if available
             if hasattr(response, "usage") and response.usage:
-
+                usage = response.usage
+                metadata.update(
+                    {
+                        "prompt_tokens": getattr(usage, "prompt_tokens", 0),
+                        "completion_tokens": getattr(usage, "completion_tokens", 0),
+                        "total_tokens": getattr(usage, "total_tokens", 0),
+                    }
+                )
+
+            # Detect content types
+            messages = kwargs.get("messages", [])
+            has_images = False
+            message_count = len(messages)
+
+            for message in messages:
+                if message.get("role") == "user":
+                    content = message.get("content")
+                    if isinstance(content, list):
+                        for item in content:
+                            if (
+                                isinstance(item, dict)
+                                and item.get("type") == "image_url"
+                            ):
+                                has_images = True
+                                break
+                    if has_images:
+                        break
+
+            metadata["message_count"] = message_count
+            metadata["has_images"] = has_images
+
+        except Exception as e:
+            logger.debug(f"Error extracting metadata: {e}")
+
+        return metadata
+
+    def _record_anthropic_conversation(self, kwargs, response):
+        """Record Anthropic conversation with enhanced content parsing"""
+        try:
+            messages = kwargs.get("messages", [])
+            model = kwargs.get("model", "claude-unknown")
+
+            # Extract user input with enhanced parsing
+            user_input = self._extract_anthropic_user_input(messages)
+
+            # Extract AI response with enhanced parsing
+            ai_output = self._extract_anthropic_ai_output(response)
+
+            # Calculate tokens
+            tokens_used = self._extract_anthropic_tokens(response)
+
+            # Enhanced metadata extraction
+            metadata = self._extract_anthropic_metadata(kwargs, response, tokens_used)
 
             # Record conversation
             self.record_conversation(
                 user_input=user_input,
                 ai_output=ai_output,
                 model=model,
-                metadata=
-                "integration": "openai_auto",
-                "api_type": "chat_completions",
-                "tokens_used": tokens_used,
-                "auto_recorded": True,
-                },
+                metadata=metadata,
             )
         except Exception as e:
-            logger.error(f"Failed to record
+            logger.error(f"Failed to record Anthropic conversation: {e}")
 
-    def
-        """
+    def _extract_anthropic_user_input(self, messages: List[Dict]) -> str:
+        """Extract user input from Anthropic messages with support for complex content types"""
+        user_input = ""
         try:
-
-            model = kwargs.get("model", "claude-unknown")
-
-            # Extract user input
-            user_input = ""
+            # Find the last user message
             for message in reversed(messages):
                 if message.get("role") == "user":
                     content = message.get("content", "")
-
-
-
-                                block.get("text", "")
-                                for block in content
-                                if isinstance(block, dict)
-                                and block.get("type") == "text"
-                            ]
-                        )
-                    else:
+
+                    if isinstance(content, str):
+                        # Simple string content
                         user_input = content
+                    elif isinstance(content, list):
+                        # Complex content (vision, multiple parts)
+                        text_parts = []
+                        image_count = 0
+
+                        for block in content:
+                            if isinstance(block, dict):
+                                if block.get("type") == "text":
+                                    text_parts.append(block.get("text", ""))
+                                elif block.get("type") == "image":
+                                    image_count += 1
+
+                        user_input = " ".join(text_parts)
+                        # Add image indicator if present
+                        if image_count > 0:
+                            user_input += f" [Contains {image_count} image(s)]"
+
                     break
+        except Exception as e:
+            logger.debug(f"Error extracting Anthropic user input: {e}")
+            user_input = "[Error extracting user input]"
 
-
-
+        return user_input
+
+    def _extract_anthropic_ai_output(self, response) -> str:
+        """Extract AI output from Anthropic response with support for various response types"""
+        ai_output = ""
+        try:
             if hasattr(response, "content") and response.content:
                 if isinstance(response.content, list):
-
-
-
-
-
-
-
+                    # Handle structured content (text blocks, tool use, etc.)
+                    text_parts = []
+                    tool_uses = []
+
+                    for block in response.content:
+                        try:
+                            # Handle text blocks
+                            if hasattr(block, "text") and block.text:
+                                text_parts.append(block.text)
+                            # Handle tool use blocks
+                            elif hasattr(block, "type"):
+                                block_type = getattr(block, "type", None)
+                                if block_type == "tool_use":
+                                    tool_name = getattr(block, "name", "unknown")
+                                    tool_input = getattr(block, "input", {})
+                                    tool_uses.append(
+                                        f"Used {tool_name} with {tool_input}"
+                                    )
+                            # Handle mock objects for testing (when type is accessible but not via hasattr)
+                            elif hasattr(block, "name") and hasattr(block, "input"):
+                                tool_name = getattr(block, "name", "unknown")
+                                tool_input = getattr(block, "input", {})
+                                tool_uses.append(f"Used {tool_name} with {tool_input}")
+                        except Exception as block_error:
+                            logger.debug(f"Error processing block: {block_error}")
+                            continue
+
+                    ai_output = " ".join(text_parts)
+                    if tool_uses:
+                        if ai_output:
+                            ai_output += " "
+                        ai_output += "[Tool uses: " + "; ".join(tool_uses) + "]"
+
+                elif isinstance(response.content, str):
+                    ai_output = response.content
                 else:
                     ai_output = str(response.content)
 
-
-
+        except Exception as e:
+            logger.debug(f"Error extracting Anthropic AI output: {e}")
+            ai_output = "[Error extracting AI response]"
+
+        return ai_output
+
+    def _extract_anthropic_tokens(self, response) -> int:
+        """Extract token usage from Anthropic response"""
+        tokens_used = 0
+        try:
             if hasattr(response, "usage") and response.usage:
                 input_tokens = getattr(response.usage, "input_tokens", 0)
                 output_tokens = getattr(response.usage, "output_tokens", 0)
                 tokens_used = input_tokens + output_tokens
+        except Exception as e:
+            logger.debug(f"Error extracting Anthropic tokens: {e}")
+
+        return tokens_used
+
+    def _extract_anthropic_metadata(
+        self, kwargs: Dict, response, tokens_used: int
+    ) -> Dict:
+        """Extract comprehensive metadata from Anthropic request and response"""
+        metadata = {
+            "integration": "anthropic_auto",
+            "api_type": "messages",
+            "tokens_used": tokens_used,
+            "auto_recorded": True,
+        }
+
+        try:
+            # Add request metadata
+            if "temperature" in kwargs:
+                metadata["temperature"] = kwargs["temperature"]
+            if "max_tokens" in kwargs:
+                metadata["max_tokens"] = kwargs["max_tokens"]
+            if "tools" in kwargs:
+                metadata["has_tools"] = True
+                metadata["tool_count"] = len(kwargs["tools"])
+
+            # Add response metadata
+            if hasattr(response, "stop_reason"):
+                metadata["stop_reason"] = response.stop_reason
+            if hasattr(response, "model"):
+                metadata["response_model"] = response.model
+
+            # Add detailed token usage if available
+            if hasattr(response, "usage") and response.usage:
+                usage = response.usage
+                metadata.update(
+                    {
+                        "input_tokens": getattr(usage, "input_tokens", 0),
+                        "output_tokens": getattr(usage, "output_tokens", 0),
+                        "total_tokens": tokens_used,
+                    }
+                )
+
+            # Detect content types
+            messages = kwargs.get("messages", [])
+            has_images = False
+            message_count = len(messages)
+
+            for message in messages:
+                if message.get("role") == "user":
+                    content = message.get("content")
+                    if isinstance(content, list):
+                        for item in content:
+                            if isinstance(item, dict) and item.get("type") == "image":
+                                has_images = True
+                                break
+                    if has_images:
+                        break
+
+            metadata["message_count"] = message_count
+            metadata["has_images"] = has_images
 
-            # Record conversation
-            self.record_conversation(
-                user_input=user_input,
-                ai_output=ai_output,
-                model=model,
-                metadata={
-                    "integration": "anthropic_auto",
-                    "api_type": "messages",
-                    "tokens_used": tokens_used,
-                    "auto_recorded": True,
-                },
-            )
         except Exception as e:
-            logger.
+            logger.debug(f"Error extracting Anthropic metadata: {e}")
 
-
-
-
-        """
+        return metadata
+
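For orientation, a sketch of how the Anthropic extraction above treats mixed text and tool-use content; the SimpleNamespace objects are hypothetical stand-ins for SDK block types:

    # Illustrative only: stand-in content blocks for _extract_anthropic_ai_output.
    from types import SimpleNamespace

    blocks = [
        SimpleNamespace(type="text", text="Let me check the weather."),
        SimpleNamespace(type="tool_use", name="get_weather", input={"city": "Paris"}),
    ]
    response = SimpleNamespace(content=blocks)
    # Expected result:
    # "Let me check the weather. [Tool uses: Used get_weather with {'city': 'Paris'}]"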
+    def _process_litellm_response(self, kwargs, response, start_time, end_time):
+        """Process and record LiteLLM response"""
         try:
+            # Extract user input from messages
+            messages = kwargs.get("messages", [])
             user_input = ""
-
-            for
-            if
-            user_input =
+
+            for message in reversed(messages):
+                if message.get("role") == "user":
+                    user_input = message.get("content", "")
                     break
 
-
-
+            # Extract AI output from response
+            ai_output = ""
+            if hasattr(response, "choices") and response.choices:
+                choice = response.choices[0]
+                if hasattr(choice, "message") and hasattr(choice.message, "content"):
+                    ai_output = choice.message.content or ""
+                elif hasattr(choice, "text"):
+                    ai_output = choice.text or ""
+
+            # Extract model
+            model = kwargs.get("model", "litellm-unknown")
+
+            # Calculate timing (convert to seconds for JSON serialization)
+            duration_seconds = (end_time - start_time) if start_time and end_time else 0
+            if hasattr(duration_seconds, "total_seconds"):
+                duration_seconds = duration_seconds.total_seconds()
+
+            # Prepare metadata
+            metadata = {
+                "integration": "litellm",
+                "auto_recorded": True,
+                "duration": float(duration_seconds),
+                "timestamp": time.time(),
+            }
 
-            #
-            tokens_used = 0
+            # Add token usage if available
             if hasattr(response, "usage") and response.usage:
-
+                usage = response.usage
+                metadata.update(
+                    {
+                        "prompt_tokens": getattr(usage, "prompt_tokens", 0),
+                        "completion_tokens": getattr(usage, "completion_tokens", 0),
+                        "total_tokens": getattr(usage, "total_tokens", 0),
+                    }
+                )
 
-            #
-
-
-
+            # Record the conversation
+            if user_input and ai_output:
+                self.record_conversation(
+                    user_input=user_input,
+                    ai_output=ai_output,
+                    model=model,
+                    metadata=metadata,
+                )
 
-
-
-            # Handle different types of time objects
-            if hasattr(start_time, "total_seconds"):  # timedelta
-                duration_ms = start_time.total_seconds() * 1000
-            elif isinstance(start_time, (int, float)) and isinstance(
-                end_time, (int, float)
-            ):
-                duration_ms = (end_time - start_time) * 1000
+        except Exception as e:
+            logger.error(f"Failed to process LiteLLM response: {e}")
 
-
-
-        except Exception:
-            # If timing calculation fails, just skip it
-            pass
+    # LiteLLM callback is now handled by the LiteLLMCallbackManager
+    # in memori.integrations.litellm_integration
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def _process_memory_sync(
+        self, chat_id: str, user_input: str, ai_output: str, model: str = "unknown"
+    ):
+        """Synchronous memory processing fallback"""
+        if not self.memory_agent:
+            logger.warning("Memory agent not available, skipping memory ingestion")
+            return
+
+        try:
+            # Run async processing in new event loop
+            import threading
+
+            def run_memory_processing():
+                new_loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(new_loop)
+                try:
+                    new_loop.run_until_complete(
+                        self._process_memory_async(
+                            chat_id, user_input, ai_output, model
+                        )
+                    )
+                except Exception as e:
+                    logger.error(f"Synchronous memory processing failed: {e}")
+                finally:
+                    new_loop.close()
+
+            # Run in background thread to avoid blocking
+            thread = threading.Thread(target=run_memory_processing, daemon=True)
+            thread.start()
+            logger.debug(
+                f"Memory processing started in background thread for {chat_id}"
             )
+
         except Exception as e:
-            logger.error(f"
+            logger.error(f"Failed to start synchronous memory processing: {e}")
+
+    def _parse_llm_response(self, response) -> tuple[str, str]:
+        """Extract text and model from various LLM response formats."""
+        if response is None:
+            return "", "unknown"
+
+        # String response
+        if isinstance(response, str):
+            return response, "unknown"
+
+        # Anthropic response
+        if hasattr(response, "content"):
+            text = ""
+            if isinstance(response.content, list):
+                text = "".join(b.text for b in response.content if hasattr(b, "text"))
+            else:
+                text = str(response.content)
+            return text, getattr(response, "model", "unknown")
+
+        # OpenAI response
+        if hasattr(response, "choices") and response.choices:
+            choice = response.choices[0]
+            text = (
+                getattr(choice.message, "content", "")
+                if hasattr(choice, "message")
+                else getattr(choice, "text", "")
+            )
+            return text or "", getattr(response, "model", "unknown")
+
+        # Dict response
+        if isinstance(response, dict):
+            return response.get(
+                "content", response.get("text", str(response))
+            ), response.get("model", "unknown")
+
+        # Fallback
+        return str(response), "unknown"
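The normalisation contract of _parse_llm_response, summarised with hypothetical inputs:

    # Illustrative only: (text, model) pairs per the branches above.
    #   _parse_llm_response(None)          -> ("", "unknown")
    #   _parse_llm_response("plain text")  -> ("plain text", "unknown")
    #   _parse_llm_response({"content": "hi", "model": "gpt-4o"})
    #                                      -> ("hi", "gpt-4o")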
 
     def record_conversation(
         self,
         user_input: str,
-        ai_output
-        model: str =
+        ai_output=None,
+        model: str = None,
         metadata: Optional[Dict[str, Any]] = None,
     ) -> str:
         """
-
+        Record a conversation.
 
         Args:
-            user_input:
-            ai_output:
-            model:
-            metadata:
+            user_input: User's message
+            ai_output: AI response (any format)
+            model: Optional model name override
+            metadata: Optional metadata
 
         Returns:
-            chat_id: Unique
+            chat_id: Unique conversation ID
         """
         if not self._enabled:
             raise MemoriError("Memori is not enabled. Call enable() first.")
 
-        #
-
-
+        # Parse response
+        response_text, detected_model = self._parse_llm_response(ai_output)
+        response_model = model or detected_model
 
+        # Generate ID and timestamp
         chat_id = str(uuid.uuid4())
         timestamp = datetime.now()
 
-
-
-
-
-
-
-
-
-
-
-
-        )
+        # Store conversation
+        self.db_manager.store_chat_history(
+            chat_id=chat_id,
+            user_input=user_input,
+            ai_output=response_text,
+            model=response_model,
+            timestamp=timestamp,
+            session_id=self._session_id,
+            namespace=self.namespace,
+            metadata=metadata or {},
+        )
 
-
-
-
+        # Always process into long-term memory when memory agent is available
+        if self.memory_agent:
+            self._schedule_memory_processing(
+                chat_id, user_input, response_text, response_model
+            )
 
-
-
+        logger.debug(f"Recorded conversation: {chat_id}")
+        return chat_id
 
-
-
+    def _schedule_memory_processing(
+        self, chat_id: str, user_input: str, ai_output: str, model: str
+    ):
+        """Schedule memory processing (async if possible, sync fallback)."""
+        try:
+            loop = asyncio.get_running_loop()
+            task = loop.create_task(
+                self._process_memory_async(chat_id, user_input, ai_output, model)
+            )
 
-
+            # Prevent garbage collection
+            if not hasattr(self, "_memory_tasks"):
+                self._memory_tasks = set()
+            self._memory_tasks.add(task)
+            task.add_done_callback(self._memory_tasks.discard)
+        except RuntimeError:
+            # No event loop, use sync fallback
+            logger.debug("No event loop, using synchronous memory processing")
+            self._process_memory_sync(chat_id, user_input, ai_output, model)
+
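A hedged usage sketch of the reworked recording path; the constructor arguments are assumptions, not taken from this diff:

    # Illustrative only: manual recording with the new signature.
    memori = Memori(database_connect="sqlite:///memori.db")  # hypothetical setup
    memori.enable()
    chat_id = memori.record_conversation(
        user_input="What's my favorite editor?",
        ai_output="You mentioned you prefer Neovim.",  # any supported response shape
    )
    # The chat is stored synchronously; long-term processing is scheduled on the
    # running event loop when one exists, else in a background thread.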
+    async def _process_memory_async(
         self, chat_id: str, user_input: str, ai_output: str, model: str = "unknown"
     ):
-        """Process conversation
+        """Process conversation with enhanced async memory categorization"""
         if not self.memory_agent:
             logger.warning("Memory agent not available, skipping memory ingestion")
             return
@@ -953,38 +1727,107 @@ class Memori:
                 relevant_skills=self._user_context.get("relevant_skills", []),
             )
 
-            #
-
+            # Get recent memories for deduplication
+            existing_memories = await self._get_recent_memories_for_dedup()
+
+            # Process conversation using async Pydantic-based memory agent
+            processed_memory = await self.memory_agent.process_conversation_async(
                 chat_id=chat_id,
                 user_input=user_input,
                 ai_output=ai_output,
                 context=context,
-
-
+                existing_memories=(
+                    [mem.summary for mem in existing_memories[:10]]
+                    if existing_memories
+                    else []
+                ),
             )
 
-            #
-
-
-
-            )
+            # Check for duplicates
+            duplicate_id = await self.memory_agent.detect_duplicates(
+                processed_memory, existing_memories
+            )
 
-
-
-
-
-
-
-
+            if duplicate_id:
+                processed_memory.duplicate_of = duplicate_id
+                logger.info(f"Memory marked as duplicate of {duplicate_id}")
+
+            # Apply filters
+            if self.memory_agent.should_filter_memory(
+                processed_memory, self.memory_filters
+            ):
+                logger.debug(f"Memory filtered out for chat {chat_id}")
+                return
+
+            # Store processed memory with new schema
+            memory_id = self.db_manager.store_long_term_memory_enhanced(
+                processed_memory, chat_id, self.namespace
+            )
+
+            if memory_id:
+                logger.debug(f"Stored processed memory {memory_id} for chat {chat_id}")
+
+                # Check for conscious context updates if promotion eligible and conscious_ingest enabled
+                if (
+                    processed_memory.promotion_eligible
+                    and self.conscious_agent
+                    and self.conscious_ingest
+                ):
+                    await self.conscious_agent.check_for_context_updates(
+                        self.db_manager, self.namespace
                     )
             else:
-                logger.
-                    f"Memory not stored for chat {chat_id}: {processed_memory.storage_reasoning}"
-                )
+                logger.warning(f"Failed to store memory for chat {chat_id}")
 
         except Exception as e:
             logger.error(f"Memory ingestion failed for {chat_id}: {e}")
 
+    async def _get_recent_memories_for_dedup(self) -> List:
+        """Get recent memories for deduplication check"""
+        try:
+            from sqlalchemy import text
+
+            from ..database.queries.memory_queries import MemoryQueries
+            from ..utils.pydantic_models import ProcessedLongTermMemory
+
+            with self.db_manager._get_connection() as connection:
+                result = connection.execute(
+                    text(MemoryQueries.SELECT_MEMORIES_FOR_DEDUPLICATION),
+                    {
+                        "namespace": self.namespace,
+                        "processed_for_duplicates": False,
+                        "limit": 20,
+                    },
+                )
+
+                memories = []
+                for row in result:
+                    try:
+                        # Create ProcessedLongTermMemory objects for proper comparison
+                        # Note: Query returns (memory_id, summary, searchable_content, classification, created_at)
+                        memory = ProcessedLongTermMemory(
+                            conversation_id=row[
+                                0
+                            ],  # Use memory_id as conversation_id for existing memories
+                            summary=row[1] or "",
+                            content=row[2] or "",
+                            classification=row[3] or "conversational",
+                            importance="medium",  # Default importance level for comparison
+                            promotion_eligible=False,  # Default for existing memories
+                            classification_reason="Existing memory loaded for deduplication check",  # Required field
+                        )
+                        memories.append(memory)
+                    except Exception as e:
+                        # Silently skip malformed memories from old data format
+                        logger.debug(f"Skipping malformed memory during dedup: {e}")
+                        continue
+
+                return memories
+
+        except Exception as e:
+            logger.error(f"Failed to get recent memories for dedup: {e}")
+            return []
+
     def retrieve_context(self, query: str, limit: int = 5) -> List[Dict[str, Any]]:
         """
         Retrieve relevant context for a query with priority on essential facts
@@ -1088,10 +1931,13 @@ class Memori:
         return self._session_id
 
     def get_integration_stats(self) -> List[Dict[str, Any]]:
-        """Get statistics from the
+        """Get statistics from the new interceptor system"""
         try:
+            # Get system status first
+            interceptor_status = self.get_interceptor_status()
+
             stats = {
-                "integration": "
+                "integration": "memori_system",
                 "enabled": self._enabled,
                 "session_id": self._session_id,
                 "namespace": self.namespace,
@@ -1099,58 +1945,62 @@ class Memori:
             }
 
             # LiteLLM stats
+            litellm_interceptor_status = interceptor_status.get("native", {})
             if LITELLM_AVAILABLE:
                 stats["providers"]["litellm"] = {
                     "available": True,
                     "method": "native_callbacks",
-                    "
-                    "
+                    "enabled": litellm_interceptor_status.get("enabled", False),
+                    "status": litellm_interceptor_status.get("status", "unknown"),
                 }
             else:
                 stats["providers"]["litellm"] = {
                     "available": False,
                     "method": "native_callbacks",
-                    "
+                    "enabled": False,
                 }
 
+            # Get interceptor status instead of checking wrapped attributes
+            interceptor_status = self.get_interceptor_status()
+
             # OpenAI stats
             try:
                 import openai
 
+                _ = openai  # Suppress unused import warning
+
+                openai_interceptor_status = interceptor_status.get("openai", {})
                 stats["providers"]["openai"] = {
                     "available": True,
-                    "method": "
-                    "
-
-                    if hasattr(openai, "OpenAI")
-                    else False
-                    ),
+                    "method": "litellm_native",
+                    "enabled": openai_interceptor_status.get("enabled", False),
+                    "status": openai_interceptor_status.get("status", "unknown"),
                 }
             except ImportError:
                 stats["providers"]["openai"] = {
                     "available": False,
-                    "method": "
-                    "
+                    "method": "litellm_native",
+                    "enabled": False,
                 }
 
             # Anthropic stats
             try:
                 import anthropic
 
+                _ = anthropic  # Suppress unused import warning
+
+                anthropic_interceptor_status = interceptor_status.get("anthropic", {})
                 stats["providers"]["anthropic"] = {
                     "available": True,
-                    "method": "
-                    "
-
-                    if hasattr(anthropic, "Anthropic")
-                    else False
-                    ),
+                    "method": "litellm_native",
+                    "enabled": anthropic_interceptor_status.get("enabled", False),
+                    "status": anthropic_interceptor_status.get("status", "unknown"),
                 }
             except ImportError:
                 stats["providers"]["anthropic"] = {
                     "available": False,
-                    "method": "
-                    "
+                    "method": "litellm_native",
+                    "enabled": False,
                 }
 
             return [stats]
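Roughly, the reshaped stats payload now reports interceptor state per provider; the field values below are illustrative, not captured output:

    # Illustrative only: approximate shape of get_integration_stats() output.
    [{
        "integration": "memori_system",
        "enabled": True,
        "session_id": "<uuid>",
        "namespace": "default",
        "providers": {
            "litellm": {"available": True, "method": "native_callbacks",
                        "enabled": True, "status": "active"},
            "openai": {"available": True, "method": "litellm_native",
                       "enabled": True, "status": "active"},
            "anthropic": {"available": False, "method": "litellm_native",
                          "enabled": False},
        },
    }]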
@@ -1195,7 +2045,7 @@ class Memori:
         """Get memories that contain a specific entity"""
         try:
             # This would use the entity index in the database
-            # For now, use keyword search as fallback
+            # For now, use keyword search as fallback (entity_type is ignored for now)
             return self.db_manager.search_memories(
                 query=entity_value, namespace=self.namespace, limit=limit
             )
@@ -1234,11 +2084,25 @@ class Memori:
 
             # If we have a running loop, schedule the task
             self._background_task = loop.create_task(self._background_analysis_loop())
+            # Add proper error handling callback
+            self._background_task.add_done_callback(
+                self._handle_background_task_completion
+            )
             logger.info("Background analysis task started")
 
         except Exception as e:
             logger.error(f"Failed to start background analysis: {e}")
 
+    def _handle_background_task_completion(self, task):
+        """Handle background task completion and cleanup"""
+        try:
+            if task.exception():
+                logger.error(f"Background task failed: {task.exception()}")
+        except asyncio.CancelledError:
+            logger.debug("Background task was cancelled")
+        except Exception as e:
+            logger.error(f"Error handling background task completion: {e}")
+
     def _stop_background_analysis(self):
         """Stop the background analysis task"""
         try:
@@ -1248,32 +2112,66 @@ class Memori:
         except Exception as e:
             logger.error(f"Failed to stop background analysis: {e}")
 
+    def cleanup(self):
+        """Clean up all async tasks and resources"""
+        try:
+            # Cancel background tasks
+            self._stop_background_analysis()
+
+            # Clean up memory processing tasks
+            if hasattr(self, "_memory_tasks"):
+                for task in self._memory_tasks.copy():
+                    if not task.done():
+                        task.cancel()
+                self._memory_tasks.clear()
+
+            logger.debug("Memori cleanup completed")
+        except Exception as e:
+            logger.error(f"Error during cleanup: {e}")
+
+    def __del__(self):
+        """Destructor to ensure cleanup"""
+        try:
+            self.cleanup()
+        except:
+            pass  # Ignore errors during destruction
+
     async def _background_analysis_loop(self):
-        """
-
+        """Background analysis loop for memory processing"""
+        try:
+            logger.debug("Background analysis loop started")
 
-
-
-
-
-
-            )
+            # For now, just run periodic conscious ingestion if enabled
+            if self.conscious_ingest and self.conscious_agent:
+                while True:
+                    try:
+                        await asyncio.sleep(300)  # Check every 5 minutes
 
-
-
+                        # Run conscious ingestion to check for new promotable memories
+                        await self.conscious_agent.run_conscious_ingest(
+                            self.db_manager, self.namespace
+                        )
+
+                        logger.debug("Periodic conscious analysis completed")
 
-
-
-
-
-
-
-
+                    except asyncio.CancelledError:
+                        logger.debug("Background analysis loop cancelled")
+                        break
+                    except Exception as e:
+                        logger.error(f"Background analysis error: {e}")
+                        await asyncio.sleep(60)  # Wait 1 minute before retry
+            else:
+                # If not using conscious ingest, just sleep
+                while True:
+                    await asyncio.sleep(3600)  # Sleep for 1 hour
 
-
+        except asyncio.CancelledError:
+            logger.debug("Background analysis loop cancelled")
+        except Exception as e:
+            logger.error(f"Background analysis loop failed: {e}")
 
     def trigger_conscious_analysis(self):
-        """Manually trigger conscious
+        """Manually trigger conscious context ingestion (for testing/immediate analysis)"""
         if not self.conscious_ingest or not self.conscious_agent:
             logger.warning("Conscious ingestion not enabled or agent not available")
             return
@@ -1283,11 +2181,11 @@ class Memori:
         try:
             loop = asyncio.get_running_loop()
             task = loop.create_task(
-                self.conscious_agent.
+                self.conscious_agent.run_conscious_ingest(
                     self.db_manager, self.namespace
                 )
             )
-            logger.info("Conscious
+            logger.info("Conscious context ingestion triggered")
             return task
         except RuntimeError:
             # No event loop, run synchronously in thread
@@ -1298,7 +2196,7 @@ class Memori:
                 asyncio.set_event_loop(new_loop)
                 try:
                     new_loop.run_until_complete(
-                        self.conscious_agent.
+                        self.conscious_agent.run_conscious_ingest(
                             self.db_manager, self.namespace
                         )
                    )
@@ -1307,29 +2205,170 @@ class Memori:
 
             thread = threading.Thread(target=run_analysis)
             thread.start()
-            logger.info("Conscious
+            logger.info("Conscious context ingestion triggered in separate thread")
+
+        except Exception as e:
+            logger.error(f"Failed to trigger conscious context ingestion: {e}")
+
+    def get_conscious_system_prompt(self) -> str:
+        """
+        Get conscious context as system prompt for direct injection.
+        Returns ALL short-term memory as formatted system prompt.
+        Use this for conscious_ingest mode.
+        """
+        try:
+            context = self._get_conscious_context()
+            if not context:
+                return ""
+
+            # Create system prompt with all short-term memory
+            system_prompt = "--- Your Short-Term Memory (Conscious Context) ---\n"
+            system_prompt += "This is your complete working memory. USE THIS INFORMATION TO ANSWER QUESTIONS:\n\n"
+
+            # Deduplicate and format context
+            seen_content = set()
+            for mem in context:
+                if isinstance(mem, dict):
+                    content = mem.get("searchable_content", "") or mem.get(
+                        "summary", ""
+                    )
+                    category = mem.get("category_primary", "")
+
+                    # Skip duplicates
+                    content_key = content.lower().strip()
+                    if content_key in seen_content:
+                        continue
+                    seen_content.add(content_key)
+
+                    system_prompt += f"[{category.upper()}] {content}\n"
+
+            system_prompt += "\nIMPORTANT: Use the above information to answer questions about the user.\n"
+            system_prompt += "-------------------------\n"
+
+            return system_prompt
+
+        except Exception as e:
+            logger.error(f"Failed to generate conscious system prompt: {e}")
+            return ""
+
+    def get_auto_ingest_system_prompt(self, user_input: str) -> str:
+        """
+        Get auto-ingest context as system prompt for direct injection.
+        Returns relevant memories based on user input as formatted system prompt.
+        Use this for auto_ingest mode.
+        """
+        try:
+            # For now, use recent short-term memories as a simple approach
+            # This avoids the search engine issues and still provides context
+            # TODO: Use user_input for intelligent context retrieval
+            context = self._get_conscious_context()  # Get recent short-term memories
+
+            if not context:
+                return ""
+
+            # Create system prompt with relevant memories (limited to prevent overwhelming)
+            system_prompt = "--- Relevant Memory Context ---\n"
+
+            # Take first 5 items to avoid too much context
+            seen_content = set()
+            for mem in context[:5]:
+                if isinstance(mem, dict):
+                    content = mem.get("searchable_content", "") or mem.get(
+                        "summary", ""
+                    )
+                    category = mem.get("category_primary", "")
+
+                    # Skip duplicates
+                    content_key = content.lower().strip()
+                    if content_key in seen_content:
+                        continue
+                    seen_content.add(content_key)
+
+                    if category.startswith("essential_"):
+                        system_prompt += f"[{category.upper()}] {content}\n"
+                    else:
+                        system_prompt += f"- {content}\n"
+
+            system_prompt += "-------------------------\n"
+
+            return system_prompt
 
         except Exception as e:
-            logger.error(f"Failed to
+            logger.error(f"Failed to generate auto-ingest system prompt: {e}")
+            return ""
+
+    def add_memory_to_messages(self, messages: list, user_input: str = None) -> list:
+        """
+        Add appropriate memory context to messages based on ingest mode.
+
+        Args:
+            messages: List of messages for LLM
+            user_input: User input for auto_ingest context retrieval (optional)
+
+        Returns:
+            Modified messages list with memory context added as system message
+        """
+        try:
+            system_prompt = ""
+
+            if self.conscious_ingest:
+                # One-time conscious context injection
+                if not self._conscious_context_injected:
+                    system_prompt = self.get_conscious_system_prompt()
+                    self._conscious_context_injected = True
+                    logger.info(
+                        "Conscious-ingest: Added complete working memory to system prompt"
+                    )
+                else:
+                    logger.debug("Conscious-ingest: Context already injected, skipping")
+
+            elif self.auto_ingest and user_input:
+                # Dynamic auto-ingest based on user input
+                system_prompt = self.get_auto_ingest_system_prompt(user_input)
+                logger.debug("Auto-ingest: Added relevant context to system prompt")
+
+            if system_prompt:
+                # Add to existing system message or create new one
+                messages_copy = messages.copy()
+
+                # Check if system message already exists
+                for msg in messages_copy:
+                    if msg.get("role") == "system":
+                        msg["content"] = system_prompt + "\n" + msg.get("content", "")
+                        return messages_copy
+
+                # No system message exists, add one at the beginning
+                messages_copy.insert(0, {"role": "system", "content": system_prompt})
+                return messages_copy
+
+            return messages
+
+        except Exception as e:
+            logger.error(f"Failed to add memory to messages: {e}")
+            return messages
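As a usage sketch, the injection helper slots in just before an LLM call; the client setup is assumed, not part of this diff:

    # Illustrative only: injecting memory context into an outgoing request.
    messages = [{"role": "user", "content": "Where did we leave off?"}]
    messages = memori.add_memory_to_messages(messages, user_input="Where did we leave off?")
    # conscious_ingest: full working memory is prepended once per session;
    # auto_ingest: query-relevant context is prepended on every call.
    response = client.chat.completions.create(model="gpt-4o", messages=messages)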
 
     def get_essential_conversations(self, limit: int = 10) -> List[Dict[str, Any]]:
         """Get essential conversations from short-term memory"""
         try:
+            from sqlalchemy import text
+
             # Get all conversations marked as essential
             with self.db_manager._get_connection() as connection:
                 query = """
                     SELECT memory_id, summary, category_primary, importance_score,
                            created_at, searchable_content, processed_data
                     FROM short_term_memory
-                    WHERE namespace =
+                    WHERE namespace = :namespace AND category_primary LIKE 'essential_%'
                     ORDER BY importance_score DESC, created_at DESC
-                    LIMIT
+                    LIMIT :limit
                 """
 
-
+                result = connection.execute(
+                    text(query), {"namespace": self.namespace, "limit": limit}
+                )
 
                 essential_conversations = []
-                for row in
+                for row in result:
                     essential_conversations.append(
                         {
                             "memory_id": row[0],
@@ -1347,3 +2386,106 @@ class Memori:
         except Exception as e:
             logger.error(f"Failed to get essential conversations: {e}")
             return []
+
+    def create_openai_client(self, **kwargs):
+        """
+        Create an OpenAI client with automatic memory recording.
+
+        This method creates a MemoriOpenAIInterceptor that automatically records
+        all OpenAI API calls to memory using the inheritance-based approach.
+
+        Args:
+            **kwargs: Additional arguments passed to OpenAI client (e.g., api_key)
+                These override any settings from the Memori provider config
+
+        Returns:
+            MemoriOpenAIInterceptor instance that works as a drop-in replacement
+            for the standard OpenAI client
+
+        Example:
+            memori = Memori(api_key="sk-...")
+            memori.enable()
+
+            # Create interceptor client
+            client = memori.create_openai_client()
+
+            # Use exactly like standard OpenAI client
+            response = client.chat.completions.create(
+                model="gpt-4o",
+                messages=[{"role": "user", "content": "Hello!"}]
+            )
+            # Conversation is automatically recorded
+        """
+        try:
+            from ..integrations.openai_integration import create_openai_client
+
+            return create_openai_client(self, self.provider_config, **kwargs)
+        except ImportError as e:
+            logger.error(f"Failed to import OpenAI integration: {e}")
+            raise ImportError(
+                "OpenAI integration not available. Install with: pip install openai"
+            ) from e
+
+    def create_openai_wrapper(self, **kwargs):
+        """
+        Create a legacy OpenAI wrapper (backward compatibility).
+
+        DEPRECATED: Use create_openai_client() instead for better integration.
+
+        Returns:
+            MemoriOpenAI wrapper instance
+        """
+        try:
+            from ..integrations.openai_integration import MemoriOpenAI
+
+            return MemoriOpenAI(self, **kwargs)
+        except ImportError as e:
+            logger.error(f"Failed to import OpenAI integration: {e}")
+            raise ImportError(
+                "OpenAI integration not available. Install with: pip install openai"
+            ) from e
+
+    # Conversation management methods
+
+    def get_conversation_stats(self) -> Dict[str, Any]:
+        """Get conversation manager statistics"""
+        return self.conversation_manager.get_session_stats()
+
+    def clear_conversation_history(self, session_id: str = None):
+        """
+        Clear conversation history
+
+        Args:
+            session_id: Specific session to clear. If None, clears current session.
+        """
+        if session_id is None:
+            session_id = self._session_id
+        self.conversation_manager.clear_session(session_id)
+        logger.info(f"Cleared conversation history for session: {session_id}")
+
+    def clear_all_conversations(self):
+        """Clear all conversation histories"""
+        self.conversation_manager.clear_all_sessions()
+        logger.info("Cleared all conversation histories")
+
+    def start_new_conversation(self) -> str:
+        """
+        Start a new conversation session
+
+        Returns:
+            New session ID
+        """
+        old_session_id = self._session_id
+        self._session_id = str(uuid.uuid4())
+
+        # Reset conscious context injection flag for new conversation
+        self._conscious_context_injected = False
+
+        logger.info(
+            f"Started new conversation: {self._session_id} (previous: {old_session_id})"
+        )
+        return self._session_id
+
+    def get_current_session_id(self) -> str:
+        """Get current conversation session ID"""
+        return self._session_id
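Finally, a small sketch of the new session-management surface; the instance name is assumed:

    # Illustrative only: cycling conversation sessions.
    memori.get_conversation_stats()           # conversation manager statistics
    memori.clear_conversation_history()       # clears the current session only
    new_id = memori.start_new_conversation()  # fresh session id; conscious context
                                              # will be re-injected on the next call
    assert memori.get_current_session_id() == new_id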