memorisdk 2.0.0__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of memorisdk might be problematic. Click here for more details.

Files changed (63) hide show
  1. memori/__init__.py +3 -3
  2. memori/agents/conscious_agent.py +289 -77
  3. memori/agents/memory_agent.py +19 -9
  4. memori/agents/retrieval_agent.py +138 -63
  5. memori/config/manager.py +7 -7
  6. memori/config/memory_manager.py +25 -25
  7. memori/config/settings.py +13 -6
  8. memori/core/conversation.py +15 -15
  9. memori/core/database.py +14 -13
  10. memori/core/memory.py +438 -123
  11. memori/core/providers.py +25 -25
  12. memori/database/__init__.py +11 -0
  13. memori/database/adapters/__init__.py +11 -0
  14. memori/database/adapters/mongodb_adapter.py +739 -0
  15. memori/database/adapters/mysql_adapter.py +8 -8
  16. memori/database/adapters/postgresql_adapter.py +6 -6
  17. memori/database/adapters/sqlite_adapter.py +6 -6
  18. memori/database/auto_creator.py +8 -9
  19. memori/database/connection_utils.py +5 -5
  20. memori/database/connectors/__init__.py +11 -0
  21. memori/database/connectors/base_connector.py +18 -19
  22. memori/database/connectors/mongodb_connector.py +527 -0
  23. memori/database/connectors/mysql_connector.py +13 -15
  24. memori/database/connectors/postgres_connector.py +12 -12
  25. memori/database/connectors/sqlite_connector.py +11 -11
  26. memori/database/models.py +2 -2
  27. memori/database/mongodb_manager.py +1402 -0
  28. memori/database/queries/base_queries.py +3 -4
  29. memori/database/queries/chat_queries.py +3 -5
  30. memori/database/queries/entity_queries.py +3 -5
  31. memori/database/queries/memory_queries.py +3 -5
  32. memori/database/query_translator.py +11 -11
  33. memori/database/schema_generators/__init__.py +11 -0
  34. memori/database/schema_generators/mongodb_schema_generator.py +666 -0
  35. memori/database/schema_generators/mysql_schema_generator.py +2 -4
  36. memori/database/search/__init__.py +11 -0
  37. memori/database/search/mongodb_search_adapter.py +653 -0
  38. memori/database/search/mysql_search_adapter.py +8 -8
  39. memori/database/search/sqlite_search_adapter.py +6 -6
  40. memori/database/search_service.py +218 -66
  41. memori/database/sqlalchemy_manager.py +72 -25
  42. memori/integrations/__init__.py +1 -1
  43. memori/integrations/anthropic_integration.py +1 -3
  44. memori/integrations/litellm_integration.py +23 -6
  45. memori/integrations/openai_integration.py +31 -3
  46. memori/tools/memory_tool.py +104 -13
  47. memori/utils/exceptions.py +58 -58
  48. memori/utils/helpers.py +11 -12
  49. memori/utils/input_validator.py +10 -12
  50. memori/utils/logging.py +4 -4
  51. memori/utils/pydantic_models.py +57 -57
  52. memori/utils/query_builder.py +20 -20
  53. memori/utils/security_audit.py +28 -28
  54. memori/utils/security_integration.py +9 -9
  55. memori/utils/transaction_manager.py +20 -19
  56. memori/utils/validators.py +6 -6
  57. {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/METADATA +36 -20
  58. memorisdk-2.1.0.dist-info/RECORD +71 -0
  59. memori/scripts/llm_text.py +0 -50
  60. memorisdk-2.0.0.dist-info/RECORD +0 -67
  61. {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/WHEEL +0 -0
  62. {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/licenses/LICENSE +0 -0
  63. {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1402 @@
1
+ """
2
+ MongoDB-based database manager for Memori v2.0
3
+ Provides MongoDB support parallel to SQLAlchemy with same interface
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import json
9
+ import uuid
10
+ from datetime import datetime, timezone
11
+ from typing import TYPE_CHECKING, Any
12
+ from urllib.parse import urlparse
13
+
14
+ from loguru import logger
15
+
16
+ if TYPE_CHECKING:
17
+ from pymongo import MongoClient
18
+ from pymongo.collection import Collection
19
+ from pymongo.database import Database
20
+
21
+ try:
22
+ import pymongo # noqa: F401
23
+ from bson import ObjectId # noqa: F401
24
+ from pymongo import MongoClient as _MongoClient
25
+ from pymongo.collection import Collection as _Collection
26
+ from pymongo.database import Database as _Database
27
+ from pymongo.errors import ( # noqa: F401
28
+ ConnectionFailure,
29
+ DuplicateKeyError,
30
+ OperationFailure,
31
+ )
32
+
33
+ PYMONGO_AVAILABLE = True
34
+ MongoClient = _MongoClient
35
+ Collection = _Collection
36
+ Database = _Database
37
+ except ImportError:
38
+ PYMONGO_AVAILABLE = False
39
+ MongoClient = None # type: ignore
40
+ Collection = None # type: ignore
41
+ Database = None # type: ignore
42
+ logger.warning("pymongo not available - MongoDB support disabled")
43
+
44
+ from ..utils.exceptions import DatabaseError
45
+ from ..utils.pydantic_models import ProcessedLongTermMemory
46
+
47
+
48
class MongoDBDatabaseManager:
    """MongoDB-based database manager with interface compatible with SQLAlchemy manager"""

    # Constants for collection names (mirror the SQLAlchemy table names).
    CHAT_HISTORY_COLLECTION = "chat_history"
    SHORT_TERM_MEMORY_COLLECTION = "short_term_memory"
    LONG_TERM_MEMORY_COLLECTION = "long_term_memory"

    # Database type identifier for database-agnostic code
    database_type = "mongodb"

    def __init__(
        self, database_connect: str, template: str = "basic", schema_init: bool = True
    ):
        """Create a manager bound to *database_connect* (a MongoDB URI).

        Args:
            database_connect: mongodb:// or mongodb+srv:// connection string.
            template: schema template name; stored but not read anywhere in
                this class as far as visible here — TODO confirm against callers.
            schema_init: when False, initialize_schema() becomes a no-op.

        Raises:
            DatabaseError: if pymongo is not installed.
        """
        if not PYMONGO_AVAILABLE:
            raise DatabaseError(
                "MongoDB support requires pymongo. Install with: pip install pymongo"
            )

        self.database_connect = database_connect
        self.template = template
        self.schema_init = schema_init

        # Parse MongoDB connection string into host/port/database_name/
        # username/password/options (with localhost fallbacks on failure).
        self._parse_connection_string()

        # Connection handles are created lazily by _get_client()/_get_database().
        self.client = None
        self.database = None
        # NOTE(review): re-assigns the identical class-level attribute per instance.
        self.database_type = "mongodb"

        # Collection names (matching SQLAlchemy table names)
        # NOTE(review): these shadow the identical class-level constants above.
        self.CHAT_HISTORY_COLLECTION = "chat_history"
        self.SHORT_TERM_MEMORY_COLLECTION = "short_term_memory"
        self.LONG_TERM_MEMORY_COLLECTION = "long_term_memory"

        # Collections cache (name -> pymongo Collection), filled by _get_collection().
        self._collections = {}

        logger.info(f"Initialized MongoDB database manager for {self.database_name}")
88
+
89
+ def _parse_connection_string(self):
90
+ """Parse MongoDB connection string to extract components"""
91
+ try:
92
+ # Handle both mongodb:// and mongodb+srv:// schemes
93
+ parsed = urlparse(self.database_connect)
94
+
95
+ # Extract host - ensure it's a proper hostname/IP
96
+ hostname = parsed.hostname
97
+ if hostname and hostname != "localhost":
98
+ # Check if it's a valid hostname/IP, if not fall back to localhost
99
+ import socket
100
+
101
+ try:
102
+ socket.gethostbyname(hostname)
103
+ self.host = hostname
104
+ except socket.gaierror:
105
+ logger.warning(
106
+ f"Cannot resolve hostname '{hostname}', falling back to localhost"
107
+ )
108
+ self.host = "localhost"
109
+ else:
110
+ self.host = hostname or "localhost"
111
+
112
+ self.port = parsed.port or 27017
113
+ self.database_name = parsed.path.lstrip("/") or "memori"
114
+ self.username = parsed.username
115
+ self.password = parsed.password
116
+
117
+ # Extract query parameters
118
+ self.options = {}
119
+ if parsed.query:
120
+ params = parsed.query.split("&")
121
+ for param in params:
122
+ if "=" in param:
123
+ key, value = param.split("=", 1)
124
+ self.options[key] = value
125
+
126
+ logger.debug(
127
+ f"Parsed MongoDB connection: {self.host}:{self.port}/{self.database_name}"
128
+ )
129
+
130
+ except Exception as e:
131
+ logger.warning(f"Failed to parse MongoDB connection string: {e}")
132
+ # Set defaults
133
+ self.host = "localhost"
134
+ self.port = 27017
135
+ self.database_name = "memori"
136
+ self.username = None
137
+ self.password = None
138
+ self.options = {}
139
+
140
+ def _get_client(self) -> MongoClient:
141
+ """Get MongoDB client connection with caching and fallbacks"""
142
+ if self.client is None:
143
+ try:
144
+ # Create MongoDB client with appropriate options
145
+ client_options = {
146
+ "serverSelectionTimeoutMS": 5000, # 5 second timeout
147
+ "connectTimeoutMS": 10000, # 10 second connect timeout
148
+ "socketTimeoutMS": 10000, # 10 second socket timeout
149
+ "maxPoolSize": 50, # Connection pool size
150
+ "retryWrites": True, # Enable retryable writes
151
+ "directConnection": True, # Direct connection to avoid replica set issues
152
+ }
153
+
154
+ # Add any additional options from connection string
155
+ client_options.update(self.options)
156
+
157
+ # Try original connection string first
158
+ try:
159
+ self.client = MongoClient(self.database_connect, **client_options)
160
+ # Test connection
161
+ self.client.admin.command("ping")
162
+ logger.info("Connected to MongoDB using original connection string")
163
+ except Exception as original_error:
164
+ logger.warning(f"Original connection failed: {original_error}")
165
+
166
+ # Try fallback with explicit host:port
167
+ fallback_uri = (
168
+ f"mongodb://{self.host}:{self.port}/{self.database_name}"
169
+ )
170
+ logger.info(f"Trying fallback connection: {fallback_uri}")
171
+
172
+ self.client = MongoClient(fallback_uri, **client_options)
173
+ # Test connection
174
+ self.client.admin.command("ping")
175
+ logger.info(
176
+ f"Connected to MongoDB at {self.host}:{self.port}/{self.database_name}"
177
+ )
178
+
179
+ except Exception as e:
180
+ error_msg = f"Failed to connect to MongoDB: {e}"
181
+ logger.error(error_msg)
182
+ logger.error("Please check that:")
183
+ logger.error("1. MongoDB is running")
184
+ logger.error("2. Connection string is correct")
185
+ logger.error("3. Network connectivity is available")
186
+ raise DatabaseError(error_msg)
187
+
188
+ return self.client
189
+
190
    def _get_database(self) -> Database:
        """Get MongoDB database with caching and creation if needed"""
        if self.database is None:
            client = self._get_client()
            self.database = client[self.database_name]

            # Ensure database exists by creating a dummy collection if needed
            try:
                # Try to get database stats - this will fail if DB doesn't exist
                # NOTE(review): MongoDB documents this command as "dbStats";
                # lowercase "dbstats" appears to rely on a legacy alias — confirm.
                self.database.command("dbstats")
            except Exception:
                # Database doesn't exist, create it by creating a dummy collection
                # (MongoDB only materializes a database once it holds a collection;
                # presumably dropping "_init" right after still leaves the DB
                # registered — TODO confirm, as empty databases may vanish again).
                logger.info(f"Creating MongoDB database: {self.database_name}")
                self.database.create_collection("_init")
                # Remove the dummy collection
                self.database.drop_collection("_init")
                logger.info(f"Database {self.database_name} created successfully")

        return self.database
209
+
210
+ def _get_collection(self, collection_name: str) -> Collection:
211
+ """Get MongoDB collection with caching"""
212
+ if collection_name not in self._collections:
213
+ database = self._get_database()
214
+ self._collections[collection_name] = database[collection_name]
215
+ return self._collections[collection_name]
216
+
217
+ def _convert_datetime_fields(self, document: dict[str, Any]) -> dict[str, Any]:
218
+ """Convert datetime strings to datetime objects"""
219
+ datetime_fields = [
220
+ "created_at",
221
+ "expires_at",
222
+ "last_accessed",
223
+ "extraction_timestamp",
224
+ "timestamp",
225
+ ]
226
+
227
+ for field in datetime_fields:
228
+ if field in document and document[field] is not None:
229
+ if isinstance(document[field], str):
230
+ try:
231
+ # Handle various ISO format variations
232
+ document[field] = datetime.fromisoformat(
233
+ document[field].replace("Z", "+00:00")
234
+ )
235
+ except:
236
+ document[field] = datetime.now(timezone.utc)
237
+ elif not isinstance(document[field], datetime):
238
+ document[field] = datetime.now(timezone.utc)
239
+
240
+ # Add created_at if missing
241
+ if "created_at" not in document:
242
+ document["created_at"] = datetime.now(timezone.utc)
243
+
244
+ return document
245
+
246
+ def _convert_to_dict(self, document: dict[str, Any]) -> dict[str, Any]:
247
+ """Convert MongoDB document to dictionary format compatible with SQLAlchemy results"""
248
+ if not document:
249
+ return {}
250
+
251
+ result = document.copy()
252
+
253
+ # Convert ObjectId to string
254
+ if "_id" in result:
255
+ result["_id"] = str(result["_id"])
256
+
257
+ # Convert datetime objects to ISO strings for compatibility
258
+ datetime_fields = [
259
+ "created_at",
260
+ "expires_at",
261
+ "last_accessed",
262
+ "extraction_timestamp",
263
+ "timestamp",
264
+ ]
265
+ for field in datetime_fields:
266
+ if field in result and isinstance(result[field], datetime):
267
+ result[field] = result[field].isoformat()
268
+
269
+ # Ensure JSON fields are properly handled
270
+ json_fields = [
271
+ "processed_data",
272
+ "entities_json",
273
+ "keywords_json",
274
+ "supersedes_json",
275
+ "related_memories_json",
276
+ "metadata_json",
277
+ ]
278
+ for field in json_fields:
279
+ if field in result and isinstance(result[field], str):
280
+ try:
281
+ result[field] = json.loads(result[field])
282
+ except:
283
+ pass # Keep as string if not valid JSON
284
+
285
+ return result
286
+
287
+ def initialize_schema(self):
288
+ """Initialize MongoDB collections and indexes"""
289
+ if not self.schema_init:
290
+ logger.info("Schema initialization disabled (schema_init=False)")
291
+ return
292
+
293
+ try:
294
+ database = self._get_database()
295
+ existing_collections = database.list_collection_names()
296
+
297
+ # Create collections if they don't exist
298
+ collections = [
299
+ self.CHAT_HISTORY_COLLECTION,
300
+ self.SHORT_TERM_MEMORY_COLLECTION,
301
+ self.LONG_TERM_MEMORY_COLLECTION,
302
+ ]
303
+
304
+ for collection_name in collections:
305
+ if collection_name not in existing_collections:
306
+ database.create_collection(collection_name)
307
+ logger.info(f"Created MongoDB collection: {collection_name}")
308
+
309
+ # Create indexes for performance
310
+ self._create_indexes()
311
+
312
+ logger.info("MongoDB schema initialized successfully")
313
+
314
+ except Exception as e:
315
+ logger.error(f"Failed to initialize MongoDB schema: {e}")
316
+ raise DatabaseError(f"Failed to initialize MongoDB schema: {e}")
317
+
318
+ def _create_indexes(self):
319
+ """Create essential indexes for performance"""
320
+ try:
321
+ # Chat history indexes
322
+ chat_collection = self._get_collection(self.CHAT_HISTORY_COLLECTION)
323
+ chat_collection.create_index([("chat_id", 1)], unique=True, background=True)
324
+ chat_collection.create_index(
325
+ [("namespace", 1), ("session_id", 1)], background=True
326
+ )
327
+ chat_collection.create_index([("timestamp", -1)], background=True)
328
+ chat_collection.create_index([("model", 1)], background=True)
329
+
330
+ # Short-term memory indexes
331
+ st_collection = self._get_collection(self.SHORT_TERM_MEMORY_COLLECTION)
332
+ st_collection.create_index([("memory_id", 1)], unique=True, background=True)
333
+ st_collection.create_index(
334
+ [("namespace", 1), ("category_primary", 1), ("importance_score", -1)],
335
+ background=True,
336
+ )
337
+ st_collection.create_index([("expires_at", 1)], background=True)
338
+ st_collection.create_index([("created_at", -1)], background=True)
339
+ st_collection.create_index([("is_permanent_context", 1)], background=True)
340
+
341
+ # Enhanced text search index for short-term memory with weights
342
+ try:
343
+ # Check if text index already exists
344
+ existing_indexes = st_collection.list_indexes()
345
+ text_index_exists = any(
346
+ idx.get("name") == "text_search_index" for idx in existing_indexes
347
+ )
348
+
349
+ if not text_index_exists:
350
+ st_collection.create_index(
351
+ [
352
+ ("searchable_content", "text"),
353
+ ("summary", "text"),
354
+ ("topic", "text"),
355
+ ],
356
+ background=True, # Use background=True for non-blocking
357
+ weights={
358
+ "searchable_content": 10, # Highest weight for main content
359
+ "summary": 5, # Medium weight for summary
360
+ "topic": 3, # Lower weight for topic
361
+ },
362
+ name="text_search_index",
363
+ )
364
+ logger.info(
365
+ "Created enhanced text search index for short-term memory with weights"
366
+ )
367
+ else:
368
+ logger.debug(
369
+ "Text search index already exists for short-term memory"
370
+ )
371
+ except Exception as e:
372
+ logger.warning(f"Text index creation failed for short-term memory: {e}")
373
+
374
+ # Long-term memory indexes
375
+ lt_collection = self._get_collection(self.LONG_TERM_MEMORY_COLLECTION)
376
+ lt_collection.create_index([("memory_id", 1)], unique=True, background=True)
377
+ lt_collection.create_index(
378
+ [("namespace", 1), ("category_primary", 1), ("importance_score", -1)],
379
+ background=True,
380
+ )
381
+ lt_collection.create_index([("classification", 1)], background=True)
382
+ lt_collection.create_index([("topic", 1)], background=True)
383
+ lt_collection.create_index([("created_at", -1)], background=True)
384
+ lt_collection.create_index([("conscious_processed", 1)], background=True)
385
+ lt_collection.create_index(
386
+ [("processed_for_duplicates", 1)], background=True
387
+ )
388
+ lt_collection.create_index([("promotion_eligible", 1)], background=True)
389
+
390
+ # Enhanced text search index for long-term memory with weights
391
+ try:
392
+ # Check if text index already exists
393
+ existing_indexes = lt_collection.list_indexes()
394
+ text_index_exists = any(
395
+ idx.get("name") == "text_search_index" for idx in existing_indexes
396
+ )
397
+
398
+ if not text_index_exists:
399
+ lt_collection.create_index(
400
+ [
401
+ ("searchable_content", "text"),
402
+ ("summary", "text"),
403
+ ("topic", "text"),
404
+ ("classification_reason", "text"),
405
+ ],
406
+ background=True, # Use background=True for non-blocking
407
+ weights={
408
+ "searchable_content": 10, # Highest weight for main content
409
+ "summary": 8, # High weight for summary
410
+ "topic": 5, # Medium weight for topic
411
+ "classification_reason": 2, # Lower weight for reasoning
412
+ },
413
+ name="text_search_index",
414
+ )
415
+ logger.info(
416
+ "Created enhanced text search index for long-term memory with weights"
417
+ )
418
+ else:
419
+ logger.debug(
420
+ "Text search index already exists for long-term memory"
421
+ )
422
+ except Exception as e:
423
+ logger.warning(f"Text index creation failed for long-term memory: {e}")
424
+
425
+ # Verify text indexes are functional
426
+ self._verify_text_indexes()
427
+
428
+ logger.debug("MongoDB indexes created successfully")
429
+
430
+ except Exception as e:
431
+ logger.warning(f"Failed to create some MongoDB indexes: {e}")
432
+
433
+ def _verify_text_indexes(self):
434
+ """Verify that text indexes are functional by performing test searches"""
435
+ try:
436
+ # Test short-term memory text index
437
+ st_collection = self._get_collection(self.SHORT_TERM_MEMORY_COLLECTION)
438
+ try:
439
+ # Perform a simple text search to verify index works
440
+ _ = st_collection.find_one({"$text": {"$search": "test"}})
441
+ logger.debug("Short-term memory text index verification successful")
442
+ except Exception as e:
443
+ logger.warning(
444
+ f"Short-term memory text index may not be functional: {e}"
445
+ )
446
+
447
+ # Test long-term memory text index
448
+ lt_collection = self._get_collection(self.LONG_TERM_MEMORY_COLLECTION)
449
+ try:
450
+ # Perform a simple text search to verify index works
451
+ _ = lt_collection.find_one({"$text": {"$search": "test"}})
452
+ logger.debug("Long-term memory text index verification successful")
453
+ except Exception as e:
454
+ logger.warning(
455
+ f"Long-term memory text index may not be functional: {e}"
456
+ )
457
+
458
+ # Check if text indexes exist
459
+ st_indexes = list(st_collection.list_indexes())
460
+ lt_indexes = list(lt_collection.list_indexes())
461
+
462
+ st_has_text_index = any(
463
+ "text" in idx.get("key", {}).values() for idx in st_indexes
464
+ )
465
+ lt_has_text_index = any(
466
+ "text" in idx.get("key", {}).values() for idx in lt_indexes
467
+ )
468
+
469
+ if st_has_text_index:
470
+ logger.info("Short-term memory collection has text index")
471
+ else:
472
+ logger.warning("Short-term memory collection missing text index")
473
+
474
+ if lt_has_text_index:
475
+ logger.info("Long-term memory collection has text index")
476
+ else:
477
+ logger.warning("Long-term memory collection missing text index")
478
+
479
+ except Exception as e:
480
+ logger.error(f"Text index verification failed: {e}")
481
+
482
+ def store_chat_history(
483
+ self,
484
+ chat_id: str,
485
+ user_input: str,
486
+ ai_output: str,
487
+ model: str,
488
+ timestamp: datetime,
489
+ session_id: str,
490
+ namespace: str = "default",
491
+ tokens_used: int = 0,
492
+ metadata: dict[str, Any] | None = None,
493
+ ):
494
+ """Store chat history in MongoDB"""
495
+ try:
496
+ collection = self._get_collection(self.CHAT_HISTORY_COLLECTION)
497
+
498
+ document = {
499
+ "chat_id": chat_id,
500
+ "user_input": user_input,
501
+ "ai_output": ai_output,
502
+ "model": model,
503
+ "timestamp": timestamp,
504
+ "session_id": session_id,
505
+ "namespace": namespace,
506
+ "tokens_used": tokens_used,
507
+ "metadata_json": metadata or {},
508
+ }
509
+
510
+ # Convert datetime fields
511
+ document = self._convert_datetime_fields(document)
512
+
513
+ # Use upsert (insert or update) for compatibility with SQLAlchemy behavior
514
+ collection.replace_one({"chat_id": chat_id}, document, upsert=True)
515
+
516
+ logger.debug(f"Stored chat history: {chat_id}")
517
+
518
+ except Exception as e:
519
+ logger.error(f"Failed to store chat history: {e}")
520
+ raise DatabaseError(f"Failed to store chat history: {e}")
521
+
522
+ def get_chat_history(
523
+ self,
524
+ namespace: str = "default",
525
+ session_id: str | None = None,
526
+ limit: int = 10,
527
+ ) -> list[dict[str, Any]]:
528
+ """Get chat history from MongoDB"""
529
+ try:
530
+ collection = self._get_collection(self.CHAT_HISTORY_COLLECTION)
531
+
532
+ # Build filter
533
+ filter_doc = {"namespace": namespace}
534
+ if session_id:
535
+ filter_doc["session_id"] = session_id
536
+
537
+ # Execute query
538
+ cursor = collection.find(filter_doc).sort("timestamp", -1).limit(limit)
539
+
540
+ results = []
541
+ for document in cursor:
542
+ results.append(self._convert_to_dict(document))
543
+
544
+ logger.debug(f"Retrieved {len(results)} chat history entries")
545
+ return results
546
+
547
+ except Exception as e:
548
+ logger.error(f"Failed to get chat history: {e}")
549
+ return []
550
+
551
+ def store_short_term_memory(
552
+ self,
553
+ memory_id: str,
554
+ processed_data: str,
555
+ importance_score: float,
556
+ category_primary: str,
557
+ retention_type: str,
558
+ namespace: str = "default",
559
+ expires_at: datetime | None = None,
560
+ searchable_content: str = "",
561
+ summary: str = "",
562
+ is_permanent_context: bool = False,
563
+ metadata: dict[str, Any] | None = None,
564
+ ):
565
+ """Store short-term memory in MongoDB"""
566
+ try:
567
+ collection = self._get_collection(self.SHORT_TERM_MEMORY_COLLECTION)
568
+
569
+ document = {
570
+ "memory_id": memory_id,
571
+ "processed_data": processed_data,
572
+ "importance_score": importance_score,
573
+ "category_primary": category_primary,
574
+ "retention_type": retention_type,
575
+ "namespace": namespace,
576
+ "created_at": datetime.now(timezone.utc),
577
+ "expires_at": expires_at,
578
+ "searchable_content": searchable_content,
579
+ "summary": summary,
580
+ "is_permanent_context": is_permanent_context,
581
+ "metadata_json": metadata or {},
582
+ "access_count": 0,
583
+ "last_accessed": datetime.now(timezone.utc),
584
+ }
585
+
586
+ # Convert datetime fields
587
+ document = self._convert_datetime_fields(document)
588
+
589
+ # Use upsert (insert or update) for compatibility with SQLAlchemy behavior
590
+ collection.replace_one({"memory_id": memory_id}, document, upsert=True)
591
+
592
+ logger.debug(f"Stored short-term memory: {memory_id}")
593
+
594
+ except Exception as e:
595
+ logger.error(f"Failed to store short-term memory: {e}")
596
+ raise DatabaseError(f"Failed to store short-term memory: {e}")
597
+
598
+ def find_short_term_memory_by_id(
599
+ self,
600
+ memory_id: str,
601
+ namespace: str = "default",
602
+ ) -> dict[str, Any] | None:
603
+ """Find a specific short-term memory by memory_id"""
604
+ try:
605
+ collection = self._get_collection(self.SHORT_TERM_MEMORY_COLLECTION)
606
+
607
+ # Find memory by memory_id and namespace
608
+ document = collection.find_one(
609
+ {"memory_id": memory_id, "namespace": namespace}
610
+ )
611
+
612
+ if document:
613
+ return self._convert_to_dict(document)
614
+ return None
615
+
616
+ except Exception as e:
617
+ logger.error(f"Failed to find short-term memory by ID {memory_id}: {e}")
618
+ return None
619
+
620
+ def get_short_term_memory(
621
+ self,
622
+ namespace: str = "default",
623
+ category_filter: str | None = None,
624
+ limit: int = 10,
625
+ include_expired: bool = False,
626
+ ) -> list[dict[str, Any]]:
627
+ """Get short-term memory from MongoDB"""
628
+ try:
629
+ collection = self._get_collection(self.SHORT_TERM_MEMORY_COLLECTION)
630
+
631
+ # Build filter
632
+ filter_doc = {"namespace": namespace}
633
+
634
+ if category_filter:
635
+ filter_doc["category_primary"] = category_filter
636
+
637
+ if not include_expired:
638
+ current_time = datetime.now(timezone.utc)
639
+ filter_doc["$or"] = [
640
+ {"expires_at": {"$exists": False}},
641
+ {"expires_at": None},
642
+ {"expires_at": {"$gt": current_time}},
643
+ ]
644
+
645
+ # Execute query
646
+ cursor = (
647
+ collection.find(filter_doc)
648
+ .sort([("importance_score", -1), ("created_at", -1)])
649
+ .limit(limit)
650
+ )
651
+
652
+ results = []
653
+ for document in cursor:
654
+ results.append(self._convert_to_dict(document))
655
+
656
+ logger.debug(f"Retrieved {len(results)} short-term memory entries")
657
+ return results
658
+
659
+ except Exception as e:
660
+ logger.error(f"Failed to get short-term memory: {e}")
661
+ return []
662
+
663
+ def search_short_term_memory(
664
+ self,
665
+ query: str,
666
+ namespace: str = "default",
667
+ limit: int = 10,
668
+ ) -> list[dict[str, Any]]:
669
+ """Search short-term memory using MongoDB text search"""
670
+ try:
671
+ # Clean the query to remove common prefixes that interfere with search
672
+ cleaned_query = query.strip()
673
+
674
+ # Remove "User query:" prefix if present (this was causing search failures)
675
+ if cleaned_query.lower().startswith("user query:"):
676
+ cleaned_query = cleaned_query[11:].strip()
677
+ logger.debug(
678
+ f"Cleaned short-term search query from '{query}' to '{cleaned_query}'"
679
+ )
680
+
681
+ if not cleaned_query:
682
+ logger.debug(
683
+ "Empty query provided for short-term search, returning all short-term memories"
684
+ )
685
+ return self.get_short_term_memory(namespace=namespace, limit=limit)
686
+
687
+ collection = self._get_collection(self.SHORT_TERM_MEMORY_COLLECTION)
688
+
689
+ current_time = datetime.now(timezone.utc)
690
+ search_filter = {
691
+ "$and": [
692
+ {"$text": {"$search": cleaned_query}}, # Use cleaned query
693
+ {"namespace": namespace},
694
+ {
695
+ "$or": [
696
+ {"expires_at": {"$exists": False}},
697
+ {"expires_at": None},
698
+ {"expires_at": {"$gt": current_time}},
699
+ ]
700
+ },
701
+ ]
702
+ }
703
+
704
+ logger.debug(
705
+ f"Executing short-term MongoDB text search with cleaned query '{cleaned_query}' and filter: {search_filter}"
706
+ )
707
+
708
+ # Execute MongoDB text search with text score projection
709
+ cursor = (
710
+ collection.find(search_filter, {"score": {"$meta": "textScore"}})
711
+ .sort(
712
+ [
713
+ ("score", {"$meta": "textScore"}),
714
+ ("importance_score", -1),
715
+ ("created_at", -1),
716
+ ]
717
+ )
718
+ .limit(limit)
719
+ )
720
+
721
+ results = []
722
+ for document in cursor:
723
+ memory = self._convert_to_dict(document)
724
+ memory["memory_type"] = "short_term"
725
+ memory["search_strategy"] = "mongodb_text"
726
+ # Preserve text search score
727
+ if "score" in document:
728
+ memory["text_score"] = document["score"]
729
+ results.append(memory)
730
+
731
+ logger.debug(
732
+ f"Short-term memory search returned {len(results)} results for query: '{query}'"
733
+ )
734
+ return results
735
+
736
+ except Exception as e:
737
+ logger.error(f"Short-term memory search failed: {e}")
738
+ return []
739
+
740
+ def update_short_term_memory_access(
741
+ self, memory_id: str, namespace: str = "default"
742
+ ):
743
+ """Update access count and last accessed time for short-term memory"""
744
+ try:
745
+ collection = self._get_collection(self.SHORT_TERM_MEMORY_COLLECTION)
746
+
747
+ collection.update_one(
748
+ {"memory_id": memory_id, "namespace": namespace},
749
+ {
750
+ "$inc": {"access_count": 1},
751
+ "$set": {"last_accessed": datetime.now(timezone.utc)},
752
+ },
753
+ )
754
+
755
+ except Exception as e:
756
+ logger.debug(f"Failed to update short-term memory access: {e}")
757
+
758
+ def get_conscious_memories(
759
+ self,
760
+ namespace: str = "default",
761
+ processed_only: bool = False,
762
+ ) -> list[dict[str, Any]]:
763
+ """Get conscious-info labeled memories from long-term memory"""
764
+ try:
765
+ collection = self._get_collection(self.LONG_TERM_MEMORY_COLLECTION)
766
+
767
+ # Build filter for conscious-info classification
768
+ filter_doc = {"namespace": namespace, "classification": "conscious-info"}
769
+
770
+ if processed_only:
771
+ # Get only processed memories
772
+ filter_doc["conscious_processed"] = True
773
+ else:
774
+ # Get ALL conscious-info memories regardless of processed status
775
+ # This is the correct behavior for initial conscious ingestion
776
+ pass # No additional filter needed
777
+
778
+ # Execute query
779
+ cursor = collection.find(filter_doc).sort(
780
+ [("importance_score", -1), ("created_at", -1)]
781
+ )
782
+
783
+ results = []
784
+ for document in cursor:
785
+ results.append(self._convert_to_dict(document))
786
+
787
+ logger.debug(f"Retrieved {len(results)} conscious memories")
788
+ return results
789
+
790
+ except Exception as e:
791
+ logger.error(f"Failed to get conscious memories: {e}")
792
+ return []
793
+
794
+ def get_unprocessed_conscious_memories(
795
+ self,
796
+ namespace: str = "default",
797
+ ) -> list[dict[str, Any]]:
798
+ """Get unprocessed conscious-info labeled memories from long-term memory"""
799
+ try:
800
+ collection = self._get_collection(self.LONG_TERM_MEMORY_COLLECTION)
801
+
802
+ # Build filter for unprocessed conscious-info memories
803
+ filter_doc = {
804
+ "namespace": namespace,
805
+ "classification": "conscious-info",
806
+ "$or": [
807
+ {"conscious_processed": False},
808
+ {"conscious_processed": {"$exists": False}},
809
+ {"conscious_processed": None},
810
+ ],
811
+ }
812
+
813
+ # Execute query
814
+ cursor = collection.find(filter_doc).sort(
815
+ [("importance_score", -1), ("created_at", -1)]
816
+ )
817
+
818
+ results = []
819
+ for document in cursor:
820
+ results.append(self._convert_to_dict(document))
821
+
822
+ logger.debug(f"Retrieved {len(results)} unprocessed conscious memories")
823
+ return results
824
+
825
+ except Exception as e:
826
+ logger.error(f"Failed to get unprocessed conscious memories: {e}")
827
+ return []
828
+
829
def mark_conscious_memories_processed(
    self, memory_ids: list[str], namespace: str = "default"
):
    """Flag the given long-term memories as consumed by conscious ingestion.

    Sets ``conscious_processed`` to True on every matching document.
    Failures are logged and swallowed so callers are never interrupted
    by a bookkeeping update.
    """
    try:
        coll = self._get_collection(self.LONG_TERM_MEMORY_COLLECTION)

        # One bulk update covers the whole batch.
        selector = {"memory_id": {"$in": memory_ids}, "namespace": namespace}
        outcome = coll.update_many(selector, {"$set": {"conscious_processed": True}})

        logger.debug(
            f"Marked {outcome.modified_count} memories as conscious processed"
        )

    except Exception as e:
        logger.error(f"Failed to mark conscious memories processed: {e}")
848
+
849
def store_long_term_memory_enhanced(
    self, memory: ProcessedLongTermMemory, chat_id: str, namespace: str = "default"
) -> str:
    """Store a ProcessedLongTermMemory in MongoDB with enhanced schema.

    The memory's classification/importance enums, keywords, entities and
    relationship metadata are flattened into top-level document fields so
    they can be queried directly, alongside the full model dump under
    ``processed_data``.

    Args:
        memory: Processed memory to persist.
        chat_id: Originating chat, stored as ``original_chat_id``.
        namespace: Logical namespace for the document.

    Returns:
        The generated UUID string used as ``memory_id``.

    Raises:
        DatabaseError: if the insert fails.
    """
    # Primary key generated client-side so it can be returned without a
    # read-back from the database.
    memory_id = str(uuid.uuid4())

    try:
        collection = self._get_collection(self.LONG_TERM_MEMORY_COLLECTION)

        # Enrich searchable content with keywords and entities for better search
        enriched_content_parts = [memory.content]

        # Add summary for richer search content
        if memory.summary and memory.summary.strip():
            enriched_content_parts.append(memory.summary)

        # Add keywords to searchable content
        if memory.keywords:
            keyword_text = " ".join(memory.keywords)
            enriched_content_parts.append(keyword_text)

        # Add entities to searchable content
        if memory.entities:
            entity_text = " ".join(memory.entities)
            enriched_content_parts.append(entity_text)

        # Create enriched searchable content (feeds the $text index)
        enriched_searchable_content = " ".join(enriched_content_parts)

        # Convert Pydantic model to MongoDB document
        document = {
            "memory_id": memory_id,
            "original_chat_id": chat_id,
            "processed_data": memory.model_dump(mode="json"),
            "importance_score": memory.importance_score,
            "category_primary": memory.classification.value,
            "retention_type": "long_term",
            "namespace": namespace,
            "created_at": datetime.now(timezone.utc),
            "searchable_content": enriched_searchable_content,
            "summary": memory.summary,
            # Fixed default scores — not computed at store time.
            "novelty_score": 0.5,
            "relevance_score": 0.5,
            "actionability_score": 0.5,
            "classification": memory.classification.value,
            "memory_importance": memory.importance.value,
            "topic": memory.topic,
            "entities_json": memory.entities,
            "keywords_json": memory.keywords,
            "is_user_context": memory.is_user_context,
            "is_preference": memory.is_preference,
            "is_skill_knowledge": memory.is_skill_knowledge,
            "is_current_project": memory.is_current_project,
            "promotion_eligible": memory.promotion_eligible,
            "duplicate_of": memory.duplicate_of,
            "supersedes_json": memory.supersedes,
            "related_memories_json": memory.related_memories,
            "confidence_score": memory.confidence_score,
            "extraction_timestamp": memory.extraction_timestamp,
            "classification_reason": memory.classification_reason,
            "processed_for_duplicates": False,
            "conscious_processed": False,  # Ensure new memories start as unprocessed
            "access_count": 0,
        }

        # Convert datetime fields
        document = self._convert_datetime_fields(document)

        # Insert document
        collection.insert_one(document)

        logger.debug(f"Stored enhanced long-term memory {memory_id}")
        return memory_id

    except Exception as e:
        logger.error(f"Failed to store enhanced long-term memory: {e}")
        raise DatabaseError(f"Failed to store enhanced long-term memory: {e}")
926
+
927
def search_memories(
    self,
    query: str,
    namespace: str = "default",
    category_filter: list[str] | None = None,
    limit: int = 10,
) -> list[dict[str, Any]]:
    """Search memories using MongoDB text search with SQL-compatible interface.

    Runs a ``$text`` search against both the short-term and long-term
    memory collections, normalizes each hit into the same dict shape the
    SQL managers return, and merges the two result sets.

    Args:
        query: Free-text search string; a leading "user query:" prefix is
            stripped. Empty/blank queries return [] to mirror SQL behavior.
        namespace: Namespace the documents must belong to.
        category_filter: Optional list of ``category_primary`` values.
        limit: Per-collection fetch limit AND final cap on the merged list.

    Returns:
        Up to ``limit`` memory dicts sorted by text score then importance;
        [] on empty query or any unrecoverable error.
    """
    try:
        logger.debug(
            f"MongoDB search_memories called: query='{query}', namespace='{namespace}', limit={limit}"
        )

        # Handle empty queries consistently with SQL
        if not query or not query.strip():
            logger.debug(
                "Empty query provided, returning empty results for consistency"
            )
            return []

        # Clean query (remove common problematic prefixes)
        cleaned_query = query.strip()
        if cleaned_query.lower().startswith("user query:"):
            # len("user query:") == 11
            cleaned_query = cleaned_query[11:].strip()
            logger.debug(f"Cleaned query from '{query}' to '{cleaned_query}'")

        if not cleaned_query:
            return []

        results = []
        collections_to_search = [
            (self.SHORT_TERM_MEMORY_COLLECTION, "short_term"),
            (self.LONG_TERM_MEMORY_COLLECTION, "long_term"),
        ]

        # Search each collection independently; a failure in one does not
        # abort the other.
        for collection_name, memory_type in collections_to_search:
            collection = self._get_collection(collection_name)

            try:
                # Build search filter
                search_filter: dict[str, Any] = {
                    "$text": {"$search": cleaned_query},
                    "namespace": namespace,
                }

                # Add category filter if specified
                if category_filter:
                    search_filter["category_primary"] = {"$in": category_filter}

                # For short-term memories, exclude expired ones. The filter
                # is rebuilt as an explicit $and because the expiry check
                # needs its own $or alongside the $text clause.
                if memory_type == "short_term":
                    current_time = datetime.now(timezone.utc)
                    search_filter = {
                        "$and": [
                            {"$text": {"$search": cleaned_query}},
                            {"namespace": namespace},
                            {
                                "$or": [
                                    {"expires_at": {"$exists": False}},
                                    {"expires_at": None},
                                    {"expires_at": {"$gt": current_time}},
                                ]
                            },
                        ]
                    }
                    if category_filter:
                        search_filter["$and"].append(
                            {"category_primary": {"$in": category_filter}}
                        )

                # Execute search with standardized projection; sort by the
                # text relevance score first, then importance and recency.
                cursor = (
                    collection.find(
                        search_filter, {"score": {"$meta": "textScore"}}
                    )
                    .sort(
                        [
                            ("score", {"$meta": "textScore"}),
                            ("importance_score", -1),
                            ("created_at", -1),
                        ]
                    )
                    .limit(limit)
                )

                for document in cursor:
                    memory = self._convert_to_dict(document)

                    # Standardize fields for SQL compatibility
                    memory["memory_type"] = memory_type
                    memory["search_strategy"] = "mongodb_text"
                    memory["search_score"] = document.get(
                        "score", 0.8
                    )  # MongoDB text score

                    # Ensure all required fields are present
                    if "importance_score" not in memory:
                        memory["importance_score"] = 0.5
                    if "created_at" not in memory:
                        memory["created_at"] = datetime.now(
                            timezone.utc
                        ).isoformat()

                    results.append(memory)

            except Exception as search_error:
                logger.error(
                    f"MongoDB search failed for {collection_name}: {search_error}"
                )
                continue

        # Sort results by search score for consistency
        results.sort(
            key=lambda x: (x.get("search_score", 0), x.get("importance_score", 0)),
            reverse=True,
        )

        logger.debug(f"MongoDB search returned {len(results)} results")
        return results[:limit]

    except Exception as e:
        logger.error(f"MongoDB search_memories failed: {e}")
        # Return empty list to maintain compatibility with SQL manager
        return []
1052
+
1053
def get_memory_stats(self, namespace: str = "default") -> dict[str, Any]:
    """Get comprehensive memory statistics for a namespace.

    Returns a dict with per-collection document counts, a combined
    category breakdown, a count-weighted average importance score,
    connection metadata, and (best effort) MongoDB storage stats.

    Args:
        namespace: Namespace to report on.

    Returns:
        Statistics dict; on failure ``{"error": <message>}`` is returned
        instead of raising.
    """
    try:
        database = self._get_database()

        stats: dict[str, Any] = {}

        # Basic counts per collection, scoped to the namespace
        stats["chat_history_count"] = self._get_collection(
            self.CHAT_HISTORY_COLLECTION
        ).count_documents({"namespace": namespace})

        stats["short_term_count"] = self._get_collection(
            self.SHORT_TERM_MEMORY_COLLECTION
        ).count_documents({"namespace": namespace})

        stats["long_term_count"] = self._get_collection(
            self.LONG_TERM_MEMORY_COLLECTION
        ).count_documents({"namespace": namespace})

        # Combined category breakdown across short- and long-term memories.
        # FIX: documents without a category_primary are bucketed under
        # "unknown" for BOTH collections — previously the short-term pass
        # used doc["_id"] directly and could emit a None key while the
        # long-term pass defaulted, making the merged breakdown inconsistent.
        categories: dict[str, int] = {}
        for collection_name in (
            self.SHORT_TERM_MEMORY_COLLECTION,
            self.LONG_TERM_MEMORY_COLLECTION,
        ):
            grouped = self._get_collection(collection_name).aggregate(
                [
                    {"$match": {"namespace": namespace}},
                    {"$group": {"_id": "$category_primary", "count": {"$sum": 1}}},
                ]
            )
            for doc in grouped:
                key = doc.get("_id")
                if key is None:
                    key = "unknown"
                categories[key] = categories.get(key, 0) + doc["count"]

        stats["memories_by_category"] = categories

        def _avg_importance(collection_name: str):
            """Average importance_score for one collection (0 when empty)."""
            pipeline = [
                {"$match": {"namespace": namespace}},
                {
                    "$group": {
                        "_id": None,
                        "avg_importance": {"$avg": "$importance_score"},
                    }
                },
            ]
            rows = list(self._get_collection(collection_name).aggregate(pipeline))
            return rows[0]["avg_importance"] if rows else 0

        short_avg = _avg_importance(self.SHORT_TERM_MEMORY_COLLECTION)
        long_avg = _avg_importance(self.LONG_TERM_MEMORY_COLLECTION)

        total_memories = stats["short_term_count"] + stats["long_term_count"]
        if total_memories > 0:
            # Weight each collection's average by its document count
            total_avg = (
                (short_avg * stats["short_term_count"])
                + (long_avg * stats["long_term_count"])
            ) / total_memories
            stats["average_importance"] = float(total_avg) if total_avg else 0.0
        else:
            stats["average_importance"] = 0.0

        # Database info (strip credentials before the '@' when present)
        stats["database_type"] = self.database_type
        stats["database_url"] = (
            self.database_connect.split("@")[-1]
            if "@" in self.database_connect
            else self.database_connect
        )

        # MongoDB-specific storage stats (best effort)
        try:
            db_stats = database.command("dbStats")
            stats["storage_size"] = db_stats.get("storageSize", 0)
            stats["data_size"] = db_stats.get("dataSize", 0)
            stats["index_size"] = db_stats.get("indexSize", 0)
            stats["collections"] = db_stats.get("collections", 0)
        except Exception as e:
            logger.debug(f"Could not get database stats: {e}")

        return stats

    except Exception as e:
        logger.error(f"Failed to get memory stats: {e}")
        return {"error": str(e)}
1171
+
1172
def clear_memory(self, namespace: str = "default", memory_type: str | None = None):
    """Delete stored memory documents for a namespace.

    ``memory_type`` selects "short_term", "long_term" or "chat_history";
    any other value (including None) clears all three collections.

    Raises:
        DatabaseError: if a deletion fails.
    """
    try:
        # Dispatch table: unknown/None memory_type falls back to all
        # collections, matching the original if/elif/else chain.
        all_collections = [
            self.SHORT_TERM_MEMORY_COLLECTION,
            self.LONG_TERM_MEMORY_COLLECTION,
            self.CHAT_HISTORY_COLLECTION,
        ]
        targets = {
            "short_term": [self.SHORT_TERM_MEMORY_COLLECTION],
            "long_term": [self.LONG_TERM_MEMORY_COLLECTION],
            "chat_history": [self.CHAT_HISTORY_COLLECTION],
        }.get(memory_type, all_collections)

        for name in targets:
            self._get_collection(name).delete_many({"namespace": namespace})

        logger.info(
            f"Cleared {memory_type or 'all'} memory for namespace: {namespace}"
        )

    except Exception as e:
        logger.error(f"Failed to clear memory: {e}")
        raise DatabaseError(f"Failed to clear memory: {e}")
1205
+
1206
def _get_connection(self):
    """
    Compatibility method for legacy code that expects raw database connections.
    Returns a MongoDB-compatible connection wrapper.

    The returned object is a context manager; entering it yields a
    MongoDBConnection whose execute() accepts SQL-like strings and maps a
    small subset of them onto MongoDB operations, always returning
    MockQueryResult objects. Transaction methods are no-ops.
    """
    from contextlib import contextmanager

    @contextmanager
    def connection_context():
        class MongoDBConnection:
            """Wrapper that provides SQLAlchemy-like interface for MongoDB"""

            def __init__(self, manager):
                self.manager = manager
                self.database = manager._get_database()

            def execute(self, query, parameters=None):
                """Execute query with parameter substitution"""
                try:
                    # This is a compatibility shim for raw SQL-like queries
                    # Convert basic queries to MongoDB operations
                    if isinstance(query, str):
                        # Handle common SQL-like patterns and convert to MongoDB
                        if "SELECT" in query.upper():
                            return self._handle_select_query(query, parameters)
                        elif "INSERT" in query.upper():
                            return self._handle_insert_query(query, parameters)
                        elif "UPDATE" in query.upper():
                            return self._handle_update_query(query, parameters)
                        elif "DELETE" in query.upper():
                            return self._handle_delete_query(query, parameters)

                    # Fallback for direct MongoDB operations
                    return MockQueryResult([])

                except Exception as e:
                    # Swallow errors: legacy callers expect a result object,
                    # never an exception, from execute().
                    logger.warning(f"Query execution failed: {e}")
                    return MockQueryResult([])

            def _handle_select_query(self, query, parameters):
                """Handle SELECT-like queries"""
                # Simple pattern matching for common queries; only the
                # short_term_memory table is actually supported.
                if "short_term_memory" in query:
                    collection = self.manager._get_collection(
                        self.manager.SHORT_TERM_MEMORY_COLLECTION
                    )
                    filter_doc = {}
                    if parameters:
                        # Basic parameter substitution
                        if "namespace" in parameters:
                            filter_doc["namespace"] = parameters["namespace"]

                    # Hard-coded cap of 100 newest rows for the shim.
                    cursor = (
                        collection.find(filter_doc)
                        .sort("created_at", -1)
                        .limit(100)
                    )
                    results = [self.manager._convert_to_dict(doc) for doc in cursor]
                    return MockQueryResult(results)

                return MockQueryResult([])

            def _handle_insert_query(self, _query, _parameters):
                """Handle INSERT-like queries"""
                # This is a compatibility shim - not fully implemented
                return MockQueryResult([])

            def _handle_update_query(self, _query, _parameters):
                """Handle UPDATE-like queries"""
                # This is a compatibility shim - not fully implemented
                return MockQueryResult([])

            def _handle_delete_query(self, _query, _parameters):
                """Handle DELETE-like queries"""
                # This is a compatibility shim - not fully implemented
                return MockQueryResult([])

            def commit(self):
                """Commit transaction (no-op for MongoDB single operations)"""
                pass

            def rollback(self):
                """Rollback transaction (no-op for MongoDB single operations)"""
                pass

            def close(self):
                """Close connection (no-op, connection pooling handled by client)"""
                pass

            def scalar(self):
                """Compatibility method"""
                return None

            def fetchall(self):
                """Compatibility method"""
                return []

        yield MongoDBConnection(self)

    return connection_context()
1306
+
1307
def close(self):
    """Close the MongoDB client and reset all cached handles."""
    if not self.client:
        # Nothing to tear down (also means nothing is logged).
        return
    self.client.close()
    self.client = None
    self.database = None
    self._collections.clear()
    logger.info("MongoDB connection closed")
1315
+
1316
def get_database_info(self) -> dict[str, Any]:
    """Report MongoDB server details, storage stats and capabilities.

    Credentials embedded in the connection string are masked. On any
    top-level failure a minimal dict carrying the error message is
    returned instead of raising.
    """
    try:
        client = self._get_client()
        database = self._get_database()

        # Mask "user:password@" in the connection string when credentials
        # are configured.
        if self.username and self.password:
            display_url = self.database_connect.replace(
                f"{self.username}:{self.password}@", "***:***@"
            )
        else:
            display_url = self.database_connect

        info: dict[str, Any] = {
            "database_type": self.database_type,
            "database_name": self.database_name,
            "connection_string": display_url,
        }

        # Server version (best effort).
        try:
            version = client.server_info().get("version", "unknown")
        except Exception:
            version = "unknown"
        info["version"] = version
        info["driver"] = "pymongo"

        # Storage statistics (best effort).
        try:
            db_stats = database.command("dbStats")
            info["collections_count"] = db_stats.get("collections", 0)
            info["data_size"] = db_stats.get("dataSize", 0)
            info["storage_size"] = db_stats.get("storageSize", 0)
            info["indexes_count"] = db_stats.get("indexes", 0)
        except Exception:
            pass

        # Capabilities
        info["supports_fulltext"] = True
        info["auto_creation_enabled"] = (
            True  # MongoDB creates collections automatically
        )

        return info

    except Exception as e:
        logger.warning(f"Could not get MongoDB database info: {e}")
        return {
            "database_type": self.database_type,
            "version": "unknown",
            "supports_fulltext": True,
            "error": str(e),
        }
1369
+
1370
+
1371
class MockQueryResult:
    """Minimal stand-in for a SQLAlchemy-style result object.

    Wraps a list of pre-computed rows so legacy SQL-oriented callers can
    consume them through the familiar cursor interface.
    """

    def __init__(self, results):
        # Rows to serve; _index is the fetchone() read position.
        self.results = results
        self._index = 0

    def fetchall(self):
        """Return every row (does not advance the read position)."""
        return self.results

    def fetchone(self):
        """Return the next unread row, or None once exhausted."""
        if self._index >= len(self.results):
            return None
        row = self.results[self._index]
        self._index += 1
        return row

    def scalar(self):
        """Return the first column of the first row, or None if empty."""
        if not self.results:
            return None
        head = self.results[0]
        if isinstance(head, dict):
            # A dict models a row; its first value plays the scalar column.
            return next(iter(head.values()))
        return head

    def __iter__(self):
        """Iterate over all rows."""
        return iter(self.results)