MemoryOS 1.0.1-py3-none-any.whl → 1.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of MemoryOS might be problematic.

Files changed (82)
  1. {memoryos-1.0.1.dist-info → memoryos-1.1.1.dist-info}/METADATA +7 -2
  2. {memoryos-1.0.1.dist-info → memoryos-1.1.1.dist-info}/RECORD +79 -65
  3. {memoryos-1.0.1.dist-info → memoryos-1.1.1.dist-info}/WHEEL +1 -1
  4. memos/__init__.py +1 -1
  5. memos/api/client.py +109 -0
  6. memos/api/config.py +11 -9
  7. memos/api/context/dependencies.py +15 -55
  8. memos/api/middleware/request_context.py +9 -40
  9. memos/api/product_api.py +2 -3
  10. memos/api/product_models.py +91 -16
  11. memos/api/routers/product_router.py +23 -16
  12. memos/api/start_api.py +10 -0
  13. memos/configs/graph_db.py +4 -0
  14. memos/configs/mem_scheduler.py +38 -3
  15. memos/context/context.py +255 -0
  16. memos/embedders/factory.py +2 -0
  17. memos/graph_dbs/nebular.py +230 -232
  18. memos/graph_dbs/neo4j.py +35 -1
  19. memos/graph_dbs/neo4j_community.py +7 -0
  20. memos/llms/factory.py +2 -0
  21. memos/llms/openai.py +74 -2
  22. memos/log.py +27 -15
  23. memos/mem_cube/general.py +3 -1
  24. memos/mem_os/core.py +60 -22
  25. memos/mem_os/main.py +3 -6
  26. memos/mem_os/product.py +35 -11
  27. memos/mem_reader/factory.py +2 -0
  28. memos/mem_reader/simple_struct.py +127 -74
  29. memos/mem_scheduler/analyzer/__init__.py +0 -0
  30. memos/mem_scheduler/analyzer/mos_for_test_scheduler.py +569 -0
  31. memos/mem_scheduler/analyzer/scheduler_for_eval.py +280 -0
  32. memos/mem_scheduler/base_scheduler.py +126 -56
  33. memos/mem_scheduler/general_modules/dispatcher.py +2 -2
  34. memos/mem_scheduler/general_modules/misc.py +99 -1
  35. memos/mem_scheduler/general_modules/scheduler_logger.py +17 -11
  36. memos/mem_scheduler/general_scheduler.py +40 -88
  37. memos/mem_scheduler/memory_manage_modules/__init__.py +5 -0
  38. memos/mem_scheduler/memory_manage_modules/memory_filter.py +308 -0
  39. memos/mem_scheduler/{general_modules → memory_manage_modules}/retriever.py +34 -7
  40. memos/mem_scheduler/monitors/dispatcher_monitor.py +9 -8
  41. memos/mem_scheduler/monitors/general_monitor.py +119 -39
  42. memos/mem_scheduler/optimized_scheduler.py +124 -0
  43. memos/mem_scheduler/orm_modules/__init__.py +0 -0
  44. memos/mem_scheduler/orm_modules/base_model.py +635 -0
  45. memos/mem_scheduler/orm_modules/monitor_models.py +261 -0
  46. memos/mem_scheduler/scheduler_factory.py +2 -0
  47. memos/mem_scheduler/schemas/monitor_schemas.py +96 -29
  48. memos/mem_scheduler/utils/config_utils.py +100 -0
  49. memos/mem_scheduler/utils/db_utils.py +33 -0
  50. memos/mem_scheduler/utils/filter_utils.py +1 -1
  51. memos/mem_scheduler/webservice_modules/__init__.py +0 -0
  52. memos/memories/activation/kv.py +2 -1
  53. memos/memories/textual/item.py +95 -16
  54. memos/memories/textual/naive.py +1 -1
  55. memos/memories/textual/tree.py +27 -3
  56. memos/memories/textual/tree_text_memory/organize/handler.py +4 -2
  57. memos/memories/textual/tree_text_memory/organize/manager.py +28 -14
  58. memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +1 -2
  59. memos/memories/textual/tree_text_memory/organize/reorganizer.py +75 -23
  60. memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +7 -5
  61. memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +6 -2
  62. memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +2 -0
  63. memos/memories/textual/tree_text_memory/retrieve/recall.py +70 -22
  64. memos/memories/textual/tree_text_memory/retrieve/searcher.py +101 -33
  65. memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +5 -4
  66. memos/memos_tools/singleton.py +174 -0
  67. memos/memos_tools/thread_safe_dict.py +22 -0
  68. memos/memos_tools/thread_safe_dict_segment.py +382 -0
  69. memos/parsers/factory.py +2 -0
  70. memos/reranker/concat.py +59 -0
  71. memos/reranker/cosine_local.py +1 -0
  72. memos/reranker/factory.py +5 -0
  73. memos/reranker/http_bge.py +225 -12
  74. memos/templates/mem_scheduler_prompts.py +242 -0
  75. memos/types.py +4 -1
  76. memos/api/context/context.py +0 -147
  77. memos/api/context/context_thread.py +0 -96
  78. memos/mem_scheduler/mos_for_test_scheduler.py +0 -146
  79. {memoryos-1.0.1.dist-info → memoryos-1.1.1.dist-info}/entry_points.txt +0 -0
  80. {memoryos-1.0.1.dist-info → memoryos-1.1.1.dist-info/licenses}/LICENSE +0 -0
  81. /memos/mem_scheduler/{general_modules → webservice_modules}/rabbitmq_service.py +0 -0
  82. /memos/mem_scheduler/{general_modules → webservice_modules}/redis_service.py +0 -0
memos/mem_scheduler/orm_modules/base_model.py
@@ -0,0 +1,635 @@
+ import json
+ import os
+ import tempfile
+ import time
+
+ from abc import abstractmethod
+ from datetime import datetime, timedelta
+ from pathlib import Path
+ from typing import Any, TypeVar
+
+ from sqlalchemy import Boolean, Column, DateTime, String, Text, and_, create_engine
+ from sqlalchemy.engine import Engine
+ from sqlalchemy.ext.declarative import declarative_base
+ from sqlalchemy.orm import Session, sessionmaker
+
+ from memos.log import get_logger
+ from memos.mem_user.user_manager import UserManager
+
+
+ T = TypeVar("T")  # The model type (MemoryMonitorManager, QueryMonitorManager, etc.)
+ ORM = TypeVar("ORM")  # The ORM model type
+
+ logger = get_logger(__name__)
+
+ Base = declarative_base()
+
+
+ class LockableORM(Base):
+     """Abstract base class for lockable ORM models"""
+
+     __abstract__ = True
+
+     # Composite primary key
+     user_id = Column(String(255), primary_key=True)
+     mem_cube_id = Column(String(255), primary_key=True)
+
+     # Serialized data
+     serialized_data = Column(Text, nullable=False)
+
+     lock_acquired = Column(Boolean, default=False)
+     lock_expiry = Column(DateTime, nullable=True)
+
+     # Version control tag (0-255, cycles back to 0)
+     version_control = Column(String(3), default="0")
+
+
+ class BaseDBManager(UserManager):
+     """Abstract base class for database managers with a proper locking mechanism
+
+     This class provides a foundation for managing database operations with
+     distributed locking capabilities to ensure data consistency across
+     multiple processes or threads.
+     """
+
+     def __init__(
+         self,
+         engine: Engine,
+         user_id: str | None = None,
+         mem_cube_id: str | None = None,
+         lock_timeout: int = 10,
+     ):
+         """Initialize the database manager
+
+         Args:
+             engine: SQLAlchemy engine instance
+             user_id: Unique identifier for the user
+             mem_cube_id: Unique identifier for the memory cube
+             lock_timeout: Timeout in seconds for lock acquisition
+         """
+         # Deliberately skip super().__init__() to avoid UserManager initialization
+         self.engine = engine
+         self.SessionLocal = None
+         self.obj = None
+         self.user_id = user_id
+         self.mem_cube_id = mem_cube_id
+         self.lock_timeout = lock_timeout
+         self.last_version_control = None  # Track the last version control tag
+
+         self.init_manager(
+             engine=self.engine,
+             user_id=self.user_id,
+             mem_cube_id=self.mem_cube_id,
+         )
+
+     @property
+     @abstractmethod
+     def orm_class(self) -> type[LockableORM]:
+         """Return the ORM model class for this manager
+
+         Returns:
+             The SQLAlchemy ORM model class
+         """
+         raise NotImplementedError()
+
+     @property
+     @abstractmethod
+     def obj_class(self) -> Any:
+         """Return the business object class for this manager
+
+         Returns:
+             The business logic object class
+         """
+         raise NotImplementedError()
+
+     def init_manager(self, engine: Engine, user_id: str, mem_cube_id: str):
+         """Initialize the database manager with engine and identifiers
+
+         Args:
+             engine: SQLAlchemy engine instance
+             user_id: User identifier
+             mem_cube_id: Memory cube identifier
+
+         Raises:
+             RuntimeError: If database initialization fails
+         """
+         try:
+             self.SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+
+             logger.info(f"{self.orm_class} initialized with engine {engine}")
+             logger.info(f"Set user_id to {user_id}; mem_cube_id to {mem_cube_id}")
+
+             # Create tables if they don't exist
+             self._create_table_with_error_handling(engine)
+             logger.debug(f"Successfully created/verified table for {self.orm_class.__tablename__}")
+
+         except Exception as e:
+             error_msg = f"Failed to initialize database manager for {self.orm_class.__name__}: {e}"
+             logger.error(error_msg, exc_info=True)
+             raise RuntimeError(error_msg) from e
+
+     def _create_table_with_error_handling(self, engine: Engine):
+         """Create table with proper error handling for common database conflicts
+
+         Args:
+             engine: SQLAlchemy engine instance
+
+         Raises:
+             RuntimeError: If table creation fails after handling known issues
+         """
+         try:
+             self.orm_class.__table__.create(bind=engine, checkfirst=True)
+         except Exception as e:
+             error_str = str(e).lower()
+
+             # Handle the common SQLite "index already exists" error
+             if "index" in error_str and "already exists" in error_str:
+                 logger.warning(f"Index already exists for {self.orm_class.__tablename__}: {e}")
+                 # Try to create just the table without indexes
+                 try:
+                     # Create a temporary table definition without indexes
+                     table_without_indexes = self.orm_class.__table__.copy()
+                     table_without_indexes._indexes.clear()  # Remove all indexes
+                     table_without_indexes.create(bind=engine, checkfirst=True)
+                     logger.info(
+                         f"Created table {self.orm_class.__tablename__} without problematic indexes"
+                     )
+                 except Exception as table_error:
+                     logger.error(f"Failed to create table even without indexes: {table_error}")
+                     raise
+             else:
+                 # Re-raise other types of errors
+                 raise
+
+     def _get_session(self) -> Session:
+         """Get a database session"""
+         return self.SessionLocal()
+
+     def _serialize(self, obj: T) -> str:
+         """Serialize the object to JSON"""
+         if hasattr(obj, "to_json"):
+             return obj.to_json()
+         return json.dumps(obj)
+
+     def _deserialize(self, data: str, model_class: type[T]) -> T:
+         """Deserialize JSON to an object"""
+         if hasattr(model_class, "from_json"):
+             return model_class.from_json(data)
+         return json.loads(data)
+
+     def acquire_lock(self, block: bool = True, **kwargs) -> bool:
+         """Acquire a distributed lock for the current user and memory cube
+
+         Args:
+             block: Whether to block until the lock is acquired
+             **kwargs: Additional filter criteria
+
+         Returns:
+             True if the lock was acquired, False otherwise
+         """
+         session = self._get_session()
+
+         try:
+             now = datetime.now()
+             expiry = now + timedelta(seconds=self.lock_timeout)
+
+             # Query for an existing record with lock information
+             query = (
+                 session.query(self.orm_class)
+                 .filter_by(**kwargs)
+                 .filter(
+                     and_(
+                         self.orm_class.user_id == self.user_id,
+                         self.orm_class.mem_cube_id == self.mem_cube_id,
+                     )
+                 )
+             )
+
+             record = query.first()
+
+             # If no record exists, the lock can be acquired immediately
+             if record is None:
+                 logger.info(
+                     f"No existing record found for {self.user_id}/{self.mem_cube_id}, lock can be acquired"
+                 )
+                 return True
+
+             # Check if the lock is currently held and not expired
+             if record.lock_acquired and record.lock_expiry and now < record.lock_expiry:
+                 if block:
+                     # Wait for the lock to be released or expire
+                     logger.info(
+                         f"Waiting for lock to be released for {self.user_id}/{self.mem_cube_id}"
+                     )
+                     while record.lock_acquired and record.lock_expiry and now < record.lock_expiry:
+                         time.sleep(0.1)  # Small delay before retry
+                         session.refresh(record)  # Refresh record state
+                         now = datetime.now()
+                 else:
+                     logger.warning(
+                         f"Lock is held for {self.user_id}/{self.mem_cube_id}, cannot acquire"
+                     )
+                     return False
+
+             # Acquire the lock by updating the record
+             query.update(
+                 {
+                     "lock_acquired": True,
+                     "lock_expiry": expiry,
+                 },
+                 synchronize_session=False,
+             )
+
+             session.commit()
+             logger.info(f"Lock acquired for {self.user_id}/{self.mem_cube_id}")
+             return True
+
+         except Exception as e:
+             session.rollback()
+             logger.error(f"Failed to acquire lock for {self.user_id}/{self.mem_cube_id}: {e}")
+             return False
+         finally:
+             session.close()
+
+     def release_locks(self, user_id: str, mem_cube_id: str, **kwargs):
+         """Release locks for the specified user and memory cube
+
+         Args:
+             user_id: User identifier
+             mem_cube_id: Memory cube identifier
+             **kwargs: Additional filter criteria
+         """
+         session = self._get_session()
+
+         try:
+             # Update all matching records to release locks
+             result = (
+                 session.query(self.orm_class)
+                 .filter_by(**kwargs)
+                 .filter(
+                     and_(
+                         self.orm_class.user_id == user_id, self.orm_class.mem_cube_id == mem_cube_id
+                     )
+                 )
+                 .update(
+                     {
+                         "lock_acquired": False,
+                         "lock_expiry": None,  # Clear expiry time as well
+                     },
+                     synchronize_session=False,
+                 )
+             )
+             session.commit()
+             logger.info(f"Lock released for {user_id}/{mem_cube_id} (affected {result} records)")
+
+         except Exception as e:
+             session.rollback()
+             logger.error(f"Failed to release lock for {user_id}/{mem_cube_id}: {e}")
+         finally:
+             session.close()
+
+     def _get_primary_key(self) -> dict[str, Any]:
+         """Get the primary key dictionary for the current instance
+
+         Returns:
+             Dictionary containing user_id and mem_cube_id
+         """
+         return {"user_id": self.user_id, "mem_cube_id": self.mem_cube_id}
+
+     def _increment_version_control(self, current_tag: str) -> str:
+         """Increment the version control tag, cycling from 255 back to 0
+
+         Args:
+             current_tag: Current version control tag as string
+
+         Returns:
+             Next version control tag as string
+         """
+         try:
+             current_value = int(current_tag)
+             next_value = (current_value + 1) % 256  # Cycle from 255 back to 0
+             return str(next_value)
+         except (ValueError, TypeError):
+             # If current_tag is invalid, start from 0
+             logger.warning(f"Invalid version_control '{current_tag}', resetting to '0'")
+             return "0"
+
+     @abstractmethod
+     def merge_items(self, orm_instance, obj_instance, size_limit):
+         """Merge items from the database with the current object instance
+
+         Args:
+             orm_instance: ORM instance from the database
+             obj_instance: Current business object instance
+             size_limit: Maximum number of items to keep after the merge
+         """
+
+     def sync_with_orm(self, size_limit: int | None = None) -> None:
+         """
+         Synchronize data between the database and the business object.
+
+         This method performs a three-step synchronization process:
+         1. Acquire the lock and get existing data from the database
+         2. Merge database items with current object items
+         3. Write merged data back to the database and release the lock
+
+         Args:
+             size_limit: Optional maximum number of items to keep after synchronization.
+                 If specified, only the most recent items will be retained.
+         """
+         logger.info(
+             f"Starting sync_with_orm for {self.user_id}/{self.mem_cube_id} with size_limit={size_limit}"
+         )
+         user_id = self.user_id
+         mem_cube_id = self.mem_cube_id
+
+         session = self._get_session()
+
+         try:
+             # Acquire the lock before any database operations
+             lock_status = self.acquire_lock(block=True)
+             if not lock_status:
+                 logger.error("Failed to acquire lock for synchronization")
+                 return
+
+             # 1. Get existing data from the database
+             orm_instance = (
+                 session.query(self.orm_class)
+                 .filter_by(user_id=user_id, mem_cube_id=mem_cube_id)
+                 .first()
+             )
+
+             # If no existing record, create a new one
+             if orm_instance is None:
+                 if self.obj is None:
+                     logger.warning("No object to synchronize and no existing database record")
+                     return
+
+                 orm_instance = self.orm_class(
+                     user_id=user_id,
+                     mem_cube_id=mem_cube_id,
+                     serialized_data=self.obj.to_json(),
+                     version_control="0",  # Start with tag 0 for new records
+                 )
+                 logger.info(
+                     "No existing ORM instance found. Created a new one. "
+                     "Note: size_limit was not applied because there is no existing data to merge."
+                 )
+                 session.add(orm_instance)
+                 session.commit()
+                 # Update last_version_control for the new record
+                 self.last_version_control = "0"
+                 return
+
+             # 2. Check version control and merge database data with the current object
+             if self.obj is not None:
+                 current_db_tag = orm_instance.version_control
+                 new_tag = self._increment_version_control(current_db_tag)
+                 # Check if this is the first sync (last_version_control is None)
+                 if self.last_version_control is None:
+                     # First sync: increment the version and perform the merge
+                     logger.info(
+                         f"First sync, incrementing version from {current_db_tag} to {new_tag} for {self.user_id}/{self.mem_cube_id}"
+                     )
+                 elif current_db_tag == self.last_version_control:
+                     logger.info(
+                         f"Version control unchanged ({current_db_tag}), directly update {self.user_id}/{self.mem_cube_id}"
+                     )
+                 else:
+                     # Version control has changed: increment it and perform the merge
+                     logger.info(
+                         f"Version control changed from {self.last_version_control} to {current_db_tag}, incrementing to {new_tag} for {self.user_id}/{self.mem_cube_id}"
+                     )
+                 try:
+                     self.merge_items(
+                         orm_instance=orm_instance, obj_instance=self.obj, size_limit=size_limit
+                     )
+                 except Exception as merge_error:
+                     logger.error(f"Error during merge_items: {merge_error}", exc_info=True)
+                     logger.warning("Continuing with current object data without merge")
+
+                 # 3. Write merged data back to the database
+                 orm_instance.serialized_data = self.obj.to_json()
+                 orm_instance.version_control = new_tag
+                 logger.info(f"Updated serialized_data for {self.user_id}/{self.mem_cube_id}")
+
+                 # Update last_version_control to the current value
+                 self.last_version_control = orm_instance.version_control
+             else:
+                 logger.warning("No current object to merge with database data")
+
+             session.commit()
+             logger.info(f"Synchronization completed for {self.user_id}/{self.mem_cube_id}")
+
+         except Exception as e:
+             session.rollback()
+             logger.error(
+                 f"Error during synchronization for {user_id}/{mem_cube_id}: {e}", exc_info=True
+             )
+         finally:
+             # Always release locks and close the session
+             self.release_locks(user_id=user_id, mem_cube_id=mem_cube_id)
+             session.close()
+
+     def save_to_db(self, obj_instance) -> None:
+         """Save the current state of the business object to the database
+
+         Args:
+             obj_instance: The business object instance to save
+         """
+         user_id = self.user_id
+         mem_cube_id = self.mem_cube_id
+
+         session = self._get_session()
+
+         try:
+             # Acquire the lock before database operations
+             lock_status = self.acquire_lock(block=True)
+             if not lock_status:
+                 logger.error("Failed to acquire lock for saving to database")
+                 return
+
+             # Check if a record already exists
+             orm_instance = (
+                 session.query(self.orm_class)
+                 .filter_by(user_id=user_id, mem_cube_id=mem_cube_id)
+                 .first()
+             )
+
+             if orm_instance is None:
+                 # Create a new record
+                 orm_instance = self.orm_class(
+                     user_id=user_id,
+                     mem_cube_id=mem_cube_id,
+                     serialized_data=obj_instance.to_json(),
+                     version_control="0",  # Start with version 0 for new records
+                 )
+                 session.add(orm_instance)
+                 logger.info(f"Created new database record for {user_id}/{mem_cube_id}")
+                 # Update last_version_control for the new record
+                 self.last_version_control = "0"
+             else:
+                 # Update the existing record with version control
+                 current_version = orm_instance.version_control
+                 new_version = self._increment_version_control(current_version)
+                 orm_instance.serialized_data = obj_instance.to_json()
+                 orm_instance.version_control = new_version
+                 logger.info(
+                     f"Updated existing database record for {user_id}/{mem_cube_id} with version {new_version}"
+                 )
+                 # Update last_version_control
+                 self.last_version_control = new_version
+
+             session.commit()
+
+         except Exception as e:
+             session.rollback()
+             logger.error(f"Error saving to database for {user_id}/{mem_cube_id}: {e}")
+         finally:
+             # Always release locks and close the session
+             self.release_locks(user_id=user_id, mem_cube_id=mem_cube_id)
+             session.close()
+
+     def load_from_db(self, acquire_lock: bool = False):
+         """Load the business object from the database
+
+         Args:
+             acquire_lock: Whether to acquire a lock during the load operation
+
+         Returns:
+             The deserialized business object instance, or None if not found
+         """
+         user_id = self.user_id
+         mem_cube_id = self.mem_cube_id
+
+         session = self._get_session()
+
+         try:
+             if acquire_lock:
+                 lock_status = self.acquire_lock(block=True)
+                 if not lock_status:
+                     logger.error("Failed to acquire lock for loading from database")
+                     return None
+
+             # Query for the database record
+             orm_instance = (
+                 session.query(self.orm_class)
+                 .filter_by(user_id=user_id, mem_cube_id=mem_cube_id)
+                 .first()
+             )
+
+             if orm_instance is None:
+                 logger.info(f"No database record found for {user_id}/{mem_cube_id}")
+                 return None
+
+             # Deserialize the business object from JSON
+             db_instance = self.obj_class.from_json(orm_instance.serialized_data)
+             # Update last_version_control to track the loaded version
+             self.last_version_control = orm_instance.version_control
+             logger.info(
+                 f"Successfully loaded object from database for {user_id}/{mem_cube_id} with version {orm_instance.version_control}"
+             )
+
+             return db_instance
+
+         except Exception as e:
+             logger.error(f"Error loading from database for {user_id}/{mem_cube_id}: {e}")
+             return None
+         finally:
+             if acquire_lock:
+                 self.release_locks(user_id=user_id, mem_cube_id=mem_cube_id)
+             session.close()
+
+     def close(self):
+         """Close the database manager and clean up resources
+
+         This method releases any held locks and disposes of the database engine.
+         Should be called when the manager is no longer needed.
+         """
+         try:
+             # Release any locks held by this manager instance
+             if self.user_id and self.mem_cube_id:
+                 self.release_locks(user_id=self.user_id, mem_cube_id=self.mem_cube_id)
+                 logger.info(f"Released locks for {self.user_id}/{self.mem_cube_id}")
+
+             # Dispose of the engine to close all connections
+             if self.engine:
+                 self.engine.dispose()
+                 logger.info("Database engine disposed")
+
+         except Exception as e:
+             logger.error(f"Error during close operation: {e}")
+
+     @staticmethod
+     def create_default_engine() -> Engine:
+         """Create a SQLAlchemy engine with a default database path
+
+         Returns:
+             SQLAlchemy Engine instance backed by a temporary SQLite database
+         """
+         temp_dir = tempfile.mkdtemp()
+         db_path = os.path.join(temp_dir, "test_scheduler_orm.db")
+
+         # Clean up any existing file (though unlikely)
+         if os.path.exists(db_path):
+             os.remove(db_path)
+             # Remove the temp directory if it still exists (should be empty)
+             if os.path.exists(temp_dir) and not os.listdir(temp_dir):
+                 os.rmdir(temp_dir)
+
+         # Ensure the parent directory exists (re-create in case rmdir removed it)
+         parent_dir = Path(db_path).parent
+         parent_dir.mkdir(parents=True, exist_ok=True)
+
+         # Log the creation of the default engine with the database path
+         logger.info(
+             "Creating default SQLAlchemy engine with temporary SQLite database at: %s", db_path
+         )
+
+         return create_engine(f"sqlite:///{db_path}", echo=False)
+
+     @staticmethod
+     def create_engine_from_db_path(db_path: str) -> Engine:
+         """Create a SQLAlchemy engine from a database path
+
+         Args:
+             db_path: Path to the database file
+
+         Returns:
+             SQLAlchemy Engine instance
+         """
+         # Ensure the directory exists
+         Path(db_path).parent.mkdir(parents=True, exist_ok=True)
+
+         return create_engine(f"sqlite:///{db_path}", echo=False)
+
+     @staticmethod
+     def create_mysql_db_path(
+         host: str = "localhost",
+         port: int = 3306,
+         username: str = "root",
+         password: str = "",
+         database: str = "scheduler_orm",
+         charset: str = "utf8mb4",
+     ) -> str:
+         """Create a MySQL database connection URL
+
+         Args:
+             host: MySQL server hostname
+             port: MySQL server port
+             username: Database username
+             password: Database password (optional)
+             database: Database name
+             charset: Character set encoding
+
+         Returns:
+             MySQL connection URL string
+         """
+         # Build the MySQL connection URL with proper formatting
+         if password:
+             db_path = (
+                 f"mysql+pymysql://{username}:{password}@{host}:{port}/{database}?charset={charset}"
+             )
+         else:
+             db_path = f"mysql+pymysql://{username}@{host}:{port}/{database}?charset={charset}"
+         return db_path
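
The new BaseDBManager is abstract: a concrete manager supplies orm_class, obj_class, and a merge_items policy, while the version_control tag (a string counter cycling 0-255) lets sync_with_orm detect whether another writer has touched the row since the last sync. Below is a minimal usage sketch under the assumption that memoryos 1.1.1 is installed; DemoStateORM, DemoState, and DemoStateManager are hypothetical names invented for illustration, not part of the package.

import json

from memos.mem_scheduler.orm_modules.base_model import BaseDBManager, LockableORM


class DemoStateORM(LockableORM):
    # Hypothetical concrete table; inherits key, lock, and version columns.
    __tablename__ = "demo_state"


class DemoState:
    # Toy business object implementing the to_json/from_json contract the manager expects.
    def __init__(self, items):
        self.items = items

    def to_json(self) -> str:
        return json.dumps({"items": self.items})

    @classmethod
    def from_json(cls, data: str) -> "DemoState":
        return cls(json.loads(data)["items"])


class DemoStateManager(BaseDBManager):
    @property
    def orm_class(self):
        return DemoStateORM

    @property
    def obj_class(self):
        return DemoState

    def merge_items(self, orm_instance, obj_instance, size_limit):
        # Example merge policy: union database items with in-memory items,
        # then keep only the newest `size_limit` entries if a limit is given.
        db_items = DemoState.from_json(orm_instance.serialized_data).items
        merged = db_items + [i for i in obj_instance.items if i not in db_items]
        obj_instance.items = merged[-size_limit:] if size_limit else merged


engine = BaseDBManager.create_default_engine()  # temporary SQLite database
manager = DemoStateManager(engine=engine, user_id="u1", mem_cube_id="cube1")
manager.save_to_db(DemoState(items=["a", "b"]))  # creates the row at version "0"
manager.obj = manager.load_from_db()             # tracks the loaded version tag
manager.sync_with_orm(size_limit=10)             # lock, merge, bump version, unlock
manager.close()

Note that sync_with_orm always releases the lock in its finally block, so a failed merge degrades to writing the in-memory state rather than deadlocking other processes.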