MemoryOS 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (114)
  1. {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/METADATA +67 -26
  2. memoryos-0.2.2.dist-info/RECORD +169 -0
  3. memoryos-0.2.2.dist-info/entry_points.txt +3 -0
  4. memos/__init__.py +1 -1
  5. memos/api/config.py +562 -0
  6. memos/api/context/context.py +147 -0
  7. memos/api/context/dependencies.py +90 -0
  8. memos/api/exceptions.py +28 -0
  9. memos/api/mcp_serve.py +502 -0
  10. memos/api/product_api.py +35 -0
  11. memos/api/product_models.py +163 -0
  12. memos/api/routers/__init__.py +1 -0
  13. memos/api/routers/product_router.py +386 -0
  14. memos/chunkers/sentence_chunker.py +8 -2
  15. memos/cli.py +113 -0
  16. memos/configs/embedder.py +27 -0
  17. memos/configs/graph_db.py +132 -3
  18. memos/configs/internet_retriever.py +6 -0
  19. memos/configs/llm.py +47 -0
  20. memos/configs/mem_cube.py +1 -1
  21. memos/configs/mem_os.py +5 -0
  22. memos/configs/mem_reader.py +9 -0
  23. memos/configs/mem_scheduler.py +107 -7
  24. memos/configs/mem_user.py +58 -0
  25. memos/configs/memory.py +5 -4
  26. memos/dependency.py +52 -0
  27. memos/embedders/ark.py +92 -0
  28. memos/embedders/factory.py +4 -0
  29. memos/embedders/sentence_transformer.py +8 -2
  30. memos/embedders/universal_api.py +32 -0
  31. memos/graph_dbs/base.py +11 -3
  32. memos/graph_dbs/factory.py +4 -0
  33. memos/graph_dbs/nebular.py +1364 -0
  34. memos/graph_dbs/neo4j.py +333 -124
  35. memos/graph_dbs/neo4j_community.py +300 -0
  36. memos/llms/base.py +9 -0
  37. memos/llms/deepseek.py +54 -0
  38. memos/llms/factory.py +10 -1
  39. memos/llms/hf.py +170 -13
  40. memos/llms/hf_singleton.py +114 -0
  41. memos/llms/ollama.py +4 -0
  42. memos/llms/openai.py +67 -1
  43. memos/llms/qwen.py +63 -0
  44. memos/llms/vllm.py +153 -0
  45. memos/log.py +1 -1
  46. memos/mem_cube/general.py +77 -16
  47. memos/mem_cube/utils.py +109 -0
  48. memos/mem_os/core.py +251 -51
  49. memos/mem_os/main.py +94 -12
  50. memos/mem_os/product.py +1220 -43
  51. memos/mem_os/utils/default_config.py +352 -0
  52. memos/mem_os/utils/format_utils.py +1401 -0
  53. memos/mem_reader/simple_struct.py +18 -10
  54. memos/mem_scheduler/base_scheduler.py +441 -40
  55. memos/mem_scheduler/general_scheduler.py +249 -248
  56. memos/mem_scheduler/modules/base.py +14 -5
  57. memos/mem_scheduler/modules/dispatcher.py +67 -4
  58. memos/mem_scheduler/modules/misc.py +104 -0
  59. memos/mem_scheduler/modules/monitor.py +240 -50
  60. memos/mem_scheduler/modules/rabbitmq_service.py +319 -0
  61. memos/mem_scheduler/modules/redis_service.py +32 -22
  62. memos/mem_scheduler/modules/retriever.py +167 -23
  63. memos/mem_scheduler/modules/scheduler_logger.py +255 -0
  64. memos/mem_scheduler/mos_for_test_scheduler.py +140 -0
  65. memos/mem_scheduler/schemas/__init__.py +0 -0
  66. memos/mem_scheduler/schemas/general_schemas.py +43 -0
  67. memos/mem_scheduler/{modules/schemas.py → schemas/message_schemas.py} +63 -61
  68. memos/mem_scheduler/schemas/monitor_schemas.py +329 -0
  69. memos/mem_scheduler/utils/__init__.py +0 -0
  70. memos/mem_scheduler/utils/filter_utils.py +176 -0
  71. memos/mem_scheduler/utils/misc_utils.py +61 -0
  72. memos/mem_user/factory.py +94 -0
  73. memos/mem_user/mysql_persistent_user_manager.py +271 -0
  74. memos/mem_user/mysql_user_manager.py +500 -0
  75. memos/mem_user/persistent_factory.py +96 -0
  76. memos/mem_user/persistent_user_manager.py +260 -0
  77. memos/mem_user/user_manager.py +4 -4
  78. memos/memories/activation/item.py +29 -0
  79. memos/memories/activation/kv.py +10 -3
  80. memos/memories/activation/vllmkv.py +219 -0
  81. memos/memories/factory.py +2 -0
  82. memos/memories/textual/base.py +1 -1
  83. memos/memories/textual/general.py +43 -97
  84. memos/memories/textual/item.py +5 -33
  85. memos/memories/textual/tree.py +22 -12
  86. memos/memories/textual/tree_text_memory/organize/conflict.py +9 -5
  87. memos/memories/textual/tree_text_memory/organize/manager.py +26 -18
  88. memos/memories/textual/tree_text_memory/organize/redundancy.py +25 -44
  89. memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +50 -48
  90. memos/memories/textual/tree_text_memory/organize/reorganizer.py +81 -56
  91. memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +6 -3
  92. memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +2 -0
  93. memos/memories/textual/tree_text_memory/retrieve/recall.py +0 -1
  94. memos/memories/textual/tree_text_memory/retrieve/reranker.py +2 -2
  95. memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +2 -0
  96. memos/memories/textual/tree_text_memory/retrieve/searcher.py +52 -28
  97. memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +42 -15
  98. memos/memories/textual/tree_text_memory/retrieve/utils.py +11 -7
  99. memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +62 -58
  100. memos/memos_tools/dinding_report_bot.py +422 -0
  101. memos/memos_tools/notification_service.py +44 -0
  102. memos/memos_tools/notification_utils.py +96 -0
  103. memos/parsers/markitdown.py +8 -2
  104. memos/settings.py +3 -1
  105. memos/templates/mem_reader_prompts.py +66 -23
  106. memos/templates/mem_scheduler_prompts.py +126 -43
  107. memos/templates/mos_prompts.py +87 -0
  108. memos/templates/tree_reorganize_prompts.py +85 -30
  109. memos/vec_dbs/base.py +12 -0
  110. memos/vec_dbs/qdrant.py +46 -20
  111. memoryos-0.2.0.dist-info/RECORD +0 -128
  112. memos/mem_scheduler/utils.py +0 -26
  113. {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/LICENSE +0 -0
  114. {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/WHEEL +0 -0
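
Note on import paths: items 67 and 112 above show the scheduler's schema and utility modules being reorganized, and the hunks below update the matching imports. A minimal before/after sketch of the two renames visible in this diff (shown only as a migration aid, not as part of the package):

    # 0.2.0 import paths (removed in 0.2.2):
    # from memos.mem_scheduler.modules.schemas import ScheduleMessageItem
    # from memos.mem_scheduler.utils import extract_json_dict

    # 0.2.2 import paths, as updated in the dispatcher and monitor hunks below:
    from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem
    from memos.mem_scheduler.utils.misc_utils import extract_json_dict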
memos/mem_scheduler/modules/dispatcher.py:

@@ -4,7 +4,7 @@ from concurrent.futures import ThreadPoolExecutor
 
 from memos.log import get_logger
 from memos.mem_scheduler.modules.base import BaseSchedulerModule
-from memos.mem_scheduler.modules.schemas import ScheduleMessageItem
+from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem
 
 
 logger = get_logger(__name__)
@@ -22,7 +22,7 @@ class SchedulerDispatcher(BaseSchedulerModule):
     - Bulk handler registration
     """
 
-    def __init__(self, max_workers=3, enable_parallel_dispatch=False):
+    def __init__(self, max_workers=30, enable_parallel_dispatch=False):
         super().__init__()
         # Main dispatcher thread pool
         self.max_workers = max_workers
@@ -73,6 +73,38 @@ class SchedulerDispatcher(BaseSchedulerModule):
     def _default_message_handler(self, messages: list[ScheduleMessageItem]) -> None:
         logger.debug(f"Using _default_message_handler to deal with messages: {messages}")
 
+    def group_messages_by_user_and_cube(
+        self, messages: list[ScheduleMessageItem]
+    ) -> dict[str, dict[str, list[ScheduleMessageItem]]]:
+        """
+        Groups messages into a nested dictionary structure, first by user_id, then by mem_cube_id.
+
+        Args:
+            messages: List of ScheduleMessageItem objects to be grouped
+
+        Returns:
+            A nested dictionary with the structure:
+            {
+                "user_id_1": {
+                    "mem_cube_id_1": [msg1, msg2, ...],
+                    "mem_cube_id_2": [msg3, msg4, ...],
+                    ...
+                },
+                "user_id_2": {
+                    ...
+                },
+                ...
+            }
+            where each msg is the original ScheduleMessageItem object
+        """
+        grouped_dict = defaultdict(lambda: defaultdict(list))
+
+        for msg in messages:
+            grouped_dict[msg.user_id][msg.mem_cube_id].append(msg)
+
+        # Convert defaultdict to regular dict for cleaner output
+        return {user_id: dict(cube_groups) for user_id, cube_groups in grouped_dict.items()}
+
     def dispatch(self, msg_list: list[ScheduleMessageItem]):
         """
         Dispatch a list of messages to their respective handlers.
@@ -96,8 +128,39 @@ class SchedulerDispatcher(BaseSchedulerModule):
             else:
                 handler = self.handlers[label]
             # dispatch to different handler
-            logger.debug(f"Dispatch {len(msgs)} messages to {label} handler.")
+            logger.debug(f"Dispatch {len(msgs)} message(s) to {label} handler.")
             if self.enable_parallel_dispatch and self.dispatcher_executor is not None:
+                # Capture variables in lambda to avoid loop variable issues
                 self.dispatcher_executor.submit(handler, msgs)
+                logger.info(f"Dispatched {len(msgs)} message(s) as future task")
             else:
-                handler(msgs)  # Direct serial execution
+                handler(msgs)
+
+    def join(self, timeout: float | None = None) -> bool:
+        """Wait for all dispatched tasks to complete.
+
+        Args:
+            timeout: Maximum time to wait in seconds. None means wait forever.
+
+        Returns:
+            bool: True if all tasks completed, False if timeout occurred.
+        """
+        if not self.enable_parallel_dispatch or self.dispatcher_executor is None:
+            return True  # Nothing to wait for in serial mode
+
+        self.dispatcher_executor.shutdown(wait=True, timeout=timeout)
+        return True
+
+    def shutdown(self) -> None:
+        """Gracefully shutdown the dispatcher."""
+        if self.dispatcher_executor is not None:
+            self.dispatcher_executor.shutdown(wait=True)
+        self._running = False
+        logger.info("Dispatcher has been shutdown.")
+
+    def __enter__(self):
+        self._running = True
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.shutdown()
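
The hunks above widen the default thread pool (max_workers 3 → 30), add per-user/per-cube grouping, and give SchedulerDispatcher a lifecycle (join, shutdown, and context-manager support). A minimal usage sketch follows, under stated assumptions: handler registration and ScheduleMessageItem construction are not shown in this diff, so load_schedule_messages() is a hypothetical helper standing in for wherever those messages come from.

    # Hedged sketch only; relies solely on the methods visible in the hunks above.
    from memos.mem_scheduler.modules.dispatcher import SchedulerDispatcher

    msgs = load_schedule_messages()  # hypothetical helper, not provided by the package

    with SchedulerDispatcher(max_workers=30, enable_parallel_dispatch=True) as dispatcher:
        # New in 0.2.2: bucket messages per user, then per memory cube, before handling
        grouped = dispatcher.group_messages_by_user_and_cube(msgs)
        for user_id, cube_groups in grouped.items():
            for mem_cube_id, cube_msgs in cube_groups.items():
                dispatcher.dispatch(cube_msgs)
    # __exit__ calls shutdown(), which drains the executor before returning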
memos/mem_scheduler/modules/misc.py (new file):

@@ -0,0 +1,104 @@
+import json
+
+from contextlib import suppress
+from datetime import datetime
+from queue import Empty, Full, Queue
+from typing import TYPE_CHECKING, TypeVar
+
+from pydantic import field_serializer
+
+
+if TYPE_CHECKING:
+    from pydantic import BaseModel
+
+T = TypeVar("T")
+
+BaseModelType = TypeVar("T", bound="BaseModel")
+
+
+class DictConversionMixin:
+    """
+    Provides conversion functionality between Pydantic models and dictionaries,
+    including datetime serialization handling.
+    """
+
+    @field_serializer("timestamp", check_fields=False)
+    def serialize_datetime(self, dt: datetime | None, _info) -> str | None:
+        """
+        Custom datetime serialization logic.
+        - Supports timezone-aware datetime objects
+        - Compatible with models without timestamp field (via check_fields=False)
+        """
+        if dt is None:
+            return None
+        return dt.isoformat()
+
+    def to_dict(self) -> dict:
+        """
+        Convert model instance to dictionary.
+        - Uses model_dump to ensure field consistency
+        - Prioritizes custom serializer for timestamp handling
+        """
+        dump_data = self.model_dump()
+        if hasattr(self, "timestamp") and self.timestamp is not None:
+            dump_data["timestamp"] = self.serialize_datetime(self.timestamp, None)
+        return dump_data
+
+    @classmethod
+    def from_dict(cls: type[BaseModelType], data: dict) -> BaseModelType:
+        """
+        Create model instance from dictionary.
+        - Automatically converts timestamp strings to datetime objects
+        """
+        data_copy = data.copy()  # Avoid modifying original dictionary
+        if "timestamp" in data_copy and isinstance(data_copy["timestamp"], str):
+            try:
+                data_copy["timestamp"] = datetime.fromisoformat(data_copy["timestamp"])
+            except ValueError:
+                # Handle invalid time formats - adjust as needed (e.g., log warning or set to None)
+                data_copy["timestamp"] = None
+
+        return cls(**data_copy)
+
+    def __str__(self) -> str:
+        """
+        Convert to formatted JSON string.
+        - Used for user-friendly display in print() or str() calls
+        """
+        return json.dumps(
+            self.to_dict(),
+            indent=4,
+            ensure_ascii=False,
+            default=lambda o: str(o),  # Handle other non-serializable objects
+        )
+
+
+class AutoDroppingQueue(Queue[T]):
+    """A thread-safe queue that automatically drops the oldest item when full."""
+
+    def __init__(self, maxsize: int = 0):
+        super().__init__(maxsize=maxsize)
+
+    def put(self, item: T, block: bool = False, timeout: float | None = None) -> None:
+        """Put an item into the queue.
+
+        If the queue is full, the oldest item will be automatically removed to make space.
+        This operation is thread-safe.
+
+        Args:
+            item: The item to be put into the queue
+            block: Ignored (kept for compatibility with Queue interface)
+            timeout: Ignored (kept for compatibility with Queue interface)
+        """
+        try:
+            # First try non-blocking put
+            super().put(item, block=block, timeout=timeout)
+        except Full:
+            with suppress(Empty):
+                self.get_nowait()  # Remove oldest item
+            # Retry putting the new item
+            super().put(item, block=block, timeout=timeout)
+
+    def get_queue_content_without_pop(self) -> list[T]:
+        """Return a copy of the queue's contents without modifying it."""
+        return list(self.queue)
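
This new module (item 58 above, memos/mem_scheduler/modules/misc.py) adds two small utilities: DictConversionMixin, which round-trips Pydantic models with an optional timestamp field through dicts, and AutoDroppingQueue, a bounded queue that evicts its oldest entry instead of blocking or raising when full. A minimal sketch of how they compose; the Event model is hypothetical and only illustrates the mixin pattern the file is designed for:

    from datetime import datetime

    from pydantic import BaseModel

    from memos.mem_scheduler.modules.misc import AutoDroppingQueue, DictConversionMixin

    class Event(DictConversionMixin, BaseModel):  # hypothetical model, not part of the package
        name: str
        timestamp: datetime | None = None

    event = Event(name="query", timestamp=datetime.now())
    payload = event.to_dict()            # timestamp serialized via isoformat()
    restored = Event.from_dict(payload)  # isoformat string parsed back to datetime

    events_q: AutoDroppingQueue[Event] = AutoDroppingQueue(maxsize=2)
    for i in range(3):
        events_q.put(Event(name=f"e{i}"))  # third put() silently drops e0
    print([e.name for e in events_q.get_queue_content_without_pop()])  # ['e1', 'e2']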
memos/mem_scheduler/modules/monitor.py:

@@ -1,11 +1,27 @@
-import json
-
+from datetime import datetime
 from typing import Any
 
+from memos.configs.mem_scheduler import BaseSchedulerConfig
+from memos.llms.base import BaseLLM
 from memos.log import get_logger
 from memos.mem_cube.general import GeneralMemCube
 from memos.mem_scheduler.modules.base import BaseSchedulerModule
-from memos.mem_scheduler.utils import extract_json_dict
+from memos.mem_scheduler.schemas.general_schemas import (
+    DEFAULT_ACTIVATION_MEM_MONITOR_SIZE_LIMIT,
+    DEFAULT_WEIGHT_VECTOR_FOR_RANKING,
+    DEFAULT_WORKING_MEM_MONITOR_SIZE_LIMIT,
+    MONITOR_ACTIVATION_MEMORY_TYPE,
+    MONITOR_WORKING_MEMORY_TYPE,
+    MemCubeID,
+    UserID,
+)
+from memos.mem_scheduler.schemas.monitor_schemas import (
+    MemoryMonitorItem,
+    MemoryMonitorManager,
+    QueryMonitorItem,
+    QueryMonitorQueue,
+)
+from memos.mem_scheduler.utils.misc_utils import extract_json_dict
 from memos.memories.textual.tree import TreeTextMemory
 
 
@@ -13,33 +29,224 @@ logger = get_logger(__name__)
 
 
 class SchedulerMonitor(BaseSchedulerModule):
-    def __init__(self, chat_llm, activation_mem_size=5):
+    """Monitors and manages scheduling operations with LLM integration."""
+
+    def __init__(self, process_llm: BaseLLM, config: BaseSchedulerConfig):
         super().__init__()
-        self.statistics = {}
-        self.intent_history: list[str] = []
-        self.activation_mem_size = activation_mem_size
-        self.activation_memory_freq_list = [
-            {"memory": None, "count": 0} for _ in range(self.activation_mem_size)
-        ]
-
-        self._chat_llm = chat_llm
-
-    def update_stats(self, mem_cube):
-        self.statistics["activation_mem_size"] = self.activation_mem_size
-        mem_cube_info = self.get_mem_cube_info(mem_cube)
-        self.statistics.update(mem_cube_info)
-
-    def get_mem_cube_info(self, mem_cube: GeneralMemCube):
-        mem_cube_info = {}
-
-        text_mem = mem_cube.text_mem
-        if isinstance(text_mem, TreeTextMemory):
-            memory_size_dict = text_mem.memory_manager.memory_size
-            mem_cube_info["text_mem"] = memory_size_dict
+
+        # hyper-parameters
+        self.config: BaseSchedulerConfig = config
+        self.act_mem_update_interval = self.config.get("act_mem_update_interval", 30)
+        self.query_trigger_interval = self.config.get("query_trigger_interval", 10)
+
+        # Partial Retention Strategy
+        self.partial_retention_number = 2
+        self.working_mem_monitor_capacity = DEFAULT_WORKING_MEM_MONITOR_SIZE_LIMIT
+        self.activation_mem_monitor_capacity = DEFAULT_ACTIVATION_MEM_MONITOR_SIZE_LIMIT
+
+        # attributes
+        # recording query_messages
+        self.query_monitors: QueryMonitorQueue[QueryMonitorItem] = QueryMonitorQueue(
+            maxsize=self.config.context_window_size
+        )
+
+        self.working_memory_monitors: dict[UserID, dict[MemCubeID, MemoryMonitorManager]] = {}
+        self.activation_memory_monitors: dict[UserID, dict[MemCubeID, MemoryMonitorManager]] = {}
+
+        # Lifecycle monitor
+        self.last_activation_mem_update_time = datetime.min
+        self.last_query_consume_time = datetime.min
+
+        self._process_llm = process_llm
+
+    def extract_query_keywords(self, query: str) -> list:
+        """Extracts core keywords from a user query based on specific semantic rules."""
+        prompt_name = "query_keywords_extraction"
+        prompt = self.build_prompt(
+            template_name=prompt_name,
+            query=query,
+        )
+        llm_response = self._process_llm.generate([{"role": "user", "content": prompt}])
+        try:
+            # Parse JSON output from LLM response
+            keywords = extract_json_dict(llm_response)
+            assert isinstance(keywords, list)
+        except Exception as e:
+            logger.error(
+                f"Failed to parse keywords from LLM response: {llm_response}. Error: {e!s}"
+            )
+            keywords = [query]
+        return keywords
+
+    def register_memory_manager_if_not_exists(
+        self,
+        user_id: str,
+        mem_cube_id: str,
+        memory_monitors: dict[UserID, dict[MemCubeID, MemoryMonitorManager]],
+        max_capacity: int,
+    ) -> None:
+        """
+        Register a new MemoryMonitorManager for the given user and memory cube if it doesn't exist.
+
+        Checks if a MemoryMonitorManager already exists for the specified user_id and mem_cube_id.
+        If not, creates a new MemoryMonitorManager with appropriate capacity settings and registers it.
+
+        Args:
+            user_id: The ID of the user to associate with the memory manager
+            mem_cube_id: The ID of the memory cube to monitor
+
+        Note:
+            This function will update the loose_max_working_memory_capacity based on the current
+            WorkingMemory size plus partial retention number before creating a new manager.
+        """
+        # Check if a MemoryMonitorManager already exists for the current user_id and mem_cube_id
+        # If doesn't exist, create and register a new one
+        if (user_id not in memory_monitors) or (mem_cube_id not in memory_monitors[user_id]):
+            # Initialize MemoryMonitorManager with user ID, memory cube ID, and max capacity
+            monitor_manager = MemoryMonitorManager(
+                user_id=user_id, mem_cube_id=mem_cube_id, max_capacity=max_capacity
+            )
+
+            # Safely register the new manager in the nested dictionary structure
+            memory_monitors.setdefault(user_id, {})[mem_cube_id] = monitor_manager
+            logger.info(
+                f"Registered new MemoryMonitorManager for user_id={user_id},"
+                f" mem_cube_id={mem_cube_id} with max_capacity={max_capacity}"
+            )
         else:
-            logger.error("Not Implemented")
+            logger.info(
+                f"MemoryMonitorManager already exists for user_id={user_id}, "
+                f"mem_cube_id={mem_cube_id} in the provided memory_monitors dictionary"
+            )
+
+    def update_working_memory_monitors(
+        self,
+        new_working_memory_monitors: list[MemoryMonitorItem],
+        user_id: str,
+        mem_cube_id: str,
+        mem_cube: GeneralMemCube,
+    ):
+        text_mem_base: TreeTextMemory = mem_cube.text_mem
+        assert isinstance(text_mem_base, TreeTextMemory)
+        self.working_mem_monitor_capacity = min(
+            DEFAULT_WORKING_MEM_MONITOR_SIZE_LIMIT,
+            (
+                text_mem_base.memory_manager.memory_size["WorkingMemory"]
+                + self.partial_retention_number
+            ),
+        )
 
-        return mem_cube_info
+        # register monitors
+        self.register_memory_manager_if_not_exists(
+            user_id=user_id,
+            mem_cube_id=mem_cube_id,
+            memory_monitors=self.working_memory_monitors,
+            max_capacity=self.working_mem_monitor_capacity,
+        )
+
+        self.working_memory_monitors[user_id][mem_cube_id].update_memories(
+            new_memory_monitors=new_working_memory_monitors,
+            partial_retention_number=self.partial_retention_number,
+        )
+
+    def update_activation_memory_monitors(
+        self, user_id: str, mem_cube_id: str, mem_cube: GeneralMemCube
+    ):
+        self.register_memory_manager_if_not_exists(
+            user_id=user_id,
+            mem_cube_id=mem_cube_id,
+            memory_monitors=self.activation_memory_monitors,
+            max_capacity=self.activation_mem_monitor_capacity,
+        )
+
+        # === update activation memory monitors ===
+        # Sort by importance_score in descending order and take top k
+        top_k_memories = sorted(
+            self.working_memory_monitors[user_id][mem_cube_id].memories,
+            key=lambda m: m.get_importance_score(weight_vector=DEFAULT_WEIGHT_VECTOR_FOR_RANKING),
+            reverse=True,
+        )[: self.activation_mem_monitor_capacity]
+
+        # Update the activation memory monitors with these important memories
+        self.activation_memory_monitors[user_id][mem_cube_id].update_memories(
+            new_memory_monitors=top_k_memories,
+            partial_retention_number=self.partial_retention_number,
+        )
+
+    def timed_trigger(self, last_time: datetime, interval_seconds: float) -> bool:
+        now = datetime.now()
+        elapsed = (now - last_time).total_seconds()
+        if elapsed >= interval_seconds:
+            return True
+        logger.debug(f"Time trigger not ready, {elapsed:.1f}s elapsed (needs {interval_seconds}s)")
+        return False
+
+    def get_monitor_memories(
+        self,
+        user_id: str,
+        mem_cube_id: str,
+        memory_type: str = MONITOR_WORKING_MEMORY_TYPE,
+        top_k: int = 10,
+    ) -> list[str]:
+        """Retrieves memory items managed by the scheduler, sorted by recording count.
+
+        Args:
+            user_id: Unique identifier of the user
+            mem_cube_id: Unique identifier of the memory cube
+            memory_type: Type of memory to retrieve (MONITOR_WORKING_MEMORY_TYPE or
+                MONITOR_ACTIVATION_MEMORY_TYPE)
+            top_k: Maximum number of memory items to return (default: 10)
+
+        Returns:
+            List of memory texts, sorted by recording count in descending order.
+            Returns empty list if no MemoryMonitorManager exists for the given parameters.
+        """
+        # Select the appropriate monitor dictionary based on memory_type
+        if memory_type == MONITOR_WORKING_MEMORY_TYPE:
+            monitor_dict = self.working_memory_monitors
+        elif memory_type == MONITOR_ACTIVATION_MEMORY_TYPE:
+            monitor_dict = self.activation_memory_monitors
+        else:
+            logger.warning(f"Invalid memory type: {memory_type}")
+            return []
+
+        if user_id not in monitor_dict or mem_cube_id not in monitor_dict[user_id]:
+            logger.warning(
+                f"MemoryMonitorManager not found for user {user_id}, "
+                f"mem_cube {mem_cube_id}, type {memory_type}"
+            )
+            return []
+
+        manager: MemoryMonitorManager = monitor_dict[user_id][mem_cube_id]
+        # Sort memories by recording_count in descending order and return top_k items
+        sorted_memory_monitors = manager.get_sorted_mem_monitors(reverse=True)
+        sorted_text_memories = [m.memory_text for m in sorted_memory_monitors[:top_k]]
+        return sorted_text_memories
+
+    def get_monitors_info(self, user_id: str, mem_cube_id: str) -> dict[str, Any]:
+        """Retrieves monitoring information for a specific memory cube."""
+        if (
+            user_id not in self.working_memory_monitors
+            or mem_cube_id not in self.working_memory_monitors[user_id]
+        ):
+            logger.warning(
+                f"MemoryMonitorManager not found for user {user_id}, mem_cube {mem_cube_id}"
+            )
+            return {}
+
+        info_dict = {}
+        for manager in [
+            self.working_memory_monitors[user_id][mem_cube_id],
+            self.activation_memory_monitors[user_id][mem_cube_id],
+        ]:
+            info_dict[str(type(manager))] = {
+                "user_id": user_id,
+                "mem_cube_id": mem_cube_id,
+                "memory_count": manager.memory_size,
+                "max_capacity": manager.max_capacity,
+                "top_memories": self.get_scheduler_working_memories(user_id, mem_cube_id, top_k=1),
+            }
+        return info_dict
 
     def detect_intent(
         self,
@@ -55,28 +262,11 @@ class SchedulerMonitor(BaseSchedulerModule):
             q_list=q_list,
             working_memory_list=text_working_memory,
         )
-        response = self._chat_llm.generate([{"role": "user", "content": prompt}])
-        response = extract_json_dict(response)
-        return response
-
-    def update_freq(
-        self,
-        answer: str,
-        activation_memory_freq_list: list[dict],
-        prompt_name="freq_detecting",
-    ) -> list[dict]:
-        """
-        Use LLM to detect which memories in activation_memory_freq_list appear in the answer,
-        increment their count by 1, and return the updated list.
-        """
-        prompt = self.build_prompt(
-            template_name=prompt_name,
-            answer=answer,
-            activation_memory_freq_list=activation_memory_freq_list,
-        )
-        response = self._chat_llm.generate([{"role": "user", "content": prompt}])
+        response = self._process_llm.generate([{"role": "user", "content": prompt}])
         try:
-            result = json.loads(response)
+            response = extract_json_dict(response)
+            assert ("trigger_retrieval" in response) and ("missing_evidences" in response)
         except Exception:
-            result = activation_memory_freq_list
-            return result
+            logger.error(f"Fail to extract json dict from response: {response}")
+            response = {"trigger_retrieval": False, "missing_evidences": q_list}
+        return response
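
The monitor rewrite above replaces the 0.2.0 frequency-tracking logic with per-user, per-cube MemoryMonitorManager registries driven by a dedicated process LLM and a BaseSchedulerConfig. A minimal sketch of the new call surface, assuming llm, scheduler_config, mem_cube, and new_items (a list of MemoryMonitorItem) are constructed elsewhere; only calls shown in this hunk are used.

    from memos.mem_scheduler.modules.monitor import SchedulerMonitor
    from memos.mem_scheduler.schemas.general_schemas import MONITOR_WORKING_MEMORY_TYPE

    monitor = SchedulerMonitor(process_llm=llm, config=scheduler_config)

    # Keyword extraction falls back to [query] if the LLM output cannot be parsed
    keywords = monitor.extract_query_keywords("What did I schedule for Friday?")

    # Refresh the working-memory view for one user/cube, then read back the top items
    monitor.update_working_memory_monitors(
        new_working_memory_monitors=new_items,
        user_id="u1",
        mem_cube_id="cube_1",
        mem_cube=mem_cube,
    )
    top_texts = monitor.get_monitor_memories(
        user_id="u1",
        mem_cube_id="cube_1",
        memory_type=MONITOR_WORKING_MEMORY_TYPE,
        top_k=5,
    )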