MemoryOS 0.2.0-py3-none-any.whl → 0.2.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of MemoryOS might be problematic.
- {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/METADATA +67 -26
- memoryos-0.2.2.dist-info/RECORD +169 -0
- memoryos-0.2.2.dist-info/entry_points.txt +3 -0
- memos/__init__.py +1 -1
- memos/api/config.py +562 -0
- memos/api/context/context.py +147 -0
- memos/api/context/dependencies.py +90 -0
- memos/api/exceptions.py +28 -0
- memos/api/mcp_serve.py +502 -0
- memos/api/product_api.py +35 -0
- memos/api/product_models.py +163 -0
- memos/api/routers/__init__.py +1 -0
- memos/api/routers/product_router.py +386 -0
- memos/chunkers/sentence_chunker.py +8 -2
- memos/cli.py +113 -0
- memos/configs/embedder.py +27 -0
- memos/configs/graph_db.py +132 -3
- memos/configs/internet_retriever.py +6 -0
- memos/configs/llm.py +47 -0
- memos/configs/mem_cube.py +1 -1
- memos/configs/mem_os.py +5 -0
- memos/configs/mem_reader.py +9 -0
- memos/configs/mem_scheduler.py +107 -7
- memos/configs/mem_user.py +58 -0
- memos/configs/memory.py +5 -4
- memos/dependency.py +52 -0
- memos/embedders/ark.py +92 -0
- memos/embedders/factory.py +4 -0
- memos/embedders/sentence_transformer.py +8 -2
- memos/embedders/universal_api.py +32 -0
- memos/graph_dbs/base.py +11 -3
- memos/graph_dbs/factory.py +4 -0
- memos/graph_dbs/nebular.py +1364 -0
- memos/graph_dbs/neo4j.py +333 -124
- memos/graph_dbs/neo4j_community.py +300 -0
- memos/llms/base.py +9 -0
- memos/llms/deepseek.py +54 -0
- memos/llms/factory.py +10 -1
- memos/llms/hf.py +170 -13
- memos/llms/hf_singleton.py +114 -0
- memos/llms/ollama.py +4 -0
- memos/llms/openai.py +67 -1
- memos/llms/qwen.py +63 -0
- memos/llms/vllm.py +153 -0
- memos/log.py +1 -1
- memos/mem_cube/general.py +77 -16
- memos/mem_cube/utils.py +109 -0
- memos/mem_os/core.py +251 -51
- memos/mem_os/main.py +94 -12
- memos/mem_os/product.py +1220 -43
- memos/mem_os/utils/default_config.py +352 -0
- memos/mem_os/utils/format_utils.py +1401 -0
- memos/mem_reader/simple_struct.py +18 -10
- memos/mem_scheduler/base_scheduler.py +441 -40
- memos/mem_scheduler/general_scheduler.py +249 -248
- memos/mem_scheduler/modules/base.py +14 -5
- memos/mem_scheduler/modules/dispatcher.py +67 -4
- memos/mem_scheduler/modules/misc.py +104 -0
- memos/mem_scheduler/modules/monitor.py +240 -50
- memos/mem_scheduler/modules/rabbitmq_service.py +319 -0
- memos/mem_scheduler/modules/redis_service.py +32 -22
- memos/mem_scheduler/modules/retriever.py +167 -23
- memos/mem_scheduler/modules/scheduler_logger.py +255 -0
- memos/mem_scheduler/mos_for_test_scheduler.py +140 -0
- memos/mem_scheduler/schemas/__init__.py +0 -0
- memos/mem_scheduler/schemas/general_schemas.py +43 -0
- memos/mem_scheduler/{modules/schemas.py → schemas/message_schemas.py} +63 -61
- memos/mem_scheduler/schemas/monitor_schemas.py +329 -0
- memos/mem_scheduler/utils/__init__.py +0 -0
- memos/mem_scheduler/utils/filter_utils.py +176 -0
- memos/mem_scheduler/utils/misc_utils.py +61 -0
- memos/mem_user/factory.py +94 -0
- memos/mem_user/mysql_persistent_user_manager.py +271 -0
- memos/mem_user/mysql_user_manager.py +500 -0
- memos/mem_user/persistent_factory.py +96 -0
- memos/mem_user/persistent_user_manager.py +260 -0
- memos/mem_user/user_manager.py +4 -4
- memos/memories/activation/item.py +29 -0
- memos/memories/activation/kv.py +10 -3
- memos/memories/activation/vllmkv.py +219 -0
- memos/memories/factory.py +2 -0
- memos/memories/textual/base.py +1 -1
- memos/memories/textual/general.py +43 -97
- memos/memories/textual/item.py +5 -33
- memos/memories/textual/tree.py +22 -12
- memos/memories/textual/tree_text_memory/organize/conflict.py +9 -5
- memos/memories/textual/tree_text_memory/organize/manager.py +26 -18
- memos/memories/textual/tree_text_memory/organize/redundancy.py +25 -44
- memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +50 -48
- memos/memories/textual/tree_text_memory/organize/reorganizer.py +81 -56
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +6 -3
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +2 -0
- memos/memories/textual/tree_text_memory/retrieve/recall.py +0 -1
- memos/memories/textual/tree_text_memory/retrieve/reranker.py +2 -2
- memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +2 -0
- memos/memories/textual/tree_text_memory/retrieve/searcher.py +52 -28
- memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +42 -15
- memos/memories/textual/tree_text_memory/retrieve/utils.py +11 -7
- memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +62 -58
- memos/memos_tools/dinding_report_bot.py +422 -0
- memos/memos_tools/notification_service.py +44 -0
- memos/memos_tools/notification_utils.py +96 -0
- memos/parsers/markitdown.py +8 -2
- memos/settings.py +3 -1
- memos/templates/mem_reader_prompts.py +66 -23
- memos/templates/mem_scheduler_prompts.py +126 -43
- memos/templates/mos_prompts.py +87 -0
- memos/templates/tree_reorganize_prompts.py +85 -30
- memos/vec_dbs/base.py +12 -0
- memos/vec_dbs/qdrant.py +46 -20
- memoryos-0.2.0.dist-info/RECORD +0 -128
- memos/mem_scheduler/utils.py +0 -26
- {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/LICENSE +0 -0
- {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/WHEEL +0 -0
memos/mem_os/product.py
CHANGED
@@ -1,80 +1,972 @@
 import json
+import os
+import random
+import time
 
 from collections.abc import Generator
-from
+from datetime import datetime
+from typing import Any, Literal
 
+from dotenv import load_dotenv
+from transformers import AutoTokenizer
+
+from memos.configs.mem_cube import GeneralMemCubeConfig
 from memos.configs.mem_os import MOSConfig
+from memos.log import get_logger
+from memos.mem_cube.general import GeneralMemCube
 from memos.mem_os.core import MOSCore
-from memos.
-
-
+from memos.mem_os.utils.format_utils import (
+    clean_json_response,
+    convert_graph_to_tree_forworkmem,
+    ensure_unique_tree_ids,
+    filter_nodes_by_tree_ids,
+    remove_embedding_recursive,
+    sort_children_by_memory_type,
+    split_continuous_references,
+)
+from memos.mem_scheduler.schemas.general_schemas import (
+    ANSWER_LABEL,
+    QUERY_LABEL,
+)
+from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem
+from memos.mem_user.persistent_factory import PersistentUserManagerFactory
+from memos.mem_user.user_manager import UserRole
+from memos.memories.textual.item import (
+    TextualMemoryItem,
+)
+from memos.templates.mos_prompts import MEMOS_PRODUCT_BASE_PROMPT, MEMOS_PRODUCT_ENHANCE_PROMPT
 from memos.types import MessageList
 
 
+logger = get_logger(__name__)
+
+load_dotenv()
+
+CUBE_PATH = os.getenv("MOS_CUBE_PATH", "/tmp/data/")
+
+
 class MOSProduct(MOSCore):
     """
-    The MOSProduct class inherits from MOSCore
+    The MOSProduct class inherits from MOSCore and manages multiple users.
+    Each user has their own configuration and cube access, but shares the same model instances.
     """
 
-    def __init__(
-
+    def __init__(
+        self,
+        default_config: MOSConfig | None = None,
+        max_user_instances: int = 1,
+        default_cube_config: GeneralMemCubeConfig | None = None,
+        online_bot=None,
+        error_bot=None,
+    ):
+        """
+        Initialize MOSProduct with an optional default configuration.
+
+        Args:
+            default_config (MOSConfig | None): Default configuration for new users
+            max_user_instances (int): Maximum number of user instances to keep in memory
+            default_cube_config (GeneralMemCubeConfig | None): Default cube configuration for loading cubes
+            online_bot: DingDing online_bot function or None if disabled
+            error_bot: DingDing error_bot function or None if disabled
+        """
+        # Initialize with a root config for shared resources
+        if default_config is None:
+            # Create a minimal config for root user
+            root_config = MOSConfig(
+                user_id="root",
+                session_id="root_session",
+                chat_model=default_config.chat_model if default_config else None,
+                mem_reader=default_config.mem_reader if default_config else None,
+                enable_mem_scheduler=default_config.enable_mem_scheduler
+                if default_config
+                else False,
+                mem_scheduler=default_config.mem_scheduler if default_config else None,
+            )
+        else:
+            root_config = default_config.model_copy(deep=True)
+            root_config.user_id = "root"
+            root_config.session_id = "root_session"
+
+        # Create persistent user manager BEFORE calling parent constructor
+        persistent_user_manager_client = PersistentUserManagerFactory.from_config(
+            config_factory=root_config.user_manager
+        )
+
+        # Initialize parent MOSCore with root config and persistent user manager
+        super().__init__(root_config, user_manager=persistent_user_manager_client)
+
+        # Product-specific attributes
+        self.default_config = default_config
+        self.default_cube_config = default_cube_config
+        self.max_user_instances = max_user_instances
+        self.online_bot = online_bot
+        self.error_bot = error_bot
+
+        # User-specific data structures
+        self.user_configs: dict[str, MOSConfig] = {}
+        self.user_cube_access: dict[str, set[str]] = {}  # user_id -> set of cube_ids
+        self.user_chat_histories: dict[str, dict] = {}
+
+        # Note: self.user_manager is now the persistent user manager from parent class
+        # No need for separate global_user_manager as they are the same instance
+
+        # Initialize tiktoken for streaming
+        try:
+            # Use gpt2 encoding which is more stable and widely compatible
+            self.tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
+            logger.info("tokenizer initialized successfully for streaming")
+        except Exception as e:
+            logger.warning(
+                f"Failed to initialize tokenizer, will use character-based chunking: {e}"
+            )
+            self.tokenizer = None
+
+        # Restore user instances from persistent storage
+        self._restore_user_instances(default_cube_config=default_cube_config)
+        logger.info(f"User instances restored successfully, now user is {self.mem_cubes.keys()}")
+
+    def _restore_user_instances(
+        self, default_cube_config: GeneralMemCubeConfig | None = None
+    ) -> None:
+        """Restore user instances from persistent storage after service restart.
+
+        Args:
+            default_cube_config (GeneralMemCubeConfig | None, optional): Default cube configuration. Defaults to None.
+        """
+        try:
+            # Get all user configurations from persistent storage
+            user_configs = self.user_manager.list_user_configs()
+
+            # Get the raw database records for sorting by updated_at
+            session = self.user_manager._get_session()
+            try:
+                from memos.mem_user.persistent_user_manager import UserConfig
+
+                db_configs = session.query(UserConfig).all()
+                # Create a mapping of user_id to updated_at timestamp
+                updated_at_map = {config.user_id: config.updated_at for config in db_configs}
+
+                # Sort by updated_at timestamp (most recent first) and limit by max_instances
+                sorted_configs = sorted(
+                    user_configs.items(), key=lambda x: updated_at_map.get(x[0], ""), reverse=True
+                )[: self.max_user_instances]
+            finally:
+                session.close()
+
+            for user_id, config in sorted_configs:
+                if user_id != "root":  # Skip root user
+                    try:
+                        # Store user config and cube access
+                        self.user_configs[user_id] = config
+                        self._load_user_cube_access(user_id)
+
+                        # Pre-load all cubes for this user with default config
+                        self._preload_user_cubes(user_id, default_cube_config)
+
+                        logger.info(
+                            f"Restored user configuration and pre-loaded cubes for {user_id}"
+                        )
+
+                    except Exception as e:
+                        logger.error(f"Failed to restore user configuration for {user_id}: {e}")
+
+        except Exception as e:
+            logger.error(f"Error during user instance restoration: {e}")
+
+    def _preload_user_cubes(
+        self, user_id: str, default_cube_config: GeneralMemCubeConfig | None = None
+    ) -> None:
+        """Pre-load all cubes for a user into memory.
+
+        Args:
+            user_id (str): The user ID to pre-load cubes for.
+            default_cube_config (GeneralMemCubeConfig | None, optional): Default cube configuration. Defaults to None.
+        """
+        try:
+            # Get user's accessible cubes from persistent storage
+            accessible_cubes = self.user_manager.get_user_cubes(user_id)
+
+            for cube in accessible_cubes:
+                if cube.cube_id not in self.mem_cubes:
+                    try:
+                        if cube.cube_path and os.path.exists(cube.cube_path):
+                            # Pre-load cube with all memory types and default config
+                            self.register_mem_cube(
+                                cube.cube_path,
+                                cube.cube_id,
+                                user_id,
+                                memory_types=["act_mem"]
+                                if self.config.enable_activation_memory
+                                else [],
+                                default_config=default_cube_config,
+                            )
+                            logger.info(f"Pre-loaded cube {cube.cube_id} for user {user_id}")
+                        else:
+                            logger.warning(
+                                f"Cube path {cube.cube_path} does not exist for cube {cube.cube_id}, skipping pre-load"
+                            )
+                    except Exception as e:
+                        logger.error(
+                            f"Failed to pre-load cube {cube.cube_id} for user {user_id}: {e}",
+                            exc_info=True,
+                        )
+
+        except Exception as e:
+            logger.error(f"Error pre-loading cubes for user {user_id}: {e}", exc_info=True)
+
+    def _load_user_cubes(
+        self, user_id: str, default_cube_config: GeneralMemCubeConfig | None = None
+    ) -> None:
+        """Load all cubes for a user into memory.
+
+        Args:
+            user_id (str): The user ID to load cubes for.
+            default_cube_config (GeneralMemCubeConfig | None, optional): Default cube configuration. Defaults to None.
+        """
+        # Get user's accessible cubes from persistent storage
+        accessible_cubes = self.user_manager.get_user_cubes(user_id)
+
+        for cube in accessible_cubes[:1]:
+            if cube.cube_id not in self.mem_cubes:
+                try:
+                    if cube.cube_path and os.path.exists(cube.cube_path):
+                        # Use MOSCore's register_mem_cube method directly with default config
+                        # Only load act_mem since text_mem is stored in database
+                        self.register_mem_cube(
+                            cube.cube_path,
+                            cube.cube_id,
+                            user_id,
+                            memory_types=["act_mem"],
+                            default_config=default_cube_config,
+                        )
+                    else:
+                        logger.warning(
+                            f"Cube path {cube.cube_path} does not exist for cube {cube.cube_id}"
+                        )
+                except Exception as e:
+                    logger.error(f"Failed to load cube {cube.cube_id} for user {user_id}: {e}")
+
+    def _ensure_user_instance(self, user_id: str, max_instances: int | None = None) -> None:
+        """
+        Ensure user configuration exists, creating it if necessary.
+
+        Args:
+            user_id (str): The user ID
+            max_instances (int): Maximum instances to keep in memory (overrides class default)
+        """
+        if user_id in self.user_configs:
+            return
+
+        # Try to get config from persistent storage first
+        stored_config = self.user_manager.get_user_config(user_id)
+        if stored_config:
+            self.user_configs[user_id] = stored_config
+            self._load_user_cube_access(user_id)
+        else:
+            # Use default config
+            if not self.default_config:
+                raise ValueError(f"No configuration available for user {user_id}")
+            user_config = self.default_config.model_copy(deep=True)
+            user_config.user_id = user_id
+            user_config.session_id = f"{user_id}_session"
+            self.user_configs[user_id] = user_config
+            self._load_user_cube_access(user_id)
+
+        # Apply LRU eviction if needed
+        max_instances = max_instances or self.max_user_instances
+        if len(self.user_configs) > max_instances:
+            # Remove least recently used instance (excluding root)
+            user_ids = [uid for uid in self.user_configs if uid != "root"]
+            if user_ids:
+                oldest_user_id = user_ids[0]
+                del self.user_configs[oldest_user_id]
+                if oldest_user_id in self.user_cube_access:
+                    del self.user_cube_access[oldest_user_id]
+                logger.info(f"Removed least recently used user configuration: {oldest_user_id}")
+
+    def _load_user_cube_access(self, user_id: str) -> None:
+        """Load user's cube access permissions."""
+        try:
+            # Get user's accessible cubes from persistent storage
+            accessible_cubes = self.user_manager.get_user_cube_access(user_id)
+            self.user_cube_access[user_id] = set(accessible_cubes)
+        except Exception as e:
+            logger.warning(f"Failed to load cube access for user {user_id}: {e}")
+            self.user_cube_access[user_id] = set()
+
+    def _get_user_config(self, user_id: str) -> MOSConfig:
+        """Get user configuration."""
+        if user_id not in self.user_configs:
+            self._ensure_user_instance(user_id)
+        return self.user_configs[user_id]
+
+    def _validate_user_cube_access(self, user_id: str, cube_id: str) -> None:
+        """Validate user has access to the cube."""
+        if user_id not in self.user_cube_access:
+            self._load_user_cube_access(user_id)
+
+        if cube_id not in self.user_cube_access.get(user_id, set()):
+            raise ValueError(f"User '{user_id}' does not have access to cube '{cube_id}'")
+
+    def _validate_user_access(self, user_id: str, cube_id: str | None = None) -> None:
+        """Validate user access using MOSCore's built-in validation."""
+        # Use MOSCore's built-in user validation
+        if cube_id:
+            self._validate_cube_access(user_id, cube_id)
+        else:
+            self._validate_user_exists(user_id)
+
+    def _create_user_config(self, user_id: str, config: MOSConfig) -> MOSConfig:
+        """Create a new user configuration."""
+        # Create a copy of config with the specific user_id
+        user_config = config.model_copy(deep=True)
+        user_config.user_id = user_id
+        user_config.session_id = f"{user_id}_session"
+
+        # Save configuration to persistent storage
+        self.user_manager.save_user_config(user_id, user_config)
+
+        return user_config
+
+    def _get_or_create_user_config(
+        self, user_id: str, config: MOSConfig | None = None
+    ) -> MOSConfig:
+        """Get existing user config or create a new one."""
+        if user_id in self.user_configs:
+            return self.user_configs[user_id]
+
+        # Try to get config from persistent storage first
+        stored_config = self.user_manager.get_user_config(user_id)
+        if stored_config:
+            return self._create_user_config(user_id, stored_config)
 
-
+        # Use provided config or default config
+        user_config = config or self.default_config
+        if not user_config:
+            raise ValueError(f"No configuration provided for user {user_id}")
+
+        return self._create_user_config(user_id, user_config)
+
+    def _build_system_prompt(
+        self, memories_all: list[TextualMemoryItem], base_prompt: str | None = None
+    ) -> str:
+        """
+        Build custom system prompt for the user with memory references.
+
+        Args:
+            user_id (str): The user ID.
+            memories (list[TextualMemoryItem]): The memories to build the system prompt.
+
+        Returns:
+            str: The custom system prompt.
+        """
+
+        # Build base prompt
+        # Add memory context if available
+        if memories_all:
+            memory_context = "\n\n## Available ID Memories:\n"
+            for i, memory in enumerate(memories_all, 1):
+                # Format: [memory_id]: memory_content
+                memory_id = f"{memory.id.split('-')[0]}" if hasattr(memory, "id") else f"mem_{i}"
+                memory_content = memory.memory[:500] if hasattr(memory, "memory") else str(memory)
+                memory_content = memory_content.replace("\n", " ")
+                memory_context += f"{memory_id}: {memory_content}\n"
+            return MEMOS_PRODUCT_BASE_PROMPT + memory_context
+
+        return MEMOS_PRODUCT_BASE_PROMPT
+
+    def _build_enhance_system_prompt(
+        self, user_id: str, memories_all: list[TextualMemoryItem]
+    ) -> str:
+        """
+        Build enhance prompt for the user with memory references.
+        """
+        if memories_all:
+            personal_memory_context = "\n\n## Available ID and PersonalMemory Memories:\n"
+            outer_memory_context = "\n\n## Available ID and OuterMemory Memories:\n"
+            for i, memory in enumerate(memories_all, 1):
+                # Format: [memory_id]: memory_content
+                if memory.metadata.memory_type != "OuterMemory":
+                    memory_id = (
+                        f"{memory.id.split('-')[0]}" if hasattr(memory, "id") else f"mem_{i}"
+                    )
+                    memory_content = (
+                        memory.memory[:500] if hasattr(memory, "memory") else str(memory)
+                    )
+                    personal_memory_context += f"{memory_id}: {memory_content}\n"
+                else:
+                    memory_id = (
+                        f"{memory.id.split('-')[0]}" if hasattr(memory, "id") else f"mem_{i}"
+                    )
+                    memory_content = (
+                        memory.memory[:500] if hasattr(memory, "memory") else str(memory)
+                    )
+                    memory_content = memory_content.replace("\n", " ")
+                    outer_memory_context += f"{memory_id}: {memory_content}\n"
+            return MEMOS_PRODUCT_ENHANCE_PROMPT + personal_memory_context + outer_memory_context
+        return MEMOS_PRODUCT_ENHANCE_PROMPT
+
+    def _process_streaming_references_complete(self, text_buffer: str) -> tuple[str, str]:
+        """
+        Complete streaming reference processing to ensure reference tags are never split.
+
+        Args:
+            text_buffer (str): The accumulated text buffer.
+
+        Returns:
+            tuple[str, str]: (processed_text, remaining_buffer)
+        """
+        import re
+
+        # Pattern to match complete reference tags: [refid:memoriesID]
+        complete_pattern = r"\[\d+:[^\]]+\]"
+
+        # Find all complete reference tags
+        complete_matches = list(re.finditer(complete_pattern, text_buffer))
+
+        if complete_matches:
+            # Find the last complete tag
+            last_match = complete_matches[-1]
+            end_pos = last_match.end()
+
+            # Get text up to the end of the last complete tag
+            processed_text = text_buffer[:end_pos]
+            remaining_buffer = text_buffer[end_pos:]
+
+            # Apply reference splitting to the processed text
+            processed_text = split_continuous_references(processed_text)
+
+            return processed_text, remaining_buffer
+
+        # Check for incomplete reference tags
+        # Look for opening bracket with number and colon
+        opening_pattern = r"\[\d+:"
+        opening_matches = list(re.finditer(opening_pattern, text_buffer))
+
+        if opening_matches:
+            # Find the last opening tag
+            last_opening = opening_matches[-1]
+            opening_start = last_opening.start()
+
+            # Check if we have a complete opening pattern
+            if last_opening.end() <= len(text_buffer):
+                # We have a complete opening pattern, keep everything in buffer
+                return "", text_buffer
+            else:
+                # Incomplete opening pattern, return text before it
+                processed_text = text_buffer[:opening_start]
+                # Apply reference splitting to the processed text
+                processed_text = split_continuous_references(processed_text)
+                return processed_text, text_buffer[opening_start:]
+
+        # Check for partial opening pattern (starts with [ but not complete)
+        if "[" in text_buffer:
+            ref_start = text_buffer.find("[")
+            processed_text = text_buffer[:ref_start]
+            # Apply reference splitting to the processed text
+            processed_text = split_continuous_references(processed_text)
+            return processed_text, text_buffer[ref_start:]
+
+        # No reference tags found, apply reference splitting and return all text
+        processed_text = split_continuous_references(text_buffer)
+        return processed_text, ""
+
+    def _extract_references_from_response(self, response: str) -> tuple[str, list[dict]]:
+        """
+        Extract reference information from the response and return clean text.
+
+        Args:
+            response (str): The complete response text.
+
+        Returns:
+            tuple[str, list[dict]]: A tuple containing:
+                - clean_text: Text with reference markers removed
+                - references: List of reference information
+        """
+        import re
+
+        try:
+            references = []
+            # Pattern to match [refid:memoriesID]
+            pattern = r"\[(\d+):([^\]]+)\]"
+
+            matches = re.findall(pattern, response)
+            for ref_number, memory_id in matches:
+                references.append({"memory_id": memory_id, "reference_number": int(ref_number)})
+
+            # Remove all reference markers from the text to get clean text
+            clean_text = re.sub(pattern, "", response)
+
+            # Clean up any extra whitespace that might be left after removing markers
+            clean_text = re.sub(r"\s+", " ", clean_text).strip()
+
+            return clean_text, references
+        except Exception as e:
+            logger.error(f"Error extracting references from response: {e}", exc_info=True)
+            return response, []
+
+    def _chunk_response_with_tiktoken(
+        self, response: str, chunk_size: int = 5
+    ) -> Generator[str, None, None]:
+        """
+        Chunk response using tiktoken for proper token-based streaming.
+
+        Args:
+            response (str): The response text to chunk.
+            chunk_size (int): Number of tokens per chunk.
+
+        Yields:
+            str: Chunked text pieces.
+        """
+        if self.tokenizer:
+            # Use tiktoken for proper token-based chunking
+            tokens = self.tokenizer.encode(response)
+
+            for i in range(0, len(tokens), chunk_size):
+                token_chunk = tokens[i : i + chunk_size]
+                chunk_text = self.tokenizer.decode(token_chunk)
+                yield chunk_text
+        else:
+            # Fallback to character-based chunking
+            char_chunk_size = chunk_size * 4  # Approximate character to token ratio
+            for i in range(0, len(response), char_chunk_size):
+                yield response[i : i + char_chunk_size]
+
+    def _send_message_to_scheduler(
+        self,
+        user_id: str,
+        mem_cube_id: str,
+        query: str,
+        label: str,
+    ):
+        """
+        Send message to scheduler.
+        args:
+            user_id: str,
+            mem_cube_id: str,
+            query: str,
+        """
+
+        if self.enable_mem_scheduler and (self.mem_scheduler is not None):
+            message_item = ScheduleMessageItem(
+                user_id=user_id,
+                mem_cube_id=mem_cube_id,
+                mem_cube=self.mem_cubes[mem_cube_id],
+                label=label,
+                content=query,
+                timestamp=datetime.now(),
+            )
+            self.mem_scheduler.submit_messages(messages=[message_item])
+
+    def _filter_memories_by_threshold(
+        self, memories: list[TextualMemoryItem], threshold: float = 0.20
+    ) -> list[TextualMemoryItem]:
+        """
+        Filter memories by threshold.
+        """
+        return [memory for memory in memories if memory.metadata.relativity >= threshold]
+
+    def register_mem_cube(
+        self,
+        mem_cube_name_or_path_or_object: str | GeneralMemCube,
+        mem_cube_id: str | None = None,
+        user_id: str | None = None,
+        memory_types: list[Literal["text_mem", "act_mem", "para_mem"]] | None = None,
+        default_config: GeneralMemCubeConfig | None = None,
+    ) -> None:
+        """
+        Register a MemCube with the MOS.
+
+        Args:
+            mem_cube_name_or_path_or_object (str | GeneralMemCube): The name, path, or GeneralMemCube object to register.
+            mem_cube_id (str, optional): The identifier for the MemCube. If not provided, a default ID is used.
+            user_id (str, optional): The user ID to register the cube for.
+            memory_types (list[str], optional): List of memory types to load.
+                If None, loads all available memory types.
+                Options: ["text_mem", "act_mem", "para_mem"]
+            default_config (GeneralMemCubeConfig, optional): Default configuration for the cube.
+        """
+        # Handle different input types
+        if isinstance(mem_cube_name_or_path_or_object, GeneralMemCube):
+            # Direct GeneralMemCube object provided
+            mem_cube = mem_cube_name_or_path_or_object
+            if mem_cube_id is None:
+                mem_cube_id = f"cube_{id(mem_cube)}"  # Generate a unique ID
+        else:
+            # String path provided
+            mem_cube_name_or_path = mem_cube_name_or_path_or_object
+            if mem_cube_id is None:
+                mem_cube_id = mem_cube_name_or_path
+
+            if mem_cube_id in self.mem_cubes:
+                logger.info(f"MemCube with ID {mem_cube_id} already in MOS, skip install.")
+                return
+
+            # Create MemCube from path
+            if os.path.exists(mem_cube_name_or_path):
+                mem_cube = GeneralMemCube.init_from_dir(
+                    mem_cube_name_or_path, memory_types, default_config
+                )
+            else:
+                logger.warning(
+                    f"MemCube {mem_cube_name_or_path} does not exist, try to init from remote repo."
+                )
+                mem_cube = GeneralMemCube.init_from_remote_repo(
+                    mem_cube_name_or_path, memory_types=memory_types, default_config=default_config
+                )
+
+        # Register the MemCube
+        logger.info(
+            f"Registering MemCube {mem_cube_id} with cube config {mem_cube.config.model_dump(mode='json')}"
+        )
+        self.mem_cubes[mem_cube_id] = mem_cube
+
+    def user_register(
+        self,
+        user_id: str,
+        user_name: str | None = None,
+        config: MOSConfig | None = None,
+        interests: str | None = None,
+        default_mem_cube: GeneralMemCube | None = None,
+        default_cube_config: GeneralMemCubeConfig | None = None,
+    ) -> dict[str, str]:
+        """Register a new user with configuration and default cube.
+
+        Args:
+            user_id (str): The user ID for registration.
+            user_name (str): The user name for registration.
+            config (MOSConfig | None, optional): User-specific configuration. Defaults to None.
+            interests (str | None, optional): User interests as string. Defaults to None.
+            default_mem_cube (GeneralMemCube | None, optional): Default memory cube. Defaults to None.
+            default_cube_config (GeneralMemCubeConfig | None, optional): Default cube configuration. Defaults to None.
+
+        Returns:
+            dict[str, str]: Registration result with status and message.
+        """
+        try:
+            # Use provided config or default config
+            user_config = config or self.default_config
+            if not user_config:
+                return {
+                    "status": "error",
+                    "message": "No configuration provided for user registration",
+                }
+            if not user_name:
+                user_name = user_id
+
+            # Create user with configuration using persistent user manager
+            self.user_manager.create_user_with_config(user_id, user_config, UserRole.USER, user_id)
+
+            # Create user configuration
+            user_config = self._create_user_config(user_id, user_config)
+
+            # Create a default cube for the user using MOSCore's methods
+            default_cube_name = f"{user_name}_{user_id}_default_cube"
+            mem_cube_name_or_path = f"{CUBE_PATH}/{default_cube_name}"
+            default_cube_id = self.create_cube_for_user(
+                cube_name=default_cube_name, owner_id=user_id, cube_path=mem_cube_name_or_path
+            )
+
+            if default_mem_cube:
+                try:
+                    default_mem_cube.dump(mem_cube_name_or_path)
+                except Exception as e:
+                    logger.error(f"Failed to dump default cube: {e}")
+
+            # Register the default cube with MOS
+            self.register_mem_cube(
+                mem_cube_name_or_path_or_object=default_mem_cube,
+                mem_cube_id=default_cube_id,
+                user_id=user_id,
+                memory_types=["act_mem"] if self.config.enable_activation_memory else [],
+                default_config=default_cube_config,  # use default cube config
+            )
+
+            # Add interests to the default cube if provided
+            if interests:
+                self.add(memory_content=interests, mem_cube_id=default_cube_id, user_id=user_id)
+
+            return {
+                "status": "success",
+                "message": f"User {user_name} registered successfully with default cube {default_cube_id}",
+                "user_id": user_id,
+                "default_cube_id": default_cube_id,
+            }
+
+        except Exception as e:
+            return {"status": "error", "message": f"Failed to register user: {e!s}"}
+
+    def get_suggestion_query(self, user_id: str, language: str = "zh") -> list[str]:
         """Get suggestion query from LLM.
         Args:
-            user_id (str
+            user_id (str): User ID.
+            language (str): Language for suggestions ("zh" or "en").
 
         Returns:
             list[str]: The suggestion query list.
         """
 
-
+        if language == "zh":
+            suggestion_prompt = """
+            你是一个有用的助手,可以帮助用户生成建议查询。
+            我将获取用户最近的一些记忆,
+            你应该生成一些建议查询,这些查询应该是用户想要查询的内容,
+            用户最近的记忆是:
+            {memories}
+            请生成3个建议查询用中文,
+            输出应该是json格式,键是"query",值是一个建议查询列表。
+
+            示例:
+            {{
+                "query": ["查询1", "查询2", "查询3"]
+            }}
+            """
+        else:  # English
+            suggestion_prompt = """
+            You are a helpful assistant that can help users to generate suggestion query.
+            I will get some user recently memories,
+            you should generate some suggestion query, the query should be user what to query,
+            user recently memories is:
+            {memories}
+            if the user recently memories is empty, please generate 3 suggestion query in English,
+            output should be a json format, the key is "query", the value is a list of suggestion query.
+
+            example:
+            {{
+                "query": ["query1", "query2", "query3"]
+            }}
+            """
+        text_mem_result = super().search("my recently memories", user_id=user_id, top_k=3)[
+            "text_mem"
+        ]
+        if text_mem_result:
+            memories = "\n".join([m.memory[:200] for m in text_mem_result[0]["memories"]])
+        else:
+            memories = ""
+        message_list = [{"role": "system", "content": suggestion_prompt.format(memories=memories)}]
+        response = self.chat_llm.generate(message_list)
+        clean_response = clean_json_response(response)
+        response_json = json.loads(clean_response)
+        return response_json["query"]
+
+    def chat_with_references(
         self,
         query: str,
         user_id: str,
         cube_id: str | None = None,
         history: MessageList | None = None,
+        top_k: int = 10,
+        internet_search: bool = False,
     ) -> Generator[str, None, None]:
-        """
+        """
+        Chat with LLM with memory references and streaming output.
+
         Args:
             query (str): Query string.
-            user_id (str
+            user_id (str): User ID.
             cube_id (str, optional): Custom cube ID for user.
-            history (
+            history (MessageList, optional): Chat history.
 
         Returns:
-            Generator[str, None, None]: The response string generator.
-        """
-
-
-
-
-        yield f"data: {json.dumps({'type': '
-
-
-
-
-
-
+            Generator[str, None, None]: The response string generator with reference processing.
+        """
+
+        self._load_user_cubes(user_id, self.default_cube_config)
+        time_start = time.time()
+        memories_list = []
+        yield f"data: {json.dumps({'type': 'status', 'data': '0'})}\n\n"
+        memories_result = super().search(
+            query,
+            user_id,
+            install_cube_ids=[cube_id] if cube_id else None,
+            top_k=top_k,
+            mode="fine",
+            internet_search=internet_search,
+        )["text_mem"]
+        yield f"data: {json.dumps({'type': 'status', 'data': '1'})}\n\n"
+        search_time_end = time.time()
+        logger.info(
+            f"time chat: search text_mem time user_id: {user_id} time is: {search_time_end - time_start}"
+        )
+        self._send_message_to_scheduler(
+            user_id=user_id, mem_cube_id=cube_id, query=query, label=QUERY_LABEL
+        )
+        if memories_result:
+            memories_list = memories_result[0]["memories"]
+            memories_list = self._filter_memories_by_threshold(memories_list)
+        # Build custom system prompt with relevant memories)
+        system_prompt = self._build_enhance_system_prompt(user_id, memories_list)
+        # Get chat history
+        if user_id not in self.chat_history_manager:
+            self._register_chat_history(user_id)
+
+        chat_history = self.chat_history_manager[user_id]
+        if history:
+            chat_history.chat_history = history[-10:]
+        current_messages = [
+            {"role": "system", "content": system_prompt},
+            *chat_history.chat_history,
+            {"role": "user", "content": query},
+        ]
+        logger.info(
+            f"user_id: {user_id}, cube_id: {cube_id}, current_system_prompt: {system_prompt}"
+        )
+        yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n"
+        # Generate response with custom prompt
+        past_key_values = None
+        response_stream = None
+        if self.config.enable_activation_memory:
+            # Handle activation memory (copy MOSCore logic)
+            for mem_cube_id, mem_cube in self.mem_cubes.items():
+                if mem_cube.act_mem and mem_cube_id == cube_id:
+                    kv_cache = next(iter(mem_cube.act_mem.get_all()), None)
+                    past_key_values = (
+                        kv_cache.memory if (kv_cache and hasattr(kv_cache, "memory")) else None
+                    )
+                    if past_key_values is not None:
+                        logger.info("past_key_values is not None will apply to chat")
+                    else:
+                        logger.info("past_key_values is None will not apply to chat")
+                    break
+            if self.config.chat_model.backend == "huggingface":
+                response_stream = self.chat_llm.generate_stream(
+                    current_messages, past_key_values=past_key_values
+                )
+            elif self.config.chat_model.backend == "vllm":
+                response_stream = self.chat_llm.generate_stream(current_messages)
+        else:
+            if self.config.chat_model.backend in ["huggingface", "vllm"]:
+                response_stream = self.chat_llm.generate_stream(current_messages)
+            else:
+                response_stream = self.chat_llm.generate(current_messages)
+
+        time_end = time.time()
+        chat_time_end = time.time()
+        logger.info(
+            f"time chat: chat time user_id: {user_id} time is: {chat_time_end - search_time_end}"
+        )
+        # Simulate streaming output with proper reference handling using tiktoken
+
+        # Initialize buffer for streaming
+        buffer = ""
+        full_response = ""
+        token_count = 0
+        # Use tiktoken for proper token-based chunking
+        if self.config.chat_model.backend not in ["huggingface", "vllm"]:
+            # For non-huggingface backends, we need to collect the full response first
+            full_response_text = ""
+            for chunk in response_stream:
+                if chunk in ["<think>", "</think>"]:
+                    continue
+                full_response_text += chunk
+            response_stream = self._chunk_response_with_tiktoken(full_response_text, chunk_size=5)
+        for chunk in response_stream:
+            if chunk in ["<think>", "</think>"]:
+                continue
+            token_count += 1
+            buffer += chunk
+            full_response += chunk
+
+            # Process buffer to ensure complete reference tags
+            processed_chunk, remaining_buffer = self._process_streaming_references_complete(buffer)
+
+            if processed_chunk:
+                chunk_data = f"data: {json.dumps({'type': 'text', 'data': processed_chunk}, ensure_ascii=False)}\n\n"
+                yield chunk_data
+            buffer = remaining_buffer
+
+        # Process any remaining buffer
+        if buffer:
+            processed_chunk, remaining_buffer = self._process_streaming_references_complete(buffer)
+            if processed_chunk:
+                chunk_data = f"data: {json.dumps({'type': 'text', 'data': processed_chunk}, ensure_ascii=False)}\n\n"
+                yield chunk_data
+
+        # Prepare reference data
+        reference = []
+        for memories in memories_list:
+            memories_json = memories.model_dump()
+            memories_json["metadata"]["ref_id"] = f"{memories.id.split('-')[0]}"
+            memories_json["metadata"]["embedding"] = []
+            memories_json["metadata"]["sources"] = []
+            memories_json["metadata"]["memory"] = memories.memory
+            memories_json["metadata"]["id"] = memories.id
+            reference.append({"metadata": memories_json["metadata"]})
+
+        yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n"
+        # set kvcache improve speed
+        speed_improvement = round(float((len(system_prompt) / 2) * 0.0048 + 44.5), 1)
+        total_time = round(float(time_end - time_start), 1)
+
+        yield f"data: {json.dumps({'type': 'time', 'data': {'total_time': total_time, 'speed_improvement': f'{speed_improvement}%'}})}\n\n"
         yield f"data: {json.dumps({'type': 'end'})}\n\n"
 
+        logger.info(f"user_id: {user_id}, cube_id: {cube_id}, current_messages: {current_messages}")
+        logger.info(f"user_id: {user_id}, cube_id: {cube_id}, full_response: {full_response}")
+
+        clean_response, extracted_references = self._extract_references_from_response(full_response)
+        logger.info(f"Extracted {len(extracted_references)} references from response")
+
+        # Send chat report if online_bot is available
+        try:
+            from memos.memos_tools.notification_utils import send_online_bot_notification
+
+            # Prepare data for online_bot
+            chat_data = {
+                "query": query,
+                "user_id": user_id,
+                "cube_id": cube_id,
+                "system_prompt": system_prompt,
+                "full_response": full_response,
+            }
+
+            system_data = {
+                "references": extracted_references,
+                "time_start": time_start,
+                "time_end": time_end,
+                "speed_improvement": speed_improvement,
+            }
+
+            emoji_config = {"chat": "💬", "system_info": "📊"}
+
+            send_online_bot_notification(
+                online_bot=self.online_bot,
+                header_name="MemOS Chat Report",
+                sub_title_name="chat_with_references",
+                title_color="#00956D",
+                other_data1=chat_data,
+                other_data2=system_data,
+                emoji=emoji_config,
+            )
+        except Exception as e:
+            logger.warning(f"Failed to send chat notification: {e}")
+
+        self._send_message_to_scheduler(
+            user_id=user_id, mem_cube_id=cube_id, query=clean_response, label=ANSWER_LABEL
+        )
+        self.add(
+            user_id=user_id,
+            messages=[
+                {
+                    "role": "user",
+                    "content": query,
+                    "chat_time": str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
+                },
+                {
+                    "role": "assistant",
+                    "content": clean_response,  # Store clean text without reference markers
+                    "chat_time": str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
+                },
+            ],
+            mem_cube_id=cube_id,
+        )
+
     def get_all(
         self,
         user_id: str,
-        memory_type: Literal["text_mem", "act_mem", "param_mem"],
-
-    ) -> list[
-        dict[
-            str,
-            str
-            | list[
-                TextualMemoryMetadata
-                | TreeNodeTextualMemoryMetadata
-                | ActivationMemoryItem
-                | ParametricMemoryItem
-            ],
-        ]
-    ]:
+        memory_type: Literal["text_mem", "act_mem", "param_mem", "para_mem"],
+        mem_cube_ids: list[str] | None = None,
+    ) -> list[dict[str, Any]]:
         """Get all memory items for a user.
 
         Args:
@@ -83,7 +975,292 @@ class MOSProduct(MOSCore):
             memory_type (Literal["text_mem", "act_mem", "param_mem"]): The type of memory to get.
 
         Returns:
-            list[
+            list[dict[str, Any]]: A list of memory items with cube_id and memories structure.
+        """
+
+        # Load user cubes if not already loaded
+        self._load_user_cubes(user_id, self.default_cube_config)
+        time_start = time.time()
+        memory_list = super().get_all(
+            mem_cube_id=mem_cube_ids[0] if mem_cube_ids else None, user_id=user_id
+        )[memory_type]
+        get_all_time_end = time.time()
+        logger.info(
+            f"time get_all: get_all time user_id: {user_id} time is: {get_all_time_end - time_start}"
+        )
+        reformat_memory_list = []
+        if memory_type == "text_mem":
+            for memory in memory_list:
+                memories = remove_embedding_recursive(memory["memories"])
+                custom_type_ratios = {
+                    "WorkingMemory": 0.20,
+                    "LongTermMemory": 0.40,
+                    "UserMemory": 0.40,
+                }
+                tree_result, node_type_count = convert_graph_to_tree_forworkmem(
+                    memories, target_node_count=200, type_ratios=custom_type_ratios
+                )
+                # Ensure all node IDs are unique in the tree structure
+                tree_result = ensure_unique_tree_ids(tree_result)
+                memories_filtered = filter_nodes_by_tree_ids(tree_result, memories)
+                children = tree_result["children"]
+                children_sort = sort_children_by_memory_type(children)
+                tree_result["children"] = children_sort
+                memories_filtered["tree_structure"] = tree_result
+                reformat_memory_list.append(
+                    {
+                        "cube_id": memory["cube_id"],
+                        "memories": [memories_filtered],
+                        "memory_statistics": node_type_count,
+                    }
+                )
+        elif memory_type == "act_mem":
+            memories_list = []
+            act_mem_params = self.mem_cubes[mem_cube_ids[0]].act_mem.get_all()
+            if act_mem_params:
+                memories_data = act_mem_params[0].model_dump()
+                records = memories_data.get("records", [])
+                for record in records["text_memories"]:
+                    memories_list.append(
+                        {
+                            "id": memories_data["id"],
+                            "text": record,
+                            "create_time": records["timestamp"],
+                            "size": random.randint(1, 20),
+                            "modify_times": 1,
+                        }
+                    )
+            reformat_memory_list.append(
+                {
+                    "cube_id": "xxxxxxxxxxxxxxxx" if not mem_cube_ids else mem_cube_ids[0],
+                    "memories": memories_list,
+                }
+            )
+        elif memory_type == "para_mem":
+            act_mem_params = self.mem_cubes[mem_cube_ids[0]].act_mem.get_all()
+            logger.info(f"act_mem_params: {act_mem_params}")
+            reformat_memory_list.append(
+                {
+                    "cube_id": "xxxxxxxxxxxxxxxx" if not mem_cube_ids else mem_cube_ids[0],
+                    "memories": act_mem_params[0].model_dump(),
+                }
+            )
+        make_format_time_end = time.time()
+        logger.info(
+            f"time get_all: make_format time user_id: {user_id} time is: {make_format_time_end - get_all_time_end}"
+        )
+        return reformat_memory_list
+
+    def _get_subgraph(
+        self, query: str, mem_cube_id: str, user_id: str | None = None, top_k: int = 5
+    ) -> list[dict[str, Any]]:
+        result = {"para_mem": [], "act_mem": [], "text_mem": []}
+        if self.config.enable_textual_memory and self.mem_cubes[mem_cube_id].text_mem:
+            result["text_mem"].append(
+                {
+                    "cube_id": mem_cube_id,
+                    "memories": self.mem_cubes[mem_cube_id].text_mem.get_relevant_subgraph(
+                        query, top_k=top_k
+                    ),
+                }
+            )
+        return result
+
+    def get_subgraph(
+        self,
+        user_id: str,
+        query: str,
+        mem_cube_ids: list[str] | None = None,
+        top_k: int = 20,
+    ) -> list[dict[str, Any]]:
+        """Get all memory items for a user.
+
+        Args:
+            user_id (str): The ID of the user.
+            cube_id (str | None, optional): The ID of the cube. Defaults to None.
+            mem_cube_ids (list[str], optional): The IDs of the cubes. Defaults to None.
+
+        Returns:
+            list[dict[str, Any]]: A list of memory items with cube_id and memories structure.
+        """
+
+        # Load user cubes if not already loaded
+        self._load_user_cubes(user_id, self.default_cube_config)
+        memory_list = self._get_subgraph(
+            query=query, mem_cube_id=mem_cube_ids[0], user_id=user_id, top_k=top_k
+        )["text_mem"]
+        reformat_memory_list = []
+        for memory in memory_list:
+            memories = remove_embedding_recursive(memory["memories"])
+            custom_type_ratios = {"WorkingMemory": 0.20, "LongTermMemory": 0.40, "UserMemory": 0.4}
+            tree_result, node_type_count = convert_graph_to_tree_forworkmem(
+                memories, target_node_count=150, type_ratios=custom_type_ratios
+            )
+            # Ensure all node IDs are unique in the tree structure
+            tree_result = ensure_unique_tree_ids(tree_result)
+            memories_filtered = filter_nodes_by_tree_ids(tree_result, memories)
+            children = tree_result["children"]
+            children_sort = sort_children_by_memory_type(children)
+            tree_result["children"] = children_sort
+            memories_filtered["tree_structure"] = tree_result
+            reformat_memory_list.append(
+                {
+                    "cube_id": memory["cube_id"],
+                    "memories": [memories_filtered],
+                    "memory_statistics": node_type_count,
+                }
+            )
+
+        return reformat_memory_list
+
+    def search(
+        self,
+        query: str,
+        user_id: str,
+        install_cube_ids: list[str] | None = None,
+        top_k: int = 10,
+        mode: Literal["fast", "fine"] = "fast",
+    ):
+        """Search memories for a specific user."""
+
+        # Load user cubes if not already loaded
+        time_start = time.time()
+        self._load_user_cubes(user_id, self.default_cube_config)
+        load_user_cubes_time_end = time.time()
+        logger.info(
+            f"time search: load_user_cubes time user_id: {user_id} time is: {load_user_cubes_time_end - time_start}"
+        )
+        search_result = super().search(query, user_id, install_cube_ids, top_k, mode=mode)
+        search_time_end = time.time()
+        logger.info(
+            f"time search: search text_mem time user_id: {user_id} time is: {search_time_end - load_user_cubes_time_end}"
+        )
+        text_memory_list = search_result["text_mem"]
+        reformat_memory_list = []
+        for memory in text_memory_list:
+            memories_list = []
+            for data in memory["memories"]:
+                memories = data.model_dump()
+                memories["ref_id"] = f"[{memories['id'].split('-')[0]}]"
+                memories["metadata"]["embedding"] = []
+                memories["metadata"]["sources"] = []
+                memories["metadata"]["ref_id"] = f"[{memories['id'].split('-')[0]}]"
+                memories["metadata"]["id"] = memories["id"]
+                memories["metadata"]["memory"] = memories["memory"]
+                memories_list.append(memories)
+            reformat_memory_list.append({"cube_id": memory["cube_id"], "memories": memories_list})
+        search_result["text_mem"] = reformat_memory_list
+        time_end = time.time()
+        logger.info(
+            f"time search: total time for user_id: {user_id} time is: {time_end - time_start}"
+        )
+        return search_result
+
+    def add(
+        self,
+        user_id: str,
+        messages: MessageList | None = None,
+        memory_content: str | None = None,
+        doc_path: str | None = None,
+        mem_cube_id: str | None = None,
+        source: str | None = None,
+        user_profile: bool = False,
+    ):
+        """Add memory for a specific user."""
+
+        # Load user cubes if not already loaded
+        self._load_user_cubes(user_id, self.default_cube_config)
+
+        result = super().add(messages, memory_content, doc_path, mem_cube_id, user_id)
+        if user_profile:
+            try:
+                user_interests = memory_content.split("'userInterests': '")[1].split("', '")[0]
+                user_interests = user_interests.replace(",", " ")
+                user_profile_memories = self.mem_cubes[
+                    mem_cube_id
+                ].text_mem.internet_retriever.retrieve_from_internet(query=user_interests, top_k=5)
+                for memory in user_profile_memories:
+                    self.mem_cubes[mem_cube_id].text_mem.add(memory)
+            except Exception as e:
+                logger.error(
+                    f"Failed to retrieve user profile: {e}, memory_content: {memory_content}"
+                )
+
+        return result
+
+    def list_users(self) -> list:
+        """List all registered users."""
+        return self.user_manager.list_users()
+
+    def get_user_info(self, user_id: str) -> dict:
+        """Get user information including accessible cubes."""
+        # Use MOSCore's built-in user validation
+        # Validate user access
+        self._validate_user_access(user_id)
+
+        result = super().get_user_info()
+
+        return result
+
+    def share_cube_with_user(self, cube_id: str, owner_user_id: str, target_user_id: str) -> bool:
+        """Share a cube with another user."""
+        # Use MOSCore's built-in cube access validation
+        self._validate_cube_access(owner_user_id, cube_id)
+
+        result = super().share_cube_with_user(cube_id, target_user_id)
+
+        return result
+
+    def clear_user_chat_history(self, user_id: str) -> None:
+        """Clear chat history for a specific user."""
+        # Validate user access
+        self._validate_user_access(user_id)
+
+        super().clear_messages(user_id)
+
+    def update_user_config(self, user_id: str, config: MOSConfig) -> bool:
+        """Update user configuration.
+
+        Args:
+            user_id (str): The user ID.
+            config (MOSConfig): The new configuration.
+
+        Returns:
+            bool: True if successful, False otherwise.
+        """
+        try:
+            # Save to persistent storage
+            success = self.user_manager.save_user_config(user_id, config)
+            if success:
+                # Update in-memory config
+                self.user_configs[user_id] = config
+                logger.info(f"Updated configuration for user {user_id}")
+
+            return success
+        except Exception as e:
+            logger.error(f"Failed to update user config for {user_id}: {e}")
+            return False
+
+    def get_user_config(self, user_id: str) -> MOSConfig | None:
+        """Get user configuration.
+
+        Args:
+            user_id (str): The user ID.
+
+        Returns:
+            MOSConfig | None: The user's configuration or None if not found.
         """
-
-
+        return self.user_manager.get_user_config(user_id)
+
+    def get_active_user_count(self) -> int:
+        """Get the number of active user configurations in memory."""
+        return len(self.user_configs)
+
+    def get_user_instance_info(self) -> dict[str, Any]:
+        """Get information about user configurations in memory."""
+        return {
+            "active_instances": len(self.user_configs),
+            "max_instances": self.max_user_instances,
+            "user_ids": list(self.user_configs.keys()),
+            "lru_order": list(self.user_configs.keys()),  # OrderedDict maintains insertion order
+        }