MemoryOS 0.2.0-py3-none-any.whl → 0.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of MemoryOS might be problematic.

Files changed (114)
  1. {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/METADATA +67 -26
  2. memoryos-0.2.2.dist-info/RECORD +169 -0
  3. memoryos-0.2.2.dist-info/entry_points.txt +3 -0
  4. memos/__init__.py +1 -1
  5. memos/api/config.py +562 -0
  6. memos/api/context/context.py +147 -0
  7. memos/api/context/dependencies.py +90 -0
  8. memos/api/exceptions.py +28 -0
  9. memos/api/mcp_serve.py +502 -0
  10. memos/api/product_api.py +35 -0
  11. memos/api/product_models.py +163 -0
  12. memos/api/routers/__init__.py +1 -0
  13. memos/api/routers/product_router.py +386 -0
  14. memos/chunkers/sentence_chunker.py +8 -2
  15. memos/cli.py +113 -0
  16. memos/configs/embedder.py +27 -0
  17. memos/configs/graph_db.py +132 -3
  18. memos/configs/internet_retriever.py +6 -0
  19. memos/configs/llm.py +47 -0
  20. memos/configs/mem_cube.py +1 -1
  21. memos/configs/mem_os.py +5 -0
  22. memos/configs/mem_reader.py +9 -0
  23. memos/configs/mem_scheduler.py +107 -7
  24. memos/configs/mem_user.py +58 -0
  25. memos/configs/memory.py +5 -4
  26. memos/dependency.py +52 -0
  27. memos/embedders/ark.py +92 -0
  28. memos/embedders/factory.py +4 -0
  29. memos/embedders/sentence_transformer.py +8 -2
  30. memos/embedders/universal_api.py +32 -0
  31. memos/graph_dbs/base.py +11 -3
  32. memos/graph_dbs/factory.py +4 -0
  33. memos/graph_dbs/nebular.py +1364 -0
  34. memos/graph_dbs/neo4j.py +333 -124
  35. memos/graph_dbs/neo4j_community.py +300 -0
  36. memos/llms/base.py +9 -0
  37. memos/llms/deepseek.py +54 -0
  38. memos/llms/factory.py +10 -1
  39. memos/llms/hf.py +170 -13
  40. memos/llms/hf_singleton.py +114 -0
  41. memos/llms/ollama.py +4 -0
  42. memos/llms/openai.py +67 -1
  43. memos/llms/qwen.py +63 -0
  44. memos/llms/vllm.py +153 -0
  45. memos/log.py +1 -1
  46. memos/mem_cube/general.py +77 -16
  47. memos/mem_cube/utils.py +109 -0
  48. memos/mem_os/core.py +251 -51
  49. memos/mem_os/main.py +94 -12
  50. memos/mem_os/product.py +1220 -43
  51. memos/mem_os/utils/default_config.py +352 -0
  52. memos/mem_os/utils/format_utils.py +1401 -0
  53. memos/mem_reader/simple_struct.py +18 -10
  54. memos/mem_scheduler/base_scheduler.py +441 -40
  55. memos/mem_scheduler/general_scheduler.py +249 -248
  56. memos/mem_scheduler/modules/base.py +14 -5
  57. memos/mem_scheduler/modules/dispatcher.py +67 -4
  58. memos/mem_scheduler/modules/misc.py +104 -0
  59. memos/mem_scheduler/modules/monitor.py +240 -50
  60. memos/mem_scheduler/modules/rabbitmq_service.py +319 -0
  61. memos/mem_scheduler/modules/redis_service.py +32 -22
  62. memos/mem_scheduler/modules/retriever.py +167 -23
  63. memos/mem_scheduler/modules/scheduler_logger.py +255 -0
  64. memos/mem_scheduler/mos_for_test_scheduler.py +140 -0
  65. memos/mem_scheduler/schemas/__init__.py +0 -0
  66. memos/mem_scheduler/schemas/general_schemas.py +43 -0
  67. memos/mem_scheduler/{modules/schemas.py → schemas/message_schemas.py} +63 -61
  68. memos/mem_scheduler/schemas/monitor_schemas.py +329 -0
  69. memos/mem_scheduler/utils/__init__.py +0 -0
  70. memos/mem_scheduler/utils/filter_utils.py +176 -0
  71. memos/mem_scheduler/utils/misc_utils.py +61 -0
  72. memos/mem_user/factory.py +94 -0
  73. memos/mem_user/mysql_persistent_user_manager.py +271 -0
  74. memos/mem_user/mysql_user_manager.py +500 -0
  75. memos/mem_user/persistent_factory.py +96 -0
  76. memos/mem_user/persistent_user_manager.py +260 -0
  77. memos/mem_user/user_manager.py +4 -4
  78. memos/memories/activation/item.py +29 -0
  79. memos/memories/activation/kv.py +10 -3
  80. memos/memories/activation/vllmkv.py +219 -0
  81. memos/memories/factory.py +2 -0
  82. memos/memories/textual/base.py +1 -1
  83. memos/memories/textual/general.py +43 -97
  84. memos/memories/textual/item.py +5 -33
  85. memos/memories/textual/tree.py +22 -12
  86. memos/memories/textual/tree_text_memory/organize/conflict.py +9 -5
  87. memos/memories/textual/tree_text_memory/organize/manager.py +26 -18
  88. memos/memories/textual/tree_text_memory/organize/redundancy.py +25 -44
  89. memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +50 -48
  90. memos/memories/textual/tree_text_memory/organize/reorganizer.py +81 -56
  91. memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +6 -3
  92. memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +2 -0
  93. memos/memories/textual/tree_text_memory/retrieve/recall.py +0 -1
  94. memos/memories/textual/tree_text_memory/retrieve/reranker.py +2 -2
  95. memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +2 -0
  96. memos/memories/textual/tree_text_memory/retrieve/searcher.py +52 -28
  97. memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +42 -15
  98. memos/memories/textual/tree_text_memory/retrieve/utils.py +11 -7
  99. memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +62 -58
  100. memos/memos_tools/dinding_report_bot.py +422 -0
  101. memos/memos_tools/notification_service.py +44 -0
  102. memos/memos_tools/notification_utils.py +96 -0
  103. memos/parsers/markitdown.py +8 -2
  104. memos/settings.py +3 -1
  105. memos/templates/mem_reader_prompts.py +66 -23
  106. memos/templates/mem_scheduler_prompts.py +126 -43
  107. memos/templates/mos_prompts.py +87 -0
  108. memos/templates/tree_reorganize_prompts.py +85 -30
  109. memos/vec_dbs/base.py +12 -0
  110. memos/vec_dbs/qdrant.py +46 -20
  111. memoryos-0.2.0.dist-info/RECORD +0 -128
  112. memos/mem_scheduler/utils.py +0 -26
  113. {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/LICENSE +0 -0
  114. {memoryos-0.2.0.dist-info → memoryos-0.2.2.dist-info}/WHEEL +0 -0
memos/api/config.py ADDED
@@ -0,0 +1,562 @@
+import json
+import os
+
+from typing import Any
+
+from dotenv import load_dotenv
+
+from memos.configs.mem_cube import GeneralMemCubeConfig
+from memos.configs.mem_os import MOSConfig
+from memos.mem_cube.general import GeneralMemCube
+
+
+# Load environment variables
+load_dotenv()
+
+
+class APIConfig:
+    """Centralized configuration management for MemOS APIs."""
+
+    @staticmethod
+    def get_openai_config() -> dict[str, Any]:
+        """Get OpenAI configuration."""
+        return {
+            "model_name_or_path": os.getenv("MOS_OPENAI_MODEL", "gpt-4o-mini"),
+            "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.8")),
+            "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "1024")),
+            "top_p": float(os.getenv("MOS_TOP_P", "0.9")),
+            "top_k": int(os.getenv("MOS_TOP_K", "50")),
+            "remove_think_prefix": True,
+            "api_key": os.getenv("OPENAI_API_KEY", "your-api-key-here"),
+            "api_base": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
+        }
+
+    @staticmethod
+    def qwen_config() -> dict[str, Any]:
+        """Get Qwen configuration."""
+        return {
+            "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "Qwen/Qwen3-1.7B"),
+            "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.8")),
+            "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "4096")),
+            "remove_think_prefix": True,
+        }
+
+    @staticmethod
+    def vllm_config() -> dict[str, Any]:
+        """Get vLLM configuration."""
+        return {
+            "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "Qwen/Qwen3-1.7B"),
+            "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.8")),
+            "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "4096")),
+            "remove_think_prefix": True,
+            "api_key": os.getenv("VLLM_API_KEY", ""),
+            "api_base": os.getenv("VLLM_API_BASE", "http://localhost:8088/v1"),
+            "model_schema": os.getenv("MOS_MODEL_SCHEMA", "memos.configs.llm.VLLMLLMConfig"),
+        }
+
+    @staticmethod
+    def get_activation_config() -> dict[str, Any]:
+        """Get activation memory (KV cache) configuration."""
+        return {
+            "backend": "kv_cache",
+            "config": {
+                "memory_filename": "activation_memory.pickle",
+                "extractor_llm": {
+                    "backend": "huggingface_singleton",
+                    "config": {
+                        "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "Qwen/Qwen3-1.7B"),
+                        "temperature": 0.8,
+                        "max_tokens": 1024,
+                        "top_p": 0.9,
+                        "top_k": 50,
+                        "add_generation_prompt": True,
+                        "remove_think_prefix": False,
+                    },
+                },
+            },
+        }
+
+    @staticmethod
+    def get_activation_vllm_config() -> dict[str, Any]:
+        """Get vLLM-backed activation memory configuration."""
+        return {
+            "backend": "vllm_kv_cache",
+            "config": {
+                "memory_filename": "activation_memory.pickle",
+                "extractor_llm": {
+                    "backend": "vllm",
+                    "config": APIConfig.vllm_config(),
+                },
+            },
+        }
+
+    @staticmethod
+    def get_embedder_config() -> dict[str, Any]:
+        """Get embedder configuration."""
+        embedder_backend = os.getenv("MOS_EMBEDDER_BACKEND", "ollama")
+
+        if embedder_backend == "universal_api":
+            return {
+                "backend": "universal_api",
+                "config": {
+                    "provider": os.getenv("MOS_EMBEDDER_PROVIDER", "openai"),
+                    "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxx"),
+                    "model_name_or_path": os.getenv("MOS_EMBEDDER_MODEL", "text-embedding-3-large"),
+                    "base_url": os.getenv("OPENAI_API_BASE", "http://openai.com"),
+                },
+            }
+        else:  # ollama
+            return {
+                "backend": "ollama",
+                "config": {
+                    "model_name_or_path": os.getenv(
+                        "MOS_EMBEDDER_MODEL", "nomic-embed-text:latest"
+                    ),
+                    "api_base": os.getenv("OLLAMA_API_BASE", "http://localhost:11434"),
+                },
+            }
+
+    @staticmethod
+    def get_internet_config() -> dict[str, Any]:
+        """Get internet retriever configuration."""
+        return {
+            "backend": "xinyu",
+            "config": {
+                "api_key": os.getenv("XINYU_API_KEY"),
+                "search_engine_id": os.getenv("XINYU_SEARCH_ENGINE_ID"),
+                "max_results": 15,
+                "num_per_request": 10,
+                "reader": {
+                    "backend": "simple_struct",
+                    "config": {
+                        "llm": {
+                            "backend": "openai",
+                            "config": {
+                                "model_name_or_path": os.getenv("MEMRADER_MODEL"),
+                                "temperature": 0.6,
+                                "max_tokens": 5000,
+                                "top_p": 0.95,
+                                "top_k": 20,
+                                "api_key": "EMPTY",
+                                "api_base": os.getenv("MEMRADER_API_BASE"),
+                                "remove_think_prefix": True,
+                                "extra_body": {"chat_template_kwargs": {"enable_thinking": False}},
+                            },
+                        },
+                        "embedder": APIConfig.get_embedder_config(),
+                        "chunker": {
+                            "backend": "sentence",
+                            "config": {
+                                "tokenizer_or_token_counter": "gpt2",
+                                "chunk_size": 512,
+                                "chunk_overlap": 128,
+                                "min_sentences_per_chunk": 1,
+                            },
+                        },
+                    },
+                },
+            },
+        }
+
+    @staticmethod
+    def get_neo4j_community_config(user_id: str | None = None) -> dict[str, Any]:
+        """Get Neo4j community configuration."""
+        return {
+            "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"),
+            "user": os.getenv("NEO4J_USER", "neo4j"),
+            "db_name": os.getenv("NEO4J_DB_NAME", "shared-tree-textual-memory"),
+            "password": os.getenv("NEO4J_PASSWORD", "12345678"),
+            "user_name": f"memos{user_id.replace('-', '')}",
+            "auto_create": True,
+            "use_multi_db": False,
+            "embedding_dimension": int(os.getenv("EMBEDDING_DIMENSION", 3072)),
+            "vec_config": {
+                # Pass nested config to initialize external vector DB
+                # If you use qdrant, please use Server instead of local mode.
+                "backend": "qdrant",
+                "config": {
+                    "collection_name": "neo4j_vec_db",
+                    "vector_dimension": int(os.getenv("EMBEDDING_DIMENSION", 3072)),
+                    "distance_metric": "cosine",
+                    "host": "localhost",
+                    "port": 6333,
+                },
+            },
+        }
+
+    @staticmethod
+    def get_neo4j_config(user_id: str | None = None) -> dict[str, Any]:
+        """Get Neo4j configuration."""
+        if os.getenv("MOS_NEO4J_SHARED_DB", "false").lower() == "true":
+            return APIConfig.get_neo4j_shared_config(user_id)
+        else:
+            return APIConfig.get_noshared_neo4j_config(user_id)
+
+    @staticmethod
+    def get_noshared_neo4j_config(user_id) -> dict[str, Any]:
+        """Get Neo4j configuration."""
+        return {
+            "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"),
+            "user": os.getenv("NEO4J_USER", "neo4j"),
+            "db_name": f"memos{user_id.replace('-', '')}",
+            "password": os.getenv("NEO4J_PASSWORD", "12345678"),
+            "auto_create": True,
+            "use_multi_db": True,
+            "embedding_dimension": int(os.getenv("EMBEDDING_DIMENSION", 3072)),
+        }
+
+    @staticmethod
+    def get_neo4j_shared_config(user_id: str | None = None) -> dict[str, Any]:
+        """Get Neo4j configuration."""
+        return {
+            "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"),
+            "user": os.getenv("NEO4J_USER", "neo4j"),
+            "db_name": os.getenv("NEO4J_DB_NAME", "shared-tree-textual-memory"),
+            "password": os.getenv("NEO4J_PASSWORD", "12345678"),
+            "user_name": f"memos{user_id.replace('-', '')}",
+            "auto_create": True,
+            "use_multi_db": False,
+            "embedding_dimension": int(os.getenv("EMBEDDING_DIMENSION", 3072)),
+        }
+
+    @staticmethod
+    def get_nebular_config(user_id: str | None = None) -> dict[str, Any]:
+        """Get Nebular configuration."""
+        return {
+            "uri": json.loads(os.getenv("NEBULAR_HOSTS", '["localhost"]')),
+            "user": os.getenv("NEBULAR_USER", "root"),
+            "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"),
+            "space": os.getenv("NEBULAR_SPACE", "shared-tree-textual-memory"),
+            "user_name": f"memos{user_id.replace('-', '')}",
+            "use_multi_db": False,
+            "auto_create": True,
+            "embedding_dimension": int(os.getenv("EMBEDDING_DIMENSION", 3072)),
+        }
+
+    @staticmethod
+    def get_mysql_config() -> dict[str, Any]:
+        """Get MySQL configuration."""
+        return {
+            "host": os.getenv("MYSQL_HOST", "localhost"),
+            "port": int(os.getenv("MYSQL_PORT", "3306")),
+            "username": os.getenv("MYSQL_USERNAME", "root"),
+            "password": os.getenv("MYSQL_PASSWORD", "12345678"),
+            "database": os.getenv("MYSQL_DATABASE", "memos_users"),
+            "charset": os.getenv("MYSQL_CHARSET", "utf8mb4"),
+        }
+
+    @staticmethod
+    def get_scheduler_config() -> dict[str, Any]:
+        """Get scheduler configuration."""
+        return {
+            "backend": "general_scheduler",
+            "config": {
+                "top_k": int(os.getenv("MOS_SCHEDULER_TOP_K", "10")),
+                "top_n": int(os.getenv("MOS_SCHEDULER_TOP_N", "5")),
+                "act_mem_update_interval": int(
+                    os.getenv("MOS_SCHEDULER_ACT_MEM_UPDATE_INTERVAL", "300")
+                ),
+                "context_window_size": int(os.getenv("MOS_SCHEDULER_CONTEXT_WINDOW_SIZE", "5")),
+                "thread_pool_max_workers": int(
+                    os.getenv("MOS_SCHEDULER_THREAD_POOL_MAX_WORKERS", "10")
+                ),
+                "consume_interval_seconds": int(
+                    os.getenv("MOS_SCHEDULER_CONSUME_INTERVAL_SECONDS", "3")
+                ),
+                "enable_parallel_dispatch": os.getenv(
+                    "MOS_SCHEDULER_ENABLE_PARALLEL_DISPATCH", "true"
+                ).lower()
+                == "true",
+                "enable_act_memory_update": True,
+            },
+        }
+
+    @staticmethod
+    def is_scheduler_enabled() -> bool:
+        """Check if scheduler is enabled via environment variable."""
+        return os.getenv("MOS_ENABLE_SCHEDULER", "false").lower() == "true"
+
+    @staticmethod
+    def is_default_cube_config_enabled() -> bool:
+        """Check if default cube config is enabled via environment variable."""
+        return os.getenv("MOS_ENABLE_DEFAULT_CUBE_CONFIG", "false").lower() == "true"
+
+    @staticmethod
+    def is_dingding_bot_enabled() -> bool:
+        """Check if DingDing bot is enabled via environment variable."""
+        return os.getenv("ENABLE_DINGDING_BOT", "false").lower() == "true"
+
+    @staticmethod
+    def get_dingding_bot_config() -> dict[str, Any] | None:
+        """Get DingDing bot configuration if enabled."""
+        if not APIConfig.is_dingding_bot_enabled():
+            return None
+
+        return {
+            "enabled": True,
+            "access_token_user": os.getenv("DINGDING_ACCESS_TOKEN_USER", ""),
+            "secret_user": os.getenv("DINGDING_SECRET_USER", ""),
+            "access_token_error": os.getenv("DINGDING_ACCESS_TOKEN_ERROR", ""),
+            "secret_error": os.getenv("DINGDING_SECRET_ERROR", ""),
+            "robot_code": os.getenv("DINGDING_ROBOT_CODE", ""),
+            "app_key": os.getenv("DINGDING_APP_KEY", ""),
+            "app_secret": os.getenv("DINGDING_APP_SECRET", ""),
+            "oss_endpoint": os.getenv("OSS_ENDPOINT", ""),
+            "oss_region": os.getenv("OSS_REGION", ""),
+            "oss_bucket_name": os.getenv("OSS_BUCKET_NAME", ""),
+            "oss_access_key_id": os.getenv("OSS_ACCESS_KEY_ID", ""),
+            "oss_access_key_secret": os.getenv("OSS_ACCESS_KEY_SECRET", ""),
+            "oss_public_base_url": os.getenv("OSS_PUBLIC_BASE_URL", ""),
+        }
+
+    @staticmethod
+    def get_product_default_config() -> dict[str, Any]:
+        """Get default configuration for Product API."""
+        openai_config = APIConfig.get_openai_config()
+        qwen_config = APIConfig.qwen_config()
+        vllm_config = APIConfig.vllm_config()
+        backend_model = {
+            "openai": openai_config,
+            "huggingface": qwen_config,
+            "vllm": vllm_config,
+        }
+        backend = os.getenv("MOS_CHAT_MODEL_PROVIDER", "openai")
+        mysql_config = APIConfig.get_mysql_config()
+        config = {
+            "user_id": os.getenv("MOS_USER_ID", "root"),
+            "chat_model": {"backend": backend, "config": backend_model[backend]},
+            "mem_reader": {
+                "backend": "simple_struct",
+                "config": {
+                    "llm": {
+                        "backend": "openai",
+                        "config": openai_config,
+                    },
+                    "embedder": APIConfig.get_embedder_config(),
+                    "chunker": {
+                        "backend": "sentence",
+                        "config": {
+                            "tokenizer_or_token_counter": "gpt2",
+                            "chunk_size": 512,
+                            "chunk_overlap": 128,
+                            "min_sentences_per_chunk": 1,
+                        },
+                    },
+                },
+            },
+            "enable_textual_memory": True,
+            "enable_activation_memory": os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower()
+            == "true",
+            "top_k": int(os.getenv("MOS_TOP_K", "50")),
+            "max_turns_window": int(os.getenv("MOS_MAX_TURNS_WINDOW", "20")),
+        }
+
+        # Add scheduler configuration if enabled
+        if APIConfig.is_scheduler_enabled():
+            config["mem_scheduler"] = APIConfig.get_scheduler_config()
+            config["enable_mem_scheduler"] = True
+        else:
+            config["enable_mem_scheduler"] = False
+
+        # Add user manager configuration if enabled
+        if os.getenv("MOS_USER_MANAGER_BACKEND", "sqlite").lower() == "mysql":
+            config["user_manager"] = {
+                "backend": "mysql",
+                "config": mysql_config,
+            }
+
+        return config
+
+    @staticmethod
+    def get_start_default_config() -> dict[str, Any]:
+        """Get default configuration for Start API."""
+        config = {
+            "user_id": os.getenv("MOS_USER_ID", "default_user"),
+            "session_id": os.getenv("MOS_SESSION_ID", "default_session"),
+            "enable_textual_memory": True,
+            "enable_activation_memory": os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower()
+            == "true",
+            "top_k": int(os.getenv("MOS_TOP_K", "5")),
+            "chat_model": {
+                "backend": os.getenv("MOS_CHAT_MODEL_PROVIDER", "openai"),
+                "config": {
+                    "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "gpt-4o-mini"),
+                    "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxxxx"),
+                    "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", 0.7)),
+                    "api_base": os.getenv("OPENAI_API_BASE", "http://xxxxxx:3000/v1"),
+                    "max_tokens": int(os.getenv("MOS_MAX_TOKENS", 1024)),
+                    "top_p": float(os.getenv("MOS_TOP_P", 0.9)),
+                    "top_k": int(os.getenv("MOS_TOP_K", 50)),
+                    "remove_think_prefix": True,
+                },
+            },
+        }
+
+        # Add scheduler configuration if enabled
+        if APIConfig.is_scheduler_enabled():
+            config["mem_scheduler"] = APIConfig.get_scheduler_config()
+            config["enable_mem_scheduler"] = True
+        else:
+            config["enable_mem_scheduler"] = False
+
+        return config
+
+    @staticmethod
+    def create_user_config(user_name: str, user_id: str) -> tuple[MOSConfig, GeneralMemCube]:
+        """Create configuration for a specific user."""
+        openai_config = APIConfig.get_openai_config()
+        qwen_config = APIConfig.qwen_config()
+        vllm_config = APIConfig.vllm_config()
+        mysql_config = APIConfig.get_mysql_config()
+        backend = os.getenv("MOS_CHAT_MODEL_PROVIDER", "openai")
+        backend_model = {
+            "openai": openai_config,
+            "huggingface": qwen_config,
+            "vllm": vllm_config,
+        }
+        # Create MOSConfig
+        config_dict = {
+            "user_id": user_id,
+            "chat_model": {
+                "backend": backend,
+                "config": backend_model[backend],
+            },
+            "mem_reader": {
+                "backend": "simple_struct",
+                "config": {
+                    "llm": {
+                        "backend": "openai",
+                        "config": openai_config,
+                    },
+                    "embedder": APIConfig.get_embedder_config(),
+                    "chunker": {
+                        "backend": "sentence",
+                        "config": {
+                            "tokenizer_or_token_counter": "gpt2",
+                            "chunk_size": 512,
+                            "chunk_overlap": 128,
+                            "min_sentences_per_chunk": 1,
+                        },
+                    },
+                },
+            },
+            "enable_textual_memory": True,
+            "enable_activation_memory": os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower()
+            == "true",
+            "top_k": 30,
+            "max_turns_window": 20,
+        }
+        # Add scheduler configuration if enabled
+        if APIConfig.is_scheduler_enabled():
+            config_dict["mem_scheduler"] = APIConfig.get_scheduler_config()
+            config_dict["enable_mem_scheduler"] = True
+        else:
+            config_dict["enable_mem_scheduler"] = False
+
+        # Add user manager configuration if enabled
+        if os.getenv("MOS_USER_MANAGER_BACKEND", "sqlite").lower() == "mysql":
+            config_dict["user_manager"] = {
+                "backend": "mysql",
+                "config": mysql_config,
+            }
+
+        default_config = MOSConfig(**config_dict)
+
+        neo4j_community_config = APIConfig.get_neo4j_community_config(user_id)
+        neo4j_config = APIConfig.get_neo4j_config(user_id)
+        nebular_config = APIConfig.get_nebular_config(user_id)
+        internet_config = (
+            APIConfig.get_internet_config()
+            if os.getenv("ENABLE_INTERNET", "false").lower() == "true"
+            else None
+        )
+        graph_db_backend_map = {
+            "neo4j-community": neo4j_community_config,
+            "neo4j": neo4j_config,
+            "nebular": nebular_config,
+        }
+        graph_db_backend = os.getenv("NEO4J_BACKEND", "neo4j-community").lower()
+        if graph_db_backend in graph_db_backend_map:
+            # Create MemCube config
+
+            default_cube_config = GeneralMemCubeConfig.model_validate(
+                {
+                    "user_id": user_id,
+                    "cube_id": f"{user_name}_default_cube",
+                    "text_mem": {
+                        "backend": "tree_text",
+                        "config": {
+                            "extractor_llm": {"backend": "openai", "config": openai_config},
+                            "dispatcher_llm": {"backend": "openai", "config": openai_config},
+                            "graph_db": {
+                                "backend": graph_db_backend,
+                                "config": graph_db_backend_map[graph_db_backend],
+                            },
+                            "embedder": APIConfig.get_embedder_config(),
+                            "internet_retriever": internet_config,
+                        },
+                    },
+                    "act_mem": {}
+                    if os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower() == "false"
+                    else APIConfig.get_activation_vllm_config(),
+                    "para_mem": {},
+                }
+            )
+        else:
+            raise ValueError(f"Invalid Neo4j backend: {graph_db_backend}")
+        default_mem_cube = GeneralMemCube(default_cube_config)
+        return default_config, default_mem_cube
+
+    @staticmethod
+    def get_default_cube_config() -> GeneralMemCubeConfig | None:
+        """Get default cube configuration for product initialization.
+
+        Returns:
+            GeneralMemCubeConfig | None: Default cube configuration if enabled, None otherwise.
+        """
+        if not APIConfig.is_default_cube_config_enabled():
+            return None
+
+        openai_config = APIConfig.get_openai_config()
+        neo4j_community_config = APIConfig.get_neo4j_community_config(user_id="default")
+        neo4j_config = APIConfig.get_neo4j_config(user_id="default")
+        nebular_config = APIConfig.get_nebular_config(user_id="default")
+        graph_db_backend_map = {
+            "neo4j-community": neo4j_community_config,
+            "neo4j": neo4j_config,
+            "nebular": nebular_config,
+        }
+        internet_config = (
+            APIConfig.get_internet_config()
+            if os.getenv("ENABLE_INTERNET", "false").lower() == "true"
+            else None
+        )
+        graph_db_backend = os.getenv("NEO4J_BACKEND", "neo4j-community").lower()
+        if graph_db_backend in graph_db_backend_map:
+            return GeneralMemCubeConfig.model_validate(
+                {
+                    "user_id": "default",
+                    "cube_id": "default_cube",
+                    "text_mem": {
+                        "backend": "tree_text",
+                        "config": {
+                            "extractor_llm": {"backend": "openai", "config": openai_config},
+                            "dispatcher_llm": {"backend": "openai", "config": openai_config},
+                            "graph_db": {
+                                "backend": graph_db_backend,
+                                "config": graph_db_backend_map[graph_db_backend],
+                            },
+                            "embedder": APIConfig.get_embedder_config(),
+                            "reorganize": os.getenv("MOS_ENABLE_REORGANIZE", "false").lower()
+                            == "true",
+                            "internet_retriever": internet_config,
+                        },
+                    },
+                    "act_mem": {}
+                    if os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower() == "false"
+                    else APIConfig.get_activation_vllm_config(),
+                    "para_mem": {},
+                }
+            )
+        else:
+            raise ValueError(f"Invalid Neo4j backend: {graph_db_backend}")
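
The sketch below is an illustrative usage note added for readers of this diff; it is not part of the published wheel. It assumes the environment variables referenced above (for example MOS_CHAT_MODEL_PROVIDER, MOS_EMBEDDER_BACKEND, NEO4J_BACKEND, MOS_ENABLE_SCHEDULER) are supplied via a .env file picked up by load_dotenv(), that the selected graph database is reachable, and that the user name and id shown are placeholders. Validating the returned dict with MOSConfig mirrors the pattern the module itself uses inside create_user_config.

# Illustrative only -- not part of memos/api/config.py or this release.
from memos.api.config import APIConfig
from memos.configs.mem_os import MOSConfig

# Build the Product API defaults as a plain dict, then validate it as a MOSConfig,
# the same way create_user_config validates its own config_dict.
product_defaults = APIConfig.get_product_default_config()
mos_config = MOSConfig(**product_defaults)

# Per-user setup: returns a validated MOSConfig plus an initialized GeneralMemCube
# whose graph_db backend follows NEO4J_BACKEND (neo4j-community, neo4j, or nebular).
# Requires a reachable graph database; "alice" / "alice-0001" are placeholder values.
user_config, user_cube = APIConfig.create_user_config(
    user_name="alice", user_id="alice-0001"
)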