MemoryOS 0.1.13__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of MemoryOS might be problematic. Click here for more details.

Files changed (84)
  1. {memoryos-0.1.13.dist-info → memoryos-0.2.1.dist-info}/METADATA +78 -49
  2. memoryos-0.2.1.dist-info/RECORD +152 -0
  3. memoryos-0.2.1.dist-info/entry_points.txt +3 -0
  4. memos/__init__.py +1 -1
  5. memos/api/config.py +471 -0
  6. memos/api/exceptions.py +28 -0
  7. memos/api/mcp_serve.py +502 -0
  8. memos/api/product_api.py +35 -0
  9. memos/api/product_models.py +159 -0
  10. memos/api/routers/__init__.py +1 -0
  11. memos/api/routers/product_router.py +358 -0
  12. memos/chunkers/sentence_chunker.py +8 -2
  13. memos/cli.py +113 -0
  14. memos/configs/embedder.py +27 -0
  15. memos/configs/graph_db.py +83 -2
  16. memos/configs/llm.py +48 -0
  17. memos/configs/mem_cube.py +1 -1
  18. memos/configs/mem_reader.py +4 -0
  19. memos/configs/mem_scheduler.py +91 -5
  20. memos/configs/memory.py +10 -4
  21. memos/dependency.py +52 -0
  22. memos/embedders/ark.py +92 -0
  23. memos/embedders/factory.py +4 -0
  24. memos/embedders/sentence_transformer.py +8 -2
  25. memos/embedders/universal_api.py +32 -0
  26. memos/graph_dbs/base.py +2 -2
  27. memos/graph_dbs/factory.py +2 -0
  28. memos/graph_dbs/item.py +46 -0
  29. memos/graph_dbs/neo4j.py +377 -101
  30. memos/graph_dbs/neo4j_community.py +300 -0
  31. memos/llms/base.py +9 -0
  32. memos/llms/deepseek.py +54 -0
  33. memos/llms/factory.py +10 -1
  34. memos/llms/hf.py +170 -13
  35. memos/llms/hf_singleton.py +114 -0
  36. memos/llms/ollama.py +4 -0
  37. memos/llms/openai.py +68 -1
  38. memos/llms/qwen.py +63 -0
  39. memos/llms/vllm.py +153 -0
  40. memos/mem_cube/general.py +77 -16
  41. memos/mem_cube/utils.py +102 -0
  42. memos/mem_os/core.py +131 -41
  43. memos/mem_os/main.py +93 -11
  44. memos/mem_os/product.py +1098 -35
  45. memos/mem_os/utils/default_config.py +352 -0
  46. memos/mem_os/utils/format_utils.py +1154 -0
  47. memos/mem_reader/simple_struct.py +13 -8
  48. memos/mem_scheduler/base_scheduler.py +467 -36
  49. memos/mem_scheduler/general_scheduler.py +125 -244
  50. memos/mem_scheduler/modules/base.py +9 -0
  51. memos/mem_scheduler/modules/dispatcher.py +68 -2
  52. memos/mem_scheduler/modules/misc.py +39 -0
  53. memos/mem_scheduler/modules/monitor.py +228 -49
  54. memos/mem_scheduler/modules/rabbitmq_service.py +317 -0
  55. memos/mem_scheduler/modules/redis_service.py +32 -22
  56. memos/mem_scheduler/modules/retriever.py +250 -23
  57. memos/mem_scheduler/modules/schemas.py +189 -7
  58. memos/mem_scheduler/mos_for_test_scheduler.py +143 -0
  59. memos/mem_scheduler/utils.py +51 -2
  60. memos/mem_user/persistent_user_manager.py +260 -0
  61. memos/memories/activation/item.py +25 -0
  62. memos/memories/activation/kv.py +10 -3
  63. memos/memories/activation/vllmkv.py +219 -0
  64. memos/memories/factory.py +2 -0
  65. memos/memories/textual/general.py +7 -5
  66. memos/memories/textual/item.py +3 -1
  67. memos/memories/textual/tree.py +14 -6
  68. memos/memories/textual/tree_text_memory/organize/conflict.py +198 -0
  69. memos/memories/textual/tree_text_memory/organize/manager.py +72 -23
  70. memos/memories/textual/tree_text_memory/organize/redundancy.py +193 -0
  71. memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +233 -0
  72. memos/memories/textual/tree_text_memory/organize/reorganizer.py +606 -0
  73. memos/memories/textual/tree_text_memory/retrieve/recall.py +0 -1
  74. memos/memories/textual/tree_text_memory/retrieve/reranker.py +2 -2
  75. memos/memories/textual/tree_text_memory/retrieve/searcher.py +6 -5
  76. memos/parsers/markitdown.py +8 -2
  77. memos/templates/mem_reader_prompts.py +105 -36
  78. memos/templates/mem_scheduler_prompts.py +96 -47
  79. memos/templates/tree_reorganize_prompts.py +223 -0
  80. memos/vec_dbs/base.py +12 -0
  81. memos/vec_dbs/qdrant.py +46 -20
  82. memoryos-0.1.13.dist-info/RECORD +0 -122
  83. {memoryos-0.1.13.dist-info → memoryos-0.2.1.dist-info}/LICENSE +0 -0
  84. {memoryos-0.1.13.dist-info → memoryos-0.2.1.dist-info}/WHEEL +0 -0
memos/api/config.py ADDED
@@ -0,0 +1,471 @@
1
+ import os
2
+
3
+ from typing import Any
4
+
5
+ from dotenv import load_dotenv
6
+
7
+ from memos.configs.mem_cube import GeneralMemCubeConfig
8
+ from memos.configs.mem_os import MOSConfig
9
+ from memos.mem_cube.general import GeneralMemCube
10
+
11
+
12
# Load environment variables from a local .env file (if present) at import
# time, so the os.getenv lookups in APIConfig pick up developer overrides.
load_dotenv()
14
+
15
+
16
class APIConfig:
    """Centralized configuration management for MemOS APIs.

    Every value is read from an environment variable (optionally loaded from a
    .env file at module import) with a development-friendly default, so the
    API servers can be reconfigured without code changes.
    """

    @staticmethod
    def _user_suffix(user_id: str | None) -> str:
        """Normalize a user id into a Neo4j-safe name suffix.

        Falls back to "default" when *user_id* is None: the public signatures
        declare the parameter optional, but the previous implementation raised
        AttributeError on None.
        """
        return (user_id or "default").replace("-", "")

    @staticmethod
    def get_openai_config() -> dict[str, Any]:
        """Get OpenAI chat-model configuration."""
        # NOTE(review): MOS_TOP_K is also read as a retrieval top-k in
        # get_product_default_config — confirm sharing one variable for both
        # sampling and retrieval is intentional.
        return {
            "model_name_or_path": os.getenv("MOS_OPENAI_MODEL", "gpt-4o-mini"),
            "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.8")),
            "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "1024")),
            "top_p": float(os.getenv("MOS_TOP_P", "0.9")),
            "top_k": int(os.getenv("MOS_TOP_K", "50")),
            "remove_think_prefix": True,
            "api_key": os.getenv("OPENAI_API_KEY", "your-api-key-here"),
            "api_base": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
        }

    @staticmethod
    def qwen_config() -> dict[str, Any]:
        """Get Qwen (HuggingFace) chat-model configuration."""
        return {
            "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "Qwen/Qwen3-1.7B"),
            "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.8")),
            "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "4096")),
            "remove_think_prefix": True,
        }

    @staticmethod
    def vllm_config() -> dict[str, Any]:
        """Get vLLM chat-model configuration."""
        return {
            "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "Qwen/Qwen3-1.7B"),
            "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.8")),
            "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "4096")),
            "remove_think_prefix": True,
            "api_key": os.getenv("VLLM_API_KEY", ""),
            "api_base": os.getenv("VLLM_API_BASE", "http://localhost:8088/v1"),
            "model_schema": os.getenv("MOS_MODEL_SCHEMA", "memos.configs.llm.VLLMLLMConfig"),
        }

    @staticmethod
    def get_activation_config() -> dict[str, Any]:
        """Get KV-cache activation-memory configuration (HuggingFace singleton backend)."""
        return {
            "backend": "kv_cache",
            "config": {
                "memory_filename": "activation_memory.pickle",
                "extractor_llm": {
                    "backend": "huggingface_singleton",
                    "config": {
                        "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "Qwen/Qwen3-1.7B"),
                        "temperature": 0.8,
                        "max_tokens": 1024,
                        "top_p": 0.9,
                        "top_k": 50,
                        "add_generation_prompt": True,
                        "remove_think_prefix": False,
                    },
                },
            },
        }

    @staticmethod
    def get_activation_vllm_config() -> dict[str, Any]:
        """Get KV-cache activation-memory configuration (vLLM backend)."""
        return {
            "backend": "vllm_kv_cache",
            "config": {
                "memory_filename": "activation_memory.pickle",
                "extractor_llm": {
                    "backend": "vllm",
                    "config": APIConfig.vllm_config(),
                },
            },
        }

    @staticmethod
    def get_embedder_config() -> dict[str, Any]:
        """Get embedder configuration (universal_api or ollama, per MOS_EMBEDDER_BACKEND)."""
        embedder_backend = os.getenv("MOS_EMBEDDER_BACKEND", "ollama")

        if embedder_backend == "universal_api":
            return {
                "backend": "universal_api",
                "config": {
                    "provider": os.getenv("MOS_EMBEDDER_PROVIDER", "openai"),
                    "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxx"),
                    "model_name_or_path": os.getenv("MOS_EMBEDDER_MODEL", "text-embedding-3-large"),
                    "base_url": os.getenv("OPENAI_API_BASE", "http://openai.com"),
                },
            }
        else:  # ollama
            return {
                "backend": "ollama",
                "config": {
                    "model_name_or_path": os.getenv(
                        "MOS_EMBEDDER_MODEL", "nomic-embed-text:latest"
                    ),
                    "api_base": os.getenv("OLLAMA_API_BASE", "http://localhost:11434"),
                },
            }

    @staticmethod
    def get_neo4j_community_config(user_id: str | None = None) -> dict[str, Any]:
        """Get Neo4j Community Edition configuration (shared DB + external vector store)."""
        return {
            "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"),
            "user": os.getenv("NEO4J_USER", "neo4j"),
            "db_name": os.getenv("NEO4J_DB_NAME", "shared-tree-textual-memory"),
            "password": os.getenv("NEO4J_PASSWORD", "12345678"),
            "user_name": f"memos{APIConfig._user_suffix(user_id)}",
            "auto_create": True,
            "use_multi_db": False,
            "embedding_dimension": 3072,
            "vec_config": {
                # Pass nested config to initialize external vector DB
                # If you use qdrant, please use Server instead of local mode.
                "backend": "qdrant",
                "config": {
                    "collection_name": "neo4j_vec_db",
                    "vector_dimension": 3072,
                    "distance_metric": "cosine",
                    "host": "localhost",
                    "port": 6333,
                },
            },
        }

    @staticmethod
    def get_neo4j_config(user_id: str | None = None) -> dict[str, Any]:
        """Get Neo4j configuration, dispatching on MOS_NEO4J_SHARED_DB."""
        if os.getenv("MOS_NEO4J_SHARED_DB", "false").lower() == "true":
            return APIConfig.get_neo4j_shared_config(user_id)
        else:
            return APIConfig.get_noshared_neo4j_config(user_id)

    @staticmethod
    def get_noshared_neo4j_config(user_id: str | None) -> dict[str, Any]:
        """Get per-user (multi-database) Neo4j configuration."""
        return {
            "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"),
            "user": os.getenv("NEO4J_USER", "neo4j"),
            "db_name": f"memos{APIConfig._user_suffix(user_id)}",
            "password": os.getenv("NEO4J_PASSWORD", "12345678"),
            "auto_create": True,
            "use_multi_db": True,
            "embedding_dimension": 3072,
        }

    @staticmethod
    def get_neo4j_shared_config(user_id: str | None = None) -> dict[str, Any]:
        """Get shared-database Neo4j configuration (users separated by node name)."""
        return {
            "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"),
            "user": os.getenv("NEO4J_USER", "neo4j"),
            "db_name": os.getenv("NEO4J_DB_NAME", "shared-tree-textual-memory"),
            "password": os.getenv("NEO4J_PASSWORD", "12345678"),
            "user_name": f"memos{APIConfig._user_suffix(user_id)}",
            "auto_create": True,
            "use_multi_db": False,
            "embedding_dimension": 3072,
        }

    @staticmethod
    def get_scheduler_config() -> dict[str, Any]:
        """Get memory-scheduler configuration."""
        return {
            "backend": "general_scheduler",
            "config": {
                "top_k": int(os.getenv("MOS_SCHEDULER_TOP_K", "10")),
                "top_n": int(os.getenv("MOS_SCHEDULER_TOP_N", "5")),
                "act_mem_update_interval": int(
                    os.getenv("MOS_SCHEDULER_ACT_MEM_UPDATE_INTERVAL", "300")
                ),
                "context_window_size": int(os.getenv("MOS_SCHEDULER_CONTEXT_WINDOW_SIZE", "5")),
                "thread_pool_max_workers": int(
                    os.getenv("MOS_SCHEDULER_THREAD_POOL_MAX_WORKERS", "10")
                ),
                "consume_interval_seconds": int(
                    os.getenv("MOS_SCHEDULER_CONSUME_INTERVAL_SECONDS", "3")
                ),
                "enable_parallel_dispatch": os.getenv(
                    "MOS_SCHEDULER_ENABLE_PARALLEL_DISPATCH", "true"
                ).lower()
                == "true",
                "enable_act_memory_update": True,
            },
        }

    @staticmethod
    def is_scheduler_enabled() -> bool:
        """Check if scheduler is enabled via environment variable."""
        return os.getenv("MOS_ENABLE_SCHEDULER", "false").lower() == "true"

    @staticmethod
    def is_default_cube_config_enabled() -> bool:
        """Check if default cube config is enabled via environment variable."""
        return os.getenv("MOS_ENABLE_DEFAULT_CUBE_CONFIG", "false").lower() == "true"

    @staticmethod
    def _with_scheduler(config: dict[str, Any]) -> dict[str, Any]:
        """Attach scheduler settings to *config* in place and return it."""
        if APIConfig.is_scheduler_enabled():
            config["mem_scheduler"] = APIConfig.get_scheduler_config()
            config["enable_mem_scheduler"] = True
        else:
            config["enable_mem_scheduler"] = False
        return config

    @staticmethod
    def _mem_reader_config(openai_config: dict[str, Any]) -> dict[str, Any]:
        """Build the simple_struct mem-reader section shared by several configs."""
        return {
            "backend": "simple_struct",
            "config": {
                "llm": {
                    "backend": "openai",
                    "config": openai_config,
                },
                "embedder": APIConfig.get_embedder_config(),
                "chunker": {
                    "backend": "sentence",
                    "config": {
                        "tokenizer_or_token_counter": "gpt2",
                        "chunk_size": 512,
                        "chunk_overlap": 128,
                        "min_sentences_per_chunk": 1,
                    },
                },
            },
        }

    @staticmethod
    def _graph_db_config(user_id: str) -> dict[str, Any]:
        """Select the graph-db backend section based on NEO4J_BACKEND."""
        if os.getenv("NEO4J_BACKEND", "neo4j_community").lower() == "neo4j_community":
            return {
                "backend": "neo4j-community",
                "config": APIConfig.get_neo4j_community_config(user_id),
            }
        return {
            "backend": "neo4j",
            "config": APIConfig.get_neo4j_config(user_id),
        }

    @staticmethod
    def _act_mem_config() -> dict[str, Any]:
        """Activation-memory section: empty unless ENABLE_ACTIVATION_MEMORY is true."""
        if os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower() == "false":
            return {}
        return APIConfig.get_activation_vllm_config()

    @staticmethod
    def _build_cube_config(
        user_id: str, cube_id: str, include_reorganize: bool
    ) -> GeneralMemCubeConfig:
        """Build a GeneralMemCubeConfig for *user_id*/*cube_id*.

        This replaces four near-identical inline dict literals; the only
        structural difference between callers is whether the text_mem section
        carries a "reorganize" flag (*include_reorganize*).
        """
        openai_config = APIConfig.get_openai_config()
        text_mem_config: dict[str, Any] = {
            "extractor_llm": {"backend": "openai", "config": openai_config},
            "dispatcher_llm": {"backend": "openai", "config": openai_config},
            "graph_db": APIConfig._graph_db_config(user_id),
            "embedder": APIConfig.get_embedder_config(),
        }
        if include_reorganize:
            text_mem_config["reorganize"] = (
                os.getenv("MOS_ENABLE_REORGANIZE", "false").lower() == "true"
            )
        return GeneralMemCubeConfig.model_validate(
            {
                "user_id": user_id,
                "cube_id": cube_id,
                "text_mem": {
                    "backend": "tree_text",
                    "config": text_mem_config,
                },
                "act_mem": APIConfig._act_mem_config(),
                "para_mem": {},
            }
        )

    @staticmethod
    def get_product_default_config() -> dict[str, Any]:
        """Get default configuration for the Product API."""
        openai_config = APIConfig.get_openai_config()
        backend_model = {
            "openai": openai_config,
            "huggingface": APIConfig.qwen_config(),
            "vllm": APIConfig.vllm_config(),
        }
        backend = os.getenv("MOS_CHAT_MODEL_PROVIDER", "openai")
        config = {
            "user_id": os.getenv("MOS_USER_ID", "root"),
            "chat_model": {"backend": backend, "config": backend_model[backend]},
            "mem_reader": APIConfig._mem_reader_config(openai_config),
            "enable_textual_memory": True,
            "enable_activation_memory": os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower()
            == "true",
            "top_k": int(os.getenv("MOS_TOP_K", "50")),
            "max_turns_window": int(os.getenv("MOS_MAX_TURNS_WINDOW", "20")),
        }
        return APIConfig._with_scheduler(config)

    @staticmethod
    def get_start_default_config() -> dict[str, Any]:
        """Get default configuration for the Start API."""
        # NOTE(review): MOS_TOP_K is used both for retrieval (default 5) and
        # chat-model sampling (default 50) — confirm this overlap is intended.
        config = {
            "user_id": os.getenv("MOS_USER_ID", "default_user"),
            "session_id": os.getenv("MOS_SESSION_ID", "default_session"),
            "enable_textual_memory": True,
            "enable_activation_memory": os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower()
            == "true",
            "top_k": int(os.getenv("MOS_TOP_K", "5")),
            "chat_model": {
                "backend": os.getenv("MOS_CHAT_MODEL_PROVIDER", "openai"),
                "config": {
                    "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "gpt-4o-mini"),
                    "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxxxx"),
                    # Defaults below are strings for consistency with the rest
                    # of the class (os.getenv would otherwise return the raw
                    # non-string default unchanged).
                    "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.7")),
                    "api_base": os.getenv("OPENAI_API_BASE", "http://xxxxxx:3000/v1"),
                    "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "1024")),
                    "top_p": float(os.getenv("MOS_TOP_P", "0.9")),
                    "top_k": int(os.getenv("MOS_TOP_K", "50")),
                    "remove_think_prefix": True,
                },
            },
        }
        return APIConfig._with_scheduler(config)

    @staticmethod
    def create_user_config(user_name: str, user_id: str) -> tuple[MOSConfig, GeneralMemCube]:
        """Create the MOSConfig and default GeneralMemCube for a specific user.

        Returns:
            A ``(mos_config, mem_cube)`` tuple ready for registration.
        """
        openai_config = APIConfig.get_openai_config()
        backend = os.getenv("MOS_CHAT_MODEL_PROVIDER", "openai")
        backend_model = {
            "openai": openai_config,
            "huggingface": APIConfig.qwen_config(),
            "vllm": APIConfig.vllm_config(),
        }
        config_dict = APIConfig._with_scheduler(
            {
                "user_id": user_id,
                "chat_model": {
                    "backend": backend,
                    "config": backend_model[backend],
                },
                "mem_reader": APIConfig._mem_reader_config(openai_config),
                "enable_textual_memory": True,
                "enable_activation_memory": os.getenv("ENABLE_ACTIVATION_MEMORY", "false").lower()
                == "true",
                "top_k": 30,
                "max_turns_window": 20,
            }
        )
        default_config = MOSConfig(**config_dict)
        default_cube_config = APIConfig._build_cube_config(
            user_id, f"{user_name}_default_cube", include_reorganize=False
        )
        default_mem_cube = GeneralMemCube(default_cube_config)
        return default_config, default_mem_cube

    @staticmethod
    def get_default_cube_config() -> GeneralMemCubeConfig | None:
        """Get default cube configuration for product initialization.

        Returns:
            GeneralMemCubeConfig | None: Default cube configuration if enabled, None otherwise.
        """
        if not APIConfig.is_default_cube_config_enabled():
            return None
        return APIConfig._build_cube_config("default", "default_cube", include_reorganize=True)
@@ -0,0 +1,28 @@
1
+ import logging
2
+
3
+ from fastapi.requests import Request
4
+ from fastapi.responses import JSONResponse
5
+
6
+
7
# Module-level logger used by the exception handlers below.
logger = logging.getLogger(__name__)
8
+
9
+
10
class APIExceptionHandler:
    """Centralized exception handling for MemOS APIs.

    Each handler converts an exception into a uniform JSON envelope of the
    shape ``{"code": <status>, "message": <text>, "data": None}``.
    """

    @staticmethod
    def _error_response(status, exc):
        """Build the shared JSON error envelope for *exc* with HTTP *status*."""
        return JSONResponse(
            status_code=status,
            content={"code": status, "message": str(exc), "data": None},
        )

    @staticmethod
    async def value_error_handler(request: Request, exc: ValueError):
        """Handle ValueError exceptions globally."""
        return APIExceptionHandler._error_response(400, exc)

    @staticmethod
    async def global_exception_handler(request: Request, exc: Exception):
        """Handle all unhandled exceptions globally."""
        logger.exception("Unhandled error:")
        return APIExceptionHandler._error_response(500, exc)