MemoryOS 0.2.2__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of MemoryOS might be problematic.
- {memoryos-0.2.2.dist-info → memoryos-1.0.1.dist-info}/METADATA +7 -1
- {memoryos-0.2.2.dist-info → memoryos-1.0.1.dist-info}/RECORD +81 -66
- memos/__init__.py +1 -1
- memos/api/config.py +31 -8
- memos/api/context/context.py +1 -1
- memos/api/context/context_thread.py +96 -0
- memos/api/middleware/request_context.py +94 -0
- memos/api/product_api.py +5 -1
- memos/api/product_models.py +16 -0
- memos/api/routers/product_router.py +39 -3
- memos/api/start_api.py +3 -0
- memos/configs/internet_retriever.py +13 -0
- memos/configs/mem_scheduler.py +38 -16
- memos/configs/memory.py +13 -0
- memos/configs/reranker.py +18 -0
- memos/graph_dbs/base.py +33 -4
- memos/graph_dbs/nebular.py +631 -236
- memos/graph_dbs/neo4j.py +18 -7
- memos/graph_dbs/neo4j_community.py +6 -3
- memos/llms/vllm.py +2 -0
- memos/log.py +125 -8
- memos/mem_os/core.py +49 -11
- memos/mem_os/main.py +1 -1
- memos/mem_os/product.py +392 -215
- memos/mem_os/utils/default_config.py +1 -1
- memos/mem_os/utils/format_utils.py +11 -47
- memos/mem_os/utils/reference_utils.py +153 -0
- memos/mem_reader/simple_struct.py +112 -43
- memos/mem_scheduler/base_scheduler.py +58 -55
- memos/mem_scheduler/{modules → general_modules}/base.py +1 -2
- memos/mem_scheduler/{modules → general_modules}/dispatcher.py +54 -15
- memos/mem_scheduler/{modules → general_modules}/rabbitmq_service.py +4 -4
- memos/mem_scheduler/{modules → general_modules}/redis_service.py +1 -1
- memos/mem_scheduler/{modules → general_modules}/retriever.py +19 -5
- memos/mem_scheduler/{modules → general_modules}/scheduler_logger.py +10 -4
- memos/mem_scheduler/general_scheduler.py +110 -67
- memos/mem_scheduler/monitors/__init__.py +0 -0
- memos/mem_scheduler/monitors/dispatcher_monitor.py +305 -0
- memos/mem_scheduler/{modules/monitor.py → monitors/general_monitor.py} +57 -19
- memos/mem_scheduler/mos_for_test_scheduler.py +7 -1
- memos/mem_scheduler/schemas/general_schemas.py +3 -2
- memos/mem_scheduler/schemas/message_schemas.py +2 -1
- memos/mem_scheduler/schemas/monitor_schemas.py +10 -2
- memos/mem_scheduler/utils/misc_utils.py +43 -2
- memos/mem_user/mysql_user_manager.py +4 -2
- memos/memories/activation/item.py +1 -1
- memos/memories/activation/kv.py +20 -8
- memos/memories/textual/base.py +1 -1
- memos/memories/textual/general.py +1 -1
- memos/memories/textual/item.py +1 -1
- memos/memories/textual/tree.py +31 -1
- memos/memories/textual/tree_text_memory/organize/{conflict.py → handler.py} +30 -48
- memos/memories/textual/tree_text_memory/organize/manager.py +8 -96
- memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +2 -0
- memos/memories/textual/tree_text_memory/organize/reorganizer.py +102 -140
- memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +231 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +9 -0
- memos/memories/textual/tree_text_memory/retrieve/recall.py +67 -10
- memos/memories/textual/tree_text_memory/retrieve/reranker.py +1 -1
- memos/memories/textual/tree_text_memory/retrieve/searcher.py +246 -134
- memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +7 -2
- memos/memories/textual/tree_text_memory/retrieve/utils.py +7 -5
- memos/memos_tools/lockfree_dict.py +120 -0
- memos/memos_tools/notification_utils.py +46 -0
- memos/memos_tools/thread_safe_dict.py +288 -0
- memos/reranker/__init__.py +4 -0
- memos/reranker/base.py +24 -0
- memos/reranker/cosine_local.py +95 -0
- memos/reranker/factory.py +43 -0
- memos/reranker/http_bge.py +99 -0
- memos/reranker/noop.py +16 -0
- memos/templates/mem_reader_prompts.py +290 -39
- memos/templates/mem_scheduler_prompts.py +23 -10
- memos/templates/mos_prompts.py +133 -31
- memos/templates/tree_reorganize_prompts.py +24 -17
- memos/utils.py +19 -0
- memos/memories/textual/tree_text_memory/organize/redundancy.py +0 -193
- {memoryos-0.2.2.dist-info → memoryos-1.0.1.dist-info}/LICENSE +0 -0
- {memoryos-0.2.2.dist-info → memoryos-1.0.1.dist-info}/WHEEL +0 -0
- {memoryos-0.2.2.dist-info → memoryos-1.0.1.dist-info}/entry_points.txt +0 -0
- /memos/mem_scheduler/{modules → general_modules}/__init__.py +0 -0
- /memos/mem_scheduler/{modules → general_modules}/misc.py +0 -0
memos/mem_scheduler/base_scheduler.py:

```diff
@@ -9,13 +9,14 @@ from memos.configs.mem_scheduler import AuthConfig, BaseSchedulerConfig
 from memos.llms.base import BaseLLM
 from memos.log import get_logger
 from memos.mem_cube.general import GeneralMemCube
-from memos.mem_scheduler.
-from memos.mem_scheduler.
-from memos.mem_scheduler.
-from memos.mem_scheduler.
-from memos.mem_scheduler.
-from memos.mem_scheduler.
-from memos.mem_scheduler.
+from memos.mem_scheduler.general_modules.dispatcher import SchedulerDispatcher
+from memos.mem_scheduler.general_modules.misc import AutoDroppingQueue as Queue
+from memos.mem_scheduler.general_modules.rabbitmq_service import RabbitMQSchedulerModule
+from memos.mem_scheduler.general_modules.redis_service import RedisSchedulerModule
+from memos.mem_scheduler.general_modules.retriever import SchedulerRetriever
+from memos.mem_scheduler.general_modules.scheduler_logger import SchedulerLoggerModule
+from memos.mem_scheduler.monitors.dispatcher_monitor import SchedulerDispatcherMonitor
+from memos.mem_scheduler.monitors.general_monitor import SchedulerGeneralMonitor
 from memos.mem_scheduler.schemas.general_schemas import (
     DEFAULT_ACT_MEM_DUMP_PATH,
     DEFAULT_CONSUME_INTERVAL_SECONDS,
@@ -52,19 +53,20 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
         # hyper-parameters
         self.top_k = self.config.get("top_k", 10)
         self.context_window_size = self.config.get("context_window_size", 5)
-        self.
+        self.enable_activation_memory = self.config.get("enable_activation_memory", False)
         self.act_mem_dump_path = self.config.get("act_mem_dump_path", DEFAULT_ACT_MEM_DUMP_PATH)
         self.search_method = TreeTextMemory_SEARCH_METHOD
         self.enable_parallel_dispatch = self.config.get("enable_parallel_dispatch", False)
-        self.
+        self.thread_pool_max_workers = self.config.get(
             "thread_pool_max_workers", DEFAULT_THREAD__POOL_MAX_WORKERS
         )

         self.retriever: SchedulerRetriever | None = None
-        self.monitor:
-
+        self.monitor: SchedulerGeneralMonitor | None = None
+        self.dispatcher_monitor: SchedulerDispatcherMonitor | None = None
         self.dispatcher = SchedulerDispatcher(
-            max_workers=self.
+            max_workers=self.thread_pool_max_workers,
+            enable_parallel_dispatch=self.enable_parallel_dispatch,
         )

         # internal message queue
@@ -84,6 +86,8 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
         # other attributes
        self._context_lock = threading.Lock()
         self.current_user_id: UserID | str | None = None
+        self.current_mem_cube_id: MemCubeID | str | None = None
+        self.current_mem_cube: GeneralMemCube | None = None
         self.auth_config_path: str | Path | None = self.config.get("auth_config_path", None)
         self.auth_config = None
         self.rabbitmq_config = None
@@ -95,14 +99,19 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
         # initialize submodules
         self.chat_llm = chat_llm
         self.process_llm = process_llm
-        self.monitor =
+        self.monitor = SchedulerGeneralMonitor(process_llm=self.process_llm, config=self.config)
+        self.dispatcher_monitor = SchedulerDispatcherMonitor(config=self.config)
         self.retriever = SchedulerRetriever(process_llm=self.process_llm, config=self.config)

+        if self.enable_parallel_dispatch:
+            self.dispatcher_monitor.initialize(dispatcher=self.dispatcher)
+            self.dispatcher_monitor.start()
+
         # initialize with auth_cofig
         if self.auth_config_path is not None and Path(self.auth_config_path).exists():
-            self.auth_config = AuthConfig.
+            self.auth_config = AuthConfig.from_local_config(config_path=self.auth_config_path)
         elif AuthConfig.default_config_exists():
-            self.auth_config = AuthConfig.
+            self.auth_config = AuthConfig.from_local_config()
         else:
             self.auth_config = None
```
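The constructor above resolves authentication settings in three tiers: an explicit `auth_config_path`, then a default on-disk config via `AuthConfig.default_config_exists()` and `AuthConfig.from_local_config()`, otherwise `None`. Below is a minimal sketch of that fallback pattern; the `load_auth_config` helper, the JSON layout, and `DEFAULT_AUTH_PATH` are illustrative assumptions, not the real `AuthConfig` API.

```python
import json
from pathlib import Path

# Hypothetical default location, for illustration only.
DEFAULT_AUTH_PATH = Path.home() / ".memos" / "auth.json"


def load_auth_config(explicit_path: str | None = None) -> dict | None:
    """Prefer an explicit path, fall back to the default file, else return None."""
    if explicit_path is not None and Path(explicit_path).exists():
        return json.loads(Path(explicit_path).read_text())  # caller-supplied config wins
    if DEFAULT_AUTH_PATH.exists():
        return json.loads(DEFAULT_AUTH_PATH.read_text())  # shared default config
    return None  # nothing configured; downstream modules must handle this
```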
memos/mem_scheduler/base_scheduler.py:

```diff
@@ -130,8 +139,8 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
         self.current_mem_cube_id = msg.mem_cube_id
         self.current_mem_cube = msg.mem_cube

-    def
-        self, memories: list[TextualMemoryItem]
+    def transform_working_memories_to_monitors(
+        self, query_keywords, memories: list[TextualMemoryItem]
     ) -> list[MemoryMonitorItem]:
         """
         Convert a list of TextualMemoryItem objects into MemoryMonitorItem objects
@@ -143,10 +152,6 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
         Returns:
             List of MemoryMonitorItem objects with computed importance scores.
         """
-        query_keywords = self.monitor.query_monitors.get_keywords_collections()
-        logger.debug(
-            f"Processing {len(memories)} memories with {len(query_keywords)} query keywords"
-        )

         result = []
         mem_length = len(memories)
@@ -178,7 +183,7 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
             )
             result.append(mem_monitor)

-        logger.
+        logger.info(f"Transformed {len(result)} memories to monitors")
         return result

     def replace_working_memory(
@@ -195,7 +200,8 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
             text_mem_base: TreeTextMemory = text_mem_base

             # process rerank memories with llm
-
+            query_monitor = self.monitor.query_monitors[user_id][mem_cube_id]
+            query_history = query_monitor.get_queries_with_timesort()
             memories_with_new_order, rerank_success_flag = (
                 self.retriever.process_and_rerank_memories(
                     queries=query_history,
@@ -206,14 +212,20 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
             )

             # update working memory monitors
-
-
+            query_keywords = query_monitor.get_keywords_collections()
+            logger.info(
+                f"Processing {len(memories_with_new_order)} memories with {len(query_keywords)} query keywords"
+            )
+            new_working_memory_monitors = self.transform_working_memories_to_monitors(
+                query_keywords=query_keywords,
+                memories=memories_with_new_order,
             )

             if not rerank_success_flag:
                 for one in new_working_memory_monitors:
                     one.sorting_score = 0

+            logger.info(f"update {len(new_working_memory_monitors)} working_memory_monitors")
             self.monitor.update_working_memory_monitors(
                 new_working_memory_monitors=new_working_memory_monitors,
                 user_id=user_id,
@@ -245,25 +257,6 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg

         return memories_with_new_order

-    def initialize_working_memory_monitors(
-        self,
-        user_id: UserID | str,
-        mem_cube_id: MemCubeID | str,
-        mem_cube: GeneralMemCube,
-    ):
-        text_mem_base: TreeTextMemory = mem_cube.text_mem
-        working_memories = text_mem_base.get_working_memory()
-
-        working_memory_monitors = self.transform_memories_to_monitors(
-            memories=working_memories,
-        )
-        self.monitor.update_working_memory_monitors(
-            new_working_memory_monitors=working_memory_monitors,
-            user_id=user_id,
-            mem_cube_id=mem_cube_id,
-            mem_cube=mem_cube,
-        )
-
     def update_activation_memory(
         self,
         new_memories: list[str | TextualMemoryItem],
@@ -324,6 +317,7 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg

             cache_item = act_mem.extract(new_text_memory)
             cache_item.records.text_memories = new_text_memories
+            cache_item.records.timestamp = datetime.utcnow()

             act_mem.add([cache_item])
             act_mem.dump(self.act_mem_dump_path)
@@ -367,13 +361,9 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
                 or len(self.monitor.working_memory_monitors[user_id][mem_cube_id].memories) == 0
             ):
                 logger.warning(
-                    "No memories found in working_memory_monitors,
-                )
-                self.initialize_working_memory_monitors(
-                    user_id=user_id,
-                    mem_cube_id=mem_cube_id,
-                    mem_cube=mem_cube,
+                    "No memories found in working_memory_monitors, activation memory update is skipped"
                 )
+                return

             self.monitor.update_activation_memory_monitors(
                 user_id=user_id, mem_cube_id=mem_cube_id, mem_cube=mem_cube
@@ -387,6 +377,11 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
                 logger.info(
                     f"Collected {len(new_activation_memories)} new memory entries for processing"
                 )
+                # Print the content of each new activation memory
+                for i, memory in enumerate(new_activation_memories[:5], 1):
+                    logger.info(
+                        f"Part of New Activation Memorires | {i}/{len(new_activation_memories)}: {memory[:20]}"
+                    )

                 self.update_activation_memory(
                     new_memories=new_activation_memories,
@@ -396,19 +391,20 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
                     mem_cube=mem_cube,
                 )

-                self.monitor.last_activation_mem_update_time = datetime.
+                self.monitor.last_activation_mem_update_time = datetime.utcnow()

                 logger.debug(
                     f"Activation memory update completed at {self.monitor.last_activation_mem_update_time}"
                 )
+
             else:
                 logger.info(
                     f"Skipping update - {interval_seconds} second interval not yet reached. "
                     f"Last update time is {self.monitor.last_activation_mem_update_time} and now is"
-                    f"{datetime.
+                    f"{datetime.utcnow()}"
                 )
         except Exception as e:
-            logger.error(f"Error: {e}", exc_info=True)
+            logger.error(f"Error in update_activation_memory_periodically: {e}", exc_info=True)

     def submit_messages(self, messages: ScheduleMessageItem | list[ScheduleMessageItem]):
         """Submit multiple messages to the message queue."""
```
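The periodic update path above gates work on an interval check and stamps `last_activation_mem_update_time` with `datetime.utcnow()` once an update succeeds. A rough, self-contained sketch of that gating logic follows; the class and method names are illustrative, not MemOS APIs.

```python
from collections.abc import Callable
from datetime import datetime, timedelta


class IntervalGate:
    """Run a callback only when at least `interval_seconds` have elapsed since the last run."""

    def __init__(self, interval_seconds: int = 300):
        self.interval = timedelta(seconds=interval_seconds)
        self.last_update_time = datetime.min  # ensures the first call always runs

    def maybe_run(self, update: Callable[[], None]) -> bool:
        now = datetime.utcnow()
        if now - self.last_update_time < self.interval:
            return False  # interval not yet reached; skip, as the scheduler logs above
        update()
        self.last_update_time = datetime.utcnow()  # stamp after the update completes
        return True
```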
memos/mem_scheduler/base_scheduler.py:

```diff
@@ -506,7 +502,9 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg

         # Initialize dispatcher resources
         if self.enable_parallel_dispatch:
-            logger.info(
+            logger.info(
+                f"Initializing dispatcher thread pool with {self.thread_pool_max_workers} workers"
+            )

         # Start consumer thread
         self._running = True
@@ -541,10 +539,15 @@ class BaseScheduler(RabbitMQSchedulerModule, RedisSchedulerModule, SchedulerLogg
         logger.info("Consumer thread stopped")

         # Shutdown dispatcher
-        if
+        if self.dispatcher:
             logger.info("Shutting down dispatcher...")
             self.dispatcher.shutdown()

+        # Shutdown dispatcher_monitor
+        if self.dispatcher_monitor:
+            logger.info("Shutting down monitor...")
+            self.dispatcher_monitor.stop()
+
         # Clean up queues
         self._cleanup_queues()
         logger.info("Memory Scheduler stopped completely")
```
memos/mem_scheduler/{modules → general_modules}/base.py:

```diff
@@ -17,8 +17,7 @@ class BaseSchedulerModule:

         self._chat_llm = None
         self._process_llm = None
-
-        self.current_mem_cube: GeneralMemCube | None = None
+
         self.mem_cubes: dict[str, GeneralMemCube] = {}

     def load_template(self, template_name: str) -> str:
```
memos/mem_scheduler/{modules → general_modules}/dispatcher.py:

```diff
@@ -1,9 +1,11 @@
+import concurrent
+
 from collections import defaultdict
 from collections.abc import Callable
 from concurrent.futures import ThreadPoolExecutor

 from memos.log import get_logger
-from memos.mem_scheduler.
+from memos.mem_scheduler.general_modules.base import BaseSchedulerModule
 from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem


@@ -26,20 +28,27 @@ class SchedulerDispatcher(BaseSchedulerModule):
         super().__init__()
         # Main dispatcher thread pool
         self.max_workers = max_workers
+
         # Only initialize thread pool if in parallel mode
         self.enable_parallel_dispatch = enable_parallel_dispatch
+        self.thread_name_prefix = "dispatcher"
         if self.enable_parallel_dispatch:
             self.dispatcher_executor = ThreadPoolExecutor(
-                max_workers=self.max_workers, thread_name_prefix=
+                max_workers=self.max_workers, thread_name_prefix=self.thread_name_prefix
             )
         else:
             self.dispatcher_executor = None
         logger.info(f"enable_parallel_dispatch is set to {self.enable_parallel_dispatch}")
+
         # Registered message handlers
         self.handlers: dict[str, Callable] = {}
+
         # Dispatcher running state
         self._running = False

+        # Set to track active futures for monitoring purposes
+        self._futures = set()
+
     def register_handler(self, label: str, handler: Callable[[list[ScheduleMessageItem]], None]):
         """
         Register a handler function for a specific message label.
@@ -105,6 +114,13 @@ class SchedulerDispatcher(BaseSchedulerModule):
         # Convert defaultdict to regular dict for cleaner output
         return {user_id: dict(cube_groups) for user_id, cube_groups in grouped_dict.items()}

+    def _handle_future_result(self, future):
+        self._futures.remove(future)
+        try:
+            future.result()  # this will throw exception
+        except Exception as e:
+            logger.error(f"Handler execution failed: {e!s}", exc_info=True)
+
     def dispatch(self, msg_list: list[ScheduleMessageItem]):
         """
         Dispatch a list of messages to their respective handlers.
@@ -112,26 +128,26 @@ class SchedulerDispatcher(BaseSchedulerModule):
         Args:
             msg_list: List of ScheduleMessageItem objects to process
         """
+        if not msg_list:
+            logger.debug("Received empty message list, skipping dispatch")
+            return

-        # Group messages by their labels
+        # Group messages by their labels, and organize messages by label
         label_groups = defaultdict(list)
-
-        # Organize messages by label
         for message in msg_list:
             label_groups[message.label].append(message)

         # Process each label group
         for label, msgs in label_groups.items():
-
-
-                handler = self._default_message_handler
-            else:
-                handler = self.handlers[label]
+            handler = self.handlers.get(label, self._default_message_handler)
+
             # dispatch to different handler
             logger.debug(f"Dispatch {len(msgs)} message(s) to {label} handler.")
             if self.enable_parallel_dispatch and self.dispatcher_executor is not None:
                 # Capture variables in lambda to avoid loop variable issues
-                self.dispatcher_executor.submit(handler, msgs)
+                future = self.dispatcher_executor.submit(handler, msgs)
+                self._futures.add(future)
+                future.add_done_callback(self._handle_future_result)
                 logger.info(f"Dispatched {len(msgs)} message(s) as future task")
             else:
                 handler(msgs)
```
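The dispatch changes above group messages by label, fall back to a default handler via `dict.get`, and track submitted futures in a set so a done-callback can surface handler exceptions. Below is a condensed, standalone sketch of that pattern built only on `concurrent.futures`; the `MiniDispatcher` class and tuple-shaped messages are illustrative assumptions, not the MemOS types.

```python
import logging
from collections import defaultdict
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor

logger = logging.getLogger(__name__)


def _default_handler(msgs: list) -> None:
    logger.warning("No handler registered; dropping %d message(s)", len(msgs))


class MiniDispatcher:
    """Group (label, payload) messages and run each group's handler on a thread pool."""

    def __init__(self, max_workers: int = 4):
        self.executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="dispatcher")
        self.handlers: dict[str, Callable] = {}
        self._futures: set = set()  # live futures, kept for monitoring and shutdown

    def _on_done(self, future) -> None:
        self._futures.discard(future)
        try:
            future.result()  # re-raises any exception raised inside the handler
        except Exception:
            logger.exception("Handler execution failed")

    def dispatch(self, msg_list: list[tuple[str, object]]) -> None:
        if not msg_list:
            return  # nothing to do, mirrors the early return added above
        groups: dict[str, list] = defaultdict(list)
        for label, payload in msg_list:
            groups[label].append(payload)
        for label, msgs in groups.items():
            handler = self.handlers.get(label, _default_handler)
            future = self.executor.submit(handler, msgs)
            self._futures.add(future)
            future.add_done_callback(self._on_done)
```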
memos/mem_scheduler/{modules → general_modules}/dispatcher.py:

```diff
@@ -148,15 +164,38 @@ class SchedulerDispatcher(BaseSchedulerModule):
         if not self.enable_parallel_dispatch or self.dispatcher_executor is None:
             return True  # 串行模式无需等待

-
-
+        done, not_done = concurrent.futures.wait(
+            self._futures, timeout=timeout, return_when=concurrent.futures.ALL_COMPLETED
+        )
+
+        # Check for exceptions in completed tasks
+        for future in done:
+            try:
+                future.result()
+            except Exception:
+                logger.error("Handler failed during shutdown", exc_info=True)
+
+        return len(not_done) == 0

     def shutdown(self) -> None:
         """Gracefully shutdown the dispatcher."""
+        self._running = False
+
         if self.dispatcher_executor is not None:
+            # Cancel pending tasks
+            cancelled = 0
+            for future in self._futures:
+                if future.cancel():
+                    cancelled += 1
+            logger.info(f"Cancelled {cancelled}/{len(self._futures)} pending tasks")
+
+            # Shutdown executor
+            try:
                 self.dispatcher_executor.shutdown(wait=True)
-
-
+            except Exception as e:
+                logger.error(f"Executor shutdown error: {e}", exc_info=True)
+            finally:
+                self._futures.clear()

     def __enter__(self):
         self._running = True
```
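The new wait/shutdown logic above waits on the tracked futures with a timeout, logs handler failures, cancels whatever has not started, and only then shuts the executor down. The following self-contained sketch walks through that teardown sequence; the helper function names are illustrative assumptions.

```python
import concurrent.futures
import logging
from concurrent.futures import Future, ThreadPoolExecutor

logger = logging.getLogger(__name__)


def wait_for_futures(futures: set[Future], timeout: float | None = None) -> bool:
    """Block until the tracked futures complete; return True when none are left pending."""
    done, not_done = concurrent.futures.wait(
        futures, timeout=timeout, return_when=concurrent.futures.ALL_COMPLETED
    )
    for future in done:
        try:
            future.result()  # surface handler exceptions before teardown
        except Exception:
            logger.exception("Handler failed while draining")
    return len(not_done) == 0


def shutdown_dispatcher(executor: ThreadPoolExecutor, futures: set[Future]) -> None:
    """Cancel queued tasks, shut the pool down, and clear the bookkeeping set."""
    cancelled = sum(1 for f in futures if f.cancel())  # only not-yet-started tasks can be cancelled
    logger.info("Cancelled %d/%d pending tasks", cancelled, len(futures))
    try:
        executor.shutdown(wait=True)
    except Exception:
        logger.exception("Executor shutdown error")
    finally:
        futures.clear()
```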
memos/mem_scheduler/{modules → general_modules}/rabbitmq_service.py:

```diff
@@ -8,8 +8,8 @@ from pathlib import Path
 from memos.configs.mem_scheduler import AuthConfig, RabbitMQConfig
 from memos.dependency import require_python_package
 from memos.log import get_logger
-from memos.mem_scheduler.
-from memos.mem_scheduler.
+from memos.mem_scheduler.general_modules.base import BaseSchedulerModule
+from memos.mem_scheduler.general_modules.misc import AutoDroppingQueue
 from memos.mem_scheduler.schemas.general_schemas import DIRECT_EXCHANGE_TYPE, FANOUT_EXCHANGE_TYPE


@@ -71,9 +71,9 @@ class RabbitMQSchedulerModule(BaseSchedulerModule):

         if config is None:
             if config_path is None and AuthConfig.default_config_exists():
-                auth_config = AuthConfig.
+                auth_config = AuthConfig.from_local_config()
             elif Path(config_path).exists():
-                auth_config = AuthConfig.
+                auth_config = AuthConfig.from_local_config(config_path=config_path)
             else:
                 logger.error("Fail to initialize auth_config")
                 return
```
memos/mem_scheduler/{modules → general_modules}/redis_service.py:

```diff
@@ -6,7 +6,7 @@ from typing import Any

 from memos.dependency import require_python_package
 from memos.log import get_logger
-from memos.mem_scheduler.
+from memos.mem_scheduler.general_modules.base import BaseSchedulerModule


 logger = get_logger(__name__)
```
memos/mem_scheduler/{modules → general_modules}/retriever.py:

```diff
@@ -2,8 +2,9 @@ from memos.configs.mem_scheduler import BaseSchedulerConfig
 from memos.llms.base import BaseLLM
 from memos.log import get_logger
 from memos.mem_cube.general import GeneralMemCube
-from memos.mem_scheduler.
+from memos.mem_scheduler.general_modules.base import BaseSchedulerModule
 from memos.mem_scheduler.schemas.general_schemas import (
+    TreeTextMemory_FINE_SEARCH_METHOD,
     TreeTextMemory_SEARCH_METHOD,
 )
 from memos.mem_scheduler.utils.filter_utils import (
@@ -32,7 +33,12 @@ class SchedulerRetriever(BaseSchedulerModule):
         self.process_llm = process_llm

     def search(
-        self,
+        self,
+        query: str,
+        mem_cube: GeneralMemCube,
+        top_k: int,
+        method: str = TreeTextMemory_SEARCH_METHOD,
+        info: dict | None = None,
     ) -> list[TextualMemoryItem]:
         """Search in text memory with the given query.

@@ -46,13 +52,21 @@ class SchedulerRetriever(BaseSchedulerModule):
         """
         text_mem_base = mem_cube.text_mem
         try:
-            if method
+            if method in [TreeTextMemory_SEARCH_METHOD, TreeTextMemory_FINE_SEARCH_METHOD]:
                 assert isinstance(text_mem_base, TreeTextMemory)
+                if info is None:
+                    logger.warning(
+                        "Please input 'info' when use tree.search so that "
+                        "the database would store the consume history."
+                    )
+                    info = {"user_id": "", "session_id": ""}
+
+                mode = "fast" if method == TreeTextMemory_SEARCH_METHOD else "fine"
                 results_long_term = text_mem_base.search(
-                    query=query, top_k=top_k, memory_type="LongTermMemory"
+                    query=query, top_k=top_k, memory_type="LongTermMemory", mode=mode, info=info
                 )
                 results_user = text_mem_base.search(
-                    query=query, top_k=top_k, memory_type="UserMemory"
+                    query=query, top_k=top_k, memory_type="UserMemory", mode=mode, info=info
                 )
                 results = results_long_term + results_user
             else:
```
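The retriever's `search` above now takes a `method` and an optional `info` dict, warning and substituting empty identifiers when `info` is missing, and mapping the method constant to the tree memory's fast or fine mode. A small sketch of that argument normalization follows; the constants are placeholders standing in for the ones defined in `memos.mem_scheduler.schemas.general_schemas`, and `normalize_search_args` is a hypothetical helper.

```python
import logging

logger = logging.getLogger(__name__)

# Placeholder values; the real constants live in memos.mem_scheduler.schemas.general_schemas.
TREE_SEARCH_METHOD = "tree_text_memory_search"
TREE_FINE_SEARCH_METHOD = "tree_text_memory_fine_search"


def normalize_search_args(method: str, info: dict | None) -> tuple[str, dict]:
    """Pick the tree-search mode for a method constant and default the consume-history info."""
    if info is None:
        logger.warning(
            "No 'info' supplied for tree search; the consume history "
            "will be stored with empty user/session identifiers."
        )
        info = {"user_id": "", "session_id": ""}
    mode = "fast" if method == TREE_SEARCH_METHOD else "fine"
    return mode, info
```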
memos/mem_scheduler/{modules → general_modules}/scheduler_logger.py:

```diff
@@ -2,7 +2,7 @@ from collections.abc import Callable

 from memos.log import get_logger
 from memos.mem_cube.general import GeneralMemCube
-from memos.mem_scheduler.
+from memos.mem_scheduler.general_modules.base import BaseSchedulerModule
 from memos.mem_scheduler.schemas.general_schemas import (
     ACTIVATION_MEMORY_TYPE,
     ADD_LABEL,
@@ -180,6 +180,11 @@ class SchedulerLoggerModule(BaseSchedulerModule):
             mem_cube_id=mem_cube_id,
             mem_cube=mem_cube,
         )
+        logger.info(
+            f"{len(added_memories)} {TEXT_MEMORY_TYPE} memorie(s) "
+            f"transformed to {ACTIVATION_MEMORY_TYPE} memories."
+        )
+
         log_message_b = self.create_autofilled_log_item(
             log_content=mem,
             label=label,
@@ -189,12 +194,13 @@ class SchedulerLoggerModule(BaseSchedulerModule):
             mem_cube_id=mem_cube_id,
             mem_cube=mem_cube,
         )
-        log_func_callback([log_message_a, log_message_b])
         logger.info(
-            f"{len(added_memories)} {
-            f"transformed to {
+            f"{len(added_memories)} {ACTIVATION_MEMORY_TYPE} memorie(s) "
+            f"transformed to {PARAMETER_MEMORY_TYPE} memories."
         )

+        log_func_callback([log_message_a, log_message_b])
+
     @log_exceptions(logger=logger)
     def log_adding_memory(
         self,
```