MemoryOS 2.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memoryos-2.0.3.dist-info/METADATA +418 -0
- memoryos-2.0.3.dist-info/RECORD +315 -0
- memoryos-2.0.3.dist-info/WHEEL +4 -0
- memoryos-2.0.3.dist-info/entry_points.txt +3 -0
- memoryos-2.0.3.dist-info/licenses/LICENSE +201 -0
- memos/__init__.py +20 -0
- memos/api/client.py +571 -0
- memos/api/config.py +1018 -0
- memos/api/context/dependencies.py +50 -0
- memos/api/exceptions.py +53 -0
- memos/api/handlers/__init__.py +62 -0
- memos/api/handlers/add_handler.py +158 -0
- memos/api/handlers/base_handler.py +194 -0
- memos/api/handlers/chat_handler.py +1401 -0
- memos/api/handlers/component_init.py +388 -0
- memos/api/handlers/config_builders.py +190 -0
- memos/api/handlers/feedback_handler.py +93 -0
- memos/api/handlers/formatters_handler.py +237 -0
- memos/api/handlers/memory_handler.py +316 -0
- memos/api/handlers/scheduler_handler.py +497 -0
- memos/api/handlers/search_handler.py +222 -0
- memos/api/handlers/suggestion_handler.py +117 -0
- memos/api/mcp_serve.py +614 -0
- memos/api/middleware/request_context.py +101 -0
- memos/api/product_api.py +38 -0
- memos/api/product_models.py +1206 -0
- memos/api/routers/__init__.py +1 -0
- memos/api/routers/product_router.py +477 -0
- memos/api/routers/server_router.py +394 -0
- memos/api/server_api.py +44 -0
- memos/api/start_api.py +433 -0
- memos/chunkers/__init__.py +4 -0
- memos/chunkers/base.py +24 -0
- memos/chunkers/charactertext_chunker.py +41 -0
- memos/chunkers/factory.py +24 -0
- memos/chunkers/markdown_chunker.py +62 -0
- memos/chunkers/sentence_chunker.py +54 -0
- memos/chunkers/simple_chunker.py +50 -0
- memos/cli.py +113 -0
- memos/configs/__init__.py +0 -0
- memos/configs/base.py +82 -0
- memos/configs/chunker.py +59 -0
- memos/configs/embedder.py +88 -0
- memos/configs/graph_db.py +236 -0
- memos/configs/internet_retriever.py +100 -0
- memos/configs/llm.py +151 -0
- memos/configs/mem_agent.py +54 -0
- memos/configs/mem_chat.py +81 -0
- memos/configs/mem_cube.py +105 -0
- memos/configs/mem_os.py +83 -0
- memos/configs/mem_reader.py +91 -0
- memos/configs/mem_scheduler.py +385 -0
- memos/configs/mem_user.py +70 -0
- memos/configs/memory.py +324 -0
- memos/configs/parser.py +38 -0
- memos/configs/reranker.py +18 -0
- memos/configs/utils.py +8 -0
- memos/configs/vec_db.py +80 -0
- memos/context/context.py +355 -0
- memos/dependency.py +52 -0
- memos/deprecation.py +262 -0
- memos/embedders/__init__.py +0 -0
- memos/embedders/ark.py +95 -0
- memos/embedders/base.py +106 -0
- memos/embedders/factory.py +29 -0
- memos/embedders/ollama.py +77 -0
- memos/embedders/sentence_transformer.py +49 -0
- memos/embedders/universal_api.py +51 -0
- memos/exceptions.py +30 -0
- memos/graph_dbs/__init__.py +0 -0
- memos/graph_dbs/base.py +274 -0
- memos/graph_dbs/factory.py +27 -0
- memos/graph_dbs/item.py +46 -0
- memos/graph_dbs/nebular.py +1794 -0
- memos/graph_dbs/neo4j.py +1942 -0
- memos/graph_dbs/neo4j_community.py +1058 -0
- memos/graph_dbs/polardb.py +5446 -0
- memos/hello_world.py +97 -0
- memos/llms/__init__.py +0 -0
- memos/llms/base.py +25 -0
- memos/llms/deepseek.py +13 -0
- memos/llms/factory.py +38 -0
- memos/llms/hf.py +443 -0
- memos/llms/hf_singleton.py +114 -0
- memos/llms/ollama.py +135 -0
- memos/llms/openai.py +222 -0
- memos/llms/openai_new.py +198 -0
- memos/llms/qwen.py +13 -0
- memos/llms/utils.py +14 -0
- memos/llms/vllm.py +218 -0
- memos/log.py +237 -0
- memos/mem_agent/base.py +19 -0
- memos/mem_agent/deepsearch_agent.py +391 -0
- memos/mem_agent/factory.py +36 -0
- memos/mem_chat/__init__.py +0 -0
- memos/mem_chat/base.py +30 -0
- memos/mem_chat/factory.py +21 -0
- memos/mem_chat/simple.py +200 -0
- memos/mem_cube/__init__.py +0 -0
- memos/mem_cube/base.py +30 -0
- memos/mem_cube/general.py +240 -0
- memos/mem_cube/navie.py +172 -0
- memos/mem_cube/utils.py +169 -0
- memos/mem_feedback/base.py +15 -0
- memos/mem_feedback/feedback.py +1192 -0
- memos/mem_feedback/simple_feedback.py +40 -0
- memos/mem_feedback/utils.py +230 -0
- memos/mem_os/client.py +5 -0
- memos/mem_os/core.py +1203 -0
- memos/mem_os/main.py +582 -0
- memos/mem_os/product.py +1608 -0
- memos/mem_os/product_server.py +455 -0
- memos/mem_os/utils/default_config.py +359 -0
- memos/mem_os/utils/format_utils.py +1403 -0
- memos/mem_os/utils/reference_utils.py +162 -0
- memos/mem_reader/__init__.py +0 -0
- memos/mem_reader/base.py +47 -0
- memos/mem_reader/factory.py +53 -0
- memos/mem_reader/memory.py +298 -0
- memos/mem_reader/multi_modal_struct.py +965 -0
- memos/mem_reader/read_multi_modal/__init__.py +43 -0
- memos/mem_reader/read_multi_modal/assistant_parser.py +311 -0
- memos/mem_reader/read_multi_modal/base.py +273 -0
- memos/mem_reader/read_multi_modal/file_content_parser.py +826 -0
- memos/mem_reader/read_multi_modal/image_parser.py +359 -0
- memos/mem_reader/read_multi_modal/multi_modal_parser.py +252 -0
- memos/mem_reader/read_multi_modal/string_parser.py +139 -0
- memos/mem_reader/read_multi_modal/system_parser.py +327 -0
- memos/mem_reader/read_multi_modal/text_content_parser.py +131 -0
- memos/mem_reader/read_multi_modal/tool_parser.py +210 -0
- memos/mem_reader/read_multi_modal/user_parser.py +218 -0
- memos/mem_reader/read_multi_modal/utils.py +358 -0
- memos/mem_reader/simple_struct.py +912 -0
- memos/mem_reader/strategy_struct.py +163 -0
- memos/mem_reader/utils.py +157 -0
- memos/mem_scheduler/__init__.py +0 -0
- memos/mem_scheduler/analyzer/__init__.py +0 -0
- memos/mem_scheduler/analyzer/api_analyzer.py +714 -0
- memos/mem_scheduler/analyzer/eval_analyzer.py +219 -0
- memos/mem_scheduler/analyzer/mos_for_test_scheduler.py +571 -0
- memos/mem_scheduler/analyzer/scheduler_for_eval.py +280 -0
- memos/mem_scheduler/base_scheduler.py +1319 -0
- memos/mem_scheduler/general_modules/__init__.py +0 -0
- memos/mem_scheduler/general_modules/api_misc.py +137 -0
- memos/mem_scheduler/general_modules/base.py +80 -0
- memos/mem_scheduler/general_modules/init_components_for_scheduler.py +425 -0
- memos/mem_scheduler/general_modules/misc.py +313 -0
- memos/mem_scheduler/general_modules/scheduler_logger.py +389 -0
- memos/mem_scheduler/general_modules/task_threads.py +315 -0
- memos/mem_scheduler/general_scheduler.py +1495 -0
- memos/mem_scheduler/memory_manage_modules/__init__.py +5 -0
- memos/mem_scheduler/memory_manage_modules/memory_filter.py +306 -0
- memos/mem_scheduler/memory_manage_modules/retriever.py +547 -0
- memos/mem_scheduler/monitors/__init__.py +0 -0
- memos/mem_scheduler/monitors/dispatcher_monitor.py +366 -0
- memos/mem_scheduler/monitors/general_monitor.py +394 -0
- memos/mem_scheduler/monitors/task_schedule_monitor.py +254 -0
- memos/mem_scheduler/optimized_scheduler.py +410 -0
- memos/mem_scheduler/orm_modules/__init__.py +0 -0
- memos/mem_scheduler/orm_modules/api_redis_model.py +518 -0
- memos/mem_scheduler/orm_modules/base_model.py +729 -0
- memos/mem_scheduler/orm_modules/monitor_models.py +261 -0
- memos/mem_scheduler/orm_modules/redis_model.py +699 -0
- memos/mem_scheduler/scheduler_factory.py +23 -0
- memos/mem_scheduler/schemas/__init__.py +0 -0
- memos/mem_scheduler/schemas/analyzer_schemas.py +52 -0
- memos/mem_scheduler/schemas/api_schemas.py +233 -0
- memos/mem_scheduler/schemas/general_schemas.py +55 -0
- memos/mem_scheduler/schemas/message_schemas.py +173 -0
- memos/mem_scheduler/schemas/monitor_schemas.py +406 -0
- memos/mem_scheduler/schemas/task_schemas.py +132 -0
- memos/mem_scheduler/task_schedule_modules/__init__.py +0 -0
- memos/mem_scheduler/task_schedule_modules/dispatcher.py +740 -0
- memos/mem_scheduler/task_schedule_modules/local_queue.py +247 -0
- memos/mem_scheduler/task_schedule_modules/orchestrator.py +74 -0
- memos/mem_scheduler/task_schedule_modules/redis_queue.py +1385 -0
- memos/mem_scheduler/task_schedule_modules/task_queue.py +162 -0
- memos/mem_scheduler/utils/__init__.py +0 -0
- memos/mem_scheduler/utils/api_utils.py +77 -0
- memos/mem_scheduler/utils/config_utils.py +100 -0
- memos/mem_scheduler/utils/db_utils.py +50 -0
- memos/mem_scheduler/utils/filter_utils.py +176 -0
- memos/mem_scheduler/utils/metrics.py +125 -0
- memos/mem_scheduler/utils/misc_utils.py +290 -0
- memos/mem_scheduler/utils/monitor_event_utils.py +67 -0
- memos/mem_scheduler/utils/status_tracker.py +229 -0
- memos/mem_scheduler/webservice_modules/__init__.py +0 -0
- memos/mem_scheduler/webservice_modules/rabbitmq_service.py +485 -0
- memos/mem_scheduler/webservice_modules/redis_service.py +380 -0
- memos/mem_user/factory.py +94 -0
- memos/mem_user/mysql_persistent_user_manager.py +271 -0
- memos/mem_user/mysql_user_manager.py +502 -0
- memos/mem_user/persistent_factory.py +98 -0
- memos/mem_user/persistent_user_manager.py +260 -0
- memos/mem_user/redis_persistent_user_manager.py +225 -0
- memos/mem_user/user_manager.py +488 -0
- memos/memories/__init__.py +0 -0
- memos/memories/activation/__init__.py +0 -0
- memos/memories/activation/base.py +42 -0
- memos/memories/activation/item.py +56 -0
- memos/memories/activation/kv.py +292 -0
- memos/memories/activation/vllmkv.py +219 -0
- memos/memories/base.py +19 -0
- memos/memories/factory.py +42 -0
- memos/memories/parametric/__init__.py +0 -0
- memos/memories/parametric/base.py +19 -0
- memos/memories/parametric/item.py +11 -0
- memos/memories/parametric/lora.py +41 -0
- memos/memories/textual/__init__.py +0 -0
- memos/memories/textual/base.py +92 -0
- memos/memories/textual/general.py +236 -0
- memos/memories/textual/item.py +304 -0
- memos/memories/textual/naive.py +187 -0
- memos/memories/textual/prefer_text_memory/__init__.py +0 -0
- memos/memories/textual/prefer_text_memory/adder.py +504 -0
- memos/memories/textual/prefer_text_memory/config.py +106 -0
- memos/memories/textual/prefer_text_memory/extractor.py +221 -0
- memos/memories/textual/prefer_text_memory/factory.py +85 -0
- memos/memories/textual/prefer_text_memory/retrievers.py +177 -0
- memos/memories/textual/prefer_text_memory/spliter.py +132 -0
- memos/memories/textual/prefer_text_memory/utils.py +93 -0
- memos/memories/textual/preference.py +344 -0
- memos/memories/textual/simple_preference.py +161 -0
- memos/memories/textual/simple_tree.py +69 -0
- memos/memories/textual/tree.py +459 -0
- memos/memories/textual/tree_text_memory/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/organize/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/organize/handler.py +184 -0
- memos/memories/textual/tree_text_memory/organize/manager.py +518 -0
- memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +238 -0
- memos/memories/textual/tree_text_memory/organize/reorganizer.py +622 -0
- memos/memories/textual/tree_text_memory/retrieve/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/retrieve/advanced_searcher.py +364 -0
- memos/memories/textual/tree_text_memory/retrieve/bm25_util.py +186 -0
- memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +419 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +270 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +102 -0
- memos/memories/textual/tree_text_memory/retrieve/reasoner.py +61 -0
- memos/memories/textual/tree_text_memory/retrieve/recall.py +497 -0
- memos/memories/textual/tree_text_memory/retrieve/reranker.py +111 -0
- memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +16 -0
- memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py +472 -0
- memos/memories/textual/tree_text_memory/retrieve/searcher.py +848 -0
- memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +135 -0
- memos/memories/textual/tree_text_memory/retrieve/utils.py +54 -0
- memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +387 -0
- memos/memos_tools/dinding_report_bot.py +453 -0
- memos/memos_tools/lockfree_dict.py +120 -0
- memos/memos_tools/notification_service.py +44 -0
- memos/memos_tools/notification_utils.py +142 -0
- memos/memos_tools/singleton.py +174 -0
- memos/memos_tools/thread_safe_dict.py +310 -0
- memos/memos_tools/thread_safe_dict_segment.py +382 -0
- memos/multi_mem_cube/__init__.py +0 -0
- memos/multi_mem_cube/composite_cube.py +86 -0
- memos/multi_mem_cube/single_cube.py +874 -0
- memos/multi_mem_cube/views.py +54 -0
- memos/parsers/__init__.py +0 -0
- memos/parsers/base.py +15 -0
- memos/parsers/factory.py +21 -0
- memos/parsers/markitdown.py +28 -0
- memos/reranker/__init__.py +4 -0
- memos/reranker/base.py +25 -0
- memos/reranker/concat.py +103 -0
- memos/reranker/cosine_local.py +102 -0
- memos/reranker/factory.py +72 -0
- memos/reranker/http_bge.py +324 -0
- memos/reranker/http_bge_strategy.py +327 -0
- memos/reranker/noop.py +19 -0
- memos/reranker/strategies/__init__.py +4 -0
- memos/reranker/strategies/base.py +61 -0
- memos/reranker/strategies/concat_background.py +94 -0
- memos/reranker/strategies/concat_docsource.py +110 -0
- memos/reranker/strategies/dialogue_common.py +109 -0
- memos/reranker/strategies/factory.py +31 -0
- memos/reranker/strategies/single_turn.py +107 -0
- memos/reranker/strategies/singleturn_outmem.py +98 -0
- memos/settings.py +10 -0
- memos/templates/__init__.py +0 -0
- memos/templates/advanced_search_prompts.py +211 -0
- memos/templates/cloud_service_prompt.py +107 -0
- memos/templates/instruction_completion.py +66 -0
- memos/templates/mem_agent_prompts.py +85 -0
- memos/templates/mem_feedback_prompts.py +822 -0
- memos/templates/mem_reader_prompts.py +1096 -0
- memos/templates/mem_reader_strategy_prompts.py +238 -0
- memos/templates/mem_scheduler_prompts.py +626 -0
- memos/templates/mem_search_prompts.py +93 -0
- memos/templates/mos_prompts.py +403 -0
- memos/templates/prefer_complete_prompt.py +735 -0
- memos/templates/tool_mem_prompts.py +139 -0
- memos/templates/tree_reorganize_prompts.py +230 -0
- memos/types/__init__.py +34 -0
- memos/types/general_types.py +151 -0
- memos/types/openai_chat_completion_types/__init__.py +15 -0
- memos/types/openai_chat_completion_types/chat_completion_assistant_message_param.py +56 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_image_param.py +27 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_input_audio_param.py +23 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_param.py +43 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_refusal_param.py +16 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_text_param.py +16 -0
- memos/types/openai_chat_completion_types/chat_completion_message_custom_tool_call_param.py +27 -0
- memos/types/openai_chat_completion_types/chat_completion_message_function_tool_call_param.py +32 -0
- memos/types/openai_chat_completion_types/chat_completion_message_param.py +18 -0
- memos/types/openai_chat_completion_types/chat_completion_message_tool_call_union_param.py +15 -0
- memos/types/openai_chat_completion_types/chat_completion_system_message_param.py +36 -0
- memos/types/openai_chat_completion_types/chat_completion_tool_message_param.py +30 -0
- memos/types/openai_chat_completion_types/chat_completion_user_message_param.py +34 -0
- memos/utils.py +123 -0
- memos/vec_dbs/__init__.py +0 -0
- memos/vec_dbs/base.py +117 -0
- memos/vec_dbs/factory.py +23 -0
- memos/vec_dbs/item.py +50 -0
- memos/vec_dbs/milvus.py +654 -0
- memos/vec_dbs/qdrant.py +355 -0
|
@@ -0,0 +1,394 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
from threading import Lock
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from sqlalchemy.engine import Engine
|
|
6
|
+
|
|
7
|
+
from memos.configs.mem_scheduler import BaseSchedulerConfig
|
|
8
|
+
from memos.llms.base import BaseLLM
|
|
9
|
+
from memos.log import get_logger
|
|
10
|
+
from memos.mem_cube.general import GeneralMemCube
|
|
11
|
+
from memos.mem_scheduler.general_modules.base import BaseSchedulerModule
|
|
12
|
+
from memos.mem_scheduler.orm_modules.base_model import BaseDBManager
|
|
13
|
+
from memos.mem_scheduler.orm_modules.monitor_models import (
|
|
14
|
+
DBManagerForMemoryMonitorManager,
|
|
15
|
+
DBManagerForQueryMonitorQueue,
|
|
16
|
+
)
|
|
17
|
+
from memos.mem_scheduler.schemas.general_schemas import (
|
|
18
|
+
DEFAULT_ACTIVATION_MEM_MONITOR_SIZE_LIMIT,
|
|
19
|
+
DEFAULT_WEIGHT_VECTOR_FOR_RANKING,
|
|
20
|
+
DEFAULT_WORKING_MEM_MONITOR_SIZE_LIMIT,
|
|
21
|
+
MONITOR_ACTIVATION_MEMORY_TYPE,
|
|
22
|
+
MONITOR_WORKING_MEMORY_TYPE,
|
|
23
|
+
)
|
|
24
|
+
from memos.mem_scheduler.schemas.monitor_schemas import (
|
|
25
|
+
MemoryMonitorItem,
|
|
26
|
+
MemoryMonitorManager,
|
|
27
|
+
QueryMonitorQueue,
|
|
28
|
+
)
|
|
29
|
+
from memos.mem_scheduler.utils.db_utils import get_utc_now
|
|
30
|
+
from memos.mem_scheduler.utils.misc_utils import extract_json_obj
|
|
31
|
+
from memos.memories.textual.tree import TreeTextMemory
|
|
32
|
+
from memos.types import MemCubeID, UserID
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
# Module-level logger, namespaced to this module's import path.
logger = get_logger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class SchedulerGeneralMonitor(BaseSchedulerModule):
|
|
39
|
+
"""Monitors and manages scheduling operations with LLM integration."""
|
|
40
|
+
|
|
41
|
+
def __init__(
|
|
42
|
+
self, process_llm: BaseLLM, config: BaseSchedulerConfig, db_engine: Engine | None = None
|
|
43
|
+
):
|
|
44
|
+
super().__init__()
|
|
45
|
+
|
|
46
|
+
# hyper-parameters
|
|
47
|
+
self.config: BaseSchedulerConfig = config
|
|
48
|
+
self.act_mem_update_interval = self.config.get("act_mem_update_interval", 30)
|
|
49
|
+
self.query_trigger_interval = self.config.get("query_trigger_interval", 10)
|
|
50
|
+
|
|
51
|
+
# Partial Retention Strategy
|
|
52
|
+
self.partial_retention_number = 2
|
|
53
|
+
self.working_mem_monitor_capacity = self.config.get(
|
|
54
|
+
"working_mem_monitor_capacity", DEFAULT_WORKING_MEM_MONITOR_SIZE_LIMIT
|
|
55
|
+
)
|
|
56
|
+
self.activation_mem_monitor_capacity = self.config.get(
|
|
57
|
+
"activation_mem_monitor_capacity", DEFAULT_ACTIVATION_MEM_MONITOR_SIZE_LIMIT
|
|
58
|
+
)
|
|
59
|
+
|
|
60
|
+
# ORM-based monitor managers
|
|
61
|
+
self.db_engine = db_engine
|
|
62
|
+
if self.db_engine is None:
|
|
63
|
+
logger.warning(
|
|
64
|
+
"No database engine provided; falling back to default temporary SQLite engine. "
|
|
65
|
+
"This is intended for testing only. Consider providing a configured engine for production use."
|
|
66
|
+
)
|
|
67
|
+
self.db_engine = BaseDBManager.create_default_sqlite_engine()
|
|
68
|
+
|
|
69
|
+
self.query_monitors: dict[UserID, dict[MemCubeID, DBManagerForQueryMonitorQueue]] = {}
|
|
70
|
+
self.working_memory_monitors: dict[
|
|
71
|
+
UserID, dict[MemCubeID, DBManagerForMemoryMonitorManager]
|
|
72
|
+
] = {}
|
|
73
|
+
self.activation_memory_monitors: dict[
|
|
74
|
+
UserID, dict[MemCubeID, DBManagerForMemoryMonitorManager]
|
|
75
|
+
] = {}
|
|
76
|
+
|
|
77
|
+
# Lifecycle monitor
|
|
78
|
+
self.last_activation_mem_update_time = get_utc_now()
|
|
79
|
+
self.last_query_consume_time = get_utc_now()
|
|
80
|
+
|
|
81
|
+
self._register_lock = Lock()
|
|
82
|
+
self._process_llm = process_llm
|
|
83
|
+
|
|
84
|
+
def extract_query_keywords(self, query: str) -> list:
|
|
85
|
+
"""Extracts core keywords from a user query based on specific semantic rules."""
|
|
86
|
+
prompt_name = "query_keywords_extraction"
|
|
87
|
+
prompt = self.build_prompt(
|
|
88
|
+
template_name=prompt_name,
|
|
89
|
+
query=query,
|
|
90
|
+
)
|
|
91
|
+
llm_response = self._process_llm.generate([{"role": "user", "content": prompt}])
|
|
92
|
+
try:
|
|
93
|
+
# Parse JSON output from LLM response
|
|
94
|
+
keywords = extract_json_obj(llm_response)
|
|
95
|
+
assert isinstance(keywords, list)
|
|
96
|
+
except Exception as e:
|
|
97
|
+
logger.error(
|
|
98
|
+
f"Failed to parse keywords from LLM response: {llm_response}. Error: {e!s}"
|
|
99
|
+
)
|
|
100
|
+
keywords = [query]
|
|
101
|
+
return keywords
|
|
102
|
+
|
|
103
|
+
def register_query_monitor_if_not_exists(
|
|
104
|
+
self,
|
|
105
|
+
user_id: UserID | str,
|
|
106
|
+
mem_cube_id: MemCubeID | str,
|
|
107
|
+
) -> None:
|
|
108
|
+
# First check (lock-free, fast path)
|
|
109
|
+
if user_id in self.query_monitors and mem_cube_id in self.query_monitors[user_id]:
|
|
110
|
+
return
|
|
111
|
+
|
|
112
|
+
# Second check (with lock, ensures uniqueness)
|
|
113
|
+
with self._register_lock:
|
|
114
|
+
if user_id not in self.query_monitors:
|
|
115
|
+
self.query_monitors[user_id] = {}
|
|
116
|
+
if mem_cube_id not in self.query_monitors[user_id]:
|
|
117
|
+
if self.db_engine:
|
|
118
|
+
# Create ORM manager with initial QueryMonitorQueue
|
|
119
|
+
initial_queue = QueryMonitorQueue(maxsize=self.config.context_window_size)
|
|
120
|
+
db_manager = DBManagerForQueryMonitorQueue(
|
|
121
|
+
engine=self.db_engine,
|
|
122
|
+
user_id=str(user_id),
|
|
123
|
+
mem_cube_id=str(mem_cube_id),
|
|
124
|
+
obj=initial_queue,
|
|
125
|
+
)
|
|
126
|
+
self.query_monitors[user_id][mem_cube_id] = db_manager
|
|
127
|
+
else:
|
|
128
|
+
# Fallback to in-memory (this shouldn't happen with proper config)
|
|
129
|
+
logger.warning("ORM persistence disabled, using in-memory fallback")
|
|
130
|
+
# For backward compatibility, we'll need to handle this case differently
|
|
131
|
+
raise RuntimeError("ORM persistence is required but not properly configured")
|
|
132
|
+
|
|
133
|
+
def register_memory_manager_if_not_exists(
|
|
134
|
+
self,
|
|
135
|
+
user_id: UserID | str,
|
|
136
|
+
mem_cube_id: MemCubeID | str,
|
|
137
|
+
memory_monitors: dict[UserID, dict[MemCubeID, DBManagerForMemoryMonitorManager]],
|
|
138
|
+
max_capacity: int,
|
|
139
|
+
) -> None:
|
|
140
|
+
"""
|
|
141
|
+
Register a new MemoryMonitorManager ORM manager for the given user and memory cube if it doesn't exist.
|
|
142
|
+
Thread-safe implementation using double-checked locking pattern.
|
|
143
|
+
|
|
144
|
+
Checks if a MemoryMonitorManager ORM manager already exists for the specified user_id and mem_cube_id.
|
|
145
|
+
If not, creates a new ORM manager with appropriate capacity settings and registers it.
|
|
146
|
+
|
|
147
|
+
Args:
|
|
148
|
+
user_id: The ID of the user to associate with the memory manager
|
|
149
|
+
mem_cube_id: The ID of the memory cube to monitor
|
|
150
|
+
memory_monitors: Dictionary storing existing memory monitor ORM managers
|
|
151
|
+
max_capacity: Maximum capacity for the new memory monitor manager
|
|
152
|
+
"""
|
|
153
|
+
# First check (lock-free, fast path)
|
|
154
|
+
# Quickly verify existence without lock overhead
|
|
155
|
+
if user_id in memory_monitors and mem_cube_id in memory_monitors[user_id]:
|
|
156
|
+
logger.info(
|
|
157
|
+
f"MemoryMonitorManager ORM manager already exists for user_id={user_id}, "
|
|
158
|
+
f"mem_cube_id={mem_cube_id} in the provided memory_monitors dictionary"
|
|
159
|
+
)
|
|
160
|
+
return
|
|
161
|
+
|
|
162
|
+
# Second check (with lock, ensures uniqueness)
|
|
163
|
+
# Acquire lock before modification and verify again to prevent race conditions
|
|
164
|
+
with self._register_lock:
|
|
165
|
+
# Re-check after acquiring lock, as another thread might have created it
|
|
166
|
+
if user_id in memory_monitors and mem_cube_id in memory_monitors[user_id]:
|
|
167
|
+
logger.info(
|
|
168
|
+
f"MemoryMonitorManager ORM manager already exists for user_id={user_id}, "
|
|
169
|
+
f"mem_cube_id={mem_cube_id} in the provided memory_monitors dictionary"
|
|
170
|
+
)
|
|
171
|
+
return
|
|
172
|
+
|
|
173
|
+
if self.db_engine:
|
|
174
|
+
# Initialize MemoryMonitorManager with user ID, memory cube ID, and max capacity
|
|
175
|
+
monitor_manager = MemoryMonitorManager(
|
|
176
|
+
user_id=user_id, mem_cube_id=mem_cube_id, max_capacity=max_capacity
|
|
177
|
+
)
|
|
178
|
+
|
|
179
|
+
# Create ORM manager
|
|
180
|
+
db_manager = DBManagerForMemoryMonitorManager(
|
|
181
|
+
engine=self.db_engine,
|
|
182
|
+
user_id=str(user_id),
|
|
183
|
+
mem_cube_id=str(mem_cube_id),
|
|
184
|
+
obj=monitor_manager,
|
|
185
|
+
)
|
|
186
|
+
|
|
187
|
+
# Safely register the new ORM manager in the nested dictionary structure
|
|
188
|
+
memory_monitors.setdefault(user_id, {})[mem_cube_id] = db_manager
|
|
189
|
+
logger.info(
|
|
190
|
+
f"Registered new MemoryMonitorManager ORM manager for user_id={user_id},"
|
|
191
|
+
f" mem_cube_id={mem_cube_id} with max_capacity={max_capacity}"
|
|
192
|
+
)
|
|
193
|
+
else:
|
|
194
|
+
raise RuntimeError("ORM persistence is required but not properly configured")
|
|
195
|
+
|
|
196
|
+
def update_working_memory_monitors(
|
|
197
|
+
self,
|
|
198
|
+
new_working_memory_monitors: list[MemoryMonitorItem],
|
|
199
|
+
user_id: str,
|
|
200
|
+
mem_cube_id: str,
|
|
201
|
+
mem_cube: GeneralMemCube,
|
|
202
|
+
):
|
|
203
|
+
text_mem_base = mem_cube.text_mem
|
|
204
|
+
|
|
205
|
+
if isinstance(text_mem_base, TreeTextMemory):
|
|
206
|
+
self.working_mem_monitor_capacity = min(
|
|
207
|
+
DEFAULT_WORKING_MEM_MONITOR_SIZE_LIMIT,
|
|
208
|
+
(
|
|
209
|
+
int(text_mem_base.memory_manager.memory_size["WorkingMemory"])
|
|
210
|
+
+ self.partial_retention_number
|
|
211
|
+
),
|
|
212
|
+
)
|
|
213
|
+
else:
|
|
214
|
+
# Fallback for NaiveTextMemory and others
|
|
215
|
+
self.working_mem_monitor_capacity = DEFAULT_WORKING_MEM_MONITOR_SIZE_LIMIT
|
|
216
|
+
|
|
217
|
+
# register monitors
|
|
218
|
+
self.register_memory_manager_if_not_exists(
|
|
219
|
+
user_id=user_id,
|
|
220
|
+
mem_cube_id=mem_cube_id,
|
|
221
|
+
memory_monitors=self.working_memory_monitors,
|
|
222
|
+
max_capacity=self.working_mem_monitor_capacity,
|
|
223
|
+
)
|
|
224
|
+
|
|
225
|
+
# Get the ORM manager and update memories with database sync
|
|
226
|
+
db_manager = self.working_memory_monitors[user_id][mem_cube_id]
|
|
227
|
+
db_manager.obj.update_memories(
|
|
228
|
+
new_memory_monitors=new_working_memory_monitors,
|
|
229
|
+
partial_retention_number=self.partial_retention_number,
|
|
230
|
+
)
|
|
231
|
+
# Sync with database
|
|
232
|
+
db_manager.sync_with_orm(size_limit=self.working_mem_monitor_capacity)
|
|
233
|
+
|
|
234
|
+
def update_activation_memory_monitors(
|
|
235
|
+
self, user_id: str, mem_cube_id: str, mem_cube: GeneralMemCube
|
|
236
|
+
):
|
|
237
|
+
self.register_memory_manager_if_not_exists(
|
|
238
|
+
user_id=user_id,
|
|
239
|
+
mem_cube_id=mem_cube_id,
|
|
240
|
+
memory_monitors=self.activation_memory_monitors,
|
|
241
|
+
max_capacity=self.activation_mem_monitor_capacity,
|
|
242
|
+
)
|
|
243
|
+
|
|
244
|
+
# === update activation memory monitors ===
|
|
245
|
+
# Sort by importance_score in descending order and take top k
|
|
246
|
+
working_db_manager = self.working_memory_monitors[user_id][mem_cube_id]
|
|
247
|
+
top_k_memories = sorted(
|
|
248
|
+
working_db_manager.obj.memories,
|
|
249
|
+
key=lambda m: m.get_importance_score(weight_vector=DEFAULT_WEIGHT_VECTOR_FOR_RANKING),
|
|
250
|
+
reverse=True,
|
|
251
|
+
)[: self.activation_mem_monitor_capacity]
|
|
252
|
+
|
|
253
|
+
# Update the activation memory monitors with these important memories
|
|
254
|
+
activation_db_manager = self.activation_memory_monitors[user_id][mem_cube_id]
|
|
255
|
+
activation_db_manager.obj.update_memories(
|
|
256
|
+
new_memory_monitors=top_k_memories,
|
|
257
|
+
partial_retention_number=self.partial_retention_number,
|
|
258
|
+
)
|
|
259
|
+
# Sync with database
|
|
260
|
+
activation_db_manager.sync_with_orm(size_limit=self.activation_mem_monitor_capacity)
|
|
261
|
+
|
|
262
|
+
def timed_trigger(self, last_time: datetime, interval_seconds: float) -> bool:
|
|
263
|
+
now = get_utc_now()
|
|
264
|
+
elapsed = (now - last_time).total_seconds()
|
|
265
|
+
if elapsed >= interval_seconds:
|
|
266
|
+
return True
|
|
267
|
+
logger.info(f"Time trigger not ready, {elapsed:.1f}s elapsed (needs {interval_seconds}s)")
|
|
268
|
+
return False
|
|
269
|
+
|
|
270
|
+
def get_monitor_memories(
|
|
271
|
+
self,
|
|
272
|
+
user_id: str,
|
|
273
|
+
mem_cube_id: str,
|
|
274
|
+
memory_type: str = MONITOR_WORKING_MEMORY_TYPE,
|
|
275
|
+
top_k: int = 10,
|
|
276
|
+
) -> list[str]:
|
|
277
|
+
"""Retrieves memory items managed by the scheduler, sorted by recording count.
|
|
278
|
+
|
|
279
|
+
Args:
|
|
280
|
+
user_id: Unique identifier of the user
|
|
281
|
+
mem_cube_id: Unique identifier of the memory cube
|
|
282
|
+
memory_type: Type of memory to retrieve (MONITOR_WORKING_MEMORY_TYPE or
|
|
283
|
+
MONITOR_ACTIVATION_MEMORY_TYPE)
|
|
284
|
+
top_k: Maximum number of memory items to return (default: 10)
|
|
285
|
+
|
|
286
|
+
Returns:
|
|
287
|
+
List of memory texts, sorted by recording count in descending order.
|
|
288
|
+
Returns empty list if no MemoryMonitorManager exists for the given parameters.
|
|
289
|
+
"""
|
|
290
|
+
# Select the appropriate monitor dictionary based on memory_type
|
|
291
|
+
if memory_type == MONITOR_WORKING_MEMORY_TYPE:
|
|
292
|
+
monitor_dict = self.working_memory_monitors
|
|
293
|
+
elif memory_type == MONITOR_ACTIVATION_MEMORY_TYPE:
|
|
294
|
+
monitor_dict = self.activation_memory_monitors
|
|
295
|
+
else:
|
|
296
|
+
logger.warning(f"Invalid memory type: {memory_type}")
|
|
297
|
+
return []
|
|
298
|
+
|
|
299
|
+
if user_id not in monitor_dict or mem_cube_id not in monitor_dict[user_id]:
|
|
300
|
+
logger.warning(
|
|
301
|
+
f"MemoryMonitorManager not found for user {user_id}, "
|
|
302
|
+
f"mem_cube {mem_cube_id}, type {memory_type}"
|
|
303
|
+
)
|
|
304
|
+
return []
|
|
305
|
+
|
|
306
|
+
db_manager: DBManagerForMemoryMonitorManager = monitor_dict[user_id][mem_cube_id]
|
|
307
|
+
# Load latest data from database before accessing
|
|
308
|
+
db_manager.sync_with_orm()
|
|
309
|
+
|
|
310
|
+
# Sort memories by recording_count in descending order and return top_k items
|
|
311
|
+
sorted_memory_monitors = db_manager.obj.get_sorted_mem_monitors(reverse=True)
|
|
312
|
+
sorted_text_memories = [m.memory_text for m in sorted_memory_monitors[:top_k]]
|
|
313
|
+
return sorted_text_memories
|
|
314
|
+
|
|
315
|
+
def get_monitors_info(self, user_id: str, mem_cube_id: str) -> dict[str, Any]:
|
|
316
|
+
"""Retrieves monitoring information for a specific memory cube."""
|
|
317
|
+
if (
|
|
318
|
+
user_id not in self.working_memory_monitors
|
|
319
|
+
or mem_cube_id not in self.working_memory_monitors[user_id]
|
|
320
|
+
):
|
|
321
|
+
logger.warning(
|
|
322
|
+
f"MemoryMonitorManager not found for user {user_id}, mem_cube {mem_cube_id}"
|
|
323
|
+
)
|
|
324
|
+
return {}
|
|
325
|
+
|
|
326
|
+
info_dict = {}
|
|
327
|
+
for db_manager in [
|
|
328
|
+
self.working_memory_monitors[user_id][mem_cube_id],
|
|
329
|
+
self.activation_memory_monitors[user_id][mem_cube_id],
|
|
330
|
+
]:
|
|
331
|
+
# Sync with database to get latest data
|
|
332
|
+
db_manager.sync_with_orm()
|
|
333
|
+
manager = db_manager.obj
|
|
334
|
+
info_dict[str(type(manager))] = {
|
|
335
|
+
"user_id": user_id,
|
|
336
|
+
"mem_cube_id": mem_cube_id,
|
|
337
|
+
"memory_count": manager.memory_size,
|
|
338
|
+
"max_capacity": manager.max_capacity,
|
|
339
|
+
"top_memories": self.get_monitor_memories(user_id, mem_cube_id, top_k=1),
|
|
340
|
+
}
|
|
341
|
+
return info_dict
|
|
342
|
+
|
|
343
|
+
def detect_intent(
    self,
    q_list: list[str],
    text_working_memory: list[str],
    prompt_name="intent_recognizing",
) -> dict[str, Any]:
    """Classify whether the given queries require a memory retrieval.

    Renders the intent-recognition prompt from the queries and the current
    working memory, asks the process LLM, and parses the reply as a JSON
    object.  On any parsing/validation failure a conservative fallback is
    returned: no retrieval triggered, queries echoed as missing evidence.
    """
    rendered_prompt = self.build_prompt(
        template_name=prompt_name,
        q_list=q_list,
        working_memory_list=text_working_memory,
    )
    # NOTE: `response` is deliberately rebound in place so the error log
    # below shows the parsed object when parsing succeeded but the
    # required keys were absent, and the raw reply otherwise.
    response = self._process_llm.generate([{"role": "user", "content": rendered_prompt}])
    try:
        response = extract_json_obj(response)
        # Callers depend on both keys being present.
        assert ("trigger_retrieval" in response) and ("missing_evidences" in response)
    except Exception:
        logger.error(f"Fail to extract json dict from response: {response}")
        response = {"trigger_retrieval": False, "missing_evidences": q_list}
    return response
|
|
365
|
+
|
|
366
|
+
def close(self):
    """Close all database connections and clean up resources.

    Walks every per-user DB manager held by the query, working-memory, and
    activation-memory monitor registries (in that order) and closes each
    one.  A failure to close a single manager is logged and does not stop
    the remaining managers from being closed.
    """
    logger.info("Closing database connections for all monitors")

    # (label used in error messages, registry of {user_id: {cube_id: db_manager}})
    registries = [
        ("query monitor", self.query_monitors),
        ("working memory monitor", self.working_memory_monitors),
        ("activation memory monitor", self.activation_memory_monitors),
    ]
    for label, registry in registries:
        for user_monitors in registry.values():
            for db_manager in user_monitors.values():
                try:
                    db_manager.close()
                except Exception as e:
                    # Best-effort cleanup: log and keep closing the rest.
                    logger.error(f"Error closing {label} DB manager: {e}")

    logger.info("All database connections closed")
|
|
@@ -0,0 +1,254 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from memos.log import get_logger
|
|
4
|
+
from memos.mem_scheduler.task_schedule_modules.local_queue import SchedulerLocalQueue
|
|
5
|
+
from memos.mem_scheduler.task_schedule_modules.redis_queue import SchedulerRedisQueue
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
logger = get_logger(__name__)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class TaskScheduleMonitor:
    """
    Monitor for task scheduling queue status.

    Initialize with the underlying `memos_message_queue` implementation
    (either SchedulerRedisQueue or SchedulerLocalQueue) and optionally a
    dispatcher for local running task counts.
    """

    def __init__(
        self,
        memos_message_queue: SchedulerRedisQueue | SchedulerLocalQueue,
        dispatcher: object | None = None,
        get_status_parallel: bool = False,
    ) -> None:
        # Queue backend whose depth/consumer state is inspected.
        self.queue = memos_message_queue
        # Optional dispatcher used to report locally running task counts.
        self.dispatcher = dispatcher
        # When True, Redis stream inspection fans out via asyncio.to_thread.
        self.get_status_parallel = get_status_parallel

    @staticmethod
    def init_task_status() -> dict:
        """Return a zeroed status record with running/remaining/pending counts."""
        return {"running": 0, "remaining": 0, "pending": 0}

    def get_tasks_status(self) -> dict:
        """Return queue status, dispatching on the concrete queue type.

        Raises:
            NotImplementedError: for queue types other than the Redis or
                local scheduler queues.
        """
        if isinstance(self.queue, SchedulerRedisQueue):
            return self._get_redis_tasks_status()
        elif isinstance(self.queue, SchedulerLocalQueue):
            return self._get_local_tasks_status()
        else:
            logger.error(
                f"Unsupported queue type for TaskScheduleMonitor: {type(self.queue).__name__}"
            )
            raise NotImplementedError()

    def print_tasks_status(self, tasks_status: dict | None = None) -> None:
        """
        Nicely print task queue status grouped by "user_id:mem_cube_id".

        For Redis queues, stream keys follow the pattern
        "{prefix}:{user_id}:{mem_cube_id}:{task_label}" — group by user/mem
        and show per-task_label counts. For local queues, only totals are
        available, so print aggregate metrics.
        """
        try:
            status = tasks_status if isinstance(tasks_status, dict) else self.get_tasks_status()
        except Exception as e:
            logger.warning(f"Failed to get tasks status: {e}")
            return

        if not isinstance(status, dict) or not status:
            print("[Tasks] No status available.")
            return

        total_running = int(status.get("running", 0) or 0)
        total_remaining = int(status.get("remaining", 0) or 0)
        print(f"Task Queue Status | running={total_running}, remaining={total_remaining}")

        if isinstance(self.queue, SchedulerRedisQueue):
            self._print_redis_details(status)
        elif isinstance(self.queue, SchedulerLocalQueue):
            self._print_local_totals(total_running, total_remaining)

    def _print_redis_details(self, status: dict) -> None:
        """Print one per-task-label table per "user_id:mem_cube_id" group.

        FIX: the previous version guarded `from collections import
        defaultdict` with a try/except whose `{}` fallback would have raised
        KeyError on first use — the fake fallback is removed.
        """
        from collections import defaultdict

        # {"user_id:mem_cube_id": {task_label: {"running": int, "remaining": int}}}
        group_stats = defaultdict(lambda: defaultdict(lambda: {"running": 0, "remaining": 0}))

        # Keys that look like stream entries (exclude the totals keys).
        stream_keys = [
            k for k in status if isinstance(k, str) and k not in ("running", "remaining")
        ]
        for stream_key in stream_keys:
            stream_stat = status.get(stream_key, {})
            if not isinstance(stream_stat, dict):
                continue
            parts = stream_key.split(":")
            # Parse from the right so colons inside the prefix are harmless.
            if len(parts) < 3:
                # Not enough parts to form user:mem:label — skip.
                continue
            task_label, mem_cube_id, user_id = parts[-1], parts[-2], parts[-3]
            group_key = f"{user_id}:{mem_cube_id}"
            try:
                counts = group_stats[group_key][task_label]
                counts["running"] += int(stream_stat.get("running", 0) or 0)
                counts["remaining"] += int(stream_stat.get("remaining", 0) or 0)
            except Exception:
                # Keep printing robust in face of bad data
                pass

        if not group_stats:
            print("[Tasks] No per-stream details found.")
            return

        for group_key in sorted(group_stats):
            print("")
            print(f"[{group_key}]")

            labels = sorted(group_stats[group_key])
            label_width = max(10, max((len(label) for label in labels), default=10))
            print(f"{'Task Label'.ljust(label_width)} {'Running':>7} {'Remaining':>9}")
            print(f"{'-' * label_width} {'-' * 7} {'-' * 9}")
            for label in labels:
                counts = group_stats[group_key][label]
                print(
                    f"{label.ljust(label_width)} "
                    f"{int(counts.get('running', 0)):>7} "
                    f"{int(counts.get('remaining', 0)):>9} "
                )

    @staticmethod
    def _print_local_totals(total_running: int, total_remaining: int) -> None:
        """Print aggregate counters; local queues expose no per-stream detail."""
        print("")
        print("[Local Queue Totals]")
        label_width = 12
        print(f"{'Metric'.ljust(label_width)} {'Value':>7}")
        print(f"{'-' * label_width} {'-' * 7}")
        print(f"{'Running'.ljust(label_width)} {total_running:>7}")
        print(f"{'Remaining'.ljust(label_width)} {total_remaining:>7}")

    def _get_local_tasks_status(self) -> dict:
        """Aggregate totals for the in-process queue."""
        task_status = self.init_task_status()
        try:
            # remaining/pending are the sum of per-stream queue sizes.
            qsize_map = self.queue.qsize()
            remaining_total = sum(v for v in qsize_map.values() if isinstance(v, int))
            task_status["remaining"] = remaining_total
            task_status["pending"] = remaining_total
            # running comes from the dispatcher when one is attached.
            if self.dispatcher and hasattr(self.dispatcher, "get_running_task_count"):
                task_status["running"] = int(self.dispatcher.get_running_task_count())
        except Exception as e:
            logger.warning(f"Failed to collect local queue status: {e}")
        return task_status

    def _pending_for_group(self, groups_info) -> int:
        """Return our consumer group's pending count from XINFO GROUPS output.

        Returns 0 when *groups_info* is falsy or the group is absent.
        """
        if groups_info:
            for group in groups_info:
                if group.get("name") == self.queue.consumer_group:
                    return int(group.get("pending", 0))
        return 0

    def _apply_stream_counts(
        self, status: dict, stream_key: str, total_messages: int, pending: int
    ) -> None:
        """Fold one stream's counters into *status* (per-stream and totals).

        running = delivered but not yet acked (the group's pending entries);
        pending/remaining = messages not yet delivered to the group.
        """
        remaining = max(0, max(0, int(total_messages)) - pending)
        stream_counts = status.setdefault(stream_key, self.init_task_status())
        stream_counts["running"] += pending
        stream_counts["pending"] += remaining
        stream_counts["remaining"] += remaining
        status["running"] += pending
        status["pending"] += remaining
        status["remaining"] += remaining

    def _get_redis_tasks_status(self) -> dict:
        """Collect per-stream and total counts from the Redis backend.

        With ``get_status_parallel`` set, the blocking XLEN/XINFO GROUPS
        calls are fanned out with ``asyncio.to_thread``; otherwise (or when
        already inside a running event loop, or on any parallel failure)
        they are issued sequentially.

        FIX: the sequential path previously skipped streams whose consumer
        group was missing (leaving ``xlen_val`` unused), while the parallel
        path counted them with pending=0 — both now share the parallel
        semantics via ``_pending_for_group`` / ``_apply_stream_counts``.
        """
        task_status = self.init_task_status()
        stream_keys = self.queue.get_stream_keys(stream_key_prefix=self.queue.stream_key_prefix)

        if self.get_status_parallel:
            try:
                import asyncio

                async def _collect_async() -> dict:
                    # Fan out the blocking redis calls, one thread per stream.
                    xlen_results = await asyncio.gather(
                        *(asyncio.to_thread(self.queue.redis.xlen, key) for key in stream_keys),
                        return_exceptions=True,
                    )
                    groups_results = await asyncio.gather(
                        *(
                            asyncio.to_thread(self.queue.redis.xinfo_groups, key)
                            for key in stream_keys
                        ),
                        return_exceptions=True,
                    )
                    local = self.init_task_status()
                    for idx, stream_key in enumerate(stream_keys):
                        local.setdefault(stream_key, self.init_task_status())
                        xlen_val = xlen_results[idx] if idx < len(xlen_results) else 0
                        groups_info = groups_results[idx] if idx < len(groups_results) else None
                        if isinstance(xlen_val, Exception):
                            xlen_val = 0
                        if isinstance(groups_info, Exception):
                            # Stream info failed; keep the zeroed entry.
                            continue
                        pending = self._pending_for_group(groups_info)
                        self._apply_stream_counts(local, stream_key, int(xlen_val or 0), pending)
                    return local

                try:
                    asyncio.get_running_loop()
                    loop_running = True
                except RuntimeError:
                    loop_running = False

                # asyncio.run() would fail inside a running loop; fall through
                # to the sequential path in that case.
                if not loop_running:
                    return asyncio.run(_collect_async())
            except Exception as e:
                logger.debug(f"Parallel status collection failed, fallback to sequential: {e}")

        # Sequential fallback.
        for stream_key in stream_keys:
            try:
                groups_info = self.queue.redis.xinfo_groups(stream_key)
            except Exception:
                groups_info = None
            try:
                xlen_val = int(self.queue.redis.xlen(stream_key))
            except Exception:
                xlen_val = 0
            pending = self._pending_for_group(groups_info)
            self._apply_stream_counts(task_status, stream_key, xlen_val, pending)

        return task_status
|