MemoryOS 2.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memoryos-2.0.3.dist-info/METADATA +418 -0
- memoryos-2.0.3.dist-info/RECORD +315 -0
- memoryos-2.0.3.dist-info/WHEEL +4 -0
- memoryos-2.0.3.dist-info/entry_points.txt +3 -0
- memoryos-2.0.3.dist-info/licenses/LICENSE +201 -0
- memos/__init__.py +20 -0
- memos/api/client.py +571 -0
- memos/api/config.py +1018 -0
- memos/api/context/dependencies.py +50 -0
- memos/api/exceptions.py +53 -0
- memos/api/handlers/__init__.py +62 -0
- memos/api/handlers/add_handler.py +158 -0
- memos/api/handlers/base_handler.py +194 -0
- memos/api/handlers/chat_handler.py +1401 -0
- memos/api/handlers/component_init.py +388 -0
- memos/api/handlers/config_builders.py +190 -0
- memos/api/handlers/feedback_handler.py +93 -0
- memos/api/handlers/formatters_handler.py +237 -0
- memos/api/handlers/memory_handler.py +316 -0
- memos/api/handlers/scheduler_handler.py +497 -0
- memos/api/handlers/search_handler.py +222 -0
- memos/api/handlers/suggestion_handler.py +117 -0
- memos/api/mcp_serve.py +614 -0
- memos/api/middleware/request_context.py +101 -0
- memos/api/product_api.py +38 -0
- memos/api/product_models.py +1206 -0
- memos/api/routers/__init__.py +1 -0
- memos/api/routers/product_router.py +477 -0
- memos/api/routers/server_router.py +394 -0
- memos/api/server_api.py +44 -0
- memos/api/start_api.py +433 -0
- memos/chunkers/__init__.py +4 -0
- memos/chunkers/base.py +24 -0
- memos/chunkers/charactertext_chunker.py +41 -0
- memos/chunkers/factory.py +24 -0
- memos/chunkers/markdown_chunker.py +62 -0
- memos/chunkers/sentence_chunker.py +54 -0
- memos/chunkers/simple_chunker.py +50 -0
- memos/cli.py +113 -0
- memos/configs/__init__.py +0 -0
- memos/configs/base.py +82 -0
- memos/configs/chunker.py +59 -0
- memos/configs/embedder.py +88 -0
- memos/configs/graph_db.py +236 -0
- memos/configs/internet_retriever.py +100 -0
- memos/configs/llm.py +151 -0
- memos/configs/mem_agent.py +54 -0
- memos/configs/mem_chat.py +81 -0
- memos/configs/mem_cube.py +105 -0
- memos/configs/mem_os.py +83 -0
- memos/configs/mem_reader.py +91 -0
- memos/configs/mem_scheduler.py +385 -0
- memos/configs/mem_user.py +70 -0
- memos/configs/memory.py +324 -0
- memos/configs/parser.py +38 -0
- memos/configs/reranker.py +18 -0
- memos/configs/utils.py +8 -0
- memos/configs/vec_db.py +80 -0
- memos/context/context.py +355 -0
- memos/dependency.py +52 -0
- memos/deprecation.py +262 -0
- memos/embedders/__init__.py +0 -0
- memos/embedders/ark.py +95 -0
- memos/embedders/base.py +106 -0
- memos/embedders/factory.py +29 -0
- memos/embedders/ollama.py +77 -0
- memos/embedders/sentence_transformer.py +49 -0
- memos/embedders/universal_api.py +51 -0
- memos/exceptions.py +30 -0
- memos/graph_dbs/__init__.py +0 -0
- memos/graph_dbs/base.py +274 -0
- memos/graph_dbs/factory.py +27 -0
- memos/graph_dbs/item.py +46 -0
- memos/graph_dbs/nebular.py +1794 -0
- memos/graph_dbs/neo4j.py +1942 -0
- memos/graph_dbs/neo4j_community.py +1058 -0
- memos/graph_dbs/polardb.py +5446 -0
- memos/hello_world.py +97 -0
- memos/llms/__init__.py +0 -0
- memos/llms/base.py +25 -0
- memos/llms/deepseek.py +13 -0
- memos/llms/factory.py +38 -0
- memos/llms/hf.py +443 -0
- memos/llms/hf_singleton.py +114 -0
- memos/llms/ollama.py +135 -0
- memos/llms/openai.py +222 -0
- memos/llms/openai_new.py +198 -0
- memos/llms/qwen.py +13 -0
- memos/llms/utils.py +14 -0
- memos/llms/vllm.py +218 -0
- memos/log.py +237 -0
- memos/mem_agent/base.py +19 -0
- memos/mem_agent/deepsearch_agent.py +391 -0
- memos/mem_agent/factory.py +36 -0
- memos/mem_chat/__init__.py +0 -0
- memos/mem_chat/base.py +30 -0
- memos/mem_chat/factory.py +21 -0
- memos/mem_chat/simple.py +200 -0
- memos/mem_cube/__init__.py +0 -0
- memos/mem_cube/base.py +30 -0
- memos/mem_cube/general.py +240 -0
- memos/mem_cube/navie.py +172 -0
- memos/mem_cube/utils.py +169 -0
- memos/mem_feedback/base.py +15 -0
- memos/mem_feedback/feedback.py +1192 -0
- memos/mem_feedback/simple_feedback.py +40 -0
- memos/mem_feedback/utils.py +230 -0
- memos/mem_os/client.py +5 -0
- memos/mem_os/core.py +1203 -0
- memos/mem_os/main.py +582 -0
- memos/mem_os/product.py +1608 -0
- memos/mem_os/product_server.py +455 -0
- memos/mem_os/utils/default_config.py +359 -0
- memos/mem_os/utils/format_utils.py +1403 -0
- memos/mem_os/utils/reference_utils.py +162 -0
- memos/mem_reader/__init__.py +0 -0
- memos/mem_reader/base.py +47 -0
- memos/mem_reader/factory.py +53 -0
- memos/mem_reader/memory.py +298 -0
- memos/mem_reader/multi_modal_struct.py +965 -0
- memos/mem_reader/read_multi_modal/__init__.py +43 -0
- memos/mem_reader/read_multi_modal/assistant_parser.py +311 -0
- memos/mem_reader/read_multi_modal/base.py +273 -0
- memos/mem_reader/read_multi_modal/file_content_parser.py +826 -0
- memos/mem_reader/read_multi_modal/image_parser.py +359 -0
- memos/mem_reader/read_multi_modal/multi_modal_parser.py +252 -0
- memos/mem_reader/read_multi_modal/string_parser.py +139 -0
- memos/mem_reader/read_multi_modal/system_parser.py +327 -0
- memos/mem_reader/read_multi_modal/text_content_parser.py +131 -0
- memos/mem_reader/read_multi_modal/tool_parser.py +210 -0
- memos/mem_reader/read_multi_modal/user_parser.py +218 -0
- memos/mem_reader/read_multi_modal/utils.py +358 -0
- memos/mem_reader/simple_struct.py +912 -0
- memos/mem_reader/strategy_struct.py +163 -0
- memos/mem_reader/utils.py +157 -0
- memos/mem_scheduler/__init__.py +0 -0
- memos/mem_scheduler/analyzer/__init__.py +0 -0
- memos/mem_scheduler/analyzer/api_analyzer.py +714 -0
- memos/mem_scheduler/analyzer/eval_analyzer.py +219 -0
- memos/mem_scheduler/analyzer/mos_for_test_scheduler.py +571 -0
- memos/mem_scheduler/analyzer/scheduler_for_eval.py +280 -0
- memos/mem_scheduler/base_scheduler.py +1319 -0
- memos/mem_scheduler/general_modules/__init__.py +0 -0
- memos/mem_scheduler/general_modules/api_misc.py +137 -0
- memos/mem_scheduler/general_modules/base.py +80 -0
- memos/mem_scheduler/general_modules/init_components_for_scheduler.py +425 -0
- memos/mem_scheduler/general_modules/misc.py +313 -0
- memos/mem_scheduler/general_modules/scheduler_logger.py +389 -0
- memos/mem_scheduler/general_modules/task_threads.py +315 -0
- memos/mem_scheduler/general_scheduler.py +1495 -0
- memos/mem_scheduler/memory_manage_modules/__init__.py +5 -0
- memos/mem_scheduler/memory_manage_modules/memory_filter.py +306 -0
- memos/mem_scheduler/memory_manage_modules/retriever.py +547 -0
- memos/mem_scheduler/monitors/__init__.py +0 -0
- memos/mem_scheduler/monitors/dispatcher_monitor.py +366 -0
- memos/mem_scheduler/monitors/general_monitor.py +394 -0
- memos/mem_scheduler/monitors/task_schedule_monitor.py +254 -0
- memos/mem_scheduler/optimized_scheduler.py +410 -0
- memos/mem_scheduler/orm_modules/__init__.py +0 -0
- memos/mem_scheduler/orm_modules/api_redis_model.py +518 -0
- memos/mem_scheduler/orm_modules/base_model.py +729 -0
- memos/mem_scheduler/orm_modules/monitor_models.py +261 -0
- memos/mem_scheduler/orm_modules/redis_model.py +699 -0
- memos/mem_scheduler/scheduler_factory.py +23 -0
- memos/mem_scheduler/schemas/__init__.py +0 -0
- memos/mem_scheduler/schemas/analyzer_schemas.py +52 -0
- memos/mem_scheduler/schemas/api_schemas.py +233 -0
- memos/mem_scheduler/schemas/general_schemas.py +55 -0
- memos/mem_scheduler/schemas/message_schemas.py +173 -0
- memos/mem_scheduler/schemas/monitor_schemas.py +406 -0
- memos/mem_scheduler/schemas/task_schemas.py +132 -0
- memos/mem_scheduler/task_schedule_modules/__init__.py +0 -0
- memos/mem_scheduler/task_schedule_modules/dispatcher.py +740 -0
- memos/mem_scheduler/task_schedule_modules/local_queue.py +247 -0
- memos/mem_scheduler/task_schedule_modules/orchestrator.py +74 -0
- memos/mem_scheduler/task_schedule_modules/redis_queue.py +1385 -0
- memos/mem_scheduler/task_schedule_modules/task_queue.py +162 -0
- memos/mem_scheduler/utils/__init__.py +0 -0
- memos/mem_scheduler/utils/api_utils.py +77 -0
- memos/mem_scheduler/utils/config_utils.py +100 -0
- memos/mem_scheduler/utils/db_utils.py +50 -0
- memos/mem_scheduler/utils/filter_utils.py +176 -0
- memos/mem_scheduler/utils/metrics.py +125 -0
- memos/mem_scheduler/utils/misc_utils.py +290 -0
- memos/mem_scheduler/utils/monitor_event_utils.py +67 -0
- memos/mem_scheduler/utils/status_tracker.py +229 -0
- memos/mem_scheduler/webservice_modules/__init__.py +0 -0
- memos/mem_scheduler/webservice_modules/rabbitmq_service.py +485 -0
- memos/mem_scheduler/webservice_modules/redis_service.py +380 -0
- memos/mem_user/factory.py +94 -0
- memos/mem_user/mysql_persistent_user_manager.py +271 -0
- memos/mem_user/mysql_user_manager.py +502 -0
- memos/mem_user/persistent_factory.py +98 -0
- memos/mem_user/persistent_user_manager.py +260 -0
- memos/mem_user/redis_persistent_user_manager.py +225 -0
- memos/mem_user/user_manager.py +488 -0
- memos/memories/__init__.py +0 -0
- memos/memories/activation/__init__.py +0 -0
- memos/memories/activation/base.py +42 -0
- memos/memories/activation/item.py +56 -0
- memos/memories/activation/kv.py +292 -0
- memos/memories/activation/vllmkv.py +219 -0
- memos/memories/base.py +19 -0
- memos/memories/factory.py +42 -0
- memos/memories/parametric/__init__.py +0 -0
- memos/memories/parametric/base.py +19 -0
- memos/memories/parametric/item.py +11 -0
- memos/memories/parametric/lora.py +41 -0
- memos/memories/textual/__init__.py +0 -0
- memos/memories/textual/base.py +92 -0
- memos/memories/textual/general.py +236 -0
- memos/memories/textual/item.py +304 -0
- memos/memories/textual/naive.py +187 -0
- memos/memories/textual/prefer_text_memory/__init__.py +0 -0
- memos/memories/textual/prefer_text_memory/adder.py +504 -0
- memos/memories/textual/prefer_text_memory/config.py +106 -0
- memos/memories/textual/prefer_text_memory/extractor.py +221 -0
- memos/memories/textual/prefer_text_memory/factory.py +85 -0
- memos/memories/textual/prefer_text_memory/retrievers.py +177 -0
- memos/memories/textual/prefer_text_memory/spliter.py +132 -0
- memos/memories/textual/prefer_text_memory/utils.py +93 -0
- memos/memories/textual/preference.py +344 -0
- memos/memories/textual/simple_preference.py +161 -0
- memos/memories/textual/simple_tree.py +69 -0
- memos/memories/textual/tree.py +459 -0
- memos/memories/textual/tree_text_memory/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/organize/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/organize/handler.py +184 -0
- memos/memories/textual/tree_text_memory/organize/manager.py +518 -0
- memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +238 -0
- memos/memories/textual/tree_text_memory/organize/reorganizer.py +622 -0
- memos/memories/textual/tree_text_memory/retrieve/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/retrieve/advanced_searcher.py +364 -0
- memos/memories/textual/tree_text_memory/retrieve/bm25_util.py +186 -0
- memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +419 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +270 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +102 -0
- memos/memories/textual/tree_text_memory/retrieve/reasoner.py +61 -0
- memos/memories/textual/tree_text_memory/retrieve/recall.py +497 -0
- memos/memories/textual/tree_text_memory/retrieve/reranker.py +111 -0
- memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +16 -0
- memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py +472 -0
- memos/memories/textual/tree_text_memory/retrieve/searcher.py +848 -0
- memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +135 -0
- memos/memories/textual/tree_text_memory/retrieve/utils.py +54 -0
- memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +387 -0
- memos/memos_tools/dinding_report_bot.py +453 -0
- memos/memos_tools/lockfree_dict.py +120 -0
- memos/memos_tools/notification_service.py +44 -0
- memos/memos_tools/notification_utils.py +142 -0
- memos/memos_tools/singleton.py +174 -0
- memos/memos_tools/thread_safe_dict.py +310 -0
- memos/memos_tools/thread_safe_dict_segment.py +382 -0
- memos/multi_mem_cube/__init__.py +0 -0
- memos/multi_mem_cube/composite_cube.py +86 -0
- memos/multi_mem_cube/single_cube.py +874 -0
- memos/multi_mem_cube/views.py +54 -0
- memos/parsers/__init__.py +0 -0
- memos/parsers/base.py +15 -0
- memos/parsers/factory.py +21 -0
- memos/parsers/markitdown.py +28 -0
- memos/reranker/__init__.py +4 -0
- memos/reranker/base.py +25 -0
- memos/reranker/concat.py +103 -0
- memos/reranker/cosine_local.py +102 -0
- memos/reranker/factory.py +72 -0
- memos/reranker/http_bge.py +324 -0
- memos/reranker/http_bge_strategy.py +327 -0
- memos/reranker/noop.py +19 -0
- memos/reranker/strategies/__init__.py +4 -0
- memos/reranker/strategies/base.py +61 -0
- memos/reranker/strategies/concat_background.py +94 -0
- memos/reranker/strategies/concat_docsource.py +110 -0
- memos/reranker/strategies/dialogue_common.py +109 -0
- memos/reranker/strategies/factory.py +31 -0
- memos/reranker/strategies/single_turn.py +107 -0
- memos/reranker/strategies/singleturn_outmem.py +98 -0
- memos/settings.py +10 -0
- memos/templates/__init__.py +0 -0
- memos/templates/advanced_search_prompts.py +211 -0
- memos/templates/cloud_service_prompt.py +107 -0
- memos/templates/instruction_completion.py +66 -0
- memos/templates/mem_agent_prompts.py +85 -0
- memos/templates/mem_feedback_prompts.py +822 -0
- memos/templates/mem_reader_prompts.py +1096 -0
- memos/templates/mem_reader_strategy_prompts.py +238 -0
- memos/templates/mem_scheduler_prompts.py +626 -0
- memos/templates/mem_search_prompts.py +93 -0
- memos/templates/mos_prompts.py +403 -0
- memos/templates/prefer_complete_prompt.py +735 -0
- memos/templates/tool_mem_prompts.py +139 -0
- memos/templates/tree_reorganize_prompts.py +230 -0
- memos/types/__init__.py +34 -0
- memos/types/general_types.py +151 -0
- memos/types/openai_chat_completion_types/__init__.py +15 -0
- memos/types/openai_chat_completion_types/chat_completion_assistant_message_param.py +56 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_image_param.py +27 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_input_audio_param.py +23 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_param.py +43 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_refusal_param.py +16 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_text_param.py +16 -0
- memos/types/openai_chat_completion_types/chat_completion_message_custom_tool_call_param.py +27 -0
- memos/types/openai_chat_completion_types/chat_completion_message_function_tool_call_param.py +32 -0
- memos/types/openai_chat_completion_types/chat_completion_message_param.py +18 -0
- memos/types/openai_chat_completion_types/chat_completion_message_tool_call_union_param.py +15 -0
- memos/types/openai_chat_completion_types/chat_completion_system_message_param.py +36 -0
- memos/types/openai_chat_completion_types/chat_completion_tool_message_param.py +30 -0
- memos/types/openai_chat_completion_types/chat_completion_user_message_param.py +34 -0
- memos/utils.py +123 -0
- memos/vec_dbs/__init__.py +0 -0
- memos/vec_dbs/base.py +117 -0
- memos/vec_dbs/factory.py +23 -0
- memos/vec_dbs/item.py +50 -0
- memos/vec_dbs/milvus.py +654 -0
- memos/vec_dbs/qdrant.py +355 -0
memos/mem_os/main.py
ADDED
|
@@ -0,0 +1,582 @@
|
|
|
1
|
+
import concurrent.futures
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from memos.configs.mem_os import MOSConfig
|
|
8
|
+
from memos.context.context import ContextThreadPoolExecutor
|
|
9
|
+
from memos.llms.factory import LLMFactory
|
|
10
|
+
from memos.log import get_logger
|
|
11
|
+
from memos.mem_os.core import MOSCore
|
|
12
|
+
from memos.mem_os.utils.default_config import get_default
|
|
13
|
+
from memos.memories.textual.base import BaseTextMemory
|
|
14
|
+
from memos.templates.mos_prompts import (
|
|
15
|
+
COT_DECOMPOSE_PROMPT,
|
|
16
|
+
PRO_MODE_WELCOME_MESSAGE,
|
|
17
|
+
SYNTHESIS_PROMPT,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Module-level logger shared by the MOS facade defined below.
logger = get_logger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class MOS(MOSCore):
    """
    The MOS (Memory Operating System) class inherits from MOSCore.

    This class maintains backward compatibility with the original MOS interface.
    It layers optional PRO-mode Chain-of-Thought (CoT) query decomposition on
    top of the core chat pipeline: when ``PRO_MODE`` is enabled in the config,
    ``chat`` routes through CoT decomposition and synthesis instead of the
    plain core chat.
    """
def __init__(self, config: MOSConfig | None = None):
|
|
31
|
+
"""
|
|
32
|
+
Initialize MOS with optional automatic configuration.
|
|
33
|
+
|
|
34
|
+
Args:
|
|
35
|
+
config (MOSConfig, optional): MOS configuration. If None, will use automatic configuration from environment variables.
|
|
36
|
+
"""
|
|
37
|
+
if config is None:
|
|
38
|
+
# Auto-configure if no config provided
|
|
39
|
+
config, default_cube = self._auto_configure()
|
|
40
|
+
self._auto_registered_cube = default_cube
|
|
41
|
+
else:
|
|
42
|
+
self._auto_registered_cube = None
|
|
43
|
+
|
|
44
|
+
self.enable_cot = config.PRO_MODE
|
|
45
|
+
if config.PRO_MODE:
|
|
46
|
+
print(PRO_MODE_WELCOME_MESSAGE)
|
|
47
|
+
logger.info(PRO_MODE_WELCOME_MESSAGE)
|
|
48
|
+
super().__init__(config)
|
|
49
|
+
|
|
50
|
+
# Auto-register cube if one was created
|
|
51
|
+
if self._auto_registered_cube is not None:
|
|
52
|
+
self.register_mem_cube(self._auto_registered_cube)
|
|
53
|
+
logger.info(
|
|
54
|
+
f"Auto-registered default cube: {self._auto_registered_cube.config.cube_id}"
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
def _auto_configure(self, **kwargs) -> tuple[MOSConfig, Any]:
|
|
58
|
+
"""
|
|
59
|
+
Automatically configure MOS with default settings.
|
|
60
|
+
|
|
61
|
+
Returns:
|
|
62
|
+
tuple[MOSConfig, Any]: MOS configuration and default MemCube
|
|
63
|
+
"""
|
|
64
|
+
# Get configuration from environment variables
|
|
65
|
+
openai_api_key = os.getenv("OPENAI_API_KEY")
|
|
66
|
+
openai_api_base = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
|
|
67
|
+
text_mem_type = os.getenv("MOS_TEXT_MEM_TYPE", "general_text")
|
|
68
|
+
|
|
69
|
+
if not openai_api_key:
|
|
70
|
+
raise ValueError("OPENAI_API_KEY environment variable is required")
|
|
71
|
+
|
|
72
|
+
logger.info(f"Auto-configuring MOS with text_mem_type: {text_mem_type}")
|
|
73
|
+
return get_default(
|
|
74
|
+
openai_api_key=openai_api_key,
|
|
75
|
+
openai_api_base=openai_api_base,
|
|
76
|
+
text_mem_type=text_mem_type,
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
@classmethod
|
|
80
|
+
def simple(cls) -> "MOS":
|
|
81
|
+
"""
|
|
82
|
+
Create a MOS instance with automatic configuration from environment variables.
|
|
83
|
+
|
|
84
|
+
This is the simplest way to get started with MemOS.
|
|
85
|
+
|
|
86
|
+
Environment variables needed:
|
|
87
|
+
- OPENAI_API_KEY: Your OpenAI API key
|
|
88
|
+
- OPENAI_API_BASE: OpenAI API base URL (optional, defaults to "https://api.openai.com/v1")
|
|
89
|
+
- MOS_TEXT_MEM_TYPE: Text memory type (optional, defaults to "general_text")
|
|
90
|
+
|
|
91
|
+
Returns:
|
|
92
|
+
MOS: Configured MOS instance with auto-registered default cube
|
|
93
|
+
|
|
94
|
+
Example:
|
|
95
|
+
```python
|
|
96
|
+
# Set environment variables
|
|
97
|
+
export OPENAI_API_KEY="your-api-key"
|
|
98
|
+
export MOS_TEXT_MEM_TYPE="general_text"
|
|
99
|
+
|
|
100
|
+
# Then use
|
|
101
|
+
memory = MOS.simple()
|
|
102
|
+
memory.add_memory("Hello world!")
|
|
103
|
+
response = memory.chat("What did I just say?")
|
|
104
|
+
```
|
|
105
|
+
"""
|
|
106
|
+
return cls()
|
|
107
|
+
|
|
108
|
+
def chat(self, query: str, user_id: str | None = None, base_prompt: str | None = None) -> str:
|
|
109
|
+
"""
|
|
110
|
+
Enhanced chat method with optional CoT (Chain of Thought) enhancement.
|
|
111
|
+
|
|
112
|
+
Args:
|
|
113
|
+
query (str): The user's query.
|
|
114
|
+
user_id (str, optional): User ID for context.
|
|
115
|
+
base_prompt (str, optional): A custom base prompt to use for the chat.
|
|
116
|
+
It can be a template string with a `{memories}` placeholder.
|
|
117
|
+
If not provided, a default prompt is used.
|
|
118
|
+
|
|
119
|
+
Returns:
|
|
120
|
+
str: The response from the MOS.
|
|
121
|
+
"""
|
|
122
|
+
# Check if CoT enhancement is enabled (either explicitly or via PRO mode)
|
|
123
|
+
|
|
124
|
+
if not self.enable_cot:
|
|
125
|
+
# Use the original chat method from core
|
|
126
|
+
return super().chat(query, user_id, base_prompt=base_prompt)
|
|
127
|
+
|
|
128
|
+
# Enhanced chat with CoT decomposition
|
|
129
|
+
return self._chat_with_cot_enhancement(query, user_id, base_prompt=base_prompt)
|
|
130
|
+
|
|
131
|
+
    def _chat_with_cot_enhancement(
        self, query: str, user_id: str | None = None, base_prompt: str | None = None
    ) -> str:
        """
        Chat with CoT enhancement for complex query decomposition.

        This method includes all the same validation and processing logic as the
        core chat method. Any failure inside the CoT pipeline degrades
        gracefully to the standard core chat.

        Args:
            query (str): The user's query.
            user_id (str, optional): User ID for context; defaults to self.user_id.
            base_prompt (str, optional): Custom base prompt forwarded to prompt building.

        Returns:
            str: The enhanced response.
        """
        # Step 1: Perform all the same validation and setup as core chat method
        target_user_id = user_id if user_id is not None else self.user_id
        accessible_cubes = self.user_manager.get_user_cubes(target_user_id)
        user_cube_ids = [cube.cube_id for cube in accessible_cubes]

        # Register chat history if needed (lazy per-user initialization)
        if target_user_id not in self.chat_history_manager:
            self._register_chat_history(target_user_id)

        chat_history = self.chat_history_manager[target_user_id]

        try:
            # Step 2: Decompose the query using CoT
            logger.info(f"🔍 [CoT] Decomposing query: {query}")
            decomposition_result = self.cot_decompose(
                query, self.config.chat_model, target_user_id, self.chat_llm
            )

            # Check if the query is complex and needs decomposition
            if not decomposition_result.get("is_complex", False):
                logger.info("🔍 [CoT] Query is not complex, using standard chat")
                return super().chat(query, user_id, base_prompt=base_prompt)

            sub_questions = decomposition_result.get("sub_questions", [])
            logger.info(f"🔍 [CoT] Decomposed into {len(sub_questions)} sub-questions")

            # Step 3: Get search engine for sub-questions (with proper validation)
            search_engine = self._get_search_engine_for_cot_with_validation(user_cube_ids)
            if not search_engine:
                logger.warning("🔍 [CoT] No search engine available, using standard chat")
                return super().chat(query, user_id, base_prompt=base_prompt)

            # Step 4: Get answers for sub-questions
            logger.info("🔍 [CoT] Getting answers for sub-questions...")
            sub_questions, sub_answers = self.get_sub_answers(
                sub_questions=sub_questions,
                search_engine=search_engine,
                llm_config=self.config.chat_model,
                user_id=target_user_id,
                top_k=getattr(self.config, "cot_top_k", 3),  # 3 results when unset
                llm=self.chat_llm,
            )

            # Step 5: Generate enhanced response using sub-answers
            logger.info("🔍 [CoT] Generating enhanced response...")
            enhanced_response = self._generate_enhanced_response_with_context(
                original_query=query,
                sub_questions=sub_questions,
                sub_answers=sub_answers,
                chat_history=chat_history,
                user_id=target_user_id,
                search_engine=search_engine,
                base_prompt=base_prompt,
            )

            # Step 6: Update chat history (same as core method)
            chat_history.chat_history.append({"role": "user", "content": query})
            chat_history.chat_history.append({"role": "assistant", "content": enhanced_response})
            self.chat_history_manager[target_user_id] = chat_history

            # Step 7: Submit message to scheduler (same as core method)
            # NOTE(review): the answer is only submitted when the user has
            # exactly one accessible cube — confirm multi-cube users are meant
            # to skip scheduler notification.
            if len(accessible_cubes) == 1:
                mem_cube_id = accessible_cubes[0].cube_id
                if self.enable_mem_scheduler and self.mem_scheduler is not None:
                    # Local imports keep scheduler dependencies off the module
                    # import path when the scheduler is disabled.
                    from datetime import datetime

                    from memos.mem_scheduler.schemas import (
                        ANSWER_LABEL,
                        ScheduleMessageItem,
                    )

                    message_item = ScheduleMessageItem(
                        user_id=target_user_id,
                        mem_cube_id=mem_cube_id,
                        label=ANSWER_LABEL,
                        content=enhanced_response,
                        timestamp=datetime.now().isoformat(),
                    )
                    self.mem_scheduler.submit_messages(messages=[message_item])

            return enhanced_response

        except Exception as e:
            # Any CoT failure falls back to the plain core chat path.
            logger.error(f"🔍 [CoT] Error in CoT enhancement: {e}")
            logger.info("🔍 [CoT] Falling back to standard chat")
            return super().chat(query, user_id, base_prompt=base_prompt)
def _get_search_engine_for_cot_with_validation(
|
|
233
|
+
self, user_cube_ids: list[str]
|
|
234
|
+
) -> BaseTextMemory | None:
|
|
235
|
+
"""
|
|
236
|
+
Get the best available search engine for CoT operations with proper validation.
|
|
237
|
+
|
|
238
|
+
Args:
|
|
239
|
+
user_cube_ids (list[str]): List of cube IDs the user has access to.
|
|
240
|
+
|
|
241
|
+
Returns:
|
|
242
|
+
BaseTextMemory or None: The search engine to use for CoT.
|
|
243
|
+
"""
|
|
244
|
+
if not self.mem_cubes:
|
|
245
|
+
return None
|
|
246
|
+
|
|
247
|
+
# Get the first available text memory from user's accessible cubes
|
|
248
|
+
for mem_cube_id, mem_cube in self.mem_cubes.items():
|
|
249
|
+
if mem_cube_id not in user_cube_ids:
|
|
250
|
+
continue
|
|
251
|
+
if mem_cube.text_mem:
|
|
252
|
+
return mem_cube.text_mem
|
|
253
|
+
|
|
254
|
+
return None
|
|
255
|
+
|
|
256
|
+
    def _generate_enhanced_response_with_context(
        self,
        original_query: str,
        sub_questions: list[str],
        sub_answers: list[str],
        chat_history: Any,
        user_id: str | None = None,
        search_engine: BaseTextMemory | None = None,
        base_prompt: str | None = None,
    ) -> str:
        """
        Generate an enhanced response using sub-questions and their answers, with chat context.

        Builds a synthesis prompt from the Q/A pairs, optionally augments the
        system prompt with memories retrieved for the original query, and asks
        the chat LLM for a final answer. Falls back to the standard chat on
        generation failure.

        Args:
            original_query (str): The original user query.
            sub_questions (list[str]): List of sub-questions.
            sub_answers (list[str]): List of answers to sub-questions.
            chat_history: The user's chat history (exposes a ``chat_history`` list).
            user_id (str, optional): User ID for context.
            search_engine (BaseTextMemory, optional): Search engine for context retrieval.
            base_prompt (str, optional): A custom base prompt for the chat.

        Returns:
            str: The enhanced response.
        """
        # Build the synthesis prompt: one "Qn/An" pair per decomposed question.
        # strict=False tolerates unequal list lengths (extra items are dropped).
        qa_text = ""
        for i, (question, answer) in enumerate(zip(sub_questions, sub_answers, strict=False), 1):
            qa_text += f"Q{i}: {question}\nA{i}: {answer}\n\n"

        # Build messages with chat history context (similar to core method)
        if (search_engine is not None) and self.config.enable_textual_memory:
            # "fine" vs "fast" search mode depending on CoT being active.
            if self.enable_cot:
                search_memories = search_engine.search(
                    original_query, top_k=self.config.top_k, mode="fine"
                )
            else:
                search_memories = search_engine.search(
                    original_query, top_k=self.config.top_k, mode="fast"
                )
            system_prompt = self._build_system_prompt(
                search_memories, base_prompt=base_prompt
            )  # Use the same system prompt builder
        else:
            # No memories available: system prompt from the base prompt alone.
            system_prompt = self._build_system_prompt(base_prompt=base_prompt)
        current_messages = [
            {"role": "system", "content": system_prompt + SYNTHESIS_PROMPT.format(qa_text=qa_text)},
            *chat_history.chat_history,
            {
                "role": "user",
                "content": original_query,
            },
        ]

        # Handle activation memory if enabled (same as core method)
        past_key_values = None
        if self.config.enable_activation_memory:
            if self.config.chat_model.backend not in ["huggingface", "huggingface_singleton"]:
                logger.error(
                    "Activation memory only used for huggingface backend. Skipping activation memory."
                )
            else:
                # Get accessible cubes for the user
                target_user_id = user_id if user_id is not None else self.user_id
                accessible_cubes = self.user_manager.get_user_cubes(target_user_id)
                user_cube_ids = [cube.cube_id for cube in accessible_cubes]

                # Use the first accessible cube's activation (KV-cache) memory.
                for mem_cube_id, mem_cube in self.mem_cubes.items():
                    if mem_cube_id not in user_cube_ids:
                        continue
                    if mem_cube.act_mem:
                        kv_cache = next(iter(mem_cube.act_mem.get_all()), None)
                        past_key_values = (
                            kv_cache.memory if (kv_cache and hasattr(kv_cache, "memory")) else None
                        )
                        break

        try:
            # Generate the enhanced response using the chat LLM with same parameters as core
            if past_key_values is not None:
                enhanced_response = self.chat_llm.generate(
                    current_messages, past_key_values=past_key_values
                )
            else:
                enhanced_response = self.chat_llm.generate(current_messages)

            logger.info("🔍 [CoT] Generated enhanced response")
            return enhanced_response
        except Exception as e:
            logger.error(f"🔍 [CoT] Error generating enhanced response: {e}")
            # Fallback to standard chat
            return super().chat(original_query, user_id, base_prompt=base_prompt)
@classmethod
def cot_decompose(
    cls, query: str, llm_config: Any, user_id: str | None = None, llm: LLMFactory | None = None
) -> list[str] | dict[str, Any]:
    """
    Decompose a complex query into sub-questions using Chain of Thought reasoning.

    Args:
        query (str): The complex query to decompose.
        llm_config: LLM configuration used to build an LLM when ``llm`` is not given.
        user_id (str, optional): User ID for context (not used by this method itself).
        llm (LLMFactory, optional): Pre-built LLM instance; when provided,
            ``llm_config`` is ignored and no new instance is created.

    Returns:
        Union[List[str], Dict[str, Any]]: Parsed decomposition result as returned by the
        LLM (typically ``{"is_complex": bool, "sub_questions": [...]}``). Falls back to
        ``{"is_complex": False, "sub_questions": []}`` when the LLM output cannot be
        parsed or any unexpected error occurs.
    """
    import re

    # Create a temporary LLM instance for decomposition only when none was supplied.
    if llm is None:
        llm = LLMFactory.from_config(llm_config)

    # System prompt for CoT decomposition with complexity analysis.
    system_prompt = COT_DECOMPOSE_PROMPT.format(query=query)

    messages = [{"role": "system", "content": system_prompt}]

    try:
        response = llm.generate(messages)
        # The prompt requests JSON, so try to parse the response directly first.
        return json.loads(response)
    except json.JSONDecodeError as e:
        logger.warning(f"Failed to parse JSON response from LLM: {e}")
        logger.warning(f"Raw response: {response}")

        # Second chance: models often wrap JSON in prose, so extract the first
        # {...} span (DOTALL so the object may span multiple lines).
        json_match = re.search(r"\{.*\}", response, re.DOTALL)
        if json_match:
            try:
                return json.loads(json_match.group(0))
            except json.JSONDecodeError:
                # Narrowed from a blanket Exception: only a parse failure is
                # expected here; anything else should not be silently swallowed.
                pass

        # If all parsing attempts fail, return a safe "not complex" default.
        return {"is_complex": False, "sub_questions": []}
    except Exception as e:
        # Covers LLM construction/generation failures; keep the caller working.
        logger.error(f"Unexpected error in cot_decompose: {e}")
        return {"is_complex": False, "sub_questions": []}
|
|
400
|
+
|
|
401
|
+
@classmethod
def get_sub_answers(
    cls,
    sub_questions: list[str] | dict[str, Any],
    search_results: dict[str, Any] | None = None,
    search_engine: BaseTextMemory | None = None,
    llm_config: Any = None,
    user_id: str | None = None,
    top_k: int = 5,
    llm: LLMFactory | None = None,
) -> tuple[list[str], list[str]]:
    """
    Get answers for sub-questions using either search results or a search engine.

    Args:
        sub_questions (Union[List[str], Dict[str, Any]]): List of sub-questions, or the
            dict produced by ``cot_decompose`` (its ``"sub_questions"`` key is used and
            ``"is_complex": False`` short-circuits to empty results).
        search_results (Dict[str, Any], optional): Pre-computed search results with
            relevant information under the ``"text_mem"`` key.
        search_engine (BaseTextMemory, optional): Text memory engine used to search when
            no pre-computed results are given.
        llm_config (Any, optional): LLM configuration; required when ``llm`` is None.
            (Annotation corrected from ``LLMFactory`` — this is a config object passed
            to ``LLMFactory.from_config``, matching the docstring.)
        user_id (str, optional): User ID for context (not used by this method itself).
        top_k (int): Number of results to retrieve per question and the maximum number
            of memory snippets injected into each answer prompt.
        llm (LLMFactory, optional): Pre-built LLM instance; avoids re-creating one.

    Returns:
        Tuple[List[str], List[str]]: ``(sub_questions, sub_answers)``, index-aligned.

    Raises:
        ValueError: If neither ``search_results`` nor ``search_engine`` is provided.
    """
    # Extract sub-questions from a cot_decompose result if needed.
    if isinstance(sub_questions, dict):
        if not sub_questions.get("is_complex", False):
            return [], []
        sub_questions = sub_questions.get("sub_questions", [])

    if not sub_questions:
        return [], []

    # Validate inputs.
    if search_results is None and search_engine is None:
        raise ValueError("Either search_results or search_engine must be provided")
    if llm is None:
        llm = LLMFactory.from_config(llm_config)

    # Step 1: Get search results if a search engine is provided.
    if search_engine is not None:
        search_results = cls._search_with_engine(sub_questions, search_engine, top_k)

    # Step 2: Generate answers for each sub-question using the LLM in parallel.
    def generate_answer_for_question(question_index: int, sub_question: str) -> tuple[int, str]:
        """Generate the answer for a single sub-question; returns (index, answer)."""
        # Extract relevant information from search results.
        relevant_info = []
        if search_results and search_results.get("text_mem"):
            for cube_result in search_results["text_mem"]:
                for memory in cube_result.get("memories", []):
                    relevant_info.append(memory.memory)

        # Build system prompt with memories (similar to MOSCore._build_system_prompt).
        base_prompt = (
            "You are a knowledgeable and helpful AI assistant. "
            "You have access to relevant information that helps you provide accurate answers. "
            "Use the provided information to answer the question comprehensively. "
            "If the information is not sufficient, acknowledge the limitations."
        )

        # Add memory context if available.
        if relevant_info:
            memory_context = "\n\n## Relevant Information:\n"
            # Cap the injected snippets at top_k (comment previously said "top 3",
            # which was stale — the code has always used top_k).
            for j, info in enumerate(relevant_info[:top_k], 1):
                memory_context += f"{j}. {info}\n"
            system_prompt = base_prompt + memory_context
        else:
            system_prompt = (
                base_prompt
                + "\n\n## Relevant Information:\nNo specific information found in memory."
            )

        # Create messages for the LLM.
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": sub_question},
        ]

        try:
            # Generate the answer using the LLM.
            response = llm.generate(messages)
            return question_index, response
        except Exception as e:
            logger.error(f"Failed to generate answer for sub-question '{sub_question}': {e}")
            return question_index, f"Unable to generate answer for: {sub_question}"

    # Generate answers in parallel while maintaining order.
    sub_answers = [None] * len(sub_questions)
    with ContextThreadPoolExecutor(max_workers=min(len(sub_questions), 10)) as executor:
        # Submit all answer generation tasks.
        future_to_index = {
            executor.submit(generate_answer_for_question, i, question): i
            for i, question in enumerate(sub_questions)
        }

        # Collect results as they complete, storing each in its original position.
        for future in concurrent.futures.as_completed(future_to_index):
            try:
                question_index, answer = future.result()
                sub_answers[question_index] = answer
            except Exception as e:
                question_index = future_to_index[future]
                logger.error(
                    f"Exception occurred while generating answer for question at index {question_index}: {e}"
                )
                sub_answers[question_index] = (
                    f"Error generating answer for question {question_index + 1}"
                )

    return sub_questions, sub_answers
|
|
513
|
+
|
|
514
|
+
@classmethod
def _search_with_engine(
    cls, sub_questions: list[str], search_engine: BaseTextMemory, top_k: int
) -> dict[str, Any]:
    """
    Search for sub-questions using the provided search engine in parallel.

    Args:
        sub_questions (List[str]): List of sub-questions to search for.
        search_engine (BaseTextMemory): Text memory engine for searching.
        top_k (int): Number of top results to retrieve per question.

    Returns:
        Dict[str, Any]: Search results in the expected format:
        ``{"text_mem": [{"cube_id": "search_engine", "memories": [...]}]}``.
    """
    # Guard: an empty question list would otherwise construct an executor with
    # max_workers=0, which raises ValueError.
    if not sub_questions:
        return {"text_mem": [{"cube_id": "search_engine", "memories": []}]}

    def search_single_question(question: str) -> list[Any]:
        """Search for a single question; returns [] on any failure."""
        try:
            # Handle different search method signatures.
            if not hasattr(search_engine, "search"):
                return []
            try:
                # tree_text memory supports a mode keyword.
                return search_engine.search(question, top_k, mode="fast")
            except TypeError:
                # general_text / naive_text memory: plain positional signature.
                # (The original retried this identical call twice; one attempt
                # suffices — a second TypeError would hit the outer handler anyway.)
                return search_engine.search(question, top_k)
        except Exception as e:
            logger.error(f"Search failed for question '{question}': {e}")
            return []

    # Search in parallel while maintaining order.
    all_memories = []
    with ContextThreadPoolExecutor(max_workers=min(len(sub_questions), 10)) as executor:
        # Submit all search tasks and remember each one's original index.
        future_to_index = {
            executor.submit(search_single_question, question): i
            for i, question in enumerate(sub_questions)
        }

        # Pre-size the results list so completions can be stored by index.
        results = [None] * len(sub_questions)

        # Collect results as they complete, storing each in its original position.
        for future in concurrent.futures.as_completed(future_to_index):
            index = future_to_index[future]
            try:
                results[index] = future.result()
            except Exception as e:
                logger.error(
                    f"Exception occurred while searching for question at index {index}: {e}"
                )
                results[index] = []

    # Combine all results in the original question order.
    for result in results:
        if result is not None:
            all_memories.extend(result)

    # Format results in the expected structure.
    return {"text_mem": [{"cube_id": "search_engine", "memories": all_memories}]}
|