MemoryOS 2.0.3 (memoryos-2.0.3-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memoryos-2.0.3.dist-info/METADATA +418 -0
- memoryos-2.0.3.dist-info/RECORD +315 -0
- memoryos-2.0.3.dist-info/WHEEL +4 -0
- memoryos-2.0.3.dist-info/entry_points.txt +3 -0
- memoryos-2.0.3.dist-info/licenses/LICENSE +201 -0
- memos/__init__.py +20 -0
- memos/api/client.py +571 -0
- memos/api/config.py +1018 -0
- memos/api/context/dependencies.py +50 -0
- memos/api/exceptions.py +53 -0
- memos/api/handlers/__init__.py +62 -0
- memos/api/handlers/add_handler.py +158 -0
- memos/api/handlers/base_handler.py +194 -0
- memos/api/handlers/chat_handler.py +1401 -0
- memos/api/handlers/component_init.py +388 -0
- memos/api/handlers/config_builders.py +190 -0
- memos/api/handlers/feedback_handler.py +93 -0
- memos/api/handlers/formatters_handler.py +237 -0
- memos/api/handlers/memory_handler.py +316 -0
- memos/api/handlers/scheduler_handler.py +497 -0
- memos/api/handlers/search_handler.py +222 -0
- memos/api/handlers/suggestion_handler.py +117 -0
- memos/api/mcp_serve.py +614 -0
- memos/api/middleware/request_context.py +101 -0
- memos/api/product_api.py +38 -0
- memos/api/product_models.py +1206 -0
- memos/api/routers/__init__.py +1 -0
- memos/api/routers/product_router.py +477 -0
- memos/api/routers/server_router.py +394 -0
- memos/api/server_api.py +44 -0
- memos/api/start_api.py +433 -0
- memos/chunkers/__init__.py +4 -0
- memos/chunkers/base.py +24 -0
- memos/chunkers/charactertext_chunker.py +41 -0
- memos/chunkers/factory.py +24 -0
- memos/chunkers/markdown_chunker.py +62 -0
- memos/chunkers/sentence_chunker.py +54 -0
- memos/chunkers/simple_chunker.py +50 -0
- memos/cli.py +113 -0
- memos/configs/__init__.py +0 -0
- memos/configs/base.py +82 -0
- memos/configs/chunker.py +59 -0
- memos/configs/embedder.py +88 -0
- memos/configs/graph_db.py +236 -0
- memos/configs/internet_retriever.py +100 -0
- memos/configs/llm.py +151 -0
- memos/configs/mem_agent.py +54 -0
- memos/configs/mem_chat.py +81 -0
- memos/configs/mem_cube.py +105 -0
- memos/configs/mem_os.py +83 -0
- memos/configs/mem_reader.py +91 -0
- memos/configs/mem_scheduler.py +385 -0
- memos/configs/mem_user.py +70 -0
- memos/configs/memory.py +324 -0
- memos/configs/parser.py +38 -0
- memos/configs/reranker.py +18 -0
- memos/configs/utils.py +8 -0
- memos/configs/vec_db.py +80 -0
- memos/context/context.py +355 -0
- memos/dependency.py +52 -0
- memos/deprecation.py +262 -0
- memos/embedders/__init__.py +0 -0
- memos/embedders/ark.py +95 -0
- memos/embedders/base.py +106 -0
- memos/embedders/factory.py +29 -0
- memos/embedders/ollama.py +77 -0
- memos/embedders/sentence_transformer.py +49 -0
- memos/embedders/universal_api.py +51 -0
- memos/exceptions.py +30 -0
- memos/graph_dbs/__init__.py +0 -0
- memos/graph_dbs/base.py +274 -0
- memos/graph_dbs/factory.py +27 -0
- memos/graph_dbs/item.py +46 -0
- memos/graph_dbs/nebular.py +1794 -0
- memos/graph_dbs/neo4j.py +1942 -0
- memos/graph_dbs/neo4j_community.py +1058 -0
- memos/graph_dbs/polardb.py +5446 -0
- memos/hello_world.py +97 -0
- memos/llms/__init__.py +0 -0
- memos/llms/base.py +25 -0
- memos/llms/deepseek.py +13 -0
- memos/llms/factory.py +38 -0
- memos/llms/hf.py +443 -0
- memos/llms/hf_singleton.py +114 -0
- memos/llms/ollama.py +135 -0
- memos/llms/openai.py +222 -0
- memos/llms/openai_new.py +198 -0
- memos/llms/qwen.py +13 -0
- memos/llms/utils.py +14 -0
- memos/llms/vllm.py +218 -0
- memos/log.py +237 -0
- memos/mem_agent/base.py +19 -0
- memos/mem_agent/deepsearch_agent.py +391 -0
- memos/mem_agent/factory.py +36 -0
- memos/mem_chat/__init__.py +0 -0
- memos/mem_chat/base.py +30 -0
- memos/mem_chat/factory.py +21 -0
- memos/mem_chat/simple.py +200 -0
- memos/mem_cube/__init__.py +0 -0
- memos/mem_cube/base.py +30 -0
- memos/mem_cube/general.py +240 -0
- memos/mem_cube/navie.py +172 -0
- memos/mem_cube/utils.py +169 -0
- memos/mem_feedback/base.py +15 -0
- memos/mem_feedback/feedback.py +1192 -0
- memos/mem_feedback/simple_feedback.py +40 -0
- memos/mem_feedback/utils.py +230 -0
- memos/mem_os/client.py +5 -0
- memos/mem_os/core.py +1203 -0
- memos/mem_os/main.py +582 -0
- memos/mem_os/product.py +1608 -0
- memos/mem_os/product_server.py +455 -0
- memos/mem_os/utils/default_config.py +359 -0
- memos/mem_os/utils/format_utils.py +1403 -0
- memos/mem_os/utils/reference_utils.py +162 -0
- memos/mem_reader/__init__.py +0 -0
- memos/mem_reader/base.py +47 -0
- memos/mem_reader/factory.py +53 -0
- memos/mem_reader/memory.py +298 -0
- memos/mem_reader/multi_modal_struct.py +965 -0
- memos/mem_reader/read_multi_modal/__init__.py +43 -0
- memos/mem_reader/read_multi_modal/assistant_parser.py +311 -0
- memos/mem_reader/read_multi_modal/base.py +273 -0
- memos/mem_reader/read_multi_modal/file_content_parser.py +826 -0
- memos/mem_reader/read_multi_modal/image_parser.py +359 -0
- memos/mem_reader/read_multi_modal/multi_modal_parser.py +252 -0
- memos/mem_reader/read_multi_modal/string_parser.py +139 -0
- memos/mem_reader/read_multi_modal/system_parser.py +327 -0
- memos/mem_reader/read_multi_modal/text_content_parser.py +131 -0
- memos/mem_reader/read_multi_modal/tool_parser.py +210 -0
- memos/mem_reader/read_multi_modal/user_parser.py +218 -0
- memos/mem_reader/read_multi_modal/utils.py +358 -0
- memos/mem_reader/simple_struct.py +912 -0
- memos/mem_reader/strategy_struct.py +163 -0
- memos/mem_reader/utils.py +157 -0
- memos/mem_scheduler/__init__.py +0 -0
- memos/mem_scheduler/analyzer/__init__.py +0 -0
- memos/mem_scheduler/analyzer/api_analyzer.py +714 -0
- memos/mem_scheduler/analyzer/eval_analyzer.py +219 -0
- memos/mem_scheduler/analyzer/mos_for_test_scheduler.py +571 -0
- memos/mem_scheduler/analyzer/scheduler_for_eval.py +280 -0
- memos/mem_scheduler/base_scheduler.py +1319 -0
- memos/mem_scheduler/general_modules/__init__.py +0 -0
- memos/mem_scheduler/general_modules/api_misc.py +137 -0
- memos/mem_scheduler/general_modules/base.py +80 -0
- memos/mem_scheduler/general_modules/init_components_for_scheduler.py +425 -0
- memos/mem_scheduler/general_modules/misc.py +313 -0
- memos/mem_scheduler/general_modules/scheduler_logger.py +389 -0
- memos/mem_scheduler/general_modules/task_threads.py +315 -0
- memos/mem_scheduler/general_scheduler.py +1495 -0
- memos/mem_scheduler/memory_manage_modules/__init__.py +5 -0
- memos/mem_scheduler/memory_manage_modules/memory_filter.py +306 -0
- memos/mem_scheduler/memory_manage_modules/retriever.py +547 -0
- memos/mem_scheduler/monitors/__init__.py +0 -0
- memos/mem_scheduler/monitors/dispatcher_monitor.py +366 -0
- memos/mem_scheduler/monitors/general_monitor.py +394 -0
- memos/mem_scheduler/monitors/task_schedule_monitor.py +254 -0
- memos/mem_scheduler/optimized_scheduler.py +410 -0
- memos/mem_scheduler/orm_modules/__init__.py +0 -0
- memos/mem_scheduler/orm_modules/api_redis_model.py +518 -0
- memos/mem_scheduler/orm_modules/base_model.py +729 -0
- memos/mem_scheduler/orm_modules/monitor_models.py +261 -0
- memos/mem_scheduler/orm_modules/redis_model.py +699 -0
- memos/mem_scheduler/scheduler_factory.py +23 -0
- memos/mem_scheduler/schemas/__init__.py +0 -0
- memos/mem_scheduler/schemas/analyzer_schemas.py +52 -0
- memos/mem_scheduler/schemas/api_schemas.py +233 -0
- memos/mem_scheduler/schemas/general_schemas.py +55 -0
- memos/mem_scheduler/schemas/message_schemas.py +173 -0
- memos/mem_scheduler/schemas/monitor_schemas.py +406 -0
- memos/mem_scheduler/schemas/task_schemas.py +132 -0
- memos/mem_scheduler/task_schedule_modules/__init__.py +0 -0
- memos/mem_scheduler/task_schedule_modules/dispatcher.py +740 -0
- memos/mem_scheduler/task_schedule_modules/local_queue.py +247 -0
- memos/mem_scheduler/task_schedule_modules/orchestrator.py +74 -0
- memos/mem_scheduler/task_schedule_modules/redis_queue.py +1385 -0
- memos/mem_scheduler/task_schedule_modules/task_queue.py +162 -0
- memos/mem_scheduler/utils/__init__.py +0 -0
- memos/mem_scheduler/utils/api_utils.py +77 -0
- memos/mem_scheduler/utils/config_utils.py +100 -0
- memos/mem_scheduler/utils/db_utils.py +50 -0
- memos/mem_scheduler/utils/filter_utils.py +176 -0
- memos/mem_scheduler/utils/metrics.py +125 -0
- memos/mem_scheduler/utils/misc_utils.py +290 -0
- memos/mem_scheduler/utils/monitor_event_utils.py +67 -0
- memos/mem_scheduler/utils/status_tracker.py +229 -0
- memos/mem_scheduler/webservice_modules/__init__.py +0 -0
- memos/mem_scheduler/webservice_modules/rabbitmq_service.py +485 -0
- memos/mem_scheduler/webservice_modules/redis_service.py +380 -0
- memos/mem_user/factory.py +94 -0
- memos/mem_user/mysql_persistent_user_manager.py +271 -0
- memos/mem_user/mysql_user_manager.py +502 -0
- memos/mem_user/persistent_factory.py +98 -0
- memos/mem_user/persistent_user_manager.py +260 -0
- memos/mem_user/redis_persistent_user_manager.py +225 -0
- memos/mem_user/user_manager.py +488 -0
- memos/memories/__init__.py +0 -0
- memos/memories/activation/__init__.py +0 -0
- memos/memories/activation/base.py +42 -0
- memos/memories/activation/item.py +56 -0
- memos/memories/activation/kv.py +292 -0
- memos/memories/activation/vllmkv.py +219 -0
- memos/memories/base.py +19 -0
- memos/memories/factory.py +42 -0
- memos/memories/parametric/__init__.py +0 -0
- memos/memories/parametric/base.py +19 -0
- memos/memories/parametric/item.py +11 -0
- memos/memories/parametric/lora.py +41 -0
- memos/memories/textual/__init__.py +0 -0
- memos/memories/textual/base.py +92 -0
- memos/memories/textual/general.py +236 -0
- memos/memories/textual/item.py +304 -0
- memos/memories/textual/naive.py +187 -0
- memos/memories/textual/prefer_text_memory/__init__.py +0 -0
- memos/memories/textual/prefer_text_memory/adder.py +504 -0
- memos/memories/textual/prefer_text_memory/config.py +106 -0
- memos/memories/textual/prefer_text_memory/extractor.py +221 -0
- memos/memories/textual/prefer_text_memory/factory.py +85 -0
- memos/memories/textual/prefer_text_memory/retrievers.py +177 -0
- memos/memories/textual/prefer_text_memory/spliter.py +132 -0
- memos/memories/textual/prefer_text_memory/utils.py +93 -0
- memos/memories/textual/preference.py +344 -0
- memos/memories/textual/simple_preference.py +161 -0
- memos/memories/textual/simple_tree.py +69 -0
- memos/memories/textual/tree.py +459 -0
- memos/memories/textual/tree_text_memory/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/organize/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/organize/handler.py +184 -0
- memos/memories/textual/tree_text_memory/organize/manager.py +518 -0
- memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +238 -0
- memos/memories/textual/tree_text_memory/organize/reorganizer.py +622 -0
- memos/memories/textual/tree_text_memory/retrieve/__init__.py +0 -0
- memos/memories/textual/tree_text_memory/retrieve/advanced_searcher.py +364 -0
- memos/memories/textual/tree_text_memory/retrieve/bm25_util.py +186 -0
- memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +419 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +270 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +102 -0
- memos/memories/textual/tree_text_memory/retrieve/reasoner.py +61 -0
- memos/memories/textual/tree_text_memory/retrieve/recall.py +497 -0
- memos/memories/textual/tree_text_memory/retrieve/reranker.py +111 -0
- memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +16 -0
- memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py +472 -0
- memos/memories/textual/tree_text_memory/retrieve/searcher.py +848 -0
- memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +135 -0
- memos/memories/textual/tree_text_memory/retrieve/utils.py +54 -0
- memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +387 -0
- memos/memos_tools/dinding_report_bot.py +453 -0
- memos/memos_tools/lockfree_dict.py +120 -0
- memos/memos_tools/notification_service.py +44 -0
- memos/memos_tools/notification_utils.py +142 -0
- memos/memos_tools/singleton.py +174 -0
- memos/memos_tools/thread_safe_dict.py +310 -0
- memos/memos_tools/thread_safe_dict_segment.py +382 -0
- memos/multi_mem_cube/__init__.py +0 -0
- memos/multi_mem_cube/composite_cube.py +86 -0
- memos/multi_mem_cube/single_cube.py +874 -0
- memos/multi_mem_cube/views.py +54 -0
- memos/parsers/__init__.py +0 -0
- memos/parsers/base.py +15 -0
- memos/parsers/factory.py +21 -0
- memos/parsers/markitdown.py +28 -0
- memos/reranker/__init__.py +4 -0
- memos/reranker/base.py +25 -0
- memos/reranker/concat.py +103 -0
- memos/reranker/cosine_local.py +102 -0
- memos/reranker/factory.py +72 -0
- memos/reranker/http_bge.py +324 -0
- memos/reranker/http_bge_strategy.py +327 -0
- memos/reranker/noop.py +19 -0
- memos/reranker/strategies/__init__.py +4 -0
- memos/reranker/strategies/base.py +61 -0
- memos/reranker/strategies/concat_background.py +94 -0
- memos/reranker/strategies/concat_docsource.py +110 -0
- memos/reranker/strategies/dialogue_common.py +109 -0
- memos/reranker/strategies/factory.py +31 -0
- memos/reranker/strategies/single_turn.py +107 -0
- memos/reranker/strategies/singleturn_outmem.py +98 -0
- memos/settings.py +10 -0
- memos/templates/__init__.py +0 -0
- memos/templates/advanced_search_prompts.py +211 -0
- memos/templates/cloud_service_prompt.py +107 -0
- memos/templates/instruction_completion.py +66 -0
- memos/templates/mem_agent_prompts.py +85 -0
- memos/templates/mem_feedback_prompts.py +822 -0
- memos/templates/mem_reader_prompts.py +1096 -0
- memos/templates/mem_reader_strategy_prompts.py +238 -0
- memos/templates/mem_scheduler_prompts.py +626 -0
- memos/templates/mem_search_prompts.py +93 -0
- memos/templates/mos_prompts.py +403 -0
- memos/templates/prefer_complete_prompt.py +735 -0
- memos/templates/tool_mem_prompts.py +139 -0
- memos/templates/tree_reorganize_prompts.py +230 -0
- memos/types/__init__.py +34 -0
- memos/types/general_types.py +151 -0
- memos/types/openai_chat_completion_types/__init__.py +15 -0
- memos/types/openai_chat_completion_types/chat_completion_assistant_message_param.py +56 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_image_param.py +27 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_input_audio_param.py +23 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_param.py +43 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_refusal_param.py +16 -0
- memos/types/openai_chat_completion_types/chat_completion_content_part_text_param.py +16 -0
- memos/types/openai_chat_completion_types/chat_completion_message_custom_tool_call_param.py +27 -0
- memos/types/openai_chat_completion_types/chat_completion_message_function_tool_call_param.py +32 -0
- memos/types/openai_chat_completion_types/chat_completion_message_param.py +18 -0
- memos/types/openai_chat_completion_types/chat_completion_message_tool_call_union_param.py +15 -0
- memos/types/openai_chat_completion_types/chat_completion_system_message_param.py +36 -0
- memos/types/openai_chat_completion_types/chat_completion_tool_message_param.py +30 -0
- memos/types/openai_chat_completion_types/chat_completion_user_message_param.py +34 -0
- memos/utils.py +123 -0
- memos/vec_dbs/__init__.py +0 -0
- memos/vec_dbs/base.py +117 -0
- memos/vec_dbs/factory.py +23 -0
- memos/vec_dbs/item.py +50 -0
- memos/vec_dbs/milvus.py +654 -0
- memos/vec_dbs/qdrant.py +355 -0
|
@@ -0,0 +1,1401 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Chat handler for chat functionality (Class-based version).
|
|
3
|
+
|
|
4
|
+
This module provides a complete implementation of chat handlers,
|
|
5
|
+
consolidating all chat-related logic without depending on mos_server.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import json
|
|
10
|
+
import re
|
|
11
|
+
import time
|
|
12
|
+
import traceback
|
|
13
|
+
|
|
14
|
+
from collections.abc import Generator
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from typing import Any, Literal
|
|
17
|
+
|
|
18
|
+
from fastapi import HTTPException
|
|
19
|
+
from fastapi.responses import StreamingResponse
|
|
20
|
+
|
|
21
|
+
from memos.api.handlers.base_handler import BaseHandler, HandlerDependencies
|
|
22
|
+
from memos.api.product_models import (
|
|
23
|
+
APIADDRequest,
|
|
24
|
+
APIChatCompleteRequest,
|
|
25
|
+
APISearchRequest,
|
|
26
|
+
ChatPlaygroundRequest,
|
|
27
|
+
ChatRequest,
|
|
28
|
+
)
|
|
29
|
+
from memos.context.context import ContextThread
|
|
30
|
+
from memos.mem_os.utils.format_utils import clean_json_response
|
|
31
|
+
from memos.mem_os.utils.reference_utils import (
|
|
32
|
+
prepare_reference_data,
|
|
33
|
+
process_streaming_references_complete,
|
|
34
|
+
)
|
|
35
|
+
from memos.mem_reader.read_multi_modal.utils import detect_lang
|
|
36
|
+
from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem
|
|
37
|
+
from memos.mem_scheduler.schemas.task_schemas import (
|
|
38
|
+
ANSWER_TASK_LABEL,
|
|
39
|
+
QUERY_TASK_LABEL,
|
|
40
|
+
)
|
|
41
|
+
from memos.templates.cloud_service_prompt import get_cloud_chat_prompt
|
|
42
|
+
from memos.templates.mos_prompts import (
|
|
43
|
+
FURTHER_SUGGESTION_PROMPT,
|
|
44
|
+
get_memos_prompt,
|
|
45
|
+
)
|
|
46
|
+
from memos.types import MessageList
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class ChatHandler(BaseHandler):
|
|
50
|
+
"""
|
|
51
|
+
Handler for chat operations.
|
|
52
|
+
|
|
53
|
+
Composes SearchHandler and AddHandler to provide complete chat functionality
|
|
54
|
+
without depending on mos_server. All chat logic is centralized here.
|
|
55
|
+
"""
|
|
56
|
+
|
|
57
|
+
def __init__(
|
|
58
|
+
self,
|
|
59
|
+
dependencies: HandlerDependencies,
|
|
60
|
+
chat_llms: dict[str, Any],
|
|
61
|
+
search_handler=None,
|
|
62
|
+
add_handler=None,
|
|
63
|
+
online_bot=None,
|
|
64
|
+
):
|
|
65
|
+
"""
|
|
66
|
+
Initialize chat handler.
|
|
67
|
+
|
|
68
|
+
Args:
|
|
69
|
+
dependencies: HandlerDependencies instance
|
|
70
|
+
chat_llms: Dictionary mapping model names to LLM instances
|
|
71
|
+
search_handler: Optional SearchHandler instance (created if not provided)
|
|
72
|
+
add_handler: Optional AddHandler instance (created if not provided)
|
|
73
|
+
online_bot: Optional DingDing bot function for notifications
|
|
74
|
+
"""
|
|
75
|
+
super().__init__(dependencies)
|
|
76
|
+
self._validate_dependencies("llm", "naive_mem_cube", "mem_reader", "mem_scheduler")
|
|
77
|
+
|
|
78
|
+
# Lazy import to avoid circular dependencies
|
|
79
|
+
if search_handler is None:
|
|
80
|
+
from memos.api.handlers.search_handler import SearchHandler
|
|
81
|
+
|
|
82
|
+
search_handler = SearchHandler(dependencies)
|
|
83
|
+
|
|
84
|
+
if add_handler is None:
|
|
85
|
+
from memos.api.handlers.add_handler import AddHandler
|
|
86
|
+
|
|
87
|
+
add_handler = AddHandler(dependencies)
|
|
88
|
+
|
|
89
|
+
self.chat_llms = chat_llms
|
|
90
|
+
self.search_handler = search_handler
|
|
91
|
+
self.add_handler = add_handler
|
|
92
|
+
self.online_bot = online_bot
|
|
93
|
+
|
|
94
|
+
# Check if scheduler is enabled
|
|
95
|
+
self.enable_mem_scheduler = (
|
|
96
|
+
hasattr(dependencies, "enable_mem_scheduler") and dependencies.enable_mem_scheduler
|
|
97
|
+
)
|
|
98
|
+
self.dependencies = dependencies
|
|
99
|
+
|
|
100
|
+
def handle_chat_complete(self, chat_req: APIChatCompleteRequest) -> dict[str, Any]:
|
|
101
|
+
"""
|
|
102
|
+
Chat with MemOS for chat complete response (non-streaming).
|
|
103
|
+
|
|
104
|
+
Args:
|
|
105
|
+
chat_req: Chat complete request
|
|
106
|
+
|
|
107
|
+
Returns:
|
|
108
|
+
Dictionary with chat complete response and reasoning
|
|
109
|
+
|
|
110
|
+
Raises:
|
|
111
|
+
HTTPException: If chat fails
|
|
112
|
+
"""
|
|
113
|
+
try:
|
|
114
|
+
# Resolve readable cube IDs (for search)
|
|
115
|
+
readable_cube_ids = chat_req.readable_cube_ids or [chat_req.user_id]
|
|
116
|
+
|
|
117
|
+
# Step 1: Search for relevant memories
|
|
118
|
+
search_req = APISearchRequest(
|
|
119
|
+
query=chat_req.query,
|
|
120
|
+
user_id=chat_req.user_id,
|
|
121
|
+
readable_cube_ids=readable_cube_ids,
|
|
122
|
+
mode=chat_req.mode,
|
|
123
|
+
internet_search=chat_req.internet_search,
|
|
124
|
+
top_k=chat_req.top_k,
|
|
125
|
+
chat_history=chat_req.history,
|
|
126
|
+
session_id=chat_req.session_id,
|
|
127
|
+
include_preference=chat_req.include_preference,
|
|
128
|
+
pref_top_k=chat_req.pref_top_k,
|
|
129
|
+
filter=chat_req.filter,
|
|
130
|
+
)
|
|
131
|
+
|
|
132
|
+
search_response = self.search_handler.handle_search_memories(search_req)
|
|
133
|
+
|
|
134
|
+
# Extract memories from search results
|
|
135
|
+
memories_list = []
|
|
136
|
+
if search_response.data and search_response.data.get("text_mem"):
|
|
137
|
+
text_mem_results = search_response.data["text_mem"]
|
|
138
|
+
if text_mem_results and text_mem_results[0].get("memories"):
|
|
139
|
+
memories_list = text_mem_results[0]["memories"]
|
|
140
|
+
|
|
141
|
+
# Drop internet memories forced
|
|
142
|
+
memories_list = [
|
|
143
|
+
mem
|
|
144
|
+
for mem in memories_list
|
|
145
|
+
if mem.get("metadata", {}).get("memory_type") != "OuterMemory"
|
|
146
|
+
]
|
|
147
|
+
|
|
148
|
+
# Filter memories by threshold
|
|
149
|
+
filtered_memories = self._filter_memories_by_threshold(
|
|
150
|
+
memories_list, chat_req.threshold or 0.5
|
|
151
|
+
)
|
|
152
|
+
|
|
153
|
+
# Step 2: Build system prompt
|
|
154
|
+
system_prompt = self._build_system_prompt(
|
|
155
|
+
query=chat_req.query,
|
|
156
|
+
memories=filtered_memories,
|
|
157
|
+
pref_string=search_response.data.get("pref_string", ""),
|
|
158
|
+
base_prompt=chat_req.system_prompt,
|
|
159
|
+
)
|
|
160
|
+
|
|
161
|
+
# Prepare message history
|
|
162
|
+
history_info = chat_req.history[-20:] if chat_req.history else []
|
|
163
|
+
current_messages = [
|
|
164
|
+
{"role": "system", "content": system_prompt},
|
|
165
|
+
*history_info,
|
|
166
|
+
{"role": "user", "content": chat_req.query},
|
|
167
|
+
]
|
|
168
|
+
|
|
169
|
+
self.logger.info("[Cloud Service] Starting to generate chat complete response...")
|
|
170
|
+
|
|
171
|
+
# Step 3: Generate complete response from LLM
|
|
172
|
+
if chat_req.model_name_or_path and chat_req.model_name_or_path not in self.chat_llms:
|
|
173
|
+
raise HTTPException(
|
|
174
|
+
status_code=400,
|
|
175
|
+
detail=f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}",
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
model = chat_req.model_name_or_path or next(iter(self.chat_llms.keys()))
|
|
179
|
+
|
|
180
|
+
self.logger.info(f"[Cloud Service] Chat Complete Model: {model}")
|
|
181
|
+
strat = time.time()
|
|
182
|
+
response = self.chat_llms[model].generate(current_messages, model_name_or_path=model)
|
|
183
|
+
end = time.time()
|
|
184
|
+
self.logger.info(f"[Cloud Service] Chat Complete Time: {end - strat} seconds")
|
|
185
|
+
|
|
186
|
+
if not response:
|
|
187
|
+
self.logger.error(
|
|
188
|
+
f"[Cloud Service] Chat Complete Failed, LLM response is {response}"
|
|
189
|
+
)
|
|
190
|
+
raise HTTPException(
|
|
191
|
+
status_code=500, detail="Chat complete failed, LLM response is None"
|
|
192
|
+
)
|
|
193
|
+
|
|
194
|
+
self.logger.info(
|
|
195
|
+
f"[Cloud Service] Chat Complete LLM Input: {json.dumps(current_messages, ensure_ascii=False)} Chat Complete LLM Response: {response}"
|
|
196
|
+
)
|
|
197
|
+
|
|
198
|
+
# Step 4: start add after chat asynchronously
|
|
199
|
+
if chat_req.add_message_on_answer:
|
|
200
|
+
# Resolve writable cube IDs (for add)
|
|
201
|
+
writable_cube_ids = chat_req.writable_cube_ids or [chat_req.user_id]
|
|
202
|
+
start = time.time()
|
|
203
|
+
self._start_add_to_memory(
|
|
204
|
+
user_id=chat_req.user_id,
|
|
205
|
+
writable_cube_ids=writable_cube_ids,
|
|
206
|
+
session_id=chat_req.session_id or "default_session",
|
|
207
|
+
query=chat_req.query,
|
|
208
|
+
full_response=response,
|
|
209
|
+
async_mode="async",
|
|
210
|
+
)
|
|
211
|
+
end = time.time()
|
|
212
|
+
self.logger.info(f"[Cloud Service] Chat Add Time: {end - start} seconds")
|
|
213
|
+
|
|
214
|
+
match = re.search(r"<think>([\s\S]*?)</think>", response)
|
|
215
|
+
reasoning_text = match.group(1) if match else None
|
|
216
|
+
final_text = (
|
|
217
|
+
re.sub(r"<think>[\s\S]*?</think>", "", response, count=1) if match else response
|
|
218
|
+
)
|
|
219
|
+
|
|
220
|
+
return {
|
|
221
|
+
"message": "Chat completed successfully",
|
|
222
|
+
"data": {"response": final_text, "reasoning": reasoning_text},
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
except ValueError as err:
|
|
226
|
+
raise HTTPException(status_code=404, detail=str(traceback.format_exc())) from err
|
|
227
|
+
except Exception as err:
|
|
228
|
+
self.logger.error(f"[Cloud Service] Failed to chat complete: {traceback.format_exc()}")
|
|
229
|
+
raise HTTPException(status_code=500, detail=str(traceback.format_exc())) from err
|
|
230
|
+
|
|
231
|
+
def handle_chat_stream(self, chat_req: ChatRequest) -> StreamingResponse:
|
|
232
|
+
"""
|
|
233
|
+
Chat with MemOS via Server-Sent Events (SSE) stream for chat stream response.
|
|
234
|
+
|
|
235
|
+
Args:
|
|
236
|
+
chat_req: Chat stream request
|
|
237
|
+
|
|
238
|
+
Returns:
|
|
239
|
+
StreamingResponse with SSE formatted chat stream
|
|
240
|
+
|
|
241
|
+
Raises:
|
|
242
|
+
HTTPException: If stream initialization fails
|
|
243
|
+
"""
|
|
244
|
+
try:
|
|
245
|
+
|
|
246
|
+
def generate_chat_response() -> Generator[str, None, None]:
|
|
247
|
+
"""Generate chat stream response as SSE stream."""
|
|
248
|
+
try:
|
|
249
|
+
# Resolve readable cube IDs (for search)
|
|
250
|
+
readable_cube_ids = chat_req.readable_cube_ids or (
|
|
251
|
+
[chat_req.mem_cube_id] if chat_req.mem_cube_id else [chat_req.user_id]
|
|
252
|
+
)
|
|
253
|
+
|
|
254
|
+
search_req = APISearchRequest(
|
|
255
|
+
query=chat_req.query,
|
|
256
|
+
user_id=chat_req.user_id,
|
|
257
|
+
readable_cube_ids=readable_cube_ids,
|
|
258
|
+
mode=chat_req.mode,
|
|
259
|
+
internet_search=chat_req.internet_search,
|
|
260
|
+
top_k=chat_req.top_k,
|
|
261
|
+
chat_history=chat_req.history,
|
|
262
|
+
session_id=chat_req.session_id,
|
|
263
|
+
include_preference=chat_req.include_preference,
|
|
264
|
+
pref_top_k=chat_req.pref_top_k,
|
|
265
|
+
filter=chat_req.filter,
|
|
266
|
+
)
|
|
267
|
+
|
|
268
|
+
search_response = self.search_handler.handle_search_memories(search_req)
|
|
269
|
+
|
|
270
|
+
# Use first readable cube ID for scheduler (backward compatibility)
|
|
271
|
+
scheduler_cube_id = (
|
|
272
|
+
readable_cube_ids[0] if readable_cube_ids else chat_req.user_id
|
|
273
|
+
)
|
|
274
|
+
self._send_message_to_scheduler(
|
|
275
|
+
user_id=chat_req.user_id,
|
|
276
|
+
mem_cube_id=scheduler_cube_id,
|
|
277
|
+
query=chat_req.query,
|
|
278
|
+
label=QUERY_TASK_LABEL,
|
|
279
|
+
)
|
|
280
|
+
# Extract memories from search results
|
|
281
|
+
memories_list = []
|
|
282
|
+
if search_response.data and search_response.data.get("text_mem"):
|
|
283
|
+
text_mem_results = search_response.data["text_mem"]
|
|
284
|
+
if text_mem_results and text_mem_results[0].get("memories"):
|
|
285
|
+
memories_list = text_mem_results[0]["memories"]
|
|
286
|
+
|
|
287
|
+
# Drop internet memories forced
|
|
288
|
+
memories_list = [
|
|
289
|
+
mem
|
|
290
|
+
for mem in memories_list
|
|
291
|
+
if mem.get("metadata", {}).get("memory_type") != "OuterMemory"
|
|
292
|
+
]
|
|
293
|
+
|
|
294
|
+
# Filter memories by threshold
|
|
295
|
+
filtered_memories = self._filter_memories_by_threshold(memories_list)
|
|
296
|
+
|
|
297
|
+
# Step 2: Build system prompt with memories
|
|
298
|
+
system_prompt = self._build_system_prompt(
|
|
299
|
+
query=chat_req.query,
|
|
300
|
+
memories=filtered_memories,
|
|
301
|
+
pref_string=search_response.data.get("pref_string", ""),
|
|
302
|
+
base_prompt=chat_req.system_prompt,
|
|
303
|
+
)
|
|
304
|
+
|
|
305
|
+
# Prepare messages
|
|
306
|
+
history_info = chat_req.history[-20:] if chat_req.history else []
|
|
307
|
+
current_messages = [
|
|
308
|
+
{"role": "system", "content": system_prompt},
|
|
309
|
+
*history_info,
|
|
310
|
+
{"role": "user", "content": chat_req.query},
|
|
311
|
+
]
|
|
312
|
+
|
|
313
|
+
self.logger.info(
|
|
314
|
+
f"[Cloud Service] chat stream user_id: {chat_req.user_id}, readable_cube_ids: {readable_cube_ids}, "
|
|
315
|
+
f"current_system_prompt: {system_prompt}"
|
|
316
|
+
)
|
|
317
|
+
|
|
318
|
+
# Step 3: Generate streaming response from LLM
|
|
319
|
+
if (
|
|
320
|
+
chat_req.model_name_or_path
|
|
321
|
+
and chat_req.model_name_or_path not in self.chat_llms
|
|
322
|
+
):
|
|
323
|
+
raise HTTPException(
|
|
324
|
+
status_code=400,
|
|
325
|
+
detail=f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}",
|
|
326
|
+
)
|
|
327
|
+
|
|
328
|
+
model = chat_req.model_name_or_path or next(iter(self.chat_llms.keys()))
|
|
329
|
+
self.logger.info(f"[Cloud Service] Chat Stream Model: {model}")
|
|
330
|
+
|
|
331
|
+
start = time.time()
|
|
332
|
+
response_stream = self.chat_llms[model].generate_stream(
|
|
333
|
+
current_messages, model_name_or_path=model
|
|
334
|
+
)
|
|
335
|
+
|
|
336
|
+
# Stream the response
|
|
337
|
+
buffer = ""
|
|
338
|
+
full_response = ""
|
|
339
|
+
in_think = False
|
|
340
|
+
|
|
341
|
+
for chunk in response_stream:
|
|
342
|
+
if chunk == "<think>":
|
|
343
|
+
in_think = True
|
|
344
|
+
continue
|
|
345
|
+
if chunk == "</think>":
|
|
346
|
+
in_think = False
|
|
347
|
+
continue
|
|
348
|
+
|
|
349
|
+
if in_think:
|
|
350
|
+
chunk_data = f"data: {json.dumps({'type': 'reasoning', 'data': chunk}, ensure_ascii=False)}\n\n"
|
|
351
|
+
yield chunk_data
|
|
352
|
+
continue
|
|
353
|
+
|
|
354
|
+
buffer += chunk
|
|
355
|
+
full_response += chunk
|
|
356
|
+
|
|
357
|
+
chunk_data = f"data: {json.dumps({'type': 'text', 'data': chunk}, ensure_ascii=False)}\n\n"
|
|
358
|
+
yield chunk_data
|
|
359
|
+
|
|
360
|
+
end = time.time()
|
|
361
|
+
self.logger.info(f"[Cloud Service] Chat Stream Time: {end - start} seconds")
|
|
362
|
+
|
|
363
|
+
self.logger.info(
|
|
364
|
+
f"[Cloud Service] Chat Stream LLM Input: {json.dumps(current_messages, ensure_ascii=False)} Chat Stream LLM Response: {full_response}"
|
|
365
|
+
)
|
|
366
|
+
|
|
367
|
+
current_messages.append({"role": "assistant", "content": full_response})
|
|
368
|
+
if chat_req.add_message_on_answer:
|
|
369
|
+
# Resolve writable cube IDs (for add)
|
|
370
|
+
writable_cube_ids = chat_req.writable_cube_ids or (
|
|
371
|
+
[chat_req.mem_cube_id] if chat_req.mem_cube_id else [chat_req.user_id]
|
|
372
|
+
)
|
|
373
|
+
start = time.time()
|
|
374
|
+
self._start_add_to_memory(
|
|
375
|
+
user_id=chat_req.user_id,
|
|
376
|
+
writable_cube_ids=writable_cube_ids,
|
|
377
|
+
session_id=chat_req.session_id or "default_session",
|
|
378
|
+
query=chat_req.query,
|
|
379
|
+
full_response=full_response,
|
|
380
|
+
async_mode="async",
|
|
381
|
+
)
|
|
382
|
+
end = time.time()
|
|
383
|
+
self.logger.info(
|
|
384
|
+
f"[Cloud Service] Chat Stream Add Time: {end - start} seconds"
|
|
385
|
+
)
|
|
386
|
+
except Exception as e:
|
|
387
|
+
self.logger.error(f"[Cloud Service] Error in chat stream: {e}", exc_info=True)
|
|
388
|
+
error_data = f"data: {json.dumps({'type': 'error', 'content': str(traceback.format_exc())})}\n\n"
|
|
389
|
+
yield error_data
|
|
390
|
+
|
|
391
|
+
return StreamingResponse(
|
|
392
|
+
generate_chat_response(),
|
|
393
|
+
media_type="text/event-stream",
|
|
394
|
+
headers={
|
|
395
|
+
"Cache-Control": "no-cache",
|
|
396
|
+
"Connection": "keep-alive",
|
|
397
|
+
"Content-Type": "text/event-stream",
|
|
398
|
+
"Access-Control-Allow-Origin": "*",
|
|
399
|
+
"Access-Control-Allow-Headers": "*",
|
|
400
|
+
"Access-Control-Allow-Methods": "*",
|
|
401
|
+
},
|
|
402
|
+
)
|
|
403
|
+
|
|
404
|
+
except ValueError as err:
|
|
405
|
+
raise HTTPException(status_code=404, detail=str(traceback.format_exc())) from err
|
|
406
|
+
except Exception as err:
|
|
407
|
+
self.logger.error(
|
|
408
|
+
f"[Cloud Service] Failed to start chat stream: {traceback.format_exc()}"
|
|
409
|
+
)
|
|
410
|
+
raise HTTPException(status_code=500, detail=str(traceback.format_exc())) from err
|
|
411
|
+
|
|
412
|
+
def handle_chat_stream_playground(self, chat_req: ChatPlaygroundRequest) -> StreamingResponse:
|
|
413
|
+
"""
|
|
414
|
+
Chat with MemOS via Server-Sent Events (SSE) stream for playground chat stream response.
|
|
415
|
+
|
|
416
|
+
Args:
|
|
417
|
+
chat_req: Chat stream request
|
|
418
|
+
|
|
419
|
+
Returns:
|
|
420
|
+
StreamingResponse with SSE formatted chat stream
|
|
421
|
+
|
|
422
|
+
Raises:
|
|
423
|
+
HTTPException: If stream initialization fails
|
|
424
|
+
"""
|
|
425
|
+
try:
|
|
426
|
+
|
|
427
|
+
def generate_chat_response() -> Generator[str, None, None]:
|
|
428
|
+
"""Generate playground chat stream response as SSE stream."""
|
|
429
|
+
try:
|
|
430
|
+
import time
|
|
431
|
+
|
|
432
|
+
time_start = time.time()
|
|
433
|
+
|
|
434
|
+
# Step 1: Search for memories using search handler
|
|
435
|
+
yield f"data: {json.dumps({'type': 'status', 'data': '0'})}\n\n"
|
|
436
|
+
|
|
437
|
+
# Resolve readable cube IDs (for search)
|
|
438
|
+
readable_cube_ids = chat_req.readable_cube_ids or (
|
|
439
|
+
[chat_req.mem_cube_id] if chat_req.mem_cube_id else [chat_req.user_id]
|
|
440
|
+
)
|
|
441
|
+
# Resolve writable cube IDs (for add)
|
|
442
|
+
writable_cube_ids = chat_req.writable_cube_ids or (
|
|
443
|
+
[chat_req.mem_cube_id] if chat_req.mem_cube_id else [chat_req.user_id]
|
|
444
|
+
)
|
|
445
|
+
|
|
446
|
+
# ====== first search text mem with parse goal ======
|
|
447
|
+
search_req = APISearchRequest(
|
|
448
|
+
query=chat_req.query,
|
|
449
|
+
user_id=chat_req.user_id,
|
|
450
|
+
readable_cube_ids=readable_cube_ids,
|
|
451
|
+
mode="fast",
|
|
452
|
+
internet_search=False,
|
|
453
|
+
top_k=20,
|
|
454
|
+
chat_history=chat_req.history,
|
|
455
|
+
session_id=chat_req.session_id,
|
|
456
|
+
include_preference=True,
|
|
457
|
+
pref_top_k=chat_req.pref_top_k,
|
|
458
|
+
filter=chat_req.filter,
|
|
459
|
+
search_tool_memory=False,
|
|
460
|
+
)
|
|
461
|
+
start_time = time.time()
|
|
462
|
+
search_response = self.search_handler.handle_search_memories(search_req)
|
|
463
|
+
end_time = time.time()
|
|
464
|
+
self.logger.info(
|
|
465
|
+
f"[PLAYGROUND CHAT] first search time: {end_time - start_time}"
|
|
466
|
+
)
|
|
467
|
+
|
|
468
|
+
yield f"data: {json.dumps({'type': 'status', 'data': '1'})}\n\n"
|
|
469
|
+
|
|
470
|
+
# Extract memories from search results (first search)
|
|
471
|
+
memories_list = []
|
|
472
|
+
if search_response.data and search_response.data.get("text_mem"):
|
|
473
|
+
text_mem_results = search_response.data["text_mem"]
|
|
474
|
+
if text_mem_results and text_mem_results[0].get("memories"):
|
|
475
|
+
memories_list = text_mem_results[0]["memories"]
|
|
476
|
+
|
|
477
|
+
# Filter memories by threshold
|
|
478
|
+
filtered_memories = self._filter_memories_by_threshold(memories_list)[:5]
|
|
479
|
+
|
|
480
|
+
# Prepare reference data (first search)
|
|
481
|
+
reference = prepare_reference_data(filtered_memories)
|
|
482
|
+
# get preference string
|
|
483
|
+
pref_string = search_response.data.get("pref_string", "")
|
|
484
|
+
|
|
485
|
+
yield f"data: {json.dumps({'type': 'reference', 'data': reference}, ensure_ascii=False)}\n\n"
|
|
486
|
+
|
|
487
|
+
# Prepare preference markdown string
|
|
488
|
+
if chat_req.include_preference:
|
|
489
|
+
pref_list = search_response.data.get("pref_mem") or []
|
|
490
|
+
pref_memories = pref_list[0].get("memories", []) if pref_list else []
|
|
491
|
+
pref_md_string = self._build_pref_md_string_for_playground(pref_memories)
|
|
492
|
+
yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string}, ensure_ascii=False)}\n\n"
|
|
493
|
+
|
|
494
|
+
# Use first readable cube ID for scheduler (backward compatibility)
|
|
495
|
+
scheduler_cube_id = (
|
|
496
|
+
readable_cube_ids[0] if readable_cube_ids else chat_req.user_id
|
|
497
|
+
)
|
|
498
|
+
self._send_message_to_scheduler(
|
|
499
|
+
user_id=chat_req.user_id,
|
|
500
|
+
mem_cube_id=scheduler_cube_id,
|
|
501
|
+
query=chat_req.query,
|
|
502
|
+
label=QUERY_TASK_LABEL,
|
|
503
|
+
)
|
|
504
|
+
|
|
505
|
+
# parse goal for internet search
|
|
506
|
+
searcher = self.dependencies.searcher
|
|
507
|
+
parsed_goal = searcher.task_goal_parser.parse(
|
|
508
|
+
task_description=chat_req.query,
|
|
509
|
+
context="\n".join([memory.get("memory", "") for memory in memories_list]),
|
|
510
|
+
conversation=chat_req.history,
|
|
511
|
+
mode="fine",
|
|
512
|
+
)
|
|
513
|
+
self.logger.info(f"[PLAYGROUND CHAT] parsed_goal: {parsed_goal}")
|
|
514
|
+
|
|
515
|
+
if chat_req.beginner_guide_step == "first":
|
|
516
|
+
chat_req.internet_search = False
|
|
517
|
+
parsed_goal.internet_search = False
|
|
518
|
+
elif chat_req.beginner_guide_step == "second":
|
|
519
|
+
chat_req.internet_search = True
|
|
520
|
+
parsed_goal.internet_search = True
|
|
521
|
+
|
|
522
|
+
if chat_req.internet_search or parsed_goal.internet_search:
|
|
523
|
+
# internet status
|
|
524
|
+
yield f"data: {json.dumps({'type': 'status', 'data': 'start_internet_search'})}\n\n"
|
|
525
|
+
|
|
526
|
+
# ====== second deep search ======
|
|
527
|
+
search_req = APISearchRequest(
|
|
528
|
+
query=(parsed_goal.rephrased_query or chat_req.query)
|
|
529
|
+
+ (f" {parsed_goal.memories}" if parsed_goal.memories else ""),
|
|
530
|
+
user_id=chat_req.user_id,
|
|
531
|
+
readable_cube_ids=readable_cube_ids,
|
|
532
|
+
mode="fast",
|
|
533
|
+
internet_search=chat_req.internet_search or parsed_goal.internet_search,
|
|
534
|
+
top_k=100, # for playground, we need to search more memories
|
|
535
|
+
chat_history=chat_req.history,
|
|
536
|
+
session_id=chat_req.session_id,
|
|
537
|
+
include_preference=False,
|
|
538
|
+
pref_top_k=chat_req.pref_top_k,
|
|
539
|
+
filter=chat_req.filter,
|
|
540
|
+
search_memory_type="All",
|
|
541
|
+
search_tool_memory=False,
|
|
542
|
+
)
|
|
543
|
+
|
|
544
|
+
self.logger.info(f"[PLAYGROUND CHAT] second search query: {search_req.query}")
|
|
545
|
+
|
|
546
|
+
start_time = time.time()
|
|
547
|
+
search_response = self.search_handler.handle_search_memories(search_req)
|
|
548
|
+
end_time = time.time()
|
|
549
|
+
self.logger.info(
|
|
550
|
+
f"[PLAYGROUND CHAT] second search time: {end_time - start_time}"
|
|
551
|
+
)
|
|
552
|
+
|
|
553
|
+
# for playground, add the query to memory without response
|
|
554
|
+
self._start_add_to_memory(
|
|
555
|
+
user_id=chat_req.user_id,
|
|
556
|
+
writable_cube_ids=writable_cube_ids,
|
|
557
|
+
session_id=chat_req.session_id or "default_session",
|
|
558
|
+
query=chat_req.query,
|
|
559
|
+
full_response=None,
|
|
560
|
+
async_mode="sync",
|
|
561
|
+
)
|
|
562
|
+
|
|
563
|
+
# Extract memories from search results (second search)
|
|
564
|
+
memories_list = []
|
|
565
|
+
if search_response.data and search_response.data.get("text_mem"):
|
|
566
|
+
text_mem_results = search_response.data["text_mem"]
|
|
567
|
+
if text_mem_results and text_mem_results[0].get("memories"):
|
|
568
|
+
memories_list = text_mem_results[0]["memories"]
|
|
569
|
+
|
|
570
|
+
# Filter memories by threshold, min_num is the min number of memories for playground
|
|
571
|
+
second_filtered_memories = self._filter_memories_by_threshold(
|
|
572
|
+
memories_list, min_num=35
|
|
573
|
+
)
|
|
574
|
+
|
|
575
|
+
# dedup and supplement memories
|
|
576
|
+
fast_length = len(filtered_memories)
|
|
577
|
+
supplement_length = max(0, 50 - fast_length) # 50 is the max mem for playground
|
|
578
|
+
second_dedup_memories = self._dedup_and_supplement_memories(
|
|
579
|
+
filtered_memories, second_filtered_memories
|
|
580
|
+
)[:supplement_length]
|
|
581
|
+
filtered_memories = filtered_memories + second_dedup_memories
|
|
582
|
+
|
|
583
|
+
# Prepare remain reference data (second search)
|
|
584
|
+
reference = prepare_reference_data(filtered_memories)
|
|
585
|
+
# get internet reference
|
|
586
|
+
internet_reference = self._get_internet_reference(
|
|
587
|
+
search_response.data.get("text_mem")[0]["memories"]
|
|
588
|
+
)
|
|
589
|
+
yield f"data: {json.dumps({'type': 'reference', 'data': reference}, ensure_ascii=False)}\n\n"
|
|
590
|
+
|
|
591
|
+
# Step 2: Build system prompt with memories
|
|
592
|
+
lang = detect_lang(chat_req.query)
|
|
593
|
+
if pref_string:
|
|
594
|
+
pref_string += (
|
|
595
|
+
"\n# 注意\n- 在思考内容中,不要出现引用序号和id [1,2,3]等标记,否则会导致引用错误。"
|
|
596
|
+
if lang == "zh"
|
|
597
|
+
else "\n#warning\n- In thinking content, do not appear the reference number and id [1,2,3]etc. otherwise it will cause reference error."
|
|
598
|
+
)
|
|
599
|
+
system_prompt = self._build_enhance_system_prompt(
|
|
600
|
+
filtered_memories, pref_string, lang=lang
|
|
601
|
+
)
|
|
602
|
+
|
|
603
|
+
# Prepare messages
|
|
604
|
+
history_info = chat_req.history[-20:] if chat_req.history else []
|
|
605
|
+
current_messages = [
|
|
606
|
+
{"role": "system", "content": system_prompt},
|
|
607
|
+
*history_info,
|
|
608
|
+
{"role": "user", "content": chat_req.query},
|
|
609
|
+
]
|
|
610
|
+
|
|
611
|
+
self.logger.info(
|
|
612
|
+
f"[PLAYGROUND CHAT] user_id: {chat_req.user_id}, readable_cube_ids: {readable_cube_ids}, "
|
|
613
|
+
f"current_system_prompt: {system_prompt}"
|
|
614
|
+
)
|
|
615
|
+
|
|
616
|
+
# Step 3: Generate streaming response from LLM
|
|
617
|
+
try:
|
|
618
|
+
model = next(iter(self.chat_llms.keys()))
|
|
619
|
+
self.logger.info(f"[PLAYGROUND CHAT] Chat Playground Stream Model: {model}")
|
|
620
|
+
start = time.time()
|
|
621
|
+
response_stream = self.chat_llms[model].generate_stream(
|
|
622
|
+
current_messages, model_name_or_path=model
|
|
623
|
+
)
|
|
624
|
+
|
|
625
|
+
# Stream the response
|
|
626
|
+
buffer = ""
|
|
627
|
+
full_response = ""
|
|
628
|
+
in_think = False
|
|
629
|
+
|
|
630
|
+
for chunk in response_stream:
|
|
631
|
+
if chunk == "<think>":
|
|
632
|
+
in_think = True
|
|
633
|
+
yield f"data: {json.dumps({'type': 'status', 'data': 'reasoning'})}\n\n"
|
|
634
|
+
continue
|
|
635
|
+
if chunk == "</think>":
|
|
636
|
+
in_think = False
|
|
637
|
+
yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n"
|
|
638
|
+
continue
|
|
639
|
+
|
|
640
|
+
if in_think:
|
|
641
|
+
chunk_data = f"data: {json.dumps({'type': 'reasoning', 'data': chunk}, ensure_ascii=False)}\n\n"
|
|
642
|
+
yield chunk_data
|
|
643
|
+
continue
|
|
644
|
+
|
|
645
|
+
buffer += chunk
|
|
646
|
+
full_response += chunk
|
|
647
|
+
|
|
648
|
+
# Process buffer to ensure complete reference tags
|
|
649
|
+
processed_chunk, remaining_buffer = (
|
|
650
|
+
process_streaming_references_complete(buffer)
|
|
651
|
+
)
|
|
652
|
+
|
|
653
|
+
if processed_chunk:
|
|
654
|
+
chunk_data = f"data: {json.dumps({'type': 'text', 'data': processed_chunk}, ensure_ascii=False)}\n\n"
|
|
655
|
+
yield chunk_data
|
|
656
|
+
buffer = remaining_buffer
|
|
657
|
+
|
|
658
|
+
# Process any remaining buffer
|
|
659
|
+
if buffer:
|
|
660
|
+
processed_chunk, _ = process_streaming_references_complete(buffer)
|
|
661
|
+
if processed_chunk:
|
|
662
|
+
chunk_data = f"data: {json.dumps({'type': 'text', 'data': processed_chunk}, ensure_ascii=False)}\n\n"
|
|
663
|
+
yield chunk_data
|
|
664
|
+
|
|
665
|
+
end = time.time()
|
|
666
|
+
self.logger.info(
|
|
667
|
+
f"[PLAYGROUND CHAT] Chat Playground Stream Time: {end - start} seconds"
|
|
668
|
+
)
|
|
669
|
+
self.logger.info(
|
|
670
|
+
f"[PLAYGROUND CHAT] Chat Playground Stream LLM Input: {json.dumps(current_messages, ensure_ascii=False)} Chat Playground Stream LLM Response: {full_response}"
|
|
671
|
+
)
|
|
672
|
+
|
|
673
|
+
except Exception as llm_error:
|
|
674
|
+
# Log the error
|
|
675
|
+
self.logger.error(
|
|
676
|
+
f"[PLAYGROUND CHAT] Error during LLM generation: {llm_error}",
|
|
677
|
+
exc_info=True,
|
|
678
|
+
)
|
|
679
|
+
# Send error message to client
|
|
680
|
+
error_msg = f"模型生成错误: {llm_error!s}"
|
|
681
|
+
yield f"data: {json.dumps({'type': 'error', 'data': error_msg}, ensure_ascii=False)}\n\n"
|
|
682
|
+
# Re-raise to let outer exception handler process it
|
|
683
|
+
raise
|
|
684
|
+
|
|
685
|
+
if chat_req.internet_search or parsed_goal.internet_search:
|
|
686
|
+
# Yield internet reference after text response
|
|
687
|
+
yield f"data: {json.dumps({'type': 'internet_reference', 'data': internet_reference}, ensure_ascii=False)}\n\n"
|
|
688
|
+
|
|
689
|
+
# Calculate timing
|
|
690
|
+
time_end = time.time()
|
|
691
|
+
speed_improvement = round(float((len(system_prompt) / 2) * 0.0048 + 44.5), 1)
|
|
692
|
+
total_time = round(float(time_end - time_start), 1)
|
|
693
|
+
|
|
694
|
+
yield f"data: {json.dumps({'type': 'time', 'data': {'total_time': total_time, 'speed_improvement': f'{speed_improvement}%'}})}\n\n"
|
|
695
|
+
|
|
696
|
+
# Get further suggestion
|
|
697
|
+
current_messages.append({"role": "assistant", "content": full_response})
|
|
698
|
+
further_suggestion = self._get_further_suggestion(current_messages)
|
|
699
|
+
self.logger.info(f"[PLAYGROUND CHAT] further_suggestion: {further_suggestion}")
|
|
700
|
+
yield f"data: {json.dumps({'type': 'suggestion', 'data': further_suggestion}, ensure_ascii=False)}\n\n"
|
|
701
|
+
|
|
702
|
+
yield f"data: {json.dumps({'type': 'end'})}\n\n"
|
|
703
|
+
|
|
704
|
+
# Use first readable cube ID for post-processing (backward compatibility)
|
|
705
|
+
scheduler_cube_id = (
|
|
706
|
+
readable_cube_ids[0] if readable_cube_ids else chat_req.user_id
|
|
707
|
+
)
|
|
708
|
+
self._start_post_chat_processing(
|
|
709
|
+
user_id=chat_req.user_id,
|
|
710
|
+
cube_id=scheduler_cube_id,
|
|
711
|
+
session_id=chat_req.session_id or "default_session",
|
|
712
|
+
query=chat_req.query,
|
|
713
|
+
full_response=full_response,
|
|
714
|
+
system_prompt=system_prompt,
|
|
715
|
+
time_start=time_start,
|
|
716
|
+
time_end=time_end,
|
|
717
|
+
speed_improvement=speed_improvement,
|
|
718
|
+
current_messages=current_messages,
|
|
719
|
+
)
|
|
720
|
+
self._start_add_to_memory(
|
|
721
|
+
user_id=chat_req.user_id,
|
|
722
|
+
writable_cube_ids=writable_cube_ids,
|
|
723
|
+
session_id=chat_req.session_id or "default_session",
|
|
724
|
+
query=chat_req.query,
|
|
725
|
+
full_response=full_response,
|
|
726
|
+
async_mode="sync",
|
|
727
|
+
)
|
|
728
|
+
|
|
729
|
+
except Exception as e:
|
|
730
|
+
self.logger.error(
|
|
731
|
+
f"[PLAYGROUND CHAT] Error in playground chat stream: {e}", exc_info=True
|
|
732
|
+
)
|
|
733
|
+
error_data = f"data: {json.dumps({'type': 'error', 'content': str(traceback.format_exc())})}\n\n"
|
|
734
|
+
yield error_data
|
|
735
|
+
|
|
736
|
+
return StreamingResponse(
|
|
737
|
+
generate_chat_response(),
|
|
738
|
+
media_type="text/event-stream",
|
|
739
|
+
headers={
|
|
740
|
+
"Cache-Control": "no-cache",
|
|
741
|
+
"Connection": "keep-alive",
|
|
742
|
+
"Content-Type": "text/event-stream",
|
|
743
|
+
"Access-Control-Allow-Origin": "*",
|
|
744
|
+
"Access-Control-Allow-Headers": "*",
|
|
745
|
+
"Access-Control-Allow-Methods": "*",
|
|
746
|
+
},
|
|
747
|
+
)
|
|
748
|
+
|
|
749
|
+
except ValueError as err:
|
|
750
|
+
raise HTTPException(status_code=404, detail=str(traceback.format_exc())) from err
|
|
751
|
+
except Exception as err:
|
|
752
|
+
self.logger.error(
|
|
753
|
+
f"[PLAYGROUND CHAT] Failed to start playground chat stream: {traceback.format_exc()}"
|
|
754
|
+
)
|
|
755
|
+
raise HTTPException(status_code=500, detail=str(traceback.format_exc())) from err
|
|
756
|
+
|
|
757
|
+
def _dedup_and_supplement_memories(
|
|
758
|
+
self, first_filtered_memories: list, second_filtered_memories: list
|
|
759
|
+
) -> list:
|
|
760
|
+
"""
|
|
761
|
+
Remove memories from second_filtered_memories whose content already exists in
|
|
762
|
+
first_filtered_memories, return the remaining list.
|
|
763
|
+
"""
|
|
764
|
+
|
|
765
|
+
def _norm(text: str) -> str:
|
|
766
|
+
# Use normalized text as the dedup key; keep original text in the payload.
|
|
767
|
+
return " ".join(text.split())
|
|
768
|
+
|
|
769
|
+
first_memory_texts = {_norm(memory.get("memory", "")) for memory in first_filtered_memories}
|
|
770
|
+
|
|
771
|
+
remaining_memories = []
|
|
772
|
+
for memory in second_filtered_memories:
|
|
773
|
+
key = _norm(memory.get("memory", ""))
|
|
774
|
+
if key in first_memory_texts:
|
|
775
|
+
continue
|
|
776
|
+
first_memory_texts.add(key)
|
|
777
|
+
remaining_memories.append(memory)
|
|
778
|
+
return remaining_memories
|
|
779
|
+
|
|
780
|
+
def _get_internet_reference(
|
|
781
|
+
self, search_response: list[dict[str, any]]
|
|
782
|
+
) -> list[dict[str, any]]:
|
|
783
|
+
"""Get internet reference from search response."""
|
|
784
|
+
unique_set = set()
|
|
785
|
+
result = []
|
|
786
|
+
|
|
787
|
+
for item in search_response:
|
|
788
|
+
meta = item.get("metadata", {})
|
|
789
|
+
if meta.get("source") == "web" and meta.get("internet_info"):
|
|
790
|
+
info = meta.get("internet_info")
|
|
791
|
+
key = json.dumps(info, sort_keys=True)
|
|
792
|
+
if key not in unique_set:
|
|
793
|
+
unique_set.add(key)
|
|
794
|
+
result.append(info)
|
|
795
|
+
return result
|
|
796
|
+
|
|
797
|
+
def _build_pref_md_string_for_playground(self, pref_mem_list: list[any]) -> str:
|
|
798
|
+
"""Build preference markdown string for playground."""
|
|
799
|
+
explicit = []
|
|
800
|
+
implicit = []
|
|
801
|
+
for pref_mem in pref_mem_list:
|
|
802
|
+
if pref_mem["metadata"]["preference_type"] == "explicit_preference":
|
|
803
|
+
explicit.append(
|
|
804
|
+
{
|
|
805
|
+
"content": pref_mem["metadata"]["preference"],
|
|
806
|
+
"reasoning": pref_mem["metadata"]["reasoning"],
|
|
807
|
+
}
|
|
808
|
+
)
|
|
809
|
+
elif pref_mem["metadata"]["preference_type"] == "implicit_preference":
|
|
810
|
+
implicit.append(
|
|
811
|
+
{
|
|
812
|
+
"content": pref_mem["metadata"]["preference"],
|
|
813
|
+
"reasoning": pref_mem["metadata"]["reasoning"],
|
|
814
|
+
}
|
|
815
|
+
)
|
|
816
|
+
|
|
817
|
+
explicit_md = "\n\n".join(
|
|
818
|
+
[
|
|
819
|
+
f"显性偏好 {i + 1}:\n- 抽取内容: {pref['content']}\n- 抽取理由: {pref['reasoning']}"
|
|
820
|
+
for i, pref in enumerate(explicit)
|
|
821
|
+
]
|
|
822
|
+
)
|
|
823
|
+
implicit_md = "\n\n".join(
|
|
824
|
+
[
|
|
825
|
+
f"隐性偏好 {i + 1}:\n- 抽取内容: {pref['content']}\n- 抽取理由: {pref['reasoning']}"
|
|
826
|
+
for i, pref in enumerate(implicit)
|
|
827
|
+
]
|
|
828
|
+
)
|
|
829
|
+
|
|
830
|
+
return f"{explicit_md}\n\n{implicit_md}"
|
|
831
|
+
|
|
832
|
+
def _build_system_prompt(
|
|
833
|
+
self,
|
|
834
|
+
query: str,
|
|
835
|
+
memories: list | None = None,
|
|
836
|
+
pref_string: str | None = None,
|
|
837
|
+
base_prompt: str | None = None,
|
|
838
|
+
**kwargs,
|
|
839
|
+
) -> str:
|
|
840
|
+
"""Build system prompt with optional memories context."""
|
|
841
|
+
if base_prompt is None:
|
|
842
|
+
lang = detect_lang(query)
|
|
843
|
+
base_prompt = get_cloud_chat_prompt(lang=lang)
|
|
844
|
+
|
|
845
|
+
memory_context = ""
|
|
846
|
+
if memories:
|
|
847
|
+
memory_list = []
|
|
848
|
+
for i, memory in enumerate(memories, 1):
|
|
849
|
+
text_memory = memory.get("memory", "")
|
|
850
|
+
memory_list.append(f"{i}. {text_memory}")
|
|
851
|
+
memory_context = "\n".join(memory_list)
|
|
852
|
+
if pref_string:
|
|
853
|
+
memory_context += f"\n\n{pref_string}"
|
|
854
|
+
|
|
855
|
+
if "{memories}" in base_prompt:
|
|
856
|
+
return base_prompt.format(memories=memory_context)
|
|
857
|
+
elif base_prompt and memories:
|
|
858
|
+
# For backward compatibility, append memories if no placeholder is found
|
|
859
|
+
memory_context_with_header = "\n\n## Fact Memories:\n" + memory_context
|
|
860
|
+
return base_prompt + memory_context_with_header
|
|
861
|
+
return base_prompt
|
|
862
|
+
|
|
863
|
+
def _build_enhance_system_prompt(
|
|
864
|
+
self,
|
|
865
|
+
memories_list: list,
|
|
866
|
+
pref_string: str = "",
|
|
867
|
+
lang: str = "en",
|
|
868
|
+
tone: str = "friendly",
|
|
869
|
+
verbosity: str = "mid",
|
|
870
|
+
) -> str:
|
|
871
|
+
"""
|
|
872
|
+
Build enhanced system prompt with memories (for streaming response).
|
|
873
|
+
|
|
874
|
+
Args:
|
|
875
|
+
memories_list: List of memory items
|
|
876
|
+
pref_string: Preference string
|
|
877
|
+
tone: Tone of the prompt
|
|
878
|
+
verbosity: Verbosity level
|
|
879
|
+
|
|
880
|
+
Returns:
|
|
881
|
+
System prompt string
|
|
882
|
+
"""
|
|
883
|
+
now = datetime.now()
|
|
884
|
+
formatted_date = now.strftime("%Y-%m-%d %H:%M (%A)")
|
|
885
|
+
sys_body = get_memos_prompt(
|
|
886
|
+
date=formatted_date, tone=tone, verbosity=verbosity, mode="enhance", lang=lang
|
|
887
|
+
)
|
|
888
|
+
|
|
889
|
+
# Format memories
|
|
890
|
+
mem_block_o, mem_block_p = self._format_mem_block(memories_list)
|
|
891
|
+
|
|
892
|
+
return (
|
|
893
|
+
sys_body
|
|
894
|
+
+ "\n\n# Memories\n## PersonalMemory (ordered)\n"
|
|
895
|
+
+ mem_block_p
|
|
896
|
+
+ "\n## OuterMemory (from Internet Search, ordered)\n"
|
|
897
|
+
+ mem_block_o
|
|
898
|
+
+ f"\n\n{pref_string}"
|
|
899
|
+
)
|
|
900
|
+
|
|
901
|
+
def _format_mem_block(
|
|
902
|
+
self, memories_all: list, max_items: int = 20, max_chars_each: int = 320
|
|
903
|
+
) -> tuple[str, str]:
|
|
904
|
+
"""
|
|
905
|
+
Format memory block for prompt.
|
|
906
|
+
|
|
907
|
+
Args:
|
|
908
|
+
memories_all: List of memory items
|
|
909
|
+
max_items: Maximum number of items to format
|
|
910
|
+
max_chars_each: Maximum characters per item
|
|
911
|
+
|
|
912
|
+
Returns:
|
|
913
|
+
Tuple of (outer_memory_block, personal_memory_block)
|
|
914
|
+
"""
|
|
915
|
+
if not memories_all:
|
|
916
|
+
return "(none)", "(none)"
|
|
917
|
+
|
|
918
|
+
lines_o = []
|
|
919
|
+
lines_p = []
|
|
920
|
+
|
|
921
|
+
for idx, m in enumerate(memories_all[:max_items], 1):
|
|
922
|
+
mid = m.get("id", "").split("-")[0] if m.get("id") else f"mem_{idx}"
|
|
923
|
+
memory_content = m.get("memory", "")
|
|
924
|
+
metadata = m.get("metadata", {})
|
|
925
|
+
memory_type = metadata.get("memory_type", "")
|
|
926
|
+
created_time = metadata.get("updated_at", "") or metadata.get("created_at", "")
|
|
927
|
+
|
|
928
|
+
# format time to YYYY-MM-DD HH:MM (ISO 8601 -> YYYY-MM-DD HH:MM)
|
|
929
|
+
if created_time and isinstance(created_time, str):
|
|
930
|
+
try:
|
|
931
|
+
dt = datetime.fromisoformat(created_time)
|
|
932
|
+
created_time = dt.strftime("%Y-%m-%d %H:%M")
|
|
933
|
+
except ValueError:
|
|
934
|
+
pass # keep original value
|
|
935
|
+
|
|
936
|
+
tag = "O" if "Outer" in str(memory_type) else "P"
|
|
937
|
+
txt = memory_content.replace("\n", " ").strip()
|
|
938
|
+
if len(txt) > max_chars_each:
|
|
939
|
+
txt = txt[: max_chars_each - 1] + "…"
|
|
940
|
+
|
|
941
|
+
mid = mid or f"mem_{idx}"
|
|
942
|
+
if tag == "O":
|
|
943
|
+
lines_o.append(f"[{idx}:{mid}] :: [{tag}] {txt}\n")
|
|
944
|
+
elif tag == "P":
|
|
945
|
+
txt = f"(CreatedTime: {created_time}) {txt}"
|
|
946
|
+
lines_p.append(f"[{idx}:{mid}] :: [{tag}] {txt}")
|
|
947
|
+
|
|
948
|
+
return "\n".join(lines_o), "\n".join(lines_p)
|
|
949
|
+
|
|
950
|
+
    def _filter_memories_by_threshold(
        self,
        memories: list,
        threshold: float = 0.30,
        min_num: int = 3,
        memory_type: Literal["OuterMemory"] = "OuterMemory",
    ) -> list:
        """
        Filter memories by threshold and type.

        Args:
            memories: List of memory items
            threshold: Relevance threshold
            min_num: Minimum number of memories to keep
            memory_type: Memory type to filter

        Returns:
            Filtered list of memories
        """
        if not memories:
            return []

        # Handle dict format (from search results)
        def get_relativity(m):
            if isinstance(m, dict):
                return m.get("metadata", {}).get("relativity", 0.0)
            return getattr(getattr(m, "metadata", None), "relativity", 0.0)

        def get_memory_type(m):
            if isinstance(m, dict):
                return m.get("metadata", {}).get("memory_type", "")
            return getattr(getattr(m, "metadata", None), "memory_type", "")

        sorted_memories = sorted(memories, key=get_relativity, reverse=True)
        filtered_person = [m for m in memories if get_memory_type(m) != memory_type]
        filtered_outer = [m for m in memories if get_memory_type(m) == memory_type]

        filtered = []
        per_memory_count = 0

        for m in sorted_memories:
            if get_relativity(m) >= threshold:
                if get_memory_type(m) != memory_type:
                    per_memory_count += 1
                filtered.append(m)

        if len(filtered) < min_num:
            filtered = filtered_person[:min_num] + filtered_outer[:min_num]
        else:
            if per_memory_count < min_num:
                filtered += filtered_person[per_memory_count:min_num]

        filtered_memory = sorted(filtered, key=get_relativity, reverse=True)
        return filtered_memory

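# --- Illustrative usage sketch (not part of the package source) ---
# `handler` is a hypothetical instance of this class; relativity scores are made up.
mems = [
    {"memory": "p1", "metadata": {"memory_type": "LongTermMemory", "relativity": 0.9}},
    {"memory": "o1", "metadata": {"memory_type": "OuterMemory", "relativity": 0.6}},
    {"memory": "p2", "metadata": {"memory_type": "LongTermMemory", "relativity": 0.1}},
]
kept = handler._filter_memories_by_threshold(mems, threshold=0.30, min_num=2)
# p1 and o1 clear the 0.30 threshold, but only one personal memory does, so the
# method backfills personal memories up to min_num (adding p2) and re-sorts by
# relativity: kept == [p1, o1, p2].
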
    def _get_further_suggestion(
        self,
        current_messages: MessageList,
    ) -> list[str]:
        """Get further suggestions based on the current messages."""
        try:
            dialogue_info = "\n".join(
                [f"{msg['role']}: {msg['content']}" for msg in current_messages[-2:]]
            )
            further_suggestion_prompt = FURTHER_SUGGESTION_PROMPT.format(dialogue=dialogue_info)
            message_list = [{"role": "system", "content": further_suggestion_prompt}]
            response = self.llm.generate(message_list)
            clean_response = clean_json_response(response)
            response_json = json.loads(clean_response)
            return response_json["query"]
        except Exception as e:
            self.logger.error(f"Error getting further suggestion: {e}", exc_info=True)
            return []

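# --- Illustrative note (not part of the package source) ---
# The method expects the LLM to answer with JSON whose "query" field is a list
# of follow-up suggestions, e.g. {"query": ["...", "..."]}; clean_json_response
# strips any markdown fences before json.loads, and any failure falls back to [].
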
    def _extract_references_from_response(self, response: str) -> tuple[str, list[dict]]:
        """Extract reference information from the response and return clean text."""
        import re

        try:
            references = []
            # Pattern to match [ref_number:memory_id] markers
            pattern = r"\[(\d+):([^\]]+)\]"

            matches = re.findall(pattern, response)
            for ref_number, memory_id in matches:
                references.append({"memory_id": memory_id, "reference_number": int(ref_number)})

            # Remove all reference markers from the text to get clean text
            clean_text = re.sub(pattern, "", response)

            # Clean up any extra whitespace that might be left after removing markers
            clean_text = re.sub(r"\s+", " ", clean_text).strip()

            return clean_text, references
        except Exception as e:
            self.logger.error(f"Error extracting references from response: {e}", exc_info=True)
            return response, []

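# --- Illustrative sketch (not part of the package source) ---
# Runnable on its own; shows how the [ref_number:memory_id] markers are parsed.
import re

pattern = r"\[(\d+):([^\]]+)\]"
response = "Paris is the capital of France [1:mem_a1b2] and hosts the Louvre [2:mem_c3d4]."
references = [
    {"memory_id": mid, "reference_number": int(num)}
    for num, mid in re.findall(pattern, response)
]
clean_text = re.sub(r"\s+", " ", re.sub(pattern, "", response)).strip()
# references == [{"memory_id": "mem_a1b2", "reference_number": 1},
#                {"memory_id": "mem_c3d4", "reference_number": 2}]
# clean_text == "Paris is the capital of France and hosts the Louvre ."
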
    def _extract_struct_data_from_history(self, chat_data: list[dict]) -> dict:
        """
        Extract structured message data from chat history.

        Args:
            chat_data: List of chat messages

        Returns:
            Dictionary with system, memory, and chat_history
        """
        system_content = ""
        memory_content = ""
        chat_history = []

        for item in chat_data:
            role = item.get("role")
            content = item.get("content", "")
            if role == "system":
                parts = content.split("# Memories", 1)
                system_content = parts[0].strip()
                if len(parts) > 1:
                    memory_content = "# Memories" + parts[1].strip()
            elif role in ("user", "assistant"):
                chat_history.append({"role": role, "content": content})

        if chat_history and chat_history[-1]["role"] == "assistant":
            if len(chat_history) >= 2 and chat_history[-2]["role"] == "user":
                chat_history = chat_history[:-2]
            else:
                chat_history = chat_history[:-1]

        return {"system": system_content, "memory": memory_content, "chat_history": chat_history}

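# --- Illustrative sketch (not part of the package source) ---
# Given a history whose system message embeds memories under a "# Memories"
# heading, e.g.
#   [{"role": "system", "content": "You are MemOS.\n# Memories\n[1:m1] likes tea"},
#    {"role": "user", "content": "hi"},
#    {"role": "assistant", "content": "hello"}]
# the method returns system == "You are MemOS.",
# memory == "# Memories[1:m1] likes tea" (the remainder is stripped, so no
# separator is re-inserted after the heading), and chat_history == [] because
# the trailing user/assistant pair is dropped.
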
    def _send_message_to_scheduler(
        self,
        user_id: str,
        mem_cube_id: str,
        query: str,
        label: str,
    ) -> None:
        """
        Send message to scheduler.

        Args:
            user_id: User ID
            mem_cube_id: Memory cube ID
            query: Query content
            label: Message label
        """
        try:
            message_item = ScheduleMessageItem(
                user_id=user_id,
                mem_cube_id=mem_cube_id,
                label=label,
                content=query,
                timestamp=datetime.utcnow(),
            )
            self.mem_scheduler.submit_messages(messages=[message_item])
            self.logger.info(f"Sent message to scheduler with label: {label}")
        except Exception as e:
            self.logger.error(f"Failed to send message to scheduler: {e}", exc_info=True)

    async def _add_conversation_to_memory(
        self,
        user_id: str,
        writable_cube_ids: list[str],
        session_id: str,
        query: str,
        clean_response: str | None = None,
        async_mode: Literal["async", "sync"] = "sync",
    ) -> None:
        messages = [
            {
                "role": "user",
                "content": query,
                "chat_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            }
        ]
        if clean_response:
            messages.append(
                {
                    "role": "assistant",
                    "content": clean_response,
                    "chat_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                }
            )
        add_req = APIADDRequest(
            user_id=user_id,
            writable_cube_ids=writable_cube_ids,
            session_id=session_id,
            messages=messages,
            async_mode=async_mode,
        )

        self.add_handler.handle_add_memories(add_req)

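# --- Illustrative note (not part of the package source) ---
# With clean_response set, the add handler receives a two-turn payload such as
#   [{"role": "user", "content": "<query>", "chat_time": "2025-01-01 12:00:00"},
#    {"role": "assistant", "content": "<clean_response>", "chat_time": "2025-01-01 12:00:00"}]
# wrapped in an APIADDRequest; async_mode ("async" or "sync") is forwarded
# unchanged to the add handler.
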
    async def _post_chat_processing(
        self,
        user_id: str,
        cube_id: str,
        session_id: str,
        query: str,
        full_response: str,
        system_prompt: str,
        time_start: float,
        time_end: float,
        speed_improvement: float,
        current_messages: list,
    ) -> None:
        """
        Asynchronous post-chat processing with complete functionality.

        Includes:
        - Reference extraction
        - DingDing notification
        - Scheduler messaging
        - Memory addition

        Args:
            user_id: User ID
            cube_id: Memory cube ID
            session_id: Session ID
            query: User query
            full_response: Full LLM response
            system_prompt: System prompt used
            time_start: Start timestamp
            time_end: End timestamp
            speed_improvement: Speed improvement metric
            current_messages: Current message history
        """
        try:
            self.logger.info(
                f"user_id: {user_id}, cube_id: {cube_id}, current_messages: {current_messages}"
            )
            self.logger.info(
                f"user_id: {user_id}, cube_id: {cube_id}, full_response: {full_response}"
            )

            # Extract references and clean response
            clean_response, extracted_references = self._extract_references_from_response(
                full_response
            )
            struct_message = self._extract_struct_data_from_history(current_messages)
            self.logger.info(f"Extracted {len(extracted_references)} references from response")

            # Send DingDing notification if enabled
            if self.online_bot:
                self.logger.info("Online Bot Open!")
                try:
                    from memos.memos_tools.notification_utils import (
                        send_online_bot_notification_async,
                    )

                    # Prepare notification data
                    chat_data = {"query": query, "user_id": user_id, "cube_id": cube_id}
                    chat_data.update(
                        {
                            "memory": struct_message["memory"],
                            "chat_history": struct_message["chat_history"],
                            "full_response": full_response,
                        }
                    )

                    system_data = {
                        "references": extracted_references,
                        "time_start": time_start,
                        "time_end": time_end,
                        "speed_improvement": speed_improvement,
                    }

                    emoji_config = {"chat": "💬", "system_info": "📊"}

                    await send_online_bot_notification_async(
                        online_bot=self.online_bot,
                        header_name="MemOS Chat Report",
                        sub_title_name="chat_with_references",
                        title_color="#00956D",
                        other_data1=chat_data,
                        other_data2=system_data,
                        emoji=emoji_config,
                    )
                except Exception as e:
                    self.logger.warning(f"Failed to send chat notification (async): {e}")

            # Send answer to scheduler
            self._send_message_to_scheduler(
                user_id=user_id, mem_cube_id=cube_id, query=clean_response, label=ANSWER_TASK_LABEL
            )

            self.logger.info(f"Post-chat processing completed for user {user_id}")

        except Exception as e:
            self.logger.error(
                f"Error in post-chat processing for user {user_id}: {e}", exc_info=True
            )

    def _start_post_chat_processing(
        self,
        user_id: str,
        cube_id: str,
        session_id: str,
        query: str,
        full_response: str,
        system_prompt: str,
        time_start: float,
        time_end: float,
        speed_improvement: float,
        current_messages: list,
    ) -> None:
        """
        Start asynchronous post-chat processing in a background thread.

        Args:
            user_id: User ID
            cube_id: Memory cube ID
            session_id: Session ID
            query: User query
            full_response: Full LLM response
            system_prompt: System prompt used
            time_start: Start timestamp
            time_end: End timestamp
            speed_improvement: Speed improvement metric
            current_messages: Current message history
        """

        def run_async_in_thread():
            """Run the post-chat coroutine to completion on a fresh event loop in this thread."""
            try:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    loop.run_until_complete(
                        self._post_chat_processing(
                            user_id=user_id,
                            cube_id=cube_id,
                            session_id=session_id,
                            query=query,
                            full_response=full_response,
                            system_prompt=system_prompt,
                            time_start=time_start,
                            time_end=time_end,
                            speed_improvement=speed_improvement,
                            current_messages=current_messages,
                        )
                    )
                finally:
                    loop.close()
            except Exception as e:
                self.logger.error(
                    f"Error in thread-based post-chat processing for user {user_id}: {e}",
                    exc_info=True,
                )

        try:
            # Try to get the current event loop
            asyncio.get_running_loop()
            # Create task and store reference to prevent garbage collection
            task = asyncio.create_task(
                self._post_chat_processing(
                    user_id=user_id,
                    cube_id=cube_id,
                    session_id=session_id,
                    query=query,
                    full_response=full_response,
                    system_prompt=system_prompt,
                    time_start=time_start,
                    time_end=time_end,
                    speed_improvement=speed_improvement,
                    current_messages=current_messages,
                )
            )
            # Add exception handling for the background task
            task.add_done_callback(
                lambda t: self.logger.error(
                    f"Error in background post-chat processing for user {user_id}: {t.exception()}",
                    exc_info=True,
                )
                if t.exception()
                else None
            )
        except RuntimeError:
            # No event loop, run in a new thread with context propagation
            thread = ContextThread(
                target=run_async_in_thread,
                name=f"PostChatProcessing-{user_id}",
                daemon=True,
            )
            thread.start()

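# --- Illustrative sketch (not part of the package source) ---
# Minimal standalone version of the dispatch pattern used above (and reused by
# _start_add_to_memory below): schedule the coroutine on the running event loop
# when one exists, otherwise drive it on a fresh loop in a daemon thread.
import asyncio
import threading


async def work() -> None:
    await asyncio.sleep(0)  # stand-in for the real post-processing coroutine


def fire_and_forget(coro_factory) -> None:
    try:
        loop = asyncio.get_running_loop()
        task = loop.create_task(coro_factory())
        # Retrieve the exception so failures are not silently dropped.
        task.add_done_callback(lambda t: t.cancelled() or t.exception())
    except RuntimeError:
        # No running loop in this thread: run to completion on a private loop.
        threading.Thread(target=lambda: asyncio.run(coro_factory()), daemon=True).start()


fire_and_forget(work)  # safe to call from both sync and async call sites
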
    def _start_add_to_memory(
        self,
        user_id: str,
        writable_cube_ids: list[str],
        session_id: str,
        query: str,
        full_response: str | None = None,
        async_mode: Literal["async", "sync"] = "sync",
    ) -> None:
        def run_async_in_thread():
            try:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    clean_response = full_response
                    if full_response:
                        clean_response, _ = self._extract_references_from_response(full_response)
                    loop.run_until_complete(
                        self._add_conversation_to_memory(
                            user_id=user_id,
                            writable_cube_ids=writable_cube_ids,
                            session_id=session_id,
                            query=query,
                            clean_response=clean_response,
                            async_mode=async_mode,
                        )
                    )
                finally:
                    loop.close()
            except Exception as e:
                self.logger.error(
                    f"Error in thread-based add to memory for user {user_id}: {e}",
                    exc_info=True,
                )

        try:
            asyncio.get_running_loop()
            clean_response = full_response
            if full_response:
                clean_response, _ = self._extract_references_from_response(full_response)
            task = asyncio.create_task(
                self._add_conversation_to_memory(
                    user_id=user_id,
                    writable_cube_ids=writable_cube_ids,
                    session_id=session_id,
                    query=query,
                    clean_response=clean_response,
                    async_mode=async_mode,
                )
            )
            task.add_done_callback(
                lambda t: self.logger.error(
                    f"Error in background add to memory for user {user_id}: {t.exception()}",
                    exc_info=True,
                )
                if t.exception()
                else None
            )
        except RuntimeError:
            thread = ContextThread(
                target=run_async_in_thread,
                name=f"AddToMemory-{user_id}",
                daemon=True,
            )
            thread.start()