AstrBot: astrbot-3.5.6-py3-none-any.whl → astrbot-4.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/api/__init__.py +16 -4
- astrbot/api/all.py +2 -1
- astrbot/api/event/__init__.py +5 -6
- astrbot/api/event/filter/__init__.py +37 -34
- astrbot/api/platform/__init__.py +7 -8
- astrbot/api/provider/__init__.py +8 -7
- astrbot/api/star/__init__.py +3 -4
- astrbot/api/util/__init__.py +2 -2
- astrbot/cli/__init__.py +1 -0
- astrbot/cli/__main__.py +18 -197
- astrbot/cli/commands/__init__.py +6 -0
- astrbot/cli/commands/cmd_conf.py +209 -0
- astrbot/cli/commands/cmd_init.py +56 -0
- astrbot/cli/commands/cmd_plug.py +245 -0
- astrbot/cli/commands/cmd_run.py +62 -0
- astrbot/cli/utils/__init__.py +18 -0
- astrbot/cli/utils/basic.py +76 -0
- astrbot/cli/utils/plugin.py +246 -0
- astrbot/cli/utils/version_comparator.py +90 -0
- astrbot/core/__init__.py +17 -19
- astrbot/core/agent/agent.py +14 -0
- astrbot/core/agent/handoff.py +38 -0
- astrbot/core/agent/hooks.py +30 -0
- astrbot/core/agent/mcp_client.py +385 -0
- astrbot/core/agent/message.py +175 -0
- astrbot/core/agent/response.py +14 -0
- astrbot/core/agent/run_context.py +22 -0
- astrbot/core/agent/runners/__init__.py +3 -0
- astrbot/core/agent/runners/base.py +65 -0
- astrbot/core/agent/runners/coze/coze_agent_runner.py +367 -0
- astrbot/core/agent/runners/coze/coze_api_client.py +324 -0
- astrbot/core/agent/runners/dashscope/dashscope_agent_runner.py +403 -0
- astrbot/core/agent/runners/dify/dify_agent_runner.py +336 -0
- astrbot/core/agent/runners/dify/dify_api_client.py +195 -0
- astrbot/core/agent/runners/tool_loop_agent_runner.py +400 -0
- astrbot/core/agent/tool.py +285 -0
- astrbot/core/agent/tool_executor.py +17 -0
- astrbot/core/astr_agent_context.py +19 -0
- astrbot/core/astr_agent_hooks.py +36 -0
- astrbot/core/astr_agent_run_util.py +80 -0
- astrbot/core/astr_agent_tool_exec.py +246 -0
- astrbot/core/astrbot_config_mgr.py +275 -0
- astrbot/core/config/__init__.py +2 -2
- astrbot/core/config/astrbot_config.py +60 -20
- astrbot/core/config/default.py +1972 -453
- astrbot/core/config/i18n_utils.py +110 -0
- astrbot/core/conversation_mgr.py +285 -75
- astrbot/core/core_lifecycle.py +167 -62
- astrbot/core/db/__init__.py +305 -102
- astrbot/core/db/migration/helper.py +69 -0
- astrbot/core/db/migration/migra_3_to_4.py +357 -0
- astrbot/core/db/migration/migra_45_to_46.py +44 -0
- astrbot/core/db/migration/migra_webchat_session.py +131 -0
- astrbot/core/db/migration/shared_preferences_v3.py +48 -0
- astrbot/core/db/migration/sqlite_v3.py +497 -0
- astrbot/core/db/po.py +259 -55
- astrbot/core/db/sqlite.py +773 -528
- astrbot/core/db/vec_db/base.py +73 -0
- astrbot/core/db/vec_db/faiss_impl/__init__.py +3 -0
- astrbot/core/db/vec_db/faiss_impl/document_storage.py +392 -0
- astrbot/core/db/vec_db/faiss_impl/embedding_storage.py +93 -0
- astrbot/core/db/vec_db/faiss_impl/sqlite_init.sql +17 -0
- astrbot/core/db/vec_db/faiss_impl/vec_db.py +204 -0
- astrbot/core/event_bus.py +26 -22
- astrbot/core/exceptions.py +9 -0
- astrbot/core/file_token_service.py +98 -0
- astrbot/core/initial_loader.py +19 -10
- astrbot/core/knowledge_base/chunking/__init__.py +9 -0
- astrbot/core/knowledge_base/chunking/base.py +25 -0
- astrbot/core/knowledge_base/chunking/fixed_size.py +59 -0
- astrbot/core/knowledge_base/chunking/recursive.py +161 -0
- astrbot/core/knowledge_base/kb_db_sqlite.py +301 -0
- astrbot/core/knowledge_base/kb_helper.py +642 -0
- astrbot/core/knowledge_base/kb_mgr.py +330 -0
- astrbot/core/knowledge_base/models.py +120 -0
- astrbot/core/knowledge_base/parsers/__init__.py +13 -0
- astrbot/core/knowledge_base/parsers/base.py +51 -0
- astrbot/core/knowledge_base/parsers/markitdown_parser.py +26 -0
- astrbot/core/knowledge_base/parsers/pdf_parser.py +101 -0
- astrbot/core/knowledge_base/parsers/text_parser.py +42 -0
- astrbot/core/knowledge_base/parsers/url_parser.py +103 -0
- astrbot/core/knowledge_base/parsers/util.py +13 -0
- astrbot/core/knowledge_base/prompts.py +65 -0
- astrbot/core/knowledge_base/retrieval/__init__.py +14 -0
- astrbot/core/knowledge_base/retrieval/hit_stopwords.txt +767 -0
- astrbot/core/knowledge_base/retrieval/manager.py +276 -0
- astrbot/core/knowledge_base/retrieval/rank_fusion.py +142 -0
- astrbot/core/knowledge_base/retrieval/sparse_retriever.py +136 -0
- astrbot/core/log.py +21 -15
- astrbot/core/message/components.py +413 -287
- astrbot/core/message/message_event_result.py +35 -24
- astrbot/core/persona_mgr.py +192 -0
- astrbot/core/pipeline/__init__.py +14 -14
- astrbot/core/pipeline/content_safety_check/stage.py +13 -9
- astrbot/core/pipeline/content_safety_check/strategies/__init__.py +1 -2
- astrbot/core/pipeline/content_safety_check/strategies/baidu_aip.py +13 -14
- astrbot/core/pipeline/content_safety_check/strategies/keywords.py +2 -1
- astrbot/core/pipeline/content_safety_check/strategies/strategy.py +6 -6
- astrbot/core/pipeline/context.py +7 -1
- astrbot/core/pipeline/context_utils.py +107 -0
- astrbot/core/pipeline/preprocess_stage/stage.py +63 -36
- astrbot/core/pipeline/process_stage/method/agent_request.py +48 -0
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +464 -0
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/third_party.py +202 -0
- astrbot/core/pipeline/process_stage/method/star_request.py +26 -32
- astrbot/core/pipeline/process_stage/stage.py +21 -15
- astrbot/core/pipeline/process_stage/utils.py +125 -0
- astrbot/core/pipeline/rate_limit_check/stage.py +34 -36
- astrbot/core/pipeline/respond/stage.py +142 -101
- astrbot/core/pipeline/result_decorate/stage.py +124 -57
- astrbot/core/pipeline/scheduler.py +21 -16
- astrbot/core/pipeline/session_status_check/stage.py +37 -0
- astrbot/core/pipeline/stage.py +11 -76
- astrbot/core/pipeline/waking_check/stage.py +69 -33
- astrbot/core/pipeline/whitelist_check/stage.py +10 -7
- astrbot/core/platform/__init__.py +6 -6
- astrbot/core/platform/astr_message_event.py +107 -129
- astrbot/core/platform/astrbot_message.py +32 -12
- astrbot/core/platform/manager.py +62 -18
- astrbot/core/platform/message_session.py +30 -0
- astrbot/core/platform/platform.py +16 -24
- astrbot/core/platform/platform_metadata.py +9 -4
- astrbot/core/platform/register.py +12 -7
- astrbot/core/platform/sources/aiocqhttp/aiocqhttp_message_event.py +136 -60
- astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py +126 -46
- astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +63 -31
- astrbot/core/platform/sources/dingtalk/dingtalk_event.py +30 -26
- astrbot/core/platform/sources/discord/client.py +129 -0
- astrbot/core/platform/sources/discord/components.py +139 -0
- astrbot/core/platform/sources/discord/discord_platform_adapter.py +473 -0
- astrbot/core/platform/sources/discord/discord_platform_event.py +313 -0
- astrbot/core/platform/sources/lark/lark_adapter.py +27 -18
- astrbot/core/platform/sources/lark/lark_event.py +39 -13
- astrbot/core/platform/sources/misskey/misskey_adapter.py +770 -0
- astrbot/core/platform/sources/misskey/misskey_api.py +964 -0
- astrbot/core/platform/sources/misskey/misskey_event.py +163 -0
- astrbot/core/platform/sources/misskey/misskey_utils.py +550 -0
- astrbot/core/platform/sources/qqofficial/qqofficial_message_event.py +149 -33
- astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +41 -26
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +36 -17
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_event.py +3 -1
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_server.py +14 -8
- astrbot/core/platform/sources/satori/satori_adapter.py +792 -0
- astrbot/core/platform/sources/satori/satori_event.py +432 -0
- astrbot/core/platform/sources/slack/client.py +164 -0
- astrbot/core/platform/sources/slack/slack_adapter.py +416 -0
- astrbot/core/platform/sources/slack/slack_event.py +253 -0
- astrbot/core/platform/sources/telegram/tg_adapter.py +100 -43
- astrbot/core/platform/sources/telegram/tg_event.py +136 -36
- astrbot/core/platform/sources/webchat/webchat_adapter.py +72 -22
- astrbot/core/platform/sources/webchat/webchat_event.py +46 -22
- astrbot/core/platform/sources/webchat/webchat_queue_mgr.py +35 -0
- astrbot/core/platform/sources/wechatpadpro/wechatpadpro_adapter.py +926 -0
- astrbot/core/platform/sources/wechatpadpro/wechatpadpro_message_event.py +178 -0
- astrbot/core/platform/sources/wechatpadpro/xml_data_parser.py +159 -0
- astrbot/core/platform/sources/wecom/wecom_adapter.py +169 -27
- astrbot/core/platform/sources/wecom/wecom_event.py +162 -77
- astrbot/core/platform/sources/wecom/wecom_kf.py +279 -0
- astrbot/core/platform/sources/wecom/wecom_kf_message.py +196 -0
- astrbot/core/platform/sources/wecom_ai_bot/WXBizJsonMsgCrypt.py +297 -0
- astrbot/core/platform/sources/wecom_ai_bot/__init__.py +15 -0
- astrbot/core/platform/sources/wecom_ai_bot/ierror.py +19 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_adapter.py +472 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_api.py +417 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_event.py +152 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_queue_mgr.py +153 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_server.py +168 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_utils.py +209 -0
- astrbot/core/platform/sources/weixin_official_account/weixin_offacc_adapter.py +306 -0
- astrbot/core/platform/sources/weixin_official_account/weixin_offacc_event.py +186 -0
- astrbot/core/platform_message_history_mgr.py +49 -0
- astrbot/core/provider/__init__.py +2 -3
- astrbot/core/provider/entites.py +8 -8
- astrbot/core/provider/entities.py +154 -98
- astrbot/core/provider/func_tool_manager.py +446 -458
- astrbot/core/provider/manager.py +345 -207
- astrbot/core/provider/provider.py +188 -73
- astrbot/core/provider/register.py +9 -7
- astrbot/core/provider/sources/anthropic_source.py +295 -115
- astrbot/core/provider/sources/azure_tts_source.py +224 -0
- astrbot/core/provider/sources/bailian_rerank_source.py +236 -0
- astrbot/core/provider/sources/dashscope_tts.py +138 -14
- astrbot/core/provider/sources/edge_tts_source.py +24 -19
- astrbot/core/provider/sources/fishaudio_tts_api_source.py +58 -13
- astrbot/core/provider/sources/gemini_embedding_source.py +61 -0
- astrbot/core/provider/sources/gemini_source.py +310 -132
- astrbot/core/provider/sources/gemini_tts_source.py +81 -0
- astrbot/core/provider/sources/groq_source.py +15 -0
- astrbot/core/provider/sources/gsv_selfhosted_source.py +151 -0
- astrbot/core/provider/sources/gsvi_tts_source.py +14 -7
- astrbot/core/provider/sources/minimax_tts_api_source.py +159 -0
- astrbot/core/provider/sources/openai_embedding_source.py +40 -0
- astrbot/core/provider/sources/openai_source.py +241 -145
- astrbot/core/provider/sources/openai_tts_api_source.py +18 -7
- astrbot/core/provider/sources/sensevoice_selfhosted_source.py +13 -11
- astrbot/core/provider/sources/vllm_rerank_source.py +71 -0
- astrbot/core/provider/sources/volcengine_tts.py +115 -0
- astrbot/core/provider/sources/whisper_api_source.py +18 -13
- astrbot/core/provider/sources/whisper_selfhosted_source.py +19 -12
- astrbot/core/provider/sources/xinference_rerank_source.py +116 -0
- astrbot/core/provider/sources/xinference_stt_provider.py +197 -0
- astrbot/core/provider/sources/zhipu_source.py +6 -73
- astrbot/core/star/__init__.py +43 -11
- astrbot/core/star/config.py +17 -18
- astrbot/core/star/context.py +362 -138
- astrbot/core/star/filter/__init__.py +4 -3
- astrbot/core/star/filter/command.py +111 -35
- astrbot/core/star/filter/command_group.py +46 -34
- astrbot/core/star/filter/custom_filter.py +6 -5
- astrbot/core/star/filter/event_message_type.py +4 -2
- astrbot/core/star/filter/permission.py +4 -2
- astrbot/core/star/filter/platform_adapter_type.py +45 -12
- astrbot/core/star/filter/regex.py +4 -2
- astrbot/core/star/register/__init__.py +19 -15
- astrbot/core/star/register/star.py +41 -13
- astrbot/core/star/register/star_handler.py +236 -86
- astrbot/core/star/session_llm_manager.py +280 -0
- astrbot/core/star/session_plugin_manager.py +170 -0
- astrbot/core/star/star.py +36 -43
- astrbot/core/star/star_handler.py +47 -85
- astrbot/core/star/star_manager.py +442 -260
- astrbot/core/star/star_tools.py +167 -45
- astrbot/core/star/updator.py +17 -20
- astrbot/core/umop_config_router.py +106 -0
- astrbot/core/updator.py +38 -13
- astrbot/core/utils/astrbot_path.py +39 -0
- astrbot/core/utils/command_parser.py +1 -1
- astrbot/core/utils/io.py +119 -60
- astrbot/core/utils/log_pipe.py +1 -1
- astrbot/core/utils/metrics.py +11 -10
- astrbot/core/utils/migra_helper.py +73 -0
- astrbot/core/utils/path_util.py +63 -62
- astrbot/core/utils/pip_installer.py +37 -15
- astrbot/core/utils/session_lock.py +29 -0
- astrbot/core/utils/session_waiter.py +19 -20
- astrbot/core/utils/shared_preferences.py +174 -34
- astrbot/core/utils/t2i/__init__.py +4 -1
- astrbot/core/utils/t2i/local_strategy.py +386 -238
- astrbot/core/utils/t2i/network_strategy.py +109 -49
- astrbot/core/utils/t2i/renderer.py +29 -14
- astrbot/core/utils/t2i/template/astrbot_powershell.html +184 -0
- astrbot/core/utils/t2i/template_manager.py +111 -0
- astrbot/core/utils/tencent_record_helper.py +115 -1
- astrbot/core/utils/version_comparator.py +10 -13
- astrbot/core/zip_updator.py +112 -65
- astrbot/dashboard/routes/__init__.py +20 -13
- astrbot/dashboard/routes/auth.py +20 -9
- astrbot/dashboard/routes/chat.py +297 -141
- astrbot/dashboard/routes/config.py +652 -55
- astrbot/dashboard/routes/conversation.py +107 -37
- astrbot/dashboard/routes/file.py +26 -0
- astrbot/dashboard/routes/knowledge_base.py +1244 -0
- astrbot/dashboard/routes/log.py +27 -2
- astrbot/dashboard/routes/persona.py +202 -0
- astrbot/dashboard/routes/plugin.py +197 -139
- astrbot/dashboard/routes/route.py +27 -7
- astrbot/dashboard/routes/session_management.py +354 -0
- astrbot/dashboard/routes/stat.py +85 -18
- astrbot/dashboard/routes/static_file.py +5 -2
- astrbot/dashboard/routes/t2i.py +233 -0
- astrbot/dashboard/routes/tools.py +184 -120
- astrbot/dashboard/routes/update.py +59 -36
- astrbot/dashboard/server.py +96 -36
- astrbot/dashboard/utils.py +165 -0
- astrbot-4.7.0.dist-info/METADATA +294 -0
- astrbot-4.7.0.dist-info/RECORD +274 -0
- {astrbot-3.5.6.dist-info → astrbot-4.7.0.dist-info}/WHEEL +1 -1
- astrbot/core/db/plugin/sqlite_impl.py +0 -112
- astrbot/core/db/sqlite_init.sql +0 -50
- astrbot/core/pipeline/platform_compatibility/stage.py +0 -56
- astrbot/core/pipeline/process_stage/method/llm_request.py +0 -606
- astrbot/core/platform/sources/gewechat/client.py +0 -806
- astrbot/core/platform/sources/gewechat/downloader.py +0 -55
- astrbot/core/platform/sources/gewechat/gewechat_event.py +0 -255
- astrbot/core/platform/sources/gewechat/gewechat_platform_adapter.py +0 -103
- astrbot/core/platform/sources/gewechat/xml_data_parser.py +0 -110
- astrbot/core/provider/sources/dashscope_source.py +0 -203
- astrbot/core/provider/sources/dify_source.py +0 -281
- astrbot/core/provider/sources/llmtuner_source.py +0 -132
- astrbot/core/rag/embedding/openai_source.py +0 -20
- astrbot/core/rag/knowledge_db_mgr.py +0 -94
- astrbot/core/rag/store/__init__.py +0 -9
- astrbot/core/rag/store/chroma_db.py +0 -42
- astrbot/core/utils/dify_api_client.py +0 -152
- astrbot-3.5.6.dist-info/METADATA +0 -249
- astrbot-3.5.6.dist-info/RECORD +0 -158
- {astrbot-3.5.6.dist-info → astrbot-4.7.0.dist-info}/entry_points.txt +0 -0
- {astrbot-3.5.6.dist-info → astrbot-4.7.0.dist-info}/licenses/LICENSE +0 -0
astrbot/core/knowledge_base/kb_helper.py
@@ -0,0 +1,642 @@
import asyncio
import json
import re
import time
import uuid
from pathlib import Path

import aiofiles

from astrbot.core import logger
from astrbot.core.db.vec_db.base import BaseVecDB
from astrbot.core.db.vec_db.faiss_impl.vec_db import FaissVecDB
from astrbot.core.provider.manager import ProviderManager
from astrbot.core.provider.provider import (
    EmbeddingProvider,
    RerankProvider,
)
from astrbot.core.provider.provider import (
    Provider as LLMProvider,
)

from .chunking.base import BaseChunker
from .chunking.recursive import RecursiveCharacterChunker
from .kb_db_sqlite import KBSQLiteDatabase
from .models import KBDocument, KBMedia, KnowledgeBase
from .parsers.url_parser import extract_text_from_url
from .parsers.util import select_parser
from .prompts import TEXT_REPAIR_SYSTEM_PROMPT


class RateLimiter:
    """A simple rate limiter"""

    def __init__(self, max_rpm: int):
        self.max_per_minute = max_rpm
        self.interval = 60.0 / max_rpm if max_rpm > 0 else 0
        self.last_call_time = 0

    async def __aenter__(self):
        if self.interval == 0:
            return

        now = time.monotonic()
        elapsed = now - self.last_call_time

        if elapsed < self.interval:
            await asyncio.sleep(self.interval - elapsed)

        self.last_call_time = time.monotonic()

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        pass


async def _repair_and_translate_chunk_with_retry(
    chunk: str,
    repair_llm_service: LLMProvider,
    rate_limiter: RateLimiter,
    max_retries: int = 2,
) -> list[str]:
    """
    Repairs, translates, and optionally re-chunks a single text chunk using the small LLM, with rate limiting.
    """
    # To guard against LLM context contamination, put explicit instructions in the user_prompt as well
    user_prompt = f"""IGNORE ALL PREVIOUS INSTRUCTIONS. Your ONLY task is to process the following text chunk according to the system prompt provided.

Text chunk to process:
---
{chunk}
---
"""
    for attempt in range(max_retries + 1):
        try:
            async with rate_limiter:
                response = await repair_llm_service.text_chat(
                    prompt=user_prompt, system_prompt=TEXT_REPAIR_SYSTEM_PROMPT
                )

            llm_output = response.completion_text

            if "<discard_chunk />" in llm_output:
                return []  # Signal to discard this chunk

            # More robust regex to handle potential LLM formatting errors (spaces, newlines in tags)
            matches = re.findall(
                r"<\s*repaired_text\s*>\s*(.*?)\s*<\s*/\s*repaired_text\s*>",
                llm_output,
                re.DOTALL,
            )

            if matches:
                # Further cleaning to ensure no empty strings are returned
                return [m.strip() for m in matches if m.strip()]
            else:
                # If no valid tags and not explicitly discarded, discard it to be safe.
                return []
        except Exception as e:
            logger.warning(
                f" - LLM call failed on attempt {attempt + 1}/{max_retries + 1}. Error: {str(e)}"
            )

    logger.error(
        f" - Failed to process chunk after {max_retries + 1} attempts. Using original text."
    )
    return [chunk]


class KBHelper:
    vec_db: BaseVecDB
    kb: KnowledgeBase

    def __init__(
        self,
        kb_db: KBSQLiteDatabase,
        kb: KnowledgeBase,
        provider_manager: ProviderManager,
        kb_root_dir: str,
        chunker: BaseChunker,
    ):
        self.kb_db = kb_db
        self.kb = kb
        self.prov_mgr = provider_manager
        self.kb_root_dir = kb_root_dir
        self.chunker = chunker

        self.kb_dir = Path(self.kb_root_dir) / self.kb.kb_id
        self.kb_medias_dir = Path(self.kb_dir) / "medias" / self.kb.kb_id
        self.kb_files_dir = Path(self.kb_dir) / "files" / self.kb.kb_id

        self.kb_medias_dir.mkdir(parents=True, exist_ok=True)
        self.kb_files_dir.mkdir(parents=True, exist_ok=True)

    async def initialize(self):
        await self._ensure_vec_db()

    async def get_ep(self) -> EmbeddingProvider:
        if not self.kb.embedding_provider_id:
            raise ValueError(f"知识库 {self.kb.kb_name} 未配置 Embedding Provider")
        ep: EmbeddingProvider = await self.prov_mgr.get_provider_by_id(
            self.kb.embedding_provider_id,
        )  # type: ignore
        if not ep:
            raise ValueError(
                f"无法找到 ID 为 {self.kb.embedding_provider_id} 的 Embedding Provider",
            )
        return ep

    async def get_rp(self) -> RerankProvider | None:
        if not self.kb.rerank_provider_id:
            return None
        rp: RerankProvider = await self.prov_mgr.get_provider_by_id(
            self.kb.rerank_provider_id,
        )  # type: ignore
        if not rp:
            raise ValueError(
                f"无法找到 ID 为 {self.kb.rerank_provider_id} 的 Rerank Provider",
            )
        return rp

    async def _ensure_vec_db(self) -> FaissVecDB:
        if not self.kb.embedding_provider_id:
            raise ValueError(f"知识库 {self.kb.kb_name} 未配置 Embedding Provider")

        ep = await self.get_ep()
        rp = await self.get_rp()

        vec_db = FaissVecDB(
            doc_store_path=str(self.kb_dir / "doc.db"),
            index_store_path=str(self.kb_dir / "index.faiss"),
            embedding_provider=ep,
            rerank_provider=rp,
        )
        await vec_db.initialize()
        self.vec_db = vec_db
        return vec_db

    async def delete_vec_db(self):
        """Delete the knowledge base's vector database and all related files"""
        import shutil

        await self.terminate()
        if self.kb_dir.exists():
            shutil.rmtree(self.kb_dir)

    async def terminate(self):
        if self.vec_db:
            await self.vec_db.close()

    async def upload_document(
        self,
        file_name: str,
        file_content: bytes | None,
        file_type: str,
        chunk_size: int = 512,
        chunk_overlap: int = 50,
        batch_size: int = 32,
        tasks_limit: int = 3,
        max_retries: int = 3,
        progress_callback=None,
        pre_chunked_text: list[str] | None = None,
    ) -> KBDocument:
        """Upload and process a document (with atomicity guarantees and failure cleanup)

        Flow:
        1. Save the original file
        2. Parse the document content
        3. Extract multimedia resources
        4. Chunk the text
        5. Generate embeddings and store them
        6. Save metadata (transactional)
        7. Update statistics

        Args:
            progress_callback: progress callback, receives (stage, current, total)
                - stage: current stage ('parsing', 'chunking', 'embedding')
                - current: current progress
                - total: total count

        """
        await self._ensure_vec_db()
        doc_id = str(uuid.uuid4())
        media_paths: list[Path] = []
        file_size = 0

        # file_path = self.kb_files_dir / f"{doc_id}.{file_type}"
        # async with aiofiles.open(file_path, "wb") as f:
        #     await f.write(file_content)

        try:
            chunks_text = []
            saved_media = []

            if pre_chunked_text is not None:
                # If pre-chunked text is provided, use it directly
                chunks_text = pre_chunked_text
                file_size = sum(len(chunk) for chunk in chunks_text)
                logger.info(f"使用预分块文本进行上传,共 {len(chunks_text)} 个块。")
            else:
                # Otherwise, run the standard file parsing and chunking flow
                if file_content is None:
                    raise ValueError(
                        "当未提供 pre_chunked_text 时,file_content 不能为空。"
                    )

                file_size = len(file_content)

                # Stage 1: parse the document
                if progress_callback:
                    await progress_callback("parsing", 0, 100)

                parser = await select_parser(f".{file_type}")
                parse_result = await parser.parse(file_content, file_name)
                text_content = parse_result.text
                media_items = parse_result.media

                if progress_callback:
                    await progress_callback("parsing", 100, 100)

                # Save media files
                for media_item in media_items:
                    media = await self._save_media(
                        doc_id=doc_id,
                        media_type=media_item.media_type,
                        file_name=media_item.file_name,
                        content=media_item.content,
                        mime_type=media_item.mime_type,
                    )
                    saved_media.append(media)
                    media_paths.append(Path(media.file_path))

                # Stage 2: chunking
                if progress_callback:
                    await progress_callback("chunking", 0, 100)

                chunks_text = await self.chunker.chunk(
                    text_content,
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap,
                )
            contents = []
            metadatas = []
            for idx, chunk_text in enumerate(chunks_text):
                contents.append(chunk_text)
                metadatas.append(
                    {
                        "kb_id": self.kb.kb_id,
                        "kb_doc_id": doc_id,
                        "chunk_index": idx,
                    },
                )

            if progress_callback:
                await progress_callback("chunking", 100, 100)

            # Stage 3: generate embeddings (with progress callback)
            async def embedding_progress_callback(current, total):
                if progress_callback:
                    await progress_callback("embedding", current, total)

            await self.vec_db.insert_batch(
                contents=contents,
                metadatas=metadatas,
                batch_size=batch_size,
                tasks_limit=tasks_limit,
                max_retries=max_retries,
                progress_callback=embedding_progress_callback,
            )

            # Save the document's metadata
            doc = KBDocument(
                doc_id=doc_id,
                kb_id=self.kb.kb_id,
                doc_name=file_name,
                file_type=file_type,
                file_size=file_size,
                # file_path=str(file_path),
                file_path="",
                chunk_count=len(chunks_text),
                media_count=0,
            )
            async with self.kb_db.get_db() as session:
                async with session.begin():
                    session.add(doc)
                    for media in saved_media:
                        session.add(media)
                    await session.commit()

                await session.refresh(doc)

            vec_db: FaissVecDB = self.vec_db  # type: ignore
            await self.kb_db.update_kb_stats(kb_id=self.kb.kb_id, vec_db=vec_db)
            await self.refresh_kb()
            await self.refresh_document(doc_id)
            return doc
        except Exception as e:
            logger.error(f"上传文档失败: {e}")
            # if file_path.exists():
            #     file_path.unlink()

            for media_path in media_paths:
                try:
                    if media_path.exists():
                        media_path.unlink()
                except Exception as me:
                    logger.warning(f"清理多媒体文件失败 {media_path}: {me}")

            raise e

    async def list_documents(
        self,
        offset: int = 0,
        limit: int = 100,
    ) -> list[KBDocument]:
        """List all documents in the knowledge base"""
        docs = await self.kb_db.list_documents_by_kb(self.kb.kb_id, offset, limit)
        return docs

    async def get_document(self, doc_id: str) -> KBDocument | None:
        """Get a single document"""
        doc = await self.kb_db.get_document_by_id(doc_id)
        return doc

    async def delete_document(self, doc_id: str):
        """Delete a single document and its related data"""
        await self.kb_db.delete_document_by_id(
            doc_id=doc_id,
            vec_db=self.vec_db,  # type: ignore
        )
        await self.kb_db.update_kb_stats(
            kb_id=self.kb.kb_id,
            vec_db=self.vec_db,  # type: ignore
        )
        await self.refresh_kb()

    async def delete_chunk(self, chunk_id: str, doc_id: str):
        """Delete a single text chunk and its related data"""
        vec_db: FaissVecDB = self.vec_db  # type: ignore
        await vec_db.delete(chunk_id)
        await self.kb_db.update_kb_stats(
            kb_id=self.kb.kb_id,
            vec_db=self.vec_db,  # type: ignore
        )
        await self.refresh_kb()
        await self.refresh_document(doc_id)

    async def refresh_kb(self):
        if self.kb:
            kb = await self.kb_db.get_kb_by_id(self.kb.kb_id)
            if kb:
                self.kb = kb

    async def refresh_document(self, doc_id: str) -> None:
        """Update the document's metadata"""
        doc = await self.get_document(doc_id)
        if not doc:
            raise ValueError(f"无法找到 ID 为 {doc_id} 的文档")
        chunk_count = await self.get_chunk_count_by_doc_id(doc_id)
        doc.chunk_count = chunk_count
        async with self.kb_db.get_db() as session:
            async with session.begin():
                session.add(doc)
                await session.commit()
            await session.refresh(doc)

    async def get_chunks_by_doc_id(
        self,
        doc_id: str,
        offset: int = 0,
        limit: int = 100,
    ) -> list[dict]:
        """Get all chunks of a document along with their metadata"""
        vec_db: FaissVecDB = self.vec_db  # type: ignore
        chunks = await vec_db.document_storage.get_documents(
            metadata_filters={"kb_doc_id": doc_id},
            offset=offset,
            limit=limit,
        )
        result = []
        for chunk in chunks:
            chunk_md = json.loads(chunk["metadata"])
            result.append(
                {
                    "chunk_id": chunk["doc_id"],
                    "doc_id": chunk_md["kb_doc_id"],
                    "kb_id": chunk_md["kb_id"],
                    "chunk_index": chunk_md["chunk_index"],
                    "content": chunk["text"],
                    "char_count": len(chunk["text"]),
                },
            )
        return result

    async def get_chunk_count_by_doc_id(self, doc_id: str) -> int:
        """Get the number of chunks for a document"""
        vec_db: FaissVecDB = self.vec_db  # type: ignore
        count = await vec_db.count_documents(metadata_filter={"kb_doc_id": doc_id})
        return count

    async def _save_media(
        self,
        doc_id: str,
        media_type: str,
        file_name: str,
        content: bytes,
        mime_type: str,
    ) -> KBMedia:
        """Save a multimedia resource"""
        media_id = str(uuid.uuid4())
        ext = Path(file_name).suffix

        # Save the file
        file_path = self.kb_medias_dir / doc_id / f"{media_id}{ext}"
        file_path.parent.mkdir(parents=True, exist_ok=True)
        async with aiofiles.open(file_path, "wb") as f:
            await f.write(content)

        media = KBMedia(
            media_id=media_id,
            doc_id=doc_id,
            kb_id=self.kb.kb_id,
            media_type=media_type,
            file_name=file_name,
            file_path=str(file_path),
            file_size=len(content),
            mime_type=mime_type,
        )

        return media

    async def upload_from_url(
        self,
        url: str,
        chunk_size: int = 512,
        chunk_overlap: int = 50,
        batch_size: int = 32,
        tasks_limit: int = 3,
        max_retries: int = 3,
        progress_callback=None,
        enable_cleaning: bool = False,
        cleaning_provider_id: str | None = None,
    ) -> KBDocument:
        """Upload and process a document from a URL (with atomicity guarantees and failure cleanup)
        Args:
            url: the web page URL to extract content from
            chunk_size: text chunk size
            chunk_overlap: text chunk overlap
            batch_size: batch size
            tasks_limit: concurrent task limit
            max_retries: maximum number of retries
            progress_callback: progress callback, receives (stage, current, total)
                - stage: current stage ('extracting', 'cleaning', 'parsing', 'chunking', 'embedding')
                - current: current progress
                - total: total count
        Returns:
            KBDocument: the uploaded document object
        Raises:
            ValueError: if the URL is empty or no content could be extracted
            IOError: if the network request fails
        """
        # Get the Tavily API key
        config = self.prov_mgr.acm.default_conf
        tavily_keys = config.get("provider_settings", {}).get(
            "websearch_tavily_key", []
        )
        if not tavily_keys:
            raise ValueError(
                "Error: Tavily API key is not configured in provider_settings."
            )

        # Stage 1: extract content from the URL
        if progress_callback:
            await progress_callback("extracting", 0, 100)

        try:
            text_content = await extract_text_from_url(url, tavily_keys)
        except Exception as e:
            logger.error(f"Failed to extract content from URL {url}: {e}")
            raise OSError(f"Failed to extract content from URL {url}: {e}") from e

        if not text_content:
            raise ValueError(f"No content extracted from URL: {url}")

        if progress_callback:
            await progress_callback("extracting", 100, 100)

        # Stage 2: (optionally) clean the content and chunk it
        final_chunks = await self._clean_and_rechunk_content(
            content=text_content,
            url=url,
            progress_callback=progress_callback,
            enable_cleaning=enable_cleaning,
            cleaning_provider_id=cleaning_provider_id,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
        )

        if enable_cleaning and not final_chunks:
            raise ValueError(
                "内容清洗后未提取到有效文本。请尝试关闭内容清洗功能,或更换更高性能的LLM模型后重试。"
            )

        # Create a placeholder file name
        file_name = url.split("/")[-1] or f"document_from_{url}"
        if not Path(file_name).suffix:
            file_name += ".url"

        # Reuse the existing upload_document method, passing pre-chunked text
        return await self.upload_document(
            file_name=file_name,
            file_content=None,
            file_type="url",  # Use 'url' as a special file type
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            batch_size=batch_size,
            tasks_limit=tasks_limit,
            max_retries=max_retries,
            progress_callback=progress_callback,
            pre_chunked_text=final_chunks,
        )

    async def _clean_and_rechunk_content(
        self,
        content: str,
        url: str,
        progress_callback=None,
        enable_cleaning: bool = False,
        cleaning_provider_id: str | None = None,
        repair_max_rpm: int = 60,
        chunk_size: int = 512,
        chunk_overlap: int = 50,
    ) -> list[str]:
        """
        Clean, repair, translate, and re-chunk content fetched from a URL.
        """
        if not enable_cleaning:
            # If cleaning is disabled, chunk with the parameters passed from the frontend
            logger.info(
                f"内容清洗未启用,使用指定参数进行分块: chunk_size={chunk_size}, chunk_overlap={chunk_overlap}"
            )
            return await self.chunker.chunk(
                content, chunk_size=chunk_size, chunk_overlap=chunk_overlap
            )

        if not cleaning_provider_id:
            logger.warning(
                "启用了内容清洗,但未提供 cleaning_provider_id,跳过清洗并使用默认分块。"
            )
            return await self.chunker.chunk(content)

        if progress_callback:
            await progress_callback("cleaning", 0, 100)

        try:
            # Get the specified LLM Provider
            llm_provider = await self.prov_mgr.get_provider_by_id(cleaning_provider_id)
            if not llm_provider or not isinstance(llm_provider, LLMProvider):
                raise ValueError(
                    f"无法找到 ID 为 {cleaning_provider_id} 的 LLM Provider 或类型不正确"
                )

            # Initial chunking
            # Tune the separators to split by paragraph first, for higher-quality chunks
            text_splitter = RecursiveCharacterChunker(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=["\n\n", "\n", " "],  # Prefer paragraph separators
            )
            initial_chunks = await text_splitter.chunk(content)
            logger.info(f"初步分块完成,生成 {len(initial_chunks)} 个块用于修复。")

            # Process all chunks concurrently
            rate_limiter = RateLimiter(repair_max_rpm)
            tasks = [
                _repair_and_translate_chunk_with_retry(
                    chunk, llm_provider, rate_limiter
                )
                for chunk in initial_chunks
            ]

            repaired_results = await asyncio.gather(*tasks, return_exceptions=True)

            final_chunks = []
            for i, result in enumerate(repaired_results):
                if isinstance(result, Exception):
                    logger.warning(f"块 {i} 处理异常: {str(result)}. 回退到原始块。")
                    final_chunks.append(initial_chunks[i])
                elif isinstance(result, list):
                    final_chunks.extend(result)

            logger.info(
                f"文本修复完成: {len(initial_chunks)} 个原始块 -> {len(final_chunks)} 个最终块。"
            )

            if progress_callback:
                await progress_callback("cleaning", 100, 100)

            return final_chunks

        except Exception as e:
            logger.error(f"使用 Provider '{cleaning_provider_id}' 清洗内容失败: {e}")
            # Cleaning failed; fall back to default chunking so the flow isn't interrupted
            return await self.chunker.chunk(content)
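The upload_document docstring above defines a progress_callback contract of (stage, current, total), where stage is 'parsing', 'chunking', or 'embedding' for file uploads, and upload_from_url adds 'extracting' and 'cleaning'. The following is a minimal caller sketch (illustrative only, not shipped with the package); it assumes an already-initialized KBHelper instance, and names such as ingest_pdf, report, and pdf_bytes are hypothetical:

from astrbot.core.knowledge_base.kb_helper import KBHelper


async def report(stage: str, current: int, total: int) -> None:
    # Stage names follow the documented callback contract.
    print(f"[{stage}] {current}/{total}")


async def ingest_pdf(kb_helper: KBHelper, pdf_bytes: bytes):
    # upload_document parses, chunks, embeds, and persists the document,
    # reporting progress through the callback at each stage.
    doc = await kb_helper.upload_document(
        file_name="manual.pdf",
        file_content=pdf_bytes,
        file_type="pdf",
        progress_callback=report,
    )
    return doc

The URL path is analogous: calling kb_helper.upload_from_url with enable_cleaning=True and a cleaning_provider_id runs the Tavily extraction and optional LLM-based repair first, then hands the pre-chunked text to upload_document.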