AstrBot 3.5.6__py3-none-any.whl → 4.7.0__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- astrbot/api/__init__.py +16 -4
- astrbot/api/all.py +2 -1
- astrbot/api/event/__init__.py +5 -6
- astrbot/api/event/filter/__init__.py +37 -34
- astrbot/api/platform/__init__.py +7 -8
- astrbot/api/provider/__init__.py +8 -7
- astrbot/api/star/__init__.py +3 -4
- astrbot/api/util/__init__.py +2 -2
- astrbot/cli/__init__.py +1 -0
- astrbot/cli/__main__.py +18 -197
- astrbot/cli/commands/__init__.py +6 -0
- astrbot/cli/commands/cmd_conf.py +209 -0
- astrbot/cli/commands/cmd_init.py +56 -0
- astrbot/cli/commands/cmd_plug.py +245 -0
- astrbot/cli/commands/cmd_run.py +62 -0
- astrbot/cli/utils/__init__.py +18 -0
- astrbot/cli/utils/basic.py +76 -0
- astrbot/cli/utils/plugin.py +246 -0
- astrbot/cli/utils/version_comparator.py +90 -0
- astrbot/core/__init__.py +17 -19
- astrbot/core/agent/agent.py +14 -0
- astrbot/core/agent/handoff.py +38 -0
- astrbot/core/agent/hooks.py +30 -0
- astrbot/core/agent/mcp_client.py +385 -0
- astrbot/core/agent/message.py +175 -0
- astrbot/core/agent/response.py +14 -0
- astrbot/core/agent/run_context.py +22 -0
- astrbot/core/agent/runners/__init__.py +3 -0
- astrbot/core/agent/runners/base.py +65 -0
- astrbot/core/agent/runners/coze/coze_agent_runner.py +367 -0
- astrbot/core/agent/runners/coze/coze_api_client.py +324 -0
- astrbot/core/agent/runners/dashscope/dashscope_agent_runner.py +403 -0
- astrbot/core/agent/runners/dify/dify_agent_runner.py +336 -0
- astrbot/core/agent/runners/dify/dify_api_client.py +195 -0
- astrbot/core/agent/runners/tool_loop_agent_runner.py +400 -0
- astrbot/core/agent/tool.py +285 -0
- astrbot/core/agent/tool_executor.py +17 -0
- astrbot/core/astr_agent_context.py +19 -0
- astrbot/core/astr_agent_hooks.py +36 -0
- astrbot/core/astr_agent_run_util.py +80 -0
- astrbot/core/astr_agent_tool_exec.py +246 -0
- astrbot/core/astrbot_config_mgr.py +275 -0
- astrbot/core/config/__init__.py +2 -2
- astrbot/core/config/astrbot_config.py +60 -20
- astrbot/core/config/default.py +1972 -453
- astrbot/core/config/i18n_utils.py +110 -0
- astrbot/core/conversation_mgr.py +285 -75
- astrbot/core/core_lifecycle.py +167 -62
- astrbot/core/db/__init__.py +305 -102
- astrbot/core/db/migration/helper.py +69 -0
- astrbot/core/db/migration/migra_3_to_4.py +357 -0
- astrbot/core/db/migration/migra_45_to_46.py +44 -0
- astrbot/core/db/migration/migra_webchat_session.py +131 -0
- astrbot/core/db/migration/shared_preferences_v3.py +48 -0
- astrbot/core/db/migration/sqlite_v3.py +497 -0
- astrbot/core/db/po.py +259 -55
- astrbot/core/db/sqlite.py +773 -528
- astrbot/core/db/vec_db/base.py +73 -0
- astrbot/core/db/vec_db/faiss_impl/__init__.py +3 -0
- astrbot/core/db/vec_db/faiss_impl/document_storage.py +392 -0
- astrbot/core/db/vec_db/faiss_impl/embedding_storage.py +93 -0
- astrbot/core/db/vec_db/faiss_impl/sqlite_init.sql +17 -0
- astrbot/core/db/vec_db/faiss_impl/vec_db.py +204 -0
- astrbot/core/event_bus.py +26 -22
- astrbot/core/exceptions.py +9 -0
- astrbot/core/file_token_service.py +98 -0
- astrbot/core/initial_loader.py +19 -10
- astrbot/core/knowledge_base/chunking/__init__.py +9 -0
- astrbot/core/knowledge_base/chunking/base.py +25 -0
- astrbot/core/knowledge_base/chunking/fixed_size.py +59 -0
- astrbot/core/knowledge_base/chunking/recursive.py +161 -0
- astrbot/core/knowledge_base/kb_db_sqlite.py +301 -0
- astrbot/core/knowledge_base/kb_helper.py +642 -0
- astrbot/core/knowledge_base/kb_mgr.py +330 -0
- astrbot/core/knowledge_base/models.py +120 -0
- astrbot/core/knowledge_base/parsers/__init__.py +13 -0
- astrbot/core/knowledge_base/parsers/base.py +51 -0
- astrbot/core/knowledge_base/parsers/markitdown_parser.py +26 -0
- astrbot/core/knowledge_base/parsers/pdf_parser.py +101 -0
- astrbot/core/knowledge_base/parsers/text_parser.py +42 -0
- astrbot/core/knowledge_base/parsers/url_parser.py +103 -0
- astrbot/core/knowledge_base/parsers/util.py +13 -0
- astrbot/core/knowledge_base/prompts.py +65 -0
- astrbot/core/knowledge_base/retrieval/__init__.py +14 -0
- astrbot/core/knowledge_base/retrieval/hit_stopwords.txt +767 -0
- astrbot/core/knowledge_base/retrieval/manager.py +276 -0
- astrbot/core/knowledge_base/retrieval/rank_fusion.py +142 -0
- astrbot/core/knowledge_base/retrieval/sparse_retriever.py +136 -0
- astrbot/core/log.py +21 -15
- astrbot/core/message/components.py +413 -287
- astrbot/core/message/message_event_result.py +35 -24
- astrbot/core/persona_mgr.py +192 -0
- astrbot/core/pipeline/__init__.py +14 -14
- astrbot/core/pipeline/content_safety_check/stage.py +13 -9
- astrbot/core/pipeline/content_safety_check/strategies/__init__.py +1 -2
- astrbot/core/pipeline/content_safety_check/strategies/baidu_aip.py +13 -14
- astrbot/core/pipeline/content_safety_check/strategies/keywords.py +2 -1
- astrbot/core/pipeline/content_safety_check/strategies/strategy.py +6 -6
- astrbot/core/pipeline/context.py +7 -1
- astrbot/core/pipeline/context_utils.py +107 -0
- astrbot/core/pipeline/preprocess_stage/stage.py +63 -36
- astrbot/core/pipeline/process_stage/method/agent_request.py +48 -0
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +464 -0
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/third_party.py +202 -0
- astrbot/core/pipeline/process_stage/method/star_request.py +26 -32
- astrbot/core/pipeline/process_stage/stage.py +21 -15
- astrbot/core/pipeline/process_stage/utils.py +125 -0
- astrbot/core/pipeline/rate_limit_check/stage.py +34 -36
- astrbot/core/pipeline/respond/stage.py +142 -101
- astrbot/core/pipeline/result_decorate/stage.py +124 -57
- astrbot/core/pipeline/scheduler.py +21 -16
- astrbot/core/pipeline/session_status_check/stage.py +37 -0
- astrbot/core/pipeline/stage.py +11 -76
- astrbot/core/pipeline/waking_check/stage.py +69 -33
- astrbot/core/pipeline/whitelist_check/stage.py +10 -7
- astrbot/core/platform/__init__.py +6 -6
- astrbot/core/platform/astr_message_event.py +107 -129
- astrbot/core/platform/astrbot_message.py +32 -12
- astrbot/core/platform/manager.py +62 -18
- astrbot/core/platform/message_session.py +30 -0
- astrbot/core/platform/platform.py +16 -24
- astrbot/core/platform/platform_metadata.py +9 -4
- astrbot/core/platform/register.py +12 -7
- astrbot/core/platform/sources/aiocqhttp/aiocqhttp_message_event.py +136 -60
- astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py +126 -46
- astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +63 -31
- astrbot/core/platform/sources/dingtalk/dingtalk_event.py +30 -26
- astrbot/core/platform/sources/discord/client.py +129 -0
- astrbot/core/platform/sources/discord/components.py +139 -0
- astrbot/core/platform/sources/discord/discord_platform_adapter.py +473 -0
- astrbot/core/platform/sources/discord/discord_platform_event.py +313 -0
- astrbot/core/platform/sources/lark/lark_adapter.py +27 -18
- astrbot/core/platform/sources/lark/lark_event.py +39 -13
- astrbot/core/platform/sources/misskey/misskey_adapter.py +770 -0
- astrbot/core/platform/sources/misskey/misskey_api.py +964 -0
- astrbot/core/platform/sources/misskey/misskey_event.py +163 -0
- astrbot/core/platform/sources/misskey/misskey_utils.py +550 -0
- astrbot/core/platform/sources/qqofficial/qqofficial_message_event.py +149 -33
- astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +41 -26
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +36 -17
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_event.py +3 -1
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_server.py +14 -8
- astrbot/core/platform/sources/satori/satori_adapter.py +792 -0
- astrbot/core/platform/sources/satori/satori_event.py +432 -0
- astrbot/core/platform/sources/slack/client.py +164 -0
- astrbot/core/platform/sources/slack/slack_adapter.py +416 -0
- astrbot/core/platform/sources/slack/slack_event.py +253 -0
- astrbot/core/platform/sources/telegram/tg_adapter.py +100 -43
- astrbot/core/platform/sources/telegram/tg_event.py +136 -36
- astrbot/core/platform/sources/webchat/webchat_adapter.py +72 -22
- astrbot/core/platform/sources/webchat/webchat_event.py +46 -22
- astrbot/core/platform/sources/webchat/webchat_queue_mgr.py +35 -0
- astrbot/core/platform/sources/wechatpadpro/wechatpadpro_adapter.py +926 -0
- astrbot/core/platform/sources/wechatpadpro/wechatpadpro_message_event.py +178 -0
- astrbot/core/platform/sources/wechatpadpro/xml_data_parser.py +159 -0
- astrbot/core/platform/sources/wecom/wecom_adapter.py +169 -27
- astrbot/core/platform/sources/wecom/wecom_event.py +162 -77
- astrbot/core/platform/sources/wecom/wecom_kf.py +279 -0
- astrbot/core/platform/sources/wecom/wecom_kf_message.py +196 -0
- astrbot/core/platform/sources/wecom_ai_bot/WXBizJsonMsgCrypt.py +297 -0
- astrbot/core/platform/sources/wecom_ai_bot/__init__.py +15 -0
- astrbot/core/platform/sources/wecom_ai_bot/ierror.py +19 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_adapter.py +472 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_api.py +417 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_event.py +152 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_queue_mgr.py +153 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_server.py +168 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_utils.py +209 -0
- astrbot/core/platform/sources/weixin_official_account/weixin_offacc_adapter.py +306 -0
- astrbot/core/platform/sources/weixin_official_account/weixin_offacc_event.py +186 -0
- astrbot/core/platform_message_history_mgr.py +49 -0
- astrbot/core/provider/__init__.py +2 -3
- astrbot/core/provider/entites.py +8 -8
- astrbot/core/provider/entities.py +154 -98
- astrbot/core/provider/func_tool_manager.py +446 -458
- astrbot/core/provider/manager.py +345 -207
- astrbot/core/provider/provider.py +188 -73
- astrbot/core/provider/register.py +9 -7
- astrbot/core/provider/sources/anthropic_source.py +295 -115
- astrbot/core/provider/sources/azure_tts_source.py +224 -0
- astrbot/core/provider/sources/bailian_rerank_source.py +236 -0
- astrbot/core/provider/sources/dashscope_tts.py +138 -14
- astrbot/core/provider/sources/edge_tts_source.py +24 -19
- astrbot/core/provider/sources/fishaudio_tts_api_source.py +58 -13
- astrbot/core/provider/sources/gemini_embedding_source.py +61 -0
- astrbot/core/provider/sources/gemini_source.py +310 -132
- astrbot/core/provider/sources/gemini_tts_source.py +81 -0
- astrbot/core/provider/sources/groq_source.py +15 -0
- astrbot/core/provider/sources/gsv_selfhosted_source.py +151 -0
- astrbot/core/provider/sources/gsvi_tts_source.py +14 -7
- astrbot/core/provider/sources/minimax_tts_api_source.py +159 -0
- astrbot/core/provider/sources/openai_embedding_source.py +40 -0
- astrbot/core/provider/sources/openai_source.py +241 -145
- astrbot/core/provider/sources/openai_tts_api_source.py +18 -7
- astrbot/core/provider/sources/sensevoice_selfhosted_source.py +13 -11
- astrbot/core/provider/sources/vllm_rerank_source.py +71 -0
- astrbot/core/provider/sources/volcengine_tts.py +115 -0
- astrbot/core/provider/sources/whisper_api_source.py +18 -13
- astrbot/core/provider/sources/whisper_selfhosted_source.py +19 -12
- astrbot/core/provider/sources/xinference_rerank_source.py +116 -0
- astrbot/core/provider/sources/xinference_stt_provider.py +197 -0
- astrbot/core/provider/sources/zhipu_source.py +6 -73
- astrbot/core/star/__init__.py +43 -11
- astrbot/core/star/config.py +17 -18
- astrbot/core/star/context.py +362 -138
- astrbot/core/star/filter/__init__.py +4 -3
- astrbot/core/star/filter/command.py +111 -35
- astrbot/core/star/filter/command_group.py +46 -34
- astrbot/core/star/filter/custom_filter.py +6 -5
- astrbot/core/star/filter/event_message_type.py +4 -2
- astrbot/core/star/filter/permission.py +4 -2
- astrbot/core/star/filter/platform_adapter_type.py +45 -12
- astrbot/core/star/filter/regex.py +4 -2
- astrbot/core/star/register/__init__.py +19 -15
- astrbot/core/star/register/star.py +41 -13
- astrbot/core/star/register/star_handler.py +236 -86
- astrbot/core/star/session_llm_manager.py +280 -0
- astrbot/core/star/session_plugin_manager.py +170 -0
- astrbot/core/star/star.py +36 -43
- astrbot/core/star/star_handler.py +47 -85
- astrbot/core/star/star_manager.py +442 -260
- astrbot/core/star/star_tools.py +167 -45
- astrbot/core/star/updator.py +17 -20
- astrbot/core/umop_config_router.py +106 -0
- astrbot/core/updator.py +38 -13
- astrbot/core/utils/astrbot_path.py +39 -0
- astrbot/core/utils/command_parser.py +1 -1
- astrbot/core/utils/io.py +119 -60
- astrbot/core/utils/log_pipe.py +1 -1
- astrbot/core/utils/metrics.py +11 -10
- astrbot/core/utils/migra_helper.py +73 -0
- astrbot/core/utils/path_util.py +63 -62
- astrbot/core/utils/pip_installer.py +37 -15
- astrbot/core/utils/session_lock.py +29 -0
- astrbot/core/utils/session_waiter.py +19 -20
- astrbot/core/utils/shared_preferences.py +174 -34
- astrbot/core/utils/t2i/__init__.py +4 -1
- astrbot/core/utils/t2i/local_strategy.py +386 -238
- astrbot/core/utils/t2i/network_strategy.py +109 -49
- astrbot/core/utils/t2i/renderer.py +29 -14
- astrbot/core/utils/t2i/template/astrbot_powershell.html +184 -0
- astrbot/core/utils/t2i/template_manager.py +111 -0
- astrbot/core/utils/tencent_record_helper.py +115 -1
- astrbot/core/utils/version_comparator.py +10 -13
- astrbot/core/zip_updator.py +112 -65
- astrbot/dashboard/routes/__init__.py +20 -13
- astrbot/dashboard/routes/auth.py +20 -9
- astrbot/dashboard/routes/chat.py +297 -141
- astrbot/dashboard/routes/config.py +652 -55
- astrbot/dashboard/routes/conversation.py +107 -37
- astrbot/dashboard/routes/file.py +26 -0
- astrbot/dashboard/routes/knowledge_base.py +1244 -0
- astrbot/dashboard/routes/log.py +27 -2
- astrbot/dashboard/routes/persona.py +202 -0
- astrbot/dashboard/routes/plugin.py +197 -139
- astrbot/dashboard/routes/route.py +27 -7
- astrbot/dashboard/routes/session_management.py +354 -0
- astrbot/dashboard/routes/stat.py +85 -18
- astrbot/dashboard/routes/static_file.py +5 -2
- astrbot/dashboard/routes/t2i.py +233 -0
- astrbot/dashboard/routes/tools.py +184 -120
- astrbot/dashboard/routes/update.py +59 -36
- astrbot/dashboard/server.py +96 -36
- astrbot/dashboard/utils.py +165 -0
- astrbot-4.7.0.dist-info/METADATA +294 -0
- astrbot-4.7.0.dist-info/RECORD +274 -0
- {astrbot-3.5.6.dist-info → astrbot-4.7.0.dist-info}/WHEEL +1 -1
- astrbot/core/db/plugin/sqlite_impl.py +0 -112
- astrbot/core/db/sqlite_init.sql +0 -50
- astrbot/core/pipeline/platform_compatibility/stage.py +0 -56
- astrbot/core/pipeline/process_stage/method/llm_request.py +0 -606
- astrbot/core/platform/sources/gewechat/client.py +0 -806
- astrbot/core/platform/sources/gewechat/downloader.py +0 -55
- astrbot/core/platform/sources/gewechat/gewechat_event.py +0 -255
- astrbot/core/platform/sources/gewechat/gewechat_platform_adapter.py +0 -103
- astrbot/core/platform/sources/gewechat/xml_data_parser.py +0 -110
- astrbot/core/provider/sources/dashscope_source.py +0 -203
- astrbot/core/provider/sources/dify_source.py +0 -281
- astrbot/core/provider/sources/llmtuner_source.py +0 -132
- astrbot/core/rag/embedding/openai_source.py +0 -20
- astrbot/core/rag/knowledge_db_mgr.py +0 -94
- astrbot/core/rag/store/__init__.py +0 -9
- astrbot/core/rag/store/chroma_db.py +0 -42
- astrbot/core/utils/dify_api_client.py +0 -152
- astrbot-3.5.6.dist-info/METADATA +0 -249
- astrbot-3.5.6.dist-info/RECORD +0 -158
- {astrbot-3.5.6.dist-info → astrbot-4.7.0.dist-info}/entry_points.txt +0 -0
- {astrbot-3.5.6.dist-info → astrbot-4.7.0.dist-info}/licenses/LICENSE +0 -0
astrbot/core/provider/sources/openai_source.py

@@ -1,79 +1,101 @@
+import asyncio
 import base64
+import inspect
 import json
 import os
-import inspect
 import random
-import
-
+import re
+from collections.abc import AsyncGenerator

-from openai import
+from openai import AsyncAzureOpenAI, AsyncOpenAI
+from openai._exceptions import NotFoundError
+from openai.lib.streaming.chat._completions import ChatCompletionStreamState
 from openai.types.chat.chat_completion import ChatCompletion
+from openai.types.chat.chat_completion_chunk import ChatCompletionChunk

-
-from
-from
-from astrbot.core.
+import astrbot.core.message.components as Comp
+from astrbot import logger
+from astrbot.api.provider import Provider
+from astrbot.core.agent.message import Message
+from astrbot.core.agent.tool import ToolSet
 from astrbot.core.message.message_event_result import MessageChain
+from astrbot.core.provider.entities import LLMResponse, ToolCallsResult
+from astrbot.core.utils.io import download_image_by_url

-from astrbot.core.db import BaseDatabase
-from astrbot.api.provider import Provider, Personality
-from astrbot import logger
-from astrbot.core.provider.func_tool_manager import FuncCall
-from typing import List, AsyncGenerator
 from ..register import register_provider_adapter
-from astrbot.core.provider.entities import LLMResponse


 @register_provider_adapter(
-    "openai_chat_completion",
+    "openai_chat_completion",
+    "OpenAI API Chat Completion 提供商适配器",
 )
 class ProviderOpenAIOfficial(Provider):
-    def __init__(
-
-        provider_config: dict,
-        provider_settings: dict,
-        db_helper: BaseDatabase,
-        persistant_history=True,
-        default_persona: Personality = None,
-    ) -> None:
-        super().__init__(
-            provider_config,
-            provider_settings,
-            persistant_history,
-            db_helper,
-            default_persona,
-        )
+    def __init__(self, provider_config, provider_settings) -> None:
+        super().__init__(provider_config, provider_settings)
         self.chosen_api_key = None
-        self.api_keys:
+        self.api_keys: list = super().get_keys()
         self.chosen_api_key = self.api_keys[0] if len(self.api_keys) > 0 else None
         self.timeout = provider_config.get("timeout", 120)
+        self.custom_headers = provider_config.get("custom_headers", {})
         if isinstance(self.timeout, str):
             self.timeout = int(self.timeout)
-
+
+        if not isinstance(self.custom_headers, dict) or not self.custom_headers:
+            self.custom_headers = None
+        else:
+            for key in self.custom_headers:
+                self.custom_headers[key] = str(self.custom_headers[key])
+
         if "api_version" in provider_config:
-            #
+            # Using Azure OpenAI API
             self.client = AsyncAzureOpenAI(
                 api_key=self.chosen_api_key,
                 api_version=provider_config.get("api_version", None),
-
+                default_headers=self.custom_headers,
+                base_url=provider_config.get("api_base", ""),
                 timeout=self.timeout,
             )
         else:
-            #
+            # Using OpenAI Official API
             self.client = AsyncOpenAI(
                 api_key=self.chosen_api_key,
                 base_url=provider_config.get("api_base", None),
+                default_headers=self.custom_headers,
                 timeout=self.timeout,
             )

         self.default_params = inspect.signature(
-            self.client.chat.completions.create
+            self.client.chat.completions.create,
         ).parameters.keys()

         model_config = provider_config.get("model_config", {})
         model = model_config.get("model", "unknown")
         self.set_model(model)

+        self.reasoning_key = "reasoning_content"
+
+    def _maybe_inject_xai_search(self, payloads: dict, **kwargs):
+        """当开启 xAI 原生搜索时,向请求体注入 Live Search 参数。
+
+        - 仅在 provider_config.xai_native_search 为 True 时生效
+        - 默认注入 {"mode": "auto"}
+        - 允许通过 kwargs 使用 xai_search_mode 覆盖(on/auto/off)
+        """
+        if not bool(self.provider_config.get("xai_native_search", False)):
+            return
+
+        mode = kwargs.get("xai_search_mode", "auto")
+        mode = str(mode).lower()
+        if mode not in ("auto", "on", "off"):
+            mode = "auto"
+
+        # off 时不注入,保持与未开启一致
+        if mode == "off":
+            return
+
+        # OpenAI SDK 不识别的字段会在 _query/_query_stream 中放入 extra_body
+        payloads["search_parameters"] = {"mode": mode}
+
     async def get_models(self):
         try:
             models_str = []
@@ -85,12 +107,12 @@ class ProviderOpenAIOfficial(Provider):
         except NotFoundError as e:
             raise Exception(f"获取模型列表失败:{e}")

-    async def _query(self, payloads: dict, tools:
+    async def _query(self, payloads: dict, tools: ToolSet | None) -> LLMResponse:
         if tools:
             model = payloads.get("model", "").lower()
             omit_empty_param_field = "gemini" in model
             tool_list = tools.get_func_desc_openai_style(
-                omit_empty_parameter_field=omit_empty_param_field
+                omit_empty_parameter_field=omit_empty_param_field,
             )
             if tool_list:
                 payloads["tools"] = tool_list
@@ -98,45 +120,66 @@ class ProviderOpenAIOfficial(Provider):
         # 不在默认参数中的参数放在 extra_body 中
         extra_body = {}
         to_del = []
-        for key in payloads
+        for key in payloads:
             if key not in self.default_params:
                 extra_body[key] = payloads[key]
                 to_del.append(key)
         for key in to_del:
             del payloads[key]

+        # 读取并合并 custom_extra_body 配置
+        custom_extra_body = self.provider_config.get("custom_extra_body", {})
+        if isinstance(custom_extra_body, dict):
+            extra_body.update(custom_extra_body)
+
+        model = payloads.get("model", "").lower()
+
+        # 针对 deepseek 模型的特殊处理:deepseek-reasoner调用必须移除 tools ,否则将被切换至 deepseek-chat
+        if model == "deepseek-reasoner" and "tools" in payloads:
+            del payloads["tools"]
+
         completion = await self.client.chat.completions.create(
-            **payloads,
+            **payloads,
+            stream=False,
+            extra_body=extra_body,
         )

         if not isinstance(completion, ChatCompletion):
             raise Exception(
-                f"API 返回的 completion 类型错误:{type(completion)}: {completion}。"
+                f"API 返回的 completion 类型错误:{type(completion)}: {completion}。",
             )

         logger.debug(f"completion: {completion}")

-        llm_response = await self.
+        llm_response = await self._parse_openai_completion(completion, tools)

         return llm_response

     async def _query_stream(
-        self,
+        self,
+        payloads: dict,
+        tools: ToolSet | None,
     ) -> AsyncGenerator[LLMResponse, None]:
         """流式查询API,逐步返回结果"""
         if tools:
             model = payloads.get("model", "").lower()
             omit_empty_param_field = "gemini" in model
             tool_list = tools.get_func_desc_openai_style(
-                omit_empty_parameter_field=omit_empty_param_field
+                omit_empty_parameter_field=omit_empty_param_field,
             )
             if tool_list:
                 payloads["tools"] = tool_list

         # 不在默认参数中的参数放在 extra_body 中
         extra_body = {}
+
+        # 读取并合并 custom_extra_body 配置
+        custom_extra_body = self.provider_config.get("custom_extra_body", {})
+        if isinstance(custom_extra_body, dict):
+            extra_body.update(custom_extra_body)
+
         to_del = []
-        for key in payloads
+        for key in payloads:
             if key not in self.default_params:
                 extra_body[key] = payloads[key]
                 to_del.append(key)
@@ -144,7 +187,9 @@ class ProviderOpenAIOfficial(Provider):
             del payloads[key]

         stream = await self.client.chat.completions.create(
-            **payloads,
+            **payloads,
+            stream=True,
+            extra_body=extra_body,
         )

         llm_response = LLMResponse("assistant", is_chunk=True)
@@ -159,57 +204,115 @@ class ProviderOpenAIOfficial(Provider):
             if len(chunk.choices) == 0:
                 continue
             delta = chunk.choices[0].delta
-            #
+            # logger.debug(f"chunk delta: {delta}")
+            # handle the content delta
+            reasoning = self._extract_reasoning_content(chunk)
+            _y = False
+            if reasoning:
+                llm_response.reasoning_content = reasoning
+                _y = True
             if delta.content:
                 completion_text = delta.content
                 llm_response.result_chain = MessageChain(
-                    chain=[Comp.Plain(completion_text)]
+                    chain=[Comp.Plain(completion_text)],
                 )
+                _y = True
+            if _y:
                 yield llm_response

         final_completion = state.get_final_completion()
-        llm_response = await self.
+        llm_response = await self._parse_openai_completion(final_completion, tools)

         yield llm_response

-
-        self,
-
-
+    def _extract_reasoning_content(
+        self,
+        completion: ChatCompletion | ChatCompletionChunk,
+    ) -> str:
+        """Extract reasoning content from OpenAI ChatCompletion if available."""
+        reasoning_text = ""
+        if len(completion.choices) == 0:
+            return reasoning_text
+        if isinstance(completion, ChatCompletion):
+            choice = completion.choices[0]
+            reasoning_attr = getattr(choice.message, self.reasoning_key, None)
+            if reasoning_attr:
+                reasoning_text = str(reasoning_attr)
+        elif isinstance(completion, ChatCompletionChunk):
+            delta = completion.choices[0].delta
+            reasoning_attr = getattr(delta, self.reasoning_key, None)
+            if reasoning_attr:
+                reasoning_text = str(reasoning_attr)
+        return reasoning_text
+
+    async def _parse_openai_completion(
+        self, completion: ChatCompletion, tools: ToolSet | None
+    ) -> LLMResponse:
+        """Parse OpenAI ChatCompletion into LLMResponse"""
         llm_response = LLMResponse("assistant")

         if len(completion.choices) == 0:
             raise Exception("API 返回的 completion 为空。")
         choice = completion.choices[0]

-
+        # parse the text completion
+        if choice.message.content is not None:
             # text completion
             completion_text = str(choice.message.content).strip()
+            # specially, some providers may set <think> tags around reasoning content in the completion text,
+            # we use regex to remove them, and store then in reasoning_content field
+            reasoning_pattern = re.compile(r"<think>(.*?)</think>", re.DOTALL)
+            matches = reasoning_pattern.findall(completion_text)
+            if matches:
+                llm_response.reasoning_content = "\n".join(
+                    [match.strip() for match in matches],
+                )
+                completion_text = reasoning_pattern.sub("", completion_text).strip()
             llm_response.result_chain = MessageChain().message(completion_text)

-        if
-
+        # parse the reasoning content if any
+        # the priority is higher than the <think> tag extraction
+        llm_response.reasoning_content = self._extract_reasoning_content(completion)
+
+        # parse tool calls if any
+        if choice.message.tool_calls and tools is not None:
             args_ls = []
             func_name_ls = []
             tool_call_ids = []
+            tool_call_extra_content_dict = {}
             for tool_call in choice.message.tool_calls:
+                if isinstance(tool_call, str):
+                    # workaround for #1359
+                    tool_call = json.loads(tool_call)
                 for tool in tools.func_list:
-                    if
-
+                    if (
+                        tool_call.type == "function"
+                        and tool.name == tool_call.function.name
+                    ):
+                        # workaround for #1454
+                        if isinstance(tool_call.function.arguments, str):
+                            args = json.loads(tool_call.function.arguments)
+                        else:
+                            args = tool_call.function.arguments
                         args_ls.append(args)
                         func_name_ls.append(tool_call.function.name)
                         tool_call_ids.append(tool_call.id)
+
+                        # gemini-2.5 / gemini-3 series extra_content handling
+                        extra_content = getattr(tool_call, "extra_content", None)
+                        if extra_content is not None:
+                            tool_call_extra_content_dict[tool_call.id] = extra_content
             llm_response.role = "tool"
             llm_response.tools_call_args = args_ls
             llm_response.tools_call_name = func_name_ls
             llm_response.tools_call_ids = tool_call_ids
-
+            llm_response.tools_call_extra_content = tool_call_extra_content_dict
+        # specially handle finish reason
         if choice.finish_reason == "content_filter":
             raise Exception(
-                "API 返回的 completion 由于内容安全过滤被拒绝(非 AstrBot)。"
+                "API 返回的 completion 由于内容安全过滤被拒绝(非 AstrBot)。",
             )
-
-        if not llm_response.completion_text and not llm_response.tools_call_args:
+        if llm_response.completion_text is None and not llm_response.tools_call_args:
             logger.error(f"API 返回的 completion 无法解析:{completion}。")
             raise Exception(f"API 返回的 completion 无法解析:{completion}。")

@@ -219,18 +322,23 @@ class ProviderOpenAIOfficial(Provider):

     async def _prepare_chat_payload(
         self,
-        prompt: str,
-
-
-
-
-
-        tool_calls_result=None,
+        prompt: str | None,
+        image_urls: list[str] | None = None,
+        contexts: list[dict] | list[Message] | None = None,
+        system_prompt: str | None = None,
+        tool_calls_result: ToolCallsResult | list[ToolCallsResult] | None = None,
+        model: str | None = None,
         **kwargs,
     ) -> tuple:
         """准备聊天所需的有效载荷和上下文"""
-
-
+        if contexts is None:
+            contexts = []
+        new_record = None
+        if prompt is not None:
+            new_record = await self.assemble_context(prompt, image_urls)
+        context_query = self._ensure_message_to_dicts(contexts)
+        if new_record:
+            context_query.append(new_record)
         if system_prompt:
             context_query.insert(0, {"role": "system", "content": system_prompt})

@@ -240,30 +348,37 @@ class ProviderOpenAIOfficial(Provider):

         # tool calls result
         if tool_calls_result:
-
+            if isinstance(tool_calls_result, ToolCallsResult):
+                context_query.extend(tool_calls_result.to_openai_messages())
+            else:
+                for tcr in tool_calls_result:
+                    context_query.extend(tcr.to_openai_messages())

         model_config = self.provider_config.get("model_config", {})
-        model_config["model"] = self.get_model()
+        model_config["model"] = model or self.get_model()

         payloads = {"messages": context_query, **model_config}

-
+        # xAI origin search tool inject
+        self._maybe_inject_xai_search(payloads, **kwargs)
+
+        return payloads, context_query

     async def _handle_api_error(
         self,
         e: Exception,
         payloads: dict,
         context_query: list,
-        func_tool:
+        func_tool: ToolSet | None,
         chosen_key: str,
-        available_api_keys:
+        available_api_keys: list[str],
         retry_cnt: int,
         max_retries: int,
     ) -> tuple:
         """处理API错误并尝试恢复"""
         if "429" in str(e):
             logger.warning(
-                f"API 调用过于频繁,尝试使用其他 Key 重试。当前 Key: {chosen_key[:12]}"
+                f"API 调用过于频繁,尝试使用其他 Key 重试。当前 Key: {chosen_key[:12]}",
             )
             # 最后一次不等待
             if retry_cnt < max_retries - 1:
@@ -279,11 +394,10 @@ class ProviderOpenAIOfficial(Provider):
                     context_query,
                     func_tool,
                 )
-
-
-        elif "maximum context length" in str(e):
+            raise e
+        if "maximum context length" in str(e):
             logger.warning(
-                f"上下文长度超过限制。尝试弹出最早的记录然后重试。当前记录条数: {len(context_query)}"
+                f"上下文长度超过限制。尝试弹出最早的记录然后重试。当前记录条数: {len(context_query)}",
             )
             await self.pop_record(context_query)
             payloads["messages"] = context_query
@@ -295,7 +409,7 @@ class ProviderOpenAIOfficial(Provider):
                 context_query,
                 func_tool,
             )
-
+        if "The model is not a VLM" in str(e):  # siliconcloud
             # 尝试删除所有 image
             new_contexts = await self._remove_image_from_context(context_query)
             payloads["messages"] = new_contexts
@@ -308,52 +422,50 @@ class ProviderOpenAIOfficial(Provider):
                 context_query,
                 func_tool,
             )
-
+        if (
             "Function calling is not enabled" in str(e)
             or ("tool" in str(e).lower() and "support" in str(e).lower())
             or ("function" in str(e).lower() and "support" in str(e).lower())
         ):
             # openai, ollama, gemini openai, siliconcloud 的错误提示与 code 不统一,只能通过字符串匹配
             logger.info(
-                f"{self.get_model()} 不支持函数工具调用,已自动去除,不影响使用。"
+                f"{self.get_model()} 不支持函数工具调用,已自动去除,不影响使用。",
             )
-
-            del payloads["tools"]
+            payloads.pop("tools", None)
             return False, chosen_key, available_api_keys, payloads, context_query, None
-
-        logger.error(f"发生了错误。Provider 配置如下: {self.provider_config}")
+        logger.error(f"发生了错误。Provider 配置如下: {self.provider_config}")

-
-
+        if "tool" in str(e).lower() and "support" in str(e).lower():
+            logger.error("疑似该模型不支持函数调用工具调用。请输入 /tool off_all")

-
-
-
-
-
-
+        if "Connection error." in str(e):
+            proxy = os.environ.get("http_proxy", None)
+            if proxy:
+                logger.error(
+                    f"可能为代理原因,请检查代理是否正常。当前代理: {proxy}",
+                )

-
+        raise e

     async def text_chat(
         self,
-        prompt
-        session_id
-        image_urls
-        func_tool
-        contexts=
+        prompt=None,
+        session_id=None,
+        image_urls=None,
+        func_tool=None,
+        contexts=None,
         system_prompt=None,
         tool_calls_result=None,
+        model=None,
         **kwargs,
     ) -> LLMResponse:
-        payloads, context_query
+        payloads, context_query = await self._prepare_chat_payload(
             prompt,
-            session_id,
             image_urls,
-            func_tool,
             contexts,
             system_prompt,
             tool_calls_result,
+            model=model,
             **kwargs,
         )

@@ -369,12 +481,6 @@ class ProviderOpenAIOfficial(Provider):
                 self.client.api_key = chosen_key
                 llm_response = await self._query(payloads, func_tool)
                 break
-            except UnprocessableEntityError as e:
-                logger.warning(f"不可处理的实体错误:{e},尝试删除图片。")
-                # 尝试删除所有 image
-                new_contexts = await self._remove_image_from_context(context_query)
-                payloads["messages"] = new_contexts
-                context_query = new_contexts
             except Exception as e:
                 last_exception = e
                 (
@@ -397,7 +503,7 @@ class ProviderOpenAIOfficial(Provider):
                 if success:
                     break

-        if retry_cnt == max_retries - 1:
+        if retry_cnt == max_retries - 1 or llm_response is None:
             logger.error(f"API 调用失败,重试 {max_retries} 次仍然失败。")
             if last_exception is None:
                 raise Exception("未知错误")
@@ -406,24 +512,24 @@ class ProviderOpenAIOfficial(Provider):

     async def text_chat_stream(
         self,
-        prompt
-        session_id
-        image_urls
-        func_tool
-        contexts=
+        prompt=None,
+        session_id=None,
+        image_urls=None,
+        func_tool=None,
+        contexts=None,
         system_prompt=None,
         tool_calls_result=None,
+        model=None,
         **kwargs,
     ) -> AsyncGenerator[LLMResponse, None]:
         """流式对话,与服务商交互并逐步返回结果"""
-        payloads, context_query
+        payloads, context_query = await self._prepare_chat_payload(
             prompt,
-            session_id,
             image_urls,
-            func_tool,
             contexts,
             system_prompt,
             tool_calls_result,
+            model=model,
             **kwargs,
         )

@@ -439,12 +545,6 @@ class ProviderOpenAIOfficial(Provider):
                 async for response in self._query_stream(payloads, func_tool):
                     yield response
                 break
-            except UnprocessableEntityError as e:
-                logger.warning(f"不可处理的实体错误:{e},尝试删除图片。")
-                # 尝试删除所有 image
-                new_contexts = await self._remove_image_from_context(context_query)
-                payloads["messages"] = new_contexts
-                context_query = new_contexts
             except Exception as e:
                 last_exception = e
                 (
@@ -473,19 +573,12 @@ class ProviderOpenAIOfficial(Provider):
                 raise Exception("未知错误")
             raise last_exception

-    async def _remove_image_from_context(self, contexts:
-        """
-        从上下文中删除所有带有 image 的记录
-        """
+    async def _remove_image_from_context(self, contexts: list):
+        """从上下文中删除所有带有 image 的记录"""
         new_contexts = []

-        flag = False
         for context in contexts:
-            if
-                flag = False  # 删除 image 后,下一条(LLM 响应)也要删除
-                continue
-            if isinstance(context["content"], list):
-                flag = True
+            if "content" in context and isinstance(context["content"], list):
                 # continue
                 new_content = []
                 for item in context["content"]:
@@ -502,13 +595,17 @@ class ProviderOpenAIOfficial(Provider):
     def get_current_key(self) -> str:
         return self.client.api_key

-    def get_keys(self) ->
+    def get_keys(self) -> list[str]:
         return self.api_keys

     def set_key(self, key):
         self.client.api_key = key

-    async def assemble_context(
+    async def assemble_context(
+        self,
+        text: str,
+        image_urls: list[str] | None = None,
+    ) -> dict:
         """组装成符合 OpenAI 格式的 role 为 user 的消息段"""
         if image_urls:
             user_content = {
@@ -528,19 +625,18 @@ class ProviderOpenAIOfficial(Provider):
                     logger.warning(f"图片 {image_url} 得到的结果为空,将忽略。")
                     continue
                 user_content["content"].append(
-                    {
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": image_data},
+                    },
                 )
             return user_content
-
-        return {"role": "user", "content": text}
+        return {"role": "user", "content": text}

     async def encode_image_bs64(self, image_url: str) -> str:
-        """
-        将图片转换为 base64
-        """
+        """将图片转换为 base64"""
         if image_url.startswith("base64://"):
             return image_url.replace("base64://", "data:image/jpeg;base64,")
         with open(image_url, "rb") as f:
             image_bs64 = base64.b64encode(f.read()).decode("utf-8")
             return "data:image/jpeg;base64," + image_bs64
-        return ""