AstrBot 4.10.3__py3-none-any.whl → 4.10.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/builtin_stars/astrbot/main.py +2 -10
- astrbot/builtin_stars/python_interpreter/main.py +130 -131
- astrbot/cli/__init__.py +1 -1
- astrbot/core/agent/message.py +23 -1
- astrbot/core/agent/runners/tool_loop_agent_runner.py +24 -7
- astrbot/core/astr_agent_hooks.py +6 -0
- astrbot/core/backup/exporter.py +1 -0
- astrbot/core/config/astrbot_config.py +2 -0
- astrbot/core/config/default.py +47 -6
- astrbot/core/knowledge_base/chunking/recursive.py +10 -2
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +184 -174
- astrbot/core/pipeline/result_decorate/stage.py +65 -57
- astrbot/core/pipeline/waking_check/stage.py +29 -2
- astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py +15 -29
- astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +1 -6
- astrbot/core/platform/sources/dingtalk/dingtalk_event.py +15 -1
- astrbot/core/platform/sources/lark/lark_adapter.py +2 -10
- astrbot/core/platform/sources/misskey/misskey_adapter.py +0 -5
- astrbot/core/platform/sources/misskey/misskey_utils.py +0 -3
- astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +4 -9
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +4 -9
- astrbot/core/platform/sources/satori/satori_adapter.py +6 -1
- astrbot/core/platform/sources/slack/slack_adapter.py +3 -6
- astrbot/core/platform/sources/webchat/webchat_adapter.py +0 -1
- astrbot/core/platform/sources/wechatpadpro/wechatpadpro_adapter.py +3 -5
- astrbot/core/provider/entities.py +9 -1
- astrbot/core/provider/sources/anthropic_source.py +60 -3
- astrbot/core/provider/sources/gemini_source.py +37 -3
- astrbot/core/provider/sources/minimax_tts_api_source.py +4 -1
- astrbot/core/provider/sources/openai_source.py +25 -31
- astrbot/core/provider/sources/xai_source.py +29 -0
- astrbot/core/provider/sources/xinference_stt_provider.py +24 -12
- astrbot/core/star/star_manager.py +41 -0
- astrbot/core/utils/pip_installer.py +20 -1
- astrbot/dashboard/routes/backup.py +519 -15
- astrbot/dashboard/routes/config.py +45 -0
- astrbot/dashboard/server.py +1 -0
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/METADATA +1 -1
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/RECORD +42 -41
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/WHEEL +0 -0
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/entry_points.txt +0 -0
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/licenses/LICENSE +0 -0

astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py
@@ -41,7 +41,6 @@ class AiocqhttpAdapter(Platform):
         super().__init__(platform_config, event_queue)
 
         self.settings = platform_settings
-        self.unique_session = platform_settings["unique_session"]
         self.host = platform_config["ws_reverse_host"]
         self.port = platform_config["ws_reverse_port"]
 
@@ -136,14 +135,11 @@ class AiocqhttpAdapter(Platform):
             abm.group_id = str(event.group_id)
         else:
             abm.type = MessageType.FRIEND_MESSAGE
-
-
-
-        abm.
-
-            if abm.type == MessageType.GROUP_MESSAGE
-            else abm.sender.user_id
-        )
+        abm.session_id = (
+            str(event.group_id)
+            if abm.type == MessageType.GROUP_MESSAGE
+            else abm.sender.user_id
+        )
         abm.message_str = ""
         abm.message = []
         abm.timestamp = int(time.time())
@@ -164,16 +160,11 @@ class AiocqhttpAdapter(Platform):
             abm.type = MessageType.GROUP_MESSAGE
         else:
             abm.type = MessageType.FRIEND_MESSAGE
-
-
-
-
-
-        abm.session_id = (
-            str(event.group_id)
-            if abm.type == MessageType.GROUP_MESSAGE
-            else abm.sender.user_id
-        )
+        abm.session_id = (
+            str(event.group_id)
+            if abm.type == MessageType.GROUP_MESSAGE
+            else abm.sender.user_id
+        )
         abm.message_str = ""
         abm.message = []
         abm.raw_message = event
@@ -210,16 +201,11 @@ class AiocqhttpAdapter(Platform):
             abm.group.group_name = event.get("group_name", "N/A")
         elif event["message_type"] == "private":
             abm.type = MessageType.FRIEND_MESSAGE
-
-
-
-
-
-        abm.session_id = (
-            str(event.group_id)
-            if abm.type == MessageType.GROUP_MESSAGE
-            else abm.sender.user_id
-        )
+        abm.session_id = (
+            str(event.group_id)
+            if abm.type == MessageType.GROUP_MESSAGE
+            else abm.sender.user_id
+        )
 
         abm.message_id = str(event.message_id)
         abm.message = []

astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py
@@ -50,8 +50,6 @@ class DingtalkPlatformAdapter(Platform):
     ) -> None:
         super().__init__(platform_config, event_queue)
 
-        self.unique_session = platform_settings["unique_session"]
-
         self.client_id = platform_config["client_id"]
         self.client_secret = platform_config["client_secret"]
 
@@ -129,10 +127,7 @@ class DingtalkPlatformAdapter(Platform):
             if id := self._id_to_sid(user.dingtalk_id):
                 abm.message.append(At(qq=id))
             abm.group_id = message.conversation_id
-
-                abm.session_id = abm.sender.user_id
-            else:
-                abm.session_id = abm.group_id
+            abm.session_id = abm.group_id
         else:
             abm.session_id = abm.sender.user_id
 

astrbot/core/platform/sources/dingtalk/dingtalk_event.py
@@ -25,6 +25,20 @@ class DingtalkMessageEvent(AstrMessageEvent):
        client: dingtalk_stream.ChatbotHandler,
        message: MessageChain,
    ):
+        icm = cast(dingtalk_stream.ChatbotMessage, self.message_obj.raw_message)
+        ats = []
+        # fixes: #4218
+        # 钉钉 at 机器人需要使用 sender_staff_id 而不是 sender_id
+        for i in message.chain:
+            if isinstance(i, Comp.At):
+                print(i.qq, icm.sender_id, icm.sender_staff_id)
+                if str(i.qq) in str(icm.sender_id or ""):
+                    # 适配器会将开头的 $:LWCP_v1:$ 去掉,因此我们用 in 判断
+                    ats.append(f"@{icm.sender_staff_id}")
+                else:
+                    ats.append(f"@{i.qq}")
+        at_str = " ".join(ats)
+
         for segment in message.chain:
             if isinstance(segment, Comp.Plain):
                 segment.text = segment.text.strip()
@@ -32,7 +46,7 @@ class DingtalkMessageEvent(AstrMessageEvent):
                     None,
                     client.reply_markdown,
                     segment.text,
-                    segment.text,
+                    f"{at_str} {segment.text}".strip(),
                     cast(dingtalk_stream.ChatbotMessage, self.message_obj.raw_message),
                 )
             elif isinstance(segment, Comp.Image):

astrbot/core/platform/sources/lark/lark_adapter.py
@@ -44,8 +44,6 @@ class LarkPlatformAdapter(Platform):
     ) -> None:
         super().__init__(platform_config, event_queue)
 
-        self.unique_session = platform_settings["unique_session"]
-
         self.appid = platform_config["app_id"]
         self.appsecret = platform_config["app_secret"]
         self.domain = platform_config.get("domain", lark.FEISHU_DOMAIN)
@@ -317,14 +315,8 @@ class LarkPlatformAdapter(Platform):
             user_id=event.event.sender.sender_id.open_id,
             nickname=event.event.sender.sender_id.open_id[:8],
         )
-
-
-            if abm.type == MessageType.GROUP_MESSAGE:
-                abm.session_id = abm.group_id
-            else:
-                abm.session_id = abm.sender.user_id
-        elif abm.type == MessageType.GROUP_MESSAGE:
-            abm.session_id = f"{abm.sender.user_id}%{abm.group_id}"  # 也保留群组id
+        if abm.type == MessageType.GROUP_MESSAGE:
+            abm.session_id = abm.group_id
         else:
             abm.session_id = abm.sender.user_id
 

astrbot/core/platform/sources/misskey/misskey_adapter.py
@@ -91,8 +91,6 @@ class MisskeyPlatformAdapter(Platform):
         except Exception:
             self.max_download_bytes = None
 
-        self.unique_session = platform_settings["unique_session"]
-
         self.api: MisskeyAPI | None = None
         self._running = False
         self.client_self_id = ""
@@ -641,7 +639,6 @@ class MisskeyPlatformAdapter(Platform):
             sender_info,
             self.client_self_id,
             is_chat=False,
-            unique_session=self.unique_session,
         )
         cache_user_info(
             self._user_cache,
@@ -690,7 +687,6 @@ class MisskeyPlatformAdapter(Platform):
             sender_info,
             self.client_self_id,
             is_chat=True,
-            unique_session=self.unique_session,
         )
         cache_user_info(
             self._user_cache,
@@ -720,7 +716,6 @@ class MisskeyPlatformAdapter(Platform):
             self.client_self_id,
             is_chat=False,
             room_id=room_id,
-            unique_session=self.unique_session,
         )
 
         cache_user_info(

astrbot/core/platform/sources/misskey/misskey_utils.py
@@ -338,7 +338,6 @@ def create_base_message(
     client_self_id: str,
     is_chat: bool = False,
     room_id: str | None = None,
-    unique_session: bool = False,
 ) -> AstrBotMessage:
     """创建基础消息对象"""
     message = AstrBotMessage()
@@ -353,8 +352,6 @@ def create_base_message(
     if room_id:
         session_prefix = "room"
         session_id = f"{session_prefix}%{room_id}"
-        if unique_session:
-            session_id += f"_{sender_info['sender_id']}"
         message.type = MessageType.GROUP_MESSAGE
         message.group_id = room_id
     elif is_chat:

astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py
@@ -44,11 +44,8 @@ class botClient(Client):
             message,
             MessageType.GROUP_MESSAGE,
         )
-        abm.
-
-            if self.platform.unique_session
-            else cast(str, message.group_openid)
-        )
+        abm.group_id = cast(str, message.group_openid)
+        abm.session_id = abm.group_id
         self._commit(abm)
 
     # 收到频道消息
@@ -57,9 +54,8 @@ class botClient(Client):
             message,
             MessageType.GROUP_MESSAGE,
         )
-        abm.
-
-        )
+        abm.group_id = message.channel_id
+        abm.session_id = abm.group_id
         self._commit(abm)
 
     # 收到私聊消息
@@ -104,7 +100,6 @@ class QQOfficialPlatformAdapter(Platform):
 
         self.appid = platform_config["appid"]
         self.secret = platform_config["secret"]
-        self.unique_session: bool = platform_settings["unique_session"]
         qq_group = platform_config["enable_group_c2c"]
         guild_dm = platform_config["enable_guild_direct_message"]
 

astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py
@@ -35,11 +35,8 @@ class botClient(Client):
             message,
             MessageType.GROUP_MESSAGE,
         )
-        abm.
-
-            if self.platform.unique_session
-            else cast(str, message.group_openid)
-        )
+        abm.group_id = cast(str, message.group_openid)
+        abm.session_id = abm.group_id
         self._commit(abm)
 
     # 收到频道消息
@@ -48,9 +45,8 @@ class botClient(Client):
             message,
             MessageType.GROUP_MESSAGE,
         )
-        abm.
-
-        )
+        abm.group_id = message.channel_id
+        abm.session_id = abm.group_id
         self._commit(abm)
 
     # 收到私聊消息
@@ -95,7 +91,6 @@ class QQOfficialWebhookPlatformAdapter(Platform):
 
         self.appid = platform_config["appid"]
         self.secret = platform_config["secret"]
-        self.unique_session = platform_settings["unique_session"]
         self.unified_webhook_mode = platform_config.get("unified_webhook_mode", False)
 
         intents = botpy.Intents(

astrbot/core/platform/sources/satori/satori_adapter.py
@@ -142,7 +142,12 @@ class SatoriPlatformAdapter(Platform):
             raise ValueError(f"WebSocket URL必须以ws://或wss://开头: {self.endpoint}")
 
         try:
-            websocket = await connect(
+            websocket = await connect(
+                self.endpoint,
+                additional_headers={},
+                max_size=10 * 1024 * 1024,  # 10MB
+            )
+
             self.ws = websocket
 
             await asyncio.sleep(0.1)

astrbot/core/platform/sources/slack/slack_adapter.py
@@ -41,7 +41,6 @@ class SlackAdapter(Platform):
     ) -> None:
         super().__init__(platform_config, event_queue)
         self.settings = platform_settings
-        self.unique_session = platform_settings.get("unique_session", False)
 
         self.bot_token = platform_config.get("bot_token")
         self.app_token = platform_config.get("app_token")
@@ -147,12 +146,10 @@ class SlackAdapter(Platform):
         abm.group_id = channel_id
 
         # 设置会话ID
-        if
-            abm.session_id =
+        if abm.type == MessageType.GROUP_MESSAGE:
+            abm.session_id = abm.group_id
         else:
-            abm.session_id =
-                channel_id if abm.type == MessageType.GROUP_MESSAGE else user_id
-            )
+            abm.session_id = user_id
 
         abm.message_id = event.get("client_msg_id", uuid.uuid4().hex)
         abm.timestamp = int(float(event.get("ts", time.time())))

astrbot/core/platform/sources/webchat/webchat_adapter.py
@@ -79,7 +79,6 @@ class WebChatAdapter(Platform):
         super().__init__(platform_config, event_queue)
 
         self.settings = platform_settings
-        self.unique_session = platform_settings["unique_session"]
        self.imgs_dir = os.path.join(get_astrbot_data_path(), "webchat", "imgs")
         os.makedirs(self.imgs_dir, exist_ok=True)
 

astrbot/core/platform/sources/wechatpadpro/wechatpadpro_adapter.py
@@ -47,7 +47,6 @@ class WeChatPadProAdapter(Platform):
         self._shutdown_event = None
         self.wxnewpass = None
         self.settings = platform_settings
-        self.unique_session = platform_settings.get("unique_session", False)
 
         self.metadata = PlatformMetadata(
             name="wechatpadpro",
@@ -509,11 +508,10 @@ class WeChatPadProAdapter(Platform):
         if accurate_nickname:
             abm.sender.nickname = accurate_nickname
 
-
-
-            abm.session_id = f"{from_user_name}#{abm.sender.user_id}"
+        if abm.type == MessageType.GROUP_MESSAGE:
+            abm.session_id = abm.group_id
         else:
-            abm.session_id =
+            abm.session_id = abm.sender.user_id
 
         msg_source = raw_message.get("msg_source", "")
         if self.wxid in msg_source:

astrbot/core/provider/entities.py
@@ -272,6 +272,8 @@ class LLMResponse:
     """Tool call extra content. tool_call_id -> extra_content dict"""
     reasoning_content: str = ""
     """The reasoning content extracted from the LLM, if any."""
+    reasoning_signature: str | None = None
+    """The signature of the reasoning content, if any."""
 
     raw_completion: (
         ChatCompletion | GenerateContentResponse | AnthropicMessage | None
@@ -292,12 +294,14 @@ class LLMResponse:
     def __init__(
         self,
         role: str,
-        completion_text: str =
+        completion_text: str | None = None,
         result_chain: MessageChain | None = None,
         tools_call_args: list[dict[str, Any]] | None = None,
         tools_call_name: list[str] | None = None,
         tools_call_ids: list[str] | None = None,
         tools_call_extra_content: dict[str, dict[str, Any]] | None = None,
+        reasoning_content: str | None = None,
+        reasoning_signature: str | None = None,
         raw_completion: ChatCompletion
         | GenerateContentResponse
         | AnthropicMessage
@@ -317,6 +321,8 @@ class LLMResponse:
             raw_completion (ChatCompletion, optional): 原始响应, OpenAI 格式. Defaults to None.
 
         """
+        if reasoning_content is None:
+            reasoning_content = ""
         if tools_call_args is None:
             tools_call_args = []
         if tools_call_name is None:
@@ -333,6 +339,8 @@ class LLMResponse:
         self.tools_call_name = tools_call_name
         self.tools_call_ids = tools_call_ids
         self.tools_call_extra_content = tools_call_extra_content
+        self.reasoning_content = reasoning_content
+        self.reasoning_signature = reasoning_signature
         self.raw_completion = raw_completion
         self.is_chunk = is_chunk
 

astrbot/core/provider/sources/anthropic_source.py
@@ -48,6 +48,8 @@ class ProviderAnthropic(Provider):
             base_url=self.base_url,
         )
 
+        self.thinking_config = provider_config.get("anth_thinking_config", {})
+
         self.set_model(provider_config.get("model", "unknown"))
 
     def _prepare_payload(self, messages: list[dict]):
@@ -64,11 +66,32 @@ class ProviderAnthropic(Provider):
         new_messages = []
         for message in messages:
             if message["role"] == "system":
-                system_prompt = message["content"]
+                system_prompt = message["content"] or "<empty system prompt>"
             elif message["role"] == "assistant":
                 blocks = []
-
+                reasoning_content = ""
+                thinking_signature = ""
+                if isinstance(message["content"], str) and message["content"].strip():
                     blocks.append({"type": "text", "text": message["content"]})
+                elif isinstance(message["content"], list):
+                    for part in message["content"]:
+                        if part.get("type") == "think":
+                            # only pick the last think part for now
+                            reasoning_content = part.get("think")
+                            thinking_signature = part.get("encrypted")
+                        else:
+                            blocks.append(part)
+
+                if reasoning_content and thinking_signature:
+                    blocks.insert(
+                        0,
+                        {
+                            "type": "thinking",
+                            "thinking": reasoning_content,
+                            "signature": thinking_signature,
+                        },
+                    )
+
                 if "tool_calls" in message and isinstance(message["tool_calls"], list):
                     for tool_call in message["tool_calls"]:
                         blocks.append(  # noqa: PERF401
@@ -100,7 +123,7 @@ class ProviderAnthropic(Provider):
                         {
                             "type": "tool_result",
                             "tool_use_id": message["tool_call_id"],
-                            "content": message["content"],
+                            "content": message["content"] or "<empty response>",
                         },
                     ],
                 },
@@ -135,6 +158,11 @@ class ProviderAnthropic(Provider):
 
         if "max_tokens" not in payloads:
             payloads["max_tokens"] = 1024
+        if self.thinking_config.get("budget"):
+            payloads["thinking"] = {
+                "budget_tokens": self.thinking_config.get("budget"),
+                "type": "enabled",
+            }
 
         completion = await self.client.messages.create(
             **payloads, stream=False, extra_body=extra_body
@@ -153,6 +181,11 @@ class ProviderAnthropic(Provider):
                 completion_text = str(content_block.text).strip()
                 llm_response.completion_text = completion_text
 
+            if content_block.type == "thinking":
+                reasoning_content = str(content_block.thinking).strip()
+                llm_response.reasoning_content = reasoning_content
+                llm_response.reasoning_signature = content_block.signature
+
             if content_block.type == "tool_use":
                 llm_response.tools_call_args.append(content_block.input)
                 llm_response.tools_call_name.append(content_block.name)
@@ -184,9 +217,16 @@ class ProviderAnthropic(Provider):
         id = None
         usage = TokenUsage()
        extra_body = self.provider_config.get("custom_extra_body", {})
+        reasoning_content = ""
+        reasoning_signature = ""
 
         if "max_tokens" not in payloads:
             payloads["max_tokens"] = 1024
+        if self.thinking_config.get("budget"):
+            payloads["thinking"] = {
+                "budget_tokens": self.thinking_config.get("budget"),
+                "type": "enabled",
+            }
 
         async with self.client.messages.stream(
             **payloads, extra_body=extra_body
@@ -226,6 +266,21 @@ class ProviderAnthropic(Provider):
                             usage=usage,
                             id=id,
                         )
+                    elif event.delta.type == "thinking_delta":
+                        # 思考增量
+                        reasoning = event.delta.thinking
+                        if reasoning:
+                            yield LLMResponse(
+                                role="assistant",
+                                reasoning_content=reasoning,
+                                is_chunk=True,
+                                usage=usage,
+                                id=id,
+                                reasoning_signature=reasoning_signature or None,
+                            )
+                            reasoning_content += reasoning
+                    elif event.delta.type == "signature_delta":
+                        reasoning_signature = event.delta.signature
                     elif event.delta.type == "input_json_delta":
                         # 工具调用参数增量
                         if event.index in tool_use_buffer:
@@ -282,6 +337,8 @@ class ProviderAnthropic(Provider):
             is_chunk=False,
             usage=usage,
             id=id,
+            reasoning_content=reasoning_content,
+            reasoning_signature=reasoning_signature or None,
         )
 
         if final_tool_calls:

astrbot/core/provider/sources/gemini_source.py
@@ -321,9 +321,37 @@ class ProviderGoogleGenAI(Provider):
                 append_or_extend(gemini_contents, parts, types.UserContent)
 
             elif role == "assistant":
-                if content:
+                if isinstance(content, str):
                     parts = [types.Part.from_text(text=content)]
                     append_or_extend(gemini_contents, parts, types.ModelContent)
+                elif isinstance(content, list):
+                    parts = []
+                    thinking_signature = None
+                    text = ""
+                    for part in content:
+                        # for most cases, assistant content only contains two parts: think and text
+                        if part.get("type") == "think":
+                            thinking_signature = part.get("encrypted") or None
+                        else:
+                            text += str(part.get("text"))
+
+                    if thinking_signature and isinstance(thinking_signature, str):
+                        try:
+                            thinking_signature = base64.b64decode(thinking_signature)
+                        except Exception as e:
+                            logger.warning(
+                                f"Failed to decode google gemini thinking signature: {e}",
+                                exc_info=True,
+                            )
+                            thinking_signature = None
+                    parts.append(
+                        types.Part(
+                            text=text,
+                            thought_signature=thinking_signature,
+                        )
+                    )
+                    append_or_extend(gemini_contents, parts, types.ModelContent)
+
             elif not native_tool_enabled and "tool_calls" in message:
                 parts = []
                 for tool in message["tool_calls"]:
@@ -441,7 +469,8 @@ class ProviderGoogleGenAI(Provider):
         for part in result_parts:
             if part.text:
                 chain.append(Comp.Plain(part.text))
-
+
+            if (
                 part.function_call
                 and part.function_call.name is not None
                 and part.function_call.args is not None
@@ -458,13 +487,18 @@ class ProviderGoogleGenAI(Provider):
                     llm_response.tools_call_extra_content[tool_call_id] = {
                         "google": {"thought_signature": ts_bs64}
                     }
-
+
+            if (
                 part.inline_data
                 and part.inline_data.mime_type
                 and part.inline_data.mime_type.startswith("image/")
                 and part.inline_data.data
             ):
                 chain.append(Comp.Image.fromBytes(part.inline_data.data))
+
+            if ts := part.thought_signature:
+                # only keep the last thinking signature
+                llm_response.reasoning_signature = base64.b64encode(ts).decode("utf-8")
         return MessageChain(chain=chain)
 
     async def _query(self, payloads: dict, tools: ToolSet | None) -> LLMResponse:

astrbot/core/provider/sources/minimax_tts_api_source.py
@@ -51,7 +51,7 @@ class ProviderMiniMaxTTSAPI(TTSProvider):
             "voice_id": ""
             if self.is_timber_weight
             else provider_config.get("minimax-voice-id", ""),
-            "emotion": provider_config.get("minimax-voice-emotion", "
+            "emotion": provider_config.get("minimax-voice-emotion", "auto"),
             "latex_read": provider_config.get("minimax-voice-latex", False),
             "english_normalization": provider_config.get(
                 "minimax-voice-english-normalization",
@@ -59,6 +59,9 @@ class ProviderMiniMaxTTSAPI(TTSProvider):
             ),
         }
 
+        if self.voice_setting["emotion"] == "auto":
+            self.voice_setting.pop("emotion", None)
+
         self.audio_setting: dict = {
             "sample_rate": 32000,
             "bitrate": 128000,