AstrBot 4.13.2__py3-none-any.whl → 4.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/builtin_stars/astrbot/main.py +0 -5
- astrbot/cli/__init__.py +1 -1
- astrbot/core/agent/agent.py +2 -1
- astrbot/core/agent/handoff.py +14 -1
- astrbot/core/agent/runners/tool_loop_agent_runner.py +14 -1
- astrbot/core/agent/tool.py +5 -0
- astrbot/core/astr_agent_run_util.py +21 -3
- astrbot/core/astr_agent_tool_exec.py +178 -3
- astrbot/core/astr_main_agent.py +980 -0
- astrbot/core/astr_main_agent_resources.py +453 -0
- astrbot/core/computer/computer_client.py +10 -1
- astrbot/core/computer/tools/fs.py +22 -14
- astrbot/core/config/default.py +84 -58
- astrbot/core/core_lifecycle.py +43 -1
- astrbot/core/cron/__init__.py +3 -0
- astrbot/core/cron/events.py +67 -0
- astrbot/core/cron/manager.py +376 -0
- astrbot/core/db/__init__.py +60 -0
- astrbot/core/db/po.py +31 -0
- astrbot/core/db/sqlite.py +120 -0
- astrbot/core/event_bus.py +0 -1
- astrbot/core/message/message_event_result.py +21 -3
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +111 -580
- astrbot/core/pipeline/scheduler.py +0 -2
- astrbot/core/platform/astr_message_event.py +0 -3
- astrbot/core/platform/platform.py +9 -0
- astrbot/core/platform/platform_metadata.py +2 -0
- astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +1 -0
- astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +1 -0
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +1 -0
- astrbot/core/platform/sources/webchat/webchat_adapter.py +1 -0
- astrbot/core/platform/sources/wecom/wecom_adapter.py +1 -0
- astrbot/core/platform/sources/wecom_ai_bot/wecomai_adapter.py +1 -0
- astrbot/core/platform/sources/weixin_official_account/weixin_offacc_adapter.py +1 -0
- astrbot/core/provider/entities.py +1 -1
- astrbot/core/skills/skill_manager.py +9 -8
- astrbot/core/star/context.py +8 -0
- astrbot/core/star/filter/custom_filter.py +3 -3
- astrbot/core/star/register/star_handler.py +1 -1
- astrbot/core/subagent_orchestrator.py +96 -0
- astrbot/core/tools/cron_tools.py +174 -0
- astrbot/core/utils/history_saver.py +31 -0
- astrbot/core/utils/trace.py +4 -0
- astrbot/dashboard/routes/__init__.py +4 -0
- astrbot/dashboard/routes/cron.py +174 -0
- astrbot/dashboard/routes/log.py +36 -0
- astrbot/dashboard/routes/plugin.py +11 -0
- astrbot/dashboard/routes/skills.py +12 -37
- astrbot/dashboard/routes/subagent.py +117 -0
- astrbot/dashboard/routes/tools.py +41 -14
- astrbot/dashboard/server.py +3 -0
- {astrbot-4.13.2.dist-info → astrbot-4.14.0.dist-info}/METADATA +21 -2
- {astrbot-4.13.2.dist-info → astrbot-4.14.0.dist-info}/RECORD +56 -50
- astrbot/builtin_stars/astrbot/process_llm_request.py +0 -308
- astrbot/builtin_stars/reminder/main.py +0 -266
- astrbot/builtin_stars/reminder/metadata.yaml +0 -4
- astrbot/core/pipeline/process_stage/utils.py +0 -219
- {astrbot-4.13.2.dist-info → astrbot-4.14.0.dist-info}/WHEEL +0 -0
- {astrbot-4.13.2.dist-info → astrbot-4.14.0.dist-info}/entry_points.txt +0 -0
- {astrbot-4.13.2.dist-info → astrbot-4.14.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,980 @@
from __future__ import annotations

import asyncio
import builtins
import copy
import datetime
import json
import os
import zoneinfo
from dataclasses import dataclass, field

from astrbot.api import sp
from astrbot.core import logger
from astrbot.core.agent.handoff import HandoffTool
from astrbot.core.agent.mcp_client import MCPTool
from astrbot.core.agent.message import TextPart
from astrbot.core.agent.tool import ToolSet
from astrbot.core.astr_agent_context import AgentContextWrapper, AstrAgentContext
from astrbot.core.astr_agent_hooks import MAIN_AGENT_HOOKS
from astrbot.core.astr_agent_run_util import AgentRunner
from astrbot.core.astr_agent_tool_exec import FunctionToolExecutor
from astrbot.core.astr_main_agent_resources import (
    CHATUI_SPECIAL_DEFAULT_PERSONA_PROMPT,
    EXECUTE_SHELL_TOOL,
    FILE_DOWNLOAD_TOOL,
    FILE_UPLOAD_TOOL,
    KNOWLEDGE_BASE_QUERY_TOOL,
    LIVE_MODE_SYSTEM_PROMPT,
    LLM_SAFETY_MODE_SYSTEM_PROMPT,
    LOCAL_EXECUTE_SHELL_TOOL,
    LOCAL_PYTHON_TOOL,
    PYTHON_TOOL,
    SANDBOX_MODE_PROMPT,
    SEND_MESSAGE_TO_USER_TOOL,
    TOOL_CALL_PROMPT,
    TOOL_CALL_PROMPT_SKILLS_LIKE_MODE,
    retrieve_knowledge_base,
)
from astrbot.core.conversation_mgr import Conversation
from astrbot.core.message.components import File, Image, Reply
from astrbot.core.platform.astr_message_event import AstrMessageEvent
from astrbot.core.provider import Provider
from astrbot.core.provider.entities import ProviderRequest
from astrbot.core.skills.skill_manager import SkillManager, build_skills_prompt
from astrbot.core.star.context import Context
from astrbot.core.star.star_handler import star_map
from astrbot.core.tools.cron_tools import (
    CREATE_CRON_JOB_TOOL,
    DELETE_CRON_JOB_TOOL,
    LIST_CRON_JOBS_TOOL,
)
from astrbot.core.utils.file_extract import extract_file_moonshotai
from astrbot.core.utils.llm_metadata import LLM_METADATAS


@dataclass(slots=True)
class MainAgentBuildConfig:
    """The main agent build configuration.
    Most of the configs can be found in the cmd_config.json"""

    tool_call_timeout: int
    """The timeout (in seconds) for a tool call.
    When the tool call exceeds this time,
    a timeout error as a tool result will be returned.
    """
    tool_schema_mode: str = "full"
    """The tool schema mode, can be 'full' or 'skills-like'."""
    provider_wake_prefix: str = ""
    """The wake prefix for the provider. If the user message does not start with this prefix,
    the main agent will not be triggered."""
    streaming_response: bool = True
    """Whether to use streaming response."""
    sanitize_context_by_modalities: bool = False
    """Whether to sanitize the context based on the provider's supported modalities.
    This will remove unsupported message types(e.g. image) from the context to prevent issues."""
    kb_agentic_mode: bool = False
    """Whether to use agentic mode for knowledge base retrieval.
    This will inject the knowledge base query tool into the main agent's toolset to allow dynamic querying."""
    file_extract_enabled: bool = False
    """Whether to enable file content extraction for uploaded files."""
    file_extract_prov: str = "moonshotai"
    """The file extraction provider."""
    file_extract_msh_api_key: str = ""
    """The API key for Moonshot AI file extraction provider."""
    context_limit_reached_strategy: str = "truncate_by_turns"
    """The strategy to handle context length limit reached."""
    llm_compress_instruction: str = ""
    """The instruction for compression in llm_compress strategy."""
    llm_compress_keep_recent: int = 6
    """The number of most recent turns to keep during llm_compress strategy."""
    llm_compress_provider_id: str = ""
    """The provider ID for the LLM used in context compression."""
    max_context_length: int = -1
    """The maximum number of turns to keep in context. -1 means no limit.
    This enforce max turns before compression"""
    dequeue_context_length: int = 1
    """The number of oldest turns to remove when context length limit is reached."""
    llm_safety_mode: bool = True
    """This will inject healthy and safe system prompt into the main agent,
    to prevent LLM output harmful information"""
    safety_mode_strategy: str = "system_prompt"
    computer_use_runtime: str = "local"
    """The runtime for agent computer use: none, local, or sandbox."""
    sandbox_cfg: dict = field(default_factory=dict)
    add_cron_tools: bool = True
    """This will add cron job management tools to the main agent for proactive cron job execution."""
    provider_settings: dict = field(default_factory=dict)
    subagent_orchestrator: dict = field(default_factory=dict)
    timezone: str | None = None


@dataclass(slots=True)
class MainAgentBuildResult:
    agent_runner: AgentRunner
    provider_request: ProviderRequest
    provider: Provider


def _select_provider(
    event: AstrMessageEvent, plugin_context: Context
) -> Provider | None:
    """Select chat provider for the event."""
    sel_provider = event.get_extra("selected_provider")
    if sel_provider and isinstance(sel_provider, str):
        provider = plugin_context.get_provider_by_id(sel_provider)
        if not provider:
            logger.error("未找到指定的提供商: %s。", sel_provider)
        if not isinstance(provider, Provider):
            logger.error(
                "选择的提供商类型无效(%s),跳过 LLM 请求处理。", type(provider)
            )
            return None
        return provider
    try:
        return plugin_context.get_using_provider(umo=event.unified_msg_origin)
    except ValueError as exc:
        logger.error("Error occurred while selecting provider: %s", exc)
        return None


async def _get_session_conv(
    event: AstrMessageEvent, plugin_context: Context
) -> Conversation:
    conv_mgr = plugin_context.conversation_manager
    umo = event.unified_msg_origin
    cid = await conv_mgr.get_curr_conversation_id(umo)
    if not cid:
        cid = await conv_mgr.new_conversation(umo, event.get_platform_id())
    conversation = await conv_mgr.get_conversation(umo, cid)
    if not conversation:
        cid = await conv_mgr.new_conversation(umo, event.get_platform_id())
        conversation = await conv_mgr.get_conversation(umo, cid)
        if not conversation:
            raise RuntimeError("无法创建新的对话。")
    return conversation


async def _apply_kb(
    event: AstrMessageEvent,
    req: ProviderRequest,
    plugin_context: Context,
    config: MainAgentBuildConfig,
) -> None:
    if not config.kb_agentic_mode:
        if req.prompt is None:
            return
        try:
            kb_result = await retrieve_knowledge_base(
                query=req.prompt,
                umo=event.unified_msg_origin,
                context=plugin_context,
            )
            if not kb_result:
                return
            if req.system_prompt is not None:
                req.system_prompt += (
                    f"\n\n[Related Knowledge Base Results]:\n{kb_result}"
                )
        except Exception as exc:  # noqa: BLE001
            logger.error("Error occurred while retrieving knowledge base: %s", exc)
    else:
        if req.func_tool is None:
            req.func_tool = ToolSet()
        req.func_tool.add_tool(KNOWLEDGE_BASE_QUERY_TOOL)


async def _apply_file_extract(
    event: AstrMessageEvent,
    req: ProviderRequest,
    config: MainAgentBuildConfig,
) -> None:
    file_paths = []
    file_names = []
    for comp in event.message_obj.message:
        if isinstance(comp, File):
            file_paths.append(await comp.get_file())
            file_names.append(comp.name)
        elif isinstance(comp, Reply) and comp.chain:
            for reply_comp in comp.chain:
                if isinstance(reply_comp, File):
                    file_paths.append(await reply_comp.get_file())
                    file_names.append(reply_comp.name)
    if not file_paths:
        return
    if not req.prompt:
        req.prompt = "总结一下文件里面讲了什么?"
    if config.file_extract_prov == "moonshotai":
        if not config.file_extract_msh_api_key:
            logger.error("Moonshot AI API key for file extract is not set")
            return
        file_contents = await asyncio.gather(
            *[
                extract_file_moonshotai(
                    file_path,
                    config.file_extract_msh_api_key,
                )
                for file_path in file_paths
            ]
        )
    else:
        logger.error("Unsupported file extract provider: %s", config.file_extract_prov)
        return

    for file_content, file_name in zip(file_contents, file_names):
        req.contexts.append(
            {
                "role": "system",
                "content": (
                    "File Extract Results of user uploaded files:\n"
                    f"{file_content}\nFile Name: {file_name or 'Unknown'}"
                ),
            },
        )


def _apply_prompt_prefix(req: ProviderRequest, cfg: dict) -> None:
    prefix = cfg.get("prompt_prefix")
    if not prefix:
        return
    if "{{prompt}}" in prefix:
        req.prompt = prefix.replace("{{prompt}}", req.prompt)
    else:
        req.prompt = f"{prefix}{req.prompt}"


def _apply_local_env_tools(req: ProviderRequest) -> None:
    if req.func_tool is None:
        req.func_tool = ToolSet()
    req.func_tool.add_tool(LOCAL_EXECUTE_SHELL_TOOL)
    req.func_tool.add_tool(LOCAL_PYTHON_TOOL)


async def _ensure_persona_and_skills(
    req: ProviderRequest,
    cfg: dict,
    plugin_context: Context,
    event: AstrMessageEvent,
) -> None:
    """Ensure persona and skills are applied to the request's system prompt or user prompt."""
    if not req.conversation:
        return

    # get persona ID

    # 1. from session service config - highest priority
    persona_id = (
        await sp.get_async(
            scope="umo",
            scope_id=event.unified_msg_origin,
            key="session_service_config",
            default={},
        )
    ).get("persona_id")

    if not persona_id:
        # 2. from conversation setting - second priority
        persona_id = req.conversation.persona_id

    if persona_id == "[%None]":
        # explicitly set to no persona
        pass
    elif persona_id is None:
        # 3. from config default persona setting - last priority
        persona_id = cfg.get("default_personality")

    persona = next(
        builtins.filter(
            lambda persona: persona["name"] == persona_id,
            plugin_context.persona_manager.personas_v3,
        ),
        None,
    )
    if persona:
        # Inject persona system prompt
        if prompt := persona["prompt"]:
            req.system_prompt += f"\n# Persona Instructions\n\n{prompt}\n"
        if begin_dialogs := copy.deepcopy(persona.get("_begin_dialogs_processed")):
            req.contexts[:0] = begin_dialogs
    else:
        # special handling for webchat persona
        if event.get_platform_name() == "webchat" and persona_id != "[%None]":
            persona_id = "_chatui_default_"
            req.system_prompt += CHATUI_SPECIAL_DEFAULT_PERSONA_PROMPT

    # Inject skills prompt
    runtime = cfg.get("computer_use_runtime", "local")
    skill_manager = SkillManager()
    skills = skill_manager.list_skills(active_only=True, runtime=runtime)

    if skills:
        if persona and persona.get("skills") is not None:
            if not persona["skills"]:
                skills = []
            else:
                allowed = set(persona["skills"])
                skills = [skill for skill in skills if skill.name in allowed]
        if skills:
            req.system_prompt += f"\n{build_skills_prompt(skills)}\n"
            if runtime == "none":
                req.system_prompt += (
                    "User has not enabled the Computer Use feature. "
                    "You cannot use shell or Python to perform skills. "
                    "If you need to use these capabilities, ask the user to enable Computer Use in the AstrBot WebUI -> Config."
                )
    tmgr = plugin_context.get_llm_tool_manager()

    # sub agents integration
    orch_cfg = plugin_context.get_config().get("subagent_orchestrator", {})
    so = plugin_context.subagent_orchestrator
    if orch_cfg.get("main_enable", False) and so:
        remove_dup = bool(orch_cfg.get("remove_main_duplicate_tools", False))

        assigned_tools: set[str] = set()
        agents = orch_cfg.get("agents", [])
        if isinstance(agents, list):
            for a in agents:
                if not isinstance(a, dict):
                    continue
                if a.get("enabled", True) is False:
                    continue
                persona_tools = None
                pid = a.get("persona_id")
                if pid:
                    persona_tools = next(
                        (
                            p.get("tools")
                            for p in plugin_context.persona_manager.personas_v3
                            if p["name"] == pid
                        ),
                        None,
                    )
                tools = a.get("tools", [])
                if persona_tools is not None:
                    tools = persona_tools
                if tools is None:
                    assigned_tools.update(
                        [
                            tool.name
                            for tool in tmgr.func_list
                            if not isinstance(tool, HandoffTool)
                        ]
                    )
                    continue
                if not isinstance(tools, list):
                    continue
                for t in tools:
                    name = str(t).strip()
                    if name:
                        assigned_tools.add(name)

        if req.func_tool is None:
            toolset = ToolSet()
        else:
            toolset = req.func_tool

        # add subagent handoff tools
        for tool in so.handoffs:
            toolset.add_tool(tool)

        # check duplicates
        if remove_dup:
            names = toolset.names()
            for tool_name in assigned_tools:
                if tool_name in names:
                    toolset.remove_tool(tool_name)

        req.func_tool = toolset

        router_prompt = (
            plugin_context.get_config()
            .get("subagent_orchestrator", {})
            .get("router_system_prompt", "")
        ).strip()
        if router_prompt:
            req.system_prompt += f"\n{router_prompt}\n"
        return

    # inject toolset in the persona
    if (persona and persona.get("tools") is None) or not persona:
        toolset = tmgr.get_full_tool_set()
        for tool in list(toolset):
            if not tool.active:
                toolset.remove_tool(tool.name)
    else:
        toolset = ToolSet()
        if persona["tools"]:
            for tool_name in persona["tools"]:
                tool = tmgr.get_func(tool_name)
                if tool and tool.active:
                    toolset.add_tool(tool)
    if not req.func_tool:
        req.func_tool = toolset
    else:
        req.func_tool.merge(toolset)
    try:
        event.trace.record(
            "sel_persona", persona_id=persona_id, persona_toolset=toolset.names()
        )
    except Exception:
        pass
    logger.debug("Tool set for persona %s: %s", persona_id, toolset.names())


async def _request_img_caption(
    provider_id: str,
    cfg: dict,
    image_urls: list[str],
    plugin_context: Context,
) -> str:
    prov = plugin_context.get_provider_by_id(provider_id)
    if prov is None:
        raise ValueError(
            f"Cannot get image caption because provider `{provider_id}` is not exist.",
        )
    if not isinstance(prov, Provider):
        raise ValueError(
            f"Cannot get image caption because provider `{provider_id}` is not a valid Provider, it is {type(prov)}.",
        )

    img_cap_prompt = cfg.get(
        "image_caption_prompt",
        "Please describe the image.",
    )
    logger.debug("Processing image caption with provider: %s", provider_id)
    llm_resp = await prov.text_chat(
        prompt=img_cap_prompt,
        image_urls=image_urls,
    )
    return llm_resp.completion_text


async def _ensure_img_caption(
    req: ProviderRequest,
    cfg: dict,
    plugin_context: Context,
    image_caption_provider: str,
) -> None:
    try:
        caption = await _request_img_caption(
            image_caption_provider,
            cfg,
            req.image_urls,
            plugin_context,
        )
        if caption:
            req.extra_user_content_parts.append(
                TextPart(text=f"<image_caption>{caption}</image_caption>")
            )
            req.image_urls = []
    except Exception as exc:  # noqa: BLE001
        logger.error("处理图片描述失败: %s", exc)


async def _process_quote_message(
    event: AstrMessageEvent,
    req: ProviderRequest,
    img_cap_prov_id: str,
    plugin_context: Context,
) -> None:
    quote = None
    for comp in event.message_obj.message:
        if isinstance(comp, Reply):
            quote = comp
            break
    if not quote:
        return

    content_parts = []
    sender_info = f"({quote.sender_nickname}): " if quote.sender_nickname else ""
    message_str = quote.message_str or "[Empty Text]"
    content_parts.append(f"{sender_info}{message_str}")

    image_seg = None
    if quote.chain:
        for comp in quote.chain:
            if isinstance(comp, Image):
                image_seg = comp
                break

    if image_seg:
        try:
            prov = None
            if img_cap_prov_id:
                prov = plugin_context.get_provider_by_id(img_cap_prov_id)
            if prov is None:
                prov = plugin_context.get_using_provider(event.unified_msg_origin)

            if prov and isinstance(prov, Provider):
                llm_resp = await prov.text_chat(
                    prompt="Please describe the image content.",
                    image_urls=[await image_seg.convert_to_file_path()],
                )
                if llm_resp.completion_text:
                    content_parts.append(
                        f"[Image Caption in quoted message]: {llm_resp.completion_text}"
                    )
            else:
                logger.warning("No provider found for image captioning in quote.")
        except BaseException as exc:
            logger.error("处理引用图片失败: %s", exc)

    quoted_content = "\n".join(content_parts)
    quoted_text = f"<Quoted Message>\n{quoted_content}\n</Quoted Message>"
    req.extra_user_content_parts.append(TextPart(text=quoted_text))


def _append_system_reminders(
    event: AstrMessageEvent,
    req: ProviderRequest,
    cfg: dict,
    timezone: str | None,
) -> None:
    system_parts: list[str] = []
    if cfg.get("identifier"):
        user_id = event.message_obj.sender.user_id
        user_nickname = event.message_obj.sender.nickname
        system_parts.append(f"User ID: {user_id}, Nickname: {user_nickname}")

    if cfg.get("group_name_display") and event.message_obj.group_id:
        if not event.message_obj.group:
            logger.error(
                "Group name display enabled but group object is None. Group ID: %s",
                event.message_obj.group_id,
            )
        else:
            group_name = event.message_obj.group.group_name
            if group_name:
                system_parts.append(f"Group name: {group_name}")

    if cfg.get("datetime_system_prompt"):
        current_time = None
        if timezone:
            try:
                now = datetime.datetime.now(zoneinfo.ZoneInfo(timezone))
                current_time = now.strftime("%Y-%m-%d %H:%M (%Z)")
            except Exception as exc:  # noqa: BLE001
                logger.error("时区设置错误: %s, 使用本地时区", exc)
        if not current_time:
            current_time = (
                datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M (%Z)")
            )
        system_parts.append(f"Current datetime: {current_time}")

    if system_parts:
        system_content = (
            "<system_reminder>" + "\n".join(system_parts) + "</system_reminder>"
        )
        req.extra_user_content_parts.append(TextPart(text=system_content))


async def _decorate_llm_request(
    event: AstrMessageEvent,
    req: ProviderRequest,
    plugin_context: Context,
    config: MainAgentBuildConfig,
) -> None:
    cfg = config.provider_settings or plugin_context.get_config(
        umo=event.unified_msg_origin
    ).get("provider_settings", {})

    _apply_prompt_prefix(req, cfg)

    if req.conversation:
        await _ensure_persona_and_skills(req, cfg, plugin_context, event)

    img_cap_prov_id: str = cfg.get("default_image_caption_provider_id") or ""
    if img_cap_prov_id and req.image_urls:
        await _ensure_img_caption(
            req,
            cfg,
            plugin_context,
            img_cap_prov_id,
        )

    img_cap_prov_id = cfg.get("default_image_caption_provider_id") or ""
    await _process_quote_message(
        event,
        req,
        img_cap_prov_id,
        plugin_context,
    )

    tz = config.timezone
    if tz is None:
        tz = plugin_context.get_config().get("timezone")
    _append_system_reminders(event, req, cfg, tz)


def _modalities_fix(provider: Provider, req: ProviderRequest) -> None:
    if req.image_urls:
        provider_cfg = provider.provider_config.get("modalities", ["image"])
        if "image" not in provider_cfg:
            logger.debug(
                "Provider %s does not support image, using placeholder.", provider
            )
            image_count = len(req.image_urls)
            placeholder = " ".join(["[图片]"] * image_count)
            if req.prompt:
                req.prompt = f"{placeholder} {req.prompt}"
            else:
                req.prompt = placeholder
            req.image_urls = []
    if req.func_tool:
        provider_cfg = provider.provider_config.get("modalities", ["tool_use"])
        if "tool_use" not in provider_cfg:
            logger.debug(
                "Provider %s does not support tool_use, clearing tools.", provider
            )
            req.func_tool = None


def _sanitize_context_by_modalities(
    config: MainAgentBuildConfig,
    provider: Provider,
    req: ProviderRequest,
) -> None:
    if not config.sanitize_context_by_modalities:
        return
    if not isinstance(req.contexts, list) or not req.contexts:
        return
    modalities = provider.provider_config.get("modalities", None)
    if not modalities or not isinstance(modalities, list):
        return
    supports_image = bool("image" in modalities)
    supports_tool_use = bool("tool_use" in modalities)
    if supports_image and supports_tool_use:
        return

    sanitized_contexts: list[dict] = []
    removed_image_blocks = 0
    removed_tool_messages = 0
    removed_tool_calls = 0

    for msg in req.contexts:
        if not isinstance(msg, dict):
            continue
        role = msg.get("role")
        if not role:
            continue

        new_msg = msg
        if not supports_tool_use:
            if role == "tool":
                removed_tool_messages += 1
                continue
            if role == "assistant" and "tool_calls" in new_msg:
                if "tool_calls" in new_msg:
                    removed_tool_calls += 1
                new_msg.pop("tool_calls", None)
                new_msg.pop("tool_call_id", None)

        if not supports_image:
            content = new_msg.get("content")
            if isinstance(content, list):
                filtered_parts: list = []
                removed_any_image = False
                for part in content:
                    if isinstance(part, dict):
                        part_type = str(part.get("type", "")).lower()
                        if part_type in {"image_url", "image"}:
                            removed_any_image = True
                            removed_image_blocks += 1
                            continue
                    filtered_parts.append(part)
                if removed_any_image:
                    new_msg["content"] = filtered_parts

        if role == "assistant":
            content = new_msg.get("content")
            has_tool_calls = bool(new_msg.get("tool_calls"))
            if not has_tool_calls:
                if not content:
                    continue
                if isinstance(content, str) and not content.strip():
                    continue

        sanitized_contexts.append(new_msg)

    if removed_image_blocks or removed_tool_messages or removed_tool_calls:
        logger.debug(
            "sanitize_context_by_modalities applied: "
            "removed_image_blocks=%s, removed_tool_messages=%s, removed_tool_calls=%s",
            removed_image_blocks,
            removed_tool_messages,
            removed_tool_calls,
        )
    req.contexts = sanitized_contexts


def _plugin_tool_fix(event: AstrMessageEvent, req: ProviderRequest) -> None:
    """根据事件中的插件设置,过滤请求中的工具列表。

    注意:没有 handler_module_path 的工具(如 MCP 工具)会被保留,
    因为它们不属于任何插件,不应被插件过滤逻辑影响。
    """
    if event.plugins_name is not None and req.func_tool:
        new_tool_set = ToolSet()
        for tool in req.func_tool.tools:
            if isinstance(tool, MCPTool):
                # 保留 MCP 工具
                new_tool_set.add_tool(tool)
                continue
            mp = tool.handler_module_path
            if not mp:
                continue
            plugin = star_map.get(mp)
            if not plugin:
                continue
            if plugin.name in event.plugins_name or plugin.reserved:
                new_tool_set.add_tool(tool)
        req.func_tool = new_tool_set


async def _handle_webchat(
    event: AstrMessageEvent, req: ProviderRequest, prov: Provider
) -> None:
    from astrbot.core import db_helper

    chatui_session_id = event.session_id.split("!")[-1]
    user_prompt = req.prompt
    session = await db_helper.get_platform_session_by_id(chatui_session_id)

    if not user_prompt or not chatui_session_id or not session or session.display_name:
        return

    llm_resp = await prov.text_chat(
        system_prompt=(
            "You are a conversation title generator. "
            "Generate a concise title in the same language as the user’s input, "
            "no more than 10 words, capturing only the core topic."
            "If the input is a greeting, small talk, or has no clear topic, "
            "(e.g., “hi”, “hello”, “haha”), return <None>. "
            "Output only the title itself or <None>, with no explanations."
        ),
        prompt=f"Generate a concise title for the following user query:\n{user_prompt}",
    )
    if llm_resp and llm_resp.completion_text:
        title = llm_resp.completion_text.strip()
        if not title or "<None>" in title:
            return
        logger.info(
            "Generated chatui title for session %s: %s", chatui_session_id, title
        )
        await db_helper.update_platform_session(
            session_id=chatui_session_id,
            display_name=title,
        )


def _apply_llm_safety_mode(config: MainAgentBuildConfig, req: ProviderRequest) -> None:
    if config.safety_mode_strategy == "system_prompt":
        req.system_prompt = (
            f"{LLM_SAFETY_MODE_SYSTEM_PROMPT}\n\n{req.system_prompt or ''}"
        )
    else:
        logger.warning(
            "Unsupported llm_safety_mode strategy: %s.",
            config.safety_mode_strategy,
        )


def _apply_sandbox_tools(
    config: MainAgentBuildConfig, req: ProviderRequest, session_id: str
) -> None:
    if req.func_tool is None:
        req.func_tool = ToolSet()
    if config.sandbox_cfg.get("booter") == "shipyard":
        ep = config.sandbox_cfg.get("shipyard_endpoint", "")
        at = config.sandbox_cfg.get("shipyard_access_token", "")
        if not ep or not at:
            logger.error("Shipyard sandbox configuration is incomplete.")
            return
        os.environ["SHIPYARD_ENDPOINT"] = ep
        os.environ["SHIPYARD_ACCESS_TOKEN"] = at
    req.func_tool.add_tool(EXECUTE_SHELL_TOOL)
    req.func_tool.add_tool(PYTHON_TOOL)
    req.func_tool.add_tool(FILE_UPLOAD_TOOL)
    req.func_tool.add_tool(FILE_DOWNLOAD_TOOL)
    req.system_prompt += f"\n{SANDBOX_MODE_PROMPT}\n"


def _proactive_cron_job_tools(req: ProviderRequest) -> None:
    if req.func_tool is None:
        req.func_tool = ToolSet()
    req.func_tool.add_tool(CREATE_CRON_JOB_TOOL)
    req.func_tool.add_tool(DELETE_CRON_JOB_TOOL)
    req.func_tool.add_tool(LIST_CRON_JOBS_TOOL)


def _get_compress_provider(
    config: MainAgentBuildConfig, plugin_context: Context
) -> Provider | None:
    if not config.llm_compress_provider_id:
        return None
    if config.context_limit_reached_strategy != "llm_compress":
        return None
    provider = plugin_context.get_provider_by_id(config.llm_compress_provider_id)
    if provider is None:
        logger.warning(
            "未找到指定的上下文压缩模型 %s,将跳过压缩。",
            config.llm_compress_provider_id,
        )
        return None
    if not isinstance(provider, Provider):
        logger.warning(
            "指定的上下文压缩模型 %s 不是对话模型,将跳过压缩。",
            config.llm_compress_provider_id,
        )
        return None
    return provider


async def build_main_agent(
    *,
    event: AstrMessageEvent,
    plugin_context: Context,
    config: MainAgentBuildConfig,
    provider: Provider | None = None,
    req: ProviderRequest | None = None,
) -> MainAgentBuildResult | None:
    """构建主对话代理(Main Agent),并且自动 reset。"""
    provider = provider or _select_provider(event, plugin_context)
    if provider is None:
        logger.info("未找到任何对话模型(提供商),跳过 LLM 请求处理。")
        return None

    if req is None:
        if event.get_extra("provider_request"):
            req = event.get_extra("provider_request")
            assert isinstance(req, ProviderRequest), (
                "provider_request 必须是 ProviderRequest 类型。"
            )
            if req.conversation:
                req.contexts = json.loads(req.conversation.history)
        else:
            req = ProviderRequest()
            req.prompt = ""
            req.image_urls = []
            if sel_model := event.get_extra("selected_model"):
                req.model = sel_model
            if config.provider_wake_prefix and not event.message_str.startswith(
                config.provider_wake_prefix
            ):
                return None

            req.prompt = event.message_str[len(config.provider_wake_prefix) :]
            for comp in event.message_obj.message:
                if isinstance(comp, Image):
                    image_path = await comp.convert_to_file_path()
                    req.image_urls.append(image_path)
                    req.extra_user_content_parts.append(
                        TextPart(text=f"[Image Attachment: path {image_path}]")
                    )
                elif isinstance(comp, File):
                    file_path = await comp.get_file()
                    file_name = comp.name or os.path.basename(file_path)
                    req.extra_user_content_parts.append(
                        TextPart(
                            text=f"[File Attachment: name {file_name}, path {file_path}]"
                        )
                    )

            conversation = await _get_session_conv(event, plugin_context)
            req.conversation = conversation
            req.contexts = json.loads(conversation.history)
        event.set_extra("provider_request", req)

    if isinstance(req.contexts, str):
        req.contexts = json.loads(req.contexts)

    if config.file_extract_enabled:
        try:
            await _apply_file_extract(event, req, config)
        except Exception as exc:  # noqa: BLE001
            logger.error("Error occurred while applying file extract: %s", exc)

    if not req.prompt and not req.image_urls:
        if not event.get_group_id() and req.extra_user_content_parts:
            req.prompt = "<attachment>"
        else:
            return None

    await _decorate_llm_request(event, req, plugin_context, config)

    await _apply_kb(event, req, plugin_context, config)

    if not req.session_id:
        req.session_id = event.unified_msg_origin

    _modalities_fix(provider, req)
    _plugin_tool_fix(event, req)
    _sanitize_context_by_modalities(config, provider, req)

    if config.llm_safety_mode:
        _apply_llm_safety_mode(config, req)

    if config.computer_use_runtime == "sandbox":
        _apply_sandbox_tools(config, req, req.session_id)
    elif config.computer_use_runtime == "local":
        _apply_local_env_tools(req)

    agent_runner = AgentRunner()
    astr_agent_ctx = AstrAgentContext(
        context=plugin_context,
        event=event,
    )

    if config.add_cron_tools:
        _proactive_cron_job_tools(req)

    if event.platform_meta.support_proactive_message:
        if req.func_tool is None:
            req.func_tool = ToolSet()
        req.func_tool.add_tool(SEND_MESSAGE_TO_USER_TOOL)

    if provider.provider_config.get("max_context_tokens", 0) <= 0:
        model = provider.get_model()
        if model_info := LLM_METADATAS.get(model):
            provider.provider_config["max_context_tokens"] = model_info["limit"][
                "context"
            ]

    if event.get_platform_name() == "webchat":
        asyncio.create_task(_handle_webchat(event, req, provider))

    if req.func_tool and req.func_tool.tools:
        tool_prompt = (
            TOOL_CALL_PROMPT
            if config.tool_schema_mode == "full"
            else TOOL_CALL_PROMPT_SKILLS_LIKE_MODE
        )
        req.system_prompt += f"\n{tool_prompt}\n"

    action_type = event.get_extra("action_type")
    if action_type == "live":
        req.system_prompt += f"\n{LIVE_MODE_SYSTEM_PROMPT}\n"

    await agent_runner.reset(
        provider=provider,
        request=req,
        run_context=AgentContextWrapper(
            context=astr_agent_ctx,
            tool_call_timeout=config.tool_call_timeout,
        ),
        tool_executor=FunctionToolExecutor(),
        agent_hooks=MAIN_AGENT_HOOKS,
        streaming=config.streaming_response,
        llm_compress_instruction=config.llm_compress_instruction,
        llm_compress_keep_recent=config.llm_compress_keep_recent,
        llm_compress_provider=_get_compress_provider(config, plugin_context),
        truncate_turns=config.dequeue_context_length,
        enforce_max_turns=config.max_context_length,
        tool_schema_mode=config.tool_schema_mode,
    )

    return MainAgentBuildResult(
        agent_runner=agent_runner,
        provider_request=req,
        provider=provider,
    )