AstrBot 4.13.2__py3-none-any.whl → 4.14.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (61)
  1. astrbot/builtin_stars/astrbot/main.py +0 -6
  2. astrbot/builtin_stars/session_controller/main.py +1 -2
  3. astrbot/cli/__init__.py +1 -1
  4. astrbot/core/agent/agent.py +2 -1
  5. astrbot/core/agent/handoff.py +14 -1
  6. astrbot/core/agent/runners/tool_loop_agent_runner.py +14 -1
  7. astrbot/core/agent/tool.py +5 -0
  8. astrbot/core/astr_agent_run_util.py +21 -3
  9. astrbot/core/astr_agent_tool_exec.py +178 -3
  10. astrbot/core/astr_main_agent.py +980 -0
  11. astrbot/core/astr_main_agent_resources.py +453 -0
  12. astrbot/core/computer/computer_client.py +10 -1
  13. astrbot/core/computer/tools/fs.py +22 -14
  14. astrbot/core/config/default.py +84 -58
  15. astrbot/core/core_lifecycle.py +43 -1
  16. astrbot/core/cron/__init__.py +3 -0
  17. astrbot/core/cron/events.py +67 -0
  18. astrbot/core/cron/manager.py +376 -0
  19. astrbot/core/db/__init__.py +60 -0
  20. astrbot/core/db/po.py +31 -0
  21. astrbot/core/db/sqlite.py +120 -0
  22. astrbot/core/event_bus.py +0 -1
  23. astrbot/core/message/message_event_result.py +21 -3
  24. astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +111 -580
  25. astrbot/core/pipeline/scheduler.py +0 -2
  26. astrbot/core/platform/astr_message_event.py +5 -5
  27. astrbot/core/platform/platform.py +9 -0
  28. astrbot/core/platform/platform_metadata.py +2 -0
  29. astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +1 -0
  30. astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +1 -0
  31. astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +1 -0
  32. astrbot/core/platform/sources/webchat/webchat_adapter.py +1 -0
  33. astrbot/core/platform/sources/wecom/wecom_adapter.py +1 -0
  34. astrbot/core/platform/sources/wecom_ai_bot/wecomai_adapter.py +1 -0
  35. astrbot/core/platform/sources/weixin_official_account/weixin_offacc_adapter.py +1 -0
  36. astrbot/core/provider/entities.py +1 -1
  37. astrbot/core/skills/skill_manager.py +9 -8
  38. astrbot/core/star/context.py +8 -0
  39. astrbot/core/star/filter/custom_filter.py +3 -3
  40. astrbot/core/star/register/star_handler.py +1 -1
  41. astrbot/core/subagent_orchestrator.py +96 -0
  42. astrbot/core/tools/cron_tools.py +174 -0
  43. astrbot/core/utils/history_saver.py +31 -0
  44. astrbot/core/utils/trace.py +4 -0
  45. astrbot/dashboard/routes/__init__.py +4 -0
  46. astrbot/dashboard/routes/cron.py +174 -0
  47. astrbot/dashboard/routes/log.py +36 -0
  48. astrbot/dashboard/routes/plugin.py +11 -0
  49. astrbot/dashboard/routes/skills.py +12 -37
  50. astrbot/dashboard/routes/subagent.py +117 -0
  51. astrbot/dashboard/routes/tools.py +41 -14
  52. astrbot/dashboard/server.py +3 -0
  53. {astrbot-4.13.2.dist-info → astrbot-4.14.1.dist-info}/METADATA +21 -2
  54. {astrbot-4.13.2.dist-info → astrbot-4.14.1.dist-info}/RECORD +57 -51
  55. astrbot/builtin_stars/astrbot/process_llm_request.py +0 -308
  56. astrbot/builtin_stars/reminder/main.py +0 -266
  57. astrbot/builtin_stars/reminder/metadata.yaml +0 -4
  58. astrbot/core/pipeline/process_stage/utils.py +0 -219
  59. {astrbot-4.13.2.dist-info → astrbot-4.14.1.dist-info}/WHEEL +0 -0
  60. {astrbot-4.13.2.dist-info → astrbot-4.14.1.dist-info}/entry_points.txt +0 -0
  61. {astrbot-4.13.2.dist-info → astrbot-4.14.1.dist-info}/licenses/LICENSE +0 -0
astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py
@@ -1,55 +1,36 @@
  """本地 Agent 模式的 LLM 调用 Stage"""

  import asyncio
- import json
- import os
+ import base64
  from collections.abc import AsyncGenerator
+ from dataclasses import replace

  from astrbot.core import logger
- from astrbot.core.agent.message import Message, TextPart
+ from astrbot.core.agent.message import Message
  from astrbot.core.agent.response import AgentStats
- from astrbot.core.agent.tool import ToolSet
- from astrbot.core.astr_agent_context import AstrAgentContext
- from astrbot.core.conversation_mgr import Conversation
- from astrbot.core.message.components import File, Image, Reply
+ from astrbot.core.astr_main_agent import (
+     MainAgentBuildConfig,
+     MainAgentBuildResult,
+     build_main_agent,
+ )
+ from astrbot.core.message.components import File, Image
  from astrbot.core.message.message_event_result import (
      MessageChain,
      MessageEventResult,
      ResultContentType,
  )
  from astrbot.core.platform.astr_message_event import AstrMessageEvent
- from astrbot.core.provider import Provider
  from astrbot.core.provider.entities import (
      LLMResponse,
      ProviderRequest,
  )
- from astrbot.core.star.star_handler import EventType, star_map
- from astrbot.core.utils.file_extract import extract_file_moonshotai
- from astrbot.core.utils.llm_metadata import LLM_METADATAS
+ from astrbot.core.star.star_handler import EventType
  from astrbot.core.utils.metrics import Metric
  from astrbot.core.utils.session_lock import session_lock_manager

- from .....astr_agent_context import AgentContextWrapper
- from .....astr_agent_hooks import MAIN_AGENT_HOOKS
- from .....astr_agent_run_util import AgentRunner, run_agent, run_live_agent
- from .....astr_agent_tool_exec import FunctionToolExecutor
+ from .....astr_agent_run_util import run_agent, run_live_agent
  from ....context import PipelineContext, call_event_hook
  from ...stage import Stage
- from ...utils import (
-     CHATUI_EXTRA_PROMPT,
-     EXECUTE_SHELL_TOOL,
-     FILE_DOWNLOAD_TOOL,
-     FILE_UPLOAD_TOOL,
-     KNOWLEDGE_BASE_QUERY_TOOL,
-     LIVE_MODE_SYSTEM_PROMPT,
-     LLM_SAFETY_MODE_SYSTEM_PROMPT,
-     PYTHON_TOOL,
-     SANDBOX_MODE_PROMPT,
-     TOOL_CALL_PROMPT,
-     TOOL_CALL_PROMPT_SKILLS_LIKE_MODE,
-     decoded_blocked,
-     retrieve_knowledge_base,
- )


  class InternalAgentSubStage(Stage):
@@ -111,419 +92,49 @@ class InternalAgentSubStage(Stage):
              "safety_mode_strategy", "system_prompt"
          )

+         self.computer_use_runtime = settings.get("computer_use_runtime")
          self.sandbox_cfg = settings.get("sandbox", {})

-         self.conv_manager = ctx.plugin_manager.context.conversation_manager
+         # Proactive capability configuration
+         proactive_cfg = settings.get("proactive_capability", {})
+         self.add_cron_tools = proactive_cfg.get("add_cron_tools", True)

-     def _select_provider(self, event: AstrMessageEvent):
-         """选择使用的 LLM 提供商"""
-         sel_provider = event.get_extra("selected_provider")
-         _ctx = self.ctx.plugin_manager.context
-         if sel_provider and isinstance(sel_provider, str):
-             provider = _ctx.get_provider_by_id(sel_provider)
-             if not provider:
-                 logger.error(f"未找到指定的提供商: {sel_provider}。")
-             return provider
-         try:
-             prov = _ctx.get_using_provider(umo=event.unified_msg_origin)
-         except ValueError as e:
-             logger.error(f"Error occurred while selecting provider: {e}")
-             return None
-         return prov
-
-     async def _get_session_conv(self, event: AstrMessageEvent) -> Conversation:
-         umo = event.unified_msg_origin
-         conv_mgr = self.conv_manager
-
-         # 获取对话上下文
-         cid = await conv_mgr.get_curr_conversation_id(umo)
-         if not cid:
-             cid = await conv_mgr.new_conversation(umo, event.get_platform_id())
-         conversation = await conv_mgr.get_conversation(umo, cid)
-         if not conversation:
-             cid = await conv_mgr.new_conversation(umo, event.get_platform_id())
-             conversation = await conv_mgr.get_conversation(umo, cid)
-             if not conversation:
-                 raise RuntimeError("无法创建新的对话。")
-         return conversation
-
-     async def _apply_kb(
-         self,
-         event: AstrMessageEvent,
-         req: ProviderRequest,
-     ):
-         """Apply knowledge base context to the provider request"""
-         if not self.kb_agentic_mode:
-             if req.prompt is None:
-                 return
-             try:
-                 kb_result = await retrieve_knowledge_base(
-                     query=req.prompt,
-                     umo=event.unified_msg_origin,
-                     context=self.ctx.plugin_manager.context,
-                 )
-                 if not kb_result:
-                     return
-                 if req.system_prompt is not None:
-                     req.system_prompt += (
-                         f"\n\n[Related Knowledge Base Results]:\n{kb_result}"
-                     )
-             except Exception as e:
-                 logger.error(f"Error occurred while retrieving knowledge base: {e}")
-         else:
-             if req.func_tool is None:
-                 req.func_tool = ToolSet()
-             req.func_tool.add_tool(KNOWLEDGE_BASE_QUERY_TOOL)
-
-     async def _apply_file_extract(
-         self,
-         event: AstrMessageEvent,
-         req: ProviderRequest,
-     ):
-         """Apply file extract to the provider request"""
-         file_paths = []
-         file_names = []
-         for comp in event.message_obj.message:
-             if isinstance(comp, File):
-                 file_paths.append(await comp.get_file())
-                 file_names.append(comp.name)
-             elif isinstance(comp, Reply) and comp.chain:
-                 for reply_comp in comp.chain:
-                     if isinstance(reply_comp, File):
-                         file_paths.append(await reply_comp.get_file())
-                         file_names.append(reply_comp.name)
-         if not file_paths:
-             return
-         if not req.prompt:
-             req.prompt = "总结一下文件里面讲了什么?"
-         if self.file_extract_prov == "moonshotai":
-             if not self.file_extract_msh_api_key:
-                 logger.error("Moonshot AI API key for file extract is not set")
-                 return
-             file_contents = await asyncio.gather(
-                 *[
-                     extract_file_moonshotai(file_path, self.file_extract_msh_api_key)
-                     for file_path in file_paths
-                 ]
-             )
-         else:
-             logger.error(f"Unsupported file extract provider: {self.file_extract_prov}")
-             return
-
-         # add file extract results to contexts
-         for file_content, file_name in zip(file_contents, file_names):
-             req.contexts.append(
-                 {
-                     "role": "system",
-                     "content": f"File Extract Results of user uploaded files:\n{file_content}\nFile Name: {file_name or 'Unknown'}",
-                 },
-             )
-
-     def _modalities_fix(
-         self,
-         provider: Provider,
-         req: ProviderRequest,
-     ):
-         """检查提供商的模态能力,清理请求中的不支持内容"""
-         if req.image_urls:
-             provider_cfg = provider.provider_config.get("modalities", ["image"])
-             if "image" not in provider_cfg:
-                 logger.debug(
-                     f"用户设置提供商 {provider} 不支持图像,将图像替换为占位符。"
-                 )
-                 # 为每个图片添加占位符到 prompt
-                 image_count = len(req.image_urls)
-                 placeholder = " ".join(["[图片]"] * image_count)
-                 if req.prompt:
-                     req.prompt = f"{placeholder} {req.prompt}"
-                 else:
-                     req.prompt = placeholder
-                 req.image_urls = []
-         if req.func_tool:
-             provider_cfg = provider.provider_config.get("modalities", ["tool_use"])
-             # 如果模型不支持工具使用,但请求中包含工具列表,则清空。
-             if "tool_use" not in provider_cfg:
-                 logger.debug(
-                     f"用户设置提供商 {provider} 不支持工具使用,清空工具列表。",
-                 )
-                 req.func_tool = None
-
-     def _sanitize_context_by_modalities(
-         self,
-         provider: Provider,
-         req: ProviderRequest,
-     ) -> None:
-         """Sanitize `req.contexts` (including history) by current provider modalities."""
-         if not self.sanitize_context_by_modalities:
-             return
-
-         if not isinstance(req.contexts, list) or not req.contexts:
-             return
-
-         modalities = provider.provider_config.get("modalities", None)
-         # if modalities is not configured, do not sanitize.
-         if not modalities or not isinstance(modalities, list):
-             return
-
-         supports_image = bool("image" in modalities)
-         supports_tool_use = bool("tool_use" in modalities)
-
-         if supports_image and supports_tool_use:
-             return
-
-         sanitized_contexts: list[dict] = []
-         removed_image_blocks = 0
-         removed_tool_messages = 0
-         removed_tool_calls = 0
-
-         for msg in req.contexts:
-             if not isinstance(msg, dict):
-                 continue
-
-             role = msg.get("role")
-             if not role:
-                 continue
-
-             new_msg: dict = msg
-
-             # tool_use sanitize
-             if not supports_tool_use:
-                 if role == "tool":
-                     # tool response block
-                     removed_tool_messages += 1
-                     continue
-                 if role == "assistant" and "tool_calls" in new_msg:
-                     # assistant message with tool calls
-                     if "tool_calls" in new_msg:
-                         removed_tool_calls += 1
-                     new_msg.pop("tool_calls", None)
-                     new_msg.pop("tool_call_id", None)
-
-             # image sanitize
-             if not supports_image:
-                 content = new_msg.get("content")
-                 if isinstance(content, list):
-                     filtered_parts: list = []
-                     removed_any_image = False
-                     for part in content:
-                         if isinstance(part, dict):
-                             part_type = str(part.get("type", "")).lower()
-                             if part_type in {"image_url", "image"}:
-                                 removed_any_image = True
-                                 removed_image_blocks += 1
-                                 continue
-                         filtered_parts.append(part)
-
-                     if removed_any_image:
-                         new_msg["content"] = filtered_parts
-
-             # drop empty assistant messages (e.g. only tool_calls without content)
-             if role == "assistant":
-                 content = new_msg.get("content")
-                 has_tool_calls = bool(new_msg.get("tool_calls"))
-                 if not has_tool_calls:
-                     if not content:
-                         continue
-                     if isinstance(content, str) and not content.strip():
-                         continue
-
-             sanitized_contexts.append(new_msg)
-
-         if removed_image_blocks or removed_tool_messages or removed_tool_calls:
-             logger.debug(
-                 "sanitize_context_by_modalities applied: "
-                 f"removed_image_blocks={removed_image_blocks}, "
-                 f"removed_tool_messages={removed_tool_messages}, "
-                 f"removed_tool_calls={removed_tool_calls}"
-             )
-
-         req.contexts = sanitized_contexts
-
-     def _plugin_tool_fix(
-         self,
-         event: AstrMessageEvent,
-         req: ProviderRequest,
-     ):
-         """根据事件中的插件设置,过滤请求中的工具列表"""
-         if event.plugins_name is not None and req.func_tool:
-             new_tool_set = ToolSet()
-             for tool in req.func_tool.tools:
-                 mp = tool.handler_module_path
-                 if not mp:
-                     continue
-                 plugin = star_map.get(mp)
-                 if not plugin:
-                     continue
-                 if plugin.name in event.plugins_name or plugin.reserved:
-                     new_tool_set.add_tool(tool)
-             req.func_tool = new_tool_set
-
-     async def _handle_webchat(
-         self,
-         event: AstrMessageEvent,
-         req: ProviderRequest,
-         prov: Provider,
-     ):
-         """处理 WebChat 平台的特殊情况,包括第一次 LLM 对话时总结对话内容生成 title"""
-         from astrbot.core import db_helper
-
-         chatui_session_id = event.session_id.split("!")[-1]
-         user_prompt = req.prompt
-
-         session = await db_helper.get_platform_session_by_id(chatui_session_id)
-
-         if (
-             not user_prompt
-             or not chatui_session_id
-             or not session
-             or session.display_name
-         ):
-             return
-
-         llm_resp = await prov.text_chat(
-             system_prompt=(
-                 "You are a conversation title generator. "
-                 "Generate a concise title in the same language as the user’s input, "
-                 "no more than 10 words, capturing only the core topic."
-                 "If the input is a greeting, small talk, or has no clear topic, "
-                 "(e.g., “hi”, “hello”, “haha”), return <None>. "
-                 "Output only the title itself or <None>, with no explanations."
-             ),
-             prompt=(
-                 f"Generate a concise title for the following user query:\n{user_prompt}"
-             ),
-         )
-         if llm_resp and llm_resp.completion_text:
-             title = llm_resp.completion_text.strip()
-             if not title or "<None>" in title:
-                 return
-             logger.info(
-                 f"Generated chatui title for session {chatui_session_id}: {title}"
-             )
-             await db_helper.update_platform_session(
-                 session_id=chatui_session_id,
-                 display_name=title,
-             )
-
-     async def _save_to_history(
-         self,
-         event: AstrMessageEvent,
-         req: ProviderRequest,
-         llm_response: LLMResponse | None,
-         all_messages: list[Message],
-         runner_stats: AgentStats | None,
-     ):
-         if (
-             not req
-             or not req.conversation
-             or not llm_response
-             or llm_response.role != "assistant"
-         ):
-             return
-
-         if not llm_response.completion_text and not req.tool_calls_result:
-             logger.debug("LLM 响应为空,不保存记录。")
-             return
-
-         # using agent context messages to save to history
-         message_to_save = []
-         skipped_initial_system = False
-         for message in all_messages:
-             if message.role == "system" and not skipped_initial_system:
-                 skipped_initial_system = True
-                 continue  # skip first system message
-             if message.role in ["assistant", "user"] and getattr(
-                 message, "_no_save", None
-             ):
-                 # we do not save user and assistant messages that are marked as _no_save
-                 continue
-             message_to_save.append(message.model_dump())
-
-         # get token usage from agent runner stats
-         token_usage = None
-         if runner_stats:
-             token_usage = runner_stats.token_usage.total
-
-         await self.conv_manager.update_conversation(
-             event.unified_msg_origin,
-             req.conversation.cid,
-             history=message_to_save,
-             token_usage=token_usage,
-         )
+         self.conv_manager = ctx.plugin_manager.context.conversation_manager

-     def _get_compress_provider(self) -> Provider | None:
-         if not self.llm_compress_provider_id:
-             return None
-         if self.context_limit_reached_strategy != "llm_compress":
-             return None
-         provider = self.ctx.plugin_manager.context.get_provider_by_id(
-             self.llm_compress_provider_id,
+         self.main_agent_cfg = MainAgentBuildConfig(
+             tool_call_timeout=self.tool_call_timeout,
+             tool_schema_mode=self.tool_schema_mode,
+             sanitize_context_by_modalities=self.sanitize_context_by_modalities,
+             kb_agentic_mode=self.kb_agentic_mode,
+             file_extract_enabled=self.file_extract_enabled,
+             file_extract_prov=self.file_extract_prov,
+             file_extract_msh_api_key=self.file_extract_msh_api_key,
+             context_limit_reached_strategy=self.context_limit_reached_strategy,
+             llm_compress_instruction=self.llm_compress_instruction,
+             llm_compress_keep_recent=self.llm_compress_keep_recent,
+             llm_compress_provider_id=self.llm_compress_provider_id,
+             max_context_length=self.max_context_length,
+             dequeue_context_length=self.dequeue_context_length,
+             llm_safety_mode=self.llm_safety_mode,
+             safety_mode_strategy=self.safety_mode_strategy,
+             computer_use_runtime=self.computer_use_runtime,
+             sandbox_cfg=self.sandbox_cfg,
+             add_cron_tools=self.add_cron_tools,
+             provider_settings=settings,
+             subagent_orchestrator=conf.get("subagent_orchestrator", {}),
+             timezone=self.ctx.plugin_manager.context.get_config().get("timezone"),
          )
-         if provider is None:
-             logger.warning(
-                 f"未找到指定的上下文压缩模型 {self.llm_compress_provider_id},将跳过压缩。",
-             )
-             return None
-         if not isinstance(provider, Provider):
-             logger.warning(
-                 f"指定的上下文压缩模型 {self.llm_compress_provider_id} 不是对话模型,将跳过压缩。"
-             )
-             return None
-         return provider
-
-     def _apply_llm_safety_mode(self, req: ProviderRequest) -> None:
-         """Apply LLM safety mode to the provider request."""
-         if self.safety_mode_strategy == "system_prompt":
-             req.system_prompt = (
-                 f"{LLM_SAFETY_MODE_SYSTEM_PROMPT}\n\n{req.system_prompt or ''}"
-             )
-         else:
-             logger.warning(
-                 f"Unsupported llm_safety_mode strategy: {self.safety_mode_strategy}.",
-             )
-
-     def _apply_sandbox_tools(self, req: ProviderRequest, session_id: str) -> None:
-         """Add sandbox tools to the provider request."""
-         if req.func_tool is None:
-             req.func_tool = ToolSet()
-         if self.sandbox_cfg.get("booter") == "shipyard":
-             ep = self.sandbox_cfg.get("shipyard_endpoint", "")
-             at = self.sandbox_cfg.get("shipyard_access_token", "")
-             if not ep or not at:
-                 logger.error("Shipyard sandbox configuration is incomplete.")
-                 return
-             os.environ["SHIPYARD_ENDPOINT"] = ep
-             os.environ["SHIPYARD_ACCESS_TOKEN"] = at
-             req.func_tool.add_tool(EXECUTE_SHELL_TOOL)
-             req.func_tool.add_tool(PYTHON_TOOL)
-             req.func_tool.add_tool(FILE_UPLOAD_TOOL)
-             req.func_tool.add_tool(FILE_DOWNLOAD_TOOL)
-             req.system_prompt += f"\n{SANDBOX_MODE_PROMPT}\n"

      async def process(
          self, event: AstrMessageEvent, provider_wake_prefix: str
      ) -> AsyncGenerator[None, None]:
-         req: ProviderRequest | None = None
-
          try:
-             provider = self._select_provider(event)
-             if provider is None:
-                 logger.info("未找到任何对话模型(提供商),跳过 LLM 请求处理。")
-                 return
-             if not isinstance(provider, Provider):
-                 logger.error(
-                     f"选择的提供商类型无效({type(provider)}),跳过 LLM 请求处理。"
-                 )
-                 return
-
              streaming_response = self.streaming_response
              if (enable_streaming := event.get_extra("enable_streaming")) is not None:
                  streaming_response = bool(enable_streaming)

-             # 检查消息内容是否有效,避免空消息触发钩子
              has_provider_request = event.get_extra("provider_request") is not None
              has_valid_message = bool(event.message_str and event.message_str.strip())
-             # 检查是否有图片或其他媒体内容
              has_media_content = any(
                  isinstance(comp, Image | File) for comp in event.message_obj.message
              )
@@ -536,161 +147,50 @@ class InternalAgentSubStage(Stage):
                  logger.debug("skip llm request: empty message and no provider_request")
                  return

-             api_base = provider.provider_config.get("api_base", "")
-             for host in decoded_blocked:
-                 if host in api_base:
-                     logger.error(
-                         f"Provider API base {api_base} is blocked due to security reasons. Please use another ai provider."
-                     )
-                     return
-
              logger.debug("ready to request llm provider")

-             # 通知等待调用 LLM(在获取锁之前)
              await call_event_hook(event, EventType.OnWaitingLLMRequestEvent)

              async with session_lock_manager.acquire_lock(event.unified_msg_origin):
                  logger.debug("acquired session lock for llm request")
-                 if event.get_extra("provider_request"):
-                     req = event.get_extra("provider_request")
-                     assert isinstance(req, ProviderRequest), (
-                         "provider_request 必须是 ProviderRequest 类型。"
-                     )

-                     if req.conversation:
-                         req.contexts = json.loads(req.conversation.history)
-
-                 else:
-                     req = ProviderRequest()
-                     req.prompt = ""
-                     req.image_urls = []
-                     if sel_model := event.get_extra("selected_model"):
-                         req.model = sel_model
-                     if provider_wake_prefix and not event.message_str.startswith(
-                         provider_wake_prefix
-                     ):
-                         return
-
-                     req.prompt = event.message_str[len(provider_wake_prefix) :]
-                     # func_tool selection 现在已经转移到 astrbot/builtin_stars/astrbot 插件中进行选择。
-                     # req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()
-                     for comp in event.message_obj.message:
-                         if isinstance(comp, Image):
-                             image_path = await comp.convert_to_file_path()
-                             req.image_urls.append(image_path)
-
-                             req.extra_user_content_parts.append(
-                                 TextPart(text=f"[Image Attachment: path {image_path}]")
-                             )
-                         elif isinstance(comp, File):
-                             file_path = await comp.get_file()
-                             file_name = comp.name or os.path.basename(file_path)
-                             req.extra_user_content_parts.append(
-                                 TextPart(
-                                     text=f"[File Attachment: name {file_name}, path {file_path}]"
-                                 )
-                             )
-
-                     conversation = await self._get_session_conv(event)
-                     req.conversation = conversation
-                     req.contexts = json.loads(conversation.history)
-
-                 event.set_extra("provider_request", req)
-
-                 # fix contexts json str
-                 if isinstance(req.contexts, str):
-                     req.contexts = json.loads(req.contexts)
-
-                 # apply file extract
-                 if self.file_extract_enabled:
-                     try:
-                         await self._apply_file_extract(event, req)
-                     except Exception as e:
-                         logger.error(f"Error occurred while applying file extract: {e}")
+                 build_cfg = replace(
+                     self.main_agent_cfg,
+                     provider_wake_prefix=provider_wake_prefix,
+                     streaming_response=streaming_response,
+                 )

-                 if not req.prompt and not req.image_urls:
-                     if not event.get_group_id() and req.extra_user_content_parts:
-                         req.prompt = "<attachment>"
-                     else:
-                         return
+                 build_result: MainAgentBuildResult | None = await build_main_agent(
+                     event=event,
+                     plugin_context=self.ctx.plugin_manager.context,
+                     config=build_cfg,
+                 )

-                 # call event hook
-                 if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
+                 if build_result is None:
                      return

-                 # apply knowledge base feature
-                 await self._apply_kb(event, req)
-
-                 # truncate contexts to fit max length
-                 # NOW moved to ContextManager inside ToolLoopAgentRunner
-                 # if req.contexts:
-                 # req.contexts = self._truncate_contexts(req.contexts)
-                 # self._fix_messages(req.contexts)
-
-                 # session_id
-                 if not req.session_id:
-                     req.session_id = event.unified_msg_origin
-
-                 # check provider modalities, if provider does not support image/tool_use, clear them in request.
-                 self._modalities_fix(provider, req)
-
-                 # filter tools, only keep tools from this pipeline's selected plugins
-                 self._plugin_tool_fix(event, req)
-
-                 # sanitize contexts (including history) by provider modalities
-                 self._sanitize_context_by_modalities(provider, req)
+                 agent_runner = build_result.agent_runner
+                 req = build_result.provider_request
+                 provider = build_result.provider

-                 # apply llm safety mode
-                 if self.llm_safety_mode:
-                     self._apply_llm_safety_mode(req)
-
-                 # apply sandbox tools
-                 if self.sandbox_cfg.get("enable", False):
-                     self._apply_sandbox_tools(req, req.session_id)
+                 api_base = provider.provider_config.get("api_base", "")
+                 for host in decoded_blocked:
+                     if host in api_base:
+                         logger.error(
+                             "Provider API base %s is blocked due to security reasons. Please use another ai provider.",
+                             api_base,
+                         )
+                         return

                  stream_to_general = (
                      self.unsupported_streaming_strategy == "turn_off"
                      and not event.platform_meta.support_streaming_message
                  )

-                 # run agent
-                 agent_runner = AgentRunner()
-                 logger.debug(
-                     f"handle provider[id: {provider.provider_config['id']}] request: {req}",
-                 )
-                 astr_agent_ctx = AstrAgentContext(
-                     context=self.ctx.plugin_manager.context,
-                     event=event,
-                 )
-
-                 # inject model context length limit
-                 if provider.provider_config.get("max_context_tokens", 0) <= 0:
-                     model = provider.get_model()
-                     if model_info := LLM_METADATAS.get(model):
-                         provider.provider_config["max_context_tokens"] = model_info[
-                             "limit"
-                         ]["context"]
-
-                 # ChatUI 对话的标题生成
-                 if event.get_platform_name() == "webchat":
-                     asyncio.create_task(self._handle_webchat(event, req, provider))
-
-                 # 注入 ChatUI 额外 prompt
-                 # 比如 follow-up questions 提示等
-                 req.system_prompt += f"\n{CHATUI_EXTRA_PROMPT}\n"
-
-                 # 注入基本 prompt
-                 if req.func_tool and req.func_tool.tools:
-                     tool_prompt = (
-                         TOOL_CALL_PROMPT
-                         if self.tool_schema_mode == "full"
-                         else TOOL_CALL_PROMPT_SKILLS_LIKE_MODE
-                     )
-                     req.system_prompt += f"\n{tool_prompt}\n"
+                 if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
+                     return

                  action_type = event.get_extra("action_type")
-                 if action_type == "live":
-                     req.system_prompt += f"\n{LIVE_MODE_SYSTEM_PROMPT}\n"

                  event.trace.record(
                      "astr_agent_prepare",
@@ -703,24 +203,6 @@ class InternalAgentSubStage(Stage):
                      },
                  )

-                 await agent_runner.reset(
-                     provider=provider,
-                     request=req,
-                     run_context=AgentContextWrapper(
-                         context=astr_agent_ctx,
-                         tool_call_timeout=self.tool_call_timeout,
-                     ),
-                     tool_executor=FunctionToolExecutor(),
-                     agent_hooks=MAIN_AGENT_HOOKS,
-                     streaming=streaming_response,
-                     llm_compress_instruction=self.llm_compress_instruction,
-                     llm_compress_keep_recent=self.llm_compress_keep_recent,
-                     llm_compress_provider=self._get_compress_provider(),
-                     truncate_turns=self.dequeue_context_length,
-                     enforce_max_turns=self.max_context_length,
-                     tool_schema_mode=self.tool_schema_mode,
-                 )
-
                  # 检测 Live Mode
                  # Live Mode: 使用 run_live_agent
@@ -840,3 +322,52 @@ class InternalAgentSubStage(Stage):
                          f"Error occurred while processing agent request: {e}"
                      )
                  )
+
+     async def _save_to_history(
+         self,
+         event: AstrMessageEvent,
+         req: ProviderRequest,
+         llm_response: LLMResponse | None,
+         all_messages: list[Message],
+         runner_stats: AgentStats | None,
+     ):
+         if (
+             not req
+             or not req.conversation
+             or not llm_response
+             or llm_response.role != "assistant"
+         ):
+             return
+
+         if not llm_response.completion_text and not req.tool_calls_result:
+             logger.debug("LLM 响应为空,不保存记录。")
+             return
+
+         message_to_save = []
+         skipped_initial_system = False
+         for message in all_messages:
+             if message.role == "system" and not skipped_initial_system:
+                 skipped_initial_system = True
+                 continue
+             if message.role in ["assistant", "user"] and getattr(
+                 message, "_no_save", None
+             ):
+                 continue
+             message_to_save.append(message.model_dump())
+
+         token_usage = None
+         if runner_stats:
+             token_usage = runner_stats.token_usage.total
+
+         await self.conv_manager.update_conversation(
+             event.unified_msg_origin,
+             req.conversation.cid,
+             history=message_to_save,
+             token_usage=token_usage,
+         )
+
+
+ # we prevent astrbot from connecting to known malicious hosts
+ # these hosts are base64 encoded
+ BLOCKED = {"dGZid2h2d3IuY2xvdWQuc2VhbG9zLmlv", "a291cmljaGF0"}
+ decoded_blocked = [base64.b64decode(b).decode("utf-8") for b in BLOCKED]
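The final hunk moves the host blocklist to module level: the hostnames are kept base64-encoded in source, decoded once at import time, and matched as substrings against the provider's `api_base` inside `process`. A standalone sketch of that check (the `is_blocked` helper is illustrative, not part of the package):

```python
import base64

# Hostnames stay base64-encoded in source and are decoded once at import,
# mirroring the BLOCKED / decoded_blocked pair added above.
BLOCKED = {"dGZid2h2d3IuY2xvdWQuc2VhbG9zLmlv", "a291cmljaGF0"}
decoded_blocked = [base64.b64decode(b).decode("utf-8") for b in BLOCKED]


def is_blocked(api_base: str) -> bool:
    # Same substring test as `if host in api_base` in the diff.
    return any(host in api_base for host in decoded_blocked)


print(is_blocked("https://api.example-provider.com/v1"))  # False for an ordinary provider
```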