AstrBot 4.0.0b4__py3-none-any.whl → 4.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. astrbot/api/event/filter/__init__.py +2 -0
  2. astrbot/cli/utils/basic.py +12 -3
  3. astrbot/core/astrbot_config_mgr.py +16 -9
  4. astrbot/core/config/default.py +82 -4
  5. astrbot/core/initial_loader.py +4 -1
  6. astrbot/core/message/components.py +59 -50
  7. astrbot/core/pipeline/process_stage/method/llm_request.py +6 -2
  8. astrbot/core/pipeline/result_decorate/stage.py +5 -1
  9. astrbot/core/platform/manager.py +25 -3
  10. astrbot/core/platform/sources/aiocqhttp/aiocqhttp_message_event.py +26 -14
  11. astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py +11 -4
  12. astrbot/core/platform/sources/satori/satori_adapter.py +482 -0
  13. astrbot/core/platform/sources/satori/satori_event.py +221 -0
  14. astrbot/core/platform/sources/telegram/tg_adapter.py +0 -1
  15. astrbot/core/provider/entities.py +17 -15
  16. astrbot/core/provider/sources/gemini_source.py +57 -18
  17. astrbot/core/provider/sources/openai_source.py +12 -5
  18. astrbot/core/provider/sources/vllm_rerank_source.py +6 -0
  19. astrbot/core/star/__init__.py +7 -5
  20. astrbot/core/star/filter/command.py +9 -3
  21. astrbot/core/star/filter/platform_adapter_type.py +3 -0
  22. astrbot/core/star/register/__init__.py +2 -0
  23. astrbot/core/star/register/star_handler.py +18 -4
  24. astrbot/core/star/star_handler.py +9 -1
  25. astrbot/core/star/star_tools.py +116 -21
  26. astrbot/core/updator.py +7 -5
  27. astrbot/core/utils/io.py +1 -1
  28. astrbot/core/utils/t2i/network_strategy.py +11 -18
  29. astrbot/core/utils/t2i/renderer.py +8 -2
  30. astrbot/core/utils/t2i/template/astrbot_powershell.html +184 -0
  31. astrbot/core/utils/t2i/template_manager.py +112 -0
  32. astrbot/core/zip_updator.py +26 -4
  33. astrbot/dashboard/routes/chat.py +6 -1
  34. astrbot/dashboard/routes/config.py +24 -49
  35. astrbot/dashboard/routes/route.py +19 -2
  36. astrbot/dashboard/routes/t2i.py +230 -0
  37. astrbot/dashboard/routes/update.py +3 -5
  38. astrbot/dashboard/server.py +13 -4
  39. {astrbot-4.0.0b4.dist-info → astrbot-4.1.0.dist-info}/METADATA +40 -53
  40. {astrbot-4.0.0b4.dist-info → astrbot-4.1.0.dist-info}/RECORD +43 -38
  41. {astrbot-4.0.0b4.dist-info → astrbot-4.1.0.dist-info}/WHEEL +0 -0
  42. {astrbot-4.0.0b4.dist-info → astrbot-4.1.0.dist-info}/entry_points.txt +0 -0
  43. {astrbot-4.0.0b4.dist-info → astrbot-4.1.0.dist-info}/licenses/LICENSE +0 -0
astrbot/core/platform/sources/telegram/tg_adapter.py
@@ -183,7 +183,6 @@ class TelegramPlatformAdapter(Platform):
             return None

         if not re.match(r"^[a-z0-9_]+$", cmd_name) or len(cmd_name) > 32:
-            logger.debug(f"跳过无法注册的命令: {cmd_name}")
             return None

         # Build description.
astrbot/core/provider/entities.py
@@ -4,9 +4,11 @@ import json
 from astrbot.core.utils.io import download_image_by_url
 from astrbot import logger
 from dataclasses import dataclass, field
-from typing import List, Dict, Type
+from typing import List, Dict, Type, Any
 from astrbot.core.agent.tool import ToolSet
 from openai.types.chat.chat_completion import ChatCompletion
+from google.genai.types import GenerateContentResponse
+from anthropic.types import Message
 from openai.types.chat.chat_completion_message_tool_call import (
     ChatCompletionMessageToolCall,
 )
@@ -30,11 +32,11 @@ class ProviderMetaData:
     desc: str = ""
     """提供商适配器描述."""
     provider_type: ProviderType = ProviderType.CHAT_COMPLETION
-    cls_type: Type = None
+    cls_type: Type | None = None

-    default_config_tmpl: dict = None
+    default_config_tmpl: dict | None = None
     """平台的默认配置模板"""
-    provider_display_name: str = None
+    provider_display_name: str | None = None
     """显示在 WebUI 配置页中的提供商名称,如空则是 type"""


@@ -58,7 +60,7 @@ class ToolCallMessageSegment:
 class AssistantMessageSegment:
     """OpenAI 格式的上下文中 role 为 assistant 的消息段。参考: https://platform.openai.com/docs/guides/function-calling"""

-    content: str = None
+    content: str | None = None
     tool_calls: List[ChatCompletionMessageToolCall | Dict] = field(default_factory=list)
     role: str = "assistant"

@@ -205,17 +207,17 @@ class ProviderRequest:
 class LLMResponse:
     role: str
     """角色, assistant, tool, err"""
-    result_chain: MessageChain = None
+    result_chain: MessageChain | None = None
     """返回的消息链"""
-    tools_call_args: List[Dict[str, any]] = field(default_factory=list)
+    tools_call_args: List[Dict[str, Any]] = field(default_factory=list)
     """工具调用参数"""
     tools_call_name: List[str] = field(default_factory=list)
     """工具调用名称"""
     tools_call_ids: List[str] = field(default_factory=list)
     """工具调用 ID"""

-    raw_completion: ChatCompletion = None
-    _new_record: Dict[str, any] = None
+    raw_completion: ChatCompletion | GenerateContentResponse | Message | None = None
+    _new_record: Dict[str, Any] | None = None

     _completion_text: str = ""

@@ -226,12 +228,12 @@ class LLMResponse:
         self,
         role: str,
         completion_text: str = "",
-        result_chain: MessageChain = None,
-        tools_call_args: List[Dict[str, any]] = None,
-        tools_call_name: List[str] = None,
-        tools_call_ids: List[str] = None,
-        raw_completion: ChatCompletion = None,
-        _new_record: Dict[str, any] = None,
+        result_chain: MessageChain | None = None,
+        tools_call_args: List[Dict[str, Any]] | None = None,
+        tools_call_name: List[str] | None = None,
+        tools_call_ids: List[str] | None = None,
+        raw_completion: ChatCompletion | None = None,
+        _new_record: Dict[str, Any] | None = None,
         is_chunk: bool = False,
     ):
         """初始化 LLMResponse
astrbot/core/provider/sources/gemini_source.py
@@ -15,7 +15,7 @@ from astrbot import logger
 from astrbot.api.provider import Provider
 from astrbot.core.message.message_event_result import MessageChain
 from astrbot.core.provider.entities import LLMResponse
-from astrbot.core.provider.func_tool_manager import FuncCall
+from astrbot.core.provider.func_tool_manager import ToolSet
 from astrbot.core.utils.io import download_image_by_url

 from ..register import register_provider_adapter
@@ -61,7 +61,7 @@ class ProviderGoogleGenAI(Provider):
             default_persona,
         )
         self.api_keys: list = provider_config.get("key", [])
-        self.chosen_api_key: str = self.api_keys[0] if len(self.api_keys) > 0 else None
+        self.chosen_api_key: str = self.api_keys[0] if len(self.api_keys) > 0 else ""
         self.timeout: int = int(provider_config.get("timeout", 180))

         self.api_base: Optional[str] = provider_config.get("api_base", None)
@@ -96,6 +96,9 @@ class ProviderGoogleGenAI(Provider):

     async def _handle_api_error(self, e: APIError, keys: list[str]) -> bool:
         """处理API错误,返回是否需要重试"""
+        if e.message is None:
+            e.message = ""
+
         if e.code == 429 or "API key not valid" in e.message:
             keys.remove(self.chosen_api_key)
             if len(keys) > 0:
@@ -119,7 +122,7 @@ class ProviderGoogleGenAI(Provider):
     async def _prepare_query_config(
         self,
         payloads: dict,
-        tools: Optional[FuncCall] = None,
+        tools: Optional[ToolSet] = None,
         system_instruction: Optional[str] = None,
         modalities: Optional[list[str]] = None,
         temperature: float = 0.7,
@@ -321,11 +324,15 @@ class ProviderGoogleGenAI(Provider):

     @staticmethod
     def _process_content_parts(
-        result: types.GenerateContentResponse, llm_response: LLMResponse
+        candidate: types.Candidate, llm_response: LLMResponse
     ) -> MessageChain:
         """处理内容部分并构建消息链"""
-        finish_reason = result.candidates[0].finish_reason
-        result_parts: Optional[types.Part] = result.candidates[0].content.parts
+        if not candidate.content:
+            logger.warning(f"收到的 candidate.content 为空: {candidate}")
+            raise Exception("API 返回的 candidate.content 为空。")
+
+        finish_reason = candidate.finish_reason
+        result_parts: list[types.Part] | None = candidate.content.parts

         if finish_reason == types.FinishReason.SAFETY:
             raise Exception("模型生成内容未通过 Gemini 平台的安全检查")
@@ -343,22 +350,28 @@ class ProviderGoogleGenAI(Provider):
             raise Exception("模型生成内容违反 Gemini 平台政策")

         if not result_parts:
-            logger.debug(result.candidates)
-            raise Exception("API 返回的内容为空。")
+            logger.warning(f"收到的 candidate.content.parts 为空: {candidate}")
+            raise Exception("API 返回的 candidate.content.parts 为空。")

         chain = []
         part: types.Part

         # 暂时这样Fallback
         if all(
-            part.inline_data and part.inline_data.mime_type.startswith("image/")
+            part.inline_data
+            and part.inline_data.mime_type
+            and part.inline_data.mime_type.startswith("image/")
             for part in result_parts
         ):
             chain.append(Comp.Plain("这是图片"))
         for part in result_parts:
             if part.text:
                 chain.append(Comp.Plain(part.text))
-            elif part.function_call:
+            elif (
+                part.function_call
+                and part.function_call.name is not None
+                and part.function_call.args is not None
+            ):
                 llm_response.role = "tool"
                 llm_response.tools_call_name.append(part.function_call.name)
                 llm_response.tools_call_args.append(part.function_call.args)
@@ -366,11 +379,16 @@ class ProviderGoogleGenAI(Provider):
                 llm_response.tools_call_ids.append(
                     part.function_call.id or part.function_call.name
                 )
-            elif part.inline_data and part.inline_data.mime_type.startswith("image/"):
+            elif (
+                part.inline_data
+                and part.inline_data.mime_type
+                and part.inline_data.mime_type.startswith("image/")
+                and part.inline_data.data
+            ):
                 chain.append(Comp.Image.fromBytes(part.inline_data.data))
         return MessageChain(chain=chain)

-    async def _query(self, payloads: dict, tools: FuncCall) -> LLMResponse:
+    async def _query(self, payloads: dict, tools: ToolSet | None) -> LLMResponse:
         """非流式请求 Gemini API"""
         system_instruction = next(
             (msg["content"] for msg in payloads["messages"] if msg["role"] == "system"),
@@ -396,6 +414,10 @@ class ProviderGoogleGenAI(Provider):
                     config=config,
                 )

+                if not result.candidates:
+                    logger.error(f"请求失败, 返回的 candidates 为空: {result}")
+                    raise Exception("请求失败, 返回的 candidates 为空。")
+
                 if result.candidates[0].finish_reason == types.FinishReason.RECITATION:
                     if temperature > 2:
                         raise Exception("温度参数已超过最大值2,仍然发生recitation")
@@ -408,6 +430,8 @@ class ProviderGoogleGenAI(Provider):
                 break

             except APIError as e:
+                if e.message is None:
+                    e.message = ""
                 if "Developer instruction is not enabled" in e.message:
                     logger.warning(
                         f"{self.get_model()} 不支持 system prompt,已自动去除(影响人格设置)"
@@ -432,11 +456,13 @@ class ProviderGoogleGenAI(Provider):

         llm_response = LLMResponse("assistant")
         llm_response.raw_completion = result
-        llm_response.result_chain = self._process_content_parts(result, llm_response)
+        llm_response.result_chain = self._process_content_parts(
+            result.candidates[0], llm_response
+        )
         return llm_response

     async def _query_stream(
-        self, payloads: dict, tools: FuncCall
+        self, payloads: dict, tools: ToolSet | None
     ) -> AsyncGenerator[LLMResponse, None]:
         """流式请求 Gemini API"""
         system_instruction = next(
@@ -459,6 +485,8 @@ class ProviderGoogleGenAI(Provider):
                 )
                 break
             except APIError as e:
+                if e.message is None:
+                    e.message = ""
                 if "Developer instruction is not enabled" in e.message:
                     logger.warning(
                         f"{self.get_model()} 不支持 system prompt,已自动去除(影响人格设置)"
@@ -478,13 +506,20 @@ class ProviderGoogleGenAI(Provider):
         async for chunk in result:
             llm_response = LLMResponse("assistant", is_chunk=True)

+            if not chunk.candidates:
+                logger.warning(f"收到的 chunk 中 candidates 为空: {chunk}")
+                continue
+            if not chunk.candidates[0].content:
+                logger.warning(f"收到的 chunk 中 content 为空: {chunk}")
+                continue
+
             if chunk.candidates[0].content.parts and any(
                 part.function_call for part in chunk.candidates[0].content.parts
             ):
                 llm_response = LLMResponse("assistant", is_chunk=False)
                 llm_response.raw_completion = chunk
                 llm_response.result_chain = self._process_content_parts(
-                    chunk, llm_response
+                    chunk.candidates[0], llm_response
                 )
                 yield llm_response
                 return
@@ -500,7 +535,7 @@ class ProviderGoogleGenAI(Provider):
                 final_response = LLMResponse("assistant", is_chunk=False)
                 final_response.raw_completion = chunk
                 final_response.result_chain = self._process_content_parts(
-                    chunk, final_response
+                    chunk.candidates[0], final_response
                 )
                 break

@@ -566,6 +601,8 @@ class ProviderGoogleGenAI(Provider):
                 continue
             break

+        raise Exception("请求失败。")
+
     async def text_chat_stream(
         self,
         prompt,
@@ -621,7 +658,9 @@ class ProviderGoogleGenAI(Provider):
             return [
                 m.name.replace("models/", "")
                 for m in models
-                if "generateContent" in m.supported_actions
+                if m.supported_actions
+                and "generateContent" in m.supported_actions
+                and m.name
             ]
         except APIError as e:
             raise Exception(f"获取模型列表失败: {e.message}")
@@ -636,7 +675,7 @@ class ProviderGoogleGenAI(Provider):
             self.chosen_api_key = key
             self._init_client()

-    async def assemble_context(self, text: str, image_urls: list[str] = None):
+    async def assemble_context(self, text: str, image_urls: list[str] | None = None):
         """
         组装上下文。
         """
astrbot/core/provider/sources/openai_source.py
@@ -99,12 +99,13 @@ class ProviderOpenAIOfficial(Provider):
         for key in to_del:
             del payloads[key]

-        model = payloads.get("model", "")
-        # 针对 qwen3 模型的特殊处理:非流式调用必须设置 enable_thinking=false
-        if "qwen3" in model.lower():
-            extra_body["enable_thinking"] = False
+        # 读取并合并 custom_extra_body 配置
+        custom_extra_body = self.provider_config.get("custom_extra_body", {})
+        if isinstance(custom_extra_body, dict):
+            extra_body.update(custom_extra_body)
+
         # 针对 deepseek 模型的特殊处理:deepseek-reasoner调用必须移除 tools ,否则将被切换至 deepseek-chat
-        elif model == "deepseek-reasoner" and "tools" in payloads:
+        if model == "deepseek-reasoner" and "tools" in payloads:
             del payloads["tools"]

         completion = await self.client.chat.completions.create(
@@ -137,6 +138,12 @@ class ProviderOpenAIOfficial(Provider):

         # 不在默认参数中的参数放在 extra_body 中
         extra_body = {}
+
+        # 读取并合并 custom_extra_body 配置
+        custom_extra_body = self.provider_config.get("custom_extra_body", {})
+        if isinstance(custom_extra_body, dict):
+            extra_body.update(custom_extra_body)
+
         to_del = []
         for key in payloads.keys():
             if key not in self.default_params:
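Note: the two hunks above drop the hard-coded qwen3 enable_thinking workaround and instead merge an optional custom_extra_body provider option into extra_body before the request is sent. A minimal sketch of the merge, assuming a provider_config shaped like the one read above (values are hypothetical; the surrounding OpenAI client call is omitted):

    # Sketch of the custom_extra_body merge, with a hypothetical config value.
    provider_config = {"custom_extra_body": {"enable_thinking": False, "top_k": 20}}

    extra_body: dict = {}
    custom_extra_body = provider_config.get("custom_extra_body", {})
    if isinstance(custom_extra_body, dict):
        extra_body.update(custom_extra_body)

    # extra_body is later passed through to chat.completions.create(..., extra_body=extra_body),
    # so provider-specific fields (e.g. the old qwen3 enable_thinking flag) can now be set per provider.
    print(extra_body)  # {'enable_thinking': False, 'top_k': 20}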
astrbot/core/provider/sources/vllm_rerank_source.py
@@ -1,4 +1,5 @@
 import aiohttp
+from astrbot import logger
 from ..provider import RerankProvider
 from ..register import register_provider_adapter
 from ..entities import ProviderType, RerankResult
@@ -44,6 +45,11 @@ class VLLMRerankProvider(RerankProvider):
                 response_data = await response.json()
                 results = response_data.get("results", [])

+                if not results:
+                    logger.warning(
+                        f"Rerank API 返回了空的列表数据。原始响应: {response_data}"
+                    )
+
                 return [
                     RerankResult(
                         index=result["index"],
astrbot/core/star/__init__.py
@@ -27,14 +27,16 @@ class Star(CommandParserMixin):
             star_map[cls.__module__].star_cls_type = cls
             star_map[cls.__module__].module_path = cls.__module__

-    @staticmethod
-    async def text_to_image(text: str, return_url=True) -> str:
+    async def text_to_image(self, text: str, return_url=True) -> str:
         """将文本转换为图片"""
-        return await html_renderer.render_t2i(text, return_url=return_url)
+        return await html_renderer.render_t2i(
+            text,
+            return_url=return_url,
+            template_name=self.context._config.get("t2i_active_template"),
+        )

-    @staticmethod
     async def html_render(
-        tmpl: str, data: dict, return_url=True, options: dict | None = None
+        self, tmpl: str, data: dict, return_url=True, options: dict | None = None
     ) -> str:
         """渲染 HTML"""
         return await html_renderer.render_custom_template(
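Note: text_to_image and html_render are no longer static methods; they now run on the plugin instance so the renderer can pick up the active T2I template from the plugin's context. A hedged usage sketch from inside a plugin class (the Star import path and the handler shape are assumptions; text_to_image itself is taken from the hunk above):

    from astrbot.api.star import Star  # import path assumed

    class MyPlugin(Star):
        async def reply_as_image(self, text: str) -> str:
            # Must now be called on the instance: the renderer reads the
            # configured "t2i_active_template" via self.context.
            return await self.text_to_image(text, return_url=True)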
astrbot/core/star/filter/command.py
@@ -7,7 +7,6 @@ from astrbot.core.config import AstrBotConfig
 from .custom_filter import CustomFilter
 from ..star_handler import StarHandlerMetadata

-
 class GreedyStr(str):
     """标记指令完成其他参数接收后的所有剩余文本。"""

@@ -153,10 +152,17 @@ class CommandFilter(HandlerFilter):
                 _full = f"{parent_command_name} {candidate}"
             else:
                 _full = candidate
-            if message_str.startswith(f"{_full} ") or message_str == _full:
-                message_str = message_str[len(_full) :].strip()
+            if message_str == _full:
+                # 完全等于命令名 没参数
+                message_str = ""
+                ok = True
+                break
+            elif message_str.startswith(_full):
+                # 命令名后面无论是空格还是直接连参数都可以
+                message_str = message_str[len(_full):].lstrip()
                 ok = True
                 break
+
         if not ok:
             return False

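Note: with the rewritten matching above, a command now matches whether or not a space separates it from its arguments, and a bare command yields an empty argument string. A standalone sketch of the new rule (strip_command is a hypothetical helper mirroring the hunk, where _full is the resolved command name):

    def strip_command(message_str: str, _full: str) -> str | None:
        """Mirror of the new CommandFilter logic: argument text, or None if no match."""
        if message_str == _full:
            return ""  # bare command, no arguments
        if message_str.startswith(_full):
            return message_str[len(_full):].lstrip()  # space after the command is now optional
        return None

    assert strip_command("help", "help") == ""
    assert strip_command("help plugins", "help") == "plugins"
    assert strip_command("helpplugins", "help") == "plugins"  # newly accepted form
    assert strip_command("other", "help") is None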
astrbot/core/star/filter/platform_adapter_type.py
@@ -18,6 +18,7 @@ class PlatformAdapterType(enum.Flag):
     KOOK = enum.auto()
     VOCECHAT = enum.auto()
     WEIXIN_OFFICIAL_ACCOUNT = enum.auto()
+    SATORI = enum.auto()
     ALL = (
         AIOCQHTTP
         | QQOFFICIAL
@@ -31,6 +32,7 @@ class PlatformAdapterType(enum.Flag):
         | KOOK
         | VOCECHAT
         | WEIXIN_OFFICIAL_ACCOUNT
+        | SATORI
     )


@@ -47,6 +49,7 @@ ADAPTER_NAME_2_TYPE = {
     "wechatpadpro": PlatformAdapterType.WECHATPADPRO,
     "vocechat": PlatformAdapterType.VOCECHAT,
     "weixin_official_account": PlatformAdapterType.WEIXIN_OFFICIAL_ACCOUNT,
+    "satori": PlatformAdapterType.SATORI,
 }

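Note: SATORI joins PlatformAdapterType and ADAPTER_NAME_2_TYPE, so handlers can be limited to the new Satori adapter. A hedged sketch of how a plugin might target it (the filter import path and decorator usage follow the existing public filter API and are assumptions here):

    from astrbot.api.event import filter, AstrMessageEvent  # import path assumed
    from astrbot.api.star import Star

    class SatoriOnly(Star):
        @filter.platform_adapter_type(filter.PlatformAdapterType.SATORI)
        async def on_satori_message(self, event: AstrMessageEvent):
            # Runs only for events delivered by the Satori platform adapter.
            yield event.plain_result("hello from Satori")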
astrbot/core/star/register/__init__.py
@@ -8,6 +8,7 @@ from .star_handler import (
     register_permission_type,
     register_custom_filter,
     register_on_astrbot_loaded,
+    register_on_platform_loaded,
     register_on_llm_request,
     register_on_llm_response,
     register_llm_tool,
@@ -26,6 +27,7 @@ __all__ = [
     "register_permission_type",
     "register_custom_filter",
     "register_on_astrbot_loaded",
+    "register_on_platform_loaded",
     "register_on_llm_request",
     "register_on_llm_response",
     "register_llm_tool",
astrbot/core/star/register/star_handler.py
@@ -267,6 +267,18 @@ def register_on_astrbot_loaded(**kwargs):
     return decorator


+def register_on_platform_loaded(**kwargs):
+    """
+    当平台加载完成时
+    """
+
+    def decorator(awaitable):
+        _ = get_handler_or_create(awaitable, EventType.OnPlatformLoadedEvent, **kwargs)
+        return awaitable
+
+    return decorator
+
+
 def register_on_llm_request(**kwargs):
     """当有 LLM 请求时的事件

@@ -376,9 +388,11 @@ def register_llm_tool(name: str = None, **kwargs):
         # print(f"Registering tool {llm_tool_name} for agent", registering_agent._agent.name)
         if registering_agent._agent.tools is None:
             registering_agent._agent.tools = []
-        registering_agent._agent.tools.append(llm_tools.spec_to_func(
-            llm_tool_name, args, docstring.description.strip(), awaitable
-        ))
+        registering_agent._agent.tools.append(
+            llm_tools.spec_to_func(
+                llm_tool_name, args, docstring.description.strip(), awaitable
+            )
+        )

         return awaitable

@@ -421,7 +435,7 @@ def register_agent(
         run_hooks=run_hooks or BaseAgentRunHooks[AstrAgentContext](),
    )
    handoff_tool = HandoffTool(agent=agent)
-    handoff_tool.handler=awaitable
+    handoff_tool.handler = awaitable
    llm_tools.func_list.append(handoff_tool)
    return RegisteringAgent(agent)

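Note: register_on_platform_loaded mirrors register_on_astrbot_loaded and hooks the new OnPlatformLoadedEvent. A hedged sketch of a plugin using it; the on_platform_loaded alias in the filter API is assumed to follow the existing on_astrbot_loaded convention:

    from astrbot import logger
    from astrbot.api.event import filter  # public alias assumed
    from astrbot.api.star import Star

    class Startup(Star):
        @filter.on_platform_loaded()
        async def when_platforms_ready(self):
            # Fires on OnPlatformLoadedEvent, i.e. once the platform adapters finish loading.
            logger.info("platforms are up")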
astrbot/core/star/star_handler.py
@@ -34,19 +34,26 @@ class StarHandlerRegistry(Generic[T]):
     ) -> List[StarHandlerMetadata]:
         handlers = []
         for handler in self._handlers:
+            # 过滤事件类型
             if handler.event_type != event_type:
                 continue
+            # 过滤启用状态
             if only_activated:
                 plugin = star_map.get(handler.handler_module_path)
                 if not (plugin and plugin.activated):
                     continue
+            # 过滤插件白名单
             if plugins_name is not None and plugins_name != ["*"]:
                 plugin = star_map.get(handler.handler_module_path)
                 if not plugin:
                     continue
                 if (
                     plugin.name not in plugins_name
-                    and event_type != EventType.OnAstrBotLoadedEvent
+                    and event_type
+                    not in (
+                        EventType.OnAstrBotLoadedEvent,
+                        EventType.OnPlatformLoadedEvent,
+                    )
                     and not plugin.reserved
                 ):
                     continue
@@ -90,6 +97,7 @@ class EventType(enum.Enum):
     """

     OnAstrBotLoadedEvent = enum.auto()  # AstrBot 加载完成
+    OnPlatformLoadedEvent = enum.auto()  # 平台加载完成

     AdapterMessageEvent = enum.auto()  # 收到适配器发来的消息
     OnLLMRequestEvent = enum.auto()  # 收到 LLM 请求(可以是用户也可以是插件)