AstrBot 4.10.3__py3-none-any.whl → 4.10.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. astrbot/builtin_stars/astrbot/main.py +2 -10
  2. astrbot/builtin_stars/python_interpreter/main.py +130 -131
  3. astrbot/cli/__init__.py +1 -1
  4. astrbot/core/agent/message.py +23 -1
  5. astrbot/core/agent/runners/tool_loop_agent_runner.py +24 -7
  6. astrbot/core/astr_agent_hooks.py +6 -0
  7. astrbot/core/backup/exporter.py +1 -0
  8. astrbot/core/config/astrbot_config.py +2 -0
  9. astrbot/core/config/default.py +47 -6
  10. astrbot/core/knowledge_base/chunking/recursive.py +10 -2
  11. astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +184 -174
  12. astrbot/core/pipeline/result_decorate/stage.py +65 -57
  13. astrbot/core/pipeline/waking_check/stage.py +29 -2
  14. astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py +15 -29
  15. astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +1 -6
  16. astrbot/core/platform/sources/dingtalk/dingtalk_event.py +15 -1
  17. astrbot/core/platform/sources/lark/lark_adapter.py +2 -10
  18. astrbot/core/platform/sources/misskey/misskey_adapter.py +0 -5
  19. astrbot/core/platform/sources/misskey/misskey_utils.py +0 -3
  20. astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +4 -9
  21. astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +4 -9
  22. astrbot/core/platform/sources/satori/satori_adapter.py +6 -1
  23. astrbot/core/platform/sources/slack/slack_adapter.py +3 -6
  24. astrbot/core/platform/sources/webchat/webchat_adapter.py +0 -1
  25. astrbot/core/platform/sources/wechatpadpro/wechatpadpro_adapter.py +3 -5
  26. astrbot/core/provider/entities.py +9 -1
  27. astrbot/core/provider/sources/anthropic_source.py +60 -3
  28. astrbot/core/provider/sources/gemini_source.py +37 -3
  29. astrbot/core/provider/sources/minimax_tts_api_source.py +4 -1
  30. astrbot/core/provider/sources/openai_source.py +25 -31
  31. astrbot/core/provider/sources/xai_source.py +29 -0
  32. astrbot/core/provider/sources/xinference_stt_provider.py +24 -12
  33. astrbot/core/star/star_manager.py +41 -0
  34. astrbot/core/utils/pip_installer.py +20 -1
  35. astrbot/dashboard/routes/backup.py +519 -15
  36. astrbot/dashboard/routes/config.py +45 -0
  37. astrbot/dashboard/server.py +1 -0
  38. {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/METADATA +1 -1
  39. {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/RECORD +42 -41
  40. {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/WHEEL +0 -0
  41. {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/entry_points.txt +0 -0
  42. {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/licenses/LICENSE +0 -0
astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py

@@ -6,6 +6,7 @@ import json
 from collections.abc import AsyncGenerator
 
 from astrbot.core import logger
+from astrbot.core.agent.message import Message
 from astrbot.core.agent.tool import ToolSet
 from astrbot.core.astr_agent_context import AstrAgentContext
 from astrbot.core.conversation_mgr import Conversation
@@ -294,6 +295,7 @@ class InternalAgentSubStage(Stage):
         event: AstrMessageEvent,
         req: ProviderRequest,
         llm_response: LLMResponse | None,
+        all_messages: list[Message],
     ):
         if (
             not req
@@ -307,31 +309,23 @@
             logger.debug("LLM 响应为空,不保存记录。")
             return
 
-        if req.contexts is None:
-            req.contexts = []
-
-        # 历史上下文
-        messages = copy.deepcopy(req.contexts)
-        # 这一轮对话请求的用户输入
-        messages.append(await req.assemble_context())
-        # 这一轮对话的 LLM 响应
-        if req.tool_calls_result:
-            if not isinstance(req.tool_calls_result, list):
-                messages.extend(req.tool_calls_result.to_openai_messages())
-            elif isinstance(req.tool_calls_result, list):
-                for tcr in req.tool_calls_result:
-                    messages.extend(tcr.to_openai_messages())
-        messages.append(
-            {
-                "role": "assistant",
-                "content": llm_response.completion_text or "*No response*",
-            }
-        )
-        messages = list(filter(lambda item: "_no_save" not in item, messages))
+        # using agent context messages to save to history
+        message_to_save = []
+        for message in all_messages:
+            if message.role == "system":
+                # we do not save system messages to history
+                continue
+            if message.role in ["assistant", "user"] and getattr(
+                message, "_no_save", None
+            ):
+                # we do not save user and assistant messages that are marked as _no_save
+                continue
+            message_to_save.append(message.model_dump())
+
         await self.conv_manager.update_conversation(
             event.unified_msg_origin,
             req.conversation.cid,
-            history=messages,
+            history=message_to_save,
         )
 
     def _fix_messages(self, messages: list[dict]) -> list[dict]:
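
The change above switches history persistence from replaying `req.contexts` to dumping the agent run's own message list, skipping system messages and anything flagged `_no_save`. A minimal, self-contained sketch of that filter in isolation (the `Message` class below is a stand-in for `astrbot.core.agent.message.Message`, assumed only to expose `role`, `model_dump()`, and an optional `_no_save` flag):

    # Stand-in Message type; only the attributes exercised by the filter are modeled.
    from dataclasses import dataclass

    @dataclass
    class Message:
        role: str
        content: str
        _no_save: bool = False

        def model_dump(self) -> dict:
            return {"role": self.role, "content": self.content}

    def filter_history(all_messages: list[Message]) -> list[dict]:
        saved = []
        for m in all_messages:
            if m.role == "system":
                continue  # system prompts are never persisted
            if m.role in ("user", "assistant") and getattr(m, "_no_save", None):
                continue  # transient turns opt out of the conversation history
            saved.append(m.model_dump())
        return saved

    print(filter_history([
        Message("system", "You are a bot."),
        Message("user", "hi"),
        Message("assistant", "hello", _no_save=True),
    ]))  # -> [{'role': 'user', 'content': 'hi'}]
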
@@ -355,174 +349,190 @@
     ) -> AsyncGenerator[None, None]:
         req: ProviderRequest | None = None
 
-        provider = self._select_provider(event)
-        if provider is None:
-            return
-        if not isinstance(provider, Provider):
-            logger.error(f"选择的提供商类型无效({type(provider)}),跳过 LLM 请求处理。")
-            return
-
-        streaming_response = self.streaming_response
-        if (enable_streaming := event.get_extra("enable_streaming")) is not None:
-            streaming_response = bool(enable_streaming)
-
-        logger.debug("ready to request llm provider")
-        async with session_lock_manager.acquire_lock(event.unified_msg_origin):
-            logger.debug("acquired session lock for llm request")
-            if event.get_extra("provider_request"):
-                req = event.get_extra("provider_request")
-                assert isinstance(req, ProviderRequest), (
-                    "provider_request 必须是 ProviderRequest 类型。"
+        try:
+            provider = self._select_provider(event)
+            if provider is None:
+                return
+            if not isinstance(provider, Provider):
+                logger.error(
+                    f"选择的提供商类型无效({type(provider)}),跳过 LLM 请求处理。"
                 )
+                return
 
-            if req.conversation:
-                req.contexts = json.loads(req.conversation.history)
-
-            else:
-                req = ProviderRequest()
-                req.prompt = ""
-                req.image_urls = []
-                if sel_model := event.get_extra("selected_model"):
-                    req.model = sel_model
-                if provider_wake_prefix and not event.message_str.startswith(
-                    provider_wake_prefix
-                ):
-                    return
-
-                req.prompt = event.message_str[len(provider_wake_prefix) :]
-                # func_tool selection 现在已经转移到 astrbot/builtin_stars/astrbot 插件中进行选择。
-                # req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()
-                for comp in event.message_obj.message:
-                    if isinstance(comp, Image):
-                        image_path = await comp.convert_to_file_path()
-                        req.image_urls.append(image_path)
-
-                conversation = await self._get_session_conv(event)
-                req.conversation = conversation
-                req.contexts = json.loads(conversation.history)
-
-            event.set_extra("provider_request", req)
-
-            # fix contexts json str
-            if isinstance(req.contexts, str):
-                req.contexts = json.loads(req.contexts)
-
-            # apply file extract
-            if self.file_extract_enabled:
-                try:
-                    await self._apply_file_extract(event, req)
-                except Exception as e:
-                    logger.error(f"Error occurred while applying file extract: {e}")
+            streaming_response = self.streaming_response
+            if (enable_streaming := event.get_extra("enable_streaming")) is not None:
+                streaming_response = bool(enable_streaming)
+
+            logger.debug("ready to request llm provider")
+            async with session_lock_manager.acquire_lock(event.unified_msg_origin):
+                logger.debug("acquired session lock for llm request")
+                if event.get_extra("provider_request"):
+                    req = event.get_extra("provider_request")
+                    assert isinstance(req, ProviderRequest), (
+                        "provider_request 必须是 ProviderRequest 类型。"
+                    )
 
-            if not req.prompt and not req.image_urls:
-                return
+                    if req.conversation:
+                        req.contexts = json.loads(req.conversation.history)
 
-            # call event hook
-            if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
-                return
+                else:
+                    req = ProviderRequest()
+                    req.prompt = ""
+                    req.image_urls = []
+                    if sel_model := event.get_extra("selected_model"):
+                        req.model = sel_model
+                    if provider_wake_prefix and not event.message_str.startswith(
+                        provider_wake_prefix
+                    ):
+                        return
+
+                    req.prompt = event.message_str[len(provider_wake_prefix) :]
+                    # func_tool selection 现在已经转移到 astrbot/builtin_stars/astrbot 插件中进行选择。
+                    # req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()
+                    for comp in event.message_obj.message:
+                        if isinstance(comp, Image):
+                            image_path = await comp.convert_to_file_path()
+                            req.image_urls.append(image_path)
+
+                    conversation = await self._get_session_conv(event)
+                    req.conversation = conversation
+                    req.contexts = json.loads(conversation.history)
+
+                event.set_extra("provider_request", req)
+
+                # fix contexts json str
+                if isinstance(req.contexts, str):
+                    req.contexts = json.loads(req.contexts)
+
+                # apply file extract
+                if self.file_extract_enabled:
+                    try:
+                        await self._apply_file_extract(event, req)
+                    except Exception as e:
+                        logger.error(f"Error occurred while applying file extract: {e}")
+
+                if not req.prompt and not req.image_urls:
+                    return
 
-            # apply knowledge base feature
-            await self._apply_kb(event, req)
+                # call event hook
+                if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
+                    return
 
-            # truncate contexts to fit max length
-            if req.contexts:
-                req.contexts = self._truncate_contexts(req.contexts)
-                self._fix_messages(req.contexts)
+                # apply knowledge base feature
+                await self._apply_kb(event, req)
 
-            # session_id
-            if not req.session_id:
-                req.session_id = event.unified_msg_origin
+                # truncate contexts to fit max length
+                if req.contexts:
+                    req.contexts = self._truncate_contexts(req.contexts)
+                    self._fix_messages(req.contexts)
 
-            # check provider modalities, if provider does not support image/tool_use, clear them in request.
-            self._modalities_fix(provider, req)
+                # session_id
+                if not req.session_id:
+                    req.session_id = event.unified_msg_origin
 
-            # filter tools, only keep tools from this pipeline's selected plugins
-            self._plugin_tool_fix(event, req)
+                # check provider modalities, if provider does not support image/tool_use, clear them in request.
+                self._modalities_fix(provider, req)
 
-            stream_to_general = (
-                self.unsupported_streaming_strategy == "turn_off"
-                and not event.platform_meta.support_streaming_message
-            )
-            # 备份 req.contexts
-            backup_contexts = copy.deepcopy(req.contexts)
+                # filter tools, only keep tools from this pipeline's selected plugins
+                self._plugin_tool_fix(event, req)
 
-            # run agent
-            agent_runner = AgentRunner()
-            logger.debug(
-                f"handle provider[id: {provider.provider_config['id']}] request: {req}",
-            )
-            astr_agent_ctx = AstrAgentContext(
-                context=self.ctx.plugin_manager.context,
-                event=event,
-            )
-            await agent_runner.reset(
-                provider=provider,
-                request=req,
-                run_context=AgentContextWrapper(
-                    context=astr_agent_ctx,
-                    tool_call_timeout=self.tool_call_timeout,
-                ),
-                tool_executor=FunctionToolExecutor(),
-                agent_hooks=MAIN_AGENT_HOOKS,
-                streaming=streaming_response,
-            )
+                stream_to_general = (
+                    self.unsupported_streaming_strategy == "turn_off"
+                    and not event.platform_meta.support_streaming_message
+                )
+                # 备份 req.contexts
+                backup_contexts = copy.deepcopy(req.contexts)
 
-            if streaming_response and not stream_to_general:
-                # 流式响应
-                event.set_result(
-                    MessageEventResult()
-                    .set_result_content_type(ResultContentType.STREAMING_RESULT)
-                    .set_async_stream(
-                        run_agent(
-                            agent_runner,
-                            self.max_step,
-                            self.show_tool_use,
-                            show_reasoning=self.show_reasoning,
-                        ),
+                # run agent
+                agent_runner = AgentRunner()
+                logger.debug(
+                    f"handle provider[id: {provider.provider_config['id']}] request: {req}",
+                )
+                astr_agent_ctx = AstrAgentContext(
+                    context=self.ctx.plugin_manager.context,
+                    event=event,
+                )
+                await agent_runner.reset(
+                    provider=provider,
+                    request=req,
+                    run_context=AgentContextWrapper(
+                        context=astr_agent_ctx,
+                        tool_call_timeout=self.tool_call_timeout,
                     ),
+                    tool_executor=FunctionToolExecutor(),
+                    agent_hooks=MAIN_AGENT_HOOKS,
+                    streaming=streaming_response,
                 )
-                yield
-                if agent_runner.done():
-                    if final_llm_resp := agent_runner.get_final_llm_resp():
-                        if final_llm_resp.completion_text:
-                            chain = (
-                                MessageChain()
-                                .message(final_llm_resp.completion_text)
-                                .chain
-                            )
-                        elif final_llm_resp.result_chain:
-                            chain = final_llm_resp.result_chain.chain
-                        else:
-                            chain = MessageChain().chain
-                        event.set_result(
-                            MessageEventResult(
-                                chain=chain,
-                                result_content_type=ResultContentType.STREAMING_FINISH,
+
+                if streaming_response and not stream_to_general:
+                    # 流式响应
+                    event.set_result(
+                        MessageEventResult()
+                        .set_result_content_type(ResultContentType.STREAMING_RESULT)
+                        .set_async_stream(
+                            run_agent(
+                                agent_runner,
+                                self.max_step,
+                                self.show_tool_use,
+                                show_reasoning=self.show_reasoning,
                             ),
-                        )
-            else:
-                async for _ in run_agent(
-                    agent_runner,
-                    self.max_step,
-                    self.show_tool_use,
-                    stream_to_general,
-                    show_reasoning=self.show_reasoning,
-                ):
+                        ),
+                    )
                     yield
+                    if agent_runner.done():
+                        if final_llm_resp := agent_runner.get_final_llm_resp():
+                            if final_llm_resp.completion_text:
+                                chain = (
+                                    MessageChain()
+                                    .message(final_llm_resp.completion_text)
+                                    .chain
+                                )
+                            elif final_llm_resp.result_chain:
+                                chain = final_llm_resp.result_chain.chain
+                            else:
+                                chain = MessageChain().chain
+                            event.set_result(
+                                MessageEventResult(
+                                    chain=chain,
+                                    result_content_type=ResultContentType.STREAMING_FINISH,
+                                ),
+                            )
+                else:
+                    async for _ in run_agent(
+                        agent_runner,
+                        self.max_step,
+                        self.show_tool_use,
+                        stream_to_general,
+                        show_reasoning=self.show_reasoning,
+                    ):
+                        yield
+
+                # 恢复备份的 contexts
+                req.contexts = backup_contexts
+
+                await self._save_to_history(
+                    event,
+                    req,
+                    agent_runner.get_final_llm_resp(),
+                    agent_runner.run_context.messages,
+                )
 
-            # 恢复备份的 contexts
-            req.contexts = backup_contexts
-
-            await self._save_to_history(event, req, agent_runner.get_final_llm_resp())
+                # 异步处理 WebChat 特殊情况
+                if event.get_platform_name() == "webchat":
+                    asyncio.create_task(self._handle_webchat(event, req, provider))
 
-            # 异步处理 WebChat 特殊情况
-            if event.get_platform_name() == "webchat":
-                asyncio.create_task(self._handle_webchat(event, req, provider))
+                asyncio.create_task(
+                    Metric.upload(
+                        llm_tick=1,
+                        model_name=agent_runner.provider.get_model(),
+                        provider_type=agent_runner.provider.meta().type,
+                    ),
+                )
 
-            asyncio.create_task(
-                Metric.upload(
-                    llm_tick=1,
-                    model_name=agent_runner.provider.get_model(),
-                    provider_type=agent_runner.provider.meta().type,
-                ),
-            )
+        except Exception as e:
+            logger.error(f"Error occurred while processing agent: {e}")
+            await event.send(
+                MessageChain().message(
+                    f"Error occurred while processing agent request: {e}"
+                )
+            )
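
The structural change in this hunk is that the entire provider/agent flow now runs inside one try block, and any exception is both logged and sent back to the chat. A reduced sketch of that pattern with stand-in objects (only the shape mirrors the real `process()` coroutine; `FakeEvent` and the simulated failure are illustrative):

    import asyncio
    import logging

    logger = logging.getLogger("sketch")

    class FakeEvent:
        async def send(self, text: str) -> None:
            print("sent to chat:", text)

    async def process(event: FakeEvent) -> None:
        try:
            raise RuntimeError("provider timed out")  # simulate any failure inside the agent loop
        except Exception as e:
            logger.error(f"Error occurred while processing agent: {e}")
            await event.send(f"Error occurred while processing agent request: {e}")

    asyncio.run(process(FakeEvent()))
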
astrbot/core/pipeline/result_decorate/stage.py

@@ -98,6 +98,9 @@ class ResultDecorateStage(Stage):
             self.content_safe_check_stage = stage_cls()
             await self.content_safe_check_stage.initialize(ctx)
 
+        provider_cfg = ctx.astrbot_config.get("provider_settings", {})
+        self.show_reasoning = provider_cfg.get("display_reasoning_text", False)
+
     def _split_text_by_words(self, text: str) -> list[str]:
         """使用分段词列表分段文本"""
        if not self.split_words_pattern:
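
For context, the new `display_reasoning_text` flag lives under `provider_settings`; a hypothetical config fragment showing the lookup and its default (key names match the diff, values are illustrative):

    astrbot_config = {
        "provider_settings": {
            "display_reasoning_text": True,
        },
    }

    provider_cfg = astrbot_config.get("provider_settings", {})
    show_reasoning = provider_cfg.get("display_reasoning_text", False)  # False when the key is absent
    print(show_reasoning)  # True
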
@@ -254,70 +257,75 @@
             event.unified_msg_origin,
         )
 
-        if (
-            self.ctx.astrbot_config["provider_tts_settings"]["enable"]
+        should_tts = (
+            bool(self.ctx.astrbot_config["provider_tts_settings"]["enable"])
             and result.is_llm_result()
             and SessionServiceManager.should_process_tts_request(event)
-        ):
-            should_tts = self.tts_trigger_probability >= 1.0 or (
-                self.tts_trigger_probability > 0.0
-                and random.random() <= self.tts_trigger_probability
+            and random.random() <= self.tts_trigger_probability
+            and tts_provider
+        )
+        if should_tts and not tts_provider:
+            logger.warning(
+                f"会话 {event.unified_msg_origin} 未配置文本转语音模型。",
            )
 
-            if not should_tts:
-                logger.debug("跳过 TTS:触发概率未命中。")
-            elif not tts_provider:
-                logger.warning(
-                    f"会话 {event.unified_msg_origin} 未配置文本转语音模型。",
-                )
-            else:
-                new_chain = []
-                for comp in result.chain:
-                    if isinstance(comp, Plain) and len(comp.text) > 1:
-                        try:
-                            logger.info(f"TTS 请求: {comp.text}")
-                            audio_path = await tts_provider.get_audio(comp.text)
-                            logger.info(f"TTS 结果: {audio_path}")
-                            if not audio_path:
-                                logger.error(
-                                    f"由于 TTS 音频文件未找到,消息段转语音失败: {comp.text}",
-                                )
-                                new_chain.append(comp)
-                                continue
-
-                            use_file_service = self.ctx.astrbot_config[
-                                "provider_tts_settings"
-                            ]["use_file_service"]
-                            callback_api_base = self.ctx.astrbot_config[
-                                "callback_api_base"
-                            ]
-                            dual_output = self.ctx.astrbot_config[
-                                "provider_tts_settings"
-                            ]["dual_output"]
-
-                            url = None
-                            if use_file_service and callback_api_base:
-                                token = await file_token_service.register_file(
-                                    audio_path,
-                                )
-                                url = f"{callback_api_base}/api/file/{token}"
-                                logger.debug(f"已注册:{url}")
-
-                            new_chain.append(
-                                Record(
-                                    file=url or audio_path,
-                                    url=url or audio_path,
-                                ),
+        if (
+            not should_tts
+            and self.show_reasoning
+            and event.get_extra("_llm_reasoning_content")
+        ):
+            # inject reasoning content to chain
+            reasoning_content = event.get_extra("_llm_reasoning_content")
+            result.chain.insert(0, Plain(f"🤔 思考: {reasoning_content}\n"))
+
+        if should_tts and tts_provider:
+            new_chain = []
+            for comp in result.chain:
+                if isinstance(comp, Plain) and len(comp.text) > 1:
+                    try:
+                        logger.info(f"TTS 请求: {comp.text}")
+                        audio_path = await tts_provider.get_audio(comp.text)
+                        logger.info(f"TTS 结果: {audio_path}")
+                        if not audio_path:
+                            logger.error(
+                                f"由于 TTS 音频文件未找到,消息段转语音失败: {comp.text}",
                             )
-                                if dual_output:
-                                    new_chain.append(comp)
-                            except Exception:
-                                logger.error(traceback.format_exc())
-                                logger.error("TTS 失败,使用文本发送。")
                             new_chain.append(comp)
-                    else:
+                            continue
+
+                        use_file_service = self.ctx.astrbot_config[
+                            "provider_tts_settings"
+                        ]["use_file_service"]
+                        callback_api_base = self.ctx.astrbot_config[
+                            "callback_api_base"
+                        ]
+                        dual_output = self.ctx.astrbot_config[
+                            "provider_tts_settings"
+                        ]["dual_output"]
+
+                        url = None
+                        if use_file_service and callback_api_base:
+                            token = await file_token_service.register_file(
+                                audio_path,
+                            )
+                            url = f"{callback_api_base}/api/file/{token}"
+                            logger.debug(f"已注册:{url}")
+
+                        new_chain.append(
+                            Record(
+                                file=url or audio_path,
+                                url=url or audio_path,
+                            ),
+                        )
+                        if dual_output:
+                            new_chain.append(comp)
+                    except Exception:
+                        logger.error(traceback.format_exc())
+                        logger.error("TTS 失败,使用文本发送。")
                         new_chain.append(comp)
-            result.chain = new_chain
+                else:
+                    new_chain.append(comp)
+            result.chain = new_chain
 
         # 文本转图片
         elif (
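
The TTS refactor above folds the enable flag, the LLM-result check, the session check, the probability roll, and the provider lookup into one `should_tts` expression, and injects the reasoning text at the head of the chain when TTS is skipped. A reduced sketch of the gate with illustrative values (the real inputs come from `astrbot_config`, `SessionServiceManager`, and the event, as in the diff):

    import random

    tts_enabled = True
    is_llm_result = True
    session_allows_tts = True
    tts_trigger_probability = 0.3    # chance that a given reply is voiced
    tts_provider = object()          # stand-in; None would mean "no TTS provider configured"

    should_tts = (
        tts_enabled
        and is_llm_result
        and session_allows_tts
        and random.random() <= tts_trigger_probability
        and tts_provider
    )
    print(bool(should_tts))  # True on roughly 30% of runs
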
astrbot/core/pipeline/waking_check/stage.py

@@ -1,9 +1,10 @@
-from collections.abc import AsyncGenerator
+from collections.abc import AsyncGenerator, Callable
 
 from astrbot import logger
 from astrbot.core.message.components import At, AtAll, Reply
 from astrbot.core.message.message_event_result import MessageChain, MessageEventResult
 from astrbot.core.platform.astr_message_event import AstrMessageEvent
+from astrbot.core.platform.message_type import MessageType
 from astrbot.core.star.filter.command_group import CommandGroupFilter
 from astrbot.core.star.filter.permission import PermissionTypeFilter
 from astrbot.core.star.session_plugin_manager import SessionPluginManager
@@ -13,6 +14,23 @@ from astrbot.core.star.star_handler import EventType, star_handlers_registry
 from ..context import PipelineContext
 from ..stage import Stage, register_stage
 
+UNIQUE_SESSION_ID_BUILDERS: dict[str, Callable[[AstrMessageEvent], str | None]] = {
+    "aiocqhttp": lambda e: f"{e.get_sender_id()}_{e.get_group_id()}",
+    "slack": lambda e: f"{e.get_sender_id()}_{e.get_group_id()}",
+    "dingtalk": lambda e: e.get_sender_id(),
+    "qq_official": lambda e: e.get_sender_id(),
+    "qq_official_webhook": lambda e: e.get_sender_id(),
+    "lark": lambda e: f"{e.get_sender_id()}%{e.get_group_id()}",
+    "misskey": lambda e: f"{e.get_session_id()}_{e.get_sender_id()}",
+    "wechatpadpro": lambda e: f"{e.get_group_id()}#{e.get_sender_id()}",
+}
+
+
+def build_unique_session_id(event: AstrMessageEvent) -> str | None:
+    platform = event.get_platform_name()
+    builder = UNIQUE_SESSION_ID_BUILDERS.get(platform)
+    return builder(event) if builder else None
+
 
 @register_stage
 class WakingCheckStage(Stage):
@@ -53,18 +71,27 @@ class WakingCheckStage(Stage):
         self.disable_builtin_commands = self.ctx.astrbot_config.get(
             "disable_builtin_commands", False
         )
+        platform_settings = self.ctx.astrbot_config.get("platform_settings", {})
+        self.unique_session = platform_settings.get("unique_session", False)
 
     async def process(
         self,
         event: AstrMessageEvent,
     ) -> None | AsyncGenerator[None, None]:
+        # apply unique session
+        if self.unique_session and event.message_obj.type == MessageType.GROUP_MESSAGE:
+            sid = build_unique_session_id(event)
+            if sid:
+                event.session_id = sid
+
+        # ignore bot self message
         if (
             self.ignore_bot_self_message
             and event.get_self_id() == event.get_sender_id()
         ):
-            # 忽略机器人自己发送的消息
             event.stop_event()
             return
+
         # 设置 sender 身份
         event.message_str = event.message_str.strip()
         for admin_id in self.ctx.astrbot_config["admins_id"]:
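
A usage sketch for `build_unique_session_id` as added above, with a stand-in event object (only two of the platform builders are reproduced here; the accessor names follow the diff, while `FakeEvent` is illustrative):

    from collections.abc import Callable

    class FakeEvent:
        def __init__(self, platform: str, sender: str, group: str):
            self._platform, self._sender, self._group = platform, sender, group

        def get_platform_name(self) -> str:
            return self._platform

        def get_sender_id(self) -> str:
            return self._sender

        def get_group_id(self) -> str:
            return self._group

    UNIQUE_SESSION_ID_BUILDERS: dict[str, Callable[[FakeEvent], str | None]] = {
        "aiocqhttp": lambda e: f"{e.get_sender_id()}_{e.get_group_id()}",
        "lark": lambda e: f"{e.get_sender_id()}%{e.get_group_id()}",
    }

    def build_unique_session_id(event: FakeEvent) -> str | None:
        builder = UNIQUE_SESSION_ID_BUILDERS.get(event.get_platform_name())
        return builder(event) if builder else None

    print(build_unique_session_id(FakeEvent("aiocqhttp", "12345", "67890")))  # 12345_67890
    print(build_unique_session_id(FakeEvent("telegram", "u1", "g1")))         # None: platform not mapped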