AstrBot 4.9.2-py3-none-any.whl → 4.10.0-py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear there.
Files changed (43)
  1. astrbot/cli/__init__.py +1 -1
  2. astrbot/core/agent/message.py +6 -4
  3. astrbot/core/agent/response.py +22 -1
  4. astrbot/core/agent/run_context.py +1 -1
  5. astrbot/core/agent/runners/tool_loop_agent_runner.py +99 -20
  6. astrbot/core/astr_agent_context.py +3 -1
  7. astrbot/core/astr_agent_run_util.py +42 -3
  8. astrbot/core/astr_agent_tool_exec.py +34 -4
  9. astrbot/core/config/default.py +127 -184
  10. astrbot/core/core_lifecycle.py +3 -0
  11. astrbot/core/db/__init__.py +72 -0
  12. astrbot/core/db/po.py +59 -0
  13. astrbot/core/db/sqlite.py +240 -0
  14. astrbot/core/message/components.py +4 -5
  15. astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +6 -1
  16. astrbot/core/pipeline/respond/stage.py +1 -1
  17. astrbot/core/platform/sources/telegram/tg_event.py +9 -0
  18. astrbot/core/platform/sources/webchat/webchat_event.py +22 -18
  19. astrbot/core/provider/entities.py +41 -0
  20. astrbot/core/provider/manager.py +203 -93
  21. astrbot/core/provider/sources/anthropic_source.py +55 -11
  22. astrbot/core/provider/sources/gemini_source.py +84 -33
  23. astrbot/core/provider/sources/openai_source.py +21 -6
  24. astrbot/core/star/command_management.py +449 -0
  25. astrbot/core/star/context.py +4 -0
  26. astrbot/core/star/filter/command.py +1 -0
  27. astrbot/core/star/filter/command_group.py +1 -0
  28. astrbot/core/star/star_handler.py +4 -0
  29. astrbot/core/star/star_manager.py +2 -0
  30. astrbot/core/utils/llm_metadata.py +63 -0
  31. astrbot/core/utils/migra_helper.py +93 -0
  32. astrbot/dashboard/routes/__init__.py +2 -0
  33. astrbot/dashboard/routes/chat.py +56 -13
  34. astrbot/dashboard/routes/command.py +82 -0
  35. astrbot/dashboard/routes/config.py +291 -33
  36. astrbot/dashboard/routes/stat.py +96 -0
  37. astrbot/dashboard/routes/tools.py +20 -4
  38. astrbot/dashboard/server.py +1 -0
  39. {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/METADATA +2 -2
  40. {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/RECORD +43 -40
  41. {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/WHEEL +0 -0
  42. {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/entry_points.txt +0 -0
  43. {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/licenses/LICENSE +0 -0
astrbot/core/provider/sources/gemini_source.py

@@ -14,7 +14,7 @@ import astrbot.core.message.components as Comp
 from astrbot import logger
 from astrbot.api.provider import Provider
 from astrbot.core.message.message_event_result import MessageChain
-from astrbot.core.provider.entities import LLMResponse
+from astrbot.core.provider.entities import LLMResponse, TokenUsage
 from astrbot.core.provider.func_tool_manager import ToolSet
 from astrbot.core.utils.io import download_image_by_url
 
@@ -68,7 +68,7 @@ class ProviderGoogleGenAI(Provider):
             self.api_base = self.api_base[:-1]
 
         self._init_client()
-        self.set_model(provider_config["model_config"]["model"])
+        self.set_model(provider_config.get("model", "unknown"))
         self._init_safety_settings()
 
     def _init_client(self) -> None:
@@ -138,7 +138,7 @@ class ProviderGoogleGenAI(Provider):
         modalities = ["TEXT"]
 
         tool_list: list[types.Tool] | None = []
-        model_name = self.get_model()
+        model_name = cast(str, payloads.get("model", self.get_model()))
         native_coderunner = self.provider_config.get("gm_native_coderunner", False)
         native_search = self.provider_config.get("gm_native_search", False)
         url_context = self.provider_config.get("gm_url_context", False)
@@ -197,6 +197,53 @@ class ProviderGoogleGenAI(Provider):
                 types.Tool(function_declarations=func_desc["function_declarations"]),
             ]
 
+        # thinking config
+        thinking_config = None
+        if model_name in [
+            "gemini-2.5-pro",
+            "gemini-2.5-pro-preview",
+            "gemini-2.5-flash",
+            "gemini-2.5-flash-preview",
+            "gemini-2.5-flash-lite",
+            "gemini-2.5-flash-lite-preview",
+            "gemini-robotics-er-1.5-preview",
+            "gemini-live-2.5-flash-preview-native-audio-09-2025",
+        ]:
+            # The thinkingBudget parameter, introduced with the Gemini 2.5 series
+            thinking_budget = self.provider_config.get("gm_thinking_config", {}).get(
+                "budget", 0
+            )
+            if thinking_budget is not None:
+                thinking_config = types.ThinkingConfig(
+                    thinking_budget=thinking_budget,
+                )
+        elif model_name in [
+            "gemini-3-pro",
+            "gemini-3-pro-preview",
+            "gemini-3-flash",
+            "gemini-3-flash-preview",
+            "gemini-3-flash-lite",
+            "gemini-3-flash-lite-preview",
+        ]:
+            # The thinkingLevel parameter, recommended for Gemini 3 models and onwards.
+            # Gemini 2.5 series models don't support thinkingLevel; use thinkingBudget instead.
+            thinking_level = self.provider_config.get("gm_thinking_config", {}).get(
+                "level", "HIGH"
+            )
+            if thinking_level and isinstance(thinking_level, str):
+                thinking_level = thinking_level.upper()
+            if thinking_level not in ["MINIMAL", "LOW", "MEDIUM", "HIGH"]:
+                logger.warning(
+                    f"Invalid thinking level: {thinking_level}, using HIGH"
+                )
+                thinking_level = "HIGH"
+            level = types.ThinkingLevel(thinking_level)
+            thinking_config = types.ThinkingConfig()
+            if not hasattr(types.ThinkingConfig, "thinking_level"):
+                setattr(types.ThinkingConfig, "thinking_level", level)
+            else:
+                thinking_config.thinking_level = level
+
         return types.GenerateContentConfig(
             system_instruction=system_instruction,
             temperature=temperature,
@@ -216,22 +263,7 @@ class ProviderGoogleGenAI(Provider):
             response_modalities=modalities,
             tools=cast(types.ToolListUnion | None, tool_list),
             safety_settings=self.safety_settings if self.safety_settings else None,
-            thinking_config=(
-                types.ThinkingConfig(
-                    thinking_budget=min(
-                        int(
-                            self.provider_config.get("gm_thinking_config", {}).get(
-                                "budget",
-                                0,
-                            ),
-                        ),
-                        24576,
-                    ),
-                )
-                if "gemini-2.5-flash" in self.get_model()
-                and hasattr(types.ThinkingConfig, "thinking_budget")
-                else None
-            ),
+            thinking_config=thinking_config,
             automatic_function_calling=types.AutomaticFunctionCallingConfig(
                 disable=True,
             ),
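Taken together, this hunk pair replaces the old inline ternary (which only ever enabled thinking for gemini-2.5-flash and capped the budget at 24576) with the model-aware block above. A minimal sketch of the new selection logic follows, with plain dicts standing in for google-genai's types.ThinkingConfig; the helper name, the trimmed model sets, and the dict output are illustrative, not AstrBot API:

# Sketch: pick thinkingBudget (Gemini 2.5 series) or thinkingLevel (Gemini 3
# series and onwards), mirroring the branching in the diff above. Model sets
# are trimmed; the real code also matches the *-preview variants.
GEMINI_25 = {"gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.5-flash-lite"}
GEMINI_3 = {"gemini-3-pro", "gemini-3-flash", "gemini-3-flash-lite"}
VALID_LEVELS = {"MINIMAL", "LOW", "MEDIUM", "HIGH"}

def select_thinking_config(model_name: str, gm_thinking_config: dict) -> dict | None:
    if model_name in GEMINI_25:
        budget = gm_thinking_config.get("budget", 0)
        # an explicit budget of None means "omit the config", as in the diff
        return None if budget is None else {"thinking_budget": budget}
    if model_name in GEMINI_3:
        level = str(gm_thinking_config.get("level", "HIGH")).upper()
        if level not in VALID_LEVELS:
            level = "HIGH"  # the provider logs a warning and falls back
        return {"thinking_level": level}
    return None  # any other model: no thinking config at all

assert select_thinking_config("gemini-2.5-flash", {"budget": 1024}) == {"thinking_budget": 1024}
assert select_thinking_config("gemini-3-pro", {"level": "low"}) == {"thinking_level": "LOW"}
assert select_thinking_config("gemini-1.5-pro", {}) is None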
@@ -347,6 +379,16 @@ class ProviderGoogleGenAI(Provider):
         ]
         return "".join(thought_buf).strip()
 
+    def _extract_usage(
+        self, usage_metadata: types.GenerateContentResponseUsageMetadata
+    ) -> TokenUsage:
+        """Extract token usage from the response usage metadata."""
+        return TokenUsage(
+            input_other=usage_metadata.prompt_token_count or 0,
+            input_cached=usage_metadata.cached_content_token_count or 0,
+            output=usage_metadata.candidates_token_count or 0,
+        )
+
     def _process_content_parts(
         self,
         candidate: types.Candidate,
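TokenUsage is new in astrbot.core.provider.entities (file 19 in the list above, +41 lines). Its definition is not part of this section, so here is a hypothetical stand-in with only the three fields the diff populates, to pin down the intended semantics:

from dataclasses import dataclass

@dataclass
class TokenUsage:  # hypothetical stand-in, not the real entity
    input_other: int = 0   # prompt tokens not served from cache
    input_cached: int = 0  # prompt tokens served from a provider-side cache
    output: int = 0        # generated (candidate/completion) tokens

    @property
    def input_total(self) -> int:  # illustrative helper, not in the source
        return self.input_other + self.input_cached

One caveat worth noting: Gemini's prompt_token_count is documented as including cached content tokens, so this mapping appears to count the cached share in both input_other and input_cached, whereas the OpenAI _extract_usage later in this diff subtracts the cached share from prompt_tokens first.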
@@ -431,6 +473,8 @@ class ProviderGoogleGenAI(Provider):
             None,
         )
 
+        model = payloads.get("model", self.get_model())
+
         modalities = ["TEXT"]
         if self.provider_config.get("gm_resp_image_modal", False):
             modalities.append("IMAGE")
@@ -449,7 +493,7 @@ class ProviderGoogleGenAI(Provider):
                     temperature,
                 )
                 result = await self.client.models.generate_content(
-                    model=self.get_model(),
+                    model=model,
                     contents=cast(types.ContentListUnion, conversation),
                     config=config,
                 )
@@ -475,11 +519,11 @@ class ProviderGoogleGenAI(Provider):
                     e.message = ""
                 if "Developer instruction is not enabled" in e.message:
                     logger.warning(
-                        f"{self.get_model()} does not support system prompts; removed automatically (affects persona settings)",
+                        f"{model} does not support system prompts; removed automatically (affects persona settings)",
                     )
                     system_instruction = None
                 elif "Function calling is not enabled" in e.message:
-                    logger.warning(f"{self.get_model()} does not support function calling; removed automatically")
+                    logger.warning(f"{model} does not support function calling; removed automatically")
                     tools = None
                 elif (
                     "Multi-modal output is not supported" in e.message
@@ -488,7 +532,7 @@ class ProviderGoogleGenAI(Provider):
                     or "only supports text output" in e.message
                 ):
                     logger.warning(
-                        f"{self.get_model()} does not support multi-modal output; falling back to text only",
+                        f"{model} does not support multi-modal output; falling back to text only",
                     )
                     modalities = ["TEXT"]
                 else:
@@ -501,6 +545,9 @@ class ProviderGoogleGenAI(Provider):
             result.candidates[0],
             llm_response,
         )
+        llm_response.id = result.response_id
+        if result.usage_metadata:
+            llm_response.usage = self._extract_usage(result.usage_metadata)
         return llm_response
 
     async def _query_stream(
  async def _query_stream(
@@ -513,7 +560,7 @@ class ProviderGoogleGenAI(Provider):
513
560
  (msg["content"] for msg in payloads["messages"] if msg["role"] == "system"),
514
561
  None,
515
562
  )
516
-
563
+ model = payloads.get("model", self.get_model())
517
564
  conversation = self._prepare_conversation(payloads)
518
565
 
519
566
  result = None
@@ -525,7 +572,7 @@ class ProviderGoogleGenAI(Provider):
                     system_instruction,
                 )
                 result = await self.client.models.generate_content_stream(
-                    model=self.get_model(),
+                    model=model,
                     contents=cast(types.ContentListUnion, conversation),
                     config=config,
                 )
@@ -535,11 +582,11 @@ class ProviderGoogleGenAI(Provider):
                     e.message = ""
                 if "Developer instruction is not enabled" in e.message:
                     logger.warning(
-                        f"{self.get_model()} does not support system prompts; removed automatically (affects persona settings)",
+                        f"{model} does not support system prompts; removed automatically (affects persona settings)",
                     )
                     system_instruction = None
                 elif "Function calling is not enabled" in e.message:
-                    logger.warning(f"{self.get_model()} does not support function calling; removed automatically")
+                    logger.warning(f"{model} does not support function calling; removed automatically")
                     tools = None
                 else:
                     raise
@@ -569,6 +616,9 @@ class ProviderGoogleGenAI(Provider):
                     chunk.candidates[0],
                     llm_response,
                 )
+                llm_response.id = chunk.response_id
+                if chunk.usage_metadata:
+                    llm_response.usage = self._extract_usage(chunk.usage_metadata)
                 yield llm_response
                 return
 
@@ -596,6 +646,9 @@ class ProviderGoogleGenAI(Provider):
                     chunk.candidates[0],
                     final_response,
                 )
+                final_response.id = chunk.response_id
+                if chunk.usage_metadata:
+                    final_response.usage = self._extract_usage(chunk.usage_metadata)
                 break
 
         # Yield final complete response with accumulated text
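In both streaming paths the id/usage assignment runs on every chunk that carries usage_metadata, so the retained value is whichever chunk reported it last; with the Gemini streaming API that is typically the final chunk's totals. A minimal illustration of that last-write-wins pattern, with plain dicts standing in for streamed chunks and illustrative numbers:

chunks = [
    {"text": "Hel", "usage": None},
    {"text": "lo", "usage": None},
    {"text": "", "usage": {"input": 12, "output": 5}},  # final chunk: totals
]
final_usage = None
for chunk in chunks:
    if chunk["usage"]:
        final_usage = chunk["usage"]  # overwrite, exactly as the diff does
assert final_usage == {"input": 12, "output": 5}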
@@ -652,10 +705,9 @@ class ProviderGoogleGenAI(Provider):
         for tcr in tool_calls_result:
             context_query.extend(tcr.to_openai_messages())
 
-        model_config = self.provider_config.get("model_config", {})
-        model_config["model"] = model or self.get_model()
+        model = model or self.get_model()
 
-        payloads = {"messages": context_query, **model_config}
+        payloads = {"messages": context_query, "model": model}
 
         retry = 10
         keys = self.api_keys.copy()
@@ -705,10 +757,9 @@ class ProviderGoogleGenAI(Provider):
         for tcr in tool_calls_result:
             context_query.extend(tcr.to_openai_messages())
 
-        model_config = self.provider_config.get("model_config", {})
-        model_config["model"] = model or self.get_model()
+        model = model or self.get_model()
 
-        payloads = {"messages": context_query, **model_config}
+        payloads = {"messages": context_query, "model": model}
 
         retry = 10
         keys = self.api_keys.copy()
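Both chat paths receive the same two-line change: the payload no longer splats the whole model_config dict (which could carry arbitrary generation settings) but only the resolved model name. A before/after sketch with illustrative values; the temperature key stands for whatever extras model_config used to carry:

context_query = [{"role": "user", "content": "hello"}]
model = "gemini-2.5-flash"  # illustrative

# 4.9.2: the whole model_config dict was merged into the payload
model_config = {"model": model, "temperature": 0.7}
payloads_old = {"messages": context_query, **model_config}

# 4.10.0: only the resolved model travels with the messages
payloads_new = {"messages": context_query, "model": model}

assert "temperature" not in payloads_new and payloads_new["model"] == model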
astrbot/core/provider/sources/openai_source.py

@@ -12,6 +12,7 @@ from openai._exceptions import NotFoundError
 from openai.lib.streaming.chat._completions import ChatCompletionStreamState
 from openai.types.chat.chat_completion import ChatCompletion
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+from openai.types.completion_usage import CompletionUsage
 
 import astrbot.core.message.components as Comp
 from astrbot import logger
@@ -19,7 +20,7 @@ from astrbot.api.provider import Provider
 from astrbot.core.agent.message import Message
 from astrbot.core.agent.tool import ToolSet
 from astrbot.core.message.message_event_result import MessageChain
-from astrbot.core.provider.entities import LLMResponse, ToolCallsResult
+from astrbot.core.provider.entities import LLMResponse, TokenUsage, ToolCallsResult
 from astrbot.core.utils.io import download_image_by_url
 
 from ..register import register_provider_adapter
@@ -68,8 +69,7 @@ class ProviderOpenAIOfficial(Provider):
             self.client.chat.completions.create,
         ).parameters.keys()
 
-        model_config = provider_config.get("model_config", {})
-        model = model_config.get("model", "unknown")
+        model = provider_config.get("model", "unknown")
         self.set_model(model)
 
         self.reasoning_key = "reasoning_content"
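Together with the set_model change in the Gemini hunk near the top, this flattens the provider config: 4.9.2 read provider_config["model_config"]["model"], 4.10.0 reads a top-level "model" key (the new migra_helper.py in the file list plausibly migrates old configs, though that file is not shown here). A tolerant reader covering both shapes, as a sketch; the helper and its fallback order are illustrative, not AstrBot API:

def resolve_model(provider_config: dict) -> str:
    # prefer the new flat key, fall back to the old nested shape
    if "model" in provider_config:
        return provider_config["model"]
    return provider_config.get("model_config", {}).get("model", "unknown")

assert resolve_model({"model": "gpt-4o"}) == "gpt-4o"                     # 4.10.0 shape
assert resolve_model({"model_config": {"model": "gpt-4o"}}) == "gpt-4o"   # 4.9.2 shape
assert resolve_model({}) == "unknown"                                     # default, as in the diff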
@@ -208,6 +208,7 @@ class ProviderOpenAIOfficial(Provider):
             # handle the content delta
             reasoning = self._extract_reasoning_content(chunk)
             _y = False
+            llm_response.id = chunk.id
             if reasoning:
                 llm_response.reasoning_content = reasoning
                 _y = True
@@ -217,6 +218,8 @@ class ProviderOpenAIOfficial(Provider):
                     chain=[Comp.Plain(completion_text)],
                 )
                 _y = True
+            if chunk.usage:
+                llm_response.usage = self._extract_usage(chunk.usage)
             if _y:
                 yield llm_response
 
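For chunk.usage to be populated at all, the request normally has to opt in: in the OpenAI streaming API the usage object arrives only on the final chunk, and only when stream_options asks for it. A sketch of that opt-in; the client setup and model name are illustrative:

from openai import AsyncOpenAI

async def stream_with_usage() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "hi"}],
        stream=True,
        stream_options={"include_usage": True},  # final chunk then carries .usage
    )
    async for chunk in stream:
        if chunk.usage:  # the code above maps this into TokenUsage
            print(chunk.usage.prompt_tokens, chunk.usage.completion_tokens)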
@@ -245,6 +248,15 @@ class ProviderOpenAIOfficial(Provider):
                 reasoning_text = str(reasoning_attr)
         return reasoning_text
 
+    def _extract_usage(self, usage: CompletionUsage) -> TokenUsage:
+        ptd = usage.prompt_tokens_details
+        cached = ptd.cached_tokens if ptd and ptd.cached_tokens else 0
+        return TokenUsage(
+            input_other=usage.prompt_tokens - cached,
+            input_cached=cached,
+            output=usage.completion_tokens,
+        )
+
     async def _parse_openai_completion(
         self, completion: ChatCompletion, tools: ToolSet | None
     ) -> LLMResponse:
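A worked check of this mapping with the real openai usage types and illustrative numbers. OpenAI reports prompt_tokens inclusive of the cached share, so the non-cached portion is the difference:

from openai.types.completion_usage import CompletionUsage, PromptTokensDetails

usage = CompletionUsage(
    prompt_tokens=1200,    # includes the 800 cached tokens
    completion_tokens=150,
    total_tokens=1350,
    prompt_tokens_details=PromptTokensDetails(cached_tokens=800),
)
ptd = usage.prompt_tokens_details
cached = ptd.cached_tokens if ptd and ptd.cached_tokens else 0
# input_other = 1200 - 800 = 400, input_cached = 800, output = 150
assert (usage.prompt_tokens - cached, cached, usage.completion_tokens) == (400, 800, 150)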
@@ -321,6 +333,10 @@ class ProviderOpenAIOfficial(Provider):
             raise Exception(f"Unable to parse the completion returned by the API: {completion}.")
 
         llm_response.raw_completion = completion
+        llm_response.id = completion.id
+
+        if completion.usage:
+            llm_response.usage = self._extract_usage(completion.usage)
 
         return llm_response
 
@@ -358,10 +374,9 @@ class ProviderOpenAIOfficial(Provider):
         for tcr in tool_calls_result:
             context_query.extend(tcr.to_openai_messages())
 
-        model_config = self.provider_config.get("model_config", {})
-        model_config["model"] = model or self.get_model()
+        model = model or self.get_model()
 
-        payloads = {"messages": context_query, **model_config}
+        payloads = {"messages": context_query, "model": model}
 
         # xAI origin search tool inject
         self._maybe_inject_xai_search(payloads, **kwargs)