AstrBot 4.9.2__py3-none-any.whl → 4.10.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/cli/__init__.py +1 -1
- astrbot/core/agent/message.py +6 -4
- astrbot/core/agent/response.py +22 -1
- astrbot/core/agent/run_context.py +1 -1
- astrbot/core/agent/runners/tool_loop_agent_runner.py +54 -15
- astrbot/core/astr_agent_context.py +3 -1
- astrbot/core/astr_agent_run_util.py +23 -2
- astrbot/core/config/default.py +127 -184
- astrbot/core/core_lifecycle.py +3 -0
- astrbot/core/db/__init__.py +72 -0
- astrbot/core/db/po.py +59 -0
- astrbot/core/db/sqlite.py +240 -0
- astrbot/core/message/components.py +4 -5
- astrbot/core/pipeline/respond/stage.py +1 -1
- astrbot/core/platform/sources/telegram/tg_event.py +9 -0
- astrbot/core/platform/sources/webchat/webchat_event.py +22 -18
- astrbot/core/provider/entities.py +41 -0
- astrbot/core/provider/manager.py +203 -93
- astrbot/core/provider/sources/anthropic_source.py +55 -11
- astrbot/core/provider/sources/gemini_source.py +68 -33
- astrbot/core/provider/sources/openai_source.py +21 -6
- astrbot/core/star/command_management.py +449 -0
- astrbot/core/star/context.py +4 -0
- astrbot/core/star/filter/command.py +1 -0
- astrbot/core/star/filter/command_group.py +1 -0
- astrbot/core/star/star_handler.py +4 -0
- astrbot/core/star/star_manager.py +2 -0
- astrbot/core/utils/llm_metadata.py +63 -0
- astrbot/core/utils/migra_helper.py +93 -0
- astrbot/dashboard/routes/__init__.py +2 -0
- astrbot/dashboard/routes/chat.py +56 -13
- astrbot/dashboard/routes/command.py +82 -0
- astrbot/dashboard/routes/config.py +291 -33
- astrbot/dashboard/routes/stat.py +96 -0
- astrbot/dashboard/routes/tools.py +20 -4
- astrbot/dashboard/server.py +1 -0
- {astrbot-4.9.2.dist-info → astrbot-4.10.0a1.dist-info}/METADATA +2 -2
- {astrbot-4.9.2.dist-info → astrbot-4.10.0a1.dist-info}/RECORD +41 -38
- {astrbot-4.9.2.dist-info → astrbot-4.10.0a1.dist-info}/WHEEL +0 -0
- {astrbot-4.9.2.dist-info → astrbot-4.10.0a1.dist-info}/entry_points.txt +0 -0
- {astrbot-4.9.2.dist-info → astrbot-4.10.0a1.dist-info}/licenses/LICENSE +0 -0
astrbot/core/provider/sources/gemini_source.py:

@@ -14,7 +14,7 @@ import astrbot.core.message.components as Comp
 from astrbot import logger
 from astrbot.api.provider import Provider
 from astrbot.core.message.message_event_result import MessageChain
-from astrbot.core.provider.entities import LLMResponse
+from astrbot.core.provider.entities import LLMResponse, TokenUsage
 from astrbot.core.provider.func_tool_manager import ToolSet
 from astrbot.core.utils.io import download_image_by_url
 
@@ -68,7 +68,7 @@ class ProviderGoogleGenAI(Provider):
             self.api_base = self.api_base[:-1]
 
         self._init_client()
-        self.set_model(provider_config
+        self.set_model(provider_config.get("model", "unknown"))
         self._init_safety_settings()
 
     def _init_client(self) -> None:
@@ -138,7 +138,7 @@ class ProviderGoogleGenAI(Provider):
         modalities = ["TEXT"]
 
         tool_list: list[types.Tool] | None = []
-        model_name = self.get_model()
+        model_name = payloads.get("model", self.get_model())
         native_coderunner = self.provider_config.get("gm_native_coderunner", False)
         native_search = self.provider_config.get("gm_native_search", False)
         url_context = self.provider_config.get("gm_url_context", False)
@@ -197,6 +197,37 @@ class ProviderGoogleGenAI(Provider):
                 types.Tool(function_declarations=func_desc["function_declarations"]),
             ]
 
+        # oper thinking config
+        thinking_config = None
+        if model_name.startswith("gemini-2.5"):
+            # The thinkingBudget parameter, introduced with the Gemini 2.5 series
+            thinking_budget = self.provider_config.get("gm_thinking_config", {}).get(
+                "budget", 0
+            )
+            if thinking_budget is not None:
+                thinking_config = types.ThinkingConfig(
+                    thinking_budget=thinking_budget,
+                )
+        elif model_name.startswith("gemini-3"):
+            # The thinkingLevel parameter, recommended for Gemini 3 models and onwards
+            # Gemini 2.5 series models don't support thinkingLevel; use thinkingBudget instead.
+            thinking_level = self.provider_config.get("gm_thinking_config", {}).get(
+                "level", "HIGH"
+            )
+            if thinking_level and isinstance(thinking_level, str):
+                thinking_level = thinking_level.upper()
+            if thinking_level not in ["MINIMAL", "LOW", "MEDIUM", "HIGH"]:
+                logger.warning(
+                    f"Invalid thinking level: {thinking_level}, using HIGH"
+                )
+                thinking_level = "HIGH"
+            level = types.ThinkingLevel(thinking_level)
+            thinking_config = types.ThinkingConfig()
+            if not hasattr(types.ThinkingConfig, "thinking_level"):
+                setattr(types.ThinkingConfig, "thinking_level", level)
+            else:
+                thinking_config.thinking_level = level
+
         return types.GenerateContentConfig(
             system_instruction=system_instruction,
             temperature=temperature,
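The new branch keys the thinking parameters off the model name: Gemini 2.5 models get a token `thinking_budget`, Gemini 3 models get a coarse `thinking_level`. A minimal standalone sketch of that selection logic; the dataclass stands in for `google.genai`'s `types.ThinkingConfig`, and `build_thinking_config` is a hypothetical helper, not part of the diff:

```python
# Sketch of the model-name dispatch added above (stand-in types, not google.genai).
from dataclasses import dataclass

VALID_LEVELS = {"MINIMAL", "LOW", "MEDIUM", "HIGH"}

@dataclass
class ThinkingConfigStub:
    thinking_budget: int | None = None
    thinking_level: str | None = None

def build_thinking_config(model_name: str, gm_thinking_config: dict) -> ThinkingConfigStub | None:
    if model_name.startswith("gemini-2.5"):
        # Gemini 2.5 understands a token budget (0 disables thinking).
        budget = gm_thinking_config.get("budget", 0)
        return ThinkingConfigStub(thinking_budget=budget) if budget is not None else None
    if model_name.startswith("gemini-3"):
        # Gemini 3 takes a coarse level; invalid values fall back to HIGH.
        level = str(gm_thinking_config.get("level", "HIGH")).upper()
        if level not in VALID_LEVELS:
            level = "HIGH"
        return ThinkingConfigStub(thinking_level=level)
    return None  # older models: no thinking config at all

assert build_thinking_config("gemini-3-pro", {"level": "low"}).thinking_level == "LOW"
assert build_thinking_config("gemini-2.5-flash", {"budget": 1024}).thinking_budget == 1024
```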
@@ -216,22 +247,7 @@ class ProviderGoogleGenAI(Provider):
             response_modalities=modalities,
             tools=cast(types.ToolListUnion | None, tool_list),
             safety_settings=self.safety_settings if self.safety_settings else None,
-            thinking_config=(
-                types.ThinkingConfig(
-                    thinking_budget=min(
-                        int(
-                            self.provider_config.get("gm_thinking_config", {}).get(
-                                "budget",
-                                0,
-                            ),
-                        ),
-                        24576,
-                    ),
-                )
-                if "gemini-2.5-flash" in self.get_model()
-                and hasattr(types.ThinkingConfig, "thinking_budget")
-                else None
-            ),
+            thinking_config=thinking_config,
             automatic_function_calling=types.AutomaticFunctionCallingConfig(
                 disable=True,
             ),
@@ -347,6 +363,16 @@ class ProviderGoogleGenAI(Provider):
         ]
         return "".join(thought_buf).strip()
 
+    def _extract_usage(
+        self, usage_metadata: types.GenerateContentResponseUsageMetadata
+    ) -> TokenUsage:
+        """Extract usage from candidate"""
+        return TokenUsage(
+            input_other=usage_metadata.prompt_token_count or 0,
+            input_cached=usage_metadata.cached_content_token_count or 0,
+            output=usage_metadata.candidates_token_count or 0,
+        )
+
     def _process_content_parts(
         self,
         candidate: types.Candidate,
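Both providers now import a `TokenUsage` entity from `astrbot/core/provider/entities.py` (+41 lines, not shown in this diff). An illustrative mirror of it, with the field names taken from the keyword arguments used above; the `total()` helper is a hypothetical convenience, not confirmed by the diff:

```python
# Hypothetical mirror of the new TokenUsage entity, for reading the hunks above.
from dataclasses import dataclass

@dataclass
class TokenUsageSketch:
    input_other: int = 0   # prompt tokens not served from a provider cache
    input_cached: int = 0  # prompt tokens served from the provider cache
    output: int = 0        # completion / candidate tokens

    def total(self) -> int:
        return self.input_other + self.input_cached + self.output

# Mapping Gemini's usage metadata as _extract_usage does: each counter may be
# None, hence the `or 0` guards in the real code.
prompt_token_count = 1200          # stand-ins for usage_metadata fields,
cached_content_token_count = None  # any of which may be None
candidates_token_count = 350

usage = TokenUsageSketch(
    input_other=prompt_token_count or 0,
    input_cached=cached_content_token_count or 0,
    output=candidates_token_count or 0,
)
print(usage.total())  # 1550
```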
@@ -431,6 +457,8 @@ class ProviderGoogleGenAI(Provider):
             None,
         )
 
+        model = payloads.get("model", self.get_model())
+
         modalities = ["TEXT"]
         if self.provider_config.get("gm_resp_image_modal", False):
             modalities.append("IMAGE")
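The recurring `payloads.get("model", self.get_model())` pattern gives every request a per-call model override with a fallback to the provider's configured default; the tool loop supplies the key (see the `payloads = {"messages": context_query, "model": model}` hunks further down). The resolution rule, as a sketch:

```python
# Per-request model resolution threaded through both providers in this diff.
def resolve_model(payloads: dict, default_model: str) -> str:
    # The caller may put "model" into the payload; otherwise use the
    # provider's configured default.
    return payloads.get("model", default_model)

assert resolve_model({"messages": [], "model": "gemini-3-pro"}, "gemini-2.5-flash") == "gemini-3-pro"
assert resolve_model({"messages": []}, "gemini-2.5-flash") == "gemini-2.5-flash"
```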
@@ -449,7 +477,7 @@ class ProviderGoogleGenAI(Provider):
                 temperature,
             )
             result = await self.client.models.generate_content(
-                model=
+                model=model,
                 contents=cast(types.ContentListUnion, conversation),
                 config=config,
             )
@@ -475,11 +503,11 @@ class ProviderGoogleGenAI(Provider):
                 e.message = ""
             if "Developer instruction is not enabled" in e.message:
                 logger.warning(
-                    f"{
+                    f"{model} does not support system prompt; it was removed automatically (this affects persona settings)",
                 )
                 system_instruction = None
             elif "Function calling is not enabled" in e.message:
-                logger.warning(f"{
+                logger.warning(f"{model} does not support function calling; it was removed automatically")
                 tools = None
             elif (
                 "Multi-modal output is not supported" in e.message
@@ -488,7 +516,7 @@ class ProviderGoogleGenAI(Provider):
                 or "only supports text output" in e.message
             ):
                 logger.warning(
-                    f"{
+                    f"{model} does not support multi-modal output; falling back to text modality",
                 )
                 modalities = ["TEXT"]
             else:
@@ -501,6 +529,9 @@ class ProviderGoogleGenAI(Provider):
             result.candidates[0],
             llm_response,
         )
+        llm_response.id = result.response_id
+        if result.usage_metadata:
+            llm_response.usage = self._extract_usage(result.usage_metadata)
         return llm_response
 
     async def _query_stream(
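With these hunks, a non-streaming query now carries the upstream response id and token usage on the returned `LLMResponse`. A hedged sketch of how a caller might read the new fields; the `provider.text_chat` call shape is assumed here for illustration and is not part of this diff:

```python
# Hypothetical consumer of the new LLMResponse.id / LLMResponse.usage fields.
async def log_usage(provider, prompt: str) -> None:
    resp = await provider.text_chat(prompt=prompt)  # assumed call shape
    if resp.usage:
        # usage is a TokenUsage; id is the provider's upstream response id
        print(resp.id, resp.usage.input_other, resp.usage.input_cached, resp.usage.output)
```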
@@ -513,7 +544,7 @@ class ProviderGoogleGenAI(Provider):
             (msg["content"] for msg in payloads["messages"] if msg["role"] == "system"),
             None,
         )
-
+        model = payloads.get("model", self.get_model())
         conversation = self._prepare_conversation(payloads)
 
         result = None
@@ -525,7 +556,7 @@ class ProviderGoogleGenAI(Provider):
                 system_instruction,
             )
             result = await self.client.models.generate_content_stream(
-                model=
+                model=model,
                 contents=cast(types.ContentListUnion, conversation),
                 config=config,
             )
@@ -535,11 +566,11 @@ class ProviderGoogleGenAI(Provider):
                 e.message = ""
             if "Developer instruction is not enabled" in e.message:
                 logger.warning(
-                    f"{
+                    f"{model} does not support system prompt; it was removed automatically (this affects persona settings)",
                 )
                 system_instruction = None
             elif "Function calling is not enabled" in e.message:
-                logger.warning(f"{
+                logger.warning(f"{model} does not support function calling; it was removed automatically")
                 tools = None
             else:
                 raise
@@ -569,6 +600,9 @@ class ProviderGoogleGenAI(Provider):
                     chunk.candidates[0],
                     llm_response,
                 )
+                llm_response.id = chunk.response_id
+                if chunk.usage_metadata:
+                    llm_response.usage = self._extract_usage(chunk.usage_metadata)
                 yield llm_response
                 return
 
@@ -596,6 +630,9 @@ class ProviderGoogleGenAI(Provider):
                     chunk.candidates[0],
                     final_response,
                 )
+                final_response.id = chunk.response_id
+                if chunk.usage_metadata:
+                    final_response.usage = self._extract_usage(chunk.usage_metadata)
                 break
 
         # Yield final complete response with accumulated text
@@ -652,10 +689,9 @@ class ProviderGoogleGenAI(Provider):
             for tcr in tool_calls_result:
                 context_query.extend(tcr.to_openai_messages())
 
-
-        model_config["model"] = model or self.get_model()
+        model = model or self.get_model()
 
-        payloads = {"messages": context_query,
+        payloads = {"messages": context_query, "model": model}
 
         retry = 10
         keys = self.api_keys.copy()
@@ -705,10 +741,9 @@ class ProviderGoogleGenAI(Provider):
             for tcr in tool_calls_result:
                 context_query.extend(tcr.to_openai_messages())
 
-
-        model_config["model"] = model or self.get_model()
+        model = model or self.get_model()
 
-        payloads = {"messages": context_query,
+        payloads = {"messages": context_query, "model": model}
 
         retry = 10
         keys = self.api_keys.copy()
astrbot/core/provider/sources/openai_source.py:

@@ -12,6 +12,7 @@ from openai._exceptions import NotFoundError
 from openai.lib.streaming.chat._completions import ChatCompletionStreamState
 from openai.types.chat.chat_completion import ChatCompletion
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+from openai.types.completion_usage import CompletionUsage
 
 import astrbot.core.message.components as Comp
 from astrbot import logger
@@ -19,7 +20,7 @@ from astrbot.api.provider import Provider
 from astrbot.core.agent.message import Message
 from astrbot.core.agent.tool import ToolSet
 from astrbot.core.message.message_event_result import MessageChain
-from astrbot.core.provider.entities import LLMResponse, ToolCallsResult
+from astrbot.core.provider.entities import LLMResponse, TokenUsage, ToolCallsResult
 from astrbot.core.utils.io import download_image_by_url
 
 from ..register import register_provider_adapter
@@ -68,8 +69,7 @@ class ProviderOpenAIOfficial(Provider):
             self.client.chat.completions.create,
         ).parameters.keys()
 
-
-        model = model_config.get("model", "unknown")
+        model = provider_config.get("model", "unknown")
         self.set_model(model)
 
         self.reasoning_key = "reasoning_content"
@@ -208,6 +208,7 @@ class ProviderOpenAIOfficial(Provider):
             # handle the content delta
             reasoning = self._extract_reasoning_content(chunk)
             _y = False
+            llm_response.id = chunk.id
             if reasoning:
                 llm_response.reasoning_content = reasoning
                 _y = True
@@ -217,6 +218,8 @@ class ProviderOpenAIOfficial(Provider):
                     chain=[Comp.Plain(completion_text)],
                 )
                 _y = True
+            if chunk.usage:
+                llm_response.usage = self._extract_usage(chunk.usage)
             if _y:
                 yield llm_response
 
@@ -245,6 +248,15 @@ class ProviderOpenAIOfficial(Provider):
                 reasoning_text = str(reasoning_attr)
         return reasoning_text
 
+    def _extract_usage(self, usage: CompletionUsage) -> TokenUsage:
+        ptd = usage.prompt_tokens_details
+        cached = ptd.cached_tokens if ptd and ptd.cached_tokens else 0
+        return TokenUsage(
+            input_other=usage.prompt_tokens - cached,
+            input_cached=ptd.cached_tokens if ptd and ptd.cached_tokens else 0,
+            output=usage.completion_tokens,
+        )
+
     async def _parse_openai_completion(
         self, completion: ChatCompletion, tools: ToolSet | None
     ) -> LLMResponse:
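The arithmetic matters here: OpenAI's `prompt_tokens` already includes cached tokens, so the helper subtracts the cached share to keep `input_other` and `input_cached` disjoint (note that the Gemini `_extract_usage` above maps `prompt_token_count` to `input_other` without that subtraction). Worked numbers, made up for illustration:

```python
# The cached-token split behind the OpenAI _extract_usage above.
prompt_tokens = 2048      # includes cached tokens, per OpenAI's accounting
cached_tokens = 1536      # usage.prompt_tokens_details.cached_tokens
completion_tokens = 210

input_cached = cached_tokens
input_other = prompt_tokens - cached_tokens  # 512 uncached prompt tokens

# The two input buckets are disjoint and sum back to the raw prompt count.
assert input_other + input_cached == prompt_tokens
print(input_other, input_cached, completion_tokens)  # 512 1536 210
```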
@@ -321,6 +333,10 @@ class ProviderOpenAIOfficial(Provider):
             raise Exception(f"The API returned a completion that could not be parsed: {completion}.")
 
         llm_response.raw_completion = completion
+        llm_response.id = completion.id
+
+        if completion.usage:
+            llm_response.usage = self._extract_usage(completion.usage)
 
         return llm_response
 
@@ -358,10 +374,9 @@ class ProviderOpenAIOfficial(Provider):
             for tcr in tool_calls_result:
                 context_query.extend(tcr.to_openai_messages())
 
-
-        model_config["model"] = model or self.get_model()
+        model = model or self.get_model()
 
-        payloads = {"messages": context_query,
+        payloads = {"messages": context_query, "model": model}
 
         # xAI origin search tool inject
         self._maybe_inject_xai_search(payloads, **kwargs)