astrbot-4.10.3-py3-none-any.whl → astrbot-4.10.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/builtin_stars/astrbot/main.py +2 -10
- astrbot/builtin_stars/python_interpreter/main.py +130 -131
- astrbot/cli/__init__.py +1 -1
- astrbot/core/agent/message.py +23 -1
- astrbot/core/agent/runners/tool_loop_agent_runner.py +24 -7
- astrbot/core/astr_agent_hooks.py +6 -0
- astrbot/core/backup/exporter.py +1 -0
- astrbot/core/config/astrbot_config.py +2 -0
- astrbot/core/config/default.py +47 -6
- astrbot/core/knowledge_base/chunking/recursive.py +10 -2
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +184 -174
- astrbot/core/pipeline/result_decorate/stage.py +65 -57
- astrbot/core/pipeline/waking_check/stage.py +29 -2
- astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py +15 -29
- astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +1 -6
- astrbot/core/platform/sources/dingtalk/dingtalk_event.py +15 -1
- astrbot/core/platform/sources/lark/lark_adapter.py +2 -10
- astrbot/core/platform/sources/misskey/misskey_adapter.py +0 -5
- astrbot/core/platform/sources/misskey/misskey_utils.py +0 -3
- astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +4 -9
- astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +4 -9
- astrbot/core/platform/sources/satori/satori_adapter.py +6 -1
- astrbot/core/platform/sources/slack/slack_adapter.py +3 -6
- astrbot/core/platform/sources/webchat/webchat_adapter.py +0 -1
- astrbot/core/platform/sources/wechatpadpro/wechatpadpro_adapter.py +3 -5
- astrbot/core/provider/entities.py +9 -1
- astrbot/core/provider/sources/anthropic_source.py +60 -3
- astrbot/core/provider/sources/gemini_source.py +37 -3
- astrbot/core/provider/sources/minimax_tts_api_source.py +4 -1
- astrbot/core/provider/sources/openai_source.py +25 -31
- astrbot/core/provider/sources/xai_source.py +29 -0
- astrbot/core/provider/sources/xinference_stt_provider.py +24 -12
- astrbot/core/star/star_manager.py +41 -0
- astrbot/core/utils/pip_installer.py +20 -1
- astrbot/dashboard/routes/backup.py +519 -15
- astrbot/dashboard/routes/config.py +45 -0
- astrbot/dashboard/server.py +1 -0
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/METADATA +1 -1
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/RECORD +42 -41
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/WHEEL +0 -0
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/entry_points.txt +0 -0
- {astrbot-4.10.3.dist-info → astrbot-4.10.4.dist-info}/licenses/LICENSE +0 -0
astrbot/builtin_stars/astrbot/main.py
CHANGED

@@ -100,16 +100,8 @@ class Main(star.Star):
                 logger.error(f"ltm: {e}")

     @filter.on_llm_response()
-    async def
-        """在 LLM
-        umo = event.unified_msg_origin
-        cfg = self.context.get_config(umo).get("provider_settings", {})
-        show_reasoning = cfg.get("display_reasoning_text", False)
-        if show_reasoning and resp.reasoning_content:
-            resp.completion_text = (
-                f"🤔 思考: {resp.reasoning_content}\n\n{resp.completion_text}"
-            )
-
+    async def record_llm_resp_to_ltm(self, event: AstrMessageEvent, resp: LLMResponse):
+        """在 LLM 响应后记录对话"""
         if self.ltm and self.ltm_enabled(event):
             try:
                 await self.ltm.after_req_llm(event, resp)
astrbot/builtin_stars/python_interpreter/main.py
CHANGED

@@ -157,9 +157,8 @@ class Main(star.Star):
     async def is_docker_available(self) -> bool:
         """Check if docker is available"""
         try:
-
-
-            await docker.close()
+            async with aiodocker.Docker() as docker:
+                await docker.version()
             return True
         except BaseException as e:
             logger.info(f"检查 Docker 可用性: {e}")
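Both this hunk and the ones that follow replace manually managed aiodocker clients with the async context-manager form, so the underlying HTTP session is closed even when a call raises. A minimal standalone sketch of the pattern, outside AstrBot (the function name is illustrative):

    import asyncio

    import aiodocker

    async def docker_available() -> bool:
        # Mirrors is_docker_available() after the refactor: the context manager
        # closes the client whether or not version() raises.
        try:
            async with aiodocker.Docker() as docker:
                await docker.version()
            return True
        except Exception as e:
            print(f"Docker not available: {e}")
            return False

    if __name__ == "__main__":
        print(asyncio.run(docker_available()))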
@@ -279,14 +278,14 @@ class Main(star.Star):
     @pi.command("repull")
     async def pi_repull(self, event: AstrMessageEvent):
         """重新拉取沙箱镜像"""
-
-
-
-
-
-
-
-
+        async with aiodocker.Docker() as docker:
+            image_name = await self.get_image_name()
+            try:
+                await docker.images.get(image_name)
+                await docker.images.delete(image_name, force=True)
+            except aiodocker.exceptions.DockerError:
+                pass
+            await docker.images.pull(image_name)
         yield event.plain_result("重新拉取沙箱镜像成功。")

     @pi.command("file")
@@ -371,137 +370,137 @@ class Main(star.Star):
         obs = ""
         n = 5

-
-
-
-
-            PROMPT_ = PROMPT.format(
-                prompt=plain_text,
-                extra_input=extra_inputs,
-                extra_prompt=obs,
-            )
-            provider = self.context.get_using_provider()
-            llm_response = await provider.text_chat(
-                prompt=PROMPT_,
-                session_id=f"{event.session_id}_{magic_code}_{i!s}",
-            )
-
-            logger.debug(
-                "code interpreter llm gened code:" + llm_response.completion_text,
-            )
-
-            # 整理代码并保存
-            code_clean = await self.tidy_code(llm_response.completion_text)
-            with open(os.path.join(workplace_path, "exec.py"), "w") as f:
-                f.write(code_clean)
-
-            # 启动容器
-            docker = aiodocker.Docker()
-
-            # 检查有没有image
-            image_name = await self.get_image_name()
-            try:
-                await docker.images.get(image_name)
-            except aiodocker.exceptions.DockerError:
-                # 拉取镜像
-                logger.info(f"未找到沙箱镜像,正在尝试拉取 {image_name}...")
-                await docker.images.pull(image_name)
+        async with aiodocker.Docker() as docker:
+            for i in range(n):
+                if i > 0:
+                    logger.info(f"Try {i + 1}/{n}")

-
-
-
+                PROMPT_ = PROMPT.format(
+                    prompt=plain_text,
+                    extra_input=extra_inputs,
+                    extra_prompt=obs,
+                )
+                provider = self.context.get_using_provider()
+                llm_response = await provider.text_chat(
+                    prompt=PROMPT_,
+                    session_id=f"{event.session_id}_{magic_code}_{i!s}",
+                )

-
-
-                "",
-            )
-            if self.docker_host_astrbot_abs_path:
-                host_shared = os.path.join(
-                    self.docker_host_astrbot_abs_path,
-                    self.shared_path,
+                logger.debug(
+                    "code interpreter llm gened code:" + llm_response.completion_text,
                 )
-
-
-
+
+                # 整理代码并保存
+                code_clean = await self.tidy_code(llm_response.completion_text)
+                with open(os.path.join(workplace_path, "exec.py"), "w") as f:
+                    f.write(code_clean)
+
+                # 检查有没有image
+                image_name = await self.get_image_name()
+                try:
+                    await docker.images.get(image_name)
+                except aiodocker.exceptions.DockerError:
+                    # 拉取镜像
+                    logger.info(f"未找到沙箱镜像,正在尝试拉取 {image_name}...")
+                    await docker.images.pull(image_name)
+
+                yield event.plain_result(
+                    f"使用沙箱执行代码中,请稍等...(尝试次数: {i + 1}/{n})",
                 )
-
-
-
+
+                self.docker_host_astrbot_abs_path = self.config.get(
+                    "docker_host_astrbot_abs_path",
+                    "",
                 )
+                if self.docker_host_astrbot_abs_path:
+                    host_shared = os.path.join(
+                        self.docker_host_astrbot_abs_path,
+                        self.shared_path,
+                    )
+                    host_output = os.path.join(
+                        self.docker_host_astrbot_abs_path,
+                        output_path,
+                    )
+                    host_workplace = os.path.join(
+                        self.docker_host_astrbot_abs_path,
+                        workplace_path,
+                    )

-
-
-
-
+                else:
+                    host_shared = os.path.abspath(self.shared_path)
+                    host_output = os.path.abspath(output_path)
+                    host_workplace = os.path.abspath(workplace_path)

-
-
-
+                logger.debug(
+                    f"host_shared: {host_shared}, host_output: {host_output}, host_workplace: {host_workplace}",
+                )

-
-
-
-
-
-
-
-
-
-
-
-
+                container = await docker.containers.run(
+                    {
+                        "Image": image_name,
+                        "Cmd": ["python", "exec.py"],
+                        "Memory": 512 * 1024 * 1024,
+                        "NanoCPUs": 1000000000,
+                        "HostConfig": {
+                            "Binds": [
+                                f"{host_shared}:/astrbot_sandbox/shared:ro",
+                                f"{host_output}:/astrbot_sandbox/output:rw",
+                                f"{host_workplace}:/astrbot_sandbox:rw",
+                            ],
+                        },
+                        "Env": [f"MAGIC_CODE={magic_code}"],
+                        "AutoRemove": True,
                     },
-
-                "AutoRemove": True,
-            },
-        )
+                )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                logger.debug(f"Container {container.id} created.")
+                logs = await self.run_container(container)
+
+                logger.debug(f"Container {container.id} finished.")
+                logger.debug(f"Container {container.id} logs: {logs}")
+
+                # 发送结果
+                pattern = r"\[ASTRBOT_(TEXT|IMAGE|FILE)_OUTPUT#\w+\]: (.*)"
+                ok = False
+                traceback = ""
+                for idx, log in enumerate(logs):
+                    match = re.match(pattern, log)
+                    if match:
+                        ok = True
+                        if match.group(1) == "TEXT":
+                            yield event.plain_result(match.group(2))
+                        elif match.group(1) == "IMAGE":
+                            image_path = os.path.join(workplace_path, match.group(2))
+                            logger.debug(f"Sending image: {image_path}")
+                            yield event.image_result(image_path)
+                        elif match.group(1) == "FILE":
+                            file_path = os.path.join(workplace_path, match.group(2))
+                            # logger.debug(f"Sending file: {file_path}")
+                            # file_s3_url = await self.file_upload(file_path)
+                            # logger.info(f"文件上传到 AstrBot 云节点: {file_s3_url}")
+                            file_name = os.path.basename(file_path)
+                            chain: list[BaseMessageComponent] = [
+                                File(name=file_name, file=file_path)
+                            ]
+                            yield event.set_result(MessageEventResult(chain=chain))
+
+                    elif (
+                        "Traceback (most recent call last)" in log or "[Error]: " in log
+                    ):
+                        traceback = "\n".join(logs[idx:])
+
+                if not ok:
+                    if traceback:
+                        obs = f"## Observation \n When execute the code: ```python\n{code_clean}\n```\n\n Error occurred:\n\n{traceback}\n Need to improve/fix the code."
+                    else:
+                        logger.warning(
+                            f"未从沙箱输出中捕获到合法的输出。沙箱输出日志: {logs}",
+                        )
+                    break
                 else:
-
-
-
-                break
-            else:
-                # 成功了
-                self.user_file_msg_buffer.pop(event.get_session_id())
-                return
+                    # 成功了
+                    self.user_file_msg_buffer.pop(event.get_session_id())
+                    return

         yield event.plain_result(
             "经过多次尝试后,未从沙箱输出中捕获到合法的输出,请更换问法或者查看日志。",
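The retry loop above decides success by scanning container logs for tagged output markers. A standalone illustration of that matching, using the same regex as the diff; the sample log lines are made up, and the token after "#" is presumably the per-run MAGIC_CODE the container receives via Env:

    import re

    # Same pattern as in the hunk above.
    PATTERN = r"\[ASTRBOT_(TEXT|IMAGE|FILE)_OUTPUT#\w+\]: (.*)"

    sample_logs = [
        "[ASTRBOT_TEXT_OUTPUT#a1b2c3]: mean = 4.2",
        "[ASTRBOT_IMAGE_OUTPUT#a1b2c3]: plot.png",
        "Traceback (most recent call last):",
    ]

    for line in sample_logs:
        m = re.match(PATTERN, line)
        if m:
            kind, payload = m.group(1), m.group(2)
            print(f"{kind}: {payload}")
        elif "Traceback (most recent call last)" in line or "[Error]: " in line:
            print("error output; fed back to the LLM as an observation for the next try")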
astrbot/cli/__init__.py
CHANGED

@@ -1 +1 @@
-__version__ = "4.10.3"
+__version__ = "4.10.4"
astrbot/core/agent/message.py
CHANGED

@@ -12,7 +12,7 @@ class ContentPart(BaseModel):

     __content_part_registry: ClassVar[dict[str, type["ContentPart"]]] = {}

-    type:
+    type: Literal["text", "think", "image_url", "audio_url"]

     def __init_subclass__(cls, **kwargs: Any) -> None:
         super().__init_subclass__(**kwargs)

@@ -63,6 +63,28 @@ class TextPart(ContentPart):
     text: str


+class ThinkPart(ContentPart):
+    """
+    >>> ThinkPart(think="I think I need to think about this.").model_dump()
+    {'type': 'think', 'think': 'I think I need to think about this.', 'encrypted': None}
+    """
+
+    type: str = "think"
+    think: str
+    encrypted: str | None = None
+    """Encrypted thinking content, or signature."""
+
+    def merge_in_place(self, other: Any) -> bool:
+        if not isinstance(other, ThinkPart):
+            return False
+        if self.encrypted:
+            return False
+        self.think += other.think
+        if other.encrypted:
+            self.encrypted = other.encrypted
+        return True
+
+
 class ImageURLPart(ContentPart):
     """
     >>> ImageURLPart(image_url="http://example.com/image.jpg").model_dump()
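A quick sketch of how the new ThinkPart behaves, based only on the class added above (runnable against astrbot 4.10.4):

    from astrbot.core.agent.message import ThinkPart

    a = ThinkPart(think="First half of the reasoning, ")
    b = ThinkPart(think="second half.", encrypted="sig-abc")

    assert a.merge_in_place(b) is True   # streamed deltas are concatenated
    assert a.think == "First half of the reasoning, second half."
    assert a.encrypted == "sig-abc"      # the signature is carried over

    # Once a signature is present the part is sealed and refuses further merges.
    assert a.merge_in_place(ThinkPart(think="more")) is False

    print(a.model_dump())
    # {'type': 'think', 'think': 'First half of the reasoning, second half.', 'encrypted': 'sig-abc'}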
astrbot/core/agent/runners/tool_loop_agent_runner.py
CHANGED

@@ -13,6 +13,7 @@ from mcp.types import (
 )

 from astrbot import logger
+from astrbot.core.agent.message import TextPart, ThinkPart
 from astrbot.core.message.components import Json
 from astrbot.core.message.message_event_result import (
     MessageChain,

@@ -169,13 +170,20 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
             self.final_llm_resp = llm_resp
             self._transition_state(AgentState.DONE)
             self.stats.end_time = time.time()
+
             # record the final assistant message
-
-
-
-
-
-
+            parts = []
+            if llm_resp.reasoning_content or llm_resp.reasoning_signature:
+                parts.append(
+                    ThinkPart(
+                        think=llm_resp.reasoning_content,
+                        encrypted=llm_resp.reasoning_signature,
+                    )
+                )
+            parts.append(TextPart(text=llm_resp.completion_text or "*No response*"))
+            self.run_context.messages.append(Message(role="assistant", content=parts))
+
+            # call the on_agent_done hook
             try:
                 await self.agent_hooks.on_agent_done(self.run_context, llm_resp)
             except Exception as e:

@@ -214,10 +222,19 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                 data=AgentResponseData(chain=result),
             )
             # 将结果添加到上下文中
+            parts = []
+            if llm_resp.reasoning_content or llm_resp.reasoning_signature:
+                parts.append(
+                    ThinkPart(
+                        think=llm_resp.reasoning_content,
+                        encrypted=llm_resp.reasoning_signature,
+                    )
+                )
+            parts.append(TextPart(text=llm_resp.completion_text or "*No response*"))
             tool_calls_result = ToolCallsResult(
                 tool_calls_info=AssistantMessageSegment(
                     tool_calls=llm_resp.to_openai_to_calls_model(),
-                    content=
+                    content=parts,
                 ),
                 tool_calls_result=tool_call_result_blocks,
             )
astrbot/core/astr_agent_hooks.py
CHANGED

@@ -13,6 +13,12 @@ from astrbot.core.star.star_handler import EventType
 class MainAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
     async def on_agent_done(self, run_context, llm_response):
         # 执行事件钩子
+        if llm_response and llm_response.reasoning_content:
+            # we will use this in result_decorate stage to inject reasoning content to chain
+            run_context.context.event.set_extra(
+                "_llm_reasoning_content", llm_response.reasoning_content
+            )
+
         await call_event_hook(
             run_context.context.event,
             EventType.OnLLMResponseEvent,
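The inline comment says the "_llm_reasoning_content" extra is consumed in the result_decorate stage, which also changed in this release but is not shown in this diff. A hypothetical consumer, assuming get_extra is the read-side counterpart of the set_extra call above and that Plain is the text component in astrbot.core.message.components:

    from astrbot.core.message.components import Plain

    def inject_reasoning(event, result, show_reasoning: bool) -> None:
        # Illustration only; the real logic lives in result_decorate/stage.py.
        reasoning = event.get_extra("_llm_reasoning_content")
        if show_reasoning and reasoning:
            # Same prefix the removed builtin plugin used in 4.10.3.
            result.chain.insert(0, Plain(text=f"🤔 思考: {reasoning}\n\n"))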
astrbot/core/backup/exporter.py
CHANGED

@@ -447,6 +447,7 @@ class AstrBotExporter:
             "version": BACKUP_MANIFEST_VERSION,
             "astrbot_version": VERSION,
             "exported_at": datetime.now(timezone.utc).isoformat(),
+            "origin": "exported",  # 标记备份来源:exported=本实例导出, uploaded=用户上传
             "schema_version": {
                 "main_db": "v4",
                 "kb_db": "v1",
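The new "origin" key distinguishes the two backup sources named in the inline comment (exported by this instance vs. uploaded by a user). A tiny illustration of how a consumer might branch on it; the fallback value for older manifests is an assumption, and the real handling lives in dashboard/routes/backup.py, which is not shown here:

    def describe_backup(manifest: dict) -> str:
        # manifest is the dict written by AstrBotExporter above.
        origin = manifest.get("origin", "unknown")  # pre-4.10.4 manifests have no field
        if origin == "exported":
            return "backup exported by this instance"
        if origin == "uploaded":
            return "backup uploaded by a user"
        return "backup of unknown origin"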
astrbot/core/config/default.py
CHANGED

@@ -5,7 +5,7 @@ from typing import Any, TypedDict

 from astrbot.core.utils.astrbot_path import get_astrbot_data_path

-VERSION = "4.10.3"
+VERSION = "4.10.4"
 DB_PATH = os.path.join(get_astrbot_data_path(), "data_v4.db")

 WEBHOOK_SUPPORTED_PLATFORMS = [

@@ -905,6 +905,7 @@ CONFIG_METADATA_2 = {
         "key": [],
         "api_base": "https://api.anthropic.com/v1",
         "timeout": 120,
+        "anth_thinking_config": {"budget": 0},
     },
     "Moonshot": {
         "id": "moonshot",

@@ -920,7 +921,7 @@ CONFIG_METADATA_2 = {
     "xAI": {
         "id": "xai",
         "provider": "xai",
-        "type": "
+        "type": "xai_chat_completion",
         "provider_type": "chat_completion",
         "enable": True,
         "key": [],

@@ -1286,7 +1287,7 @@ CONFIG_METADATA_2 = {
         "minimax-is-timber-weight": False,
         "minimax-voice-id": "female-shaonv",
         "minimax-timber-weight": '[\n {\n "voice_id": "Chinese (Mandarin)_Warm_Girl",\n "weight": 25\n },\n {\n "voice_id": "Chinese (Mandarin)_BashfulGirl",\n "weight": 50\n }\n]',
-        "minimax-voice-emotion": "
+        "minimax-voice-emotion": "auto",
         "minimax-voice-latex": False,
         "minimax-voice-english-normalization": False,
         "timeout": 20,

@@ -1450,7 +1451,32 @@ CONFIG_METADATA_2 = {
             "description": "自定义请求体参数",
             "type": "dict",
             "items": {},
-            "hint": "
+            "hint": "用于在请求时添加额外的参数,如 temperature、top_p、max_tokens 等。",
+            "template_schema": {
+                "temperature": {
+                    "name": "Temperature",
+                    "description": "温度参数",
+                    "hint": "控制输出的随机性,范围通常为 0-2。值越高越随机。",
+                    "type": "float",
+                    "default": 0.6,
+                    "slider": {"min": 0, "max": 2, "step": 0.1},
+                },
+                "top_p": {
+                    "name": "Top-p",
+                    "description": "Top-p 采样",
+                    "hint": "核采样参数,范围通常为 0-1。控制模型考虑的概率质量。",
+                    "type": "float",
+                    "default": 1.0,
+                    "slider": {"min": 0, "max": 1, "step": 0.01},
+                },
+                "max_tokens": {
+                    "name": "Max Tokens",
+                    "description": "最大令牌数",
+                    "hint": "生成的最大令牌数。",
+                    "type": "int",
+                    "default": 8192,
+                },
+            },
         },
         "provider": {
             "type": "string",

@@ -1787,6 +1813,17 @@ CONFIG_METADATA_2 = {
             },
         },
     },
+    "anth_thinking_config": {
+        "description": "Thinking Config",
+        "type": "object",
+        "items": {
+            "budget": {
+                "description": "Thinking Budget",
+                "type": "int",
+                "hint": "Anthropic thinking.budget_tokens param. Must >= 1024. See: https://platform.claude.com/docs/en/build-with-claude/extended-thinking",
+            },
+        },
+    },
     "minimax-group-id": {
         "type": "string",
         "description": "用户组",

@@ -1858,15 +1895,18 @@ CONFIG_METADATA_2 = {
     "minimax-voice-emotion": {
         "type": "string",
         "description": "情绪",
-        "hint": "
+        "hint": "控制合成语音的情绪。当为 auto 时,将根据文本内容自动选择情绪。",
         "options": [
+            "auto",
             "happy",
             "sad",
             "angry",
             "fearful",
             "disgusted",
             "surprised",
-            "
+            "calm",
+            "fluent",
+            "whisper",
         ],
     },
     "minimax-voice-latex": {

@@ -3049,4 +3089,5 @@ DEFAULT_VALUE_MAP = {
     "text": "",
     "list": [],
     "object": {},
+    "template_list": [],
 }
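For context on the new anth_thinking_config: on the Anthropic Messages API a budget of this kind is passed as thinking.budget_tokens, which must be at least 1024 and strictly less than max_tokens. A sketch of that request shape; anthropic_source.py also changed in this release but is not shown here, so this illustrates the API, not AstrBot's exact code (model name illustrative):

    import anthropic

    client = anthropic.AsyncAnthropic()  # reads ANTHROPIC_API_KEY from the environment

    async def chat_with_thinking(prompt: str, budget_tokens: int = 2048):
        # budget_tokens must be >= 1024 and smaller than max_tokens.
        return await client.messages.create(
            model="claude-sonnet-4-5",
            max_tokens=budget_tokens + 4096,
            thinking={"type": "enabled", "budget_tokens": budget_tokens},
            messages=[{"role": "user", "content": prompt}],
        )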
astrbot/core/knowledge_base/chunking/recursive.py
CHANGED

@@ -149,8 +149,16 @@ class RecursiveCharacterChunker(BaseChunker):
             分割后的文本块列表

         """
-
-
+        if chunk_size is None:
+            chunk_size = self.chunk_size
+        if overlap is None:
+            overlap = self.chunk_overlap
+        if chunk_size <= 0:
+            raise ValueError("chunk_size must be greater than 0")
+        if overlap < 0:
+            raise ValueError("chunk_overlap must be non-negative")
+        if overlap >= chunk_size:
+            raise ValueError("chunk_overlap must be less than chunk_size")
         result = []
         for i in range(0, len(text), chunk_size - overlap):
             end = min(i + chunk_size, len(text))