AstrBot 4.10.3__py3-none-any.whl → 4.10.5__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (42)
  1. astrbot/builtin_stars/astrbot/main.py +2 -10
  2. astrbot/builtin_stars/python_interpreter/main.py +130 -131
  3. astrbot/cli/__init__.py +1 -1
  4. astrbot/core/agent/message.py +23 -1
  5. astrbot/core/agent/runners/tool_loop_agent_runner.py +24 -7
  6. astrbot/core/astr_agent_hooks.py +6 -0
  7. astrbot/core/backup/exporter.py +1 -0
  8. astrbot/core/config/astrbot_config.py +2 -0
  9. astrbot/core/config/default.py +47 -6
  10. astrbot/core/knowledge_base/chunking/recursive.py +10 -2
  11. astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +184 -174
  12. astrbot/core/pipeline/result_decorate/stage.py +65 -57
  13. astrbot/core/pipeline/waking_check/stage.py +29 -2
  14. astrbot/core/platform/sources/aiocqhttp/aiocqhttp_platform_adapter.py +15 -29
  15. astrbot/core/platform/sources/dingtalk/dingtalk_adapter.py +1 -6
  16. astrbot/core/platform/sources/dingtalk/dingtalk_event.py +15 -1
  17. astrbot/core/platform/sources/lark/lark_adapter.py +2 -10
  18. astrbot/core/platform/sources/misskey/misskey_adapter.py +0 -5
  19. astrbot/core/platform/sources/misskey/misskey_utils.py +0 -3
  20. astrbot/core/platform/sources/qqofficial/qqofficial_platform_adapter.py +4 -9
  21. astrbot/core/platform/sources/qqofficial_webhook/qo_webhook_adapter.py +4 -9
  22. astrbot/core/platform/sources/satori/satori_adapter.py +6 -1
  23. astrbot/core/platform/sources/slack/slack_adapter.py +3 -6
  24. astrbot/core/platform/sources/webchat/webchat_adapter.py +0 -1
  25. astrbot/core/platform/sources/wechatpadpro/wechatpadpro_adapter.py +3 -5
  26. astrbot/core/provider/entities.py +9 -1
  27. astrbot/core/provider/sources/anthropic_source.py +60 -3
  28. astrbot/core/provider/sources/gemini_source.py +37 -3
  29. astrbot/core/provider/sources/minimax_tts_api_source.py +4 -1
  30. astrbot/core/provider/sources/openai_source.py +25 -31
  31. astrbot/core/provider/sources/xai_source.py +29 -0
  32. astrbot/core/provider/sources/xinference_stt_provider.py +24 -12
  33. astrbot/core/star/star_manager.py +41 -0
  34. astrbot/core/utils/pip_installer.py +20 -1
  35. astrbot/dashboard/routes/backup.py +519 -15
  36. astrbot/dashboard/routes/config.py +45 -0
  37. astrbot/dashboard/server.py +1 -0
  38. {astrbot-4.10.3.dist-info → astrbot-4.10.5.dist-info}/METADATA +1 -1
  39. {astrbot-4.10.3.dist-info → astrbot-4.10.5.dist-info}/RECORD +42 -41
  40. {astrbot-4.10.3.dist-info → astrbot-4.10.5.dist-info}/WHEEL +0 -0
  41. {astrbot-4.10.3.dist-info → astrbot-4.10.5.dist-info}/entry_points.txt +0 -0
  42. {astrbot-4.10.3.dist-info → astrbot-4.10.5.dist-info}/licenses/LICENSE +0 -0
astrbot/builtin_stars/astrbot/main.py CHANGED
@@ -100,16 +100,8 @@ class Main(star.Star):
  logger.error(f"ltm: {e}")
 
  @filter.on_llm_response()
- async def inject_reasoning(self, event: AstrMessageEvent, resp: LLMResponse):
- """在 LLM 响应后基于配置注入思考过程文本 / 在 LLM 响应后记录对话"""
- umo = event.unified_msg_origin
- cfg = self.context.get_config(umo).get("provider_settings", {})
- show_reasoning = cfg.get("display_reasoning_text", False)
- if show_reasoning and resp.reasoning_content:
- resp.completion_text = (
- f"🤔 思考: {resp.reasoning_content}\n\n{resp.completion_text}"
- )
-
+ async def record_llm_resp_to_ltm(self, event: AstrMessageEvent, resp: LLMResponse):
+ """在 LLM 响应后记录对话"""
  if self.ltm and self.ltm_enabled(event):
  try:
  await self.ltm.after_req_llm(event, resp)
astrbot/builtin_stars/python_interpreter/main.py CHANGED
@@ -157,9 +157,8 @@ class Main(star.Star):
  async def is_docker_available(self) -> bool:
  """Check if docker is available"""
  try:
- docker = aiodocker.Docker()
- await docker.version()
- await docker.close()
+ async with aiodocker.Docker() as docker:
+ await docker.version()
  return True
  except BaseException as e:
  logger.info(f"检查 Docker 可用性: {e}")
@@ -279,14 +278,14 @@ class Main(star.Star):
  @pi.command("repull")
  async def pi_repull(self, event: AstrMessageEvent):
  """重新拉取沙箱镜像"""
- docker = aiodocker.Docker()
- image_name = await self.get_image_name()
- try:
- await docker.images.get(image_name)
- await docker.images.delete(image_name, force=True)
- except aiodocker.exceptions.DockerError:
- pass
- await docker.images.pull(image_name)
+ async with aiodocker.Docker() as docker:
+ image_name = await self.get_image_name()
+ try:
+ await docker.images.get(image_name)
+ await docker.images.delete(image_name, force=True)
+ except aiodocker.exceptions.DockerError:
+ pass
+ await docker.images.pull(image_name)
  yield event.plain_result("重新拉取沙箱镜像成功。")
 
  @pi.command("file")
@@ -371,137 +370,137 @@ class Main(star.Star):
  obs = ""
  n = 5
 
- for i in range(n):
- if i > 0:
- logger.info(f"Try {i + 1}/{n}")
-
- PROMPT_ = PROMPT.format(
- prompt=plain_text,
- extra_input=extra_inputs,
- extra_prompt=obs,
- )
- provider = self.context.get_using_provider()
- llm_response = await provider.text_chat(
- prompt=PROMPT_,
- session_id=f"{event.session_id}_{magic_code}_{i!s}",
- )
-
- logger.debug(
- "code interpreter llm gened code:" + llm_response.completion_text,
- )
-
- # 整理代码并保存
- code_clean = await self.tidy_code(llm_response.completion_text)
- with open(os.path.join(workplace_path, "exec.py"), "w") as f:
- f.write(code_clean)
-
- # 启动容器
- docker = aiodocker.Docker()
-
- # 检查有没有image
- image_name = await self.get_image_name()
- try:
- await docker.images.get(image_name)
- except aiodocker.exceptions.DockerError:
- # 拉取镜像
- logger.info(f"未找到沙箱镜像,正在尝试拉取 {image_name}...")
- await docker.images.pull(image_name)
+ async with aiodocker.Docker() as docker:
+ for i in range(n):
+ if i > 0:
+ logger.info(f"Try {i + 1}/{n}")
 
- yield event.plain_result(
- f"使用沙箱执行代码中,请稍等...(尝试次数: {i + 1}/{n})",
- )
+ PROMPT_ = PROMPT.format(
+ prompt=plain_text,
+ extra_input=extra_inputs,
+ extra_prompt=obs,
+ )
+ provider = self.context.get_using_provider()
+ llm_response = await provider.text_chat(
+ prompt=PROMPT_,
+ session_id=f"{event.session_id}_{magic_code}_{i!s}",
+ )
 
- self.docker_host_astrbot_abs_path = self.config.get(
- "docker_host_astrbot_abs_path",
- "",
- )
- if self.docker_host_astrbot_abs_path:
- host_shared = os.path.join(
- self.docker_host_astrbot_abs_path,
- self.shared_path,
+ logger.debug(
+ "code interpreter llm gened code:" + llm_response.completion_text,
  )
- host_output = os.path.join(
- self.docker_host_astrbot_abs_path,
- output_path,
+
+ # 整理代码并保存
+ code_clean = await self.tidy_code(llm_response.completion_text)
+ with open(os.path.join(workplace_path, "exec.py"), "w") as f:
+ f.write(code_clean)
+
+ # 检查有没有image
+ image_name = await self.get_image_name()
+ try:
+ await docker.images.get(image_name)
+ except aiodocker.exceptions.DockerError:
+ # 拉取镜像
+ logger.info(f"未找到沙箱镜像,正在尝试拉取 {image_name}...")
+ await docker.images.pull(image_name)
+
+ yield event.plain_result(
+ f"使用沙箱执行代码中,请稍等...(尝试次数: {i + 1}/{n})",
  )
- host_workplace = os.path.join(
- self.docker_host_astrbot_abs_path,
- workplace_path,
+
+ self.docker_host_astrbot_abs_path = self.config.get(
+ "docker_host_astrbot_abs_path",
+ "",
  )
+ if self.docker_host_astrbot_abs_path:
+ host_shared = os.path.join(
+ self.docker_host_astrbot_abs_path,
+ self.shared_path,
+ )
+ host_output = os.path.join(
+ self.docker_host_astrbot_abs_path,
+ output_path,
+ )
+ host_workplace = os.path.join(
+ self.docker_host_astrbot_abs_path,
+ workplace_path,
+ )
 
- else:
- host_shared = os.path.abspath(self.shared_path)
- host_output = os.path.abspath(output_path)
- host_workplace = os.path.abspath(workplace_path)
+ else:
+ host_shared = os.path.abspath(self.shared_path)
+ host_output = os.path.abspath(output_path)
+ host_workplace = os.path.abspath(workplace_path)
 
- logger.debug(
- f"host_shared: {host_shared}, host_output: {host_output}, host_workplace: {host_workplace}",
- )
+ logger.debug(
+ f"host_shared: {host_shared}, host_output: {host_output}, host_workplace: {host_workplace}",
+ )
 
- container = await docker.containers.run(
- {
- "Image": image_name,
- "Cmd": ["python", "exec.py"],
- "Memory": 512 * 1024 * 1024,
- "NanoCPUs": 1000000000,
- "HostConfig": {
- "Binds": [
- f"{host_shared}:/astrbot_sandbox/shared:ro",
- f"{host_output}:/astrbot_sandbox/output:rw",
- f"{host_workplace}:/astrbot_sandbox:rw",
- ],
+ container = await docker.containers.run(
+ {
+ "Image": image_name,
+ "Cmd": ["python", "exec.py"],
+ "Memory": 512 * 1024 * 1024,
+ "NanoCPUs": 1000000000,
+ "HostConfig": {
+ "Binds": [
+ f"{host_shared}:/astrbot_sandbox/shared:ro",
+ f"{host_output}:/astrbot_sandbox/output:rw",
+ f"{host_workplace}:/astrbot_sandbox:rw",
+ ],
+ },
+ "Env": [f"MAGIC_CODE={magic_code}"],
+ "AutoRemove": True,
  },
- "Env": [f"MAGIC_CODE={magic_code}"],
- "AutoRemove": True,
- },
- )
+ )
 
- logger.debug(f"Container {container.id} created.")
- logs = await self.run_container(container)
-
- logger.debug(f"Container {container.id} finished.")
- logger.debug(f"Container {container.id} logs: {logs}")
-
- # 发送结果
- pattern = r"\[ASTRBOT_(TEXT|IMAGE|FILE)_OUTPUT#\w+\]: (.*)"
- ok = False
- traceback = ""
- for idx, log in enumerate(logs):
- match = re.match(pattern, log)
- if match:
- ok = True
- if match.group(1) == "TEXT":
- yield event.plain_result(match.group(2))
- elif match.group(1) == "IMAGE":
- image_path = os.path.join(workplace_path, match.group(2))
- logger.debug(f"Sending image: {image_path}")
- yield event.image_result(image_path)
- elif match.group(1) == "FILE":
- file_path = os.path.join(workplace_path, match.group(2))
- # logger.debug(f"Sending file: {file_path}")
- # file_s3_url = await self.file_upload(file_path)
- # logger.info(f"文件上传到 AstrBot 云节点: {file_s3_url}")
- file_name = os.path.basename(file_path)
- chain: list[BaseMessageComponent] = [
- File(name=file_name, file=file_path)
- ]
- yield event.set_result(MessageEventResult(chain=chain))
-
- elif "Traceback (most recent call last)" in log or "[Error]: " in log:
- traceback = "\n".join(logs[idx:])
-
- if not ok:
- if traceback:
- obs = f"## Observation \n When execute the code: ```python\n{code_clean}\n```\n\n Error occurred:\n\n{traceback}\n Need to improve/fix the code."
+ logger.debug(f"Container {container.id} created.")
+ logs = await self.run_container(container)
+
+ logger.debug(f"Container {container.id} finished.")
+ logger.debug(f"Container {container.id} logs: {logs}")
+
+ # 发送结果
+ pattern = r"\[ASTRBOT_(TEXT|IMAGE|FILE)_OUTPUT#\w+\]: (.*)"
+ ok = False
+ traceback = ""
+ for idx, log in enumerate(logs):
+ match = re.match(pattern, log)
+ if match:
+ ok = True
+ if match.group(1) == "TEXT":
+ yield event.plain_result(match.group(2))
+ elif match.group(1) == "IMAGE":
+ image_path = os.path.join(workplace_path, match.group(2))
+ logger.debug(f"Sending image: {image_path}")
+ yield event.image_result(image_path)
+ elif match.group(1) == "FILE":
+ file_path = os.path.join(workplace_path, match.group(2))
+ # logger.debug(f"Sending file: {file_path}")
+ # file_s3_url = await self.file_upload(file_path)
+ # logger.info(f"文件上传到 AstrBot 云节点: {file_s3_url}")
+ file_name = os.path.basename(file_path)
+ chain: list[BaseMessageComponent] = [
+ File(name=file_name, file=file_path)
+ ]
+ yield event.set_result(MessageEventResult(chain=chain))
+
+ elif (
+ "Traceback (most recent call last)" in log or "[Error]: " in log
+ ):
+ traceback = "\n".join(logs[idx:])
+
+ if not ok:
+ if traceback:
+ obs = f"## Observation \n When execute the code: ```python\n{code_clean}\n```\n\n Error occurred:\n\n{traceback}\n Need to improve/fix the code."
+ else:
+ logger.warning(
+ f"未从沙箱输出中捕获到合法的输出。沙箱输出日志: {logs}",
+ )
+ break
  else:
- logger.warning(
- f"未从沙箱输出中捕获到合法的输出。沙箱输出日志: {logs}",
- )
- break
- else:
- # 成功了
- self.user_file_msg_buffer.pop(event.get_session_id())
- return
+ # 成功了
+ self.user_file_msg_buffer.pop(event.get_session_id())
+ return
 
  yield event.plain_result(
  "经过多次尝试后,未从沙箱输出中捕获到合法的输出,请更换问法或者查看日志。",
astrbot/cli/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "4.10.3"
+ __version__ = "4.10.5"
astrbot/core/agent/message.py CHANGED
@@ -12,7 +12,7 @@ class ContentPart(BaseModel):
 
  __content_part_registry: ClassVar[dict[str, type["ContentPart"]]] = {}
 
- type: str
+ type: Literal["text", "think", "image_url", "audio_url"]
 
  def __init_subclass__(cls, **kwargs: Any) -> None:
  super().__init_subclass__(**kwargs)
@@ -63,6 +63,28 @@ class TextPart(ContentPart):
  text: str
 
 
+ class ThinkPart(ContentPart):
+ """
+ >>> ThinkPart(think="I think I need to think about this.").model_dump()
+ {'type': 'think', 'think': 'I think I need to think about this.', 'encrypted': None}
+ """
+
+ type: str = "think"
+ think: str
+ encrypted: str | None = None
+ """Encrypted thinking content, or signature."""
+
+ def merge_in_place(self, other: Any) -> bool:
+ if not isinstance(other, ThinkPart):
+ return False
+ if self.encrypted:
+ return False
+ self.think += other.think
+ if other.encrypted:
+ self.encrypted = other.encrypted
+ return True
+
+
  class ImageURLPart(ContentPart):
  """
  >>> ImageURLPart(image_url="http://example.com/image.jpg").model_dump()
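The new ThinkPart stores a model's reasoning text alongside an optional encrypted signature, and merge_in_place concatenates streamed reasoning fragments until a signature seals the part. A small usage sketch based only on the class shown above (the import path matches the one added to tool_loop_agent_runner.py below):

```python
from astrbot.core.agent.message import ThinkPart

a = ThinkPart(think="First, restate the question. ")
b = ThinkPart(think="Then outline an answer.", encrypted="sig-123")

print(a.merge_in_place(b))  # True: `a` has no signature yet, so it absorbs `b`
print(a.think)              # "First, restate the question. Then outline an answer."
print(a.encrypted)          # "sig-123", copied from `b`

c = ThinkPart(think="More thoughts.")
print(a.merge_in_place(c))  # False: `a` is now sealed by its encrypted signature
```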
astrbot/core/agent/runners/tool_loop_agent_runner.py CHANGED
@@ -13,6 +13,7 @@ from mcp.types import (
  )
 
  from astrbot import logger
+ from astrbot.core.agent.message import TextPart, ThinkPart
  from astrbot.core.message.components import Json
  from astrbot.core.message.message_event_result import (
  MessageChain,
@@ -169,13 +170,20 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
  self.final_llm_resp = llm_resp
  self._transition_state(AgentState.DONE)
  self.stats.end_time = time.time()
+
  # record the final assistant message
- self.run_context.messages.append(
- Message(
- role="assistant",
- content=llm_resp.completion_text or "*No response*",
- ),
- )
+ parts = []
+ if llm_resp.reasoning_content or llm_resp.reasoning_signature:
+ parts.append(
+ ThinkPart(
+ think=llm_resp.reasoning_content,
+ encrypted=llm_resp.reasoning_signature,
+ )
+ )
+ parts.append(TextPart(text=llm_resp.completion_text or "*No response*"))
+ self.run_context.messages.append(Message(role="assistant", content=parts))
+
+ # call the on_agent_done hook
  try:
  await self.agent_hooks.on_agent_done(self.run_context, llm_resp)
  except Exception as e:
@@ -214,10 +222,19 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
  data=AgentResponseData(chain=result),
  )
  # 将结果添加到上下文中
+ parts = []
+ if llm_resp.reasoning_content or llm_resp.reasoning_signature:
+ parts.append(
+ ThinkPart(
+ think=llm_resp.reasoning_content,
+ encrypted=llm_resp.reasoning_signature,
+ )
+ )
+ parts.append(TextPart(text=llm_resp.completion_text or "*No response*"))
  tool_calls_result = ToolCallsResult(
  tool_calls_info=AssistantMessageSegment(
  tool_calls=llm_resp.to_openai_to_calls_model(),
- content=llm_resp.completion_text,
+ content=parts,
  ),
  tool_calls_result=tool_call_result_blocks,
  )
astrbot/core/astr_agent_hooks.py CHANGED
@@ -13,6 +13,12 @@ from astrbot.core.star.star_handler import EventType
  class MainAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
  async def on_agent_done(self, run_context, llm_response):
  # 执行事件钩子
+ if llm_response and llm_response.reasoning_content:
+ # we will use this in result_decorate stage to inject reasoning content to chain
+ run_context.context.event.set_extra(
+ "_llm_reasoning_content", llm_response.reasoning_content
+ )
+
  await call_event_hook(
  run_context.context.event,
  EventType.OnLLMResponseEvent,
astrbot/core/backup/exporter.py CHANGED
@@ -447,6 +447,7 @@ class AstrBotExporter:
  "version": BACKUP_MANIFEST_VERSION,
  "astrbot_version": VERSION,
  "exported_at": datetime.now(timezone.utc).isoformat(),
+ "origin": "exported",  # 标记备份来源:exported=本实例导出, uploaded=用户上传
  "schema_version": {
  "main_db": "v4",
  "kb_db": "v1",
astrbot/core/config/astrbot_config.py CHANGED
@@ -80,6 +80,8 @@ class AstrBotConfig(dict):
  if v["type"] == "object":
  conf[k] = {}
  _parse_schema(v["items"], conf[k])
+ elif v["type"] == "template_list":
+ conf[k] = default
  else:
  conf[k] = default
 
astrbot/core/config/default.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, TypedDict
 
  from astrbot.core.utils.astrbot_path import get_astrbot_data_path
 
- VERSION = "4.10.3"
+ VERSION = "4.10.5"
  DB_PATH = os.path.join(get_astrbot_data_path(), "data_v4.db")
 
  WEBHOOK_SUPPORTED_PLATFORMS = [
@@ -905,6 +905,7 @@ CONFIG_METADATA_2 = {
  "key": [],
  "api_base": "https://api.anthropic.com/v1",
  "timeout": 120,
+ "anth_thinking_config": {"budget": 0},
  },
  "Moonshot": {
  "id": "moonshot",
@@ -920,7 +921,7 @@ CONFIG_METADATA_2 = {
  "xAI": {
  "id": "xai",
  "provider": "xai",
- "type": "openai_chat_completion",
+ "type": "xai_chat_completion",
  "provider_type": "chat_completion",
  "enable": True,
  "key": [],
@@ -1286,7 +1287,7 @@ CONFIG_METADATA_2 = {
  "minimax-is-timber-weight": False,
  "minimax-voice-id": "female-shaonv",
  "minimax-timber-weight": '[\n {\n "voice_id": "Chinese (Mandarin)_Warm_Girl",\n "weight": 25\n },\n {\n "voice_id": "Chinese (Mandarin)_BashfulGirl",\n "weight": 50\n }\n]',
- "minimax-voice-emotion": "neutral",
+ "minimax-voice-emotion": "auto",
  "minimax-voice-latex": False,
  "minimax-voice-english-normalization": False,
  "timeout": 20,
@@ -1450,7 +1451,32 @@ CONFIG_METADATA_2 = {
  "description": "自定义请求体参数",
  "type": "dict",
  "items": {},
- "hint": "此处添加的键值对将被合并到发送给 API 的 extra_body 中。值可以是字符串、数字或布尔值。",
+ "hint": "用于在请求时添加额外的参数,如 temperature、top_p、max_tokens 等。",
+ "template_schema": {
+ "temperature": {
+ "name": "Temperature",
+ "description": "温度参数",
+ "hint": "控制输出的随机性,范围通常为 0-2。值越高越随机。",
+ "type": "float",
+ "default": 0.6,
+ "slider": {"min": 0, "max": 2, "step": 0.1},
+ },
+ "top_p": {
+ "name": "Top-p",
+ "description": "Top-p 采样",
+ "hint": "核采样参数,范围通常为 0-1。控制模型考虑的概率质量。",
+ "type": "float",
+ "default": 1.0,
+ "slider": {"min": 0, "max": 1, "step": 0.01},
+ },
+ "max_tokens": {
+ "name": "Max Tokens",
+ "description": "最大令牌数",
+ "hint": "生成的最大令牌数。",
+ "type": "int",
+ "default": 8192,
+ },
+ },
  },
  "provider": {
  "type": "string",
@@ -1787,6 +1813,17 @@ CONFIG_METADATA_2 = {
  },
  },
  },
+ "anth_thinking_config": {
+ "description": "Thinking Config",
+ "type": "object",
+ "items": {
+ "budget": {
+ "description": "Thinking Budget",
+ "type": "int",
+ "hint": "Anthropic thinking.budget_tokens param. Must >= 1024. See: https://platform.claude.com/docs/en/build-with-claude/extended-thinking",
+ },
+ },
+ },
  "minimax-group-id": {
  "type": "string",
  "description": "用户组",
@@ -1858,15 +1895,18 @@ CONFIG_METADATA_2 = {
  "minimax-voice-emotion": {
  "type": "string",
  "description": "情绪",
- "hint": "控制合成语音的情绪",
+ "hint": "控制合成语音的情绪。当为 auto 时,将根据文本内容自动选择情绪。",
  "options": [
+ "auto",
  "happy",
  "sad",
  "angry",
  "fearful",
  "disgusted",
  "surprised",
- "neutral",
+ "calm",
+ "fluent",
+ "whisper",
  ],
  },
  "minimax-voice-latex": {
@@ -3049,4 +3089,5 @@ DEFAULT_VALUE_MAP = {
  "text": "",
  "list": [],
  "object": {},
+ "template_list": [],
  }
astrbot/core/knowledge_base/chunking/recursive.py CHANGED
@@ -149,8 +149,16 @@ class RecursiveCharacterChunker(BaseChunker):
  分割后的文本块列表
 
  """
- chunk_size = chunk_size or self.chunk_size
- overlap = overlap or self.chunk_overlap
+ if chunk_size is None:
+ chunk_size = self.chunk_size
+ if overlap is None:
+ overlap = self.chunk_overlap
+ if chunk_size <= 0:
+ raise ValueError("chunk_size must be greater than 0")
+ if overlap < 0:
+ raise ValueError("chunk_overlap must be non-negative")
+ if overlap >= chunk_size:
+ raise ValueError("chunk_overlap must be less than chunk_size")
  result = []
  for i in range(0, len(text), chunk_size - overlap):
  end = min(i + chunk_size, len(text))
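A standalone sketch of the sliding-window split with the new guards (the function name is illustrative, not the package API). The overlap < chunk_size check matters because each window advances by chunk_size - overlap characters, so an overlap equal to or larger than the chunk size would make the step zero (range() raises ValueError) or negative (no chunks would be produced at all):

```python
def split_text(text: str, chunk_size: int, overlap: int) -> list[str]:
    # Mirrors the validation added in the diff above.
    if chunk_size <= 0:
        raise ValueError("chunk_size must be greater than 0")
    if overlap < 0:
        raise ValueError("chunk_overlap must be non-negative")
    if overlap >= chunk_size:
        raise ValueError("chunk_overlap must be less than chunk_size")

    result = []
    # Each window starts (chunk_size - overlap) characters after the previous one.
    for i in range(0, len(text), chunk_size - overlap):
        end = min(i + chunk_size, len(text))
        result.append(text[i:end])
    return result


print(split_text("abcdefghij", chunk_size=4, overlap=2))
# ['abcd', 'cdef', 'efgh', 'ghij', 'ij']
```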