AstrBot 4.9.2__py3-none-any.whl → 4.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astrbot/cli/__init__.py +1 -1
- astrbot/core/agent/message.py +6 -4
- astrbot/core/agent/response.py +22 -1
- astrbot/core/agent/run_context.py +1 -1
- astrbot/core/agent/runners/tool_loop_agent_runner.py +99 -20
- astrbot/core/astr_agent_context.py +3 -1
- astrbot/core/astr_agent_run_util.py +42 -3
- astrbot/core/astr_agent_tool_exec.py +34 -4
- astrbot/core/config/default.py +127 -184
- astrbot/core/core_lifecycle.py +3 -0
- astrbot/core/db/__init__.py +72 -0
- astrbot/core/db/po.py +59 -0
- astrbot/core/db/sqlite.py +240 -0
- astrbot/core/message/components.py +4 -5
- astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal.py +6 -1
- astrbot/core/pipeline/respond/stage.py +1 -1
- astrbot/core/platform/sources/telegram/tg_event.py +9 -0
- astrbot/core/platform/sources/webchat/webchat_event.py +22 -18
- astrbot/core/provider/entities.py +41 -0
- astrbot/core/provider/manager.py +203 -93
- astrbot/core/provider/sources/anthropic_source.py +55 -11
- astrbot/core/provider/sources/gemini_source.py +84 -33
- astrbot/core/provider/sources/openai_source.py +21 -6
- astrbot/core/star/command_management.py +449 -0
- astrbot/core/star/context.py +4 -0
- astrbot/core/star/filter/command.py +1 -0
- astrbot/core/star/filter/command_group.py +1 -0
- astrbot/core/star/star_handler.py +4 -0
- astrbot/core/star/star_manager.py +2 -0
- astrbot/core/utils/llm_metadata.py +63 -0
- astrbot/core/utils/migra_helper.py +93 -0
- astrbot/dashboard/routes/__init__.py +2 -0
- astrbot/dashboard/routes/chat.py +56 -13
- astrbot/dashboard/routes/command.py +82 -0
- astrbot/dashboard/routes/config.py +291 -33
- astrbot/dashboard/routes/stat.py +96 -0
- astrbot/dashboard/routes/tools.py +20 -4
- astrbot/dashboard/server.py +1 -0
- {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/METADATA +2 -2
- {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/RECORD +43 -40
- {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/WHEEL +0 -0
- {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/entry_points.txt +0 -0
- {astrbot-4.9.2.dist-info → astrbot-4.10.0.dist-info}/licenses/LICENSE +0 -0
astrbot/cli/__init__.py
CHANGED

@@ -1 +1 @@
-__version__ = "4.9.2"
+__version__ = "4.10.0"
astrbot/core/agent/message.py
CHANGED

@@ -3,7 +3,7 @@

 from typing import Any, ClassVar, Literal, cast

-from pydantic import BaseModel, GetCoreSchemaHandler, model_validator
+from pydantic import BaseModel, GetCoreSchemaHandler, model_serializer, model_validator
 from pydantic_core import core_schema


@@ -122,10 +122,12 @@ class ToolCall(BaseModel):
     extra_content: dict[str, Any] | None = None
     """Extra metadata for the tool call."""

-
+    @model_serializer(mode="wrap")
+    def serialize(self, handler):
+        data = handler(self)
         if self.extra_content is None:
-
-            return
+            data.pop("extra_content", None)
+        return data


 class ToolCallPart(BaseModel):
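For context, the serializer added to ToolCall follows pydantic's wrap-mode pattern: run the default serialization first, then prune the key when the field is None. A minimal standalone sketch (the model and field names below are illustrative, not AstrBot's real class):

```python
# Sketch of a wrap-mode model_serializer that drops a None field from the dump.
from typing import Any

from pydantic import BaseModel, model_serializer


class ToolCallSketch(BaseModel):
    id: str
    name: str
    extra_content: dict[str, Any] | None = None

    @model_serializer(mode="wrap")
    def serialize(self, handler):
        data = handler(self)  # default field-by-field serialization
        if self.extra_content is None:
            data.pop("extra_content", None)  # omit the key entirely
        return data


print(ToolCallSketch(id="1", name="search").model_dump())
# -> {'id': '1', 'name': 'search'}
```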
astrbot/core/agent/response.py
CHANGED

@@ -1,7 +1,8 @@
 import typing as T
-from dataclasses import dataclass
+from dataclasses import dataclass, field

 from astrbot.core.message.message_event_result import MessageChain
+from astrbot.core.provider.entities import TokenUsage


 class AgentResponseData(T.TypedDict):

@@ -12,3 +13,23 @@ class AgentResponseData(T.TypedDict):
 class AgentResponse:
     type: str
     data: AgentResponseData
+
+
+@dataclass
+class AgentStats:
+    token_usage: TokenUsage = field(default_factory=TokenUsage)
+    start_time: float = 0.0
+    end_time: float = 0.0
+    time_to_first_token: float = 0.0
+
+    @property
+    def duration(self) -> float:
+        return self.end_time - self.start_time
+
+    def to_dict(self) -> dict:
+        return {
+            "token_usage": self.token_usage.__dict__,
+            "start_time": self.start_time,
+            "end_time": self.end_time,
+            "time_to_first_token": self.time_to_first_token,
+        }
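A rough sketch of how the runner populates the new AgentStats over a run: start_time in reset, time_to_first_token on the first streamed chunk, end_time when the loop ends, then to_dict() is shipped to webchat as a Json component. TokenUsage below is a stand-in for astrbot.core.provider.entities.TokenUsage; its real field names may differ:

```python
# Illustrative timeline for AgentStats; TokenUsage is a placeholder type.
import time
from dataclasses import dataclass, field


@dataclass
class TokenUsage:  # placeholder; the real class also supports "+=" per the diff
    prompt_tokens: int = 0
    completion_tokens: int = 0


@dataclass
class AgentStats:
    token_usage: TokenUsage = field(default_factory=TokenUsage)
    start_time: float = 0.0
    end_time: float = 0.0
    time_to_first_token: float = 0.0

    @property
    def duration(self) -> float:
        return self.end_time - self.start_time

    def to_dict(self) -> dict:
        return {
            "token_usage": self.token_usage.__dict__,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "time_to_first_token": self.time_to_first_token,
        }


stats = AgentStats()
stats.start_time = time.time()            # set in the runner's reset()
stats.time_to_first_token = 0.12          # set on the first streaming chunk
stats.end_time = stats.start_time + 1.5   # set when the agent reaches DONE/ERROR
print(stats.duration)                     # ~1.5
print(stats.to_dict()["token_usage"])     # {'prompt_tokens': 0, 'completion_tokens': 0}
```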
astrbot/core/agent/run_context.py
CHANGED

@@ -9,7 +9,7 @@ from .message import Message
 TContext = TypeVar("TContext", default=Any)


-@dataclass
+@dataclass
 class ContextWrapper(Generic[TContext]):
     """A context for running an agent, which can be used to pass additional data or state."""

astrbot/core/agent/runners/tool_loop_agent_runner.py
CHANGED

@@ -1,4 +1,5 @@
 import sys
+import time
 import traceback
 import typing as T

@@ -12,6 +13,7 @@ from mcp.types import (
 )

 from astrbot import logger
+from astrbot.core.message.components import Json
 from astrbot.core.message.message_event_result import (
     MessageChain,
 )

@@ -24,7 +26,7 @@ from astrbot.core.provider.provider import Provider

 from ..hooks import BaseAgentRunHooks
 from ..message import AssistantMessageSegment, Message, ToolCallMessageSegment
-from ..response import AgentResponseData
+from ..response import AgentResponseData, AgentStats
 from ..run_context import ContextWrapper, TContext
 from ..tool_executor import BaseFunctionToolExecutor
 from .base import AgentResponse, AgentState, BaseAgentRunner

@@ -69,14 +71,24 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
         )
         self.run_context.messages = messages

+        self.stats = AgentStats()
+        self.stats.start_time = time.time()
+
     async def _iter_llm_responses(self) -> T.AsyncGenerator[LLMResponse, None]:
         """Yields chunks *and* a final LLMResponse."""
+        payload = {
+            "contexts": self.run_context.messages,
+            "func_tool": self.req.func_tool,
+            "model": self.req.model,  # NOTE: in fact, this arg is None in most cases
+            "session_id": self.req.session_id,
+        }
+
         if self.streaming:
-            stream = self.provider.text_chat_stream(**
+            stream = self.provider.text_chat_stream(**payload)
             async for resp in stream:  # type: ignore
                 yield resp
         else:
-            yield await self.provider.text_chat(**
+            yield await self.provider.text_chat(**payload)

     @override
     async def step(self):

@@ -98,6 +110,10 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):

         async for llm_response in self._iter_llm_responses():
             if llm_response.is_chunk:
+                # update ttft
+                if self.stats.time_to_first_token == 0:
+                    self.stats.time_to_first_token = time.time() - self.stats.start_time
+
                 if llm_response.result_chain:
                     yield AgentResponse(
                         type="streaming_delta",

@@ -121,6 +137,10 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                 )
                 continue
             llm_resp_result = llm_response
+
+            if not llm_response.is_chunk and llm_response.usage:
+                # only count the token usage of the final response for computation purpose
+                self.stats.token_usage += llm_response.usage
             break  # got final response

         if not llm_resp_result:

@@ -132,6 +152,7 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
         if llm_resp.role == "err":
             # 如果 LLM 响应错误,转换到错误状态
             self.final_llm_resp = llm_resp
+            self.stats.end_time = time.time()
             self._transition_state(AgentState.ERROR)
             yield AgentResponse(
                 type="err",

@@ -146,11 +167,12 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
             # 如果没有工具调用,转换到完成状态
             self.final_llm_resp = llm_resp
             self._transition_state(AgentState.DONE)
+            self.stats.end_time = time.time()
             # record the final assistant message
             self.run_context.messages.append(
                 Message(
                     role="assistant",
-                    content=llm_resp.completion_text or "",
+                    content=llm_resp.completion_text or "*No response*",
                 ),
             )
             try:

@@ -175,22 +197,19 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
         # 如果有工具调用,还需处理工具调用
         if llm_resp.tools_call_name:
             tool_call_result_blocks = []
-            for tool_call_name in llm_resp.tools_call_name:
-                yield AgentResponse(
-                    type="tool_call",
-                    data=AgentResponseData(
-                        chain=MessageChain(type="tool_call").message(
-                            f"🔨 调用工具: {tool_call_name}"
-                        ),
-                    ),
-                )
             async for result in self._handle_function_tools(self.req, llm_resp):
                 if isinstance(result, list):
                     tool_call_result_blocks = result
                 elif isinstance(result, MessageChain):
-                    result.type
+                    if result.type is None:
+                        # should not happen
+                        continue
+                    if result.type == "tool_direct_result":
+                        ar_type = "tool_call_result"
+                    else:
+                        ar_type = result.type
                     yield AgentResponse(
-                        type=
+                        type=ar_type,
                         data=AgentResponseData(chain=result),
                     )
                     # 将结果添加到上下文中

@@ -218,6 +237,25 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
         async for resp in self.step():
             yield resp

+        # 如果循环结束了但是 agent 还没有完成,说明是达到了 max_step
+        if not self.done():
+            logger.warning(
+                f"Agent reached max steps ({max_step}), forcing a final response."
+            )
+            # 拔掉所有工具
+            if self.req:
+                self.req.func_tool = None
+            # 注入提示词
+            self.run_context.messages.append(
+                Message(
+                    role="user",
+                    content="工具调用次数已达到上限,请停止使用工具,并根据已经收集到的信息,对你的任务和发现进行总结,然后直接回复用户。",
+                )
+            )
+            # 再执行最后一步
+            async for resp in self.step():
+                yield resp
+
     async def _handle_function_tools(
         self,
         req: ProviderRequest,

@@ -233,6 +271,19 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
             llm_response.tools_call_args,
             llm_response.tools_call_ids,
         ):
+            yield MessageChain(
+                type="tool_call",
+                chain=[
+                    Json(
+                        data={
+                            "id": func_tool_id,
+                            "name": func_tool_name,
+                            "args": func_tool_args,
+                            "ts": time.time(),
+                        }
+                    )
+                ],
+            )
             try:
                 if not req.func_tool:
                     return

@@ -306,7 +357,6 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                             content=res.content[0].text,
                         ),
                     )
-                    yield MessageChain().message(res.content[0].text)
                 elif isinstance(res.content[0], ImageContent):
                     tool_call_result_blocks.append(
                         ToolCallMessageSegment(

@@ -328,7 +378,6 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                                 content=resource.text,
                             ),
                         )
-                        yield MessageChain().message(resource.text)
                     elif (
                         isinstance(resource, BlobResourceContents)
                         and resource.mimeType

@@ -352,20 +401,34 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                         content="返回的数据类型不受支持",
                     ),
                 )
-                yield MessageChain().message("返回的数据类型不受支持。")

             elif resp is None:
                 # Tool 直接请求发送消息给用户
                 # 这里我们将直接结束 Agent Loop。
                 # 发送消息逻辑在 ToolExecutor 中处理了。
                 logger.warning(
-                    f"{func_tool_name}
+                    f"{func_tool_name} 没有没有返回值或者将结果直接发送给用户。"
                 )
                 self._transition_state(AgentState.DONE)
+                self.stats.end_time = time.time()
+                tool_call_result_blocks.append(
+                    ToolCallMessageSegment(
+                        role="tool",
+                        tool_call_id=func_tool_id,
+                        content="*工具没有返回值或者将结果直接发送给了用户*",
+                    ),
+                )
             else:
                 # 不应该出现其他类型
                 logger.warning(
-                    f"Tool 返回了不支持的类型: {type(resp)}
+                    f"Tool 返回了不支持的类型: {type(resp)}。",
+                )
+                tool_call_result_blocks.append(
+                    ToolCallMessageSegment(
+                        role="tool",
+                        tool_call_id=func_tool_id,
+                        content="*工具返回了不支持的类型,请告诉用户检查这个工具的定义和实现。*",
+                    ),
                 )

             try:

@@ -387,6 +450,22 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                 ),
             )

+        # yield the last tool call result
+        if tool_call_result_blocks:
+            last_tcr_content = str(tool_call_result_blocks[-1].content)
+            yield MessageChain(
+                type="tool_call_result",
+                chain=[
+                    Json(
+                        data={
+                            "id": func_tool_id,
+                            "ts": time.time(),
+                            "result": last_tcr_content,
+                        }
+                    )
+                ],
+            )
+
         # 处理函数调用响应
         if tool_call_result_blocks:
             yield tool_call_result_blocks
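The max-step fallback added above (and mirrored in astr_agent_run_util.py) boils down to: if the loop ends while the agent is still not done, drop the tools, append a "summarize and answer" user message, and run one final step. A condensed, self-contained sketch with stand-in types (Agent and Message here are simplified placeholders, not the runner's real classes):

```python
# Sketch of the "force a final response at max steps" pattern.
import asyncio
from dataclasses import dataclass, field


@dataclass
class Message:
    role: str
    content: str


@dataclass
class Agent:
    messages: list[Message] = field(default_factory=list)
    tools: list[str] | None = None
    _done: bool = False

    def done(self) -> bool:
        return self._done

    async def step(self):
        # Real runner: call the LLM, run requested tools; done once no tool is called.
        if self.tools:
            yield "tool_call"
        else:
            self._done = True
            yield "final_answer"


async def run(agent: Agent, max_step: int = 3) -> None:
    for _ in range(max_step):
        async for resp in agent.step():
            print(resp)
        if agent.done():
            break
    if not agent.done():
        # Max steps reached: unplug tools, ask for a plain-text summary, run one last step.
        agent.tools = None
        agent.messages.append(
            Message(role="user", content="Tool budget exhausted; summarize and reply directly.")
        )
        async for resp in agent.step():
            print(resp)


asyncio.run(run(Agent(tools=["search"])))
# prints: tool_call, tool_call, tool_call, final_answer
```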
astrbot/core/astr_agent_context.py
CHANGED

@@ -6,8 +6,10 @@ from astrbot.core.platform.astr_message_event import AstrMessageEvent
 from astrbot.core.star.context import Context


-@dataclass
+@dataclass
 class AstrAgentContext:
+    __pydantic_config__ = {"arbitrary_types_allowed": True}
+
     context: Context
     """The star context instance"""
     event: AstrMessageEvent
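The `__pydantic_config__` line is presumably needed because AstrAgentContext is a plain dataclass whose fields (Context, AstrMessageEvent) are not pydantic models, so pydantic schema generation only accepts them with arbitrary_types_allowed. A minimal illustration with a stand-in field type:

```python
# Why a plain dataclass may need __pydantic_config__ when validated by pydantic.
from dataclasses import dataclass

from pydantic import TypeAdapter


class Engine:  # stand-in for a non-pydantic type such as Context
    pass


@dataclass
class Car:
    __pydantic_config__ = {"arbitrary_types_allowed": True}

    engine: Engine


# Without the config attribute, TypeAdapter(Car) raises a schema-generation error.
car = TypeAdapter(Car).validate_python({"engine": Engine()})
print(type(car.engine).__name__)  # Engine
```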
astrbot/core/astr_agent_run_util.py
CHANGED

@@ -2,8 +2,10 @@ import traceback
 from collections.abc import AsyncGenerator

 from astrbot.core import logger
+from astrbot.core.agent.message import Message
 from astrbot.core.agent.runners.tool_loop_agent_runner import ToolLoopAgentRunner
 from astrbot.core.astr_agent_context import AstrAgentContext
+from astrbot.core.message.components import Json
 from astrbot.core.message.message_event_result import (
     MessageChain,
     MessageEventResult,

@@ -23,8 +25,25 @@ async def run_agent(
 ) -> AsyncGenerator[MessageChain | None, None]:
     step_idx = 0
     astr_event = agent_runner.run_context.context.event
-    while step_idx < max_step:
+    while step_idx < max_step + 1:
         step_idx += 1
+
+        if step_idx == max_step + 1:
+            logger.warning(
+                f"Agent reached max steps ({max_step}), forcing a final response."
+            )
+            if not agent_runner.done():
+                # 拔掉所有工具
+                if agent_runner.req:
+                    agent_runner.req.func_tool = None
+                # 注入提示词
+                agent_runner.run_context.messages.append(
+                    Message(
+                        role="user",
+                        content="工具调用次数已达到上限,请停止使用工具,并根据已经收集到的信息,对你的任务和发现进行总结,然后直接回复用户。",
+                    )
+                )
+
         try:
             async for resp in agent_runner.step():
                 if astr_event.is_stopped():

@@ -33,16 +52,27 @@ async def run_agent(
                 msg_chain = resp.data["chain"]
                 if msg_chain.type == "tool_direct_result":
                     # tool_direct_result 用于标记 llm tool 需要直接发送给用户的内容
-                    await astr_event.send(
+                    await astr_event.send(msg_chain)
                     continue
+                if astr_event.get_platform_id() == "webchat":
+                    await astr_event.send(msg_chain)
                 # 对于其他情况,暂时先不处理
                 continue
             elif resp.type == "tool_call":
                 if agent_runner.streaming:
                     # 用来标记流式响应需要分节
                     yield MessageChain(chain=[], type="break")
-
+
+                if astr_event.get_platform_name() == "webchat":
                     await astr_event.send(resp.data["chain"])
+                elif show_tool_use:
+                    json_comp = resp.data["chain"].chain[0]
+                    if isinstance(json_comp, Json):
+                        m = f"🔨 调用工具: {json_comp.data.get('name')}"
+                    else:
+                        m = "🔨 调用工具..."
+                    chain = MessageChain(type="tool_call").message(m)
+                    await astr_event.send(chain)
                 continue

             if stream_to_general and resp.type == "streaming_delta":

@@ -69,6 +99,15 @@ async def run_agent(
                 continue
             yield resp.data["chain"]  # MessageChain
             if agent_runner.done():
+                # send agent stats to webchat
+                if astr_event.get_platform_name() == "webchat":
+                    await astr_event.send(
+                        MessageChain(
+                            type="agent_stats",
+                            chain=[Json(data=agent_runner.stats.to_dict())],
+                        )
+                    )
+
                 break

         except Exception as e:
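For non-webchat platforms with show_tool_use enabled, the tool_call chain now carries a Json component with id/name/args/ts, and only the tool name is surfaced to the user. A small sketch of that rendering branch (Json and the tool name below are stand-ins, not the real astrbot.core.message.components.Json or a real tool):

```python
# Sketch of rendering the new tool_call Json payload into a short user-facing notice.
import time
from dataclasses import dataclass


@dataclass
class Json:  # stand-in: the real component also exposes a `data` dict
    data: dict


def render_tool_call(comp: object) -> str:
    # Mirrors the show_tool_use branch: prefer the tool name from the Json payload.
    if isinstance(comp, Json):
        return f"🔨 调用工具: {comp.data.get('name')}"
    return "🔨 调用工具..."


payload = Json(data={"id": "call_1", "name": "web_search", "args": {"q": "astrbot"}, "ts": time.time()})
print(render_tool_call(payload))  # 🔨 调用工具: web_search
```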
astrbot/core/astr_agent_tool_exec.py
CHANGED

@@ -209,12 +209,42 @@ async def call_local_llm_tool(
            else:
                raise ValueError(f"未知的方法名: {method_name}")
        except ValueError as e:
-
-            except TypeError:
-
+            raise Exception(f"Tool execution ValueError: {e}") from e
+        except TypeError as e:
+            # 获取函数的签名(包括类型),除了第一个 event/context 参数。
+            try:
+                sig = inspect.signature(handler)
+                params = list(sig.parameters.values())
+                # 跳过第一个参数(event 或 context)
+                if params:
+                    params = params[1:]
+
+                param_strs = []
+                for param in params:
+                    param_str = param.name
+                    if param.annotation != inspect.Parameter.empty:
+                        # 获取类型注解的字符串表示
+                        if isinstance(param.annotation, type):
+                            type_str = param.annotation.__name__
+                        else:
+                            type_str = str(param.annotation)
+                        param_str += f": {type_str}"
+                    if param.default != inspect.Parameter.empty:
+                        param_str += f" = {param.default!r}"
+                    param_strs.append(param_str)
+
+                handler_param_str = (
+                    ", ".join(param_strs) if param_strs else "(no additional parameters)"
+                )
+            except Exception:
+                handler_param_str = "(unable to inspect signature)"
+
+            raise Exception(
+                f"Tool handler parameter mismatch, please check the handler definition. Handler parameters: {handler_param_str}"
+            ) from e
        except Exception as e:
            trace_ = traceback.format_exc()
-
+            raise Exception(f"Tool execution error: {e}. Traceback: {trace_}") from e

    if not ready_to_call:
        return