agentrun-sdk 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentrun/__init__.py +209 -0
- agentrun/agent_runtime/__client_async_template.py +466 -0
- agentrun/agent_runtime/__endpoint_async_template.py +345 -0
- agentrun/agent_runtime/__init__.py +53 -0
- agentrun/agent_runtime/__runtime_async_template.py +477 -0
- agentrun/agent_runtime/api/__data_async_template.py +58 -0
- agentrun/agent_runtime/api/__init__.py +6 -0
- agentrun/agent_runtime/api/control.py +1362 -0
- agentrun/agent_runtime/api/data.py +98 -0
- agentrun/agent_runtime/client.py +868 -0
- agentrun/agent_runtime/endpoint.py +649 -0
- agentrun/agent_runtime/model.py +362 -0
- agentrun/agent_runtime/runtime.py +904 -0
- agentrun/credential/__client_async_template.py +177 -0
- agentrun/credential/__credential_async_template.py +216 -0
- agentrun/credential/__init__.py +28 -0
- agentrun/credential/api/__init__.py +5 -0
- agentrun/credential/api/control.py +606 -0
- agentrun/credential/client.py +319 -0
- agentrun/credential/credential.py +381 -0
- agentrun/credential/model.py +248 -0
- agentrun/integration/__init__.py +21 -0
- agentrun/integration/agentscope/__init__.py +12 -0
- agentrun/integration/agentscope/adapter.py +17 -0
- agentrun/integration/agentscope/builtin.py +65 -0
- agentrun/integration/agentscope/message_adapter.py +185 -0
- agentrun/integration/agentscope/model_adapter.py +60 -0
- agentrun/integration/agentscope/tool_adapter.py +59 -0
- agentrun/integration/builtin/__init__.py +16 -0
- agentrun/integration/builtin/model.py +97 -0
- agentrun/integration/builtin/sandbox.py +276 -0
- agentrun/integration/builtin/toolset.py +47 -0
- agentrun/integration/crewai/__init__.py +12 -0
- agentrun/integration/crewai/adapter.py +9 -0
- agentrun/integration/crewai/builtin.py +65 -0
- agentrun/integration/crewai/model_adapter.py +27 -0
- agentrun/integration/crewai/tool_adapter.py +26 -0
- agentrun/integration/google_adk/__init__.py +12 -0
- agentrun/integration/google_adk/adapter.py +15 -0
- agentrun/integration/google_adk/builtin.py +65 -0
- agentrun/integration/google_adk/message_adapter.py +144 -0
- agentrun/integration/google_adk/model_adapter.py +43 -0
- agentrun/integration/google_adk/tool_adapter.py +25 -0
- agentrun/integration/langchain/__init__.py +9 -0
- agentrun/integration/langchain/adapter.py +15 -0
- agentrun/integration/langchain/builtin.py +71 -0
- agentrun/integration/langchain/message_adapter.py +141 -0
- agentrun/integration/langchain/model_adapter.py +37 -0
- agentrun/integration/langchain/tool_adapter.py +50 -0
- agentrun/integration/langgraph/__init__.py +13 -0
- agentrun/integration/langgraph/adapter.py +20 -0
- agentrun/integration/langgraph/builtin.py +65 -0
- agentrun/integration/pydantic_ai/__init__.py +12 -0
- agentrun/integration/pydantic_ai/adapter.py +13 -0
- agentrun/integration/pydantic_ai/builtin.py +65 -0
- agentrun/integration/pydantic_ai/model_adapter.py +44 -0
- agentrun/integration/pydantic_ai/tool_adapter.py +19 -0
- agentrun/integration/utils/__init__.py +112 -0
- agentrun/integration/utils/adapter.py +167 -0
- agentrun/integration/utils/canonical.py +157 -0
- agentrun/integration/utils/converter.py +134 -0
- agentrun/integration/utils/model.py +107 -0
- agentrun/integration/utils/tool.py +1714 -0
- agentrun/model/__client_async_template.py +357 -0
- agentrun/model/__init__.py +57 -0
- agentrun/model/__model_proxy_async_template.py +270 -0
- agentrun/model/__model_service_async_template.py +267 -0
- agentrun/model/api/__init__.py +6 -0
- agentrun/model/api/control.py +1173 -0
- agentrun/model/api/data.py +196 -0
- agentrun/model/client.py +674 -0
- agentrun/model/model.py +218 -0
- agentrun/model/model_proxy.py +439 -0
- agentrun/model/model_service.py +438 -0
- agentrun/sandbox/__browser_sandbox_async_template.py +113 -0
- agentrun/sandbox/__client_async_template.py +466 -0
- agentrun/sandbox/__code_interpreter_sandbox_async_template.py +466 -0
- agentrun/sandbox/__init__.py +54 -0
- agentrun/sandbox/__sandbox_async_template.py +398 -0
- agentrun/sandbox/__template_async_template.py +150 -0
- agentrun/sandbox/api/__browser_data_async_template.py +140 -0
- agentrun/sandbox/api/__code_interpreter_data_async_template.py +206 -0
- agentrun/sandbox/api/__init__.py +17 -0
- agentrun/sandbox/api/__sandbox_data_async_template.py +100 -0
- agentrun/sandbox/api/browser_data.py +172 -0
- agentrun/sandbox/api/code_interpreter_data.py +396 -0
- agentrun/sandbox/api/control.py +1051 -0
- agentrun/sandbox/api/playwright_async.py +492 -0
- agentrun/sandbox/api/playwright_sync.py +492 -0
- agentrun/sandbox/api/sandbox_data.py +140 -0
- agentrun/sandbox/browser_sandbox.py +191 -0
- agentrun/sandbox/client.py +878 -0
- agentrun/sandbox/code_interpreter_sandbox.py +829 -0
- agentrun/sandbox/model.py +269 -0
- agentrun/sandbox/sandbox.py +737 -0
- agentrun/sandbox/template.py +215 -0
- agentrun/server/__init__.py +82 -0
- agentrun/server/invoker.py +131 -0
- agentrun/server/model.py +225 -0
- agentrun/server/openai_protocol.py +798 -0
- agentrun/server/protocol.py +96 -0
- agentrun/server/server.py +192 -0
- agentrun/toolset/__client_async_template.py +62 -0
- agentrun/toolset/__init__.py +51 -0
- agentrun/toolset/__toolset_async_template.py +204 -0
- agentrun/toolset/api/__init__.py +17 -0
- agentrun/toolset/api/control.py +262 -0
- agentrun/toolset/api/mcp.py +100 -0
- agentrun/toolset/api/openapi.py +1184 -0
- agentrun/toolset/client.py +102 -0
- agentrun/toolset/model.py +160 -0
- agentrun/toolset/toolset.py +271 -0
- agentrun/utils/__data_api_async_template.py +715 -0
- agentrun/utils/__init__.py +5 -0
- agentrun/utils/__resource_async_template.py +158 -0
- agentrun/utils/config.py +258 -0
- agentrun/utils/control_api.py +78 -0
- agentrun/utils/data_api.py +1110 -0
- agentrun/utils/exception.py +149 -0
- agentrun/utils/helper.py +34 -0
- agentrun/utils/log.py +77 -0
- agentrun/utils/model.py +168 -0
- agentrun/utils/resource.py +291 -0
- agentrun_sdk-0.0.4.dist-info/METADATA +262 -0
- agentrun_sdk-0.0.4.dist-info/RECORD +128 -0
- agentrun_sdk-0.0.4.dist-info/WHEEL +5 -0
- agentrun_sdk-0.0.4.dist-info/licenses/LICENSE +201 -0
- agentrun_sdk-0.0.4.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"""Google ADK 消息适配器 / Google ADK Message Adapter
|
|
2
|
+
|
|
3
|
+
将 Google ADK LlmRequest 转换为标准格式,供 ModelAdapter 内部使用。
|
|
4
|
+
Converts Google ADK LlmRequest to canonical format for internal use by ModelAdapter.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
from typing import Any, List
|
|
9
|
+
|
|
10
|
+
from agentrun.integration.utils.adapter import MessageAdapter
|
|
11
|
+
from agentrun.integration.utils.canonical import (
|
|
12
|
+
CanonicalMessage,
|
|
13
|
+
CanonicalToolCall,
|
|
14
|
+
MessageRole,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class GoogleADKMessageAdapter(MessageAdapter):
    """Google ADK message adapter.

    Implements conversion from a Google ADK ``LlmRequest`` (or a plain list
    of ``Content`` objects) to the canonical message format used internally
    by ``ModelAdapter``.
    """

    def to_canonical(self, messages: Any) -> List[CanonicalMessage]:
        """Convert Google ADK messages to the canonical format.

        Args:
            messages: An ``LlmRequest`` (carrying ``contents`` and optionally
                ``config.system_instruction``), a list of ``Content`` objects,
                or a single message-like object.

        Returns:
            The equivalent list of ``CanonicalMessage`` objects.
        """
        canonical: List[CanonicalMessage] = []

        # Google ADK uses LlmRequest, which carries a list of contents.
        if hasattr(messages, "contents"):
            contents = messages.contents
        elif isinstance(messages, list):
            contents = messages
        else:
            contents = [messages]

        # Handle system_instruction (stored on the request config).
        if hasattr(messages, "config") and messages.config:
            if (
                hasattr(messages.config, "system_instruction")
                and messages.config.system_instruction
            ):
                canonical.append(
                    CanonicalMessage(
                        role=MessageRole.SYSTEM,
                        content=messages.config.system_instruction,
                    )
                )

        # Handle contents.
        for content in contents:
            # Determine the role; Google ADK uses "model" for the assistant.
            role = MessageRole.USER
            if hasattr(content, "role"):
                role_str = str(content.role).lower()
                if "model" in role_str or "assistant" in role_str:
                    role = MessageRole.ASSISTANT
                elif "system" in role_str:
                    role = MessageRole.SYSTEM
                elif "tool" in role_str or "function" in role_str:
                    role = MessageRole.TOOL
                else:
                    role = MessageRole.USER

            # Handle parts.
            if hasattr(content, "parts"):
                text_parts = []
                tool_calls = []

                for part in content.parts:
                    # Text part.
                    if hasattr(part, "text") and part.text:
                        text_parts.append(part.text)

                    # Function call part.
                    elif hasattr(part, "function_call") and part.function_call:
                        func_call = part.function_call
                        args = {}
                        if hasattr(func_call, "args"):
                            if isinstance(func_call.args, dict):
                                args = func_call.args
                            else:
                                try:
                                    args = json.loads(str(func_call.args))
                                except (json.JSONDecodeError, TypeError):
                                    args = {}

                        # BUGFIX: genai's FunctionCall defines ``id`` but it
                        # may be None, in which case getattr(..., default)
                        # returns None rather than the default. Use ``or`` so
                        # a synthetic id is used for a missing OR None id.
                        call_id = (
                            getattr(func_call, "id", None)
                            or f"call_{len(tool_calls)}"
                        )
                        tool_calls.append(
                            CanonicalToolCall(
                                id=call_id,
                                name=getattr(func_call, "name", ""),
                                arguments=args,
                            )
                        )

                    # Function response part (a tool's returned result).
                    elif (
                        hasattr(part, "function_response")
                        and part.function_response
                    ):
                        func_resp = part.function_response
                        response_content = ""
                        if hasattr(func_resp, "response"):
                            if isinstance(func_resp.response, dict):
                                response_content = json.dumps(
                                    func_resp.response
                                )
                            else:
                                response_content = str(func_resp.response)
                        else:
                            response_content = str(func_resp)

                        # A function_response becomes a TOOL-role message.
                        canonical.append(
                            CanonicalMessage(
                                role=MessageRole.TOOL,
                                content=response_content,
                                # Same None-vs-missing fallback as above.
                                tool_call_id=getattr(func_resp, "id", None)
                                or "call_0",
                            )
                        )
                        continue

                # Build the message from accumulated text and tool calls.
                if text_parts or tool_calls:
                    content_text = " ".join(text_parts) if text_parts else None
                    canonical.append(
                        CanonicalMessage(
                            role=role,
                            content=content_text,
                            tool_calls=tool_calls if tool_calls else None,
                        )
                    )
            else:
                # No parts: fall back to the string form of the content.
                content_text = str(content) if content else None
                canonical.append(
                    CanonicalMessage(role=role, content=content_text)
                )

        return canonical
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"""Google ADK 模型适配器 / Google ADK Model Adapter
|
|
2
|
+
|
|
3
|
+
将 CommonModel 包装为 Google ADK BaseLlm。"""
|
|
4
|
+
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from agentrun.integration.google_adk.message_adapter import (
|
|
8
|
+
GoogleADKMessageAdapter,
|
|
9
|
+
)
|
|
10
|
+
from agentrun.integration.utils.adapter import ModelAdapter
|
|
11
|
+
from agentrun.integration.utils.model import CommonModel
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class GoogleADKModelAdapter(ModelAdapter):
    """Google ADK model adapter.

    Wraps a ``CommonModel`` as a Google ADK ``BaseLlm`` (via ``LiteLlm``).
    """

    def __init__(self):
        """Initialize the adapter and its internal message adapter."""
        self._message_adapter = GoogleADKMessageAdapter()

    def wrap_model(self, common_model: CommonModel) -> Any:
        """Wrap a ``CommonModel`` as a Google ADK ``BaseLlm``.

        Args:
            common_model: The AgentRun model to expose to Google ADK.

        Returns:
            A ``LiteLlm`` instance configured from the model's info.

        Raises:
            ImportError: If the ``google-adk`` package is not installed.
        """
        try:
            from google.adk.models.lite_llm import LiteLlm  # type: ignore
        except ImportError as e:
            # BUGFIX: the original message read
            # "...failed.Google ADK may not installed, Install it..."
            # (missing space, broken grammar, stray comma).
            raise ImportError(
                "import google.adk.models.lite_llm failed. "
                "Google ADK may not be installed. "
                "Install it with: pip install google-adk"
            ) from e

        info = common_model.get_model_info()

        # LiteLLM expects "<provider>/<model>"; default to the
        # OpenAI-compatible provider when none is specified.
        return LiteLlm(
            model=f"{info.provider or 'openai'}/{info.model}",
            api_base=info.base_url,
            api_key=info.api_key,
            extra_headers=info.headers,
            stream_options={"include_usage": True},
        )
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""Google ADK 工具适配器 / Google ADK Tool Adapter
|
|
2
|
+
|
|
3
|
+
将标准工具定义转换为 Google ADK 函数格式。"""
|
|
4
|
+
|
|
5
|
+
from typing import List, Optional
|
|
6
|
+
|
|
7
|
+
from agentrun.integration.utils.adapter import ToolAdapter
|
|
8
|
+
from agentrun.integration.utils.canonical import CanonicalTool
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class GoogleADKToolAdapter(ToolAdapter):
    """Google ADK tool adapter.

    Converts ``CanonicalTool`` definitions into tools usable by Google ADK.
    Google ADK consumes plain Python functions as tools.
    """

    def get_registered_tool(self, name: str) -> Optional[CanonicalTool]:
        """Look up the most recently registered tool definition by name.

        Returns ``None`` when no tool with that name has been registered.
        """
        try:
            return self._registered_tools[name]
        except KeyError:
            return None

    def from_canonical(self, tools: List[CanonicalTool]):
        """Convert canonical tool definitions into Google ADK tools.

        Google ADK infers parameters from a function's type annotations, so
        annotated functions are created dynamically by ``function_tools``.
        """
        return self.function_tools(tools)
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""LangChain 适配器 / LangChain Adapters
|
|
2
|
+
|
|
3
|
+
提供 LangChain 框架的消息、工具和模型适配器。"""
|
|
4
|
+
|
|
5
|
+
from agentrun.integration.langchain.message_adapter import (
|
|
6
|
+
LangChainMessageAdapter,
|
|
7
|
+
)
|
|
8
|
+
from agentrun.integration.langchain.model_adapter import LangChainModelAdapter
|
|
9
|
+
from agentrun.integration.langchain.tool_adapter import LangChainToolAdapter
|
|
10
|
+
|
|
11
|
+
__all__ = [
|
|
12
|
+
"LangChainMessageAdapter",
|
|
13
|
+
"LangChainToolAdapter",
|
|
14
|
+
"LangChainModelAdapter",
|
|
15
|
+
]
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
"""LangChain 内置集成函数 / LangChain Built-in Integration Functions
|
|
2
|
+
|
|
3
|
+
提供快速创建 LangChain 兼容模型和工具的便捷函数。
|
|
4
|
+
Provides convenient functions for quickly creating LangChain-compatible models and tools.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Any, Callable, List, Optional, Union
|
|
8
|
+
|
|
9
|
+
from typing_extensions import Unpack
|
|
10
|
+
|
|
11
|
+
from agentrun.integration.builtin import model as _model
|
|
12
|
+
from agentrun.integration.builtin import ModelArgs
|
|
13
|
+
from agentrun.integration.builtin import sandbox_toolset as _sandbox_toolset
|
|
14
|
+
from agentrun.integration.builtin import toolset as _toolset
|
|
15
|
+
from agentrun.integration.utils.tool import Tool
|
|
16
|
+
from agentrun.model import ModelProxy, ModelService
|
|
17
|
+
from agentrun.sandbox import TemplateType
|
|
18
|
+
from agentrun.toolset import ToolSet
|
|
19
|
+
from agentrun.utils.config import Config
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def model(
    name: Union[str, ModelProxy, ModelService],
    **kwargs: Unpack[ModelArgs],
):
    """Fetch an AgentRun model and convert it to a LangChain ``BaseChatModel``.

    ``name`` may be a model name or an already-resolved ``ModelProxy`` /
    ``ModelService``; remaining keyword arguments are forwarded unchanged.
    """
    common = _model(input=name, **kwargs)
    return common.to_langchain()
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def toolset(
    name: Union[str, ToolSet],
    *,
    prefix: Optional[str] = None,
    modify_tool_name: Optional[Callable[[Tool], Tool]] = None,
    filter_tools_by_name: Optional[Callable[[str], bool]] = None,
    config: Optional[Config] = None,
) -> List[Any]:
    """Wrap a built-in tool set as a list of LangChain ``StructuredTool``s.

    ``name`` may be a tool-set name or an already-resolved ``ToolSet``; the
    optional callbacks rename and filter the exposed tools.
    """
    resolved = _toolset(input=name, config=config)
    return resolved.to_langchain(
        prefix=prefix,
        modify_tool_name=modify_tool_name,
        filter_tools_by_name=filter_tools_by_name,
    )
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def sandbox_toolset(
    template_name: str,
    *,
    template_type: TemplateType = TemplateType.CODE_INTERPRETER,
    sandbox_idle_timeout_seconds: int = 600,
    prefix: Optional[str] = None,
    modify_tool_name: Optional[Callable[[Tool], Tool]] = None,
    filter_tools_by_name: Optional[Callable[[str], bool]] = None,
    config: Optional[Config] = None,
) -> List[Any]:
    """Wrap a sandbox template as a list of LangChain ``StructuredTool``s.

    Resolves the sandbox tool set for ``template_name`` and converts it to
    LangChain tools; the optional callbacks rename and filter the tools.
    """
    # Resolve the sandbox tool set first, then convert it.
    sandbox_ts = _sandbox_toolset(
        template_name=template_name,
        template_type=template_type,
        config=config,
        sandbox_idle_timeout_seconds=sandbox_idle_timeout_seconds,
    )
    return sandbox_ts.to_langchain(
        prefix=prefix,
        modify_tool_name=modify_tool_name,
        filter_tools_by_name=filter_tools_by_name,
    )
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
"""LangChain 消息适配器 / LangChain Message Adapter
|
|
2
|
+
|
|
3
|
+
将 LangChain BaseMessage 转换为标准格式,供 ModelAdapter 内部使用。
|
|
4
|
+
Converts LangChain BaseMessage to canonical format for internal use by ModelAdapter.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
from typing import Any, List
|
|
9
|
+
|
|
10
|
+
from agentrun.integration.utils.adapter import MessageAdapter
|
|
11
|
+
from agentrun.integration.utils.canonical import (
|
|
12
|
+
CanonicalMessage,
|
|
13
|
+
CanonicalToolCall,
|
|
14
|
+
MessageRole,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class LangChainMessageAdapter(MessageAdapter):
    """LangChain message adapter.

    Implements conversion from LangChain ``BaseMessage`` objects to the
    canonical message format used internally by ``ModelAdapter``.
    """

    def to_canonical(self, messages: Any) -> List[CanonicalMessage]:
        """Convert LangChain messages to the canonical format.

        Args:
            messages: An iterable of LangChain ``BaseMessage`` instances.

        Returns:
            The equivalent list of ``CanonicalMessage`` objects.

        Raises:
            ImportError: If ``langchain-core`` is not installed.
        """
        try:
            from langchain_core.messages import (
                AIMessage,
                HumanMessage,
                SystemMessage,
                ToolMessage,
            )
        except ImportError as e:
            raise ImportError(
                "LangChain is not installed. "
                "Install it with: pip install langchain-core"
            ) from e

        canonical = []
        for msg in messages:
            if isinstance(msg, SystemMessage):
                canonical.append(
                    CanonicalMessage(
                        role=MessageRole.SYSTEM,
                        content=msg.content
                        if hasattr(msg, "content")
                        else None,
                    )
                )
            elif isinstance(msg, HumanMessage):
                canonical.append(
                    CanonicalMessage(
                        role=MessageRole.USER,
                        content=msg.content
                        if hasattr(msg, "content")
                        else None,
                    )
                )
            elif isinstance(msg, AIMessage):
                tool_calls = None
                if hasattr(msg, "tool_calls") and msg.tool_calls:
                    tool_calls = []
                    for call in msg.tool_calls:
                        # LangChain tool_calls are dicts of the shape
                        # {"id": ..., "name": ..., "args": ...}, but may also
                        # be objects exposing the same attributes.
                        call_id = (
                            call.get("id")
                            if isinstance(call, dict)
                            else getattr(call, "id", "")
                        )
                        call_name = (
                            call.get("name")
                            if isinstance(call, dict)
                            else getattr(call, "name", "")
                        )
                        call_args = (
                            call.get("args")
                            if isinstance(call, dict)
                            else getattr(call, "args", {})
                        )

                        # Arguments may arrive JSON-encoded; try to decode.
                        if isinstance(call_args, str):
                            try:
                                call_args = json.loads(call_args)
                            except json.JSONDecodeError:
                                call_args = {}

                        tool_calls.append(
                            CanonicalToolCall(
                                id=str(call_id),
                                name=str(call_name),
                                arguments=(
                                    call_args
                                    if isinstance(call_args, dict)
                                    else {}
                                ),
                            )
                        )

                canonical.append(
                    CanonicalMessage(
                        role=MessageRole.ASSISTANT,
                        content=msg.content
                        if hasattr(msg, "content")
                        else None,
                        tool_calls=tool_calls,
                    )
                )
            elif isinstance(msg, ToolMessage):
                content = msg.content
                # Idiom fix: isinstance() instead of `type(content) is not
                # str`, which also accepts str subclasses.
                if not isinstance(content, str):
                    content = str(content)

                canonical.append(
                    CanonicalMessage(
                        role=MessageRole.TOOL,
                        content=content,
                        tool_call_id=(
                            msg.tool_call_id
                            if hasattr(msg, "tool_call_id")
                            else None
                        ),
                    )
                )
            else:
                # Unknown message type: derive the role from its "type" field.
                # Robustness fix: coerce to str before .lower() so a non-str
                # "type" attribute cannot raise AttributeError.
                role_str = str(getattr(msg, "type", "user")).lower()
                if "system" in role_str:
                    role = MessageRole.SYSTEM
                elif "assistant" in role_str or "ai" in role_str:
                    role = MessageRole.ASSISTANT
                elif "tool" in role_str:
                    role = MessageRole.TOOL
                else:
                    role = MessageRole.USER

                content = getattr(msg, "content", None)
                canonical.append(CanonicalMessage(role=role, content=content))

        return canonical
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
"""LangChain 模型适配器 / LangChain Model Adapter
|
|
2
|
+
|
|
3
|
+
将 CommonModel 包装为 LangChain BaseChatModel。"""
|
|
4
|
+
|
|
5
|
+
import inspect
|
|
6
|
+
import json
|
|
7
|
+
from typing import Any, List, Optional
|
|
8
|
+
|
|
9
|
+
from agentrun.integration.langchain.message_adapter import (
|
|
10
|
+
LangChainMessageAdapter,
|
|
11
|
+
)
|
|
12
|
+
from agentrun.integration.utils.adapter import ModelAdapter
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class LangChainModelAdapter(ModelAdapter):
    """LangChain model adapter.

    Wraps a ``CommonModel`` as a LangChain ``BaseChatModel`` (``ChatOpenAI``).
    """

    def __init__(self):
        """Initialize the adapter and its internal message adapter."""
        self._message_adapter = LangChainMessageAdapter()

    def wrap_model(self, common_model: Any) -> Any:
        """Wrap a ``CommonModel`` as a LangChain ``BaseChatModel``.

        Args:
            common_model: The AgentRun model to expose to LangChain.

        Returns:
            A ``ChatOpenAI`` instance configured from the model's info.

        Raises:
            ImportError: If ``langchain-openai`` (or its ``httpx``
                dependency) is not installed.
        """
        # Consistency fix: the other adapters in this package wrap optional
        # framework imports in try/except and raise an ImportError with an
        # installation hint; do the same here instead of surfacing a bare
        # ImportError from a deep import path.
        try:
            from httpx import AsyncClient
            from langchain_openai import ChatOpenAI
        except ImportError as e:
            raise ImportError(
                "LangChain OpenAI integration is not installed. "
                "Install it with: pip install langchain-openai"
            ) from e

        info = common_model.get_model_info()  # Ensure the model is available.
        return ChatOpenAI(
            name=info.model,
            api_key=info.api_key,
            model=info.model,
            base_url=info.base_url,
            # Custom headers (e.g. auth) must ride on the async HTTP client.
            async_client=AsyncClient(headers=info.headers),
            stream_usage=True,
        )
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
"""LangChain 工具适配器 / LangChain Tool Adapter
|
|
2
|
+
|
|
3
|
+
将标准工具定义转换为 LangChain StructuredTool 格式。"""
|
|
4
|
+
|
|
5
|
+
from typing import Any, List
|
|
6
|
+
|
|
7
|
+
from agentrun.integration.utils.adapter import ToolAdapter
|
|
8
|
+
from agentrun.integration.utils.canonical import CanonicalTool
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class LangChainToolAdapter(ToolAdapter):
    """LangChain tool adapter.

    Converts ``CanonicalTool`` definitions into LangChain
    ``StructuredTool`` instances.
    """

    def from_canonical(self, tools: List[CanonicalTool]) -> Any:
        """Convert canonical tools into LangChain ``StructuredTool`` objects.

        Raises:
            ImportError: If ``langchain-core`` is not installed.
        """
        try:
            from langchain_core.tools import StructuredTool
        except ImportError as e:
            raise ImportError(
                "LangChain is not installed. "
                "Install it with: pip install langchain-core"
            ) from e

        from agentrun.integration.utils.tool import _json_schema_to_pydantic

        converted = []
        for tool in tools:
            # Build a Pydantic model from the tool's JSON Schema parameters.
            schema = _json_schema_to_pydantic(
                f"{tool.name}_Args", tool.parameters
            )
            if schema is None:
                # No usable schema: fall back to an empty argument model.
                from pydantic import create_model

                schema = create_model(f"{tool.name}_Args")

            structured = StructuredTool.from_function(
                func=tool.func,
                name=tool.name,
                description=tool.description,
                args_schema=schema,
            )
            converted.append(structured)

        return converted
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""LangGraph 集成模块。 / LangGraph 集成 Module
|
|
2
|
+
|
|
3
|
+
提供 AgentRun 模型与沙箱工具的 LangGraph 适配入口。 / Provides LangGraph adapter entry points for AgentRun models and sandbox tools.
|
|
4
|
+
LangGraph 与 LangChain 兼容,因此直接复用 LangChain 的转换逻辑。 / LangGraph is compatible with LangChain, so the LangChain conversion logic is reused directly.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .builtin import model, sandbox_toolset, toolset
|
|
8
|
+
|
|
9
|
+
__all__ = [
|
|
10
|
+
"model",
|
|
11
|
+
"toolset",
|
|
12
|
+
"sandbox_toolset",
|
|
13
|
+
]
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"""LangGraph 适配器 / LangGraph Adapters
|
|
2
|
+
|
|
3
|
+
LangGraph 与 LangChain 完全兼容,因此直接复用 LangChain 的适配器。"""
|
|
4
|
+
|
|
5
|
+
from agentrun.integration.langchain.adapter import (
|
|
6
|
+
LangChainMessageAdapter,
|
|
7
|
+
LangChainModelAdapter,
|
|
8
|
+
LangChainToolAdapter,
|
|
9
|
+
)
|
|
10
|
+
|
|
11
|
+
# LangGraph 使用与 LangChain 相同的适配器
|
|
12
|
+
LangGraphMessageAdapter = LangChainMessageAdapter
|
|
13
|
+
LangGraphToolAdapter = LangChainToolAdapter
|
|
14
|
+
LangGraphModelAdapter = LangChainModelAdapter
|
|
15
|
+
|
|
16
|
+
__all__ = [
|
|
17
|
+
"LangGraphMessageAdapter",
|
|
18
|
+
"LangGraphToolAdapter",
|
|
19
|
+
"LangGraphModelAdapter",
|
|
20
|
+
]
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
"""LangGraph 内置集成函数 / LangGraph Built-in Integration Functions
|
|
2
|
+
|
|
3
|
+
提供快速创建 LangGraph 兼容模型和工具的便捷函数。
|
|
4
|
+
Provides convenient functions for quickly creating LangGraph-compatible models and tools.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Any, Callable, List, Optional, Union
|
|
8
|
+
|
|
9
|
+
from typing_extensions import Unpack
|
|
10
|
+
|
|
11
|
+
from agentrun.integration.builtin import model as _model
|
|
12
|
+
from agentrun.integration.builtin import ModelArgs
|
|
13
|
+
from agentrun.integration.builtin import sandbox_toolset as _sandbox_toolset
|
|
14
|
+
from agentrun.integration.builtin import toolset as _toolset
|
|
15
|
+
from agentrun.integration.utils.tool import Tool
|
|
16
|
+
from agentrun.model import ModelProxy, ModelService
|
|
17
|
+
from agentrun.sandbox import TemplateType
|
|
18
|
+
from agentrun.toolset import ToolSet
|
|
19
|
+
from agentrun.utils.config import Config
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def model(
    name: Union[str, ModelProxy, ModelService],
    **kwargs: Unpack[ModelArgs],
):
    """Fetch an AgentRun model and convert it to a LangChain ``BaseChatModel``.

    LangGraph consumes LangChain chat models directly, so the conversion is
    delegated to ``to_langgraph``.
    """
    common = _model(input=name, **kwargs)  # type: ignore
    return common.to_langgraph()
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def toolset(
    name: Union[str, ToolSet],
    *,
    prefix: Optional[str] = None,
    modify_tool_name: Optional[Callable[[Tool], Tool]] = None,
    filter_tools_by_name: Optional[Callable[[str], bool]] = None,
    config: Optional[Config] = None,
) -> List[Any]:
    """Wrap a built-in tool set as a list of LangChain ``StructuredTool``s.

    ``name`` may be a tool-set name or an already-resolved ``ToolSet``; the
    optional callbacks rename and filter the exposed tools.
    """
    resolved = _toolset(input=name, config=config)
    return resolved.to_langgraph(
        prefix=prefix,
        modify_tool_name=modify_tool_name,
        filter_tools_by_name=filter_tools_by_name,
    )
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def sandbox_toolset(
    template_name: str,
    *,
    template_type: TemplateType = TemplateType.CODE_INTERPRETER,
    config: Optional[Config] = None,
    sandbox_idle_timeout_seconds: int = 600,
    prefix: Optional[str] = None,
    modify_tool_name: Optional[Callable[[Tool], Tool]] = None,
    filter_tools_by_name: Optional[Callable[[str], bool]] = None,
) -> List[Any]:
    """Wrap a sandbox template as a list of LangChain ``StructuredTool``s.

    Consistency fix: ``toolset`` above and the LangChain variant of this
    helper both accept and forward ``modify_tool_name`` /
    ``filter_tools_by_name``; this function silently dropped them. They are
    added here as backward-compatible keyword-only parameters (default
    ``None``) and forwarded to ``to_langgraph``.
    """
    return _sandbox_toolset(
        template_name=template_name,
        template_type=template_type,
        config=config,
        sandbox_idle_timeout_seconds=sandbox_idle_timeout_seconds,
    ).to_langgraph(
        prefix=prefix,
        modify_tool_name=modify_tool_name,
        filter_tools_by_name=filter_tools_by_name,
    )
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"""PydanticAI 集成模块。 / PydanticAI 集成 Module
|
|
2
|
+
|
|
3
|
+
提供 AgentRun 模型与沙箱工具的 PydanticAI 适配入口。 / Provides PydanticAI adapter entry points for AgentRun models and sandbox tools.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from .builtin import model, sandbox_toolset, toolset
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"model",
|
|
10
|
+
"toolset",
|
|
11
|
+
"sandbox_toolset",
|
|
12
|
+
]
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""PydanticAI 适配器 / PydanticAI Adapters
|
|
2
|
+
|
|
3
|
+
提供 PydanticAI 框架的工具和模型适配器。"""
|
|
4
|
+
|
|
5
|
+
from agentrun.integration.pydantic_ai.model_adapter import (
|
|
6
|
+
PydanticAIModelAdapter,
|
|
7
|
+
)
|
|
8
|
+
from agentrun.integration.pydantic_ai.tool_adapter import PydanticAIToolAdapter
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"PydanticAIToolAdapter",
|
|
12
|
+
"PydanticAIModelAdapter",
|
|
13
|
+
]
|