eah-langflow-comp 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eah_langflow_comp-0.1.0/PKG-INFO +41 -0
- eah_langflow_comp-0.1.0/README.md +19 -0
- eah_langflow_comp-0.1.0/eah_langflow_comp.egg-info/PKG-INFO +41 -0
- eah_langflow_comp-0.1.0/eah_langflow_comp.egg-info/SOURCES.txt +11 -0
- eah_langflow_comp-0.1.0/eah_langflow_comp.egg-info/dependency_links.txt +1 -0
- eah_langflow_comp-0.1.0/eah_langflow_comp.egg-info/requires.txt +6 -0
- eah_langflow_comp-0.1.0/eah_langflow_comp.egg-info/top_level.txt +1 -0
- eah_langflow_comp-0.1.0/pyproject.toml +46 -0
- eah_langflow_comp-0.1.0/setup.cfg +4 -0
- eah_langflow_comp-0.1.0/src/custom_agent_comp.py +72 -0
- eah_langflow_comp-0.1.0/src/custom_base_agent_comp.py +441 -0
- eah_langflow_comp-0.1.0/src/custom_mcp_comp.py +377 -0
- eah_langflow_comp-0.1.0/src/message_2_text.py +29 -0
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: eah_langflow_comp
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A Python package for AI-powered workflows
|
|
5
|
+
Author-email: Your Name <your.email@example.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Classifier: Development Status :: 3 - Alpha
|
|
8
|
+
Classifier: Intended Audience :: Developers
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
15
|
+
Requires-Python: >=3.8
|
|
16
|
+
Description-Content-Type: text/markdown
|
|
17
|
+
Requires-Dist: langflow<2.0.0,>=1.0.0
|
|
18
|
+
Provides-Extra: dev
|
|
19
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
20
|
+
Requires-Dist: black>=23.0; extra == "dev"
|
|
21
|
+
Requires-Dist: ruff>=0.0.260; extra == "dev"
|
|
22
|
+
|
|
23
|
+
# EAH AI Flow
|
|
24
|
+
|
|
25
|
+
A Python package for AI-powered workflows.
|
|
26
|
+
|
|
27
|
+
## Installation
|
|
28
|
+
|
|
29
|
+
```bash
|
|
30
|
+
pip install eah_langflow_comp
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Usage
|
|
34
|
+
|
|
35
|
+
```python
|
|
36
|
+
from eah_langflow_comp import AgentComponent
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## License
|
|
40
|
+
|
|
41
|
+
MIT
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
# EAH AI Flow
|
|
2
|
+
|
|
3
|
+
A Python package for AI-powered workflows.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install eah_langflow_comp
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
from eah_langflow_comp import AgentComponent
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
## License
|
|
18
|
+
|
|
19
|
+
MIT
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: eah_langflow_comp
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A Python package for AI-powered workflows
|
|
5
|
+
Author-email: Your Name <your.email@example.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Classifier: Development Status :: 3 - Alpha
|
|
8
|
+
Classifier: Intended Audience :: Developers
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
15
|
+
Requires-Python: >=3.8
|
|
16
|
+
Description-Content-Type: text/markdown
|
|
17
|
+
Requires-Dist: langflow<2.0.0,>=1.0.0
|
|
18
|
+
Provides-Extra: dev
|
|
19
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
20
|
+
Requires-Dist: black>=23.0; extra == "dev"
|
|
21
|
+
Requires-Dist: ruff>=0.0.260; extra == "dev"
|
|
22
|
+
|
|
23
|
+
# EAH AI Flow
|
|
24
|
+
|
|
25
|
+
A Python package for AI-powered workflows.
|
|
26
|
+
|
|
27
|
+
## Installation
|
|
28
|
+
|
|
29
|
+
```bash
|
|
30
|
+
pip install eah_langflow_comp
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Usage
|
|
34
|
+
|
|
35
|
+
```python
|
|
36
|
+
from eah_langflow_comp import AgentComponent
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## License
|
|
40
|
+
|
|
41
|
+
MIT
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
eah_langflow_comp.egg-info/PKG-INFO
|
|
4
|
+
eah_langflow_comp.egg-info/SOURCES.txt
|
|
5
|
+
eah_langflow_comp.egg-info/dependency_links.txt
|
|
6
|
+
eah_langflow_comp.egg-info/requires.txt
|
|
7
|
+
eah_langflow_comp.egg-info/top_level.txt
|
|
8
|
+
src/custom_agent_comp.py
|
|
9
|
+
src/custom_base_agent_comp.py
|
|
10
|
+
src/custom_mcp_comp.py
|
|
11
|
+
src/message_2_text.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
src
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "eah_langflow_comp"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "A Python package for AI-powered workflows"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.8"
|
|
11
|
+
license = {text = "MIT"}
|
|
12
|
+
authors = [
|
|
13
|
+
{name = "Your Name", email = "your.email@example.com"}
|
|
14
|
+
]
|
|
15
|
+
classifiers = [
|
|
16
|
+
"Development Status :: 3 - Alpha",
|
|
17
|
+
"Intended Audience :: Developers",
|
|
18
|
+
"License :: OSI Approved :: MIT License",
|
|
19
|
+
"Programming Language :: Python :: 3",
|
|
20
|
+
"Programming Language :: Python :: 3.8",
|
|
21
|
+
"Programming Language :: Python :: 3.9",
|
|
22
|
+
"Programming Language :: Python :: 3.10",
|
|
23
|
+
"Programming Language :: Python :: 3.11",
|
|
24
|
+
]
|
|
25
|
+
dependencies = [
|
|
26
|
+
"langflow>=1.0.0,<2.0.0", # 限定版本范围,避免兼容性问题
|
|
27
|
+
]
|
|
28
|
+
|
|
29
|
+
[project.optional-dependencies]
|
|
30
|
+
dev = [
|
|
31
|
+
"pytest>=7.0",
|
|
32
|
+
"black>=23.0",
|
|
33
|
+
"ruff>=0.0.260",
|
|
34
|
+
]
|
|
35
|
+
|
|
36
|
+
[tool.setuptools.packages.find]
|
|
37
|
+
where = ["."]
|
|
38
|
+
include = ["src*"]
|
|
39
|
+
|
|
40
|
+
[tool.black]
|
|
41
|
+
line-length = 88
|
|
42
|
+
target-version = ['py38']
|
|
43
|
+
|
|
44
|
+
[tool.ruff]
|
|
45
|
+
line-length = 88
|
|
46
|
+
target-version = "py38"
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
from lfx.base.agents.events import ExceptionWithMessageError
|
|
2
|
+
from lfx.log.logger import logger
|
|
3
|
+
from lfx.schema.data import Data
|
|
4
|
+
from lfx.schema.message import Message
|
|
5
|
+
from .custom_base_agent_comp import BaseAgentComponent
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class AgentComponent(BaseAgentComponent):
    """Extend ``BaseAgentComponent`` with compatibility support for MCP tools
    whose parameter names begin with a leading underscore.

    Overridden methods
    ------------------
    message_response -- runs the agent with the json_schema ``create_model`` patch active
    json_response -- same, for the structured-output agent
    """

    display_name: str = "Agent(custom)"
    description: str = "Define the agent's instructions, then enter a task to complete using tools."
    documentation: str = "https://docs.langflow.org/agents"
    icon = "bot"
    beta = False
    name = "Agent"

    # ------------------------------------------------------------------
    # MCP patch helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _apply_pydantic_patch() -> list[tuple]:
        """Ensure the ``lfx.schema.json_schema.create_model`` patch is installed (idempotent).

        The patch renames leading-underscore field names in an MCP tool's
        ``args_schema`` to ``field_xxx`` so Pydantic v2 can build the model
        without raising NameError.

        Returns:
            An empty list: the patch is process-wide and permanent, so there
            is nothing for the caller to restore.
        """
        try:
            import lfx.schema.json_schema as _js  # noqa: PLC2701
            if not getattr(_js, "_mcp_create_model_patched", False):
                # Import the sibling module relatively, matching this file's own
                # import style (`from .custom_base_agent_comp import ...`).
                # Previously this was a hard-coded absolute path
                # (`langflow.eah_langflow_comp.src.custom_mcp_comp`) that only
                # resolved in one specific deployment layout.
                from .custom_mcp_comp import CustomMCPToolsComponent  # noqa: PLC2701
                CustomMCPToolsComponent._install_json_schema_create_model_patch()
        except Exception:  # noqa: BLE001
            # Best-effort: the agent still runs without the patch; tools whose
            # schemas use leading-underscore names will then fail at build time.
            pass
        return []

    @staticmethod
    def _restore_pydantic_patch(patches: list[tuple]) -> None:
        """No-op: the patch is process-wide and permanent (see ``_apply_pydantic_patch``)."""

    # ------------------------------------------------------------------
    # Overridden parent methods: keep the patch active for the agent lifecycle
    # ------------------------------------------------------------------

    async def message_response(self) -> Message:
        """Override: activate the MCP json_schema patch, then run the agent."""
        patches = self._apply_pydantic_patch()
        try:
            return await super().message_response()
        except (ValueError, TypeError, KeyError, ExceptionWithMessageError):
            # Already logged by the parent implementation.
            raise
        except Exception as e:
            await logger.aerror(f"Unexpected error: {e!s}")
            raise
        finally:
            self._restore_pydantic_patch(patches)

    async def json_response(self) -> Data:
        """Override: activate the MCP json_schema patch, then run the structured-output agent."""
        patches = self._apply_pydantic_patch()
        try:
            return await super().json_response()
        finally:
            self._restore_pydantic_patch(patches)
|
|
@@ -0,0 +1,441 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
from langchain.agents import create_tool_calling_agent
|
|
5
|
+
from langchain_core.prompts import ChatPromptTemplate
|
|
6
|
+
from langchain_core.tools import StructuredTool, Tool
|
|
7
|
+
from pydantic import ValidationError
|
|
8
|
+
|
|
9
|
+
from lfx.base.agents.agent import LCToolsAgentComponent
|
|
10
|
+
from lfx.base.agents.events import ExceptionWithMessageError
|
|
11
|
+
from lfx.base.models.model_utils import get_model_name
|
|
12
|
+
from lfx.components.helpers import CurrentDateComponent
|
|
13
|
+
from lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComponent
|
|
14
|
+
from lfx.components.models_and_agents.memory import MemoryComponent
|
|
15
|
+
from lfx.custom.custom_component.component import get_component_toolkit
|
|
16
|
+
# from lfx.custom.utils import update_component_build_config
|
|
17
|
+
from lfx.helpers.base_model import build_model_from_schema
|
|
18
|
+
from lfx.inputs.inputs import BoolInput, SecretStrInput, StrInput
|
|
19
|
+
from lfx.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TableInput, SliderInput
|
|
20
|
+
from lfx.field_typing.range_spec import RangeSpec
|
|
21
|
+
from lfx.log.logger import logger
|
|
22
|
+
from lfx.schema.data import Data
|
|
23
|
+
from lfx.schema.dotdict import dotdict
|
|
24
|
+
from lfx.schema.message import Message
|
|
25
|
+
from lfx.schema.table import EditMode
|
|
26
|
+
from langchain_openai import ChatOpenAI
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def set_advanced_true(component_input):
    """Mark *component_input* as advanced and hand it back for inline use."""
    setattr(component_input, "advanced", True)
    return component_input
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class BaseAgentComponent(LCToolsAgentComponent):
    """Tool-calling agent component backed by an OpenAI-compatible chat model.

    Declares the LangFlow UI metadata, the input fields (API credentials,
    model selection, prompts, memory controls) and the single "Response"
    output wired to ``message_response``.
    """

    display_name: str = "Base Agent(custom)"
    description: str = "Define the agent's instructions, then enter a task to complete using tools."
    documentation: str = "https://docs.langflow.org/agents"
    icon = "bot"
    beta = False
    name = "BaseAgent"

    # Memory inputs, all forced to "advanced". NOTE(review): currently unused —
    # the `*memory_inputs` entry below is commented out; kept for reference.
    memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]

    inputs = [
        # Credentials / endpoint for the OpenAI-compatible API.
        SecretStrInput(
            name="api_key",
            display_name="API Key",
            info="The API key to use for the model.",
            required=True,
        ),
        StrInput(
            name="base_url",
            display_name="Base URL",
            info="The base URL of the API.",
            required=True,
            show=True,
        ),
        # NOTE(review): max_output_tokens is declared but not passed to the
        # ChatOpenAI constructor in get_llm — confirm whether it should be.
        IntInput(
            name="max_output_tokens",
            display_name="Max Output Tokens",
            info="The maximum number of tokens to generate.",
            show=False,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            value=0.1,
            info="Controls randomness in responses",
            range_spec=RangeSpec(min=0, max=1, step=0.01),
            advanced=True,
        ),
        StrInput(
            name="model_name",
            display_name="Model Name",
            info="The name of the model to use.",
            required=True,
            show=True,
        ),
        # Prompting inputs.
        MultilineInput(
            name="system_prompt",
            display_name="Agent Instructions",
            info="System Prompt: Initial instructions and context provided to guide the agent's behavior.",
            value="You are a helpful assistant that can use tools to answer questions and perform tasks.",
            advanced=False,
        ),
        # Memory scoping inputs consumed by get_memory_data().
        MessageTextInput(
            name="context_id",
            display_name="Context ID",
            info="The context ID of the chat. Adds an extra layer to the local memory.",
            value="",
            advanced=True,
        ),
        IntInput(
            name="n_messages",
            display_name="Number of Chat History Messages",
            value=100,
            info="Number of chat history messages to retrieve.",
            advanced=True,
            show=True,
        ),
        # Template prepended in json_response() to steer structured output.
        MultilineInput(
            name="format_instructions",
            display_name="Output Format Instructions",
            info="Generic Template for structured output formatting. Valid only with Structured response.",
            value=(
                "You are an AI that extracts structured JSON objects from unstructured text. "
                "Use a predefined schema with expected types (str, int, float, bool, dict). "
                "Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. "
                "Fill missing or ambiguous values with defaults: null for missing values. "
                "Remove exact duplicates but keep variations that have different field values. "
                "Always return valid JSON in the expected format, never throw errors. "
                "If multiple objects can be extracted, return them all in the structured format."
            ),
            advanced=True,
        ),
        *LCToolsAgentComponent.get_base_inputs(),
        # removed memory inputs from agent component
        # *memory_inputs,
        BoolInput(
            name="add_current_date_tool",
            display_name="Current Date",
            advanced=True,
            info="If true, will add a tool to the agent that returns the current date.",
            value=True,
        ),
    ]
    outputs = [
        Output(name="response", display_name="Response", method="message_response"),
    ]
|
|
130
|
+
|
|
131
|
+
    async def get_agent_requirements(self):
        """Assemble everything needed to run the agent.

        Builds the LLM, loads chat history, and optionally appends the
        current-date tool, then wires shared tracing callbacks into the tools.

        Returns:
            A ``(llm_model, chat_history, tools)`` tuple; also stores
            ``chat_history``/``tools``/``model_name`` on ``self`` as a side effect.

        Raises:
            ValueError: if no language model could be built.
            TypeError: if the current-date component does not yield a StructuredTool.
        """
        llm_model, display_name = await self.get_llm()
        if llm_model is None:
            msg = "No language model selected. Please choose a model to proceed."
            raise ValueError(msg)
        self.model_name = get_model_name(llm_model, display_name=display_name)

        # Get memory data
        self.chat_history = await self.get_memory_data()
        await logger.adebug(f"Retrieved {len(self.chat_history)} chat history messages")
        # Normalize a single Message into a one-element list.
        if isinstance(self.chat_history, Message):
            self.chat_history = [self.chat_history]

        # Add current date tool if enabled
        if self.add_current_date_tool:
            if not isinstance(self.tools, list):  # type: ignore[has-type]
                self.tools = []
            # to_toolkit() returns a list; the date component exposes exactly one tool.
            current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)

            if not isinstance(current_date_tool, StructuredTool):
                msg = "CurrentDateComponent must be converted to a StructuredTool"
                raise TypeError(msg)
            self.tools.append(current_date_tool)

        # Set shared callbacks for tracing the tools used by the agent
        self.set_tools_callbacks(self.tools, self._get_shared_callbacks())

        return llm_model, self.chat_history, self.tools
|
|
160
|
+
|
|
161
|
+
    async def message_response(self) -> Message:
        """Run the tool-calling agent and return its response message.

        Errors are logged and re-raised unchanged; the successful result is
        also cached on ``self._agent_result`` for potential JSON output.
        """
        try:
            llm_model, self.chat_history, self.tools = await self.get_agent_requirements()
            # Set up and run agent
            self.set(
                llm=llm_model,
                tools=self.tools or [],
                chat_history=self.chat_history,
                input_value=self.input_value,
                system_prompt=self.system_prompt,
            )
            agent = self.create_agent_runnable()
            result = await self.run_agent(agent)

            # Store result for potential JSON output
            self._agent_result = result

        except (ValueError, TypeError, KeyError) as e:
            await logger.aerror(f"{type(e).__name__}: {e!s}")
            raise
        except ExceptionWithMessageError as e:
            await logger.aerror(f"ExceptionWithMessageError occurred: {e}")
            raise
        # Avoid catching blind Exception; let truly unexpected exceptions propagate
        except Exception as e:
            await logger.aerror(f"Unexpected error: {e!s}")
            raise
        else:
            # Only reached when no exception occurred above.
            return result
|
|
190
|
+
|
|
191
|
+
def _preprocess_schema(self, schema):
|
|
192
|
+
"""Preprocess schema to ensure correct data types for build_model_from_schema."""
|
|
193
|
+
processed_schema = []
|
|
194
|
+
for field in schema:
|
|
195
|
+
processed_field = {
|
|
196
|
+
"name": str(field.get("name", "field")),
|
|
197
|
+
"type": str(field.get("type", "str")),
|
|
198
|
+
"description": str(field.get("description", "")),
|
|
199
|
+
"multiple": field.get("multiple", False),
|
|
200
|
+
}
|
|
201
|
+
# Ensure multiple is handled correctly
|
|
202
|
+
if isinstance(processed_field["multiple"], str):
|
|
203
|
+
processed_field["multiple"] = processed_field["multiple"].lower() in [
|
|
204
|
+
"true",
|
|
205
|
+
"1",
|
|
206
|
+
"t",
|
|
207
|
+
"y",
|
|
208
|
+
"yes",
|
|
209
|
+
]
|
|
210
|
+
processed_schema.append(processed_field)
|
|
211
|
+
return processed_schema
|
|
212
|
+
|
|
213
|
+
    async def build_structured_output_base(self, content: str):
        """Build structured output with optional BaseModel validation.

        Parses *content* as JSON (falling back to the first ``{...}`` span),
        then, if ``self.output_schema`` is set, validates the parsed data
        against a model built from that schema.

        Returns:
            - ``{"content": ..., "error": ...}`` when no JSON could be parsed;
            - the raw parsed JSON when no schema is configured;
            - a list of validated dicts (invalid items carry ``validation_error``)
              when a schema is configured.
        """
        json_pattern = r"\{.*\}"
        schema_error_msg = "Try setting an output schema"

        # Try to parse content as JSON first
        json_data = None
        try:
            json_data = json.loads(content)
        except json.JSONDecodeError:
            # Fallback: greedily grab the first {...} block across newlines.
            json_match = re.search(json_pattern, content, re.DOTALL)
            if json_match:
                try:
                    json_data = json.loads(json_match.group())
                except json.JSONDecodeError:
                    return {"content": content, "error": schema_error_msg}
            else:
                return {"content": content, "error": schema_error_msg}

        # If no output schema provided, return parsed JSON without validation
        if not hasattr(self, "output_schema") or not self.output_schema or len(self.output_schema) == 0:
            return json_data

        # Use BaseModel validation with schema
        try:
            processed_schema = self._preprocess_schema(self.output_schema)
            output_model = build_model_from_schema(processed_schema)

            # Validate against the schema
            if isinstance(json_data, list):
                # Multiple objects
                validated_objects = []
                for item in json_data:
                    try:
                        validated_obj = output_model.model_validate(item)
                        validated_objects.append(validated_obj.model_dump())
                    except ValidationError as e:
                        await logger.aerror(f"Validation error for item: {e}")
                        # Include invalid items with error info
                        validated_objects.append({"data": item, "validation_error": str(e)})
                return validated_objects

            # Single object
            try:
                validated_obj = output_model.model_validate(json_data)
                return [validated_obj.model_dump()]  # Return as list for consistency
            except ValidationError as e:
                await logger.aerror(f"Validation error: {e}")
                return [{"data": json_data, "validation_error": str(e)}]

        except (TypeError, ValueError) as e:
            await logger.aerror(f"Error building structured output: {e}")
            # Fallback to parsed JSON without validation
            return json_data
|
|
267
|
+
|
|
268
|
+
    async def json_response(self) -> Data:
        """Convert agent response to structured JSON Data output with schema validation.

        Composes a system prompt from the agent instructions, the format
        instructions and (when configured) the JSON schema, runs the agent,
        then validates the extracted content via ``build_structured_output_base``.
        On agent failure it returns a ``Data`` payload carrying the error
        instead of raising.
        """
        # Always use structured chat agent for JSON response mode for better JSON formatting
        try:
            system_components = []

            # 1. Agent Instructions (system_prompt)
            agent_instructions = getattr(self, "system_prompt", "") or ""
            if agent_instructions:
                system_components.append(f"{agent_instructions}")

            # 2. Format Instructions
            format_instructions = getattr(self, "format_instructions", "") or ""
            if format_instructions:
                system_components.append(f"Format instructions: {format_instructions}")

            # 3. Schema Information from BaseModel
            if hasattr(self, "output_schema") and self.output_schema and len(self.output_schema) > 0:
                try:
                    processed_schema = self._preprocess_schema(self.output_schema)
                    output_model = build_model_from_schema(processed_schema)
                    schema_dict = output_model.model_json_schema()
                    schema_info = (
                        "You are given some text that may include format instructions, "
                        "explanations, or other content alongside a JSON schema.\n\n"
                        "Your task:\n"
                        "- Extract only the JSON schema.\n"
                        "- Return it as valid JSON.\n"
                        "- Do not include format instructions, explanations, or extra text.\n\n"
                        "Input:\n"
                        f"{json.dumps(schema_dict, indent=2)}\n\n"
                        "Output (only JSON schema):"
                    )
                    system_components.append(schema_info)
                except (ValidationError, ValueError, TypeError, KeyError) as e:
                    # Schema build failures are non-fatal: run without schema hints.
                    await logger.aerror(f"Could not build schema for prompt: {e}", exc_info=True)

            # Combine all components
            combined_instructions = "\n\n".join(system_components) if system_components else ""
            llm_model, self.chat_history, self.tools = await self.get_agent_requirements()
            self.set(
                llm=llm_model,
                tools=self.tools or [],
                chat_history=self.chat_history,
                input_value=self.input_value,
                system_prompt=combined_instructions,
            )

            # Create and run structured chat agent
            try:
                structured_agent = self.create_agent_runnable()
            except (NotImplementedError, ValueError, TypeError) as e:
                await logger.aerror(f"Error with structured chat agent: {e}")
                raise
            try:
                result = await self.run_agent(structured_agent)
            except (
                ExceptionWithMessageError,
                ValueError,
                TypeError,
                RuntimeError,
            ) as e:
                await logger.aerror(f"Error with structured agent result: {e}")
                raise
            # Extract content from structured agent result
            if hasattr(result, "content"):
                content = result.content
            elif hasattr(result, "text"):
                content = result.text
            else:
                content = str(result)

        except (
            ExceptionWithMessageError,
            ValueError,
            TypeError,
            NotImplementedError,
            AttributeError,
        ) as e:
            await logger.aerror(f"Error with structured chat agent: {e}")
            # Fallback to regular agent
            content_str = "No content returned from agent"
            return Data(data={"content": content_str, "error": str(e)})

        # Process with structured output validation
        try:
            structured_output = await self.build_structured_output_base(content)

            # Handle different output formats
            if isinstance(structured_output, list) and structured_output:
                # Unwrap single-item lists for a flatter payload.
                if len(structured_output) == 1:
                    return Data(data=structured_output[0])
                return Data(data={"results": structured_output})
            if isinstance(structured_output, dict):
                return Data(data=structured_output)
            return Data(data={"content": content})

        except (ValueError, TypeError) as e:
            await logger.aerror(f"Error in structured output processing: {e}")
            return Data(data={"content": content, "error": str(e)})
|
|
368
|
+
|
|
369
|
+
    async def get_memory_data(self):
        """Fetch prior chat messages for this session/context.

        Retrieves up to ``n_messages`` messages in ascending order, then drops
        the message whose id matches the current ``input_value`` so the
        user's current message is not duplicated into history.
        """
        # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.
        messages = (
            await MemoryComponent(**self.get_base_args())
            .set(
                session_id=self.graph.session_id,
                context_id=self.context_id,
                order="Ascending",
                n_messages=self.n_messages,
            )
            .retrieve_messages()
        )
        return [
            message for message in messages if getattr(message, "id", None) != getattr(self.input_value, "id", None)
        ]
|
|
384
|
+
|
|
385
|
+
    async def get_llm(self):
        """Build the chat model client from the component's inputs.

        Returns:
            A ``(llm, provider_display_name)`` tuple; the display name is
            always ``"OpenAI"`` since this component targets OpenAI-compatible
            endpoints via ``base_url``.

        Raises:
            ValueError: if the client cannot be constructed (original error chained).
        """
        try:
            return ChatOpenAI(
                temperature=self.temperature,
                model=self.model_name,
                api_key=self.api_key,
                base_url=self.base_url,
            ), "OpenAI"

        except (AttributeError, ValueError, TypeError, RuntimeError) as e:
            await logger.aerror(f"Error building language model: {e!s}")
            msg = f"Failed to initialize language model: {e!s}"
            raise ValueError(msg) from e
|
|
398
|
+
|
|
399
|
+
    def create_agent_runnable(self):
        """Create the tool-calling agent.

        Returns:
            A runnable agent that can use tools to complete tasks.

        Raises:
            NotImplementedError: if the configured model does not support tool calling.
        """
        # Prompt skeleton: system prompt, prior history, user input, and the
        # scratchpad placeholder where intermediate tool calls accumulate.
        messages = [
            ("system", "{system_prompt}"),
            ("placeholder", "{chat_history}"),
            ("human", "{input}"),
            ("placeholder", "{agent_scratchpad}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        self.validate_tool_names()
        try:
            return create_tool_calling_agent(self.llm, self.tools or [], prompt)
        except NotImplementedError as e:
            # Re-raise with a user-facing message naming this component.
            message = f"{self.display_name} does not support tool calling. Please try using a compatible model."
            raise NotImplementedError(message) from e
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
async def update_build_config(
|
|
421
|
+
self, build_config: dotdict, field_value: str, field_name: str | None = None
|
|
422
|
+
) -> dotdict:
|
|
423
|
+
return dotdict({k: v.to_dict() if hasattr(v, "to_dict") else v for k, v in build_config.items()})
|
|
424
|
+
|
|
425
|
+
    async def _get_tools(self) -> list[Tool]:
        """Expose this agent itself as a LangChain tool named ``Call_Agent``.

        The tool description concatenates the agent description with the
        names of the tools it wraps; tool metadata is applied when present.
        """
        component_toolkit = get_component_toolkit()
        tools_names = self._build_tools_names()
        agent_description = self.get_tool_description()
        # TODO: Agent Description Depreciated Feature to be removed
        description = f"{agent_description}{tools_names}"

        tools = component_toolkit(component=self).get_tools(
            tool_name="Call_Agent",
            tool_description=description,
            # here we do not use the shared callbacks as we are exposing the agent as a tool
            callbacks=self.get_langchain_callbacks(),
        )
        if hasattr(self, "tools_metadata"):
            tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)

        return tools
|
|
@@ -0,0 +1,377 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import copy
|
|
4
|
+
import functools
|
|
5
|
+
import json
|
|
6
|
+
|
|
7
|
+
from lfx.base.mcp.util import create_input_schema_from_json_schema, update_tools
|
|
8
|
+
from lfx.components.models_and_agents.mcp_component import MCPToolsComponent
|
|
9
|
+
from lfx.io.schema import flatten_schema, schema_to_langflow_inputs
|
|
10
|
+
from lfx.log.logger import logger
|
|
11
|
+
from lfx.schema.dataframe import DataFrame
|
|
12
|
+
from lfx.schema.message import Message
|
|
13
|
+
from lfx.base.agents.utils import maybe_unflatten_dict
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class CustomMCPToolsComponent(MCPToolsComponent):
|
|
17
|
+
"""MCP Tools 组件的扩展版本。
|
|
18
|
+
|
|
19
|
+
在 MCPToolsComponent 基础上,额外处理 MCP server 返回的 tool 参数名中
|
|
20
|
+
含有前置下划线(如 ``_param``)的场景。
|
|
21
|
+
|
|
22
|
+
Pydantic v2 禁止以 ``_`` 开头的字段名,因此在构建 args_schema 时会报错。
|
|
23
|
+
本组件通过在 ``lfx.schema.json_schema`` 模块的 ``create_model`` 上安装
|
|
24
|
+
一次性字段名净化 patch 来绕过该限制,并在调用 MCP server 时还原原始名。
|
|
25
|
+
|
|
26
|
+
重写的方法
|
|
27
|
+
----------
|
|
28
|
+
_validate_schema_inputs -- 净化 schema 后再生成 LangFlow 输入
|
|
29
|
+
get_inputs_for_all_tools -- 净化 schema 后再生成 LangFlow 输入
|
|
30
|
+
update_tool_list -- 通过 _safe_update_tools 加载 tools
|
|
31
|
+
build_output -- 执行时还原字段名映射
|
|
32
|
+
"""
|
|
33
|
+
|
|
34
|
+
display_name = "Custom MCP Tools"
|
|
35
|
+
name = "CustomMCPTools"
|
|
36
|
+
|
|
37
|
+
# ------------------------------------------------------------------
|
|
38
|
+
# 字段名净化工具方法
|
|
39
|
+
# ------------------------------------------------------------------
|
|
40
|
+
|
|
41
|
+
@staticmethod
|
|
42
|
+
def _sanitize_field_name(name: str) -> str:
|
|
43
|
+
"""将前置下划线字段名改为 ``field_xxx`` 形式。"""
|
|
44
|
+
if name.startswith("_"):
|
|
45
|
+
return "field" + name
|
|
46
|
+
return name
|
|
47
|
+
|
|
48
|
+
@staticmethod
|
|
49
|
+
def _build_field_name_mapping(schema_dict: dict) -> dict[str, str]:
|
|
50
|
+
"""构建 净化名 -> 原始名 的映射表。"""
|
|
51
|
+
mapping = {}
|
|
52
|
+
for field_name in schema_dict.get("properties", {}):
|
|
53
|
+
sanitized = CustomMCPToolsComponent._sanitize_field_name(field_name)
|
|
54
|
+
if sanitized != field_name:
|
|
55
|
+
mapping[sanitized] = field_name
|
|
56
|
+
return mapping
|
|
57
|
+
|
|
58
|
+
@staticmethod
|
|
59
|
+
def _sanitize_schema_dict(schema_dict: dict) -> dict:
|
|
60
|
+
"""返回将 properties 中前置下划线字段名替换后的 schema 副本。"""
|
|
61
|
+
schema_copy = copy.deepcopy(schema_dict)
|
|
62
|
+
new_props = {}
|
|
63
|
+
for k, v in schema_copy.get("properties", {}).items():
|
|
64
|
+
new_props[CustomMCPToolsComponent._sanitize_field_name(k)] = v
|
|
65
|
+
schema_copy["properties"] = new_props
|
|
66
|
+
if "required" in schema_copy:
|
|
67
|
+
schema_copy["required"] = [
|
|
68
|
+
CustomMCPToolsComponent._sanitize_field_name(f)
|
|
69
|
+
for f in schema_copy["required"]
|
|
70
|
+
]
|
|
71
|
+
return schema_copy
|
|
72
|
+
|
|
73
|
+
# ------------------------------------------------------------------
|
|
74
|
+
# json_schema.create_model 一次性 patch
|
|
75
|
+
# ------------------------------------------------------------------
|
|
76
|
+
|
|
77
|
+
    @staticmethod
    def _install_json_schema_create_model_patch() -> None:
        """Install a sanitizing patch on ``lfx.schema.json_schema.create_model`` (idempotent).

        The patch renames ``_xxx`` field names to ``field_xxx`` before they
        reach Pydantic (which rejects leading-underscore fields), and attaches
        ``__mcp_field_name_map__`` (sanitized name -> original name) to the
        generated model class so the coroutine wrapper can restore the
        original parameter names at call time.
        """
        try:
            import lfx.schema.json_schema as _js  # noqa: PLC2701

            # Idempotence guard: a module-level flag marks the patch as installed.
            if getattr(_js, "_mcp_create_model_patched", False):
                return

            # Keep a reference to the unpatched factory; the wrapper delegates to it.
            _orig = _js.create_model

            def _sanitized_create_model(__model_name: str, **field_definitions):
                # Rename only single-leading-underscore keys; dunder keys pass through.
                renamed: dict = {}
                key_map: dict = {}  # sanitized name -> original name
                for k, v in field_definitions.items():
                    if k.startswith("_") and not k.startswith("__"):
                        new_k = "field" + k
                        renamed[new_k] = v
                        key_map[new_k] = k
                    else:
                        renamed[k] = v
                model_cls = _orig(__model_name, **renamed)
                # Only attach the map when something was actually renamed.
                if key_map:
                    model_cls.__mcp_field_name_map__ = key_map
                return model_cls

            _js.create_model = _sanitized_create_model
            _js._mcp_create_model_patched = True  # type: ignore[attr-defined]
        except Exception:
            # Best-effort: if lfx is absent or its layout changed, silently skip
            # the patch rather than break component loading.
            pass
|
|
112
|
+
|
|
113
|
+
# ------------------------------------------------------------------
|
|
114
|
+
# 安全加载 tools
|
|
115
|
+
# ------------------------------------------------------------------
|
|
116
|
+
|
|
117
|
+
    async def _safe_update_tools(self, server_name: str, server_config: dict):
        """Install the sanitizing patch, then call ``update_tools`` and wrap coroutines.

        Args:
            server_name: Name of the MCP server to load tools from.
            server_config: Server configuration dict passed to ``update_tools``.

        Returns:
            The 3-tuple from ``update_tools`` (with each tool's coroutine
            wrapped for field-name restoration), or ``(None, [], {})`` when
            the schema still contains leading-underscore field names that
            Pydantic rejects.

        Raises:
            Exception: Re-raises any error from ``update_tools`` whose message
                does not mention "leading underscores".
        """
        # Ensure the create_model patch is active before tool schemas are built.
        self._install_json_schema_create_model_patch()
        try:
            result = await update_tools(
                server_name=server_name,
                server_config=server_config,
                mcp_stdio_client=self.stdio_client,
                mcp_streamable_http_client=self.streamable_http_client,
            )
            # Expected shape: (something, tool_list, tool_cache); wrap the cache
            # so coroutine kwargs get their original field names restored.
            if result is not None and len(result) == 3:
                _, tool_list, tool_cache = result
                self._wrap_tool_cache_coroutines(tool_cache)
            return result
        except Exception as e:
            err_msg = str(e)
            # Only swallow the specific Pydantic leading-underscore failure;
            # everything else propagates unchanged.
            if "leading underscores" not in err_msg:
                raise
            await logger.awarning(
                f"MCP tool schema still has leading-underscore field names "
                f"for server '{server_name}': {err_msg}"
            )
            return None, [], {}
|
|
140
|
+
|
|
141
|
+
def _wrap_tool_cache_coroutines(self, tool_cache: dict) -> None:
|
|
142
|
+
"""给 tool_cache 中每个 tool 的 coroutine 包装参数名还原逻辑。
|
|
143
|
+
|
|
144
|
+
``create_model`` patch 把 ``_foo`` 改为 ``field_foo``,LLM 按净化后
|
|
145
|
+
的字段名传参;此包装器在调用真实 coroutine 前把参数名还原为原始名,
|
|
146
|
+
确保 MCP server 收到正确的参数。
|
|
147
|
+
"""
|
|
148
|
+
for _tool_name, exec_tool in list(tool_cache.items()):
|
|
149
|
+
original_coroutine = getattr(exec_tool, "coroutine", None)
|
|
150
|
+
if original_coroutine is None:
|
|
151
|
+
continue
|
|
152
|
+
if getattr(original_coroutine, "_mcp_field_remapped", False):
|
|
153
|
+
continue
|
|
154
|
+
|
|
155
|
+
args_schema = getattr(exec_tool, "args_schema", None)
|
|
156
|
+
field_name_map: dict = getattr(args_schema, "__mcp_field_name_map__", {})
|
|
157
|
+
if not field_name_map:
|
|
158
|
+
continue
|
|
159
|
+
|
|
160
|
+
@functools.wraps(original_coroutine)
|
|
161
|
+
async def _remapped_coroutine(
|
|
162
|
+
*args,
|
|
163
|
+
_orig=original_coroutine,
|
|
164
|
+
_fmap=field_name_map,
|
|
165
|
+
**kwargs,
|
|
166
|
+
):
|
|
167
|
+
restored = {_fmap.get(k, k): v for k, v in kwargs.items()}
|
|
168
|
+
return await _orig(*args, **restored)
|
|
169
|
+
|
|
170
|
+
_remapped_coroutine._mcp_field_remapped = True
|
|
171
|
+
exec_tool.coroutine = _remapped_coroutine
|
|
172
|
+
|
|
173
|
+
# ------------------------------------------------------------------
|
|
174
|
+
# 重写父类方法
|
|
175
|
+
# ------------------------------------------------------------------
|
|
176
|
+
|
|
177
|
+
    async def _validate_schema_inputs(self, tool_obj):
        """Override: sanitize schema field names before generating LangFlow inputs.

        Also records the sanitized-name -> original-name mapping on
        ``self._tool_field_name_mapping`` (keyed by tool name) so that
        ``build_output`` can restore parameter names at execution time.

        Args:
            tool_obj: A tool object exposing ``name`` and ``args_schema``.

        Returns:
            A list of LangFlow inputs, or ``[]`` when the tool takes no
            parameters.

        Raises:
            ValueError: If the tool object is invalid, the schema is empty,
                or any processing step fails (original error chained).
        """
        try:
            if not tool_obj or not hasattr(tool_obj, "args_schema"):
                raise ValueError("Invalid tool object or missing input schema")

            raw_schema = tool_obj.args_schema.schema()
            sanitized_schema = self._sanitize_schema_dict(raw_schema)

            # Record sanitized -> original mapping for build_output to restore.
            mapping = self._build_field_name_mapping(raw_schema)
            if mapping:
                # Lazily create the per-component registry on first use.
                if not hasattr(self, "_tool_field_name_mapping"):
                    self._tool_field_name_mapping = {}
                self._tool_field_name_mapping[tool_obj.name] = mapping

            flat_schema = flatten_schema(sanitized_schema)
            input_schema = create_input_schema_from_json_schema(flat_schema)
            if not input_schema:
                raise ValueError(f"Empty input schema for tool '{tool_obj.name}'")

            schema_inputs = schema_to_langflow_inputs(input_schema)
            if not schema_inputs:
                # A tool with zero parameters is valid — warn and return empty.
                await logger.awarning(f"No input parameters defined for tool '{tool_obj.name}'")
                return []

        except Exception as e:
            msg = f"Error validating schema inputs: {e!s}"
            await logger.aexception(msg)
            raise ValueError(msg) from e
        else:
            # Success path: only reached when no exception occurred above.
            return schema_inputs
|
|
209
|
+
|
|
210
|
+
def get_inputs_for_all_tools(self, tools: list) -> dict:
|
|
211
|
+
"""重写:净化 schema 字段名后再生成 LangFlow 输入。"""
|
|
212
|
+
inputs = {}
|
|
213
|
+
for tool in tools:
|
|
214
|
+
if not tool or not hasattr(tool, "name"):
|
|
215
|
+
continue
|
|
216
|
+
try:
|
|
217
|
+
raw_schema = tool.args_schema.schema()
|
|
218
|
+
sanitized_schema = self._sanitize_schema_dict(raw_schema)
|
|
219
|
+
flat_schema = flatten_schema(sanitized_schema)
|
|
220
|
+
input_schema = create_input_schema_from_json_schema(flat_schema)
|
|
221
|
+
inputs[tool.name] = schema_to_langflow_inputs(input_schema)
|
|
222
|
+
except (AttributeError, ValueError, TypeError, KeyError) as e:
|
|
223
|
+
logger.exception(f"Error getting inputs for tool {getattr(tool, 'name', 'unknown')}: {e!s}")
|
|
224
|
+
continue
|
|
225
|
+
return inputs
|
|
226
|
+
|
|
227
|
+
    async def update_tool_list(self, mcp_server_value=None):
        """Override: route the internal ``update_tools`` call through ``_safe_update_tools``.

        Resolves the target MCP server (from the argument or ``self.mcp_server``),
        serves cached results when ``use_cache`` is enabled, otherwise fetches the
        server config from the Langflow database and loads its tools.

        Args:
            mcp_server_value: Optional server selector — either a dict with
                ``name``/``config`` keys or a plain server name. Falls back to
                ``self.mcp_server`` when None.

        Returns:
            A tuple ``(tools, {"name": ..., "config": ...})``.

        Raises:
            TimeoutError: If loading the tool list times out.
            ValueError: For any other failure while updating the tool list.
        """
        from lfx.base.agents.utils import safe_cache_get, safe_cache_set
        import asyncio

        # Accept either a dict ({"name": ..., "config": ...}) or a bare name.
        mcp_server = mcp_server_value if mcp_server_value is not None else getattr(self, "mcp_server", None)
        server_name = None
        server_config_from_value = None
        if isinstance(mcp_server, dict):
            server_name = mcp_server.get("name")
            server_config_from_value = mcp_server.get("config")
        else:
            server_name = mcp_server
        if not server_name:
            # No server selected: clear tools and return an empty result.
            self.tools = []
            return [], {"name": server_name, "config": server_config_from_value}

        use_cache = getattr(self, "use_cache", False)

        cached = None
        if use_cache:
            servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
            cached = servers_cache.get(server_name) if isinstance(servers_cache, dict) else None

        if cached is not None:
            try:
                self.tools = cached["tools"]
                self.tool_names = cached["tool_names"]
                self._tool_cache = cached["tool_cache"]
                server_config_from_value = cached["config"]
            except (TypeError, KeyError, AttributeError) as e:
                # Corrupt/incomplete cache entry: warn, evict it, and fall
                # through to a fresh load below.
                await logger.awarning(f"Unable to use cached data for MCP Server {server_name}: {e}")
                current_servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
                if isinstance(current_servers_cache, dict) and server_name in current_servers_cache:
                    current_servers_cache.pop(server_name)
                    safe_cache_set(self._shared_component_cache, "servers", current_servers_cache)
            else:
                # Cache hit: short-circuit with the cached tools.
                return self.tools, {"name": server_name, "config": server_config_from_value}

        try:
            try:
                from langflow.api.v2.mcp import get_server
                from langflow.services.database.models.user.crud import get_user_by_id
            except ImportError as e:
                raise ImportError(
                    "Langflow MCP server functionality is not available. "
                    "This feature requires the full Langflow installation."
                ) from e

            from lfx.services.deps import get_settings_service, get_storage_service, session_scope

            # Look up the server config stored for the current user.
            async with session_scope() as db:
                if not self.user_id:
                    raise ValueError("User ID is required for fetching MCP tools.")
                current_user = await get_user_by_id(db, self.user_id)
                server_config = await get_server(
                    server_name,
                    current_user,
                    db,
                    storage_service=get_storage_service(),
                    settings_service=get_settings_service(),
                )

            # Fall back to the config embedded in the input value, if any.
            if not server_config and server_config_from_value:
                server_config = server_config_from_value
            if not server_config:
                self.tools = []
                return [], {"name": server_name, "config": server_config}

            if "verify_ssl" not in server_config:
                server_config["verify_ssl"] = getattr(self, "verify_ssl", True)

            # Key point: use _safe_update_tools instead of calling update_tools
            # directly, so the schema-sanitizing patch and coroutine wrappers apply.
            _, tool_list, tool_cache = await self._safe_update_tools(
                server_name=server_name,
                server_config=server_config,
            )

            self.tool_names = [tool.name for tool in tool_list if hasattr(tool, "name")]
            self._tool_cache = tool_cache
            self.tools = tool_list

            if use_cache:
                # Store the freshly loaded tools for later runs.
                current_servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
                if isinstance(current_servers_cache, dict):
                    current_servers_cache[server_name] = {
                        "tools": tool_list,
                        "tool_names": self.tool_names,
                        "tool_cache": tool_cache,
                        "config": server_config,
                    }
                    safe_cache_set(self._shared_component_cache, "servers", current_servers_cache)

        except (TimeoutError, asyncio.TimeoutError) as e:
            msg = f"Timeout updating tool list: {e!s}"
            await logger.aexception(msg)
            raise TimeoutError(msg) from e
        except Exception as e:
            msg = f"Error updating tool list: {e!s}"
            await logger.aexception(msg)
            raise ValueError(msg) from e
        else:
            return tool_list, {"name": server_name, "config": server_config}
|
|
330
|
+
|
|
331
|
+
    async def build_output(self) -> DataFrame:
        """Override: restore sanitized field names to their originals before executing the tool.

        Refreshes the tool list, collects the user-supplied argument values
        (whose input names are the sanitized ``field_xxx`` forms), maps them
        back to the original MCP parameter names, invokes the selected tool's
        coroutine, and packages the response content as a ``DataFrame``.

        Returns:
            A ``DataFrame`` of the tool's output items, or a single-row error
            frame when no tool is selected.

        Raises:
            ValueError: Wrapping any failure during execution (original
                error chained).
        """
        try:
            self.tools, _ = await self.update_tool_list()
            if self.tool != "":
                # Propagate the session context to both transport clients, if any.
                session_context = self._get_session_context()
                if session_context:
                    self.stdio_client.set_session_context(session_context)
                    self.streamable_http_client.set_session_context(session_context)

                exec_tool = self._tool_cache[self.tool]
                tool_args = self.get_inputs_for_all_tools(self.tools)[self.tool]

                # Collect user-supplied values (keys are sanitized names, e.g. field_param).
                kwargs = {}
                for arg in tool_args:
                    value = getattr(self, arg.name, None)
                    if value is not None:
                        # Unwrap Message inputs to their text payload.
                        kwargs[arg.name] = value.text if isinstance(value, Message) else value

                # Prefer the mapping attached to args_schema by the create_model
                # patch; fall back to the one recorded by _validate_schema_inputs.
                args_schema = getattr(exec_tool, "args_schema", None)
                field_name_map: dict = getattr(args_schema, "__mcp_field_name_map__", {})
                if not field_name_map and hasattr(self, "_tool_field_name_mapping"):
                    field_name_map = self._tool_field_name_mapping.get(self.tool, {})

                if field_name_map:
                    # Translate sanitized keys back to the original MCP names.
                    kwargs = {field_name_map.get(k, k): v for k, v in kwargs.items()}

                unflattened_kwargs = maybe_unflatten_dict(kwargs)
                output = await exec_tool.coroutine(**unflattened_kwargs)

                tool_content = []
                for item in output.content:
                    item_dict = item.model_dump()
                    item_dict = self.process_output_item(item_dict)
                    tool_content.append(item_dict)

                # All-dict content maps cleanly to rows; otherwise pass as data=.
                if isinstance(tool_content, list) and all(isinstance(x, dict) for x in tool_content):
                    return DataFrame(tool_content)
                return DataFrame(data=tool_content)
            return DataFrame(data=[{"error": "You must select a tool"}])
        except Exception as e:
            msg = f"Error in build_output: {e!s}"
            await logger.aexception(msg)
            raise ValueError(msg) from e
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
from lfx.custom.custom_component.component import Component
|
|
2
|
+
from lfx.io import MessageTextInput, Output
|
|
3
|
+
from lfx.schema.message import Message
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class Message2TextComponent(Component):
    """LangFlow component that passes a message through as a text output.

    Exposes a single ``message`` text input (tool-mode enabled) and a
    single ``Text`` output backed by :meth:`build_text`.
    """

    display_name: str = "Message to Text"
    description: str = "Convert a message to a text prompt template."
    icon = "braces"
    name = "Message to Text"
    priority = 0  # Set priority to 0 to make it appear first

    inputs = [
        MessageTextInput(
            name="message",
            display_name="message",
            tool_mode=True,  # allow agents to call this component as a tool
        ),
    ]

    outputs = [
        Output(display_name="Text", name="text", method="build_text"),
    ]

    async def build_text(self) -> Message:
        """Return the input message unchanged.

        NOTE(review): returns ``self.message`` as-is even though the
        annotation says ``Message`` — presumably MessageTextInput yields a
        value LangFlow coerces downstream; confirm against lfx behavior.
        """
        return self.message
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
|