lfx-nightly 0.2.0.dev41__py3-none-any.whl → 0.3.0.dev3__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as they appear in their public registries. It is provided for informational purposes only.
- lfx/__main__.py +137 -6
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +10 -6
- lfx/base/agents/altk_base_agent.py +5 -3
- lfx/base/agents/altk_tool_wrappers.py +1 -1
- lfx/base/agents/events.py +1 -1
- lfx/base/agents/utils.py +4 -0
- lfx/base/composio/composio_base.py +78 -41
- lfx/base/data/cloud_storage_utils.py +156 -0
- lfx/base/data/docling_utils.py +130 -55
- lfx/base/datastax/astradb_base.py +75 -64
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/models/__init__.py +11 -1
- lfx/base/models/google_generative_ai_constants.py +33 -9
- lfx/base/models/model_metadata.py +6 -0
- lfx/base/models/ollama_constants.py +196 -30
- lfx/base/models/openai_constants.py +37 -10
- lfx/base/models/unified_models.py +1123 -0
- lfx/base/models/watsonx_constants.py +43 -4
- lfx/base/prompts/api_utils.py +40 -5
- lfx/base/tools/component_tool.py +2 -9
- lfx/cli/__init__.py +10 -2
- lfx/cli/commands.py +3 -0
- lfx/cli/run.py +65 -409
- lfx/cli/script_loader.py +18 -7
- lfx/cli/validation.py +6 -3
- lfx/components/__init__.py +0 -3
- lfx/components/composio/github_composio.py +1 -1
- lfx/components/cuga/cuga_agent.py +39 -27
- lfx/components/data_source/api_request.py +4 -2
- lfx/components/datastax/astradb_assistant_manager.py +4 -2
- lfx/components/docling/__init__.py +45 -11
- lfx/components/docling/docling_inline.py +39 -49
- lfx/components/docling/docling_remote.py +1 -0
- lfx/components/elastic/opensearch_multimodal.py +1733 -0
- lfx/components/files_and_knowledge/file.py +384 -36
- lfx/components/files_and_knowledge/ingestion.py +8 -0
- lfx/components/files_and_knowledge/retrieval.py +10 -0
- lfx/components/files_and_knowledge/save_file.py +91 -88
- lfx/components/langchain_utilities/ibm_granite_handler.py +211 -0
- lfx/components/langchain_utilities/tool_calling.py +37 -6
- lfx/components/llm_operations/batch_run.py +64 -18
- lfx/components/llm_operations/lambda_filter.py +213 -101
- lfx/components/llm_operations/llm_conditional_router.py +39 -7
- lfx/components/llm_operations/structured_output.py +38 -12
- lfx/components/models/__init__.py +16 -74
- lfx/components/models_and_agents/agent.py +51 -203
- lfx/components/models_and_agents/embedding_model.py +171 -255
- lfx/components/models_and_agents/language_model.py +54 -318
- lfx/components/models_and_agents/mcp_component.py +96 -10
- lfx/components/models_and_agents/prompt.py +105 -18
- lfx/components/ollama/ollama_embeddings.py +111 -29
- lfx/components/openai/openai_chat_model.py +1 -1
- lfx/components/processing/text_operations.py +580 -0
- lfx/components/vllm/__init__.py +37 -0
- lfx/components/vllm/vllm.py +141 -0
- lfx/components/vllm/vllm_embeddings.py +110 -0
- lfx/custom/custom_component/component.py +65 -10
- lfx/custom/custom_component/custom_component.py +8 -6
- lfx/events/observability/__init__.py +0 -0
- lfx/events/observability/lifecycle_events.py +111 -0
- lfx/field_typing/__init__.py +57 -58
- lfx/graph/graph/base.py +40 -1
- lfx/graph/utils.py +109 -30
- lfx/graph/vertex/base.py +75 -23
- lfx/graph/vertex/vertex_types.py +0 -5
- lfx/inputs/__init__.py +2 -0
- lfx/inputs/input_mixin.py +55 -0
- lfx/inputs/inputs.py +120 -0
- lfx/interface/components.py +24 -7
- lfx/interface/initialize/loading.py +42 -12
- lfx/io/__init__.py +2 -0
- lfx/run/__init__.py +5 -0
- lfx/run/base.py +464 -0
- lfx/schema/__init__.py +50 -0
- lfx/schema/data.py +1 -1
- lfx/schema/image.py +26 -7
- lfx/schema/message.py +104 -11
- lfx/schema/workflow.py +171 -0
- lfx/services/deps.py +12 -0
- lfx/services/interfaces.py +43 -1
- lfx/services/mcp_composer/service.py +7 -1
- lfx/services/schema.py +1 -0
- lfx/services/settings/auth.py +95 -4
- lfx/services/settings/base.py +11 -1
- lfx/services/settings/constants.py +2 -0
- lfx/services/settings/utils.py +82 -0
- lfx/services/storage/local.py +13 -8
- lfx/services/transaction/__init__.py +5 -0
- lfx/services/transaction/service.py +35 -0
- lfx/tests/unit/components/__init__.py +0 -0
- lfx/utils/constants.py +2 -0
- lfx/utils/mustache_security.py +79 -0
- lfx/utils/validate_cloud.py +81 -3
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/METADATA +7 -2
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/RECORD +98 -80
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/entry_points.txt +0 -0
lfx/components/models_and_agents/language_model.py +54 -318

@@ -1,43 +1,15 @@
-from typing import Any
-
-import requests
-from langchain_anthropic import ChatAnthropic
-from langchain_ibm import ChatWatsonx
-from langchain_ollama import ChatOllama
-from langchain_openai import ChatOpenAI
-from pydantic.v1 import SecretStr
-
-from lfx.base.models.anthropic_constants import ANTHROPIC_MODELS
-from lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS
-from lfx.base.models.google_generative_ai_model import ChatGoogleGenerativeAIFixed
 from lfx.base.models.model import LCModelComponent
-from lfx.base.models.
-
+from lfx.base.models.unified_models import (
+    get_language_model_options,
+    get_llm,
+    update_model_options_in_build_config,
+)
+from lfx.base.models.watsonx_constants import IBM_WATSONX_URLS
 from lfx.field_typing import LanguageModel
 from lfx.field_typing.range_spec import RangeSpec
-from lfx.inputs.inputs import BoolInput,
-from lfx.io import
-from lfx.log.logger import logger
-from lfx.schema.dotdict import dotdict
-from lfx.utils.util import transform_localhost_url
-
-# IBM watsonx.ai constants
-IBM_WATSONX_DEFAULT_MODELS = ["ibm/granite-3-2b-instruct", "ibm/granite-3-8b-instruct", "ibm/granite-13b-instruct-v2"]
-IBM_WATSONX_URLS = [
-    "https://us-south.ml.cloud.ibm.com",
-    "https://eu-de.ml.cloud.ibm.com",
-    "https://eu-gb.ml.cloud.ibm.com",
-    "https://au-syd.ml.cloud.ibm.com",
-    "https://jp-tok.ml.cloud.ibm.com",
-    "https://ca-tor.ml.cloud.ibm.com",
-]
+from lfx.inputs.inputs import BoolInput, DropdownInput, StrInput
+from lfx.io import MessageInput, ModelInput, MultilineInput, SecretStrInput, SliderInput

-# Ollama API constants
-HTTP_STATUS_OK = 200
-JSON_MODELS_KEY = "models"
-JSON_NAME_KEY = "name"
-JSON_CAPABILITIES_KEY = "capabilities"
-DESIRED_CAPABILITY = "completion"
 DEFAULT_OLLAMA_URL = "http://localhost:11434"


@@ -49,53 +21,22 @@ class LanguageModelComponent(LCModelComponent):
     category = "models"
     priority = 0  # Set priority to 0 to make it appear first

-    @staticmethod
-    def fetch_ibm_models(base_url: str) -> list[str]:
-        """Fetch available models from the watsonx.ai API."""
-        try:
-            endpoint = f"{base_url}/ml/v1/foundation_model_specs"
-            params = {"version": "2024-09-16", "filters": "function_text_chat,!lifecycle_withdrawn"}
-            response = requests.get(endpoint, params=params, timeout=10)
-            response.raise_for_status()
-            data = response.json()
-            models = [model["model_id"] for model in data.get("resources", [])]
-            return sorted(models)
-        except Exception:  # noqa: BLE001
-            logger.exception("Error fetching IBM watsonx models. Using default models.")
-            return IBM_WATSONX_DEFAULT_MODELS
-
     inputs = [
-
-            name="
-            display_name="Model
-
-            value="OpenAI",
-            info="Select the model provider",
+        ModelInput(
+            name="model",
+            display_name="Language Model",
+            info="Select your model provider",
             real_time_refresh=True,
-
-                {"icon": "OpenAI"},
-                {"icon": "Anthropic"},
-                {"icon": "GoogleGenerativeAI"},
-                {"icon": "WatsonxAI"},
-                {"icon": "Ollama"},
-            ],
-        ),
-        DropdownInput(
-            name="model_name",
-            display_name="Model Name",
-            options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
-            value=OPENAI_CHAT_MODEL_NAMES[0],
-            info="Select the model to use",
-            real_time_refresh=True,
-            refresh_button=True,
+            required=True,
         ),
         SecretStrInput(
             name="api_key",
-            display_name="
+            display_name="API Key",
             info="Model Provider API key",
             required=False,
             show=True,
             real_time_refresh=True,
+            advanced=True,
         ),
         DropdownInput(
             name="base_url_ibm_watsonx",
@@ -113,7 +54,7 @@ class LanguageModelComponent(LCModelComponent):
             show=False,
             required=False,
         ),
-
+        MessageInput(
             name="ollama_base_url",
             display_name="Ollama API URL",
             info=f"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}",
@@ -151,248 +92,43 @@ class LanguageModelComponent(LCModelComponent):
     ]

     def build_model(self) -> LanguageModel:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                google_api_key=self.api_key,
-            )
-        if provider == "IBM watsonx.ai":
-            if not self.api_key:
-                msg = "IBM API key is required when using IBM watsonx.ai provider"
-                raise ValueError(msg)
-            if not self.base_url_ibm_watsonx:
-                msg = "IBM watsonx API Endpoint is required when using IBM watsonx.ai provider"
-                raise ValueError(msg)
-            if not self.project_id:
-                msg = "IBM watsonx Project ID is required when using IBM watsonx.ai provider"
-                raise ValueError(msg)
-            return ChatWatsonx(
-                apikey=SecretStr(self.api_key).get_secret_value(),
-                url=self.base_url_ibm_watsonx,
-                project_id=self.project_id,
-                model_id=model_name,
-                params={
-                    "temperature": temperature,
-                },
-                streaming=stream,
-            )
-        if provider == "Ollama":
-            if not self.ollama_base_url:
-                msg = "Ollama API URL is required when using Ollama provider"
-                raise ValueError(msg)
-            if not model_name:
-                msg = "Model name is required when using Ollama provider"
-                raise ValueError(msg)
-
-            transformed_base_url = transform_localhost_url(self.ollama_base_url)
-
-            # Check if URL contains /v1 suffix (OpenAI-compatible mode)
-            if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
-                # Strip /v1 suffix and log warning
-                transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
-                logger.warning(
-                    "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
-                    "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
-                    "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
-                    "Learn more at https://docs.ollama.com/openai#openai-compatibility"
-                )
-
-            return ChatOllama(
-                base_url=transformed_base_url,
-                model=model_name,
-                temperature=temperature,
-            )
-        msg = f"Unknown provider: {provider}"
-        raise ValueError(msg)
-
-    async def update_build_config(
-        self, build_config: dotdict, field_value: Any, field_name: str | None = None
-    ) -> dotdict:
-        if field_name == "provider":
-            if field_value == "OpenAI":
-                build_config["model_name"]["options"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
-                build_config["model_name"]["value"] = OPENAI_CHAT_MODEL_NAMES[0]
-                build_config["api_key"]["display_name"] = "OpenAI API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "Anthropic":
-                build_config["model_name"]["options"] = ANTHROPIC_MODELS
-                build_config["model_name"]["value"] = ANTHROPIC_MODELS[0]
-                build_config["api_key"]["display_name"] = "Anthropic API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "Google":
-                build_config["model_name"]["options"] = GOOGLE_GENERATIVE_AI_MODELS
-                build_config["model_name"]["value"] = GOOGLE_GENERATIVE_AI_MODELS[0]
-                build_config["api_key"]["display_name"] = "Google API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "IBM watsonx.ai":
-                build_config["model_name"]["options"] = IBM_WATSONX_DEFAULT_MODELS
-                build_config["model_name"]["value"] = IBM_WATSONX_DEFAULT_MODELS[0]
-                build_config["api_key"]["display_name"] = "IBM API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = True
-                build_config["project_id"]["show"] = True
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "Ollama":
-                # Fetch Ollama models from the API
-                build_config["api_key"]["show"] = False
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = True
-
-                # Try multiple sources to get the URL (in order of preference):
-                # 1. Instance attribute (already resolved from global/db)
-                # 2. Build config value (may be a global variable reference)
-                # 3. Default value
-                ollama_url = getattr(self, "ollama_base_url", None)
-                if not ollama_url:
-                    config_value = build_config["ollama_base_url"].get("value", DEFAULT_OLLAMA_URL)
-                    # If config_value looks like a variable name (all caps with underscores), use default
-                    is_variable_ref = (
-                        config_value
-                        and isinstance(config_value, str)
-                        and config_value.isupper()
-                        and "_" in config_value
-                    )
-                    if is_variable_ref:
-                        await logger.adebug(
-                            f"Config value appears to be a variable reference: {config_value}, using default"
-                        )
-                        ollama_url = DEFAULT_OLLAMA_URL
-                    else:
-                        ollama_url = config_value
-
-                await logger.adebug(f"Fetching Ollama models for provider switch. URL: {ollama_url}")
-                if await is_valid_ollama_url(url=ollama_url):
-                    try:
-                        models = await get_ollama_models(
-                            base_url_value=ollama_url,
-                            desired_capability=DESIRED_CAPABILITY,
-                            json_models_key=JSON_MODELS_KEY,
-                            json_name_key=JSON_NAME_KEY,
-                            json_capabilities_key=JSON_CAPABILITIES_KEY,
-                        )
-                        build_config["model_name"]["options"] = models
-                        build_config["model_name"]["value"] = models[0] if models else ""
-                    except ValueError:
-                        await logger.awarning("Failed to fetch Ollama models. Setting empty options.")
-                        build_config["model_name"]["options"] = []
-                        build_config["model_name"]["value"] = ""
-                else:
-                    await logger.awarning(f"Invalid Ollama URL: {ollama_url}")
-                    build_config["model_name"]["options"] = []
-                    build_config["model_name"]["value"] = ""
-        elif (
-            field_name == "base_url_ibm_watsonx"
-            and field_value
-            and hasattr(self, "provider")
-            and self.provider == "IBM watsonx.ai"
-        ):
-            # Fetch IBM models when base_url changes
-            try:
-                models = self.fetch_ibm_models(base_url=field_value)
-                build_config["model_name"]["options"] = models
-                build_config["model_name"]["value"] = models[0] if models else IBM_WATSONX_DEFAULT_MODELS[0]
-                info_message = f"Updated model options: {len(models)} models found in {field_value}"
-                logger.info(info_message)
-            except Exception:  # noqa: BLE001
-                logger.exception("Error updating IBM model options.")
-        elif field_name == "ollama_base_url":
-            # Fetch Ollama models when ollama_base_url changes
-            # Use the field_value directly since this is triggered when the field changes
-            logger.debug(
-                f"Fetching Ollama models from updated URL: {build_config['ollama_base_url']} \
-                and value {self.ollama_base_url}",
-            )
-            await logger.adebug(f"Fetching Ollama models from updated URL: {self.ollama_base_url}")
-            if await is_valid_ollama_url(url=self.ollama_base_url):
-                try:
-                    models = await get_ollama_models(
-                        base_url_value=self.ollama_base_url,
-                        desired_capability=DESIRED_CAPABILITY,
-                        json_models_key=JSON_MODELS_KEY,
-                        json_name_key=JSON_NAME_KEY,
-                        json_capabilities_key=JSON_CAPABILITIES_KEY,
-                    )
-                    build_config["model_name"]["options"] = models
-                    build_config["model_name"]["value"] = models[0] if models else ""
-                    info_message = f"Updated model options: {len(models)} models found in {self.ollama_base_url}"
-                    await logger.ainfo(info_message)
-                except ValueError:
-                    await logger.awarning("Error updating Ollama model options.")
-                    build_config["model_name"]["options"] = []
-                    build_config["model_name"]["value"] = ""
-            else:
-                await logger.awarning(f"Invalid Ollama URL: {self.ollama_base_url}")
-                build_config["model_name"]["options"] = []
-                build_config["model_name"]["value"] = ""
-        elif field_name == "model_name":
-            # Refresh Ollama models when model_name field is accessed
-            if hasattr(self, "provider") and self.provider == "Ollama":
-                ollama_url = getattr(self, "ollama_base_url", DEFAULT_OLLAMA_URL)
-                if await is_valid_ollama_url(url=ollama_url):
-                    try:
-                        models = await get_ollama_models(
-                            base_url_value=ollama_url,
-                            desired_capability=DESIRED_CAPABILITY,
-                            json_models_key=JSON_MODELS_KEY,
-                            json_name_key=JSON_NAME_KEY,
-                            json_capabilities_key=JSON_CAPABILITIES_KEY,
-                        )
-                        build_config["model_name"]["options"] = models
-                    except ValueError:
-                        await logger.awarning("Failed to refresh Ollama models.")
-                        build_config["model_name"]["options"] = []
-                else:
-                    build_config["model_name"]["options"] = []
+        return get_llm(
+            model=self.model,
+            user_id=self.user_id,
+            api_key=self.api_key,
+            temperature=self.temperature,
+            stream=self.stream,
+            watsonx_url=getattr(self, "base_url_ibm_watsonx", None),
+            watsonx_project_id=getattr(self, "project_id", None),
+            ollama_base_url=getattr(self, "ollama_base_url", None),
+        )
+
+    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
+        """Dynamically update build config with user-filtered model options."""
+        # Update model options
+        build_config = update_model_options_in_build_config(
+            component=self,
+            build_config=build_config,
+            cache_key_prefix="language_model_options",
+            get_options_func=get_language_model_options,
+            field_name=field_name,
+            field_value=field_value,
+        )
+
+        # Show/hide provider-specific fields based on selected model
+        if field_name == "model" and isinstance(field_value, list) and len(field_value) > 0:
+            selected_model = field_value[0]
+            provider = selected_model.get("provider", "")
+
+            # Show/hide watsonx fields
+            is_watsonx = provider == "IBM WatsonX"
+            build_config["base_url_ibm_watsonx"]["show"] = is_watsonx
+            build_config["project_id"]["show"] = is_watsonx
+            build_config["base_url_ibm_watsonx"]["required"] = is_watsonx
+            build_config["project_id"]["required"] = is_watsonx
+
+            # Show/hide Ollama fields
+            is_ollama = provider == "Ollama"
+            build_config["ollama_base_url"]["show"] = is_ollama

-        # Hide system_message for o1 models - currently unsupported
-        if field_value and field_value.startswith("o1") and hasattr(self, "provider") and self.provider == "OpenAI":
-            if "system_message" in build_config:
-                build_config["system_message"]["show"] = False
-        elif "system_message" in build_config:
-            build_config["system_message"]["show"] = True
         return build_config
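Note: the rework above replaces the per-provider dropdown pair with a single ModelInput whose value is a list of model descriptors; update_build_config reads field_value[0]["provider"] to decide which provider-specific fields to show. A minimal sketch of that show/hide rule, written against the shapes visible in the diff (the function name and model descriptors are invented for illustration; only the "provider" key is read):

    def provider_visibility(field_value):
        # Mirrors the show/hide logic in update_build_config above, illustration only.
        provider = ""
        if isinstance(field_value, list) and len(field_value) > 0:
            provider = field_value[0].get("provider", "")
        return {"watsonx": provider == "IBM WatsonX", "ollama": provider == "Ollama"}

    result = provider_visibility([{"provider": "IBM WatsonX", "name": "ibm/granite-3-8b-instruct"}])
    assert result == {"watsonx": True, "ollama": False}
    assert provider_visibility([{"provider": "Ollama", "name": "llama3"}])["ollama"] is True
    assert provider_visibility([]) == {"watsonx": False, "ollama": False}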
lfx/components/models_and_agents/mcp_component.py +96 -10

@@ -15,7 +15,7 @@ from lfx.base.mcp.util import (
 )
 from lfx.custom.custom_component.component_with_cache import ComponentWithCache
 from lfx.inputs.inputs import InputTypes  # noqa: TC001
-from lfx.io import BoolInput, DropdownInput, McpInput, MessageTextInput, Output
+from lfx.io import BoolInput, DictInput, DropdownInput, McpInput, MessageTextInput, Output
 from lfx.io.schema import flatten_schema, schema_to_langflow_inputs
 from lfx.log.logger import logger
 from lfx.schema.dataframe import DataFrame
@@ -23,6 +23,31 @@ from lfx.schema.message import Message
 from lfx.services.deps import get_settings_service, get_storage_service, session_scope


+def resolve_mcp_config(
+    server_name: str,  # noqa: ARG001
+    server_config_from_value: dict | None,
+    server_config_from_db: dict | None,
+) -> dict | None:
+    """Resolve MCP server config with proper precedence.
+
+    Resolves the configuration for an MCP server with the following precedence:
+    1. Database config (takes priority) - ensures edits are reflected
+    2. Config from value/tweaks (fallback) - allows REST API to provide config for new servers
+
+    Args:
+        server_name: Name of the MCP server
+        server_config_from_value: Config provided via value/tweaks (optional)
+        server_config_from_db: Config from database (optional)
+
+    Returns:
+        Final config to use (DB takes priority, falls back to value)
+        Returns None if no config found in either location
+    """
+    if server_config_from_db:
+        return server_config_from_db
+    return server_config_from_value
+
+
 class MCPToolsComponent(ComponentWithCache):
     schema_inputs: list = []
     tools: list[StructuredTool] = []
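The new resolve_mcp_config helper encodes a simple precedence rule: a config found in the database always wins, and the value/tweaks config is only a fallback. A quick illustration (the config dicts are invented; only the precedence matters):

    db_config = {"url": "http://db-configured/mcp", "headers": {}}
    tweak_config = {"url": "http://tweak-provided/mcp"}

    assert resolve_mcp_config("my-server", tweak_config, db_config) == db_config  # DB wins
    assert resolve_mcp_config("my-server", tweak_config, None) == tweak_config    # fallback
    assert resolve_mcp_config("my-server", None, None) is None                    # nothing found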
@@ -62,6 +87,7 @@ class MCPToolsComponent(ComponentWithCache):
         "tool",
         "use_cache",
         "verify_ssl",
+        "headers",
     ]

     display_name = "MCP Tools"
@@ -97,6 +123,17 @@ class MCPToolsComponent(ComponentWithCache):
             value=True,
             advanced=True,
         ),
+        DictInput(
+            name="headers",
+            display_name="Headers",
+            info=(
+                "HTTP headers to include with MCP server requests. "
+                "Useful for authentication (e.g., Authorization header). "
+                "These headers override any headers configured in the MCP server settings."
+            ),
+            advanced=True,
+            is_list=True,
+        ),
         DropdownInput(
             name="tool",
             display_name="Tool",
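Because the new headers input is a DictInput with is_list=True, it surfaces in the UI and in API tweaks as a list of key/value pairs rather than a plain mapping. A hypothetical tweak payload for this component might therefore look like the following (the component ID, header values, and tweak envelope are invented; the exact shape depends on your deployment):

    tweaks = {
        "MCPTools-abc12": {
            "headers": [
                {"key": "Authorization", "value": "Bearer <token>"},
                {"key": "X-Org-Id", "value": "acme"},
            ],
        },
    }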
@@ -189,6 +226,8 @@ class MCPToolsComponent(ComponentWithCache):
             return self.tools, {"name": server_name, "config": server_config_from_value}

         try:
+            # Try to fetch from database first to ensure we have the latest config
+            # This ensures database updates (like editing a server) take effect
             try:
                 from langflow.api.v2.mcp import get_server
                 from langflow.services.database.models.user.crud import get_user_by_id
@@ -198,6 +237,8 @@ class MCPToolsComponent(ComponentWithCache):
                     "This feature requires the full Langflow installation."
                 )
                 raise ImportError(msg) from e
+
+            server_config_from_db = None
             async with session_scope() as db:
                 if not self.user_id:
                     msg = "User ID is required for fetching MCP tools."
@@ -205,7 +246,7 @@ class MCPToolsComponent(ComponentWithCache):
                 current_user = await get_user_by_id(db, self.user_id)

                 # Try to get server config from DB/API
-
+                server_config_from_db = await get_server(
                     server_name,
                     current_user,
                     db,
@@ -213,9 +254,12 @@ class MCPToolsComponent(ComponentWithCache):
                     settings_service=get_settings_service(),
                 )

-            #
-
-
+            # Resolve config with proper precedence: DB takes priority, falls back to value
+            server_config = resolve_mcp_config(
+                server_name=server_name,
+                server_config_from_value=server_config_from_value,
+                server_config_from_db=server_config_from_db,
+            )

             if not server_config:
                 self.tools = []
@@ -226,6 +270,31 @@ class MCPToolsComponent(ComponentWithCache):
             verify_ssl = getattr(self, "verify_ssl", True)
             server_config["verify_ssl"] = verify_ssl

+            # Merge headers from component input with server config headers
+            # Component headers take precedence over server config headers
+            component_headers = getattr(self, "headers", None) or []
+            if component_headers:
+                # Convert list of {"key": k, "value": v} to dict
+                component_headers_dict = {}
+                if isinstance(component_headers, list):
+                    for item in component_headers:
+                        if isinstance(item, dict) and "key" in item and "value" in item:
+                            component_headers_dict[item["key"]] = item["value"]
+                elif isinstance(component_headers, dict):
+                    component_headers_dict = component_headers
+
+                if component_headers_dict:
+                    existing_headers = server_config.get("headers", {}) or {}
+                    # Ensure existing_headers is a dict (convert from list if needed)
+                    if isinstance(existing_headers, list):
+                        existing_dict = {}
+                        for item in existing_headers:
+                            if isinstance(item, dict) and "key" in item and "value" in item:
+                                existing_dict[item["key"]] = item["value"]
+                        existing_headers = existing_dict
+                    merged_headers = {**existing_headers, **component_headers_dict}
+                    server_config["headers"] = merged_headers
+
             _, tool_list, tool_cache = await update_tools(
                 server_name=server_name,
                 server_config=server_config,
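The merge above normalizes both header sources to plain dicts and then relies on dict-unpacking order so that component headers override server-config headers. A condensed, self-contained sketch of the same behavior (headers_to_dict is a hypothetical helper, not a name from the diff):

    def headers_to_dict(headers):
        # Accept either a dict or a list of {"key": ..., "value": ...} items.
        if isinstance(headers, dict):
            return headers
        return {
            item["key"]: item["value"]
            for item in (headers or [])
            if isinstance(item, dict) and "key" in item and "value" in item
        }

    server_headers = [{"key": "X-Trace", "value": "abc"}, {"key": "Authorization", "value": "stale"}]
    component_headers = [{"key": "Authorization", "value": "Bearer fresh"}]

    # Later keys win in a dict merge, so the component's Authorization replaces the stale one.
    merged = {**headers_to_dict(server_headers), **headers_to_dict(component_headers)}
    assert merged == {"X-Trace": "abc", "Authorization": "Bearer fresh"}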
@@ -268,7 +337,10 @@ class MCPToolsComponent(ComponentWithCache):
         try:
             if field_name == "tool":
                 try:
-
+                    # Always refresh tools when cache is disabled, or when tools list is empty
+                    # This ensures database edits are reflected immediately when cache is disabled
+                    use_cache = getattr(self, "use_cache", False)
+                    if len(self.tools) == 0 or not use_cache:
                         try:
                             self.tools, build_config["mcp_server"]["value"] = await self.update_tool_list()
                             build_config["tool"]["options"] = [tool.name for tool in self.tools]
@@ -360,6 +432,14 @@ class MCPToolsComponent(ComponentWithCache):
                 return build_config
             safe_cache_set(self._shared_component_cache, "last_selected_server", current_server_name)

+            # When cache is disabled, clear any cached data for this server
+            # This ensures we always fetch fresh data from the database
+            if not use_cache and current_server_name:
+                servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
+                if isinstance(servers_cache, dict) and current_server_name in servers_cache:
+                    servers_cache.pop(current_server_name)
+                    safe_cache_set(self._shared_component_cache, "servers", servers_cache)
+
             # Check if tools are already cached for this server before clearing
             cached_tools = None
             if current_server_name and use_cache:
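The cache-bypass rule added above is easy to state: with use_cache disabled, the per-server cache entry is dropped so the next lookup goes back to the database. A toy model of that rule, with a plain dict standing in for the shared component cache:

    servers_cache = {"my-server": {"tools": ["search"]}}
    use_cache = False
    current_server_name = "my-server"

    if not use_cache and current_server_name in servers_cache:
        servers_cache.pop(current_server_name)  # force a fresh fetch next time

    assert servers_cache == {}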
@@ -378,9 +458,10 @@ class MCPToolsComponent(ComponentWithCache):
                     await logger.awarning(msg)
                     cached_tools = None

-            #
-
-
+            # Clear tools when cache is disabled OR when we don't have cached tools
+            # This ensures fresh tools are fetched after database edits
+            if not cached_tools or not use_cache:
+                self.tools = []  # Clear previous tools to force refresh

             # Clear previous tool inputs if:
             # 1. Server actually changed
@@ -565,7 +646,12 @@ class MCPToolsComponent(ComponentWithCache):
                 if item_dict.get("type") == "text":
                     text = item_dict.get("text")
                     try:
-
+                        parsed = json.loads(text)
+                        # Ensure we always return a dictionary for DataFrame compatibility
+                        if isinstance(parsed, dict):
+                            return parsed
+                        # Wrap non-dict parsed values in a dictionary
+                        return {"text": text, "parsed_value": parsed, "type": "text"}  # noqa: TRY300
                     except json.JSONDecodeError:
                         return item_dict
             return item_dict
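Finally, the new text-content handling guarantees that every parsed tool result row is dict-shaped, which is what the DataFrame construction expects. A standalone rendering of that logic (text_item_to_dict is an illustrative name, not the component's method):

    import json

    def text_item_to_dict(item_dict):
        text = item_dict.get("text")
        try:
            parsed = json.loads(text)
        except json.JSONDecodeError:
            return item_dict  # not JSON: pass the raw item through
        if isinstance(parsed, dict):
            return parsed  # JSON objects pass through unchanged
        # Wrap scalars and arrays so the row is still a dict
        return {"text": text, "parsed_value": parsed, "type": "text"}

    assert text_item_to_dict({"type": "text", "text": '{"a": 1}'}) == {"a": 1}
    assert text_item_to_dict({"type": "text", "text": "[1, 2]"})["parsed_value"] == [1, 2]
    assert text_item_to_dict({"type": "text", "text": "plain words"})["text"] == "plain words"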