lfx-nightly 0.2.0.dev41__py3-none-any.whl → 0.2.1.dev7__py3-none-any.whl
This diff compares the contents of two publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +1 -1
- lfx/base/agents/altk_tool_wrappers.py +1 -1
- lfx/base/agents/utils.py +4 -0
- lfx/base/composio/composio_base.py +78 -41
- lfx/base/data/cloud_storage_utils.py +156 -0
- lfx/base/data/docling_utils.py +130 -55
- lfx/base/datastax/astradb_base.py +75 -64
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/models/__init__.py +11 -1
- lfx/base/models/google_generative_ai_constants.py +33 -9
- lfx/base/models/model_metadata.py +6 -0
- lfx/base/models/ollama_constants.py +196 -30
- lfx/base/models/openai_constants.py +37 -10
- lfx/base/models/unified_models.py +1123 -0
- lfx/base/models/watsonx_constants.py +36 -0
- lfx/base/tools/component_tool.py +2 -9
- lfx/cli/commands.py +3 -0
- lfx/cli/run.py +65 -409
- lfx/cli/script_loader.py +13 -3
- lfx/components/__init__.py +0 -3
- lfx/components/composio/github_composio.py +1 -1
- lfx/components/cuga/cuga_agent.py +39 -27
- lfx/components/data_source/api_request.py +4 -2
- lfx/components/docling/__init__.py +45 -11
- lfx/components/docling/docling_inline.py +39 -49
- lfx/components/elastic/opensearch_multimodal.py +1733 -0
- lfx/components/files_and_knowledge/file.py +384 -36
- lfx/components/files_and_knowledge/ingestion.py +8 -0
- lfx/components/files_and_knowledge/retrieval.py +10 -0
- lfx/components/files_and_knowledge/save_file.py +91 -88
- lfx/components/langchain_utilities/tool_calling.py +14 -6
- lfx/components/llm_operations/batch_run.py +64 -18
- lfx/components/llm_operations/lambda_filter.py +33 -6
- lfx/components/llm_operations/llm_conditional_router.py +39 -7
- lfx/components/llm_operations/structured_output.py +38 -12
- lfx/components/models/__init__.py +16 -74
- lfx/components/models_and_agents/agent.py +51 -203
- lfx/components/models_and_agents/embedding_model.py +171 -255
- lfx/components/models_and_agents/language_model.py +54 -318
- lfx/components/models_and_agents/mcp_component.py +58 -9
- lfx/components/ollama/ollama_embeddings.py +2 -1
- lfx/components/openai/openai_chat_model.py +1 -1
- lfx/components/vllm/__init__.py +37 -0
- lfx/components/vllm/vllm.py +141 -0
- lfx/components/vllm/vllm_embeddings.py +110 -0
- lfx/custom/custom_component/custom_component.py +8 -6
- lfx/graph/graph/base.py +4 -1
- lfx/graph/utils.py +64 -18
- lfx/graph/vertex/base.py +4 -1
- lfx/inputs/__init__.py +2 -0
- lfx/inputs/input_mixin.py +54 -0
- lfx/inputs/inputs.py +115 -0
- lfx/interface/initialize/loading.py +42 -12
- lfx/io/__init__.py +2 -0
- lfx/run/__init__.py +5 -0
- lfx/run/base.py +494 -0
- lfx/schema/data.py +1 -1
- lfx/schema/image.py +26 -7
- lfx/schema/message.py +19 -3
- lfx/services/mcp_composer/service.py +7 -1
- lfx/services/settings/base.py +7 -1
- lfx/services/settings/constants.py +2 -0
- lfx/services/storage/local.py +13 -8
- lfx/utils/constants.py +1 -0
- lfx/utils/validate_cloud.py +14 -3
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/METADATA +5 -2
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/RECORD +70 -61
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/entry_points.txt +0 -0

lfx/components/models_and_agents/language_model.py
@@ -1,43 +1,15 @@
-from typing import Any
-
-import requests
-from langchain_anthropic import ChatAnthropic
-from langchain_ibm import ChatWatsonx
-from langchain_ollama import ChatOllama
-from langchain_openai import ChatOpenAI
-from pydantic.v1 import SecretStr
-
-from lfx.base.models.anthropic_constants import ANTHROPIC_MODELS
-from lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS
-from lfx.base.models.google_generative_ai_model import ChatGoogleGenerativeAIFixed
 from lfx.base.models.model import LCModelComponent
-from lfx.base.models.
-
+from lfx.base.models.unified_models import (
+    get_language_model_options,
+    get_llm,
+    update_model_options_in_build_config,
+)
+from lfx.base.models.watsonx_constants import IBM_WATSONX_URLS
 from lfx.field_typing import LanguageModel
 from lfx.field_typing.range_spec import RangeSpec
-from lfx.inputs.inputs import BoolInput,
-from lfx.io import
-from lfx.log.logger import logger
-from lfx.schema.dotdict import dotdict
-from lfx.utils.util import transform_localhost_url
-
-# IBM watsonx.ai constants
-IBM_WATSONX_DEFAULT_MODELS = ["ibm/granite-3-2b-instruct", "ibm/granite-3-8b-instruct", "ibm/granite-13b-instruct-v2"]
-IBM_WATSONX_URLS = [
-    "https://us-south.ml.cloud.ibm.com",
-    "https://eu-de.ml.cloud.ibm.com",
-    "https://eu-gb.ml.cloud.ibm.com",
-    "https://au-syd.ml.cloud.ibm.com",
-    "https://jp-tok.ml.cloud.ibm.com",
-    "https://ca-tor.ml.cloud.ibm.com",
-]
+from lfx.inputs.inputs import BoolInput, DropdownInput, StrInput
+from lfx.io import MessageInput, ModelInput, MultilineInput, SecretStrInput, SliderInput
 
-# Ollama API constants
-HTTP_STATUS_OK = 200
-JSON_MODELS_KEY = "models"
-JSON_NAME_KEY = "name"
-JSON_CAPABILITIES_KEY = "capabilities"
-DESIRED_CAPABILITY = "completion"
 DEFAULT_OLLAMA_URL = "http://localhost:11434"
 
 
@@ -49,53 +21,22 @@ class LanguageModelComponent(LCModelComponent):
     category = "models"
     priority = 0  # Set priority to 0 to make it appear first
 
-    @staticmethod
-    def fetch_ibm_models(base_url: str) -> list[str]:
-        """Fetch available models from the watsonx.ai API."""
-        try:
-            endpoint = f"{base_url}/ml/v1/foundation_model_specs"
-            params = {"version": "2024-09-16", "filters": "function_text_chat,!lifecycle_withdrawn"}
-            response = requests.get(endpoint, params=params, timeout=10)
-            response.raise_for_status()
-            data = response.json()
-            models = [model["model_id"] for model in data.get("resources", [])]
-            return sorted(models)
-        except Exception:  # noqa: BLE001
-            logger.exception("Error fetching IBM watsonx models. Using default models.")
-            return IBM_WATSONX_DEFAULT_MODELS
-
     inputs = [
-
-            name="
-            display_name="Model
-
-            value="OpenAI",
-            info="Select the model provider",
+        ModelInput(
+            name="model",
+            display_name="Language Model",
+            info="Select your model provider",
             real_time_refresh=True,
-
-            {"icon": "OpenAI"},
-            {"icon": "Anthropic"},
-            {"icon": "GoogleGenerativeAI"},
-            {"icon": "WatsonxAI"},
-            {"icon": "Ollama"},
-            ],
-        ),
-        DropdownInput(
-            name="model_name",
-            display_name="Model Name",
-            options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
-            value=OPENAI_CHAT_MODEL_NAMES[0],
-            info="Select the model to use",
-            real_time_refresh=True,
-            refresh_button=True,
+            required=True,
         ),
         SecretStrInput(
             name="api_key",
-            display_name="
+            display_name="API Key",
             info="Model Provider API key",
             required=False,
             show=True,
             real_time_refresh=True,
+            advanced=True,
         ),
         DropdownInput(
             name="base_url_ibm_watsonx",
@@ -113,7 +54,7 @@ class LanguageModelComponent(LCModelComponent):
             show=False,
             required=False,
         ),
-
+        MessageInput(
             name="ollama_base_url",
             display_name="Ollama API URL",
             info=f"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}",
@@ -151,248 +92,43 @@ class LanguageModelComponent(LCModelComponent):
     ]
 
     def build_model(self) -> LanguageModel:
-        [old lines 154-191: removed code not captured in this diff view]
-                google_api_key=self.api_key,
-            )
-        if provider == "IBM watsonx.ai":
-            if not self.api_key:
-                msg = "IBM API key is required when using IBM watsonx.ai provider"
-                raise ValueError(msg)
-            if not self.base_url_ibm_watsonx:
-                msg = "IBM watsonx API Endpoint is required when using IBM watsonx.ai provider"
-                raise ValueError(msg)
-            if not self.project_id:
-                msg = "IBM watsonx Project ID is required when using IBM watsonx.ai provider"
-                raise ValueError(msg)
-            return ChatWatsonx(
-                apikey=SecretStr(self.api_key).get_secret_value(),
-                url=self.base_url_ibm_watsonx,
-                project_id=self.project_id,
-                model_id=model_name,
-                params={
-                    "temperature": temperature,
-                },
-                streaming=stream,
-            )
-        if provider == "Ollama":
-            if not self.ollama_base_url:
-                msg = "Ollama API URL is required when using Ollama provider"
-                raise ValueError(msg)
-            if not model_name:
-                msg = "Model name is required when using Ollama provider"
-                raise ValueError(msg)
-
-            transformed_base_url = transform_localhost_url(self.ollama_base_url)
-
-            # Check if URL contains /v1 suffix (OpenAI-compatible mode)
-            if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
-                # Strip /v1 suffix and log warning
-                transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
-                logger.warning(
-                    "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
-                    "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
-                    "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
-                    "Learn more at https://docs.ollama.com/openai#openai-compatibility"
-                )
-
-            return ChatOllama(
-                base_url=transformed_base_url,
-                model=model_name,
-                temperature=temperature,
-            )
-        msg = f"Unknown provider: {provider}"
-        raise ValueError(msg)
-
-    async def update_build_config(
-        self, build_config: dotdict, field_value: Any, field_name: str | None = None
-    ) -> dotdict:
-        if field_name == "provider":
-            if field_value == "OpenAI":
-                build_config["model_name"]["options"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
-                build_config["model_name"]["value"] = OPENAI_CHAT_MODEL_NAMES[0]
-                build_config["api_key"]["display_name"] = "OpenAI API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "Anthropic":
-                build_config["model_name"]["options"] = ANTHROPIC_MODELS
-                build_config["model_name"]["value"] = ANTHROPIC_MODELS[0]
-                build_config["api_key"]["display_name"] = "Anthropic API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "Google":
-                build_config["model_name"]["options"] = GOOGLE_GENERATIVE_AI_MODELS
-                build_config["model_name"]["value"] = GOOGLE_GENERATIVE_AI_MODELS[0]
-                build_config["api_key"]["display_name"] = "Google API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "IBM watsonx.ai":
-                build_config["model_name"]["options"] = IBM_WATSONX_DEFAULT_MODELS
-                build_config["model_name"]["value"] = IBM_WATSONX_DEFAULT_MODELS[0]
-                build_config["api_key"]["display_name"] = "IBM API Key"
-                build_config["api_key"]["show"] = True
-                build_config["base_url_ibm_watsonx"]["show"] = True
-                build_config["project_id"]["show"] = True
-                build_config["ollama_base_url"]["show"] = False
-            elif field_value == "Ollama":
-                # Fetch Ollama models from the API
-                build_config["api_key"]["show"] = False
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["ollama_base_url"]["show"] = True
-
-                # Try multiple sources to get the URL (in order of preference):
-                # 1. Instance attribute (already resolved from global/db)
-                # 2. Build config value (may be a global variable reference)
-                # 3. Default value
-                ollama_url = getattr(self, "ollama_base_url", None)
-                if not ollama_url:
-                    config_value = build_config["ollama_base_url"].get("value", DEFAULT_OLLAMA_URL)
-                    # If config_value looks like a variable name (all caps with underscores), use default
-                    is_variable_ref = (
-                        config_value
-                        and isinstance(config_value, str)
-                        and config_value.isupper()
-                        and "_" in config_value
-                    )
-                    if is_variable_ref:
-                        await logger.adebug(
-                            f"Config value appears to be a variable reference: {config_value}, using default"
-                        )
-                        ollama_url = DEFAULT_OLLAMA_URL
-                    else:
-                        ollama_url = config_value
-
-                await logger.adebug(f"Fetching Ollama models for provider switch. URL: {ollama_url}")
-                if await is_valid_ollama_url(url=ollama_url):
-                    try:
-                        models = await get_ollama_models(
-                            base_url_value=ollama_url,
-                            desired_capability=DESIRED_CAPABILITY,
-                            json_models_key=JSON_MODELS_KEY,
-                            json_name_key=JSON_NAME_KEY,
-                            json_capabilities_key=JSON_CAPABILITIES_KEY,
-                        )
-                        build_config["model_name"]["options"] = models
-                        build_config["model_name"]["value"] = models[0] if models else ""
-                    except ValueError:
-                        await logger.awarning("Failed to fetch Ollama models. Setting empty options.")
-                        build_config["model_name"]["options"] = []
-                        build_config["model_name"]["value"] = ""
-                else:
-                    await logger.awarning(f"Invalid Ollama URL: {ollama_url}")
-                    build_config["model_name"]["options"] = []
-                    build_config["model_name"]["value"] = ""
-        elif (
-            field_name == "base_url_ibm_watsonx"
-            and field_value
-            and hasattr(self, "provider")
-            and self.provider == "IBM watsonx.ai"
-        ):
-            # Fetch IBM models when base_url changes
-            try:
-                models = self.fetch_ibm_models(base_url=field_value)
-                build_config["model_name"]["options"] = models
-                build_config["model_name"]["value"] = models[0] if models else IBM_WATSONX_DEFAULT_MODELS[0]
-                info_message = f"Updated model options: {len(models)} models found in {field_value}"
-                logger.info(info_message)
-            except Exception:  # noqa: BLE001
-                logger.exception("Error updating IBM model options.")
-        elif field_name == "ollama_base_url":
-            # Fetch Ollama models when ollama_base_url changes
-            # Use the field_value directly since this is triggered when the field changes
-            logger.debug(
-                f"Fetching Ollama models from updated URL: {build_config['ollama_base_url']} \
-                    and value {self.ollama_base_url}",
-            )
-            await logger.adebug(f"Fetching Ollama models from updated URL: {self.ollama_base_url}")
-            if await is_valid_ollama_url(url=self.ollama_base_url):
-                try:
-                    models = await get_ollama_models(
-                        base_url_value=self.ollama_base_url,
-                        desired_capability=DESIRED_CAPABILITY,
-                        json_models_key=JSON_MODELS_KEY,
-                        json_name_key=JSON_NAME_KEY,
-                        json_capabilities_key=JSON_CAPABILITIES_KEY,
-                    )
-                    build_config["model_name"]["options"] = models
-                    build_config["model_name"]["value"] = models[0] if models else ""
-                    info_message = f"Updated model options: {len(models)} models found in {self.ollama_base_url}"
-                    await logger.ainfo(info_message)
-                except ValueError:
-                    await logger.awarning("Error updating Ollama model options.")
-                    build_config["model_name"]["options"] = []
-                    build_config["model_name"]["value"] = ""
-            else:
-                await logger.awarning(f"Invalid Ollama URL: {self.ollama_base_url}")
-                build_config["model_name"]["options"] = []
-                build_config["model_name"]["value"] = ""
-        elif field_name == "model_name":
-            # Refresh Ollama models when model_name field is accessed
-            if hasattr(self, "provider") and self.provider == "Ollama":
-                ollama_url = getattr(self, "ollama_base_url", DEFAULT_OLLAMA_URL)
-                if await is_valid_ollama_url(url=ollama_url):
-                    try:
-                        models = await get_ollama_models(
-                            base_url_value=ollama_url,
-                            desired_capability=DESIRED_CAPABILITY,
-                            json_models_key=JSON_MODELS_KEY,
-                            json_name_key=JSON_NAME_KEY,
-                            json_capabilities_key=JSON_CAPABILITIES_KEY,
-                        )
-                        build_config["model_name"]["options"] = models
-                    except ValueError:
-                        await logger.awarning("Failed to refresh Ollama models.")
-                        build_config["model_name"]["options"] = []
-                else:
-                    build_config["model_name"]["options"] = []
+        return get_llm(
+            model=self.model,
+            user_id=self.user_id,
+            api_key=self.api_key,
+            temperature=self.temperature,
+            stream=self.stream,
+            watsonx_url=getattr(self, "base_url_ibm_watsonx", None),
+            watsonx_project_id=getattr(self, "project_id", None),
+            ollama_base_url=getattr(self, "ollama_base_url", None),
+        )
+
+    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
+        """Dynamically update build config with user-filtered model options."""
+        # Update model options
+        build_config = update_model_options_in_build_config(
+            component=self,
+            build_config=build_config,
+            cache_key_prefix="language_model_options",
+            get_options_func=get_language_model_options,
+            field_name=field_name,
+            field_value=field_value,
+        )
+
+        # Show/hide provider-specific fields based on selected model
+        if field_name == "model" and isinstance(field_value, list) and len(field_value) > 0:
+            selected_model = field_value[0]
+            provider = selected_model.get("provider", "")
+
+            # Show/hide watsonx fields
+            is_watsonx = provider == "IBM WatsonX"
+            build_config["base_url_ibm_watsonx"]["show"] = is_watsonx
+            build_config["project_id"]["show"] = is_watsonx
+            build_config["base_url_ibm_watsonx"]["required"] = is_watsonx
+            build_config["project_id"]["required"] = is_watsonx
+
+            # Show/hide Ollama fields
+            is_ollama = provider == "Ollama"
+            build_config["ollama_base_url"]["show"] = is_ollama
 
-        # Hide system_message for o1 models - currently unsupported
-        if field_value and field_value.startswith("o1") and hasattr(self, "provider") and self.provider == "OpenAI":
-            if "system_message" in build_config:
-                build_config["system_message"]["show"] = False
-        elif "system_message" in build_config:
-            build_config["system_message"]["show"] = True
         return build_config
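A note on the language_model.py change above: build_model now delegates all provider branching to get_llm, and update_build_config only inspects the selected entry of the new ModelInput field. A minimal, hypothetical sketch of the field_value shape that logic assumes; only the "provider" key is actually read by the code, and the other key and concrete values are illustrative assumptions:

    # Hypothetical selection emitted by the "model" ModelInput; update_build_config reads only "provider".
    field_value = [{"provider": "IBM WatsonX", "name": "ibm/granite-3-8b-instruct"}]

    provider = field_value[0].get("provider", "")
    show_watsonx_fields = provider == "IBM WatsonX"  # drives base_url_ibm_watsonx / project_id visibility
    show_ollama_fields = provider == "Ollama"        # drives ollama_base_url visibility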

lfx/components/models_and_agents/mcp_component.py
@@ -23,6 +23,31 @@ from lfx.schema.message import Message
 from lfx.services.deps import get_settings_service, get_storage_service, session_scope
 
 
+def resolve_mcp_config(
+    server_name: str,  # noqa: ARG001
+    server_config_from_value: dict | None,
+    server_config_from_db: dict | None,
+) -> dict | None:
+    """Resolve MCP server config with proper precedence.
+
+    Resolves the configuration for an MCP server with the following precedence:
+    1. Database config (takes priority) - ensures edits are reflected
+    2. Config from value/tweaks (fallback) - allows REST API to provide config for new servers
+
+    Args:
+        server_name: Name of the MCP server
+        server_config_from_value: Config provided via value/tweaks (optional)
+        server_config_from_db: Config from database (optional)
+
+    Returns:
+        Final config to use (DB takes priority, falls back to value)
+        Returns None if no config found in either location
+    """
+    if server_config_from_db:
+        return server_config_from_db
+    return server_config_from_value
+
+
 class MCPToolsComponent(ComponentWithCache):
     schema_inputs: list = []
     tools: list[StructuredTool] = []
@@ -189,6 +214,8 @@ class MCPToolsComponent(ComponentWithCache):
             return self.tools, {"name": server_name, "config": server_config_from_value}
 
         try:
+            # Try to fetch from database first to ensure we have the latest config
+            # This ensures database updates (like editing a server) take effect
             try:
                 from langflow.api.v2.mcp import get_server
                 from langflow.services.database.models.user.crud import get_user_by_id
@@ -198,6 +225,8 @@ class MCPToolsComponent(ComponentWithCache):
                     "This feature requires the full Langflow installation."
                 )
                 raise ImportError(msg) from e
+
+            server_config_from_db = None
             async with session_scope() as db:
                 if not self.user_id:
                     msg = "User ID is required for fetching MCP tools."
@@ -205,7 +234,7 @@ class MCPToolsComponent(ComponentWithCache):
                 current_user = await get_user_by_id(db, self.user_id)
 
                 # Try to get server config from DB/API
-
+                server_config_from_db = await get_server(
                     server_name,
                     current_user,
                     db,
@@ -213,9 +242,12 @@ class MCPToolsComponent(ComponentWithCache):
                     settings_service=get_settings_service(),
                 )
 
-                #
-
-
+                # Resolve config with proper precedence: DB takes priority, falls back to value
+                server_config = resolve_mcp_config(
+                    server_name=server_name,
+                    server_config_from_value=server_config_from_value,
+                    server_config_from_db=server_config_from_db,
+                )
 
                 if not server_config:
                     self.tools = []
@@ -268,7 +300,10 @@ class MCPToolsComponent(ComponentWithCache):
         try:
             if field_name == "tool":
                 try:
-
+                    # Always refresh tools when cache is disabled, or when tools list is empty
+                    # This ensures database edits are reflected immediately when cache is disabled
+                    use_cache = getattr(self, "use_cache", False)
+                    if len(self.tools) == 0 or not use_cache:
                         try:
                             self.tools, build_config["mcp_server"]["value"] = await self.update_tool_list()
                             build_config["tool"]["options"] = [tool.name for tool in self.tools]
@@ -360,6 +395,14 @@ class MCPToolsComponent(ComponentWithCache):
             return build_config
         safe_cache_set(self._shared_component_cache, "last_selected_server", current_server_name)
 
+        # When cache is disabled, clear any cached data for this server
+        # This ensures we always fetch fresh data from the database
+        if not use_cache and current_server_name:
+            servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
+            if isinstance(servers_cache, dict) and current_server_name in servers_cache:
+                servers_cache.pop(current_server_name)
+                safe_cache_set(self._shared_component_cache, "servers", servers_cache)
+
         # Check if tools are already cached for this server before clearing
         cached_tools = None
         if current_server_name and use_cache:
@@ -378,9 +421,10 @@ class MCPToolsComponent(ComponentWithCache):
                 await logger.awarning(msg)
                 cached_tools = None
 
-        #
-
-
+        # Clear tools when cache is disabled OR when we don't have cached tools
+        # This ensures fresh tools are fetched after database edits
+        if not cached_tools or not use_cache:
+            self.tools = []  # Clear previous tools to force refresh
 
         # Clear previous tool inputs if:
         # 1. Server actually changed
@@ -565,7 +609,12 @@ class MCPToolsComponent(ComponentWithCache):
             if item_dict.get("type") == "text":
                 text = item_dict.get("text")
                 try:
-
+                    parsed = json.loads(text)
+                    # Ensure we always return a dictionary for DataFrame compatibility
+                    if isinstance(parsed, dict):
+                        return parsed
+                    # Wrap non-dict parsed values in a dictionary
+                    return {"text": text, "parsed_value": parsed, "type": "text"}  # noqa: TRY300
                 except json.JSONDecodeError:
                     return item_dict
                 return item_dict
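The mcp_component.py hunks above revolve around the new resolve_mcp_config helper: a config loaded from the database takes priority, and the config passed via value/tweaks is only a fallback. A minimal sketch of that precedence, using the helper exactly as defined in the first mcp_component.py hunk and hypothetical config dictionaries:

    # Hypothetical server configs; resolve_mcp_config is the module-level helper added above.
    db_config = {"command": "uvx some-mcp-server", "env": {"TOKEN": "from-db"}}
    value_config = {"command": "uvx some-mcp-server", "env": {"TOKEN": "from-tweaks"}}

    assert resolve_mcp_config("my-server", value_config, db_config) == db_config  # DB config wins
    assert resolve_mcp_config("my-server", value_config, None) == value_config    # falls back to value/tweaks
    assert resolve_mcp_config("my-server", None, None) is None                    # nothing configured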

lfx/components/ollama/ollama_embeddings.py
@@ -34,7 +34,8 @@ class OllamaEmbeddingsComponent(LCModelComponent):
         MessageTextInput(
             name="base_url",
             display_name="Ollama Base URL",
-
+            info="Endpoint of the Ollama API. Defaults to http://localhost:11434.",
+            value="http://localhost:11434",
             required=True,
         ),
     ]

lfx/components/openai/openai_chat_model.py
@@ -98,7 +98,7 @@ class OpenAIModelComponent(LCModelComponent):
         # Handle api_key - it can be string or SecretStr
         api_key_value = None
         if self.api_key:
-            logger.debug(f"API key type: {type(self.api_key)}, value: {self.api_key
+            logger.debug(f"API key type: {type(self.api_key)}, value: {'***' if self.api_key else None}")
             if isinstance(self.api_key, SecretStr):
                 api_key_value = self.api_key.get_secret_value()
             else:

lfx/components/vllm/__init__.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from lfx.components._importing import import_mod
+
+if TYPE_CHECKING:
+    from lfx.components.vllm.vllm import VllmComponent
+    from lfx.components.vllm.vllm_embeddings import VllmEmbeddingsComponent
+
+_dynamic_imports = {
+    "VllmComponent": "vllm",
+    "VllmEmbeddingsComponent": "vllm_embeddings",
+}
+
+__all__ = [
+    "VllmComponent",
+    "VllmEmbeddingsComponent",
+]
+
+
+def __getattr__(attr_name: str) -> Any:
+    """Lazily import vLLM components on attribute access."""
+    if attr_name not in _dynamic_imports:
+        msg = f"module '{__name__}' has no attribute '{attr_name}'"
+        raise AttributeError(msg)
+    try:
+        result = import_mod(attr_name, _dynamic_imports[attr_name], __spec__.parent)
+    except (ImportError, AttributeError) as e:
+        msg = f"Could not import '{attr_name}' from '{__name__}': {e}"
+        raise AttributeError(msg) from e
+    globals()[attr_name] = result
+    return result
+
+
+def __dir__() -> list[str]:
+    return list(__all__)
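The new lfx/components/vllm/__init__.py follows the package's lazy-import pattern: names listed in _dynamic_imports are imported only on first attribute access via __getattr__ and then cached in the module globals. A small usage sketch, assuming an environment where the vllm component modules and their dependencies are importable:

    from lfx.components import vllm

    print(dir(vllm))                     # ['VllmComponent', 'VllmEmbeddingsComponent']
    component_cls = vllm.VllmComponent   # first access triggers import_mod(...) and caches the class in globals()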