lfx-nightly 0.2.0.dev26__py3-none-any.whl → 0.2.1.dev7__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported public registry. It is provided for informational purposes only.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +9 -4
- lfx/base/agents/altk_base_agent.py +16 -3
- lfx/base/agents/altk_tool_wrappers.py +1 -1
- lfx/base/agents/utils.py +4 -0
- lfx/base/composio/composio_base.py +78 -41
- lfx/base/data/base_file.py +14 -4
- lfx/base/data/cloud_storage_utils.py +156 -0
- lfx/base/data/docling_utils.py +191 -65
- lfx/base/data/storage_utils.py +109 -0
- lfx/base/datastax/astradb_base.py +75 -64
- lfx/base/mcp/util.py +2 -2
- lfx/base/models/__init__.py +11 -1
- lfx/base/models/anthropic_constants.py +21 -12
- lfx/base/models/google_generative_ai_constants.py +33 -9
- lfx/base/models/model_metadata.py +6 -0
- lfx/base/models/ollama_constants.py +196 -30
- lfx/base/models/openai_constants.py +37 -10
- lfx/base/models/unified_models.py +1123 -0
- lfx/base/models/watsonx_constants.py +36 -0
- lfx/base/tools/component_tool.py +2 -9
- lfx/cli/commands.py +6 -1
- lfx/cli/run.py +65 -409
- lfx/cli/script_loader.py +13 -3
- lfx/components/__init__.py +0 -3
- lfx/components/composio/github_composio.py +1 -1
- lfx/components/cuga/cuga_agent.py +39 -27
- lfx/components/data_source/api_request.py +4 -2
- lfx/components/docling/__init__.py +45 -11
- lfx/components/docling/chunk_docling_document.py +3 -1
- lfx/components/docling/docling_inline.py +39 -49
- lfx/components/docling/export_docling_document.py +3 -1
- lfx/components/elastic/opensearch_multimodal.py +215 -57
- lfx/components/files_and_knowledge/file.py +439 -39
- lfx/components/files_and_knowledge/ingestion.py +8 -0
- lfx/components/files_and_knowledge/retrieval.py +10 -0
- lfx/components/files_and_knowledge/save_file.py +123 -53
- lfx/components/ibm/watsonx.py +7 -1
- lfx/components/input_output/chat_output.py +7 -1
- lfx/components/langchain_utilities/tool_calling.py +14 -6
- lfx/components/llm_operations/batch_run.py +80 -25
- lfx/components/llm_operations/lambda_filter.py +33 -6
- lfx/components/llm_operations/llm_conditional_router.py +39 -7
- lfx/components/llm_operations/structured_output.py +38 -12
- lfx/components/models/__init__.py +16 -74
- lfx/components/models_and_agents/agent.py +51 -201
- lfx/components/models_and_agents/embedding_model.py +185 -339
- lfx/components/models_and_agents/language_model.py +54 -318
- lfx/components/models_and_agents/mcp_component.py +58 -9
- lfx/components/ollama/ollama.py +9 -4
- lfx/components/ollama/ollama_embeddings.py +2 -1
- lfx/components/openai/openai_chat_model.py +1 -1
- lfx/components/processing/__init__.py +0 -3
- lfx/components/vllm/__init__.py +37 -0
- lfx/components/vllm/vllm.py +141 -0
- lfx/components/vllm/vllm_embeddings.py +110 -0
- lfx/custom/custom_component/custom_component.py +8 -6
- lfx/custom/directory_reader/directory_reader.py +5 -2
- lfx/graph/utils.py +64 -18
- lfx/inputs/__init__.py +2 -0
- lfx/inputs/input_mixin.py +54 -0
- lfx/inputs/inputs.py +115 -0
- lfx/interface/initialize/loading.py +42 -12
- lfx/io/__init__.py +2 -0
- lfx/run/__init__.py +5 -0
- lfx/run/base.py +494 -0
- lfx/schema/data.py +1 -1
- lfx/schema/image.py +28 -19
- lfx/schema/message.py +19 -3
- lfx/services/interfaces.py +5 -0
- lfx/services/manager.py +5 -4
- lfx/services/mcp_composer/service.py +45 -13
- lfx/services/settings/auth.py +18 -11
- lfx/services/settings/base.py +12 -24
- lfx/services/settings/constants.py +2 -0
- lfx/services/storage/local.py +37 -0
- lfx/services/storage/service.py +19 -0
- lfx/utils/constants.py +1 -0
- lfx/utils/image.py +29 -11
- lfx/utils/validate_cloud.py +14 -3
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/METADATA +5 -2
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/RECORD +84 -78
- lfx/components/processing/dataframe_to_toolset.py +0 -259
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/entry_points.txt +0 -0
lfx/components/models_and_agents/embedding_model.py

@@ -1,17 +1,13 @@
 from typing import Any
 
-import requests
-from ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames
-from langchain_openai import OpenAIEmbeddings
-
-from lfx.base.embeddings.embeddings_class import EmbeddingsWithModels
 from lfx.base.embeddings.model import LCEmbeddingsModel
-from lfx.base.models.
-[lines 10-13 elided in source]
+from lfx.base.models.unified_models import (
+    get_api_key_for_provider,
+    get_embedding_classes,
+    get_embedding_model_options,
+    update_model_options_in_build_config,
 )
+from lfx.base.models.watsonx_constants import IBM_WATSONX_URLS
 from lfx.field_typing import Embeddings
 from lfx.io import (
     BoolInput,
@@ -20,19 +16,9 @@ from lfx.io import (
     FloatInput,
     IntInput,
     MessageTextInput,
+    ModelInput,
     SecretStrInput,
 )
-from lfx.log.logger import logger
-from lfx.schema.dotdict import dotdict
-from lfx.utils.util import transform_localhost_url
-
-# Ollama API constants
-HTTP_STATUS_OK = 200
-JSON_MODELS_KEY = "models"
-JSON_NAME_KEY = "name"
-JSON_CAPABILITIES_KEY = "capabilities"
-DESIRED_CAPABILITY = "embedding"
-DEFAULT_OLLAMA_URL = "http://localhost:11434"
 
 
 class EmbeddingModelComponent(LCEmbeddingsModel):
@@ -43,15 +29,51 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
     name = "EmbeddingModel"
     category = "models"
 
+    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):
+        """Dynamically update build config with user-filtered model options."""
+        # Update model options
+        build_config = update_model_options_in_build_config(
+            component=self,
+            build_config=build_config,
+            cache_key_prefix="embedding_model_options",
+            get_options_func=get_embedding_model_options,
+            field_name=field_name,
+            field_value=field_value,
+        )
+
+        # Show/hide provider-specific fields based on selected model
+        if field_name == "model" and isinstance(field_value, list) and len(field_value) > 0:
+            selected_model = field_value[0]
+            provider = selected_model.get("provider", "")
+
+            # Show/hide watsonx fields
+            is_watsonx = provider == "IBM WatsonX"
+            build_config["base_url_ibm_watsonx"]["show"] = is_watsonx
+            build_config["project_id"]["show"] = is_watsonx
+            build_config["truncate_input_tokens"]["show"] = is_watsonx
+            build_config["input_text"]["show"] = is_watsonx
+            if is_watsonx:
+                build_config["base_url_ibm_watsonx"]["required"] = True
+                build_config["project_id"]["required"] = True
+
+        return build_config
+
     inputs = [
-[line 47 elided in source]
-            name="
-            display_name="Model
-[line 50 elided in source]
-            value="OpenAI",
-            info="Select the embedding model provider",
+        ModelInput(
+            name="model",
+            display_name="Embedding Model",
+            info="Select your model provider",
             real_time_refresh=True,
-[line 54 elided in source]
+            required=True,
+            model_type="embedding",
+            input_types=["Embeddings"],  # Override default to accept Embeddings instead of LanguageModel
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="API Key",
+            info="Model Provider API key",
+            real_time_refresh=True,
+            advanced=True,
         ),
         MessageTextInput(
             name="api_base",
@@ -59,15 +81,7 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
             info="Base URL for the API. Leave empty for default.",
             advanced=True,
         ),
-[line 62 elided in source]
-            name="ollama_base_url",
-            display_name="Ollama API URL",
-            info=f"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}",
-            value=DEFAULT_OLLAMA_URL,
-            show=False,
-            real_time_refresh=True,
-            load_from_db=True,
-        ),
+        # Watson-specific inputs
         DropdownInput(
             name="base_url_ibm_watsonx",
             display_name="watsonx API Endpoint",
@@ -77,24 +91,6 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
             show=False,
             real_time_refresh=True,
         ),
-        DropdownInput(
-            name="model",
-            display_name="Model Name",
-            options=OPENAI_EMBEDDING_MODEL_NAMES,
-            value=OPENAI_EMBEDDING_MODEL_NAMES[0],
-            info="Select the embedding model to use",
-            real_time_refresh=True,
-            refresh_button=True,
-        ),
-        SecretStrInput(
-            name="api_key",
-            display_name="OpenAI API Key",
-            info="Model Provider API key",
-            required=True,
-            show=True,
-            real_time_refresh=True,
-        ),
-        # Watson-specific inputs
         MessageTextInput(
             name="project_id",
             display_name="Project ID",
@@ -108,10 +104,28 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
             "Only supported by certain models.",
             advanced=True,
         ),
-        IntInput(
-[lines 112-114 elided in source]
+        IntInput(
+            name="chunk_size",
+            display_name="Chunk Size",
+            advanced=True,
+            value=1000,
+        ),
+        FloatInput(
+            name="request_timeout",
+            display_name="Request Timeout",
+            advanced=True,
+        ),
+        IntInput(
+            name="max_retries",
+            display_name="Max Retries",
+            advanced=True,
+            value=3,
+        ),
+        BoolInput(
+            name="show_progress_bar",
+            display_name="Show Progress Bar",
+            advanced=True,
+        ),
         DictInput(
             name="model_kwargs",
             display_name="Model Kwargs",
@@ -134,290 +148,122 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
         ),
     ]
 
-[lines 137-139 elided in source]
+    def build_embeddings(self) -> Embeddings:
+        """Build and return an embeddings instance based on the selected model."""
+        # If an Embeddings object is directly connected, return it
         try:
-[lines 141-166 elided in source]
-        model_kwargs = self.model_kwargs or {}
-
-        if provider == "OpenAI":
-            if not api_key:
-                msg = "OpenAI API key is required when using OpenAI provider"
-                raise ValueError(msg)
-
-            # Create the primary embedding instance
-            embeddings_instance = OpenAIEmbeddings(
-                model=model,
-                dimensions=dimensions or None,
-                base_url=api_base or None,
-                api_key=api_key,
-                chunk_size=chunk_size,
-                max_retries=max_retries,
-                timeout=request_timeout or None,
-                show_progress_bar=show_progress_bar,
-                model_kwargs=model_kwargs,
-            )
-
-            # Create dedicated instances for each available model
-            available_models_dict = {}
-            for model_name in OPENAI_EMBEDDING_MODEL_NAMES:
-                available_models_dict[model_name] = OpenAIEmbeddings(
-                    model=model_name,
-                    dimensions=dimensions or None,  # Use same dimensions config for all
-                    base_url=api_base or None,
-                    api_key=api_key,
-                    chunk_size=chunk_size,
-                    max_retries=max_retries,
-                    timeout=request_timeout or None,
-                    show_progress_bar=show_progress_bar,
-                    model_kwargs=model_kwargs,
-                )
-
-            return EmbeddingsWithModels(
-                embeddings=embeddings_instance,
-                available_models=available_models_dict,
-            )
-
-        if provider == "Ollama":
-            try:
-                from langchain_ollama import OllamaEmbeddings
-            except ImportError:
-                try:
-                    from langchain_community.embeddings import OllamaEmbeddings
-                except ImportError:
-                    msg = "Please install langchain-ollama: pip install langchain-ollama"
-                    raise ImportError(msg) from None
-
-            transformed_base_url = transform_localhost_url(ollama_base_url)
-
-            # Check if URL contains /v1 suffix (OpenAI-compatible mode)
-            if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
-                # Strip /v1 suffix and log warning
-                transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
-                logger.warning(
-                    "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
-                    "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
-                    "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
-                    "Learn more at https://docs.ollama.com/openai#openai-compatibility"
-                )
-
-            final_base_url = transformed_base_url or "http://localhost:11434"
-
-            # Create the primary embedding instance
-            embeddings_instance = OllamaEmbeddings(
-                model=model,
-                base_url=final_base_url,
-                **model_kwargs,
-            )
-
-            # Fetch available Ollama models
-            available_model_names = await get_ollama_models(
-                base_url_value=self.ollama_base_url,
-                desired_capability=DESIRED_CAPABILITY,
-                json_models_key=JSON_MODELS_KEY,
-                json_name_key=JSON_NAME_KEY,
-                json_capabilities_key=JSON_CAPABILITIES_KEY,
-            )
-
-            # Create dedicated instances for each available model
-            available_models_dict = {}
-            for model_name in available_model_names:
-                available_models_dict[model_name] = OllamaEmbeddings(
-                    model=model_name,
-                    base_url=final_base_url,
-                    **model_kwargs,
-                )
-
-            return EmbeddingsWithModels(
-                embeddings=embeddings_instance,
-                available_models=available_models_dict,
+            from langchain_core.embeddings import Embeddings as BaseEmbeddings
+
+            if isinstance(self.model, BaseEmbeddings):
+                return self.model
+        except ImportError:
+            pass
+
+        # Safely extract model configuration
+        if not self.model or not isinstance(self.model, list):
+            msg = "Model must be a non-empty list"
+            raise ValueError(msg)
+
+        model = self.model[0]
+        model_name = model.get("name")
+        provider = model.get("provider")
+        metadata = model.get("metadata", {})
+
+        # Get API key from user input or global variables
+        api_key = get_api_key_for_provider(self.user_id, provider, self.api_key)
+
+        # Validate required fields (Ollama doesn't require API key)
+        if not api_key and provider != "Ollama":
+            msg = (
+                f"{provider} API key is required. "
+                f"Please provide it in the component or configure it globally as "
+                f"{provider.upper().replace(' ', '_')}_API_KEY."
             )
-[lines 261-300 elided in source]
+            raise ValueError(msg)
+
+        if not model_name:
+            msg = "Model name is required"
+            raise ValueError(msg)
+
+        # Get embedding class
+        embedding_class_name = metadata.get("embedding_class")
+        if not embedding_class_name:
+            msg = f"No embedding class defined in metadata for {model_name}"
+            raise ValueError(msg)
+
+        embedding_class = get_embedding_classes().get(embedding_class_name)
+        if not embedding_class:
+            msg = f"Unknown embedding class: {embedding_class_name}"
+            raise ValueError(msg)
+
+        # Build kwargs using parameter mapping
+        kwargs = self._build_kwargs(model, metadata)
+
+        return embedding_class(**kwargs)
+
+    def _build_kwargs(self, model: dict[str, Any], metadata: dict[str, Any]) -> dict[str, Any]:
+        """Build kwargs dictionary using parameter mapping."""
+        param_mapping = metadata.get("param_mapping", {})
+        if not param_mapping:
+            msg = "Parameter mapping not found in metadata"
+            raise ValueError(msg)
+
+        kwargs = {}
+
+        # Required parameters - handle both "model" and "model_id" (for watsonx)
+        if "model" in param_mapping:
+            kwargs[param_mapping["model"]] = model.get("name")
+        elif "model_id" in param_mapping:
+            kwargs[param_mapping["model_id"]] = model.get("name")
+        if "api_key" in param_mapping:
+            kwargs[param_mapping["api_key"]] = get_api_key_for_provider(
+                self.user_id,
+                model.get("provider"),
+                self.api_key,
             )
 
-[lines 303-313 elided in source]
+        # Optional parameters with their values
+        provider = model.get("provider")
+        optional_params = {
+            "api_base": self.api_base if self.api_base else None,
+            "dimensions": int(self.dimensions) if self.dimensions else None,
+            "chunk_size": int(self.chunk_size) if self.chunk_size else None,
+            "request_timeout": float(self.request_timeout) if self.request_timeout else None,
+            "max_retries": int(self.max_retries) if self.max_retries else None,
+            "show_progress_bar": self.show_progress_bar if hasattr(self, "show_progress_bar") else None,
+            "model_kwargs": self.model_kwargs if self.model_kwargs else None,
+        }
+
+        # Watson-specific parameters
+        if provider in {"IBM WatsonX", "IBM watsonx.ai"}:
+            # Map base_url_ibm_watsonx to "url" parameter for watsonx
+            if "url" in param_mapping:
+                url_value = (
+                    self.base_url_ibm_watsonx
+                    if hasattr(self, "base_url_ibm_watsonx") and self.base_url_ibm_watsonx
+                    else "https://us-south.ml.cloud.ibm.com"
                 )
-[lines 315-327 elided in source]
-            if
-[lines 329-334 elided in source]
-                build_config["api_base"]["advanced"] = True
-                build_config["api_base"]["show"] = True
-                build_config["ollama_base_url"]["show"] = False
-                build_config["project_id"]["show"] = False
-                build_config["base_url_ibm_watsonx"]["show"] = False
-                build_config["truncate_input_tokens"]["show"] = False
-                build_config["input_text"]["show"] = False
-            elif field_value == "Ollama":
-                build_config["ollama_base_url"]["show"] = True
-
-                if await is_valid_ollama_url(url=self.ollama_base_url):
-                    try:
-                        models = await get_ollama_models(
-                            base_url_value=self.ollama_base_url,
-                            desired_capability=DESIRED_CAPABILITY,
-                            json_models_key=JSON_MODELS_KEY,
-                            json_name_key=JSON_NAME_KEY,
-                            json_capabilities_key=JSON_CAPABILITIES_KEY,
-                        )
-                        build_config["model"]["options"] = models
-                        build_config["model"]["value"] = models[0] if models else ""
-                    except ValueError:
-                        build_config["model"]["options"] = []
-                        build_config["model"]["value"] = ""
+                kwargs[param_mapping["url"]] = url_value
+            # Map project_id for watsonx
+            if hasattr(self, "project_id") and self.project_id and "project_id" in param_mapping:
+                kwargs[param_mapping["project_id"]] = self.project_id
+
+        # Ollama-specific parameters
+        if provider == "Ollama" and "base_url" in param_mapping:
+            # Map api_base to "base_url" parameter for Ollama
+            base_url_value = self.api_base if hasattr(self, "api_base") and self.api_base else "http://localhost:11434"
+            kwargs[param_mapping["base_url"]] = base_url_value
+
+        # Add optional parameters if they have values and are mapped
+        for param_name, param_value in optional_params.items():
+            if param_value is not None and param_name in param_mapping:
+                # Special handling for request_timeout with Google provider
+                if param_name == "request_timeout":
+                    if provider == "Google" and isinstance(param_value, (int, float)):
+                        kwargs[param_mapping[param_name]] = {"timeout": param_value}
+                    else:
+                        kwargs[param_mapping[param_name]] = param_value
                 else:
-
-                    build_config["model"]["value"] = ""
-                    build_config["truncate_input_tokens"]["show"] = False
-                    build_config["input_text"]["show"] = False
-                    build_config["api_key"]["display_name"] = "API Key (Optional)"
-                    build_config["api_key"]["required"] = False
-                    build_config["api_key"]["show"] = False
-                    build_config["api_base"]["show"] = False
-                    build_config["project_id"]["show"] = False
-                    build_config["base_url_ibm_watsonx"]["show"] = False
-
-            elif field_value == "IBM watsonx.ai":
-                build_config["model"]["options"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)
-                build_config["model"]["value"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]
-                build_config["api_key"]["display_name"] = "IBM watsonx.ai API Key"
-                build_config["api_key"]["required"] = True
-                build_config["api_key"]["show"] = True
-                build_config["api_base"]["show"] = False
-                build_config["ollama_base_url"]["show"] = False
-                build_config["base_url_ibm_watsonx"]["show"] = True
-                build_config["project_id"]["show"] = True
-                build_config["truncate_input_tokens"]["show"] = True
-                build_config["input_text"]["show"] = True
-            elif field_name == "base_url_ibm_watsonx":
-                build_config["model"]["options"] = self.fetch_ibm_models(base_url=field_value)
-                build_config["model"]["value"] = self.fetch_ibm_models(base_url=field_value)[0]
-            elif field_name == "ollama_base_url":
-                # # Refresh Ollama models when base URL changes
-                # if hasattr(self, "provider") and self.provider == "Ollama":
-                # Use field_value if provided, otherwise fall back to instance attribute
-                ollama_url = self.ollama_base_url
-                if await is_valid_ollama_url(url=ollama_url):
-                    try:
-                        models = await get_ollama_models(
-                            base_url_value=ollama_url,
-                            desired_capability=DESIRED_CAPABILITY,
-                            json_models_key=JSON_MODELS_KEY,
-                            json_name_key=JSON_NAME_KEY,
-                            json_capabilities_key=JSON_CAPABILITIES_KEY,
-                        )
-                        build_config["model"]["options"] = models
-                        build_config["model"]["value"] = models[0] if models else ""
-                    except ValueError:
-                        await logger.awarning("Failed to fetch Ollama embedding models.")
-                        build_config["model"]["options"] = []
-                        build_config["model"]["value"] = ""
+                    kwargs[param_mapping[param_name]] = param_value
 
-[line 407 elided in source]
-        ollama_url = self.ollama_base_url
-        if await is_valid_ollama_url(url=ollama_url):
-            try:
-                models = await get_ollama_models(
-                    base_url_value=ollama_url,
-                    desired_capability=DESIRED_CAPABILITY,
-                    json_models_key=JSON_MODELS_KEY,
-                    json_name_key=JSON_NAME_KEY,
-                    json_capabilities_key=JSON_CAPABILITIES_KEY,
-                )
-                build_config["model"]["options"] = models
-            except ValueError:
-                await logger.awarning("Failed to refresh Ollama embedding models.")
-                build_config["model"]["options"] = []
-
-        return build_config
+        return kwargs