lfx-nightly 0.2.0.dev26__py3-none-any.whl → 0.2.1.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +9 -4
  3. lfx/base/agents/altk_base_agent.py +16 -3
  4. lfx/base/agents/altk_tool_wrappers.py +1 -1
  5. lfx/base/agents/utils.py +4 -0
  6. lfx/base/composio/composio_base.py +78 -41
  7. lfx/base/data/base_file.py +14 -4
  8. lfx/base/data/cloud_storage_utils.py +156 -0
  9. lfx/base/data/docling_utils.py +191 -65
  10. lfx/base/data/storage_utils.py +109 -0
  11. lfx/base/datastax/astradb_base.py +75 -64
  12. lfx/base/mcp/util.py +2 -2
  13. lfx/base/models/__init__.py +11 -1
  14. lfx/base/models/anthropic_constants.py +21 -12
  15. lfx/base/models/google_generative_ai_constants.py +33 -9
  16. lfx/base/models/model_metadata.py +6 -0
  17. lfx/base/models/ollama_constants.py +196 -30
  18. lfx/base/models/openai_constants.py +37 -10
  19. lfx/base/models/unified_models.py +1123 -0
  20. lfx/base/models/watsonx_constants.py +36 -0
  21. lfx/base/tools/component_tool.py +2 -9
  22. lfx/cli/commands.py +6 -1
  23. lfx/cli/run.py +65 -409
  24. lfx/cli/script_loader.py +13 -3
  25. lfx/components/__init__.py +0 -3
  26. lfx/components/composio/github_composio.py +1 -1
  27. lfx/components/cuga/cuga_agent.py +39 -27
  28. lfx/components/data_source/api_request.py +4 -2
  29. lfx/components/docling/__init__.py +45 -11
  30. lfx/components/docling/chunk_docling_document.py +3 -1
  31. lfx/components/docling/docling_inline.py +39 -49
  32. lfx/components/docling/export_docling_document.py +3 -1
  33. lfx/components/elastic/opensearch_multimodal.py +215 -57
  34. lfx/components/files_and_knowledge/file.py +439 -39
  35. lfx/components/files_and_knowledge/ingestion.py +8 -0
  36. lfx/components/files_and_knowledge/retrieval.py +10 -0
  37. lfx/components/files_and_knowledge/save_file.py +123 -53
  38. lfx/components/ibm/watsonx.py +7 -1
  39. lfx/components/input_output/chat_output.py +7 -1
  40. lfx/components/langchain_utilities/tool_calling.py +14 -6
  41. lfx/components/llm_operations/batch_run.py +80 -25
  42. lfx/components/llm_operations/lambda_filter.py +33 -6
  43. lfx/components/llm_operations/llm_conditional_router.py +39 -7
  44. lfx/components/llm_operations/structured_output.py +38 -12
  45. lfx/components/models/__init__.py +16 -74
  46. lfx/components/models_and_agents/agent.py +51 -201
  47. lfx/components/models_and_agents/embedding_model.py +185 -339
  48. lfx/components/models_and_agents/language_model.py +54 -318
  49. lfx/components/models_and_agents/mcp_component.py +58 -9
  50. lfx/components/ollama/ollama.py +9 -4
  51. lfx/components/ollama/ollama_embeddings.py +2 -1
  52. lfx/components/openai/openai_chat_model.py +1 -1
  53. lfx/components/processing/__init__.py +0 -3
  54. lfx/components/vllm/__init__.py +37 -0
  55. lfx/components/vllm/vllm.py +141 -0
  56. lfx/components/vllm/vllm_embeddings.py +110 -0
  57. lfx/custom/custom_component/custom_component.py +8 -6
  58. lfx/custom/directory_reader/directory_reader.py +5 -2
  59. lfx/graph/utils.py +64 -18
  60. lfx/inputs/__init__.py +2 -0
  61. lfx/inputs/input_mixin.py +54 -0
  62. lfx/inputs/inputs.py +115 -0
  63. lfx/interface/initialize/loading.py +42 -12
  64. lfx/io/__init__.py +2 -0
  65. lfx/run/__init__.py +5 -0
  66. lfx/run/base.py +494 -0
  67. lfx/schema/data.py +1 -1
  68. lfx/schema/image.py +28 -19
  69. lfx/schema/message.py +19 -3
  70. lfx/services/interfaces.py +5 -0
  71. lfx/services/manager.py +5 -4
  72. lfx/services/mcp_composer/service.py +45 -13
  73. lfx/services/settings/auth.py +18 -11
  74. lfx/services/settings/base.py +12 -24
  75. lfx/services/settings/constants.py +2 -0
  76. lfx/services/storage/local.py +37 -0
  77. lfx/services/storage/service.py +19 -0
  78. lfx/utils/constants.py +1 -0
  79. lfx/utils/image.py +29 -11
  80. lfx/utils/validate_cloud.py +14 -3
  81. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/METADATA +5 -2
  82. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/RECORD +84 -78
  83. lfx/components/processing/dataframe_to_toolset.py +0 -259
  84. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/WHEEL +0 -0
  85. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/entry_points.txt +0 -0
lfx/components/vllm/vllm.py ADDED
@@ -0,0 +1,141 @@
+ from typing import Any
+
+ from langchain_openai import ChatOpenAI
+ from pydantic.v1 import SecretStr
+
+ from lfx.base.models.model import LCModelComponent
+ from lfx.field_typing import LanguageModel
+ from lfx.field_typing.range_spec import RangeSpec
+ from lfx.inputs.inputs import BoolInput, DictInput, IntInput, SecretStrInput, SliderInput, StrInput
+ from lfx.log.logger import logger
+
+
+ class VllmComponent(LCModelComponent):
+     display_name = "vLLM"
+     description = "Generates text using vLLM models via OpenAI-compatible API."
+     icon = "vLLM"
+     name = "vLLMModel"
+
+     inputs = [
+         *LCModelComponent.get_base_inputs(),
+         IntInput(
+             name="max_tokens",
+             display_name="Max Tokens",
+             advanced=True,
+             info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
+             range_spec=RangeSpec(min=0, max=128000),
+         ),
+         DictInput(
+             name="model_kwargs",
+             display_name="Model Kwargs",
+             advanced=True,
+             info="Additional keyword arguments to pass to the model.",
+         ),
+         BoolInput(
+             name="json_mode",
+             display_name="JSON Mode",
+             advanced=True,
+             info="If True, it will output JSON regardless of passing a schema.",
+         ),
+         StrInput(
+             name="model_name",
+             display_name="Model Name",
+             advanced=False,
+             info="The name of the vLLM model to use (e.g., 'ibm-granite/granite-3.3-8b-instruct').",
+             value="ibm-granite/granite-3.3-8b-instruct",
+         ),
+         StrInput(
+             name="api_base",
+             display_name="vLLM API Base",
+             advanced=False,
+             info="The base URL of the vLLM API server. Defaults to http://localhost:8000/v1 for local vLLM server.",
+             value="http://localhost:8000/v1",
+         ),
+         SecretStrInput(
+             name="api_key",
+             display_name="API Key",
+             info="The API Key to use for the vLLM model (optional for local servers).",
+             advanced=False,
+             value="",
+             required=False,
+         ),
+         SliderInput(
+             name="temperature",
+             display_name="Temperature",
+             value=0.1,
+             range_spec=RangeSpec(min=0, max=1, step=0.01),
+             show=True,
+         ),
+         IntInput(
+             name="seed",
+             display_name="Seed",
+             info="Controls the reproducibility of the job. Set to -1 to disable (some providers may not support).",
+             advanced=True,
+             value=-1,
+             required=False,
+         ),
+         IntInput(
+             name="max_retries",
+             display_name="Max Retries",
+             info="Max retries when generating. Set to -1 to disable (some providers may not support).",
+             advanced=True,
+             value=-1,
+             required=False,
+         ),
+         IntInput(
+             name="timeout",
+             display_name="Timeout",
+             info="Timeout for requests to vLLM completion API. Set to -1 to disable (some providers may not support).",
+             advanced=True,
+             value=-1,
+             required=False,
+         ),
+     ]
+
+     def build_model(self) -> LanguageModel:  # type: ignore[type-var]
+         logger.debug(f"Executing request with vLLM model: {self.model_name}")
+         parameters = {
+             "api_key": SecretStr(self.api_key).get_secret_value() if self.api_key else None,
+             "model_name": self.model_name,
+             "max_tokens": self.max_tokens or None,
+             "model_kwargs": self.model_kwargs or {},
+             "base_url": self.api_base or "http://localhost:8000/v1",
+             "temperature": self.temperature if self.temperature is not None else 0.1,
+         }
+
+         # Only add optional parameters if explicitly set (not -1)
+         if self.seed is not None and self.seed != -1:
+             parameters["seed"] = self.seed
+         if self.timeout is not None and self.timeout != -1:
+             parameters["timeout"] = self.timeout
+         if self.max_retries is not None and self.max_retries != -1:
+             parameters["max_retries"] = self.max_retries
+
+         output = ChatOpenAI(**parameters)
+         if self.json_mode:
+             output = output.bind(response_format={"type": "json_object"})
+
+         return output
+
+     def _get_exception_message(self, e: Exception):
+         """Get a message from a vLLM exception.
+
+         Args:
+             e (Exception): The exception to get the message from.
+
+         Returns:
+             str: The message from the exception.
+         """
+         try:
+             from openai import BadRequestError
+         except ImportError:
+             return None
+         if isinstance(e, BadRequestError):
+             message = e.body.get("message")
+             if message:
+                 return message
+         return None
+
+     def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:  # noqa: ARG002
+         # vLLM models support all parameters, so no special handling needed
+         return build_config
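
Usage note: the component above ultimately constructs a standard ChatOpenAI client pointed at the vLLM server's OpenAI-compatible endpoint. A minimal sketch of the equivalent direct call, assuming a local server started with `vllm serve ibm-granite/granite-3.3-8b-instruct` (the placeholder API key is an assumption; a local vLLM server ignores it unless launched with --api-key):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model_name="ibm-granite/granite-3.3-8b-instruct",
    base_url="http://localhost:8000/v1",
    api_key="EMPTY",  # assumption: any placeholder works for an unauthenticated local server
    temperature=0.1,
    max_tokens=None,  # None mirrors the component's "0 = unlimited" mapping
)
print(llm.invoke("Say hello.").content)

The seed/timeout/max_retries inputs follow the same pattern: a sentinel of -1 means "leave the parameter out entirely", which is why build_model only copies them into the kwargs dict when they differ from -1.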
lfx/components/vllm/vllm_embeddings.py ADDED
@@ -0,0 +1,110 @@
+ from langchain_openai import OpenAIEmbeddings
+
+ from lfx.base.embeddings.model import LCEmbeddingsModel
+ from lfx.field_typing import Embeddings
+ from lfx.io import BoolInput, DictInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
+
+
+ class VllmEmbeddingsComponent(LCEmbeddingsModel):
+     display_name = "vLLM Embeddings"
+     description = "Generate embeddings using vLLM models via OpenAI-compatible API."
+     icon = "vLLM"
+     name = "vLLMEmbeddings"
+
+     inputs = [
+         MessageTextInput(
+             name="model_name",
+             display_name="Model Name",
+             advanced=False,
+             info="The name of the vLLM embeddings model to use (e.g., 'BAAI/bge-large-en-v1.5').",
+             value="BAAI/bge-large-en-v1.5",
+         ),
+         MessageTextInput(
+             name="api_base",
+             display_name="vLLM API Base",
+             advanced=False,
+             info="The base URL of the vLLM API server. Defaults to http://localhost:8000/v1 for local vLLM server.",
+             value="http://localhost:8000/v1",
+         ),
+         SecretStrInput(
+             name="api_key",
+             display_name="API Key",
+             info="The API Key to use for the vLLM model (optional for local servers).",
+             advanced=False,
+             value="",
+             required=False,
+         ),
+         IntInput(
+             name="dimensions",
+             display_name="Dimensions",
+             info="The number of dimensions the resulting output embeddings should have. "
+             "Only supported by certain models.",
+             advanced=True,
+         ),
+         IntInput(
+             name="chunk_size",
+             display_name="Chunk Size",
+             advanced=True,
+             value=1000,
+             info="The chunk size to use when processing documents.",
+         ),
+         IntInput(
+             name="max_retries",
+             display_name="Max Retries",
+             value=3,
+             advanced=True,
+             info="Maximum number of retries for failed requests.",
+         ),
+         FloatInput(
+             name="request_timeout",
+             display_name="Request Timeout",
+             advanced=True,
+             info="Timeout for requests to vLLM API in seconds.",
+         ),
+         BoolInput(
+             name="show_progress_bar",
+             display_name="Show Progress Bar",
+             advanced=True,
+             info="Whether to show a progress bar when processing multiple documents.",
+         ),
+         BoolInput(
+             name="skip_empty",
+             display_name="Skip Empty",
+             advanced=True,
+             info="Whether to skip empty documents.",
+         ),
+         DictInput(
+             name="model_kwargs",
+             display_name="Model Kwargs",
+             advanced=True,
+             info="Additional keyword arguments to pass to the model.",
+         ),
+         DictInput(
+             name="default_headers",
+             display_name="Default Headers",
+             advanced=True,
+             info="Default headers to use for the API request.",
+         ),
+         DictInput(
+             name="default_query",
+             display_name="Default Query",
+             advanced=True,
+             info="Default query parameters to use for the API request.",
+         ),
+     ]
+
+     def build_embeddings(self) -> Embeddings:
+         return OpenAIEmbeddings(
+             model=self.model_name,
+             base_url=self.api_base or "http://localhost:8000/v1",
+             api_key=self.api_key or None,
+             dimensions=self.dimensions or None,
+             chunk_size=self.chunk_size,
+             max_retries=self.max_retries,
+             timeout=self.request_timeout or None,
+             show_progress_bar=self.show_progress_bar,
+             skip_empty=self.skip_empty,
+             model_kwargs=self.model_kwargs,
+             default_headers=self.default_headers or None,
+             default_query=self.default_query or None,
+         )
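
The embeddings component is likewise a thin wrapper over OpenAIEmbeddings. A sketch of the equivalent direct call against a local server, assuming it hosts BAAI/bge-large-en-v1.5; note that check_embedding_ctx_length=False is not set by the component but is often needed against non-OpenAI backends, since the default path pre-tokenizes inputs with OpenAI's tokenizer:

from langchain_openai import OpenAIEmbeddings

embedder = OpenAIEmbeddings(
    model="BAAI/bge-large-en-v1.5",
    base_url="http://localhost:8000/v1",
    api_key="EMPTY",  # assumption: placeholder key for a local server
    check_embedding_ctx_length=False,  # send raw strings instead of tiktoken token ids
)
vectors = embedder.embed_documents(["first document", "second document"])
print(len(vectors), len(vectors[0]))  # e.g., 2 1024 for bge-large
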
lfx/custom/custom_component/custom_component.py CHANGED
@@ -456,24 +456,26 @@ class CustomComponent(BaseComponent):
          """Returns the variable for the current user with the specified name.
 
          Raises:
-             ValueError: If the user id is not set.
+             ValueError: If the user id is not set and variable not found in context.
 
          Returns:
              The variable for the current user with the specified name.
          """
-         if hasattr(self, "_user_id") and not self.user_id:
-             msg = f"User id is not set for {self.__class__.__name__}"
-             raise ValueError(msg)
-
          # Check graph context for request-level variable overrides first
+         # This allows run_flow to work without user_id when variables are passed
          if hasattr(self, "graph") and self.graph and hasattr(self.graph, "context"):
              context = self.graph.context
              if context and "request_variables" in context:
                  request_variables = context["request_variables"]
                  if name in request_variables:
-                     logger.debug(f"Found context override for variable '{name}': {request_variables[name]}")
+                     logger.debug(f"Found context override for variable '{name}'")
                      return request_variables[name]
 
+         # Only check user_id when we need to access the database
+         if hasattr(self, "_user_id") and not self.user_id:
+             msg = f"User id is not set for {self.__class__.__name__}"
+             raise ValueError(msg)
+
          variable_service = get_variable_service()  # Get service instance
          # Retrieve and decrypt the variable by name for the current user
          if isinstance(self.user_id, str):
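
The behavioral change here is the lookup order: request-level overrides now short-circuit before the user_id check, so flows run via run_flow can resolve variables without a user. A self-contained sketch of the new precedence (function and variable names are illustrative, not lfx's exact API):

def get_variable(name: str, context: dict | None, user_id: str | None) -> str:
    # 1) Request-level overrides win and require no user_id.
    overrides = (context or {}).get("request_variables", {})
    if name in overrides:
        return overrides[name]
    # 2) Only the database path still requires a user id.
    if not user_id:
        raise ValueError("User id is not set")
    return stored[(user_id, name)]  # stand-in for the variable service

stored = {("u1", "api_key"): "stored-value"}
print(get_variable("api_key", {"request_variables": {"api_key": "override"}}, None))  # override
print(get_variable("api_key", None, "u1"))  # stored-value
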
lfx/custom/directory_reader/directory_reader.py CHANGED
@@ -72,8 +72,11 @@ class DirectoryReader:
                  if component["error"] if with_errors else not component["error"]:
                      component_tuple = (*build_component(component), component)
                      components.append(component_tuple)
-             except Exception:  # noqa: BLE001
-                 logger.debug(f"Error while loading component {component['name']} from {component['file']}")
+             except Exception as exc:  # noqa: BLE001
+                 logger.debug(
+                     f"Skipping component {component['name']} from {component['file']} (load error)",
+                     exc_info=exc,
+                 )
                  continue
          items.append({"name": menu["name"], "path": menu["path"], "components": components})
      filtered = [menu for menu in items if menu["components"]]
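
The revised handler keeps the traceback without aborting the directory scan. The same skip-and-continue pattern in miniature, using stdlib logging (it is an assumption that lfx's logger shim accepts exc_info the same way):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

components = [{"name": "ok", "file": "a.py"}, {"name": "broken", "file": "b.py"}]
loaded = []
for component in components:
    try:
        if component["name"] == "broken":
            raise RuntimeError("load error")
        loaded.append(component)
    except Exception as exc:  # log with traceback, then keep scanning
        logger.debug("Skipping component %s from %s", component["name"], component["file"], exc_info=exc)
        continue
print([c["name"] for c in loaded])  # ['ok']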
lfx/graph/utils.py CHANGED
@@ -143,35 +143,81 @@ async def log_vertex_build(
      flow_id: str | UUID,
      vertex_id: str,
      valid: bool,
-     params: Any,  # noqa: ARG001
-     data: dict | Any,  # noqa: ARG001
-     artifacts: dict | None = None,  # noqa: ARG001
+     params: Any,
+     data: dict | Any,
+     artifacts: dict | None = None,
  ) -> None:
      """Asynchronously logs a vertex build record if vertex build storage is enabled.
 
      This is a lightweight implementation that only logs if database service is available.
+     When running within langflow, it will use langflow's database service to persist the build.
+     When running standalone (lfx only), it will only log debug messages.
      """
      try:
-         settings_service = get_settings_service()
-         if not settings_service or not getattr(settings_service.settings, "vertex_builds_storage_enabled", False):
-             return
+         # Try to use langflow's services if available (when running within langflow)
+         try:
+             from langflow.services.deps import get_db_service as langflow_get_db_service
+             from langflow.services.deps import get_settings_service as langflow_get_settings_service
 
-         db_service = get_db_service()
-         if db_service is None:
-             logger.debug("Database service not available, skipping vertex build logging")
-             return
+             settings_service = langflow_get_settings_service()
+             if not settings_service:
+                 return
+             if not getattr(settings_service.settings, "vertex_builds_storage_enabled", False):
+                 return
 
-         try:
              if isinstance(flow_id, str):
                  flow_id = UUID(flow_id)
-         except ValueError:
-             logger.debug(f"Invalid flow_id passed to log_vertex_build: {flow_id!r}")
-             return
 
-         # Log basic vertex build info - concrete implementation should be in langflow
-         logger.debug(f"Vertex build logged: vertex={vertex_id}, flow={flow_id}, valid={valid}")
-     except Exception:  # noqa: BLE001
-         logger.debug("Error logging vertex build")
+             from langflow.services.database.models.vertex_builds.crud import (
+                 log_vertex_build as crud_log_vertex_build,
+             )
+             from langflow.services.database.models.vertex_builds.model import VertexBuildBase
+
+             # Convert data to dict if it's a pydantic model
+             data_dict = data
+             if hasattr(data, "model_dump"):
+                 data_dict = data.model_dump()
+             elif hasattr(data, "dict"):
+                 data_dict = data.dict()
+
+             # Convert artifacts to dict if it's a pydantic model
+             artifacts_dict = artifacts
+             if artifacts is not None:
+                 if hasattr(artifacts, "model_dump"):
+                     artifacts_dict = artifacts.model_dump()
+                 elif hasattr(artifacts, "dict"):
+                     artifacts_dict = artifacts.dict()
+
+             vertex_build = VertexBuildBase(
+                 flow_id=flow_id,
+                 id=vertex_id,
+                 valid=valid,
+                 params=str(params) if params else None,
+                 data=data_dict,
+                 artifacts=artifacts_dict,
+             )
+
+             db_service = langflow_get_db_service()
+             if db_service is None:
+                 return
+
+             async with db_service._with_session() as session:  # noqa: SLF001
+                 await crud_log_vertex_build(session, vertex_build)
+
+         except ImportError:
+             # Fallback for standalone lfx usage (without langflow)
+             settings_service = get_settings_service()
+             if not settings_service or not getattr(settings_service.settings, "vertex_builds_storage_enabled", False):
+                 return
+
+             if isinstance(flow_id, str):
+                 flow_id = UUID(flow_id)
+
+             # Log basic vertex build info - concrete implementation is in langflow
+             logger.debug(f"Vertex build logged: vertex={vertex_id}, flow={flow_id}, valid={valid}")
+
+     except Exception as exc:  # noqa: BLE001
+         logger.warning(f"Error logging vertex build: {exc}")
 
 
  def rewrite_file_path(file_path: str):
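
The dual-path structure matters more than the CRUD details: the inner try prefers langflow's persistence layer, and the ImportError branch keeps standalone lfx working with a debug log only. The same optional-dependency pattern in miniature (module names mirror the diff; record handling is simplified and hypothetical):

import asyncio

async def persist_build(record: dict) -> None:
    try:
        # Full path: langflow is installed alongside lfx.
        from langflow.services.deps import get_db_service
    except ImportError:
        # Standalone lfx: nothing to persist to, so just log and return.
        print(f"vertex build (not persisted): {record['vertex_id']}")
        return
    db_service = get_db_service()
    if db_service is None:
        return
    # ... hand the record to langflow's vertex-build CRUD here

asyncio.run(persist_build({"vertex_id": "ChatInput-abc"}))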
lfx/inputs/__init__.py CHANGED
@@ -17,6 +17,7 @@ from .inputs import (
      McpInput,
      MessageInput,
      MessageTextInput,
+     ModelInput,
      MultilineInput,
      MultilineSecretInput,
      MultiselectInput,
@@ -52,6 +53,7 @@ __all__ = [
      "McpInput",
      "MessageInput",
      "MessageTextInput",
+     "ModelInput",
      "MultilineInput",
      "MultilineSecretInput",
      "MultiselectInput",
lfx/inputs/input_mixin.py CHANGED
@@ -37,6 +37,7 @@ class FieldTypes(str, Enum):
      QUERY = "query"
      TOOLS = "tools"
      MCP = "mcp"
+     MODEL = "model"
 
 
  SerializableFieldTypes = Annotated[FieldTypes, PlainSerializer(lambda v: v.value, return_type=str)]
@@ -136,6 +137,59 @@ class BaseInputMixin(CrossModuleModel, validate_assignment=True):  # type: ignor
          return dump
 
 
+ class ModelInputMixin(BaseModel):
+     model_config = ConfigDict(populate_by_name=True)
+     """Mixin for model input fields."""
+     model_name: str | None = None
+     """Name of the model to be used in the input."""
+     model_type: str | None = "language"
+     """Type of model: 'language' or 'embedding'. Defaults to 'language'."""
+     model_options: list[dict[str, Any]] | None = Field(
+         default=None,
+         validation_alias="options",
+         serialization_alias="options",
+     )
+     """List of model options with name, icon, category, provider, and metadata."""
+     temperature: float | None = None
+     """Temperature parameter for model generation."""
+     max_tokens: int | None = None
+     """Maximum tokens for model generation."""
+     limit: int | None = None
+     """Limit for the number of options to display."""
+     external_options: dict[str, Any] | None = None
+     """Dictionary of external options to display below the dropdown options (e.g., 'Connect other models')."""
+
+     @field_validator("model_options", mode="before")
+     @classmethod
+     def normalize_model_options(cls, v):
+         """Convert simple list of model names to list of dicts format.
+
+         Allows passing ['gpt-4o', 'gpt-4o-mini'] which gets converted to:
+         [{'name': 'gpt-4o', ...}, {'name': 'gpt-4o-mini', ...}]
+         """
+         if v is None or not isinstance(v, list):
+             return v
+
+         # If already in dict format, return as-is
+         if all(isinstance(item, dict) for item in v):
+             return v
+
+         # If it's a list of strings, convert to dict format
+         if all(isinstance(item, str) for item in v):
+             # Avoid circular import by importing the module directly (not through package __init__)
+             try:
+                 from lfx.base.models.unified_models import normalize_model_names_to_dicts
+
+                 return normalize_model_names_to_dicts(v)
+             except Exception:  # noqa: BLE001
+                 # Fallback if import or normalization fails
+                 # This can happen during module initialization or in test environments
+                 return [{"name": item} for item in v]
+
+         # Mixed list or unexpected format, return as-is
+         return v
+
+
  class ToolModeMixin(BaseModel):
      tool_mode: bool = False
 
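What the before-mode validator buys callers, isolated into a runnable pydantic v2 sketch. The real mixin delegates to normalize_model_names_to_dicts, which also attaches icon/provider metadata; only the fallback branch is reproduced here:

from pydantic import BaseModel, ConfigDict, field_validator

class Demo(BaseModel):
    # protected_namespaces=() silences pydantic's warning about "model_"-prefixed fields
    model_config = ConfigDict(protected_namespaces=())
    model_options: list[dict] | None = None

    @field_validator("model_options", mode="before")
    @classmethod
    def normalize(cls, v):
        # Fallback branch of the mixin: plain names become one-key dicts.
        if isinstance(v, list) and all(isinstance(i, str) for i in v):
            return [{"name": i} for i in v]
        return v

print(Demo(model_options=["gpt-4o", "gpt-4o-mini"]).model_options)
# [{'name': 'gpt-4o'}, {'name': 'gpt-4o-mini'}]
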
lfx/inputs/inputs.py CHANGED
@@ -22,6 +22,7 @@ from .input_mixin import (
      LinkMixin,
      ListableInputMixin,
      MetadataTraceMixin,
+     ModelInputMixin,
      MultilineMixin,
      QueryMixin,
      RangeMixin,
@@ -123,6 +124,119 @@ class CodeInput(BaseInputMixin, ListableInputMixin, InputTraceMixin, ToolModeMixin):
      field_type: SerializableFieldTypes = FieldTypes.CODE
 
 
+ class ModelInput(BaseInputMixin, ModelInputMixin, ListableInputMixin, InputTraceMixin, ToolModeMixin):
+     """Represents a model input field with optional LanguageModel connection support.
+
+     By default:
+     - input_types=[] (no handle shown)
+     - external_options with "Connect other models" button
+     - refresh_button=True
+
+     When "Connect other models" is selected (value="connect_other_models"):
+     - input_types is set to ["LanguageModel"] to show the connection handle
+
+     Value format:
+     - Can be a list of dicts: [{'name': 'gpt-4o', 'provider': 'OpenAI', ...}]
+     - Can be a simple list of strings: ['gpt-4o', 'gpt-4o-mini'] (auto-converted)
+     - Can be a single string: 'gpt-4o' (auto-converted to list)
+     - Can be "connect_other_models" string to enable connection mode
+     """
+
+     field_type: SerializableFieldTypes = FieldTypes.MODEL
+     placeholder: str | None = "Setup Provider"
+     input_types: list[str] = Field(default_factory=list)  # Empty by default, no handle shown
+     refresh_button: bool | None = True
+     external_options: dict = Field(
+         default_factory=lambda: {
+             "fields": {
+                 "data": {
+                     "node": {
+                         "name": "connect_other_models",
+                         "display_name": "Connect other models",
+                         "icon": "CornerDownLeft",
+                     }
+                 }
+             },
+         }
+     )
+
+     @field_validator("value", mode="before")
+     @classmethod
+     def normalize_value(cls, v):
+         """Convert simple string or list of strings to list of dicts format.
+
+         Allows passing:
+         - 'gpt-4o' -> [{'name': 'gpt-4o', ...}]
+         - ['gpt-4o', 'claude-3'] -> [{'name': 'gpt-4o', ...}, {'name': 'claude-3', ...}]
+         - [{'name': 'gpt-4o'}] -> [{'name': 'gpt-4o'}] (unchanged)
+         - 'connect_other_models' -> 'connect_other_models' (special value, keep as string)
+         """
+         # Handle empty or None values
+         if v is None or v == "":
+             return v
+
+         # Special case: keep "connect_other_models" as a string to enable connection mode
+         if v == "connect_other_models":
+             return v
+
+         # If it's not a list or string, return as-is (could be a BaseLanguageModel)
+         if not isinstance(v, list | str):
+             return v
+
+         # If it's a list and already in dict format, return as-is
+         if isinstance(v, list) and all(isinstance(item, dict) for item in v):
+             return v
+
+         # If it's a string or list of strings, convert to dict format
+         if isinstance(v, str) or (isinstance(v, list) and all(isinstance(item, str) for item in v)):
+             # Avoid circular import by importing the module directly (not through package __init__)
+             try:
+                 from lfx.base.models.unified_models import normalize_model_names_to_dicts
+
+                 return normalize_model_names_to_dicts(v)
+             except Exception:  # noqa: BLE001
+                 # Fallback if import or normalization fails
+                 # This can happen during module initialization or in test environments
+                 if isinstance(v, str):
+                     return [{"name": v}]
+                 return [{"name": item} for item in v]
+
+         # Return as-is for all other cases
+         return v
+
+     @model_validator(mode="after")
+     def set_defaults(self):
+         """Handle connection mode and set defaults.
+
+         When value is "connect_other_models", set input_types to ["LanguageModel"]
+         to enable the connection handle. Otherwise, keep input_types empty.
+         """
+         # Check if we're in connection mode (user selected "Connect other models")
+         if self.value == "connect_other_models" and not self.input_types:
+             # Enable connection handle by setting input_types
+             # Use object.__setattr__ to avoid triggering validation recursion
+             object.__setattr__(self, "input_types", ["LanguageModel"])
+
+         # Set external_options if not explicitly provided
+         if self.external_options is None or len(self.external_options) == 0:
+             object.__setattr__(
+                 self,
+                 "external_options",
+                 {
+                     "fields": {
+                         "data": {
+                             "node": {
+                                 "name": "connect_other_models",
+                                 "display_name": "Connect other models",
+                                 "icon": "CornerDownLeft",
+                             }
+                         }
+                     },
+                 },
+             )
+         return self
+
+
  # Applying mixins to a specific input type
  class StrInput(
      BaseInputMixin,
@@ -687,6 +801,7 @@ InputTypes: TypeAlias = (
      | HandleInput
      | IntInput
      | McpInput
+     | ModelInput
      | MultilineInput
      | MultilineSecretInput
      | NestedDictInput
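
Taken together, ModelInput accepts several value shapes and normalizes them at construction time. A hedged usage sketch (assumes lfx is importable and that, as with the other input classes, name is the only required constructor field):

from lfx.inputs.inputs import ModelInput

a = ModelInput(name="model", value="gpt-4o")                # -> [{'name': 'gpt-4o', ...}]
b = ModelInput(name="model", value=["gpt-4o", "claude-3"])  # -> list of dicts
c = ModelInput(name="model", value="connect_other_models")  # connection mode
print(c.input_types)  # ['LanguageModel'] (handle enabled by set_defaults)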