lfx-nightly 0.2.0.dev0__py3-none-any.whl → 0.2.0.dev41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +21 -4
  3. lfx/base/agents/altk_base_agent.py +393 -0
  4. lfx/base/agents/altk_tool_wrappers.py +565 -0
  5. lfx/base/agents/events.py +2 -1
  6. lfx/base/composio/composio_base.py +159 -224
  7. lfx/base/data/base_file.py +97 -20
  8. lfx/base/data/docling_utils.py +61 -10
  9. lfx/base/data/storage_utils.py +301 -0
  10. lfx/base/data/utils.py +178 -14
  11. lfx/base/mcp/util.py +2 -2
  12. lfx/base/models/anthropic_constants.py +21 -12
  13. lfx/base/models/groq_constants.py +74 -58
  14. lfx/base/models/groq_model_discovery.py +265 -0
  15. lfx/base/models/model.py +1 -1
  16. lfx/base/models/model_utils.py +100 -0
  17. lfx/base/models/openai_constants.py +7 -0
  18. lfx/base/models/watsonx_constants.py +32 -8
  19. lfx/base/tools/run_flow.py +601 -129
  20. lfx/cli/commands.py +9 -4
  21. lfx/cli/common.py +2 -2
  22. lfx/cli/run.py +1 -1
  23. lfx/cli/script_loader.py +53 -11
  24. lfx/components/Notion/create_page.py +1 -1
  25. lfx/components/Notion/list_database_properties.py +1 -1
  26. lfx/components/Notion/list_pages.py +1 -1
  27. lfx/components/Notion/list_users.py +1 -1
  28. lfx/components/Notion/page_content_viewer.py +1 -1
  29. lfx/components/Notion/search.py +1 -1
  30. lfx/components/Notion/update_page_property.py +1 -1
  31. lfx/components/__init__.py +19 -5
  32. lfx/components/{agents → altk}/__init__.py +5 -9
  33. lfx/components/altk/altk_agent.py +193 -0
  34. lfx/components/apify/apify_actor.py +1 -1
  35. lfx/components/composio/__init__.py +70 -18
  36. lfx/components/composio/apollo_composio.py +11 -0
  37. lfx/components/composio/bitbucket_composio.py +11 -0
  38. lfx/components/composio/canva_composio.py +11 -0
  39. lfx/components/composio/coda_composio.py +11 -0
  40. lfx/components/composio/composio_api.py +10 -0
  41. lfx/components/composio/discord_composio.py +1 -1
  42. lfx/components/composio/elevenlabs_composio.py +11 -0
  43. lfx/components/composio/exa_composio.py +11 -0
  44. lfx/components/composio/firecrawl_composio.py +11 -0
  45. lfx/components/composio/fireflies_composio.py +11 -0
  46. lfx/components/composio/gmail_composio.py +1 -1
  47. lfx/components/composio/googlebigquery_composio.py +11 -0
  48. lfx/components/composio/googlecalendar_composio.py +1 -1
  49. lfx/components/composio/googledocs_composio.py +1 -1
  50. lfx/components/composio/googlemeet_composio.py +1 -1
  51. lfx/components/composio/googlesheets_composio.py +1 -1
  52. lfx/components/composio/googletasks_composio.py +1 -1
  53. lfx/components/composio/heygen_composio.py +11 -0
  54. lfx/components/composio/mem0_composio.py +11 -0
  55. lfx/components/composio/peopledatalabs_composio.py +11 -0
  56. lfx/components/composio/perplexityai_composio.py +11 -0
  57. lfx/components/composio/serpapi_composio.py +11 -0
  58. lfx/components/composio/slack_composio.py +3 -574
  59. lfx/components/composio/slackbot_composio.py +1 -1
  60. lfx/components/composio/snowflake_composio.py +11 -0
  61. lfx/components/composio/tavily_composio.py +11 -0
  62. lfx/components/composio/youtube_composio.py +2 -2
  63. lfx/components/cuga/__init__.py +34 -0
  64. lfx/components/cuga/cuga_agent.py +730 -0
  65. lfx/components/data/__init__.py +78 -28
  66. lfx/components/data_source/__init__.py +58 -0
  67. lfx/components/{data → data_source}/api_request.py +26 -3
  68. lfx/components/{data → data_source}/csv_to_data.py +15 -10
  69. lfx/components/{data → data_source}/json_to_data.py +15 -8
  70. lfx/components/{data → data_source}/news_search.py +1 -1
  71. lfx/components/{data → data_source}/rss.py +1 -1
  72. lfx/components/{data → data_source}/sql_executor.py +1 -1
  73. lfx/components/{data → data_source}/url.py +1 -1
  74. lfx/components/{data → data_source}/web_search.py +1 -1
  75. lfx/components/datastax/astradb_cql.py +1 -1
  76. lfx/components/datastax/astradb_graph.py +1 -1
  77. lfx/components/datastax/astradb_tool.py +1 -1
  78. lfx/components/datastax/astradb_vectorstore.py +1 -1
  79. lfx/components/datastax/hcd.py +1 -1
  80. lfx/components/deactivated/json_document_builder.py +1 -1
  81. lfx/components/docling/__init__.py +0 -3
  82. lfx/components/docling/chunk_docling_document.py +3 -1
  83. lfx/components/docling/export_docling_document.py +3 -1
  84. lfx/components/elastic/elasticsearch.py +1 -1
  85. lfx/components/files_and_knowledge/__init__.py +47 -0
  86. lfx/components/{data → files_and_knowledge}/directory.py +1 -1
  87. lfx/components/{data → files_and_knowledge}/file.py +304 -24
  88. lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +2 -2
  89. lfx/components/{data → files_and_knowledge}/save_file.py +218 -31
  90. lfx/components/flow_controls/__init__.py +58 -0
  91. lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
  92. lfx/components/{logic → flow_controls}/loop.py +43 -9
  93. lfx/components/flow_controls/run_flow.py +108 -0
  94. lfx/components/glean/glean_search_api.py +1 -1
  95. lfx/components/groq/groq.py +35 -28
  96. lfx/components/helpers/__init__.py +102 -0
  97. lfx/components/ibm/watsonx.py +7 -1
  98. lfx/components/input_output/__init__.py +3 -1
  99. lfx/components/input_output/chat.py +4 -3
  100. lfx/components/input_output/chat_output.py +10 -4
  101. lfx/components/input_output/text.py +1 -1
  102. lfx/components/input_output/text_output.py +1 -1
  103. lfx/components/{data → input_output}/webhook.py +1 -1
  104. lfx/components/knowledge_bases/__init__.py +59 -4
  105. lfx/components/langchain_utilities/character.py +1 -1
  106. lfx/components/langchain_utilities/csv_agent.py +84 -16
  107. lfx/components/langchain_utilities/json_agent.py +67 -12
  108. lfx/components/langchain_utilities/language_recursive.py +1 -1
  109. lfx/components/llm_operations/__init__.py +46 -0
  110. lfx/components/{processing → llm_operations}/batch_run.py +17 -8
  111. lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
  112. lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
  113. lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
  114. lfx/components/{processing → llm_operations}/structured_output.py +1 -1
  115. lfx/components/logic/__init__.py +126 -0
  116. lfx/components/mem0/mem0_chat_memory.py +11 -0
  117. lfx/components/models/__init__.py +64 -9
  118. lfx/components/models_and_agents/__init__.py +49 -0
  119. lfx/components/{agents → models_and_agents}/agent.py +6 -4
  120. lfx/components/models_and_agents/embedding_model.py +353 -0
  121. lfx/components/models_and_agents/language_model.py +398 -0
  122. lfx/components/{agents → models_and_agents}/mcp_component.py +53 -44
  123. lfx/components/{helpers → models_and_agents}/memory.py +1 -1
  124. lfx/components/nvidia/system_assist.py +1 -1
  125. lfx/components/olivya/olivya.py +1 -1
  126. lfx/components/ollama/ollama.py +24 -5
  127. lfx/components/processing/__init__.py +9 -60
  128. lfx/components/processing/converter.py +1 -1
  129. lfx/components/processing/dataframe_operations.py +1 -1
  130. lfx/components/processing/parse_json_data.py +2 -2
  131. lfx/components/processing/parser.py +1 -1
  132. lfx/components/processing/split_text.py +1 -1
  133. lfx/components/qdrant/qdrant.py +1 -1
  134. lfx/components/redis/redis.py +1 -1
  135. lfx/components/twelvelabs/split_video.py +10 -0
  136. lfx/components/twelvelabs/video_file.py +12 -0
  137. lfx/components/utilities/__init__.py +43 -0
  138. lfx/components/{helpers → utilities}/calculator_core.py +1 -1
  139. lfx/components/{helpers → utilities}/current_date.py +1 -1
  140. lfx/components/{processing → utilities}/python_repl_core.py +1 -1
  141. lfx/components/vectorstores/local_db.py +9 -0
  142. lfx/components/youtube/youtube_transcripts.py +118 -30
  143. lfx/custom/custom_component/component.py +57 -1
  144. lfx/custom/custom_component/custom_component.py +68 -6
  145. lfx/custom/directory_reader/directory_reader.py +5 -2
  146. lfx/graph/edge/base.py +43 -20
  147. lfx/graph/state/model.py +15 -2
  148. lfx/graph/utils.py +6 -0
  149. lfx/graph/vertex/param_handler.py +10 -7
  150. lfx/helpers/__init__.py +12 -0
  151. lfx/helpers/flow.py +117 -0
  152. lfx/inputs/input_mixin.py +24 -1
  153. lfx/inputs/inputs.py +13 -1
  154. lfx/interface/components.py +161 -83
  155. lfx/log/logger.py +5 -3
  156. lfx/schema/image.py +2 -12
  157. lfx/services/database/__init__.py +5 -0
  158. lfx/services/database/service.py +25 -0
  159. lfx/services/deps.py +87 -22
  160. lfx/services/interfaces.py +5 -0
  161. lfx/services/manager.py +24 -10
  162. lfx/services/mcp_composer/service.py +1029 -162
  163. lfx/services/session.py +5 -0
  164. lfx/services/settings/auth.py +18 -11
  165. lfx/services/settings/base.py +56 -30
  166. lfx/services/settings/constants.py +8 -0
  167. lfx/services/storage/local.py +108 -46
  168. lfx/services/storage/service.py +171 -29
  169. lfx/template/field/base.py +3 -0
  170. lfx/utils/image.py +29 -11
  171. lfx/utils/ssrf_protection.py +384 -0
  172. lfx/utils/validate_cloud.py +26 -0
  173. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/METADATA +38 -22
  174. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/RECORD +189 -160
  175. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/WHEEL +1 -1
  176. lfx/components/agents/altk_agent.py +0 -366
  177. lfx/components/agents/cuga_agent.py +0 -1013
  178. lfx/components/docling/docling_remote_vlm.py +0 -284
  179. lfx/components/logic/run_flow.py +0 -71
  180. lfx/components/models/embedding_model.py +0 -195
  181. lfx/components/models/language_model.py +0 -144
  182. lfx/components/processing/dataframe_to_toolset.py +0 -259
  183. /lfx/components/{data → data_source}/mock_data.py +0 -0
  184. /lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +0 -0
  185. /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
  186. /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
  187. /lfx/components/{logic → flow_controls}/listen.py +0 -0
  188. /lfx/components/{logic → flow_controls}/notify.py +0 -0
  189. /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
  190. /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
  191. /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
  192. /lfx/components/{helpers → processing}/create_list.py +0 -0
  193. /lfx/components/{helpers → processing}/output_parser.py +0 -0
  194. /lfx/components/{helpers → processing}/store_message.py +0 -0
  195. /lfx/components/{helpers → utilities}/id_generator.py +0 -0
  196. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/entry_points.txt +0 -0
lfx/components/models_and_agents/language_model.py (new file)
@@ -0,0 +1,398 @@
+from typing import Any
+
+import requests
+from langchain_anthropic import ChatAnthropic
+from langchain_ibm import ChatWatsonx
+from langchain_ollama import ChatOllama
+from langchain_openai import ChatOpenAI
+from pydantic.v1 import SecretStr
+
+from lfx.base.models.anthropic_constants import ANTHROPIC_MODELS
+from lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS
+from lfx.base.models.google_generative_ai_model import ChatGoogleGenerativeAIFixed
+from lfx.base.models.model import LCModelComponent
+from lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url
+from lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES
+from lfx.field_typing import LanguageModel
+from lfx.field_typing.range_spec import RangeSpec
+from lfx.inputs.inputs import BoolInput, MessageTextInput, StrInput
+from lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput
+from lfx.log.logger import logger
+from lfx.schema.dotdict import dotdict
+from lfx.utils.util import transform_localhost_url
+
+# IBM watsonx.ai constants
+IBM_WATSONX_DEFAULT_MODELS = ["ibm/granite-3-2b-instruct", "ibm/granite-3-8b-instruct", "ibm/granite-13b-instruct-v2"]
+IBM_WATSONX_URLS = [
+    "https://us-south.ml.cloud.ibm.com",
+    "https://eu-de.ml.cloud.ibm.com",
+    "https://eu-gb.ml.cloud.ibm.com",
+    "https://au-syd.ml.cloud.ibm.com",
+    "https://jp-tok.ml.cloud.ibm.com",
+    "https://ca-tor.ml.cloud.ibm.com",
+]
+
+# Ollama API constants
+HTTP_STATUS_OK = 200
+JSON_MODELS_KEY = "models"
+JSON_NAME_KEY = "name"
+JSON_CAPABILITIES_KEY = "capabilities"
+DESIRED_CAPABILITY = "completion"
+DEFAULT_OLLAMA_URL = "http://localhost:11434"
+
+
+class LanguageModelComponent(LCModelComponent):
+    display_name = "Language Model"
+    description = "Runs a language model given a specified provider."
+    documentation: str = "https://docs.langflow.org/components-models"
+    icon = "brain-circuit"
+    category = "models"
+    priority = 0  # Set priority to 0 to make it appear first
+
+    @staticmethod
+    def fetch_ibm_models(base_url: str) -> list[str]:
+        """Fetch available models from the watsonx.ai API."""
+        try:
+            endpoint = f"{base_url}/ml/v1/foundation_model_specs"
+            params = {"version": "2024-09-16", "filters": "function_text_chat,!lifecycle_withdrawn"}
+            response = requests.get(endpoint, params=params, timeout=10)
+            response.raise_for_status()
+            data = response.json()
+            models = [model["model_id"] for model in data.get("resources", [])]
+            return sorted(models)
+        except Exception:  # noqa: BLE001
+            logger.exception("Error fetching IBM watsonx models. Using default models.")
+            return IBM_WATSONX_DEFAULT_MODELS
+
+    inputs = [
+        DropdownInput(
+            name="provider",
+            display_name="Model Provider",
+            options=["OpenAI", "Anthropic", "Google", "IBM watsonx.ai", "Ollama"],
+            value="OpenAI",
+            info="Select the model provider",
+            real_time_refresh=True,
+            options_metadata=[
+                {"icon": "OpenAI"},
+                {"icon": "Anthropic"},
+                {"icon": "GoogleGenerativeAI"},
+                {"icon": "WatsonxAI"},
+                {"icon": "Ollama"},
+            ],
+        ),
+        DropdownInput(
+            name="model_name",
+            display_name="Model Name",
+            options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,
+            value=OPENAI_CHAT_MODEL_NAMES[0],
+            info="Select the model to use",
+            real_time_refresh=True,
+            refresh_button=True,
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="OpenAI API Key",
+            info="Model Provider API key",
+            required=False,
+            show=True,
+            real_time_refresh=True,
+        ),
+        DropdownInput(
+            name="base_url_ibm_watsonx",
+            display_name="watsonx API Endpoint",
+            info="The base URL of the API (IBM watsonx.ai only)",
+            options=IBM_WATSONX_URLS,
+            value=IBM_WATSONX_URLS[0],
+            show=False,
+            real_time_refresh=True,
+        ),
+        StrInput(
+            name="project_id",
+            display_name="watsonx Project ID",
+            info="The project ID associated with the foundation model (IBM watsonx.ai only)",
+            show=False,
+            required=False,
+        ),
+        MessageTextInput(
+            name="ollama_base_url",
+            display_name="Ollama API URL",
+            info=f"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}",
+            value=DEFAULT_OLLAMA_URL,
+            show=False,
+            real_time_refresh=True,
+            load_from_db=True,
+        ),
+        MessageInput(
+            name="input_value",
+            display_name="Input",
+            info="The input text to send to the model",
+        ),
+        MultilineInput(
+            name="system_message",
+            display_name="System Message",
+            info="A system message that helps set the behavior of the assistant",
+            advanced=False,
+        ),
+        BoolInput(
+            name="stream",
+            display_name="Stream",
+            info="Whether to stream the response",
+            value=False,
+            advanced=True,
+        ),
+        SliderInput(
+            name="temperature",
+            display_name="Temperature",
+            value=0.1,
+            info="Controls randomness in responses",
+            range_spec=RangeSpec(min=0, max=1, step=0.01),
+            advanced=True,
+        ),
+    ]
+
+    def build_model(self) -> LanguageModel:
+        provider = self.provider
+        model_name = self.model_name
+        temperature = self.temperature
+        stream = self.stream
+
+        if provider == "OpenAI":
+            if not self.api_key:
+                msg = "OpenAI API key is required when using OpenAI provider"
+                raise ValueError(msg)
+
+            if model_name in OPENAI_REASONING_MODEL_NAMES:
+                # reasoning models do not support temperature (yet)
+                temperature = None
+
+            return ChatOpenAI(
+                model_name=model_name,
+                temperature=temperature,
+                streaming=stream,
+                openai_api_key=self.api_key,
+            )
+        if provider == "Anthropic":
+            if not self.api_key:
+                msg = "Anthropic API key is required when using Anthropic provider"
+                raise ValueError(msg)
+            return ChatAnthropic(
+                model=model_name,
+                temperature=temperature,
+                streaming=stream,
+                anthropic_api_key=self.api_key,
+            )
+        if provider == "Google":
+            if not self.api_key:
+                msg = "Google API key is required when using Google provider"
+                raise ValueError(msg)
+            return ChatGoogleGenerativeAIFixed(
+                model=model_name,
+                temperature=temperature,
+                streaming=stream,
+                google_api_key=self.api_key,
+            )
+        if provider == "IBM watsonx.ai":
+            if not self.api_key:
+                msg = "IBM API key is required when using IBM watsonx.ai provider"
+                raise ValueError(msg)
+            if not self.base_url_ibm_watsonx:
+                msg = "IBM watsonx API Endpoint is required when using IBM watsonx.ai provider"
+                raise ValueError(msg)
+            if not self.project_id:
+                msg = "IBM watsonx Project ID is required when using IBM watsonx.ai provider"
+                raise ValueError(msg)
+            return ChatWatsonx(
+                apikey=SecretStr(self.api_key).get_secret_value(),
+                url=self.base_url_ibm_watsonx,
+                project_id=self.project_id,
+                model_id=model_name,
+                params={
+                    "temperature": temperature,
+                },
+                streaming=stream,
+            )
+        if provider == "Ollama":
+            if not self.ollama_base_url:
+                msg = "Ollama API URL is required when using Ollama provider"
+                raise ValueError(msg)
+            if not model_name:
+                msg = "Model name is required when using Ollama provider"
+                raise ValueError(msg)
+
+            transformed_base_url = transform_localhost_url(self.ollama_base_url)
+
+            # Check if URL contains /v1 suffix (OpenAI-compatible mode)
+            if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
+                # Strip /v1 suffix and log warning
+                transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
+                logger.warning(
+                    "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
+                    "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
+                    "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
+                    "Learn more at https://docs.ollama.com/openai#openai-compatibility"
+                )
+
+            return ChatOllama(
+                base_url=transformed_base_url,
+                model=model_name,
+                temperature=temperature,
+            )
+        msg = f"Unknown provider: {provider}"
+        raise ValueError(msg)
+
+    async def update_build_config(
+        self, build_config: dotdict, field_value: Any, field_name: str | None = None
+    ) -> dotdict:
+        if field_name == "provider":
+            if field_value == "OpenAI":
+                build_config["model_name"]["options"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
+                build_config["model_name"]["value"] = OPENAI_CHAT_MODEL_NAMES[0]
+                build_config["api_key"]["display_name"] = "OpenAI API Key"
+                build_config["api_key"]["show"] = True
+                build_config["base_url_ibm_watsonx"]["show"] = False
+                build_config["project_id"]["show"] = False
+                build_config["ollama_base_url"]["show"] = False
+            elif field_value == "Anthropic":
+                build_config["model_name"]["options"] = ANTHROPIC_MODELS
+                build_config["model_name"]["value"] = ANTHROPIC_MODELS[0]
+                build_config["api_key"]["display_name"] = "Anthropic API Key"
+                build_config["api_key"]["show"] = True
+                build_config["base_url_ibm_watsonx"]["show"] = False
+                build_config["project_id"]["show"] = False
+                build_config["ollama_base_url"]["show"] = False
+            elif field_value == "Google":
+                build_config["model_name"]["options"] = GOOGLE_GENERATIVE_AI_MODELS
+                build_config["model_name"]["value"] = GOOGLE_GENERATIVE_AI_MODELS[0]
+                build_config["api_key"]["display_name"] = "Google API Key"
+                build_config["api_key"]["show"] = True
+                build_config["base_url_ibm_watsonx"]["show"] = False
+                build_config["project_id"]["show"] = False
+                build_config["ollama_base_url"]["show"] = False
+            elif field_value == "IBM watsonx.ai":
+                build_config["model_name"]["options"] = IBM_WATSONX_DEFAULT_MODELS
+                build_config["model_name"]["value"] = IBM_WATSONX_DEFAULT_MODELS[0]
+                build_config["api_key"]["display_name"] = "IBM API Key"
+                build_config["api_key"]["show"] = True
+                build_config["base_url_ibm_watsonx"]["show"] = True
+                build_config["project_id"]["show"] = True
+                build_config["ollama_base_url"]["show"] = False
+            elif field_value == "Ollama":
+                # Fetch Ollama models from the API
+                build_config["api_key"]["show"] = False
+                build_config["base_url_ibm_watsonx"]["show"] = False
+                build_config["project_id"]["show"] = False
+                build_config["ollama_base_url"]["show"] = True
+
+                # Try multiple sources to get the URL (in order of preference):
+                # 1. Instance attribute (already resolved from global/db)
+                # 2. Build config value (may be a global variable reference)
+                # 3. Default value
+                ollama_url = getattr(self, "ollama_base_url", None)
+                if not ollama_url:
+                    config_value = build_config["ollama_base_url"].get("value", DEFAULT_OLLAMA_URL)
+                    # If config_value looks like a variable name (all caps with underscores), use default
+                    is_variable_ref = (
+                        config_value
+                        and isinstance(config_value, str)
+                        and config_value.isupper()
+                        and "_" in config_value
+                    )
+                    if is_variable_ref:
+                        await logger.adebug(
+                            f"Config value appears to be a variable reference: {config_value}, using default"
+                        )
+                        ollama_url = DEFAULT_OLLAMA_URL
+                    else:
+                        ollama_url = config_value
+
+                await logger.adebug(f"Fetching Ollama models for provider switch. URL: {ollama_url}")
+                if await is_valid_ollama_url(url=ollama_url):
+                    try:
+                        models = await get_ollama_models(
+                            base_url_value=ollama_url,
+                            desired_capability=DESIRED_CAPABILITY,
+                            json_models_key=JSON_MODELS_KEY,
+                            json_name_key=JSON_NAME_KEY,
+                            json_capabilities_key=JSON_CAPABILITIES_KEY,
+                        )
+                        build_config["model_name"]["options"] = models
+                        build_config["model_name"]["value"] = models[0] if models else ""
+                    except ValueError:
+                        await logger.awarning("Failed to fetch Ollama models. Setting empty options.")
+                        build_config["model_name"]["options"] = []
+                        build_config["model_name"]["value"] = ""
+                else:
+                    await logger.awarning(f"Invalid Ollama URL: {ollama_url}")
+                    build_config["model_name"]["options"] = []
+                    build_config["model_name"]["value"] = ""
+        elif (
+            field_name == "base_url_ibm_watsonx"
+            and field_value
+            and hasattr(self, "provider")
+            and self.provider == "IBM watsonx.ai"
+        ):
+            # Fetch IBM models when base_url changes
+            try:
+                models = self.fetch_ibm_models(base_url=field_value)
+                build_config["model_name"]["options"] = models
+                build_config["model_name"]["value"] = models[0] if models else IBM_WATSONX_DEFAULT_MODELS[0]
+                info_message = f"Updated model options: {len(models)} models found in {field_value}"
+                logger.info(info_message)
+            except Exception:  # noqa: BLE001
+                logger.exception("Error updating IBM model options.")
+        elif field_name == "ollama_base_url":
+            # Fetch Ollama models when ollama_base_url changes
+            # Use the field_value directly since this is triggered when the field changes
+            logger.debug(
+                f"Fetching Ollama models from updated URL: {build_config['ollama_base_url']} \
+                and value {self.ollama_base_url}",
+            )
+            await logger.adebug(f"Fetching Ollama models from updated URL: {self.ollama_base_url}")
+            if await is_valid_ollama_url(url=self.ollama_base_url):
+                try:
+                    models = await get_ollama_models(
+                        base_url_value=self.ollama_base_url,
+                        desired_capability=DESIRED_CAPABILITY,
+                        json_models_key=JSON_MODELS_KEY,
+                        json_name_key=JSON_NAME_KEY,
+                        json_capabilities_key=JSON_CAPABILITIES_KEY,
+                    )
+                    build_config["model_name"]["options"] = models
+                    build_config["model_name"]["value"] = models[0] if models else ""
+                    info_message = f"Updated model options: {len(models)} models found in {self.ollama_base_url}"
+                    await logger.ainfo(info_message)
+                except ValueError:
+                    await logger.awarning("Error updating Ollama model options.")
+                    build_config["model_name"]["options"] = []
+                    build_config["model_name"]["value"] = ""
+            else:
+                await logger.awarning(f"Invalid Ollama URL: {self.ollama_base_url}")
+                build_config["model_name"]["options"] = []
+                build_config["model_name"]["value"] = ""
+        elif field_name == "model_name":
+            # Refresh Ollama models when model_name field is accessed
+            if hasattr(self, "provider") and self.provider == "Ollama":
+                ollama_url = getattr(self, "ollama_base_url", DEFAULT_OLLAMA_URL)
+                if await is_valid_ollama_url(url=ollama_url):
+                    try:
+                        models = await get_ollama_models(
+                            base_url_value=ollama_url,
+                            desired_capability=DESIRED_CAPABILITY,
+                            json_models_key=JSON_MODELS_KEY,
+                            json_name_key=JSON_NAME_KEY,
+                            json_capabilities_key=JSON_CAPABILITIES_KEY,
+                        )
+                        build_config["model_name"]["options"] = models
+                    except ValueError:
+                        await logger.awarning("Failed to refresh Ollama models.")
+                        build_config["model_name"]["options"] = []
+                else:
+                    build_config["model_name"]["options"] = []
+
+        # Hide system_message for o1 models - currently unsupported
+        if field_value and field_value.startswith("o1") and hasattr(self, "provider") and self.provider == "OpenAI":
+            if "system_message" in build_config:
+                build_config["system_message"]["show"] = False
+        elif "system_message" in build_config:
+            build_config["system_message"]["show"] = True
+        return build_config
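
Note on the Ollama branch above: build_model() silently rewrites OpenAI-compatible base URLs before constructing ChatOllama. A minimal standalone sketch of that normalization rule, using only the standard library (normalize_ollama_url is an illustrative name, not part of lfx):

# Sketch of the '/v1' handling shown in build_model() above.
# normalize_ollama_url is a hypothetical helper for illustration only.
def normalize_ollama_url(url: str) -> str:
    """Strip a trailing '/v1' (OpenAI-compatible) segment from an Ollama base URL."""
    trimmed = url.rstrip("/")
    if trimmed.endswith("/v1"):
        # The native Ollama API is served at the bare host, not under /v1.
        trimmed = trimmed.removesuffix("/v1")
    return trimmed

assert normalize_ollama_url("http://localhost:11434/v1/") == "http://localhost:11434"
assert normalize_ollama_url("http://localhost:11434") == "http://localhost:11434"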
lfx/components/models_and_agents/mcp_component.py
@@ -3,7 +3,6 @@ from __future__ import annotations
 import asyncio
 import json
 import uuid
-from typing import Any
 
 from langchain_core.tools import StructuredTool  # noqa: TC002
 
@@ -67,7 +66,7 @@ class MCPToolsComponent(ComponentWithCache):
 
     display_name = "MCP Tools"
     description = "Connect to an MCP server to use its tools."
-    documentation: str = "https://docs.langflow.org/mcp-client"
+    documentation: str = "https://docs.langflow.org/mcp-tools"
     icon = "Mcp"
     name = "MCPTools"
 
@@ -325,9 +324,29 @@ class MCPToolsComponent(ComponentWithCache):
 
         current_server_name = field_value.get("name") if isinstance(field_value, dict) else field_value
         _last_selected_server = safe_cache_get(self._shared_component_cache, "last_selected_server", "")
+        server_changed = current_server_name != _last_selected_server
+
+        # Determine if "Tool Mode" is active by checking if the tool dropdown is hidden.
+        is_in_tool_mode = build_config["tools_metadata"]["show"]
+
+        # Get use_cache setting to determine if we should use cached data
+        use_cache = getattr(self, "use_cache", False)
+
+        # Fast path: if server didn't change and we already have options, keep them as-is
+        # BUT only if caching is enabled or we're in tool mode
+        existing_options = build_config.get("tool", {}).get("options") or []
+        if not server_changed and existing_options:
+            # In non-tool mode with cache disabled, skip the fast path to force refresh
+            if not is_in_tool_mode and not use_cache:
+                pass  # Continue to refresh logic below
+            else:
+                if not is_in_tool_mode:
+                    build_config["tool"]["show"] = True
+                return build_config
 
         # To avoid unnecessary updates, only proceed if the server has actually changed
-        if (_last_selected_server in (current_server_name, "")) and build_config["tool"]["show"]:
+        # OR if caching is disabled (to force refresh in non-tool mode)
+        if (_last_selected_server in (current_server_name, "")) and build_config["tool"]["show"] and use_cache:
            if current_server_name:
                servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
                if isinstance(servers_cache, dict):
@@ -339,36 +358,35 @@ class MCPToolsComponent(ComponentWithCache):
                    return build_config
            else:
                return build_config
-
-        # Determine if "Tool Mode" is active by checking if the tool dropdown is hidden.
-        is_in_tool_mode = build_config["tools_metadata"]["show"]
        safe_cache_set(self._shared_component_cache, "last_selected_server", current_server_name)
 
        # Check if tools are already cached for this server before clearing
        cached_tools = None
-        if current_server_name:
-            use_cache = getattr(self, "use_cache", True)
-            if use_cache:
-                servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
-                if isinstance(servers_cache, dict):
-                    cached = servers_cache.get(current_server_name)
-                    if cached is not None:
-                        try:
-                            cached_tools = cached["tools"]
-                            self.tools = cached_tools
-                            self.tool_names = cached["tool_names"]
-                            self._tool_cache = cached["tool_cache"]
-                        except (TypeError, KeyError, AttributeError) as e:
-                            # Handle corrupted cache data by ignoring it
-                            msg = f"Unable to use cached data for MCP Server,{current_server_name}: {e}"
-                            await logger.awarning(msg)
-                            cached_tools = None
+        if current_server_name and use_cache:
+            servers_cache = safe_cache_get(self._shared_component_cache, "servers", {})
+            if isinstance(servers_cache, dict):
+                cached = servers_cache.get(current_server_name)
+                if cached is not None:
+                    try:
+                        cached_tools = cached["tools"]
+                        self.tools = cached_tools
+                        self.tool_names = cached["tool_names"]
+                        self._tool_cache = cached["tool_cache"]
+                    except (TypeError, KeyError, AttributeError) as e:
+                        # Handle corrupted cache data by ignoring it
+                        msg = f"Unable to use cached data for MCP Server,{current_server_name}: {e}"
+                        await logger.awarning(msg)
+                        cached_tools = None
 
        # Only clear tools if we don't have cached tools for the current server
        if not cached_tools:
            self.tools = []  # Clear previous tools only if no cache
 
-        self.remove_non_default_keys(build_config)  # Clear previous tool inputs
+        # Clear previous tool inputs if:
+        # 1. Server actually changed
+        # 2. Cache is disabled (meaning tool list will be refreshed)
+        if server_changed or not use_cache:
+            self.remove_non_default_keys(build_config)
 
        # Only show the tool dropdown if not in tool_mode
        if not is_in_tool_mode:
@@ -381,7 +399,12 @@ class MCPToolsComponent(ComponentWithCache):
            # Show loading state only when we need to fetch tools
            build_config["tool"]["placeholder"] = "Loading tools..."
            build_config["tool"]["options"] = []
-            build_config["tool"]["value"] = uuid.uuid4()
+            # Force a value refresh when:
+            # 1. Server changed
+            # 2. We don't have cached tools
+            # 3. Cache is disabled (to force refresh on config changes)
+            if server_changed or not cached_tools or not use_cache:
+                build_config["tool"]["value"] = uuid.uuid4()
        else:
            # Keep the tool dropdown hidden if in tool_mode
            self._not_load_actions = True
@@ -426,18 +449,6 @@ class MCPToolsComponent(ComponentWithCache):
                continue
        return inputs
 
-    def remove_input_schema_from_build_config(
-        self, build_config: dict, tool_name: str, input_schema: dict[list[InputTypes], Any]
-    ):
-        """Remove the input schema for the tool from the build config."""
-        # Keep only schemas that don't belong to the current tool
-        input_schema = {k: v for k, v in input_schema.items() if k != tool_name}
-        # Remove all inputs from other tools
-        for value in input_schema.values():
-            for _input in value:
-                if _input.name in build_config:
-                    build_config.pop(_input.name)
-
    def remove_non_default_keys(self, build_config: dict) -> None:
        """Remove non-default keys from the build config."""
        for key in list(build_config.keys()):
@@ -461,24 +472,23 @@ class MCPToolsComponent(ComponentWithCache):
            return
 
        try:
-            # Store current values before removing inputs
+            # Store current values before removing inputs (only for the current tool)
            current_values = {}
            for key, value in build_config.items():
                if key not in self.default_keys and isinstance(value, dict) and "value" in value:
                    current_values[key] = value["value"]
 
-            # Get all tool inputs and remove old ones
-            input_schema_for_all_tools = self.get_inputs_for_all_tools(self.tools)
-            self.remove_input_schema_from_build_config(build_config, tool_name, input_schema_for_all_tools)
+            # Remove ALL non-default keys (all previous tool inputs)
+            self.remove_non_default_keys(build_config)
 
-            # Get and validate new inputs
+            # Get and validate new inputs for the selected tool
            self.schema_inputs = await self._validate_schema_inputs(tool_obj)
            if not self.schema_inputs:
                msg = f"No input parameters to configure for tool '{tool_name}'"
                await logger.ainfo(msg)
                return
 
-            # Add new inputs to build config
+            # Add new inputs to build config for the selected tool only
            for schema_input in self.schema_inputs:
                if not schema_input or not hasattr(schema_input, "name"):
                    msg = "Invalid schema input detected, skipping"
@@ -556,7 +566,6 @@ class MCPToolsComponent(ComponentWithCache):
            text = item_dict.get("text")
            try:
                return json.loads(text)
-                # convert it to dict
            except json.JSONDecodeError:
                return item_dict
        return item_dict
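
Note on the caching changes above: they reduce to a single reuse rule: existing tool options are kept only when the selected server is unchanged and either caching is enabled or Tool Mode hides the dropdown; otherwise the tool list is refetched. A hedged distillation of that predicate (should_reuse_tool_options is a hypothetical name, not an lfx API):

# Hypothetical distillation of the refresh decision in update_build_config above.
def should_reuse_tool_options(
    *, server_changed: bool, has_options: bool, use_cache: bool, is_in_tool_mode: bool
) -> bool:
    if server_changed or not has_options:
        return False  # a new server or an empty dropdown always triggers a fetch
    # Same server with options: reuse unless caching is off and the dropdown is visible.
    return use_cache or is_in_tool_mode

# Example: disabling the cache on the same server forces a refetch in non-tool mode.
assert should_reuse_tool_options(server_changed=False, has_options=True, use_cache=False, is_in_tool_mode=False) is False
assert should_reuse_tool_options(server_changed=False, has_options=True, use_cache=True, is_in_tool_mode=False) is True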
lfx/components/models_and_agents/memory.py
@@ -16,7 +16,7 @@ from lfx.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSA
 class MemoryComponent(Component):
     display_name = "Message History"
     description = "Stores or retrieves stored chat messages from Langflow tables or an external memory."
-    documentation: str = "https://docs.langflow.org/components-helpers#message-history"
+    documentation: str = "https://docs.langflow.org/message-history"
     icon = "message-square-more"
     name = "Memory"
     default_keys = ["mode", "memory", "session_id", "context_id"]
lfx/components/nvidia/system_assist.py
@@ -15,7 +15,7 @@ class NvidiaSystemAssistComponent(ComponentWithCache):
         "The user may query GPU specifications, state, and ask the NV-API to perform "
         "several GPU-editing actions. The prompt must be human-readable language."
     )
-    documentation = "https://docs.langflow.org/integrations-nvidia-g-assist"
+    documentation = "https://docs.langflow.org/bundles-nvidia"
     icon = "NVIDIA"
     rise_initialized = False
 
lfx/components/olivya/olivya.py
@@ -11,7 +11,7 @@ from lfx.schema.data import Data
 class OlivyaComponent(Component):
     display_name = "Place Call"
     description = "A component to create an outbound call request from Olivya's platform."
-    documentation: str = "http://docs.langflow.org/components/olivya"
+    documentation: str = "https://docs.olivya.io"
     icon = "Olivya"
     name = "OlivyaComponent"
 