lfx-nightly 0.1.13.dev0__py3-none-any.whl → 0.2.0.dev26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (237)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +121 -29
  3. lfx/base/agents/altk_base_agent.py +380 -0
  4. lfx/base/agents/altk_tool_wrappers.py +565 -0
  5. lfx/base/agents/events.py +103 -35
  6. lfx/base/agents/utils.py +15 -2
  7. lfx/base/composio/composio_base.py +183 -233
  8. lfx/base/data/base_file.py +88 -21
  9. lfx/base/data/storage_utils.py +192 -0
  10. lfx/base/data/utils.py +178 -14
  11. lfx/base/datastax/__init__.py +5 -0
  12. lfx/{components/vectorstores/astradb.py → base/datastax/astradb_base.py} +84 -473
  13. lfx/base/embeddings/embeddings_class.py +113 -0
  14. lfx/base/io/chat.py +5 -4
  15. lfx/base/mcp/util.py +101 -15
  16. lfx/base/models/groq_constants.py +74 -58
  17. lfx/base/models/groq_model_discovery.py +265 -0
  18. lfx/base/models/model.py +1 -1
  19. lfx/base/models/model_input_constants.py +74 -7
  20. lfx/base/models/model_utils.py +100 -0
  21. lfx/base/models/ollama_constants.py +3 -0
  22. lfx/base/models/openai_constants.py +7 -0
  23. lfx/base/models/watsonx_constants.py +36 -0
  24. lfx/base/tools/run_flow.py +601 -129
  25. lfx/cli/commands.py +7 -4
  26. lfx/cli/common.py +2 -2
  27. lfx/cli/run.py +1 -1
  28. lfx/cli/script_loader.py +53 -11
  29. lfx/components/Notion/create_page.py +1 -1
  30. lfx/components/Notion/list_database_properties.py +1 -1
  31. lfx/components/Notion/list_pages.py +1 -1
  32. lfx/components/Notion/list_users.py +1 -1
  33. lfx/components/Notion/page_content_viewer.py +1 -1
  34. lfx/components/Notion/search.py +1 -1
  35. lfx/components/Notion/update_page_property.py +1 -1
  36. lfx/components/__init__.py +19 -5
  37. lfx/components/altk/__init__.py +34 -0
  38. lfx/components/altk/altk_agent.py +193 -0
  39. lfx/components/amazon/amazon_bedrock_converse.py +1 -1
  40. lfx/components/apify/apify_actor.py +4 -4
  41. lfx/components/composio/__init__.py +70 -18
  42. lfx/components/composio/apollo_composio.py +11 -0
  43. lfx/components/composio/bitbucket_composio.py +11 -0
  44. lfx/components/composio/canva_composio.py +11 -0
  45. lfx/components/composio/coda_composio.py +11 -0
  46. lfx/components/composio/composio_api.py +10 -0
  47. lfx/components/composio/discord_composio.py +1 -1
  48. lfx/components/composio/elevenlabs_composio.py +11 -0
  49. lfx/components/composio/exa_composio.py +11 -0
  50. lfx/components/composio/firecrawl_composio.py +11 -0
  51. lfx/components/composio/fireflies_composio.py +11 -0
  52. lfx/components/composio/gmail_composio.py +1 -1
  53. lfx/components/composio/googlebigquery_composio.py +11 -0
  54. lfx/components/composio/googlecalendar_composio.py +1 -1
  55. lfx/components/composio/googledocs_composio.py +1 -1
  56. lfx/components/composio/googlemeet_composio.py +1 -1
  57. lfx/components/composio/googlesheets_composio.py +1 -1
  58. lfx/components/composio/googletasks_composio.py +1 -1
  59. lfx/components/composio/heygen_composio.py +11 -0
  60. lfx/components/composio/mem0_composio.py +11 -0
  61. lfx/components/composio/peopledatalabs_composio.py +11 -0
  62. lfx/components/composio/perplexityai_composio.py +11 -0
  63. lfx/components/composio/serpapi_composio.py +11 -0
  64. lfx/components/composio/slack_composio.py +3 -574
  65. lfx/components/composio/slackbot_composio.py +1 -1
  66. lfx/components/composio/snowflake_composio.py +11 -0
  67. lfx/components/composio/tavily_composio.py +11 -0
  68. lfx/components/composio/youtube_composio.py +2 -2
  69. lfx/components/{agents → cuga}/__init__.py +5 -7
  70. lfx/components/cuga/cuga_agent.py +730 -0
  71. lfx/components/data/__init__.py +78 -28
  72. lfx/components/data_source/__init__.py +58 -0
  73. lfx/components/{data → data_source}/api_request.py +26 -3
  74. lfx/components/{data → data_source}/csv_to_data.py +15 -10
  75. lfx/components/{data → data_source}/json_to_data.py +15 -8
  76. lfx/components/{data → data_source}/news_search.py +1 -1
  77. lfx/components/{data → data_source}/rss.py +1 -1
  78. lfx/components/{data → data_source}/sql_executor.py +1 -1
  79. lfx/components/{data → data_source}/url.py +1 -1
  80. lfx/components/{data → data_source}/web_search.py +1 -1
  81. lfx/components/datastax/__init__.py +12 -6
  82. lfx/components/datastax/{astra_assistant_manager.py → astradb_assistant_manager.py} +1 -0
  83. lfx/components/datastax/astradb_chatmemory.py +40 -0
  84. lfx/components/datastax/astradb_cql.py +6 -32
  85. lfx/components/datastax/astradb_graph.py +10 -124
  86. lfx/components/datastax/astradb_tool.py +13 -53
  87. lfx/components/datastax/astradb_vectorstore.py +134 -977
  88. lfx/components/datastax/create_assistant.py +1 -0
  89. lfx/components/datastax/create_thread.py +1 -0
  90. lfx/components/datastax/dotenv.py +1 -0
  91. lfx/components/datastax/get_assistant.py +1 -0
  92. lfx/components/datastax/getenvvar.py +1 -0
  93. lfx/components/datastax/graph_rag.py +1 -1
  94. lfx/components/datastax/hcd.py +1 -1
  95. lfx/components/datastax/list_assistants.py +1 -0
  96. lfx/components/datastax/run.py +1 -0
  97. lfx/components/deactivated/json_document_builder.py +1 -1
  98. lfx/components/elastic/elasticsearch.py +1 -1
  99. lfx/components/elastic/opensearch_multimodal.py +1575 -0
  100. lfx/components/files_and_knowledge/__init__.py +47 -0
  101. lfx/components/{data → files_and_knowledge}/directory.py +1 -1
  102. lfx/components/{data → files_and_knowledge}/file.py +246 -18
  103. lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +17 -9
  104. lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +18 -10
  105. lfx/components/{data → files_and_knowledge}/save_file.py +142 -22
  106. lfx/components/flow_controls/__init__.py +58 -0
  107. lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
  108. lfx/components/{logic → flow_controls}/loop.py +47 -9
  109. lfx/components/flow_controls/run_flow.py +108 -0
  110. lfx/components/glean/glean_search_api.py +1 -1
  111. lfx/components/groq/groq.py +35 -28
  112. lfx/components/helpers/__init__.py +102 -0
  113. lfx/components/ibm/watsonx.py +25 -21
  114. lfx/components/input_output/__init__.py +3 -1
  115. lfx/components/input_output/chat.py +12 -3
  116. lfx/components/input_output/chat_output.py +12 -4
  117. lfx/components/input_output/text.py +1 -1
  118. lfx/components/input_output/text_output.py +1 -1
  119. lfx/components/{data → input_output}/webhook.py +1 -1
  120. lfx/components/knowledge_bases/__init__.py +59 -4
  121. lfx/components/langchain_utilities/character.py +1 -1
  122. lfx/components/langchain_utilities/csv_agent.py +84 -16
  123. lfx/components/langchain_utilities/json_agent.py +67 -12
  124. lfx/components/langchain_utilities/language_recursive.py +1 -1
  125. lfx/components/llm_operations/__init__.py +46 -0
  126. lfx/components/{processing → llm_operations}/batch_run.py +1 -1
  127. lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
  128. lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
  129. lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
  130. lfx/components/{processing → llm_operations}/structured_output.py +56 -18
  131. lfx/components/logic/__init__.py +126 -0
  132. lfx/components/mem0/mem0_chat_memory.py +11 -0
  133. lfx/components/mistral/mistral_embeddings.py +1 -1
  134. lfx/components/models/__init__.py +64 -9
  135. lfx/components/models_and_agents/__init__.py +49 -0
  136. lfx/components/{agents → models_and_agents}/agent.py +49 -6
  137. lfx/components/models_and_agents/embedding_model.py +423 -0
  138. lfx/components/models_and_agents/language_model.py +398 -0
  139. lfx/components/{agents → models_and_agents}/mcp_component.py +84 -45
  140. lfx/components/{helpers → models_and_agents}/memory.py +1 -1
  141. lfx/components/nvidia/system_assist.py +1 -1
  142. lfx/components/olivya/olivya.py +1 -1
  143. lfx/components/ollama/ollama.py +235 -14
  144. lfx/components/openrouter/openrouter.py +49 -147
  145. lfx/components/processing/__init__.py +9 -57
  146. lfx/components/processing/converter.py +1 -1
  147. lfx/components/processing/dataframe_operations.py +1 -1
  148. lfx/components/processing/parse_json_data.py +2 -2
  149. lfx/components/processing/parser.py +7 -2
  150. lfx/components/processing/split_text.py +1 -1
  151. lfx/components/qdrant/qdrant.py +1 -1
  152. lfx/components/redis/redis.py +1 -1
  153. lfx/components/twelvelabs/split_video.py +10 -0
  154. lfx/components/twelvelabs/video_file.py +12 -0
  155. lfx/components/utilities/__init__.py +43 -0
  156. lfx/components/{helpers → utilities}/calculator_core.py +1 -1
  157. lfx/components/{helpers → utilities}/current_date.py +1 -1
  158. lfx/components/{processing → utilities}/python_repl_core.py +1 -1
  159. lfx/components/vectorstores/__init__.py +0 -6
  160. lfx/components/vectorstores/local_db.py +9 -0
  161. lfx/components/youtube/youtube_transcripts.py +118 -30
  162. lfx/custom/custom_component/component.py +60 -3
  163. lfx/custom/custom_component/custom_component.py +68 -6
  164. lfx/field_typing/constants.py +1 -0
  165. lfx/graph/edge/base.py +45 -22
  166. lfx/graph/graph/base.py +5 -2
  167. lfx/graph/graph/schema.py +3 -2
  168. lfx/graph/state/model.py +15 -2
  169. lfx/graph/utils.py +6 -0
  170. lfx/graph/vertex/base.py +4 -1
  171. lfx/graph/vertex/param_handler.py +10 -7
  172. lfx/graph/vertex/vertex_types.py +1 -1
  173. lfx/helpers/__init__.py +12 -0
  174. lfx/helpers/flow.py +117 -0
  175. lfx/inputs/input_mixin.py +24 -1
  176. lfx/inputs/inputs.py +13 -1
  177. lfx/interface/components.py +161 -83
  178. lfx/io/schema.py +6 -0
  179. lfx/log/logger.py +5 -3
  180. lfx/schema/schema.py +5 -0
  181. lfx/services/database/__init__.py +5 -0
  182. lfx/services/database/service.py +25 -0
  183. lfx/services/deps.py +87 -22
  184. lfx/services/manager.py +19 -6
  185. lfx/services/mcp_composer/service.py +998 -157
  186. lfx/services/session.py +5 -0
  187. lfx/services/settings/base.py +51 -7
  188. lfx/services/settings/constants.py +8 -0
  189. lfx/services/storage/local.py +76 -46
  190. lfx/services/storage/service.py +152 -29
  191. lfx/template/field/base.py +3 -0
  192. lfx/utils/ssrf_protection.py +384 -0
  193. lfx/utils/validate_cloud.py +26 -0
  194. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/METADATA +38 -22
  195. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/RECORD +210 -196
  196. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/WHEEL +1 -1
  197. lfx/components/agents/cuga_agent.py +0 -1013
  198. lfx/components/datastax/astra_db.py +0 -77
  199. lfx/components/datastax/cassandra.py +0 -92
  200. lfx/components/logic/run_flow.py +0 -71
  201. lfx/components/models/embedding_model.py +0 -114
  202. lfx/components/models/language_model.py +0 -144
  203. lfx/components/vectorstores/astradb_graph.py +0 -326
  204. lfx/components/vectorstores/cassandra.py +0 -264
  205. lfx/components/vectorstores/cassandra_graph.py +0 -238
  206. lfx/components/vectorstores/chroma.py +0 -167
  207. lfx/components/vectorstores/clickhouse.py +0 -135
  208. lfx/components/vectorstores/couchbase.py +0 -102
  209. lfx/components/vectorstores/elasticsearch.py +0 -267
  210. lfx/components/vectorstores/faiss.py +0 -111
  211. lfx/components/vectorstores/graph_rag.py +0 -141
  212. lfx/components/vectorstores/hcd.py +0 -314
  213. lfx/components/vectorstores/milvus.py +0 -115
  214. lfx/components/vectorstores/mongodb_atlas.py +0 -213
  215. lfx/components/vectorstores/opensearch.py +0 -243
  216. lfx/components/vectorstores/pgvector.py +0 -72
  217. lfx/components/vectorstores/pinecone.py +0 -134
  218. lfx/components/vectorstores/qdrant.py +0 -109
  219. lfx/components/vectorstores/supabase.py +0 -76
  220. lfx/components/vectorstores/upstash.py +0 -124
  221. lfx/components/vectorstores/vectara.py +0 -97
  222. lfx/components/vectorstores/vectara_rag.py +0 -164
  223. lfx/components/vectorstores/weaviate.py +0 -89
  224. /lfx/components/{data → data_source}/mock_data.py +0 -0
  225. /lfx/components/datastax/{astra_vectorize.py → astradb_vectorize.py} +0 -0
  226. /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
  227. /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
  228. /lfx/components/{logic → flow_controls}/listen.py +0 -0
  229. /lfx/components/{logic → flow_controls}/notify.py +0 -0
  230. /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
  231. /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
  232. /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
  233. /lfx/components/{helpers → processing}/create_list.py +0 -0
  234. /lfx/components/{helpers → processing}/output_parser.py +0 -0
  235. /lfx/components/{helpers → processing}/store_message.py +0 -0
  236. /lfx/components/{helpers → utilities}/id_generator.py +0 -0
  237. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/entry_points.txt +0 -0
lfx/components/models_and_agents/embedding_model.py
@@ -0,0 +1,423 @@
+from typing import Any
+
+import requests
+from ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames
+from langchain_openai import OpenAIEmbeddings
+
+from lfx.base.embeddings.embeddings_class import EmbeddingsWithModels
+from lfx.base.embeddings.model import LCEmbeddingsModel
+from lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url
+from lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES
+from lfx.base.models.watsonx_constants import (
+    IBM_WATSONX_URLS,
+    WATSONX_EMBEDDING_MODEL_NAMES,
+)
+from lfx.field_typing import Embeddings
+from lfx.io import (
+    BoolInput,
+    DictInput,
+    DropdownInput,
+    FloatInput,
+    IntInput,
+    MessageTextInput,
+    SecretStrInput,
+)
+from lfx.log.logger import logger
+from lfx.schema.dotdict import dotdict
+from lfx.utils.util import transform_localhost_url
+
+# Ollama API constants
+HTTP_STATUS_OK = 200
+JSON_MODELS_KEY = "models"
+JSON_NAME_KEY = "name"
+JSON_CAPABILITIES_KEY = "capabilities"
+DESIRED_CAPABILITY = "embedding"
+DEFAULT_OLLAMA_URL = "http://localhost:11434"
+
+
+class EmbeddingModelComponent(LCEmbeddingsModel):
+    display_name = "Embedding Model"
+    description = "Generate embeddings using a specified provider."
+    documentation: str = "https://docs.langflow.org/components-embedding-models"
+    icon = "binary"
+    name = "EmbeddingModel"
+    category = "models"
+
+    inputs = [
+        DropdownInput(
+            name="provider",
+            display_name="Model Provider",
+            options=["OpenAI", "Ollama", "IBM watsonx.ai"],
+            value="OpenAI",
+            info="Select the embedding model provider",
+            real_time_refresh=True,
+            options_metadata=[{"icon": "OpenAI"}, {"icon": "Ollama"}, {"icon": "WatsonxAI"}],
+        ),
+        MessageTextInput(
+            name="api_base",
+            display_name="API Base URL",
+            info="Base URL for the API. Leave empty for default.",
+            advanced=True,
+        ),
+        MessageTextInput(
+            name="ollama_base_url",
+            display_name="Ollama API URL",
+            info=f"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}",
+            value=DEFAULT_OLLAMA_URL,
+            show=False,
+            real_time_refresh=True,
+            load_from_db=True,
+        ),
+        DropdownInput(
+            name="base_url_ibm_watsonx",
+            display_name="watsonx API Endpoint",
+            info="The base URL of the API (IBM watsonx.ai only)",
+            options=IBM_WATSONX_URLS,
+            value=IBM_WATSONX_URLS[0],
+            show=False,
+            real_time_refresh=True,
+        ),
+        DropdownInput(
+            name="model",
+            display_name="Model Name",
+            options=OPENAI_EMBEDDING_MODEL_NAMES,
+            value=OPENAI_EMBEDDING_MODEL_NAMES[0],
+            info="Select the embedding model to use",
+            real_time_refresh=True,
+            refresh_button=True,
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="OpenAI API Key",
+            info="Model Provider API key",
+            required=True,
+            show=True,
+            real_time_refresh=True,
+        ),
+        # Watson-specific inputs
+        MessageTextInput(
+            name="project_id",
+            display_name="Project ID",
+            info="IBM watsonx.ai Project ID (required for IBM watsonx.ai)",
+            show=False,
+        ),
+        IntInput(
+            name="dimensions",
+            display_name="Dimensions",
+            info="The number of dimensions the resulting output embeddings should have. "
+            "Only supported by certain models.",
+            advanced=True,
+        ),
+        IntInput(name="chunk_size", display_name="Chunk Size", advanced=True, value=1000),
+        FloatInput(name="request_timeout", display_name="Request Timeout", advanced=True),
+        IntInput(name="max_retries", display_name="Max Retries", advanced=True, value=3),
+        BoolInput(name="show_progress_bar", display_name="Show Progress Bar", advanced=True),
+        DictInput(
+            name="model_kwargs",
+            display_name="Model Kwargs",
+            advanced=True,
+            info="Additional keyword arguments to pass to the model.",
+        ),
+        IntInput(
+            name="truncate_input_tokens",
+            display_name="Truncate Input Tokens",
+            advanced=True,
+            value=200,
+            show=False,
+        ),
+        BoolInput(
+            name="input_text",
+            display_name="Include the original text in the output",
+            value=True,
+            advanced=True,
+            show=False,
+        ),
+    ]
+
+    @staticmethod
+    def fetch_ibm_models(base_url: str) -> list[str]:
+        """Fetch available models from the watsonx.ai API."""
+        try:
+            endpoint = f"{base_url}/ml/v1/foundation_model_specs"
+            params = {
+                "version": "2024-09-16",
+                "filters": "function_embedding,!lifecycle_withdrawn:and",
+            }
+            response = requests.get(endpoint, params=params, timeout=10)
+            response.raise_for_status()
+            data = response.json()
+            models = [model["model_id"] for model in data.get("resources", [])]
+            return sorted(models)
+        except Exception:  # noqa: BLE001
+            logger.exception("Error fetching models")
+            return WATSONX_EMBEDDING_MODEL_NAMES
+
+    async def build_embeddings(self) -> Embeddings:
+        provider = self.provider
+        model = self.model
+        api_key = self.api_key
+        api_base = self.api_base
+        base_url_ibm_watsonx = self.base_url_ibm_watsonx
+        ollama_base_url = self.ollama_base_url
+        dimensions = self.dimensions
+        chunk_size = self.chunk_size
+        request_timeout = self.request_timeout
+        max_retries = self.max_retries
+        show_progress_bar = self.show_progress_bar
+        model_kwargs = self.model_kwargs or {}
+
+        if provider == "OpenAI":
+            if not api_key:
+                msg = "OpenAI API key is required when using OpenAI provider"
+                raise ValueError(msg)
+
+            # Create the primary embedding instance
+            embeddings_instance = OpenAIEmbeddings(
+                model=model,
+                dimensions=dimensions or None,
+                base_url=api_base or None,
+                api_key=api_key,
+                chunk_size=chunk_size,
+                max_retries=max_retries,
+                timeout=request_timeout or None,
+                show_progress_bar=show_progress_bar,
+                model_kwargs=model_kwargs,
+            )
+
+            # Create dedicated instances for each available model
+            available_models_dict = {}
+            for model_name in OPENAI_EMBEDDING_MODEL_NAMES:
+                available_models_dict[model_name] = OpenAIEmbeddings(
+                    model=model_name,
+                    dimensions=dimensions or None,  # Use same dimensions config for all
+                    base_url=api_base or None,
+                    api_key=api_key,
+                    chunk_size=chunk_size,
+                    max_retries=max_retries,
+                    timeout=request_timeout or None,
+                    show_progress_bar=show_progress_bar,
+                    model_kwargs=model_kwargs,
+                )
+
+            return EmbeddingsWithModels(
+                embeddings=embeddings_instance,
+                available_models=available_models_dict,
+            )
+
+        if provider == "Ollama":
+            try:
+                from langchain_ollama import OllamaEmbeddings
+            except ImportError:
+                try:
+                    from langchain_community.embeddings import OllamaEmbeddings
+                except ImportError:
+                    msg = "Please install langchain-ollama: pip install langchain-ollama"
+                    raise ImportError(msg) from None
+
+            transformed_base_url = transform_localhost_url(ollama_base_url)
+
+            # Check if URL contains /v1 suffix (OpenAI-compatible mode)
+            if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
+                # Strip /v1 suffix and log warning
+                transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
+                logger.warning(
+                    "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
+                    "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
+                    "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
+                    "Learn more at https://docs.ollama.com/openai#openai-compatibility"
+                )
+
+            final_base_url = transformed_base_url or "http://localhost:11434"
+
+            # Create the primary embedding instance
+            embeddings_instance = OllamaEmbeddings(
+                model=model,
+                base_url=final_base_url,
+                **model_kwargs,
+            )
+
+            # Fetch available Ollama models
+            available_model_names = await get_ollama_models(
+                base_url_value=self.ollama_base_url,
+                desired_capability=DESIRED_CAPABILITY,
+                json_models_key=JSON_MODELS_KEY,
+                json_name_key=JSON_NAME_KEY,
+                json_capabilities_key=JSON_CAPABILITIES_KEY,
+            )
+
+            # Create dedicated instances for each available model
+            available_models_dict = {}
+            for model_name in available_model_names:
+                available_models_dict[model_name] = OllamaEmbeddings(
+                    model=model_name,
+                    base_url=final_base_url,
+                    **model_kwargs,
+                )
+
+            return EmbeddingsWithModels(
+                embeddings=embeddings_instance,
+                available_models=available_models_dict,
+            )
+
+        if provider == "IBM watsonx.ai":
+            try:
+                from langchain_ibm import WatsonxEmbeddings
+            except ImportError:
+                msg = "Please install langchain-ibm: pip install langchain-ibm"
+                raise ImportError(msg) from None
+
+            if not api_key:
+                msg = "IBM watsonx.ai API key is required when using IBM watsonx.ai provider"
+                raise ValueError(msg)
+
+            project_id = self.project_id
+
+            if not project_id:
+                msg = "Project ID is required for IBM watsonx.ai provider"
+                raise ValueError(msg)
+
+            from ibm_watsonx_ai import APIClient, Credentials
+
+            final_url = base_url_ibm_watsonx or "https://us-south.ml.cloud.ibm.com"
+
+            credentials = Credentials(
+                api_key=self.api_key,
+                url=final_url,
+            )
+
+            api_client = APIClient(credentials)
+
+            params = {
+                EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,
+                EmbedTextParamsMetaNames.RETURN_OPTIONS: {"input_text": self.input_text},
+            }
+
+            # Create the primary embedding instance
+            embeddings_instance = WatsonxEmbeddings(
+                model_id=model,
+                params=params,
+                watsonx_client=api_client,
+                project_id=project_id,
+            )
+
+            # Fetch available IBM watsonx.ai models
+            available_model_names = self.fetch_ibm_models(final_url)
+
+            # Create dedicated instances for each available model
+            available_models_dict = {}
+            for model_name in available_model_names:
+                available_models_dict[model_name] = WatsonxEmbeddings(
+                    model_id=model_name,
+                    params=params,
+                    watsonx_client=api_client,
+                    project_id=project_id,
+                )
+
+            return EmbeddingsWithModels(
+                embeddings=embeddings_instance,
+                available_models=available_models_dict,
+            )
+
+        msg = f"Unknown provider: {provider}"
+        raise ValueError(msg)
+
+    async def update_build_config(
+        self, build_config: dotdict, field_value: Any, field_name: str | None = None
+    ) -> dotdict:
+        if field_name == "provider":
+            if field_value == "OpenAI":
+                build_config["model"]["options"] = OPENAI_EMBEDDING_MODEL_NAMES
+                build_config["model"]["value"] = OPENAI_EMBEDDING_MODEL_NAMES[0]
+                build_config["api_key"]["display_name"] = "OpenAI API Key"
+                build_config["api_key"]["required"] = True
+                build_config["api_key"]["show"] = True
+                build_config["api_base"]["display_name"] = "OpenAI API Base URL"
+                build_config["api_base"]["advanced"] = True
+                build_config["api_base"]["show"] = True
+                build_config["ollama_base_url"]["show"] = False
+                build_config["project_id"]["show"] = False
+                build_config["base_url_ibm_watsonx"]["show"] = False
+                build_config["truncate_input_tokens"]["show"] = False
+                build_config["input_text"]["show"] = False
+            elif field_value == "Ollama":
+                build_config["ollama_base_url"]["show"] = True
+
+                if await is_valid_ollama_url(url=self.ollama_base_url):
+                    try:
+                        models = await get_ollama_models(
+                            base_url_value=self.ollama_base_url,
+                            desired_capability=DESIRED_CAPABILITY,
+                            json_models_key=JSON_MODELS_KEY,
+                            json_name_key=JSON_NAME_KEY,
+                            json_capabilities_key=JSON_CAPABILITIES_KEY,
+                        )
+                        build_config["model"]["options"] = models
+                        build_config["model"]["value"] = models[0] if models else ""
+                    except ValueError:
+                        build_config["model"]["options"] = []
+                        build_config["model"]["value"] = ""
+                else:
+                    build_config["model"]["options"] = []
+                    build_config["model"]["value"] = ""
+                build_config["truncate_input_tokens"]["show"] = False
+                build_config["input_text"]["show"] = False
+                build_config["api_key"]["display_name"] = "API Key (Optional)"
+                build_config["api_key"]["required"] = False
+                build_config["api_key"]["show"] = False
+                build_config["api_base"]["show"] = False
+                build_config["project_id"]["show"] = False
+                build_config["base_url_ibm_watsonx"]["show"] = False
+
+            elif field_value == "IBM watsonx.ai":
+                build_config["model"]["options"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)
+                build_config["model"]["value"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]
+                build_config["api_key"]["display_name"] = "IBM watsonx.ai API Key"
+                build_config["api_key"]["required"] = True
+                build_config["api_key"]["show"] = True
+                build_config["api_base"]["show"] = False
+                build_config["ollama_base_url"]["show"] = False
+                build_config["base_url_ibm_watsonx"]["show"] = True
+                build_config["project_id"]["show"] = True
+                build_config["truncate_input_tokens"]["show"] = True
+                build_config["input_text"]["show"] = True
+        elif field_name == "base_url_ibm_watsonx":
+            build_config["model"]["options"] = self.fetch_ibm_models(base_url=field_value)
+            build_config["model"]["value"] = self.fetch_ibm_models(base_url=field_value)[0]
+        elif field_name == "ollama_base_url":
+            # # Refresh Ollama models when base URL changes
+            # if hasattr(self, "provider") and self.provider == "Ollama":
+            # Use field_value if provided, otherwise fall back to instance attribute
+            ollama_url = self.ollama_base_url
+            if await is_valid_ollama_url(url=ollama_url):
+                try:
+                    models = await get_ollama_models(
+                        base_url_value=ollama_url,
+                        desired_capability=DESIRED_CAPABILITY,
+                        json_models_key=JSON_MODELS_KEY,
+                        json_name_key=JSON_NAME_KEY,
+                        json_capabilities_key=JSON_CAPABILITIES_KEY,
+                    )
+                    build_config["model"]["options"] = models
+                    build_config["model"]["value"] = models[0] if models else ""
+                except ValueError:
+                    await logger.awarning("Failed to fetch Ollama embedding models.")
+                    build_config["model"]["options"] = []
+                    build_config["model"]["value"] = ""
+
+        elif field_name == "model" and self.provider == "Ollama":
+            ollama_url = self.ollama_base_url
+            if await is_valid_ollama_url(url=ollama_url):
+                try:
+                    models = await get_ollama_models(
+                        base_url_value=ollama_url,
+                        desired_capability=DESIRED_CAPABILITY,
+                        json_models_key=JSON_MODELS_KEY,
+                        json_name_key=JSON_NAME_KEY,
+                        json_capabilities_key=JSON_CAPABILITIES_KEY,
+                    )
+                    build_config["model"]["options"] = models
+                except ValueError:
+                    await logger.awarning("Failed to refresh Ollama embedding models.")
+                    build_config["model"]["options"] = []
+
+        return build_config
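
For orientation, build_embeddings() in the new component returns an EmbeddingsWithModels bundle: one primary LangChain embeddings instance plus a dictionary of pre-built per-model instances. The sketch below shows how a caller might consume such a bundle. It relies only on what the diff shows (the wrapper is constructed with embeddings= and available_models= keywords, and the wrapped objects are ordinary LangChain embeddings); the assumption that those keywords are exposed as attributes, and the specific model names, are illustrative rather than taken from this release.

    # Hedged sketch: consuming the EmbeddingsWithModels bundle that the OpenAI
    # branch of EmbeddingModelComponent.build_embeddings() produces.
    # Assumption: the wrapper exposes its constructor keywords ("embeddings",
    # "available_models") as attributes; model names are examples only.
    from langchain_openai import OpenAIEmbeddings

    from lfx.base.embeddings.embeddings_class import EmbeddingsWithModels

    api_key = "sk-..."  # placeholder credential

    primary = OpenAIEmbeddings(model="text-embedding-3-small", api_key=api_key)
    per_model = {
        name: OpenAIEmbeddings(model=name, api_key=api_key)
        for name in ("text-embedding-3-small", "text-embedding-3-large")
    }

    bundle = EmbeddingsWithModels(embeddings=primary, available_models=per_model)

    # The primary instance is a regular LangChain Embeddings object.
    query_vector = bundle.embeddings.embed_query("hello world")
    print(len(query_vector))

    # Downstream code can switch to a sibling model without rebuilding the component.
    alternate = bundle.available_models["text-embedding-3-large"]
    doc_vectors = alternate.embed_documents(["first document", "second document"])
    print(len(doc_vectors), len(doc_vectors[0]))

If EmbeddingsWithModels delegates the Embeddings interface differently, only the attribute access in the last few lines would change; the construction mirrors the diff exactly.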