lfx-nightly 0.1.13.dev0__py3-none-any.whl → 0.2.0.dev26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (237)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +121 -29
  3. lfx/base/agents/altk_base_agent.py +380 -0
  4. lfx/base/agents/altk_tool_wrappers.py +565 -0
  5. lfx/base/agents/events.py +103 -35
  6. lfx/base/agents/utils.py +15 -2
  7. lfx/base/composio/composio_base.py +183 -233
  8. lfx/base/data/base_file.py +88 -21
  9. lfx/base/data/storage_utils.py +192 -0
  10. lfx/base/data/utils.py +178 -14
  11. lfx/base/datastax/__init__.py +5 -0
  12. lfx/{components/vectorstores/astradb.py → base/datastax/astradb_base.py} +84 -473
  13. lfx/base/embeddings/embeddings_class.py +113 -0
  14. lfx/base/io/chat.py +5 -4
  15. lfx/base/mcp/util.py +101 -15
  16. lfx/base/models/groq_constants.py +74 -58
  17. lfx/base/models/groq_model_discovery.py +265 -0
  18. lfx/base/models/model.py +1 -1
  19. lfx/base/models/model_input_constants.py +74 -7
  20. lfx/base/models/model_utils.py +100 -0
  21. lfx/base/models/ollama_constants.py +3 -0
  22. lfx/base/models/openai_constants.py +7 -0
  23. lfx/base/models/watsonx_constants.py +36 -0
  24. lfx/base/tools/run_flow.py +601 -129
  25. lfx/cli/commands.py +7 -4
  26. lfx/cli/common.py +2 -2
  27. lfx/cli/run.py +1 -1
  28. lfx/cli/script_loader.py +53 -11
  29. lfx/components/Notion/create_page.py +1 -1
  30. lfx/components/Notion/list_database_properties.py +1 -1
  31. lfx/components/Notion/list_pages.py +1 -1
  32. lfx/components/Notion/list_users.py +1 -1
  33. lfx/components/Notion/page_content_viewer.py +1 -1
  34. lfx/components/Notion/search.py +1 -1
  35. lfx/components/Notion/update_page_property.py +1 -1
  36. lfx/components/__init__.py +19 -5
  37. lfx/components/altk/__init__.py +34 -0
  38. lfx/components/altk/altk_agent.py +193 -0
  39. lfx/components/amazon/amazon_bedrock_converse.py +1 -1
  40. lfx/components/apify/apify_actor.py +4 -4
  41. lfx/components/composio/__init__.py +70 -18
  42. lfx/components/composio/apollo_composio.py +11 -0
  43. lfx/components/composio/bitbucket_composio.py +11 -0
  44. lfx/components/composio/canva_composio.py +11 -0
  45. lfx/components/composio/coda_composio.py +11 -0
  46. lfx/components/composio/composio_api.py +10 -0
  47. lfx/components/composio/discord_composio.py +1 -1
  48. lfx/components/composio/elevenlabs_composio.py +11 -0
  49. lfx/components/composio/exa_composio.py +11 -0
  50. lfx/components/composio/firecrawl_composio.py +11 -0
  51. lfx/components/composio/fireflies_composio.py +11 -0
  52. lfx/components/composio/gmail_composio.py +1 -1
  53. lfx/components/composio/googlebigquery_composio.py +11 -0
  54. lfx/components/composio/googlecalendar_composio.py +1 -1
  55. lfx/components/composio/googledocs_composio.py +1 -1
  56. lfx/components/composio/googlemeet_composio.py +1 -1
  57. lfx/components/composio/googlesheets_composio.py +1 -1
  58. lfx/components/composio/googletasks_composio.py +1 -1
  59. lfx/components/composio/heygen_composio.py +11 -0
  60. lfx/components/composio/mem0_composio.py +11 -0
  61. lfx/components/composio/peopledatalabs_composio.py +11 -0
  62. lfx/components/composio/perplexityai_composio.py +11 -0
  63. lfx/components/composio/serpapi_composio.py +11 -0
  64. lfx/components/composio/slack_composio.py +3 -574
  65. lfx/components/composio/slackbot_composio.py +1 -1
  66. lfx/components/composio/snowflake_composio.py +11 -0
  67. lfx/components/composio/tavily_composio.py +11 -0
  68. lfx/components/composio/youtube_composio.py +2 -2
  69. lfx/components/{agents → cuga}/__init__.py +5 -7
  70. lfx/components/cuga/cuga_agent.py +730 -0
  71. lfx/components/data/__init__.py +78 -28
  72. lfx/components/data_source/__init__.py +58 -0
  73. lfx/components/{data → data_source}/api_request.py +26 -3
  74. lfx/components/{data → data_source}/csv_to_data.py +15 -10
  75. lfx/components/{data → data_source}/json_to_data.py +15 -8
  76. lfx/components/{data → data_source}/news_search.py +1 -1
  77. lfx/components/{data → data_source}/rss.py +1 -1
  78. lfx/components/{data → data_source}/sql_executor.py +1 -1
  79. lfx/components/{data → data_source}/url.py +1 -1
  80. lfx/components/{data → data_source}/web_search.py +1 -1
  81. lfx/components/datastax/__init__.py +12 -6
  82. lfx/components/datastax/{astra_assistant_manager.py → astradb_assistant_manager.py} +1 -0
  83. lfx/components/datastax/astradb_chatmemory.py +40 -0
  84. lfx/components/datastax/astradb_cql.py +6 -32
  85. lfx/components/datastax/astradb_graph.py +10 -124
  86. lfx/components/datastax/astradb_tool.py +13 -53
  87. lfx/components/datastax/astradb_vectorstore.py +134 -977
  88. lfx/components/datastax/create_assistant.py +1 -0
  89. lfx/components/datastax/create_thread.py +1 -0
  90. lfx/components/datastax/dotenv.py +1 -0
  91. lfx/components/datastax/get_assistant.py +1 -0
  92. lfx/components/datastax/getenvvar.py +1 -0
  93. lfx/components/datastax/graph_rag.py +1 -1
  94. lfx/components/datastax/hcd.py +1 -1
  95. lfx/components/datastax/list_assistants.py +1 -0
  96. lfx/components/datastax/run.py +1 -0
  97. lfx/components/deactivated/json_document_builder.py +1 -1
  98. lfx/components/elastic/elasticsearch.py +1 -1
  99. lfx/components/elastic/opensearch_multimodal.py +1575 -0
  100. lfx/components/files_and_knowledge/__init__.py +47 -0
  101. lfx/components/{data → files_and_knowledge}/directory.py +1 -1
  102. lfx/components/{data → files_and_knowledge}/file.py +246 -18
  103. lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +17 -9
  104. lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +18 -10
  105. lfx/components/{data → files_and_knowledge}/save_file.py +142 -22
  106. lfx/components/flow_controls/__init__.py +58 -0
  107. lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
  108. lfx/components/{logic → flow_controls}/loop.py +47 -9
  109. lfx/components/flow_controls/run_flow.py +108 -0
  110. lfx/components/glean/glean_search_api.py +1 -1
  111. lfx/components/groq/groq.py +35 -28
  112. lfx/components/helpers/__init__.py +102 -0
  113. lfx/components/ibm/watsonx.py +25 -21
  114. lfx/components/input_output/__init__.py +3 -1
  115. lfx/components/input_output/chat.py +12 -3
  116. lfx/components/input_output/chat_output.py +12 -4
  117. lfx/components/input_output/text.py +1 -1
  118. lfx/components/input_output/text_output.py +1 -1
  119. lfx/components/{data → input_output}/webhook.py +1 -1
  120. lfx/components/knowledge_bases/__init__.py +59 -4
  121. lfx/components/langchain_utilities/character.py +1 -1
  122. lfx/components/langchain_utilities/csv_agent.py +84 -16
  123. lfx/components/langchain_utilities/json_agent.py +67 -12
  124. lfx/components/langchain_utilities/language_recursive.py +1 -1
  125. lfx/components/llm_operations/__init__.py +46 -0
  126. lfx/components/{processing → llm_operations}/batch_run.py +1 -1
  127. lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
  128. lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
  129. lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
  130. lfx/components/{processing → llm_operations}/structured_output.py +56 -18
  131. lfx/components/logic/__init__.py +126 -0
  132. lfx/components/mem0/mem0_chat_memory.py +11 -0
  133. lfx/components/mistral/mistral_embeddings.py +1 -1
  134. lfx/components/models/__init__.py +64 -9
  135. lfx/components/models_and_agents/__init__.py +49 -0
  136. lfx/components/{agents → models_and_agents}/agent.py +49 -6
  137. lfx/components/models_and_agents/embedding_model.py +423 -0
  138. lfx/components/models_and_agents/language_model.py +398 -0
  139. lfx/components/{agents → models_and_agents}/mcp_component.py +84 -45
  140. lfx/components/{helpers → models_and_agents}/memory.py +1 -1
  141. lfx/components/nvidia/system_assist.py +1 -1
  142. lfx/components/olivya/olivya.py +1 -1
  143. lfx/components/ollama/ollama.py +235 -14
  144. lfx/components/openrouter/openrouter.py +49 -147
  145. lfx/components/processing/__init__.py +9 -57
  146. lfx/components/processing/converter.py +1 -1
  147. lfx/components/processing/dataframe_operations.py +1 -1
  148. lfx/components/processing/parse_json_data.py +2 -2
  149. lfx/components/processing/parser.py +7 -2
  150. lfx/components/processing/split_text.py +1 -1
  151. lfx/components/qdrant/qdrant.py +1 -1
  152. lfx/components/redis/redis.py +1 -1
  153. lfx/components/twelvelabs/split_video.py +10 -0
  154. lfx/components/twelvelabs/video_file.py +12 -0
  155. lfx/components/utilities/__init__.py +43 -0
  156. lfx/components/{helpers → utilities}/calculator_core.py +1 -1
  157. lfx/components/{helpers → utilities}/current_date.py +1 -1
  158. lfx/components/{processing → utilities}/python_repl_core.py +1 -1
  159. lfx/components/vectorstores/__init__.py +0 -6
  160. lfx/components/vectorstores/local_db.py +9 -0
  161. lfx/components/youtube/youtube_transcripts.py +118 -30
  162. lfx/custom/custom_component/component.py +60 -3
  163. lfx/custom/custom_component/custom_component.py +68 -6
  164. lfx/field_typing/constants.py +1 -0
  165. lfx/graph/edge/base.py +45 -22
  166. lfx/graph/graph/base.py +5 -2
  167. lfx/graph/graph/schema.py +3 -2
  168. lfx/graph/state/model.py +15 -2
  169. lfx/graph/utils.py +6 -0
  170. lfx/graph/vertex/base.py +4 -1
  171. lfx/graph/vertex/param_handler.py +10 -7
  172. lfx/graph/vertex/vertex_types.py +1 -1
  173. lfx/helpers/__init__.py +12 -0
  174. lfx/helpers/flow.py +117 -0
  175. lfx/inputs/input_mixin.py +24 -1
  176. lfx/inputs/inputs.py +13 -1
  177. lfx/interface/components.py +161 -83
  178. lfx/io/schema.py +6 -0
  179. lfx/log/logger.py +5 -3
  180. lfx/schema/schema.py +5 -0
  181. lfx/services/database/__init__.py +5 -0
  182. lfx/services/database/service.py +25 -0
  183. lfx/services/deps.py +87 -22
  184. lfx/services/manager.py +19 -6
  185. lfx/services/mcp_composer/service.py +998 -157
  186. lfx/services/session.py +5 -0
  187. lfx/services/settings/base.py +51 -7
  188. lfx/services/settings/constants.py +8 -0
  189. lfx/services/storage/local.py +76 -46
  190. lfx/services/storage/service.py +152 -29
  191. lfx/template/field/base.py +3 -0
  192. lfx/utils/ssrf_protection.py +384 -0
  193. lfx/utils/validate_cloud.py +26 -0
  194. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/METADATA +38 -22
  195. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/RECORD +210 -196
  196. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/WHEEL +1 -1
  197. lfx/components/agents/cuga_agent.py +0 -1013
  198. lfx/components/datastax/astra_db.py +0 -77
  199. lfx/components/datastax/cassandra.py +0 -92
  200. lfx/components/logic/run_flow.py +0 -71
  201. lfx/components/models/embedding_model.py +0 -114
  202. lfx/components/models/language_model.py +0 -144
  203. lfx/components/vectorstores/astradb_graph.py +0 -326
  204. lfx/components/vectorstores/cassandra.py +0 -264
  205. lfx/components/vectorstores/cassandra_graph.py +0 -238
  206. lfx/components/vectorstores/chroma.py +0 -167
  207. lfx/components/vectorstores/clickhouse.py +0 -135
  208. lfx/components/vectorstores/couchbase.py +0 -102
  209. lfx/components/vectorstores/elasticsearch.py +0 -267
  210. lfx/components/vectorstores/faiss.py +0 -111
  211. lfx/components/vectorstores/graph_rag.py +0 -141
  212. lfx/components/vectorstores/hcd.py +0 -314
  213. lfx/components/vectorstores/milvus.py +0 -115
  214. lfx/components/vectorstores/mongodb_atlas.py +0 -213
  215. lfx/components/vectorstores/opensearch.py +0 -243
  216. lfx/components/vectorstores/pgvector.py +0 -72
  217. lfx/components/vectorstores/pinecone.py +0 -134
  218. lfx/components/vectorstores/qdrant.py +0 -109
  219. lfx/components/vectorstores/supabase.py +0 -76
  220. lfx/components/vectorstores/upstash.py +0 -124
  221. lfx/components/vectorstores/vectara.py +0 -97
  222. lfx/components/vectorstores/vectara_rag.py +0 -164
  223. lfx/components/vectorstores/weaviate.py +0 -89
  224. /lfx/components/{data → data_source}/mock_data.py +0 -0
  225. /lfx/components/datastax/{astra_vectorize.py → astradb_vectorize.py} +0 -0
  226. /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
  227. /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
  228. /lfx/components/{logic → flow_controls}/listen.py +0 -0
  229. /lfx/components/{logic → flow_controls}/notify.py +0 -0
  230. /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
  231. /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
  232. /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
  233. /lfx/components/{helpers → processing}/create_list.py +0 -0
  234. /lfx/components/{helpers → processing}/output_parser.py +0 -0
  235. /lfx/components/{helpers → processing}/store_message.py +0 -0
  236. /lfx/components/{helpers → utilities}/id_generator.py +0 -0
  237. {lfx_nightly-0.1.13.dev0.dist-info → lfx_nightly-0.2.0.dev26.dist-info}/entry_points.txt +0 -0
lfx/components/ollama/ollama.py
@@ -1,4 +1,6 @@
 import asyncio
+import json
+from contextlib import suppress
 from typing import Any
 from urllib.parse import urljoin
 
@@ -8,11 +10,27 @@ from langchain_ollama import ChatOllama
 from lfx.base.models.model import LCModelComponent
 from lfx.field_typing import LanguageModel
 from lfx.field_typing.range_spec import RangeSpec
-from lfx.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput
+from lfx.helpers.base_model import build_model_from_schema
+from lfx.io import (
+    BoolInput,
+    DictInput,
+    DropdownInput,
+    FloatInput,
+    IntInput,
+    MessageTextInput,
+    Output,
+    SecretStrInput,
+    SliderInput,
+    TableInput,
+)
 from lfx.log.logger import logger
+from lfx.schema.data import Data
+from lfx.schema.dataframe import DataFrame
+from lfx.schema.table import EditMode
 from lfx.utils.util import transform_localhost_url
 
 HTTP_STATUS_OK = 200
+TABLE_ROW_PLACEHOLDER = {"name": "field", "description": "description of field", "type": "str", "multiple": "False"}
 
 
 class ChatOllamaComponent(LCModelComponent):
@@ -28,11 +46,51 @@ class ChatOllamaComponent(LCModelComponent):
     DESIRED_CAPABILITY = "completion"
     TOOL_CALLING_CAPABILITY = "tools"
 
+    # Define the table schema for the format input
+    TABLE_SCHEMA = [
+        {
+            "name": "name",
+            "display_name": "Name",
+            "type": "str",
+            "description": "Specify the name of the output field.",
+            "default": "field",
+            "edit_mode": EditMode.INLINE,
+        },
+        {
+            "name": "description",
+            "display_name": "Description",
+            "type": "str",
+            "description": "Describe the purpose of the output field.",
+            "default": "description of field",
+            "edit_mode": EditMode.POPOVER,
+        },
+        {
+            "name": "type",
+            "display_name": "Type",
+            "type": "str",
+            "edit_mode": EditMode.INLINE,
+            "description": ("Indicate the data type of the output field (e.g., str, int, float, bool, dict)."),
+            "options": ["str", "int", "float", "bool", "dict"],
+            "default": "str",
+        },
+        {
+            "name": "multiple",
+            "display_name": "As List",
+            "type": "boolean",
+            "description": "Set to True if this output field should be a list of the specified type.",
+            "edit_mode": EditMode.INLINE,
+            "options": ["True", "False"],
+            "default": "False",
+        },
+    ]
+    default_table_row = {row["name"]: row.get("default", None) for row in TABLE_SCHEMA}
+    default_table_row_schema = build_model_from_schema([default_table_row]).model_json_schema()
+
     inputs = [
         MessageTextInput(
             name="base_url",
-            display_name="Base URL",
-            info="Endpoint of the Ollama API. Defaults to http://localhost:11434 .",
+            display_name="Ollama API URL",
+            info="Endpoint of the Ollama API. Defaults to http://localhost:11434.",
             value="http://localhost:11434",
             real_time_refresh=True,
         ),
@@ -43,6 +101,16 @@
             info="Refer to https://ollama.com/library for more models.",
             refresh_button=True,
             real_time_refresh=True,
+            required=True,
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="Ollama API Key",
+            info="Your Ollama API key.",
+            value=None,
+            required=False,
+            real_time_refresh=True,
+            advanced=True,
         ),
         SliderInput(
             name="temperature",
@@ -51,8 +119,13 @@
             range_spec=RangeSpec(min=0, max=1, step=0.01),
             advanced=True,
         ),
-        MessageTextInput(
-            name="format", display_name="Format", info="Specify the format of the output (e.g., json).", advanced=True
+        TableInput(
+            name="format",
+            display_name="Format",
+            info="Specify the format of the output.",
+            table_schema=TABLE_SCHEMA,
+            value=default_table_row,
+            show=False,
         ),
         DictInput(name="metadata", display_name="Metadata", info="Metadata to add to the run trace.", advanced=True),
         DropdownInput(
@@ -112,7 +185,12 @@
             name="top_k", display_name="Top K", info="Limits token selection to top K. (Default: 40)", advanced=True
         ),
         FloatInput(name="top_p", display_name="Top P", info="Works together with top-k. (Default: 0.9)", advanced=True),
-        BoolInput(name="verbose", display_name="Verbose", info="Whether to print out response text.", advanced=True),
+        BoolInput(
+            name="enable_verbose_output",
+            display_name="Ollama Verbose Output",
+            info="Whether to print out response text.",
+            advanced=True,
+        ),
         MessageTextInput(
             name="tags",
             display_name="Tags",
@@ -138,18 +216,33 @@
         MessageTextInput(
             name="template", display_name="Template", info="Template to use for generating text.", advanced=True
         ),
+        BoolInput(
+            name="enable_structured_output",
+            display_name="Enable Structured Output",
+            info="Whether to enable structured output in the model.",
+            value=False,
+            advanced=False,
+            real_time_refresh=True,
+        ),
         *LCModelComponent.get_base_inputs(),
     ]
 
+    outputs = [
+        Output(display_name="Text", name="text_output", method="text_response"),
+        Output(display_name="Language Model", name="model_output", method="build_model"),
+        Output(display_name="Data", name="data_output", method="build_data_output"),
+        Output(display_name="DataFrame", name="dataframe_output", method="build_dataframe_output"),
+    ]
+
     def build_model(self) -> LanguageModel:  # type: ignore[type-var]
         # Mapping mirostat settings to their corresponding values
         mirostat_options = {"Mirostat": 1, "Mirostat 2.0": 2}
 
-        # Default to 0 for 'Disabled'
-        mirostat_value = mirostat_options.get(self.mirostat, 0)
+        # Default to None for 'Disabled'
+        mirostat_value = mirostat_options.get(self.mirostat, None)
 
         # Set mirostat_eta and mirostat_tau to None if mirostat is disabled
-        if mirostat_value == 0:
+        if mirostat_value is None:
             mirostat_eta = None
             mirostat_tau = None
         else:
@@ -169,12 +262,18 @@
                 "Learn more at https://docs.ollama.com/openai#openai-compatibility"
             )
 
+        try:
+            output_format = self._parse_format_field(self.format) if self.enable_structured_output else None
+        except Exception as e:
+            msg = f"Failed to parse the format field: {e}"
+            raise ValueError(msg) from e
+
         # Mapping system settings to their corresponding values
         llm_params = {
             "base_url": transformed_base_url,
             "model": self.model_name,
             "mirostat": mirostat_value,
-            "format": self.format,
+            "format": output_format or None,
             "metadata": self.metadata,
             "tags": self.tags.split(",") if self.tags else None,
             "mirostat_eta": mirostat_eta,
@@ -191,9 +290,12 @@
             "timeout": self.timeout or None,
             "top_k": self.top_k or None,
             "top_p": self.top_p or None,
-            "verbose": self.verbose,
+            "verbose": self.enable_verbose_output or False,
             "template": self.template,
         }
+        headers = self.headers
+        if headers is not None:
+            llm_params["client_kwargs"] = {"headers": headers}
 
         # Remove parameters with None values
         llm_params = {k: v for k, v in llm_params.items() if v is not None}
@@ -219,11 +321,16 @@
                 url = url.rstrip("/").removesuffix("/v1")
                 if not url.endswith("/"):
                     url = url + "/"
-                return (await client.get(urljoin(url, "api/tags"))).status_code == HTTP_STATUS_OK
+                return (
+                    await client.get(url=urljoin(url, "api/tags"), headers=self.headers)
+                ).status_code == HTTP_STATUS_OK
         except httpx.RequestError:
             return False
 
     async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):
+        if field_name == "enable_structured_output":  # bind enable_structured_output boolean to format show value
+            build_config["format"]["show"] = field_value
+
         if field_name == "mirostat":
             if field_value == "Disabled":
                 build_config["mirostat_eta"]["advanced"] = True
@@ -243,6 +350,8 @@
                 build_config["mirostat_tau"]["value"] = 5
 
         if field_name in {"model_name", "base_url", "tool_model_enabled"}:
+            logger.warning(f"Fetching Ollama models from updated URL: {build_config['base_url']}")
+
             if await self.is_valid_ollama_url(self.base_url):
                 tool_model_enabled = build_config["tool_model_enabled"].get("value", False) or self.tool_model_enabled
                 build_config["model_name"]["options"] = await self.get_models(
@@ -292,8 +401,9 @@
             show_url = urljoin(base_url, "api/show")
 
             async with httpx.AsyncClient() as client:
+                headers = self.headers
                 # Fetch available models
-                tags_response = await client.get(tags_url)
+                tags_response = await client.get(url=tags_url, headers=headers)
                 tags_response.raise_for_status()
                 models = tags_response.json()
                 if asyncio.iscoroutine(models):
@@ -307,11 +417,12 @@
                     await logger.adebug(f"Checking model: {model_name}")
 
                     payload = {"model": model_name}
-                    show_response = await client.post(show_url, json=payload)
+                    show_response = await client.post(url=show_url, json=payload, headers=headers)
                     show_response.raise_for_status()
                     json_data = show_response.json()
                     if asyncio.iscoroutine(json_data):
                         json_data = await json_data
+
                     capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])
                     await logger.adebug(f"Model: {model_name}, Capabilities: {capabilities}")
 
@@ -325,3 +436,113 @@
             raise ValueError(msg) from e
 
         return model_ids
+
+    def _parse_format_field(self, format_value: Any) -> Any:
+        """Parse the format field to handle both string and dict inputs.
+
+        The format field can be:
+        - A simple string like "json" (backward compatibility)
+        - A JSON string from NestedDictInput that needs parsing
+        - A dict/JSON schema (already parsed)
+        - None or empty
+
+        Args:
+            format_value: The raw format value from the input field
+
+        Returns:
+            Parsed format value as string, dict, or None
+        """
+        if not format_value:
+            return None
+
+        schema = format_value
+        if isinstance(format_value, list):
+            schema = build_model_from_schema(format_value).model_json_schema()
+            if schema == self.default_table_row_schema:
+                return None  # the rows are generic placeholder rows
+        elif isinstance(format_value, str):  # parse as json if string
+            with suppress(json.JSONDecodeError):  # e.g., literal "json" is valid for format field
+                schema = json.loads(format_value)
+
+        return schema or None
+
+    async def _parse_json_response(self) -> Any:
+        """Parse the JSON response from the model.
+
+        This method gets the text response and attempts to parse it as JSON.
+        Works with models that have format='json' or a JSON schema set.
+
+        Returns:
+            Parsed JSON (dict, list, or primitive type)
+
+        Raises:
+            ValueError: If the response is not valid JSON
+        """
+        message = await self.text_response()
+        text = message.text if hasattr(message, "text") else str(message)
+
+        if not text:
+            msg = "No response from model"
+            raise ValueError(msg)
+
+        try:
+            return json.loads(text)
+        except json.JSONDecodeError as e:
+            msg = f"Invalid JSON response. Ensure model supports JSON output. Error: {e}"
+            raise ValueError(msg) from e
+
+    async def build_data_output(self) -> Data:
+        """Build a Data output from the model's JSON response.
+
+        Returns:
+            Data: A Data object containing the parsed JSON response
+        """
+        parsed = await self._parse_json_response()
+
+        # If the response is already a dict, wrap it in Data
+        if isinstance(parsed, dict):
+            return Data(data=parsed)
+
+        # If it's a list, wrap in a results container
+        if isinstance(parsed, list):
+            if len(parsed) == 1:
+                return Data(data=parsed[0])
+            return Data(data={"results": parsed})
+
+        # For primitive types, wrap in a value container
+        return Data(data={"value": parsed})
+
+    async def build_dataframe_output(self) -> DataFrame:
+        """Build a DataFrame output from the model's JSON response.
+
+        Returns:
+            DataFrame: A DataFrame containing the parsed JSON response
+
+        Raises:
+            ValueError: If the response cannot be converted to a DataFrame
+        """
+        parsed = await self._parse_json_response()
+
+        # If it's a list of dicts, convert directly to DataFrame
+        if isinstance(parsed, list):
+            if not parsed:
+                return DataFrame()
+            # Ensure all items are dicts for proper DataFrame conversion
+            if all(isinstance(item, dict) for item in parsed):
+                return DataFrame(parsed)
+            msg = "List items must be dictionaries to convert to DataFrame"
+            raise ValueError(msg)
+
+        # If it's a single dict, wrap in a list to create a single-row DataFrame
+        if isinstance(parsed, dict):
+            return DataFrame([parsed])
+
+        # For primitive types, create a single-column DataFrame
+        return DataFrame([{"value": parsed}])
+
+    @property
+    def headers(self) -> dict[str, str] | None:
+        """Get the headers for the Ollama API."""
+        if self.api_key and self.api_key.strip():
+            return {"Authorization": f"Bearer {self.api_key}"}
+        return None
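Taken together, the ollama.py changes above wire structured output through the component: when "Enable Structured Output" is on, the rows of the Format table are turned into a Pydantic model by build_model_from_schema, that model's JSON schema is handed to ChatOllama as the format parameter, and the new Data/DataFrame outputs parse the JSON reply. A rough standalone sketch of the same idea using langchain_ollama directly; the model name, example rows, and the create_model stand-in are illustrative assumptions, not the component's code:

```python
# Illustrative sketch only, outside Langflow/lfx.
import json

from langchain_ollama import ChatOllama
from pydantic import create_model

# Example rows in the shape of the component's Format table (hypothetical values).
rows = [
    {"name": "city", "type": "str", "multiple": "False"},
    {"name": "population", "type": "int", "multiple": "False"},
]
type_map = {"str": str, "int": int, "float": float, "bool": bool, "dict": dict}

# Stand-in for lfx.helpers.base_model.build_model_from_schema: build a Pydantic
# model from the rows and take its JSON schema (assumed equivalent behavior).
fields = {
    row["name"]: (list[type_map[row["type"]]] if row["multiple"] == "True" else type_map[row["type"]], ...)
    for row in rows
}
schema = create_model("OutputFormat", **fields).model_json_schema()

# Recent langchain_ollama/Ollama builds accept a dict JSON schema for `format`,
# which is what the component now passes as "format": output_format.
llm = ChatOllama(base_url="http://localhost:11434", model="llama3.2", format=schema)
reply = llm.invoke("Name one large city and its population, as JSON.")
print(json.loads(reply.content))  # e.g. {"city": "...", "population": ...}
```

The load-bearing point is simply that a JSON schema passed as format constrains the model to emit JSON that the new build_data_output/build_dataframe_output methods can json.loads.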
lfx/components/openrouter/openrouter.py
@@ -1,6 +1,3 @@
-from collections import defaultdict
-from typing import Any
-
 import httpx
 from langchain_openai import ChatOpenAI
 from pydantic.v1 import SecretStr
@@ -8,13 +5,7 @@ from pydantic.v1 import SecretStr
 from lfx.base.models.model import LCModelComponent
 from lfx.field_typing import LanguageModel
 from lfx.field_typing.range_spec import RangeSpec
-from lfx.inputs.inputs import (
-    DropdownInput,
-    IntInput,
-    SecretStrInput,
-    SliderInput,
-    StrInput,
-)
+from lfx.inputs.inputs import DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
 
 
 class OpenRouterComponent(LCModelComponent):
@@ -28,36 +19,13 @@
 
     inputs = [
         *LCModelComponent.get_base_inputs(),
-        SecretStrInput(
-            name="api_key", display_name="OpenRouter API Key", required=True, info="Your OpenRouter API key"
-        ),
-        StrInput(
-            name="site_url",
-            display_name="Site URL",
-            info="Your site URL for OpenRouter rankings",
-            advanced=True,
-        ),
-        StrInput(
-            name="app_name",
-            display_name="App Name",
-            info="Your app name for OpenRouter rankings",
-            advanced=True,
-        ),
-        DropdownInput(
-            name="provider",
-            display_name="Provider",
-            info="The AI model provider",
-            options=["Loading providers..."],
-            value="Loading providers...",
-            real_time_refresh=True,
-            required=True,
-        ),
+        SecretStrInput(name="api_key", display_name="API Key", required=True),
         DropdownInput(
             name="model_name",
             display_name="Model",
-            info="The model to use for chat completion",
-            options=["Select a provider first"],
-            value="Select a provider first",
+            options=[],
+            value="",
+            refresh_button=True,
             real_time_refresh=True,
             required=True,
         ),
@@ -66,137 +34,71 @@
             display_name="Temperature",
             value=0.7,
             range_spec=RangeSpec(min=0, max=2, step=0.01),
-            info="Controls randomness. Lower values are more deterministic, higher values are more creative.",
-            advanced=True,
-        ),
-        IntInput(
-            name="max_tokens",
-            display_name="Max Tokens",
-            info="Maximum number of tokens to generate",
             advanced=True,
         ),
+        IntInput(name="max_tokens", display_name="Max Tokens", advanced=True),
+        StrInput(name="site_url", display_name="Site URL", advanced=True),
+        StrInput(name="app_name", display_name="App Name", advanced=True),
     ]
 
-    def fetch_models(self) -> dict[str, list]:
-        """Fetch available models from OpenRouter API and organize them by provider."""
-        url = "https://openrouter.ai/api/v1/models"
-
+    def fetch_models(self) -> list[dict]:
+        """Fetch available models from OpenRouter."""
         try:
-            with httpx.Client() as client:
-                response = client.get(url)
-                response.raise_for_status()
-
-                models_data = response.json().get("data", [])
-                provider_models = defaultdict(list)
-
-                for model in models_data:
-                    model_id = model.get("id", "")
-                    if "/" in model_id:
-                        provider = model_id.split("/")[0].title()
-                        provider_models[provider].append(
-                            {
-                                "id": model_id,
-                                "name": model.get("name", ""),
-                                "description": model.get("description", ""),
-                                "context_length": model.get("context_length", 0),
-                            }
-                        )
-
-                return dict(provider_models)
-
-        except httpx.HTTPError as e:
-            self.log(f"Error fetching models: {e!s}")
-            return {"Error": [{"id": "error", "name": f"Error fetching models: {e!s}"}]}
+            response = httpx.get("https://openrouter.ai/api/v1/models", timeout=10.0)
+            response.raise_for_status()
+            models = response.json().get("data", [])
+            return sorted(
+                [
+                    {
+                        "id": m["id"],
+                        "name": m.get("name", m["id"]),
+                        "context": m.get("context_length", 0),
+                    }
+                    for m in models
+                    if m.get("id")
+                ],
+                key=lambda x: x["name"],
+            )
+        except (httpx.RequestError, httpx.HTTPStatusError) as e:
+            self.log(f"Error fetching models: {e}")
+            return []
+
+    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:  # noqa: ARG002
+        """Update model options."""
+        models = self.fetch_models()
+        if models:
+            build_config["model_name"]["options"] = [m["id"] for m in models]
+            build_config["model_name"]["tooltips"] = {m["id"]: f"{m['name']} ({m['context']:,} tokens)" for m in models}
+        else:
+            build_config["model_name"]["options"] = ["Failed to load models"]
+            build_config["model_name"]["value"] = "Failed to load models"
+        return build_config
 
     def build_model(self) -> LanguageModel:
-        """Build and return the OpenRouter language model."""
-        model_not_selected = "Please select a model"
-        api_key_required = "API key is required"
-
-        if not self.model_name or self.model_name == "Select a provider first":
-            raise ValueError(model_not_selected)
-
+        """Build the OpenRouter model."""
         if not self.api_key:
-            raise ValueError(api_key_required)
-
-        api_key = SecretStr(self.api_key).get_secret_value()
+            msg = "API key is required"
+            raise ValueError(msg)
+        if not self.model_name or self.model_name == "Loading...":
+            msg = "Please select a model"
+            raise ValueError(msg)
 
-        # Build base configuration
-        kwargs: dict[str, Any] = {
+        kwargs = {
             "model": self.model_name,
-            "openai_api_key": api_key,
+            "openai_api_key": SecretStr(self.api_key).get_secret_value(),
            "openai_api_base": "https://openrouter.ai/api/v1",
             "temperature": self.temperature if self.temperature is not None else 0.7,
         }
 
-        # Add optional parameters
         if self.max_tokens:
-            kwargs["max_tokens"] = self.max_tokens
+            kwargs["max_tokens"] = int(self.max_tokens)
 
         headers = {}
         if self.site_url:
             headers["HTTP-Referer"] = self.site_url
         if self.app_name:
             headers["X-Title"] = self.app_name
-
         if headers:
             kwargs["default_headers"] = headers
 
-        try:
-            return ChatOpenAI(**kwargs)
-        except (ValueError, httpx.HTTPError) as err:
-            error_msg = f"Failed to build model: {err!s}"
-            self.log(error_msg)
-            raise ValueError(error_msg) from err
-
-    def _get_exception_message(self, e: Exception) -> str | None:
-        """Get a message from an OpenRouter exception.
-
-        Args:
-            e (Exception): The exception to get the message from.
-
-        Returns:
-            str | None: The message from the exception, or None if no specific message can be extracted.
-        """
-        try:
-            from openai import BadRequestError
-
-            if isinstance(e, BadRequestError):
-                message = e.body.get("message")
-                if message:
-                    return message
-        except ImportError:
-            pass
-        return None
-
-    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:
-        """Update build configuration based on field updates."""
-        try:
-            if field_name is None or field_name == "provider":
-                provider_models = self.fetch_models()
-                build_config["provider"]["options"] = sorted(provider_models.keys())
-                if build_config["provider"]["value"] not in provider_models:
-                    build_config["provider"]["value"] = build_config["provider"]["options"][0]
-
-            if field_name == "provider" and field_value in self.fetch_models():
-                provider_models = self.fetch_models()
-                models = provider_models[field_value]
-
-                build_config["model_name"]["options"] = [model["id"] for model in models]
-                if models:
-                    build_config["model_name"]["value"] = models[0]["id"]
-
-                tooltips = {
-                    model["id"]: (f"{model['name']}\nContext Length: {model['context_length']}\n{model['description']}")
-                    for model in models
-                }
-                build_config["model_name"]["tooltips"] = tooltips
-
-        except httpx.HTTPError as e:
-            self.log(f"Error updating build config: {e!s}")
-            build_config["provider"]["options"] = ["Error loading providers"]
-            build_config["provider"]["value"] = "Error loading providers"
-            build_config["model_name"]["options"] = ["Error loading models"]
-            build_config["model_name"]["value"] = "Error loading models"
-
-        return build_config
+        return ChatOpenAI(**kwargs)
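The openrouter.py rewrite above boils down to two paths: fetch_models issues a single unauthenticated GET against the public /api/v1/models endpoint to fill the model dropdown, and build_model returns a ChatOpenAI client pointed at the OpenRouter base URL with optional HTTP-Referer / X-Title ranking headers. A minimal standalone equivalent follows; the API key, model id, and header values are placeholders, not values taken from the component:

```python
# Standalone approximation of the two code paths in the refactored component.
import httpx
from langchain_openai import ChatOpenAI


def fetch_openrouter_models() -> list[str]:
    """List available model ids, roughly what the new fetch_models() feeds the dropdown."""
    try:
        response = httpx.get("https://openrouter.ai/api/v1/models", timeout=10.0)
        response.raise_for_status()
        data = response.json().get("data", [])
        # Keep only entries that have an id, sorted by display name.
        models = [m for m in data if m.get("id")]
        return [m["id"] for m in sorted(models, key=lambda m: m.get("name", m["id"]))]
    except (httpx.RequestError, httpx.HTTPStatusError):
        return []


# Placeholders: use your own OpenRouter key, a model id returned above,
# and (optionally) your site/app for OpenRouter's ranking headers.
llm = ChatOpenAI(
    model="openai/gpt-4o-mini",
    openai_api_key="YOUR_OPENROUTER_API_KEY",
    openai_api_base="https://openrouter.ai/api/v1",
    temperature=0.7,
    default_headers={"HTTP-Referer": "https://example.com", "X-Title": "Example App"},
)
print(llm.invoke("Say hello").content)
```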