lfx-nightly 0.2.0.dev0__py3-none-any.whl → 0.2.0.dev41__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +21 -4
- lfx/base/agents/altk_base_agent.py +393 -0
- lfx/base/agents/altk_tool_wrappers.py +565 -0
- lfx/base/agents/events.py +2 -1
- lfx/base/composio/composio_base.py +159 -224
- lfx/base/data/base_file.py +97 -20
- lfx/base/data/docling_utils.py +61 -10
- lfx/base/data/storage_utils.py +301 -0
- lfx/base/data/utils.py +178 -14
- lfx/base/mcp/util.py +2 -2
- lfx/base/models/anthropic_constants.py +21 -12
- lfx/base/models/groq_constants.py +74 -58
- lfx/base/models/groq_model_discovery.py +265 -0
- lfx/base/models/model.py +1 -1
- lfx/base/models/model_utils.py +100 -0
- lfx/base/models/openai_constants.py +7 -0
- lfx/base/models/watsonx_constants.py +32 -8
- lfx/base/tools/run_flow.py +601 -129
- lfx/cli/commands.py +9 -4
- lfx/cli/common.py +2 -2
- lfx/cli/run.py +1 -1
- lfx/cli/script_loader.py +53 -11
- lfx/components/Notion/create_page.py +1 -1
- lfx/components/Notion/list_database_properties.py +1 -1
- lfx/components/Notion/list_pages.py +1 -1
- lfx/components/Notion/list_users.py +1 -1
- lfx/components/Notion/page_content_viewer.py +1 -1
- lfx/components/Notion/search.py +1 -1
- lfx/components/Notion/update_page_property.py +1 -1
- lfx/components/__init__.py +19 -5
- lfx/components/{agents → altk}/__init__.py +5 -9
- lfx/components/altk/altk_agent.py +193 -0
- lfx/components/apify/apify_actor.py +1 -1
- lfx/components/composio/__init__.py +70 -18
- lfx/components/composio/apollo_composio.py +11 -0
- lfx/components/composio/bitbucket_composio.py +11 -0
- lfx/components/composio/canva_composio.py +11 -0
- lfx/components/composio/coda_composio.py +11 -0
- lfx/components/composio/composio_api.py +10 -0
- lfx/components/composio/discord_composio.py +1 -1
- lfx/components/composio/elevenlabs_composio.py +11 -0
- lfx/components/composio/exa_composio.py +11 -0
- lfx/components/composio/firecrawl_composio.py +11 -0
- lfx/components/composio/fireflies_composio.py +11 -0
- lfx/components/composio/gmail_composio.py +1 -1
- lfx/components/composio/googlebigquery_composio.py +11 -0
- lfx/components/composio/googlecalendar_composio.py +1 -1
- lfx/components/composio/googledocs_composio.py +1 -1
- lfx/components/composio/googlemeet_composio.py +1 -1
- lfx/components/composio/googlesheets_composio.py +1 -1
- lfx/components/composio/googletasks_composio.py +1 -1
- lfx/components/composio/heygen_composio.py +11 -0
- lfx/components/composio/mem0_composio.py +11 -0
- lfx/components/composio/peopledatalabs_composio.py +11 -0
- lfx/components/composio/perplexityai_composio.py +11 -0
- lfx/components/composio/serpapi_composio.py +11 -0
- lfx/components/composio/slack_composio.py +3 -574
- lfx/components/composio/slackbot_composio.py +1 -1
- lfx/components/composio/snowflake_composio.py +11 -0
- lfx/components/composio/tavily_composio.py +11 -0
- lfx/components/composio/youtube_composio.py +2 -2
- lfx/components/cuga/__init__.py +34 -0
- lfx/components/cuga/cuga_agent.py +730 -0
- lfx/components/data/__init__.py +78 -28
- lfx/components/data_source/__init__.py +58 -0
- lfx/components/{data → data_source}/api_request.py +26 -3
- lfx/components/{data → data_source}/csv_to_data.py +15 -10
- lfx/components/{data → data_source}/json_to_data.py +15 -8
- lfx/components/{data → data_source}/news_search.py +1 -1
- lfx/components/{data → data_source}/rss.py +1 -1
- lfx/components/{data → data_source}/sql_executor.py +1 -1
- lfx/components/{data → data_source}/url.py +1 -1
- lfx/components/{data → data_source}/web_search.py +1 -1
- lfx/components/datastax/astradb_cql.py +1 -1
- lfx/components/datastax/astradb_graph.py +1 -1
- lfx/components/datastax/astradb_tool.py +1 -1
- lfx/components/datastax/astradb_vectorstore.py +1 -1
- lfx/components/datastax/hcd.py +1 -1
- lfx/components/deactivated/json_document_builder.py +1 -1
- lfx/components/docling/__init__.py +0 -3
- lfx/components/docling/chunk_docling_document.py +3 -1
- lfx/components/docling/export_docling_document.py +3 -1
- lfx/components/elastic/elasticsearch.py +1 -1
- lfx/components/files_and_knowledge/__init__.py +47 -0
- lfx/components/{data → files_and_knowledge}/directory.py +1 -1
- lfx/components/{data → files_and_knowledge}/file.py +304 -24
- lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +2 -2
- lfx/components/{data → files_and_knowledge}/save_file.py +218 -31
- lfx/components/flow_controls/__init__.py +58 -0
- lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
- lfx/components/{logic → flow_controls}/loop.py +43 -9
- lfx/components/flow_controls/run_flow.py +108 -0
- lfx/components/glean/glean_search_api.py +1 -1
- lfx/components/groq/groq.py +35 -28
- lfx/components/helpers/__init__.py +102 -0
- lfx/components/ibm/watsonx.py +7 -1
- lfx/components/input_output/__init__.py +3 -1
- lfx/components/input_output/chat.py +4 -3
- lfx/components/input_output/chat_output.py +10 -4
- lfx/components/input_output/text.py +1 -1
- lfx/components/input_output/text_output.py +1 -1
- lfx/components/{data → input_output}/webhook.py +1 -1
- lfx/components/knowledge_bases/__init__.py +59 -4
- lfx/components/langchain_utilities/character.py +1 -1
- lfx/components/langchain_utilities/csv_agent.py +84 -16
- lfx/components/langchain_utilities/json_agent.py +67 -12
- lfx/components/langchain_utilities/language_recursive.py +1 -1
- lfx/components/llm_operations/__init__.py +46 -0
- lfx/components/{processing → llm_operations}/batch_run.py +17 -8
- lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
- lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
- lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
- lfx/components/{processing → llm_operations}/structured_output.py +1 -1
- lfx/components/logic/__init__.py +126 -0
- lfx/components/mem0/mem0_chat_memory.py +11 -0
- lfx/components/models/__init__.py +64 -9
- lfx/components/models_and_agents/__init__.py +49 -0
- lfx/components/{agents → models_and_agents}/agent.py +6 -4
- lfx/components/models_and_agents/embedding_model.py +353 -0
- lfx/components/models_and_agents/language_model.py +398 -0
- lfx/components/{agents → models_and_agents}/mcp_component.py +53 -44
- lfx/components/{helpers → models_and_agents}/memory.py +1 -1
- lfx/components/nvidia/system_assist.py +1 -1
- lfx/components/olivya/olivya.py +1 -1
- lfx/components/ollama/ollama.py +24 -5
- lfx/components/processing/__init__.py +9 -60
- lfx/components/processing/converter.py +1 -1
- lfx/components/processing/dataframe_operations.py +1 -1
- lfx/components/processing/parse_json_data.py +2 -2
- lfx/components/processing/parser.py +1 -1
- lfx/components/processing/split_text.py +1 -1
- lfx/components/qdrant/qdrant.py +1 -1
- lfx/components/redis/redis.py +1 -1
- lfx/components/twelvelabs/split_video.py +10 -0
- lfx/components/twelvelabs/video_file.py +12 -0
- lfx/components/utilities/__init__.py +43 -0
- lfx/components/{helpers → utilities}/calculator_core.py +1 -1
- lfx/components/{helpers → utilities}/current_date.py +1 -1
- lfx/components/{processing → utilities}/python_repl_core.py +1 -1
- lfx/components/vectorstores/local_db.py +9 -0
- lfx/components/youtube/youtube_transcripts.py +118 -30
- lfx/custom/custom_component/component.py +57 -1
- lfx/custom/custom_component/custom_component.py +68 -6
- lfx/custom/directory_reader/directory_reader.py +5 -2
- lfx/graph/edge/base.py +43 -20
- lfx/graph/state/model.py +15 -2
- lfx/graph/utils.py +6 -0
- lfx/graph/vertex/param_handler.py +10 -7
- lfx/helpers/__init__.py +12 -0
- lfx/helpers/flow.py +117 -0
- lfx/inputs/input_mixin.py +24 -1
- lfx/inputs/inputs.py +13 -1
- lfx/interface/components.py +161 -83
- lfx/log/logger.py +5 -3
- lfx/schema/image.py +2 -12
- lfx/services/database/__init__.py +5 -0
- lfx/services/database/service.py +25 -0
- lfx/services/deps.py +87 -22
- lfx/services/interfaces.py +5 -0
- lfx/services/manager.py +24 -10
- lfx/services/mcp_composer/service.py +1029 -162
- lfx/services/session.py +5 -0
- lfx/services/settings/auth.py +18 -11
- lfx/services/settings/base.py +56 -30
- lfx/services/settings/constants.py +8 -0
- lfx/services/storage/local.py +108 -46
- lfx/services/storage/service.py +171 -29
- lfx/template/field/base.py +3 -0
- lfx/utils/image.py +29 -11
- lfx/utils/ssrf_protection.py +384 -0
- lfx/utils/validate_cloud.py +26 -0
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/METADATA +38 -22
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/RECORD +189 -160
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/WHEEL +1 -1
- lfx/components/agents/altk_agent.py +0 -366
- lfx/components/agents/cuga_agent.py +0 -1013
- lfx/components/docling/docling_remote_vlm.py +0 -284
- lfx/components/logic/run_flow.py +0 -71
- lfx/components/models/embedding_model.py +0 -195
- lfx/components/models/language_model.py +0 -144
- lfx/components/processing/dataframe_to_toolset.py +0 -259
- /lfx/components/{data → data_source}/mock_data.py +0 -0
- /lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +0 -0
- /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
- /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
- /lfx/components/{logic → flow_controls}/listen.py +0 -0
- /lfx/components/{logic → flow_controls}/notify.py +0 -0
- /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
- /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
- /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
- /lfx/components/{helpers → processing}/create_list.py +0 -0
- /lfx/components/{helpers → processing}/output_parser.py +0 -0
- /lfx/components/{helpers → processing}/store_message.py +0 -0
- /lfx/components/{helpers → utilities}/id_generator.py +0 -0
- {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/entry_points.txt +0 -0
lfx/components/docling/docling_remote_vlm.py
DELETED
@@ -1,284 +0,0 @@
from typing import Any

import requests
from docling.datamodel.base_models import ConversionStatus, InputFormat
from docling.datamodel.pipeline_options import (
    ApiVlmOptions,
    ResponseFormat,
    VlmPipelineOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline
from langflow.base.data import BaseFileComponent
from langflow.inputs import DropdownInput, SecretStrInput, StrInput
from langflow.schema import Data
from langflow.schema.dotdict import dotdict

from lfx.components.ibm.watsonx import WatsonxAIComponent
from lfx.log.logger import logger


class DoclingRemoteVLMComponent(BaseFileComponent):
    display_name = "Docling Remote VLM"
    description = (
        "Uses Docling to process input documents running a VLM pipeline with a remote model"
        "(OpenAI-compatible API or IBM Cloud)."
    )
    documentation = "https://docling-project.github.io/docling/examples/vlm_pipeline_api_model/"
    trace_type = "tool"
    icon = "Docling"
    name = "DoclingRemoteVLM"

    # https://docling-project.github.io/docling/usage/supported_formats/
    VALID_EXTENSIONS = [
        "adoc",
        "asciidoc",
        "asc",
        "bmp",
        "csv",
        "dotx",
        "dotm",
        "docm",
        "docx",
        "htm",
        "html",
        "jpeg",
        "json",
        "md",
        "pdf",
        "png",
        "potx",
        "ppsx",
        "pptm",
        "potm",
        "ppsm",
        "pptx",
        "tiff",
        "txt",
        "xls",
        "xlsx",
        "xhtml",
        "xml",
        "webp",
    ]

    inputs = [
        *BaseFileComponent.get_base_inputs(),
        DropdownInput(
            name="provider",
            display_name="Provider",
            info="Select which remote VLM provider to use.",
            options=["IBM Cloud", "OpenAI-Compatible"],
            value="IBM Cloud",
            real_time_refresh=True,
        ),
        # IBM Cloud inputs
        SecretStrInput(
            name="watsonx_api_key",
            display_name="Watsonx API Key",
            info="IBM Cloud API key used for authentication (leave blank to load from .env).",
            required=False,
        ),
        StrInput(
            name="watsonx_project_id",
            display_name="Watsonx Project ID",
            required=False,
            info="The Watsonx project ID or deployment space ID associated with the model.",
            value="",
        ),
        DropdownInput(
            name="url",
            display_name="Watsonx API Endpoint",
            info="The base URL of the Watsonx API.",
            options=[
                "https://us-south.ml.cloud.ibm.com",
                "https://eu-de.ml.cloud.ibm.com",
                "https://eu-gb.ml.cloud.ibm.com",
                "https://au-syd.ml.cloud.ibm.com",
                "https://jp-tok.ml.cloud.ibm.com",
                "https://ca-tor.ml.cloud.ibm.com",
            ],
            real_time_refresh=True,
        ),
        DropdownInput(
            name="model_name",
            display_name="Model Name",
            options=[],
            value=None,
            dynamic=True,
            required=False,
        ),
        # OpenAI inputs
        StrInput(
            name="openai_base_url",
            display_name="OpenAI-Compatible API Base URL",
            info="Example: https://openrouter.ai/api/",
            required=False,
            show=False,
        ),
        SecretStrInput(
            name="openai_api_key",
            display_name="API Key",
            info="API key for OpenAI-compatible endpoints (leave blank if not required).",
            required=False,
            show=False,
        ),
        StrInput(
            name="openai_model",
            display_name="OpenAI Model Name",
            info="Model ID for OpenAI-compatible provider (e.g. gpt-4o-mini).",
            required=False,
            show=False,
        ),
        StrInput(name="vlm_prompt", display_name="Prompt", info="Prompt for VLM.", required=False),
    ]

    outputs = [*BaseFileComponent.get_base_outputs()]

    @staticmethod
    def fetch_models(base_url: str) -> list[str]:
        """Fetch available models from the Watsonx.ai API."""
        try:
            endpoint = f"{base_url}/ml/v1/foundation_model_specs"
            params = {"version": "2024-09-16", "filters": "function_text_chat,!lifecycle_withdrawn"}
            response = requests.get(endpoint, params=params, timeout=10)
            response.raise_for_status()
            data = response.json()
            models = [model["model_id"] for model in data.get("resources", [])]
            return sorted(models)
        except (requests.RequestException, requests.HTTPError, requests.Timeout, ConnectionError, ValueError):
            logger.exception("Error fetching models. Using default models.")
            return WatsonxAIComponent._default_models  # noqa: SLF001

    def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
        """Update shown fields based on chosen provider."""
        logger.info(f"update_build_config called: field_name={field_name}, field_value={field_value}")

        if field_name == "provider":
            provider_choice = field_value

            if provider_choice == "IBM Cloud":
                build_config.model_name.show = True
                build_config.watsonx_api_key.show = True
                build_config.watsonx_project_id.show = True
                build_config.url.show = True

                build_config.openai_base_url.show = False
                build_config.openai_api_key.show = False
                build_config.openai_model.show = False

            elif provider_choice == "OpenAI-Compatible":
                build_config.model_name.show = False
                build_config.watsonx_api_key.show = False
                build_config.watsonx_project_id.show = False
                build_config.url.show = False

                build_config.openai_base_url.show = True
                build_config.openai_api_key.show = True
                build_config.openai_model.show = True

        if field_name == "url":
            provider_value = build_config.provider.value if hasattr(build_config, "provider") else None
            if provider_value == "IBM Cloud" and field_value:
                models = self.fetch_models(base_url=field_value)
                build_config.model_name.options = models
                if models:
                    build_config.model_name.value = models[0]
                logger.info(f"Updated Watsonx model list: {len(models)} models found.")

    def watsonx_vlm_options(self, model: str, prompt: str):
        """Creates Docling ApiVlmOptions for a watsonx VLM."""
        api_key = getattr(self, "watsonx_api_key", "")
        project_id = getattr(self, "watsonx_project_id", "")
        base_url = getattr(self, "url", "https://us-south.ml.cloud.ibm.com")

        def _get_iam_access_token(api_key: str) -> str:
            res = requests.post(
                url="https://iam.cloud.ibm.com/identity/token",
                headers={"Content-Type": "application/x-www-form-urlencoded"},
                data=f"grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={api_key}",
                timeout=90,
            )
            res.raise_for_status()
            return res.json()["access_token"]

        access_token = _get_iam_access_token(api_key)
        return ApiVlmOptions(
            url=f"{base_url}/ml/v1/text/chat?version=2023-05-29",
            params={"model_id": model, "project_id": project_id, "parameters": {"max_new_tokens": 400}},
            headers={"Authorization": f"Bearer {access_token}"},
            prompt=prompt,
            timeout=60,
            response_format=ResponseFormat.MARKDOWN,
        )

    def openai_compatible_vlm_options(
        self,
        model: str,
        prompt: str,
        response_format: ResponseFormat,
        url: str,
        temperature: float = 0.7,
        max_tokens: int = 4096,
        api_key: str = "",
        *,
        skip_special_tokens: bool = False,
    ):
        """Create OpenAI-compatible Docling ApiVlmOptions options (e.g., LM Studio, vLLM, Ollama)."""
        api_key = getattr(self, "openai_api_key", api_key)
        model_override = getattr(self, "openai_model", model)

        headers = {}
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"

        return ApiVlmOptions(
            url=f"{url}/v1/chat/completions",
            params={"model": model_override, "max_tokens": max_tokens, "skip_special_tokens": skip_special_tokens},
            headers=headers,
            prompt=prompt,
            timeout=90,
            scale=2.0,
            temperature=temperature,
            response_format=response_format,
        )

    def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:
        file_paths = [file.path for file in file_list if file.path]
        if not file_paths:
            logger.warning("No files to process.")
            return file_list

        provider = getattr(self, "provider", "IBM Cloud")
        prompt = getattr(self, "vlm_prompt", "")

        if provider == "IBM Cloud":
            model = getattr(self, "model_name", "")
            vlm_opts = self.watsonx_vlm_options(model=model, prompt=prompt)
        else:
            model = getattr(self, "openai_model", "") or getattr(self, "model_name", "")
            base_url = getattr(self, "openai_base_url", "")
            vlm_opts = self.openai_compatible_vlm_options(
                model=model,
                prompt=prompt,
                response_format=ResponseFormat.MARKDOWN,
                url=base_url,
            )

        pipeline_options = VlmPipelineOptions(enable_remote_services=True)
        pipeline_options.vlm_options = vlm_opts

        converter = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options, pipeline_cls=VlmPipeline)
            }
        )

        results = converter.convert_all(file_paths)
        processed_data = [
            Data(data={"doc": res.document, "file_path": str(res.input.file)})
            if res.status == ConversionStatus.SUCCESS
            else None
            for res in results
        ]
        return self.rollup_data(file_list, processed_data)
lfx/components/logic/run_flow.py
DELETED
@@ -1,71 +0,0 @@
from typing import Any

from lfx.base.tools.run_flow import RunFlowBaseComponent
from lfx.helpers import run_flow
from lfx.log.logger import logger
from lfx.schema.dotdict import dotdict


class RunFlowComponent(RunFlowBaseComponent):
    display_name = "Run Flow"
    description = (
        "Creates a tool component from a Flow that takes all its inputs and runs it. "
        " \n **Select a Flow to use the tool mode**"
    )
    documentation: str = "https://docs.langflow.org/components-logic#run-flow"
    beta = True
    name = "RunFlow"
    icon = "Workflow"

    inputs = RunFlowBaseComponent.get_base_inputs()
    outputs = RunFlowBaseComponent.get_base_outputs()

    async def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):
        if field_name == "flow_name_selected":
            build_config["flow_name_selected"]["options"] = await self.get_flow_names()
            missing_keys = [key for key in self.default_keys if key not in build_config]
            if missing_keys:
                msg = f"Missing required keys in build_config: {missing_keys}"
                raise ValueError(msg)
            if field_value is not None:
                try:
                    graph = await self.get_graph(field_value)
                    build_config = self.update_build_config_from_graph(build_config, graph)
                except Exception as e:
                    msg = f"Error building graph for flow {field_value}"
                    await logger.aexception(msg)
                    raise RuntimeError(msg) from e
        return build_config

    async def run_flow_with_tweaks(self):
        tweaks: dict = {}

        flow_name_selected = self._attributes.get("flow_name_selected")
        parsed_flow_tweak_data = self._attributes.get("flow_tweak_data", {})
        if not isinstance(parsed_flow_tweak_data, dict):
            parsed_flow_tweak_data = parsed_flow_tweak_data.dict()

        if parsed_flow_tweak_data != {}:
            for field in parsed_flow_tweak_data:
                if "~" in field:
                    [node, name] = field.split("~")
                    if node not in tweaks:
                        tweaks[node] = {}
                    tweaks[node][name] = parsed_flow_tweak_data[field]
        else:
            for field in self._attributes:
                if field not in self.default_keys and "~" in field:
                    [node, name] = field.split("~")
                    if node not in tweaks:
                        tweaks[node] = {}
                    tweaks[node][name] = self._attributes[field]

        return await run_flow(
            inputs=None,
            output_type="all",
            flow_id=None,
            flow_name=flow_name_selected,
            tweaks=tweaks,
            user_id=str(self.user_id),
            session_id=self.graph.session_id or self.session_id,
        )
lfx/components/models/embedding_model.py
DELETED
@@ -1,195 +0,0 @@
from typing import Any

from langchain_openai import OpenAIEmbeddings

from lfx.base.embeddings.model import LCEmbeddingsModel
from lfx.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS
from lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES
from lfx.base.models.watsonx_constants import WATSONX_EMBEDDING_MODEL_NAMES
from lfx.field_typing import Embeddings
from lfx.io import (
    BoolInput,
    DictInput,
    DropdownInput,
    FloatInput,
    IntInput,
    MessageTextInput,
    SecretStrInput,
)
from lfx.schema.dotdict import dotdict


class EmbeddingModelComponent(LCEmbeddingsModel):
    display_name = "Embedding Model"
    description = "Generate embeddings using a specified provider."
    documentation: str = "https://docs.langflow.org/components-embedding-models"
    icon = "binary"
    name = "EmbeddingModel"
    category = "models"

    inputs = [
        DropdownInput(
            name="provider",
            display_name="Model Provider",
            options=["OpenAI", "Ollama", "IBM watsonx.ai"],
            value="OpenAI",
            info="Select the embedding model provider",
            real_time_refresh=True,
            options_metadata=[{"icon": "OpenAI"}, {"icon": "Ollama"}, {"icon": "WatsonxAI"}],
        ),
        DropdownInput(
            name="model",
            display_name="Model Name",
            options=OPENAI_EMBEDDING_MODEL_NAMES,
            value=OPENAI_EMBEDDING_MODEL_NAMES[0],
            info="Select the embedding model to use",
        ),
        SecretStrInput(
            name="api_key",
            display_name="OpenAI API Key",
            info="Model Provider API key",
            required=True,
            show=True,
            real_time_refresh=True,
        ),
        MessageTextInput(
            name="api_base",
            display_name="API Base URL",
            info="Base URL for the API. Leave empty for default.",
            advanced=True,
        ),
        # Watson-specific inputs
        MessageTextInput(
            name="project_id",
            display_name="Project ID",
            info="IBM watsonx.ai Project ID (required for IBM watsonx.ai)",
            show=False,
        ),
        IntInput(
            name="dimensions",
            display_name="Dimensions",
            info="The number of dimensions the resulting output embeddings should have. "
            "Only supported by certain models.",
            advanced=True,
        ),
        IntInput(name="chunk_size", display_name="Chunk Size", advanced=True, value=1000),
        FloatInput(name="request_timeout", display_name="Request Timeout", advanced=True),
        IntInput(name="max_retries", display_name="Max Retries", advanced=True, value=3),
        BoolInput(name="show_progress_bar", display_name="Show Progress Bar", advanced=True),
        DictInput(
            name="model_kwargs",
            display_name="Model Kwargs",
            advanced=True,
            info="Additional keyword arguments to pass to the model.",
        ),
    ]

    def build_embeddings(self) -> Embeddings:
        provider = self.provider
        model = self.model
        api_key = self.api_key
        api_base = self.api_base
        dimensions = self.dimensions
        chunk_size = self.chunk_size
        request_timeout = self.request_timeout
        max_retries = self.max_retries
        show_progress_bar = self.show_progress_bar
        model_kwargs = self.model_kwargs or {}

        if provider == "OpenAI":
            if not api_key:
                msg = "OpenAI API key is required when using OpenAI provider"
                raise ValueError(msg)
            return OpenAIEmbeddings(
                model=model,
                dimensions=dimensions or None,
                base_url=api_base or None,
                api_key=api_key,
                chunk_size=chunk_size,
                max_retries=max_retries,
                timeout=request_timeout or None,
                show_progress_bar=show_progress_bar,
                model_kwargs=model_kwargs,
            )

        if provider == "Ollama":
            try:
                from langchain_ollama import OllamaEmbeddings
            except ImportError:
                try:
                    from langchain_community.embeddings import OllamaEmbeddings
                except ImportError:
                    msg = "Please install langchain-ollama: pip install langchain-ollama"
                    raise ImportError(msg) from None

            return OllamaEmbeddings(
                model=model,
                base_url=api_base or "http://localhost:11434",
                **model_kwargs,
            )

        if provider == "IBM watsonx.ai":
            try:
                from langchain_ibm import WatsonxEmbeddings
            except ImportError:
                msg = "Please install langchain-ibm: pip install langchain-ibm"
                raise ImportError(msg) from None

            if not api_key:
                msg = "IBM watsonx.ai API key is required when using IBM watsonx.ai provider"
                raise ValueError(msg)

            project_id = self.project_id

            if not project_id:
                msg = "Project ID is required for IBM watsonx.ai provider"
                raise ValueError(msg)

            params = {
                "model_id": model,
                "url": api_base or "https://us-south.ml.cloud.ibm.com",
                "apikey": api_key,
            }

            params["project_id"] = project_id

            return WatsonxEmbeddings(**params)

        msg = f"Unknown provider: {provider}"
        raise ValueError(msg)

    def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:
        if field_name == "provider":
            if field_value == "OpenAI":
                build_config["model"]["options"] = OPENAI_EMBEDDING_MODEL_NAMES
                build_config["model"]["value"] = OPENAI_EMBEDDING_MODEL_NAMES[0]
                build_config["api_key"]["display_name"] = "OpenAI API Key"
                build_config["api_key"]["required"] = True
                build_config["api_key"]["show"] = True
                build_config["api_base"]["display_name"] = "OpenAI API Base URL"
                build_config["api_base"]["advanced"] = True
                build_config["project_id"]["show"] = False

            elif field_value == "Ollama":
                build_config["model"]["options"] = OLLAMA_EMBEDDING_MODELS
                build_config["model"]["value"] = OLLAMA_EMBEDDING_MODELS[0]
                build_config["api_key"]["display_name"] = "API Key (Optional)"
                build_config["api_key"]["required"] = False
                build_config["api_key"]["show"] = False
                build_config["api_base"]["display_name"] = "Ollama Base URL"
                build_config["api_base"]["value"] = "http://localhost:11434"
                build_config["api_base"]["advanced"] = True
                build_config["project_id"]["show"] = False

            elif field_value == "IBM watsonx.ai":
                build_config["model"]["options"] = WATSONX_EMBEDDING_MODEL_NAMES
                build_config["model"]["value"] = WATSONX_EMBEDDING_MODEL_NAMES[0]
                build_config["api_key"]["display_name"] = "IBM watsonx.ai API Key"
                build_config["api_key"]["required"] = True
                build_config["api_key"]["show"] = True
                build_config["api_base"]["display_name"] = "IBM watsonx.ai URL"
                build_config["api_base"]["value"] = "https://us-south.ml.cloud.ibm.com"
                build_config["api_base"]["advanced"] = False
                build_config["project_id"]["show"] = True

        return build_config
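For reference, the OpenAI branch of build_embeddings above boils down to a direct langchain-openai call. A minimal hedged sketch, not code from the package — the API key and dimensions values are placeholders, and the model name would normally come from OPENAI_EMBEDDING_MODEL_NAMES:

# Hedged sketch of what the OpenAI provider branch constructs, outside the
# component wrapper.
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(
    model="text-embedding-3-small",
    dimensions=256,    # only honored by models that support variable dimensions
    api_key="sk-...",  # placeholder; supply a real key
)
vector = embeddings.embed_query("hello world")
print(len(vector))  # 256 for the settings above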