lfx-nightly 0.1.12.dev13__py3-none-any.whl → 0.1.12.dev15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lfx-nightly might be problematic.
- lfx/base/agents/events.py +40 -29
- lfx/base/constants.py +1 -1
- lfx/base/data/docling_utils.py +43 -8
- lfx/base/data/utils.py +3 -3
- lfx/base/knowledge_bases/__init__.py +3 -0
- lfx/base/knowledge_bases/knowledge_base_utils.py +137 -0
- lfx/base/models/anthropic_constants.py +3 -1
- lfx/base/models/model_input_constants.py +1 -1
- lfx/base/vectorstores/vector_store_connection_decorator.py +1 -1
- lfx/components/agentql/agentql_api.py +1 -1
- lfx/components/agents/agent.py +62 -17
- lfx/components/agents/mcp_component.py +11 -1
- lfx/components/aiml/aiml.py +4 -1
- lfx/components/amazon/amazon_bedrock_converse.py +196 -0
- lfx/components/amazon/amazon_bedrock_model.py +5 -1
- lfx/components/azure/azure_openai.py +1 -1
- lfx/components/azure/azure_openai_embeddings.py +1 -1
- lfx/components/clickhouse/clickhouse.py +1 -1
- lfx/components/confluence/confluence.py +1 -1
- lfx/components/crewai/crewai.py +1 -0
- lfx/components/crewai/hierarchical_crew.py +1 -0
- lfx/components/crewai/hierarchical_task.py +1 -0
- lfx/components/crewai/sequential_crew.py +1 -0
- lfx/components/crewai/sequential_task.py +1 -0
- lfx/components/crewai/sequential_task_agent.py +1 -0
- lfx/components/data/api_request.py +13 -3
- lfx/components/data/csv_to_data.py +1 -0
- lfx/components/data/file.py +71 -25
- lfx/components/data/json_to_data.py +1 -0
- lfx/components/datastax/astra_db.py +2 -1
- lfx/components/datastax/astra_vectorize.py +3 -5
- lfx/components/datastax/astradb_tool.py +5 -1
- lfx/components/datastax/astradb_vectorstore.py +8 -1
- lfx/components/deactivated/chat_litellm_model.py +1 -1
- lfx/components/deactivated/metal.py +1 -1
- lfx/components/docling/docling_inline.py +23 -9
- lfx/components/elastic/elasticsearch.py +1 -1
- lfx/components/elastic/opensearch.py +1 -1
- lfx/components/embeddings/similarity.py +1 -0
- lfx/components/embeddings/text_embedder.py +1 -0
- lfx/components/firecrawl/firecrawl_crawl_api.py +1 -1
- lfx/components/firecrawl/firecrawl_extract_api.py +1 -1
- lfx/components/firecrawl/firecrawl_map_api.py +1 -1
- lfx/components/firecrawl/firecrawl_scrape_api.py +1 -1
- lfx/components/google/gmail.py +1 -0
- lfx/components/google/google_generative_ai_embeddings.py +1 -1
- lfx/components/helpers/memory.py +8 -6
- lfx/components/helpers/output_parser.py +1 -0
- lfx/components/helpers/store_message.py +1 -0
- lfx/components/huggingface/huggingface.py +3 -1
- lfx/components/huggingface/huggingface_inference_api.py +1 -1
- lfx/components/ibm/watsonx.py +1 -1
- lfx/components/ibm/watsonx_embeddings.py +1 -1
- lfx/components/icosacomputing/combinatorial_reasoner.py +1 -1
- lfx/components/input_output/chat.py +0 -27
- lfx/components/input_output/chat_output.py +3 -27
- lfx/components/knowledge_bases/__init__.py +34 -0
- lfx/components/knowledge_bases/ingestion.py +686 -0
- lfx/components/knowledge_bases/retrieval.py +256 -0
- lfx/components/langchain_utilities/langchain_hub.py +1 -1
- lfx/components/langwatch/langwatch.py +1 -1
- lfx/components/logic/conditional_router.py +40 -3
- lfx/components/logic/data_conditional_router.py +1 -0
- lfx/components/logic/flow_tool.py +2 -1
- lfx/components/logic/pass_message.py +1 -0
- lfx/components/logic/sub_flow.py +2 -1
- lfx/components/milvus/milvus.py +1 -1
- lfx/components/olivya/olivya.py +1 -1
- lfx/components/processing/alter_metadata.py +1 -0
- lfx/components/processing/combine_text.py +1 -0
- lfx/components/processing/create_data.py +1 -0
- lfx/components/processing/data_to_dataframe.py +1 -0
- lfx/components/processing/extract_key.py +1 -0
- lfx/components/processing/filter_data.py +1 -0
- lfx/components/processing/filter_data_values.py +1 -0
- lfx/components/processing/json_cleaner.py +1 -0
- lfx/components/processing/merge_data.py +1 -0
- lfx/components/processing/message_to_data.py +1 -0
- lfx/components/processing/parse_data.py +1 -0
- lfx/components/processing/parse_dataframe.py +1 -0
- lfx/components/processing/parse_json_data.py +1 -0
- lfx/components/processing/regex.py +1 -0
- lfx/components/processing/select_data.py +1 -0
- lfx/components/processing/structured_output.py +7 -3
- lfx/components/processing/update_data.py +1 -0
- lfx/components/prototypes/__init__.py +8 -7
- lfx/components/qdrant/qdrant.py +1 -1
- lfx/components/redis/redis_chat.py +1 -1
- lfx/components/tools/__init__.py +0 -6
- lfx/components/tools/calculator.py +2 -1
- lfx/components/tools/python_code_structured_tool.py +1 -0
- lfx/components/tools/python_repl.py +2 -1
- lfx/components/tools/search_api.py +2 -1
- lfx/components/tools/serp_api.py +2 -1
- lfx/components/tools/tavily_search_tool.py +1 -0
- lfx/components/tools/wikidata_api.py +2 -1
- lfx/components/tools/wikipedia_api.py +2 -1
- lfx/components/tools/yahoo_finance.py +2 -1
- lfx/components/twelvelabs/video_embeddings.py +1 -1
- lfx/components/upstash/upstash.py +1 -1
- lfx/components/vectorstores/astradb_graph.py +8 -1
- lfx/components/vectorstores/local_db.py +1 -0
- lfx/components/vectorstores/weaviate.py +1 -1
- lfx/components/wolframalpha/wolfram_alpha_api.py +1 -1
- lfx/components/zep/zep.py +2 -1
- lfx/custom/attributes.py +1 -0
- lfx/graph/graph/base.py +61 -4
- lfx/inputs/inputs.py +1 -0
- lfx/log/logger.py +31 -11
- lfx/schema/message.py +6 -1
- lfx/schema/schema.py +4 -0
- lfx/services/__init__.py +3 -0
- lfx/services/mcp_composer/__init__.py +6 -0
- lfx/services/mcp_composer/factory.py +16 -0
- lfx/services/mcp_composer/service.py +599 -0
- lfx/services/schema.py +1 -0
- lfx/services/settings/auth.py +18 -15
- lfx/services/settings/base.py +38 -0
- lfx/services/settings/constants.py +4 -1
- lfx/services/settings/feature_flags.py +0 -1
- lfx/template/frontend_node/base.py +2 -0
- lfx/utils/image.py +1 -1
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/METADATA +1 -1
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/RECORD +126 -118
- lfx/components/datastax/astradb.py +0 -1285
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/entry_points.txt +0 -0

lfx/components/docling/docling_inline.py CHANGED

@@ -56,7 +56,6 @@ class DoclingInlineComponent(BaseFileComponent):
             display_name="Pipeline",
             info="Docling pipeline to use",
             options=["standard", "vlm"],
-            real_time_refresh=False,
             value="standard",
         ),
         DropdownInput(
@@ -64,7 +63,6 @@ class DoclingInlineComponent(BaseFileComponent):
             display_name="OCR Engine",
             info="OCR engine to use. None will disable OCR.",
             options=["None", "easyocr", "tesserocr", "rapidocr", "ocrmac"],
-            real_time_refresh=False,
             value="None",
         ),
         BoolInput(
@@ -208,17 +206,33 @@ class DoclingInlineComponent(BaseFileComponent):
             # Ignore cleanup errors, but log them
             self.log(f"Warning: Error during queue cleanup - {e}")
 
-        #
+        # Enhanced error checking with dependency-specific handling
         if isinstance(result, dict) and "error" in result:
-
-
-
-
-
+            error_msg = result["error"]
+
+            # Handle dependency errors specifically
+            if result.get("error_type") == "dependency_error":
+                dependency_name = result.get("dependency_name", "Unknown dependency")
+                install_command = result.get("install_command", "Please check documentation")
+
+                # Create a user-friendly error message
+                user_message = (
+                    f"Missing OCR dependency: {dependency_name}. "
+                    f"{install_command} "
+                    f"Alternatively, you can set OCR Engine to 'None' to disable OCR processing."
+                )
+                raise ImportError(user_message)
+
+            # Handle other specific errors
+            if error_msg.startswith("Docling is not installed"):
+                raise ImportError(error_msg)
+
+            # Handle graceful shutdown
+            if "Worker interrupted by SIGINT" in error_msg or "shutdown" in result:
                 self.log("Docling process cancelled by user")
                 result = []
             else:
-                raise RuntimeError(
+                raise RuntimeError(error_msg)
 
         processed_data = [Data(data={"doc": r["document"], "file_path": r["file_path"]}) if r else None for r in result]
         return self.rollup_data(file_list, processed_data)
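
The dependency branch above assumes the Docling worker returns a structured error dictionary instead of raising directly. A minimal sketch of that contract, inferred only from the keys the handler reads; the worker-side helper name and the example values are hypothetical:

# Hypothetical worker-side helper illustrating the error contract the new
# handler consumes. Only the dictionary keys come from the diff above; the
# function name and the example package/command are illustrative.
def _report_missing_dependency(dependency_name: str, install_command: str) -> dict:
    return {
        "error": f"{dependency_name} is not installed",
        "error_type": "dependency_error",     # routes to the ImportError branch
        "dependency_name": dependency_name,   # e.g. "easyocr"
        "install_command": install_command,   # e.g. "Install it with: pip install easyocr."
    }

# The component-side handler would then raise something like:
# ImportError("Missing OCR dependency: easyocr. Install it with: pip install easyocr. "
#             "Alternatively, you can set OCR Engine to 'None' to disable OCR processing.")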

lfx/components/embeddings/text_embedder.py CHANGED

@@ -15,6 +15,7 @@ class TextEmbedderComponent(Component):
     description: str = "Generate embeddings for a given message using the specified embedding model."
     icon = "binary"
     legacy: bool = True
+    replacement = ["models.EmbeddingModel"]
     inputs = [
        HandleInput(
            name="embedding_model",

lfx/components/google/gmail.py CHANGED

lfx/components/google/google_generative_ai_embeddings.py CHANGED

@@ -28,7 +28,7 @@ class GoogleGenerativeAIEmbeddingsComponent(Component):
     name = "Google Generative AI Embeddings"
 
     inputs = [
-        SecretStrInput(name="api_key", display_name="API Key", required=True),
+        SecretStrInput(name="api_key", display_name="Google Generative AI API Key", required=True),
         MessageTextInput(name="model_name", display_name="Model Name", value="models/text-embedding-004"),
     ]
 

lfx/components/helpers/memory.py CHANGED

@@ -19,10 +19,10 @@ class MemoryComponent(Component):
     documentation: str = "https://docs.langflow.org/components-helpers#message-history"
     icon = "message-square-more"
     name = "Memory"
-    default_keys = ["mode", "memory"]
+    default_keys = ["mode", "memory", "session_id"]
     mode_config = {
         "Store": ["message", "memory", "sender", "sender_name", "session_id"],
-        "Retrieve": ["n_messages", "order", "template", "memory"],
+        "Retrieve": ["n_messages", "order", "template", "memory", "session_id"],
     }
 
     inputs = [
@@ -199,10 +199,12 @@ class MemoryComponent(Component):
             stored = await self.memory.aget_messages()
             # langchain memories are supposed to return messages in ascending order
 
-            if order == "DESC":
-                stored = stored[::-1]
             if n_messages:
-                stored = stored[-n_messages:]
+                stored = stored[-n_messages:]  # Get last N messages first
+
+            if order == "DESC":
+                stored = stored[::-1]  # Then reverse if needed
+
             stored = [Message.from_lc_message(m) for m in stored]
             if sender_type:
                 expected_type = MESSAGE_SENDER_AI if sender_type == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER
@@ -217,7 +219,7 @@ class MemoryComponent(Component):
                 order=order,
             )
             if n_messages:
-                stored = stored[-n_messages:]
+                stored = stored[-n_messages:]  # Get last N messages
 
             # self.status = stored
             return cast("Data", stored)
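
The retrieval change above is an ordering fix: the last N messages are now sliced out before any DESC reversal, so the component returns the newest messages instead of the oldest. A standalone illustration, with plain integers standing in for stored messages (oldest to newest):

# Why the slice now happens before the reversal.
stored = [1, 2, 3, 4, 5]
n_messages = 2

# Old behaviour: reverse, then slice -> the two *oldest* messages.
old = stored[::-1][-n_messages:]
assert old == [2, 1]

# New behaviour: slice the last N, then reverse for DESC order
# -> the two *newest* messages, newest first.
new = stored[-n_messages:][::-1]
assert new == [5, 4]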

lfx/components/huggingface/huggingface.py CHANGED

@@ -104,7 +104,9 @@ class HuggingFaceEndpointsComponent(LCModelComponent):
             advanced=True,
             info="The task to call the model with. Should be a task that returns `generated_text` or `summary_text`.",
         ),
-        SecretStrInput(
+        SecretStrInput(
+            name="huggingfacehub_api_token", display_name="HuggingFace HubAPI Token", password=True, required=True
+        ),
         DictInput(name="model_kwargs", display_name="Model Keyword Arguments", advanced=True),
         IntInput(name="retry_attempts", display_name="Retry Attempts", value=1, advanced=True),
     ]

lfx/components/huggingface/huggingface_inference_api.py CHANGED

@@ -22,7 +22,7 @@ class HuggingFaceInferenceAPIEmbeddingsComponent(LCEmbeddingsModel):
     inputs = [
         SecretStrInput(
             name="api_key",
-            display_name="API Key",
+            display_name="HuggingFace API Key",
             advanced=False,
             info="Required for non-local inference endpoints. Local inference does not require an API Key.",
         ),

lfx/components/ibm/watsonx.py CHANGED

lfx/components/icosacomputing/combinatorial_reasoner.py CHANGED

@@ -34,7 +34,7 @@ class CombinatorialReasonerComponent(Component):
         ),
         SecretStrInput(
             name="password",
-            display_name="Password",
+            display_name="Combinatorial Reasoner Password",
             info="Password to authenticate access to Icosa CR API.",
             advanced=False,
             required=True,

lfx/components/input_output/chat.py CHANGED

@@ -69,45 +69,18 @@ class ChatInput(ChatComponent):
             is_list=True,
             temp_file=True,
         ),
-        MessageTextInput(
-            name="background_color",
-            display_name="Background Color",
-            info="The background color of the icon.",
-            advanced=True,
-        ),
-        MessageTextInput(
-            name="chat_icon",
-            display_name="Icon",
-            info="The icon of the message.",
-            advanced=True,
-        ),
-        MessageTextInput(
-            name="text_color",
-            display_name="Text Color",
-            info="The text color of the name",
-            advanced=True,
-        ),
     ]
     outputs = [
         Output(display_name="Chat Message", name="message", method="message_response"),
     ]
 
     async def message_response(self) -> Message:
-        background_color = self.background_color
-        text_color = self.text_color
-        icon = self.chat_icon
-
         message = await Message.create(
             text=self.input_value,
             sender=self.sender,
             sender_name=self.sender_name,
             session_id=self.session_id,
             files=self.files,
-            properties={
-                "background_color": background_color,
-                "text_color": text_color,
-                "icon": icon,
-            },
         )
         if self.session_id and isinstance(message, Message) and self.should_store_message:
             stored_message = await self.send_message(

lfx/components/input_output/chat_output.py CHANGED

@@ -70,30 +70,12 @@ class ChatOutput(ChatComponent):
             advanced=True,
             info="Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
         ),
-        MessageTextInput(
-            name="background_color",
-            display_name="Background Color",
-            info="The background color of the icon.",
-            advanced=True,
-        ),
-        MessageTextInput(
-            name="chat_icon",
-            display_name="Icon",
-            info="The icon of the message.",
-            advanced=True,
-        ),
-        MessageTextInput(
-            name="text_color",
-            display_name="Text Color",
-            info="The text color of the name",
-            advanced=True,
-        ),
         BoolInput(
             name="clean_data",
             display_name="Basic Clean Data",
             value=True,
-            info="Whether to clean the data",
             advanced=True,
+            info="Whether to clean data before converting to string.",
         ),
     ]
     outputs = [
@@ -126,10 +108,6 @@ class ChatOutput(ChatComponent):
 
         # Get source properties
         source, icon, display_name, source_id = self.get_properties_from_source_component()
-        background_color = self.background_color
-        text_color = self.text_color
-        if self.chat_icon:
-            icon = self.chat_icon
 
         # Create or use existing Message object
         if isinstance(self.input_value, Message):
@@ -145,9 +123,6 @@ class ChatOutput(ChatComponent):
             message.session_id = self.session_id
             message.flow_id = self.graph.flow_id if hasattr(self, "graph") else None
             message.properties.source = self._build_source(source_id, display_name, source)
-            message.properties.icon = icon
-            message.properties.background_color = background_color
-            message.properties.text_color = text_color
 
             # Store message if needed
             if self.session_id and self.should_store_message:
@@ -194,7 +169,8 @@ class ChatOutput(ChatComponent):
         """Convert input data to string with proper error handling."""
         self._validate_input()
         if isinstance(self.input_value, list):
-
+            clean_data: bool = getattr(self, "clean_data", False)
+            return "\n".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])
         if isinstance(self.input_value, Generator):
             return self.input_value
         return safe_convert(self.input_value)
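
The list branch now forwards the component's clean_data flag into each safe_convert call. The helper's internals are not part of this diff, so the sketch below uses a stand-in implementation (the whitespace normalisation is an assumption) purely to show how the flag flows through:

# Stand-in for safe_convert; only the clean_data keyword is taken from the
# diff above, the cleaning behaviour shown here is assumed.
def safe_convert(item, clean_data: bool = False) -> str:
    text = str(item)
    return " ".join(text.split()) if clean_data else text

input_value = ["first  chunk", "second\tchunk"]
clean_data = True
joined = "\n".join(safe_convert(item, clean_data=clean_data) for item in input_value)
# -> "first chunk\nsecond chunk"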

lfx/components/knowledge_bases/__init__.py ADDED

@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from lfx.components._importing import import_mod
+
+if TYPE_CHECKING:
+    from lfx.components.knowledge_bases.ingestion import KnowledgeIngestionComponent
+    from lfx.components.knowledge_bases.retrieval import KnowledgeRetrievalComponent
+
+_dynamic_imports = {
+    "KnowledgeIngestionComponent": "ingestion",
+    "KnowledgeRetrievalComponent": "retrieval",
+}
+
+__all__ = ["KnowledgeIngestionComponent", "KnowledgeRetrievalComponent"]
+
+
+def __getattr__(attr_name: str) -> Any:
+    """Lazily import input/output components on attribute access."""
+    if attr_name not in _dynamic_imports:
+        msg = f"module '{__name__}' has no attribute '{attr_name}'"
+        raise AttributeError(msg)
+    try:
+        result = import_mod(attr_name, _dynamic_imports[attr_name], __spec__.parent)
+    except (ModuleNotFoundError, ImportError, AttributeError) as e:
+        msg = f"Could not import '{attr_name}' from '{__name__}': {e}"
+        raise AttributeError(msg) from e
+    globals()[attr_name] = result
+    return result
+
+
+def __dir__() -> list[str]:
+    return list(__all__)
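
The new package __init__ uses module-level __getattr__ (PEP 562) so importing lfx.components.knowledge_bases stays cheap and the heavier ingestion/retrieval modules load only on first access. A minimal self-contained illustration of the same pattern, with generic names and the standard library in place of the lfx-specific import_mod helper:

# lazy_pkg/__init__.py — sketch of the lazy-import pattern used above.
# Names are generic; only the structure mirrors the diffed file.
from importlib import import_module
from typing import Any

_dynamic_imports = {"HeavyComponent": "heavy_module"}  # attribute -> submodule
__all__ = ["HeavyComponent"]


def __getattr__(attr_name: str) -> Any:
    if attr_name not in _dynamic_imports:
        raise AttributeError(f"module {__name__!r} has no attribute {attr_name!r}")
    module = import_module(f".{_dynamic_imports[attr_name]}", __spec__.parent)
    result = getattr(module, attr_name)
    globals()[attr_name] = result  # cache so later lookups skip __getattr__
    return result


def __dir__() -> list[str]:
    return list(__all__)

Importing the package itself does not import heavy_module; the first "from lazy_pkg import HeavyComponent" (or attribute access on the package) triggers the import and caches the result in the package globals.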