lfx-nightly 0.1.12.dev13__py3-none-any.whl → 0.1.12.dev15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/base/agents/events.py +40 -29
- lfx/base/constants.py +1 -1
- lfx/base/data/docling_utils.py +43 -8
- lfx/base/data/utils.py +3 -3
- lfx/base/knowledge_bases/__init__.py +3 -0
- lfx/base/knowledge_bases/knowledge_base_utils.py +137 -0
- lfx/base/models/anthropic_constants.py +3 -1
- lfx/base/models/model_input_constants.py +1 -1
- lfx/base/vectorstores/vector_store_connection_decorator.py +1 -1
- lfx/components/agentql/agentql_api.py +1 -1
- lfx/components/agents/agent.py +62 -17
- lfx/components/agents/mcp_component.py +11 -1
- lfx/components/aiml/aiml.py +4 -1
- lfx/components/amazon/amazon_bedrock_converse.py +196 -0
- lfx/components/amazon/amazon_bedrock_model.py +5 -1
- lfx/components/azure/azure_openai.py +1 -1
- lfx/components/azure/azure_openai_embeddings.py +1 -1
- lfx/components/clickhouse/clickhouse.py +1 -1
- lfx/components/confluence/confluence.py +1 -1
- lfx/components/crewai/crewai.py +1 -0
- lfx/components/crewai/hierarchical_crew.py +1 -0
- lfx/components/crewai/hierarchical_task.py +1 -0
- lfx/components/crewai/sequential_crew.py +1 -0
- lfx/components/crewai/sequential_task.py +1 -0
- lfx/components/crewai/sequential_task_agent.py +1 -0
- lfx/components/data/api_request.py +13 -3
- lfx/components/data/csv_to_data.py +1 -0
- lfx/components/data/file.py +71 -25
- lfx/components/data/json_to_data.py +1 -0
- lfx/components/datastax/astra_db.py +2 -1
- lfx/components/datastax/astra_vectorize.py +3 -5
- lfx/components/datastax/astradb_tool.py +5 -1
- lfx/components/datastax/astradb_vectorstore.py +8 -1
- lfx/components/deactivated/chat_litellm_model.py +1 -1
- lfx/components/deactivated/metal.py +1 -1
- lfx/components/docling/docling_inline.py +23 -9
- lfx/components/elastic/elasticsearch.py +1 -1
- lfx/components/elastic/opensearch.py +1 -1
- lfx/components/embeddings/similarity.py +1 -0
- lfx/components/embeddings/text_embedder.py +1 -0
- lfx/components/firecrawl/firecrawl_crawl_api.py +1 -1
- lfx/components/firecrawl/firecrawl_extract_api.py +1 -1
- lfx/components/firecrawl/firecrawl_map_api.py +1 -1
- lfx/components/firecrawl/firecrawl_scrape_api.py +1 -1
- lfx/components/google/gmail.py +1 -0
- lfx/components/google/google_generative_ai_embeddings.py +1 -1
- lfx/components/helpers/memory.py +8 -6
- lfx/components/helpers/output_parser.py +1 -0
- lfx/components/helpers/store_message.py +1 -0
- lfx/components/huggingface/huggingface.py +3 -1
- lfx/components/huggingface/huggingface_inference_api.py +1 -1
- lfx/components/ibm/watsonx.py +1 -1
- lfx/components/ibm/watsonx_embeddings.py +1 -1
- lfx/components/icosacomputing/combinatorial_reasoner.py +1 -1
- lfx/components/input_output/chat.py +0 -27
- lfx/components/input_output/chat_output.py +3 -27
- lfx/components/knowledge_bases/__init__.py +34 -0
- lfx/components/knowledge_bases/ingestion.py +686 -0
- lfx/components/knowledge_bases/retrieval.py +256 -0
- lfx/components/langchain_utilities/langchain_hub.py +1 -1
- lfx/components/langwatch/langwatch.py +1 -1
- lfx/components/logic/conditional_router.py +40 -3
- lfx/components/logic/data_conditional_router.py +1 -0
- lfx/components/logic/flow_tool.py +2 -1
- lfx/components/logic/pass_message.py +1 -0
- lfx/components/logic/sub_flow.py +2 -1
- lfx/components/milvus/milvus.py +1 -1
- lfx/components/olivya/olivya.py +1 -1
- lfx/components/processing/alter_metadata.py +1 -0
- lfx/components/processing/combine_text.py +1 -0
- lfx/components/processing/create_data.py +1 -0
- lfx/components/processing/data_to_dataframe.py +1 -0
- lfx/components/processing/extract_key.py +1 -0
- lfx/components/processing/filter_data.py +1 -0
- lfx/components/processing/filter_data_values.py +1 -0
- lfx/components/processing/json_cleaner.py +1 -0
- lfx/components/processing/merge_data.py +1 -0
- lfx/components/processing/message_to_data.py +1 -0
- lfx/components/processing/parse_data.py +1 -0
- lfx/components/processing/parse_dataframe.py +1 -0
- lfx/components/processing/parse_json_data.py +1 -0
- lfx/components/processing/regex.py +1 -0
- lfx/components/processing/select_data.py +1 -0
- lfx/components/processing/structured_output.py +7 -3
- lfx/components/processing/update_data.py +1 -0
- lfx/components/prototypes/__init__.py +8 -7
- lfx/components/qdrant/qdrant.py +1 -1
- lfx/components/redis/redis_chat.py +1 -1
- lfx/components/tools/__init__.py +0 -6
- lfx/components/tools/calculator.py +2 -1
- lfx/components/tools/python_code_structured_tool.py +1 -0
- lfx/components/tools/python_repl.py +2 -1
- lfx/components/tools/search_api.py +2 -1
- lfx/components/tools/serp_api.py +2 -1
- lfx/components/tools/tavily_search_tool.py +1 -0
- lfx/components/tools/wikidata_api.py +2 -1
- lfx/components/tools/wikipedia_api.py +2 -1
- lfx/components/tools/yahoo_finance.py +2 -1
- lfx/components/twelvelabs/video_embeddings.py +1 -1
- lfx/components/upstash/upstash.py +1 -1
- lfx/components/vectorstores/astradb_graph.py +8 -1
- lfx/components/vectorstores/local_db.py +1 -0
- lfx/components/vectorstores/weaviate.py +1 -1
- lfx/components/wolframalpha/wolfram_alpha_api.py +1 -1
- lfx/components/zep/zep.py +2 -1
- lfx/custom/attributes.py +1 -0
- lfx/graph/graph/base.py +61 -4
- lfx/inputs/inputs.py +1 -0
- lfx/log/logger.py +31 -11
- lfx/schema/message.py +6 -1
- lfx/schema/schema.py +4 -0
- lfx/services/__init__.py +3 -0
- lfx/services/mcp_composer/__init__.py +6 -0
- lfx/services/mcp_composer/factory.py +16 -0
- lfx/services/mcp_composer/service.py +599 -0
- lfx/services/schema.py +1 -0
- lfx/services/settings/auth.py +18 -15
- lfx/services/settings/base.py +38 -0
- lfx/services/settings/constants.py +4 -1
- lfx/services/settings/feature_flags.py +0 -1
- lfx/template/frontend_node/base.py +2 -0
- lfx/utils/image.py +1 -1
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/METADATA +1 -1
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/RECORD +126 -118
- lfx/components/datastax/astradb.py +0 -1285
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.1.12.dev13.dist-info → lfx_nightly-0.1.12.dev15.dist-info}/entry_points.txt +0 -0
lfx/components/knowledge_bases/retrieval.py
ADDED
@@ -0,0 +1,256 @@
+import json
+from pathlib import Path
+from typing import Any
+
+from cryptography.fernet import InvalidToken
+from langchain_chroma import Chroma
+from langflow.services.auth.utils import decrypt_api_key
+from langflow.services.database.models.user.crud import get_user_by_id
+from pydantic import SecretStr
+
+from lfx.base.knowledge_bases.knowledge_base_utils import get_knowledge_bases
+from lfx.custom import Component
+from lfx.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput
+from lfx.log.logger import logger
+from lfx.schema.data import Data
+from lfx.schema.dataframe import DataFrame
+from lfx.services.deps import get_settings_service, session_scope
+
+settings = get_settings_service().settings
+knowledge_directory = settings.knowledge_bases_dir
+if not knowledge_directory:
+    msg = "Knowledge bases directory is not set in the settings."
+    raise ValueError(msg)
+KNOWLEDGE_BASES_ROOT_PATH = Path(knowledge_directory).expanduser()
+
+
+class KnowledgeRetrievalComponent(Component):
+    display_name = "Knowledge Retrieval"
+    description = "Search and retrieve data from knowledge."
+    icon = "download"
+    name = "KnowledgeRetrieval"
+
+    inputs = [
+        DropdownInput(
+            name="knowledge_base",
+            display_name="Knowledge",
+            info="Select the knowledge to load data from.",
+            required=True,
+            options=[],
+            refresh_button=True,
+            real_time_refresh=True,
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="Embedding Provider API Key",
+            info="API key for the embedding provider to generate embeddings.",
+            advanced=True,
+            required=False,
+        ),
+        MessageTextInput(
+            name="search_query",
+            display_name="Search Query",
+            info="Optional search query to filter knowledge base data.",
+            tool_mode=True,
+        ),
+        IntInput(
+            name="top_k",
+            display_name="Top K Results",
+            info="Number of top results to return from the knowledge base.",
+            value=5,
+            advanced=True,
+            required=False,
+        ),
+        BoolInput(
+            name="include_metadata",
+            display_name="Include Metadata",
+            info="Whether to include all metadata in the output. If false, only content is returned.",
+            value=True,
+            advanced=False,
+        ),
+        BoolInput(
+            name="include_embeddings",
+            display_name="Include Embeddings",
+            info="Whether to include embeddings in the output. Only applicable if 'Include Metadata' is enabled.",
+            value=False,
+            advanced=True,
+        ),
+    ]
+
+    outputs = [
+        Output(
+            name="retrieve_data",
+            display_name="Results",
+            method="retrieve_data",
+            info="Returns the data from the selected knowledge base.",
+        ),
+    ]
+
+    async def update_build_config(self, build_config, field_value, field_name=None):  # noqa: ARG002
+        if field_name == "knowledge_base":
+            # Update the knowledge base options dynamically
+            build_config["knowledge_base"]["options"] = await get_knowledge_bases(
+                KNOWLEDGE_BASES_ROOT_PATH,
+                user_id=self.user_id,  # Use the user_id from the component context
+            )
+
+            # If the selected knowledge base is not available, reset it
+            if build_config["knowledge_base"]["value"] not in build_config["knowledge_base"]["options"]:
+                build_config["knowledge_base"]["value"] = None
+
+        return build_config
+
+    def _get_kb_metadata(self, kb_path: Path) -> dict:
+        """Load and process knowledge base metadata."""
+        metadata: dict[str, Any] = {}
+        metadata_file = kb_path / "embedding_metadata.json"
+        if not metadata_file.exists():
+            logger.warning(f"Embedding metadata file not found at {metadata_file}")
+            return metadata
+
+        try:
+            with metadata_file.open("r", encoding="utf-8") as f:
+                metadata = json.load(f)
+        except json.JSONDecodeError:
+            logger.error(f"Error decoding JSON from {metadata_file}")
+            return {}
+
+        # Decrypt API key if it exists
+        if "api_key" in metadata and metadata.get("api_key"):
+            settings_service = get_settings_service()
+            try:
+                decrypted_key = decrypt_api_key(metadata["api_key"], settings_service)
+                metadata["api_key"] = decrypted_key
+            except (InvalidToken, TypeError, ValueError) as e:
+                logger.error(f"Could not decrypt API key. Please provide it manually. Error: {e}")
+                metadata["api_key"] = None
+        return metadata
+
+    def _build_embeddings(self, metadata: dict):
+        """Build embedding model from metadata."""
+        runtime_api_key = self.api_key.get_secret_value() if isinstance(self.api_key, SecretStr) else self.api_key
+        provider = metadata.get("embedding_provider")
+        model = metadata.get("embedding_model")
+        api_key = runtime_api_key or metadata.get("api_key")
+        chunk_size = metadata.get("chunk_size")
+
+        # Handle various providers
+        if provider == "OpenAI":
+            from langchain_openai import OpenAIEmbeddings
+
+            if not api_key:
+                msg = "OpenAI API key is required. Provide it in the component's advanced settings."
+                raise ValueError(msg)
+            return OpenAIEmbeddings(
+                model=model,
+                api_key=api_key,
+                chunk_size=chunk_size,
+            )
+        if provider == "HuggingFace":
+            from langchain_huggingface import HuggingFaceEmbeddings
+
+            return HuggingFaceEmbeddings(
+                model=model,
+            )
+        if provider == "Cohere":
+            from langchain_cohere import CohereEmbeddings
+
+            if not api_key:
+                msg = "Cohere API key is required when using Cohere provider"
+                raise ValueError(msg)
+            return CohereEmbeddings(
+                model=model,
+                cohere_api_key=api_key,
+            )
+        if provider == "Custom":
+            # For custom embedding models, we would need additional configuration
+            msg = "Custom embedding models not yet supported"
+            raise NotImplementedError(msg)
+        # Add other providers here if they become supported in ingest
+        msg = f"Embedding provider '{provider}' is not supported for retrieval."
+        raise NotImplementedError(msg)
+
+    async def retrieve_data(self) -> DataFrame:
+        """Retrieve data from the selected knowledge base by reading the Chroma collection.
+
+        Returns:
+            A DataFrame containing the data rows from the knowledge base.
+        """
+        # Get the current user
+        async with session_scope() as db:
+            if not self.user_id:
+                msg = "User ID is required for fetching Knowledge Base data."
+                raise ValueError(msg)
+            current_user = await get_user_by_id(db, self.user_id)
+            if not current_user:
+                msg = f"User with ID {self.user_id} not found."
+                raise ValueError(msg)
+            kb_user = current_user.username
+        kb_path = KNOWLEDGE_BASES_ROOT_PATH / kb_user / self.knowledge_base
+
+        metadata = self._get_kb_metadata(kb_path)
+        if not metadata:
+            msg = f"Metadata not found for knowledge base: {self.knowledge_base}. Ensure it has been indexed."
+            raise ValueError(msg)
+
+        # Build the embedder for the knowledge base
+        embedding_function = self._build_embeddings(metadata)
+
+        # Load vector store
+        chroma = Chroma(
+            persist_directory=str(kb_path),
+            embedding_function=embedding_function,
+            collection_name=self.knowledge_base,
+        )
+
+        # If a search query is provided, perform a similarity search
+        if self.search_query:
+            # Use the search query to perform a similarity search
+            logger.info(f"Performing similarity search with query: {self.search_query}")
+            results = chroma.similarity_search_with_score(
+                query=self.search_query or "",
+                k=self.top_k,
+            )
+        else:
+            results = chroma.similarity_search(
+                query=self.search_query or "",
+                k=self.top_k,
+            )
+
+            # For each result, make it a tuple to match the expected output format
+            results = [(doc, 0) for doc in results]  # Assign a dummy score of 0
+
+        # If include_embeddings is enabled, get embeddings for the results
+        id_to_embedding = {}
+        if self.include_embeddings and results:
+            doc_ids = [doc[0].metadata.get("_id") for doc in results if doc[0].metadata.get("_id")]
+
+            # Only proceed if we have valid document IDs
+            if doc_ids:
+                # Access underlying client to get embeddings
+                collection = chroma._client.get_collection(name=self.knowledge_base)
+                embeddings_result = collection.get(where={"_id": {"$in": doc_ids}}, include=["metadatas", "embeddings"])
+
+                # Create a mapping from document ID to embedding
+                for i, metadata in enumerate(embeddings_result.get("metadatas", [])):
+                    if metadata and "_id" in metadata:
+                        id_to_embedding[metadata["_id"]] = embeddings_result["embeddings"][i]
+
+        # Build output data based on include_metadata setting
+        data_list = []
+        for doc in results:
+            kwargs = {
+                "content": doc[0].page_content,
+            }
+            if self.search_query:
+                kwargs["_score"] = -1 * doc[1]
+            if self.include_metadata:
+                # Include all metadata, embeddings, and content
+                kwargs.update(doc[0].metadata)
+                if self.include_embeddings:
+                    kwargs["_embeddings"] = id_to_embedding.get(doc[0].metadata.get("_id"))
+
+            data_list.append(Data(**kwargs))
+
+        # Return the DataFrame containing the data
+        return DataFrame(data=data_list)
lfx/components/logic/conditional_router.py
CHANGED
@@ -129,20 +129,55 @@ class ConditionalRouterComponent(Component):
         return False
 
     def iterate_and_stop_once(self, route_to_stop: str):
+        """Handles cycle iteration counting and branch exclusion.
+
+        Uses two complementary mechanisms:
+        1. stop() - ACTIVE/INACTIVE state for cycle management (gets reset each iteration)
+        2. exclude_branch_conditionally() - Persistent exclusion for conditional routing
+
+        When max_iterations is reached, breaks the cycle by allowing the default_route to execute.
+        """
         if not self.__iteration_updated:
             self.update_ctx({f"{self._id}_iteration": self.ctx.get(f"{self._id}_iteration", 0) + 1})
             self.__iteration_updated = True
-
+        current_iteration = self.ctx.get(f"{self._id}_iteration", 0)
+
+        # Check if max iterations reached and we're trying to stop the default route
+        if current_iteration >= self.max_iterations and route_to_stop == self.default_route:
+            # Clear ALL conditional exclusions to allow default route to execute
+            if self._id in self.graph.conditional_exclusion_sources:
+                previous_exclusions = self.graph.conditional_exclusion_sources[self._id]
+                self.graph.conditionally_excluded_vertices -= previous_exclusions
+                del self.graph.conditional_exclusion_sources[self._id]
+
+            # Switch which route to stop - stop the NON-default route to break the cycle
             route_to_stop = "true_result" if route_to_stop == "false_result" else "false_result"
+
+            # Call stop to break the cycle
+            self.stop(route_to_stop)
+            # Don't apply conditional exclusion when breaking cycle
+            return
+
+        # Normal case: Use BOTH mechanisms
+        # 1. stop() for cycle management (marks INACTIVE, updates run manager, gets reset)
         self.stop(route_to_stop)
 
+        # 2. Conditional exclusion for persistent routing (doesn't get reset except by this router)
+        self.graph.exclude_branch_conditionally(self._id, output_name=route_to_stop)
+
     def true_response(self) -> Message:
         result = self.evaluate_condition(
             self.input_text, self.match_text, self.operator, case_sensitive=self.case_sensitive
         )
-
+
+        # Check if we should force output due to max_iterations on default route
+        current_iteration = self.ctx.get(f"{self._id}_iteration", 0)
+        force_output = current_iteration >= self.max_iterations and self.default_route == "true_result"
+
+        if result or force_output:
             self.status = self.true_case_message
-
+            if not force_output:  # Only stop the other branch if not forcing due to max iterations
+                self.iterate_and_stop_once("false_result")
             return self.true_case_message
         self.iterate_and_stop_once("true_result")
         return Message(content="")
@@ -151,10 +186,12 @@ class ConditionalRouterComponent(Component):
         result = self.evaluate_condition(
             self.input_text, self.match_text, self.operator, case_sensitive=self.case_sensitive
         )
+
         if not result:
             self.status = self.false_case_message
             self.iterate_and_stop_once("true_result")
             return self.false_case_message
+
         self.iterate_and_stop_once("false_result")
         return Message(content="")
 
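Taken together, the two conditional-router hunks implement one rule: the condition picks the branch until the iteration budget runs out, after which the default route fires regardless. A toy model of that rule in plain Python — it deliberately ignores the graph, `stop()`, and exclusion machinery; `max_iterations` and `default_route` mirror the component's fields:

```python
# Toy model of the router's cycle-breaking behavior -- not the Langflow API.
def fired_route(result: bool, iteration: int, max_iterations: int, default_route: str) -> str:
    """Which output emits a message on a given pass through the router."""
    if iteration >= max_iterations:
        # Budget spent: the default route fires even if the condition
        # disagrees, which is what breaks the cycle.
        return default_route
    return "true_result" if result else "false_result"


# With default_route="true_result" and max_iterations=3, a condition that is
# always False still exits through the true branch on the third pass:
assert [fired_route(False, i, 3, "true_result") for i in (1, 2, 3)] == [
    "false_result",
    "false_result",
    "true_result",
]
```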
lfx/components/logic/flow_tool.py
CHANGED
@@ -14,12 +14,13 @@ from lfx.schema.dotdict import dotdict
 
 
 class FlowToolComponent(LCToolComponent):
-    display_name = "Flow as Tool
+    display_name = "Flow as Tool"
     description = "Construct a Tool from a function that runs the loaded Flow."
     field_order = ["flow_name", "name", "description", "return_direct"]
     trace_type = "tool"
     name = "FlowTool"
     legacy: bool = True
+    replacement = ["logic.RunFlow"]
     icon = "hammer"
 
     async def get_flow_names(self) -> list[str]:
lfx/components/logic/sub_flow.py
CHANGED
@@ -12,10 +12,11 @@ from lfx.schema.dotdict import dotdict
 
 
 class SubFlowComponent(Component):
-    display_name = "Sub Flow
+    display_name = "Sub Flow"
     description = "Generates a Component from a Flow, with all of its inputs, and "
     name = "SubFlow"
     legacy: bool = True
+    replacement = ["logic.RunFlow"]
     icon = "Workflow"
 
     async def get_flow_names(self) -> list[str]:
lfx/components/milvus/milvus.py
CHANGED
lfx/components/olivya/olivya.py
CHANGED
lfx/components/processing/filter_data_values.py
CHANGED
@@ -15,6 +15,7 @@ class DataFilterComponent(Component):
     beta = True
     name = "FilterDataValues"
     legacy = True
+    replacement = ["processing.DataOperations"]
 
     inputs = [
         DataInput(name="input_data", display_name="Input Data", info="The list of data items to filter.", is_list=True),
lfx/components/processing/json_cleaner.py
CHANGED
@@ -15,6 +15,7 @@ class JSONCleaner(Component):
         "so that they are fully compliant with the JSON spec."
     )
     legacy = True
+    replacement = ["processing.ParserComponent"]
     inputs = [
         MessageTextInput(
             name="json_str", display_name="JSON String", info="The JSON string to be cleaned.", required=True
lfx/components/processing/merge_data.py
CHANGED
@@ -20,6 +20,7 @@ class MergeDataComponent(Component):
     icon = "merge"
     MIN_INPUTS_REQUIRED = 2
     legacy = True
+    replacement = ["processing.DataOperations"]
 
     inputs = [
         DataInput(name="data_inputs", display_name="Data Inputs", info="Data to combine", is_list=True, required=True),
lfx/components/processing/parse_dataframe.py
CHANGED
@@ -12,6 +12,7 @@ class ParseDataFrameComponent(Component):
     icon = "braces"
     name = "ParseDataFrame"
     legacy = True
+    replacement = ["processing.DataFrameOperations", "processing.TypeConverterComponent"]
 
     inputs = [
         DataFrameInput(name="df", display_name="DataFrame", info="The DataFrame to convert to text rows."),
lfx/components/processing/structured_output.py
CHANGED
@@ -197,6 +197,10 @@ class StructuredOutputComponent(Component):
             # handle empty or unexpected type case
             msg = "No structured output returned"
             raise ValueError(msg)
-
-
-
+        if len(output) == 1:
+            # For single dictionary, wrap in a list to create DataFrame with one row
+            return DataFrame([output[0]])
+        if len(output) > 1:
+            # Multiple outputs - convert to DataFrame directly
+            return DataFrame(output)
+        return DataFrame()
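The new tail of `StructuredOutputComponent` normalizes whatever the model returned into a `DataFrame`: a single record is wrapped in a list so it becomes a one-row frame, several records become one row each, and an empty result yields an empty frame. A standalone sketch of the same logic, with pandas standing in for lfx's own `DataFrame` (which the diff shows accepting a list of dicts):

```python
# Sketch of the normalization added above; pandas stands in for
# lfx.schema.dataframe.DataFrame here.
import pandas as pd


def normalize(output: list[dict]) -> pd.DataFrame:
    if len(output) == 1:
        return pd.DataFrame([output[0]])  # single record -> one-row frame
    if len(output) > 1:
        return pd.DataFrame(output)  # many records -> one row per record
    return pd.DataFrame()  # nothing extracted -> empty frame


assert len(normalize([{"a": 1}])) == 1
assert len(normalize([{"a": 1}, {"a": 2}])) == 2
assert normalize([]).empty
```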
lfx/components/prototypes/__init__.py
CHANGED
@@ -1,23 +1,24 @@
 from __future__ import annotations
 
-from typing import
+from typing import Any
 
 from lfx.components._importing import import_mod
 
-
-
+# _dynamic_imports = {
+#     "KnowledgeIngestionComponent": "ingestion",
+#     "KnowledgeRetrievalComponent": "retrieval",
 
+# }
 _dynamic_imports = {
     "PythonFunctionComponent": "python_function",
 }
 
-__all__ = [
-
-]
+# __all__ = ["KnowledgeIngestionComponent", "KnowledgeRetrievalComponent"]
+__all__ = ["PythonFunctionComponent"]
 
 
 def __getattr__(attr_name: str) -> Any:
-    """Lazily import
+    """Lazily import input/output components on attribute access."""
     if attr_name not in _dynamic_imports:
         msg = f"module '{__name__}' has no attribute '{attr_name}'"
         raise AttributeError(msg)
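The `_dynamic_imports`/`__getattr__` pair in this `__init__.py` is the module-level lazy-import pattern from PEP 562: a component class is only imported the first time it is accessed as an attribute of the package. A minimal self-contained sketch of the same pattern — generic names; lfx routes the actual import through its `import_mod` helper, and plain `importlib` is used here instead:

```python
# Minimal PEP 562 lazy-import sketch mirroring the __init__.py above.
# Generic submodule/class names; lfx uses its own import_mod helper.
import importlib
from typing import Any

_dynamic_imports = {"PythonFunctionComponent": "python_function"}

__all__ = ["PythonFunctionComponent"]


def __getattr__(attr_name: str) -> Any:
    """Import the owning submodule on first attribute access, then cache."""
    if attr_name not in _dynamic_imports:
        msg = f"module '{__name__}' has no attribute '{attr_name}'"
        raise AttributeError(msg)
    module = importlib.import_module(f".{_dynamic_imports[attr_name]}", __package__)
    value = getattr(module, attr_name)
    globals()[attr_name] = value  # cache so __getattr__ is not hit again
    return value
```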
lfx/components/qdrant/qdrant.py
CHANGED
@@ -23,7 +23,7 @@ class QdrantVectorStoreComponent(LCVectorStoreComponent):
         StrInput(name="host", display_name="Host", value="localhost", advanced=True),
         IntInput(name="port", display_name="Port", value=6333, advanced=True),
         IntInput(name="grpc_port", display_name="gRPC Port", value=6334, advanced=True),
-        SecretStrInput(name="api_key", display_name="API Key", advanced=True),
+        SecretStrInput(name="api_key", display_name="Qdrant API Key", advanced=True),
         StrInput(name="prefix", display_name="Prefix", advanced=True),
         IntInput(name="timeout", display_name="Timeout", advanced=True),
         StrInput(name="path", display_name="Path", advanced=True),
lfx/components/redis/redis_chat.py
CHANGED
@@ -23,7 +23,7 @@ class RedisIndexChatMemory(LCChatMemoryComponent):
             name="username", display_name="Username", value="", info="The Redis user name.", advanced=True
         ),
         SecretStrInput(
-            name="password", display_name="Password", value="", info="The password for username.", advanced=True
+            name="password", display_name="Redis Password", value="", info="The password for username.", advanced=True
         ),
         StrInput(name="key_prefix", display_name="Key prefix", info="Key prefix.", advanced=True),
         MessageTextInput(
lfx/components/tools/__init__.py
CHANGED
@@ -9,8 +9,6 @@ from lfx.components._importing import import_mod
 
 if TYPE_CHECKING:
     from .calculator import CalculatorToolComponent
-    from .google_search_api import GoogleSearchAPIComponent
-    from .google_serper_api import GoogleSerperAPIComponent
     from .python_code_structured_tool import PythonCodeStructuredTool
     from .python_repl import PythonREPLToolComponent
     from .search_api import SearchAPIComponent
@@ -23,8 +21,6 @@ if TYPE_CHECKING:
 
 _dynamic_imports = {
     "CalculatorToolComponent": "calculator",
-    "GoogleSearchAPIComponent": "google_search_api",
-    "GoogleSerperAPIComponent": "google_serper_api",
     "PythonCodeStructuredTool": "python_code_structured_tool",
     "PythonREPLToolComponent": "python_repl",
     "SearchAPIComponent": "search_api",
@@ -38,8 +34,6 @@ _dynamic_imports = {
 
 __all__ = [
     "CalculatorToolComponent",
-    "GoogleSearchAPIComponent",
-    "GoogleSerperAPIComponent",
     "PythonCodeStructuredTool",
     "PythonREPLToolComponent",
     "SearXNGToolComponent",