lfx-nightly 0.2.0.dev26__py3-none-any.whl → 0.2.0.dev41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/_assets/component_index.json +1 -1
- lfx/base/agents/agent.py +8 -3
- lfx/base/agents/altk_base_agent.py +16 -3
- lfx/base/data/base_file.py +14 -4
- lfx/base/data/docling_utils.py +61 -10
- lfx/base/data/storage_utils.py +109 -0
- lfx/base/mcp/util.py +2 -2
- lfx/base/models/anthropic_constants.py +21 -12
- lfx/cli/commands.py +3 -1
- lfx/components/docling/chunk_docling_document.py +3 -1
- lfx/components/docling/export_docling_document.py +3 -1
- lfx/components/files_and_knowledge/file.py +59 -7
- lfx/components/files_and_knowledge/save_file.py +79 -12
- lfx/components/ibm/watsonx.py +7 -1
- lfx/components/input_output/chat_output.py +7 -1
- lfx/components/llm_operations/batch_run.py +16 -7
- lfx/components/models_and_agents/agent.py +4 -2
- lfx/components/models_and_agents/embedding_model.py +6 -76
- lfx/components/ollama/ollama.py +9 -4
- lfx/components/processing/__init__.py +0 -3
- lfx/custom/directory_reader/directory_reader.py +5 -2
- lfx/graph/graph/base.py +1 -4
- lfx/graph/vertex/base.py +1 -4
- lfx/schema/image.py +2 -12
- lfx/services/interfaces.py +5 -0
- lfx/services/manager.py +5 -4
- lfx/services/mcp_composer/service.py +38 -12
- lfx/services/settings/auth.py +18 -11
- lfx/services/settings/base.py +5 -23
- lfx/services/storage/local.py +32 -0
- lfx/services/storage/service.py +19 -0
- lfx/utils/image.py +29 -11
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/METADATA +1 -1
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/RECORD +36 -39
- lfx/base/embeddings/embeddings_class.py +0 -113
- lfx/components/elastic/opensearch_multimodal.py +0 -1575
- lfx/components/processing/dataframe_to_toolset.py +0 -259
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/WHEEL +0 -0
- {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/entry_points.txt +0 -0
lfx/components/files_and_knowledge/save_file.py
CHANGED

```diff
@@ -77,7 +77,10 @@ class SaveToFileComponent(Component):
         BoolInput(
             name="append_mode",
             display_name="Append",
-            info=
+            info=(
+                "Append to file if it exists (only for Local storage with plain text formats). "
+                "Not supported for cloud storage (AWS/Google Drive)."
+            ),
             value=False,
             show=False,
         ),
@@ -157,6 +160,7 @@ class SaveToFileComponent(Component):
                 "The Google Drive folder ID where the file will be uploaded. "
                 "The folder must be shared with the service account email."
            ),
+            required=True,
            show=False,
            advanced=True,
        ),
@@ -196,11 +200,13 @@ class SaveToFileComponent(Component):
         if len(selected) == 1:
             location = selected[0]

-            # Show file_name
+            # Show file_name when any storage location is selected
             if "file_name" in build_config:
                 build_config["file_name"]["show"] = True
+
+            # Show append_mode only for Local storage (not supported for cloud storage)
             if "append_mode" in build_config:
-                build_config["append_mode"]["show"] =
+                build_config["append_mode"]["show"] = location == "Local"

             if location == "Local":
                 if "local_format" in build_config:
@@ -575,7 +581,9 @@ class SaveToFileComponent(Component):
         # Create temporary file
         import tempfile

-        with tempfile.NamedTemporaryFile(
+        with tempfile.NamedTemporaryFile(
+            mode="w", encoding="utf-8", suffix=f".{file_format}", delete=False
+        ) as temp_file:
             temp_file.write(content)
             temp_file_path = temp_file.name

@@ -611,16 +619,57 @@ class SaveToFileComponent(Component):
             msg = "Google API client libraries are not installed. Please install them."
             raise ImportError(msg) from e

-        # Parse credentials
+        # Parse credentials with multiple fallback strategies
+        credentials_dict = None
+        parse_errors = []
+
+        # Strategy 1: Parse as-is with strict=False to allow control characters
         try:
-            credentials_dict = json.loads(self.service_account_key)
+            credentials_dict = json.loads(self.service_account_key, strict=False)
         except json.JSONDecodeError as e:
-
-
+            parse_errors.append(f"Standard parse: {e!s}")
+
+        # Strategy 2: Strip whitespace and try again
+        if credentials_dict is None:
+            try:
+                cleaned_key = self.service_account_key.strip()
+                credentials_dict = json.loads(cleaned_key, strict=False)
+            except json.JSONDecodeError as e:
+                parse_errors.append(f"Stripped parse: {e!s}")
+
+        # Strategy 3: Check if it's double-encoded (JSON string of a JSON string)
+        if credentials_dict is None:
+            try:
+                decoded_once = json.loads(self.service_account_key, strict=False)
+                if isinstance(decoded_once, str):
+                    credentials_dict = json.loads(decoded_once, strict=False)
+                else:
+                    credentials_dict = decoded_once
+            except json.JSONDecodeError as e:
+                parse_errors.append(f"Double-encoded parse: {e!s}")
+
+        # Strategy 4: Try to fix common issues with newlines in the private_key field
+        if credentials_dict is None:
+            try:
+                # Replace literal \n with actual newlines which is common in pasted JSON
+                fixed_key = self.service_account_key.replace("\\n", "\n")
+                credentials_dict = json.loads(fixed_key, strict=False)
+            except json.JSONDecodeError as e:
+                parse_errors.append(f"Newline-fixed parse: {e!s}")
+
+        if credentials_dict is None:
+            error_details = "; ".join(parse_errors)
+            msg = (
+                f"Unable to parse service account key JSON. Tried multiple strategies: {error_details}. "
+                "Please ensure you've copied the entire JSON content from your service account key file. "
+                "The JSON should start with '{' and contain fields like 'type', 'project_id', 'private_key', etc."
+            )
+            raise ValueError(msg)

-        # Create Google Drive service
+        # Create Google Drive service with appropriate scopes
+        # Use drive scope for folder access, file scope is too restrictive for folder verification
         credentials = service_account.Credentials.from_service_account_info(
-            credentials_dict, scopes=["https://www.googleapis.com/auth/drive
+            credentials_dict, scopes=["https://www.googleapis.com/auth/drive"]
         )
         drive_service = build("drive", "v3", credentials=credentials)
```
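The fallback chain above reads more clearly in isolation. Below is a minimal, self-contained sketch of the same strategy order; `load_service_account_key` and `raw_key` are illustrative names, not part of the package:

```python
import json


def load_service_account_key(raw_key: str) -> dict:
    """Illustrative sketch of the fallback parsing chain in the hunk above."""
    errors: list[str] = []
    # strict=False lets json.loads accept control characters (e.g. raw newlines
    # inside the private_key value), which pasted keys often contain.
    for label, candidate in (
        ("standard", raw_key),
        ("stripped", raw_key.strip()),
        ("newline-fixed", raw_key.replace("\\n", "\n")),
    ):
        try:
            parsed = json.loads(candidate, strict=False)
            # A double-encoded key decodes to a str first; decode once more.
            return json.loads(parsed, strict=False) if isinstance(parsed, str) else parsed
        except json.JSONDecodeError as e:
            errors.append(f"{label}: {e}")
    raise ValueError("; ".join(errors))


# Example: a double-encoded key (a JSON string containing JSON) still parses.
print(load_service_account_key(json.dumps(json.dumps({"type": "service_account"}))))
```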
```diff
@@ -634,16 +683,34 @@ class SaveToFileComponent(Component):

         # Create temporary file
         file_path = f"{self.file_name}.{file_format}"
-        with tempfile.NamedTemporaryFile(
+        with tempfile.NamedTemporaryFile(
+            mode="w",
+            encoding="utf-8",
+            suffix=f".{file_format}",
+            delete=False,
+        ) as temp_file:
             temp_file.write(content)
             temp_file_path = temp_file.name

         try:
             # Upload to Google Drive
+            # Note: We skip explicit folder verification since it requires broader permissions.
+            # If the folder doesn't exist or isn't accessible, the create() call will fail with a clear error.
             file_metadata = {"name": file_path, "parents": [self.folder_id]}
             media = MediaFileUpload(temp_file_path, resumable=True)

-
+            try:
+                uploaded_file = (
+                    drive_service.files().create(body=file_metadata, media_body=media, fields="id").execute()
+                )
+            except Exception as e:
+                msg = (
+                    f"Unable to upload file to Google Drive folder '{self.folder_id}'. "
+                    f"Error: {e!s}. "
+                    "Please ensure: 1) The folder ID is correct, 2) The folder exists, "
+                    "3) The service account has been granted access to this folder."
+                )
+                raise ValueError(msg) from e

             file_id = uploaded_file.get("id")
             file_url = f"https://drive.google.com/file/d/{file_id}/view"
```
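The hunk writes through `NamedTemporaryFile` with `delete=False` so the closed file can be reopened by path (`MediaFileUpload` reads from a path, and on Windows an open temporary file generally cannot be reopened by name). A generic sketch of the close-then-reopen pattern, with the manual cleanup it obligates:

```python
import os
import tempfile

content = "example payload"

# delete=False keeps the file on disk after the context manager closes it,
# so another API (a stand-in for MediaFileUpload here) can reopen it by path.
with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", suffix=".txt", delete=False) as tmp:
    tmp.write(content)
    tmp_path = tmp.name

try:
    with open(tmp_path, encoding="utf-8") as f:  # reopen by path, as an uploader would
        assert f.read() == content
finally:
    os.unlink(tmp_path)  # manual cleanup is the trade-off for delete=False
```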
lfx/components/ibm/watsonx.py
CHANGED
```diff
@@ -197,8 +197,14 @@ class WatsonxAIComponent(LCModelComponent):
             "logit_bias": logit_bias,
         }

+        # Pass API key as plain string to avoid SecretStr serialization issues
+        # when model is configured with with_config() or used in batch operations
+        api_key_value = self.api_key
+        if isinstance(api_key_value, SecretStr):
+            api_key_value = api_key_value.get_secret_value()
+
         return ChatWatsonx(
-            apikey=
+            apikey=api_key_value,
             url=self.base_url,
             project_id=self.project_id,
             model_id=self.model_name,
```
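`get_secret_value()` is pydantic's documented accessor for `SecretStr`; the wrapper otherwise keeps the raw key out of reprs and logs. A quick illustration of the guarded unwrap the diff adds:

```python
from pydantic import SecretStr

api_key = SecretStr("sk-example")  # placeholder key

print(api_key)                     # -> **********  (masked in repr/logs)
print(api_key.get_secret_value())  # -> sk-example  (explicit unwrap)

# The guarded unwrap from the diff: accept either a plain str or a SecretStr.
value = api_key.get_secret_value() if isinstance(api_key, SecretStr) else api_key
```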
lfx/components/input_output/chat_output.py
CHANGED

```diff
@@ -121,13 +121,19 @@ class ChatOutput(ChatComponent):
             message = self.input_value
             # Update message properties
             message.text = text
+            # Preserve existing session_id from the incoming message if it exists
+            existing_session_id = message.session_id
         else:
             message = Message(text=text)
+            existing_session_id = None

         # Set message properties
         message.sender = self.sender
         message.sender_name = self.sender_name
-
+        # Preserve session_id from incoming message, or use component/graph session_id
+        message.session_id = (
+            self.session_id or existing_session_id or (self.graph.session_id if hasattr(self, "graph") else None) or ""
+        )
         message.context_id = self.context_id
         message.flow_id = self.graph.flow_id if hasattr(self, "graph") else None
         message.properties.source = self._build_source(source_id, display_name, source)
```
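The precedence chain relies on `or` short-circuiting, so `None` and empty strings both fall through to the next candidate. A tiny sketch with a hypothetical helper and made-up IDs:

```python
def resolve_session_id(component_id: str | None, incoming_id: str | None, graph_id: str | None) -> str:
    """Hypothetical helper mirroring the precedence chain in the diff.

    `or` falls through on any falsy value, so None and "" are both treated
    as "not set", and the final `or ""` guarantees a str result.
    """
    return component_id or incoming_id or graph_id or ""


assert resolve_session_id(None, "sess-42", "graph-1") == "sess-42"
assert resolve_session_id("", None, None) == ""
```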
lfx/components/llm_operations/batch_run.py
CHANGED

```diff
@@ -159,13 +159,22 @@ class BatchRunComponent(Component):
         ]

         # Configure the model with project info and callbacks
-        model = model.with_config(
-            {
-                "run_name": self.display_name,
-                "project_name": self.get_project_name(),
-                "callbacks": self.get_langchain_callbacks(),
-            }
-        )
+        # Some models (e.g., ChatWatsonx) may have serialization issues with with_config()
+        # due to SecretStr or other non-serializable attributes
+        try:
+            model = model.with_config(
+                {
+                    "run_name": self.display_name,
+                    "project_name": self.get_project_name(),
+                    "callbacks": self.get_langchain_callbacks(),
+                }
+            )
+        except (TypeError, ValueError, AttributeError) as e:
+            # Log warning and continue without configuration
+            await logger.awarning(
+                f"Could not configure model with callbacks and project info: {e!s}. "
+                "Proceeding with batch processing without configuration."
+            )
         # Process batches and track progress
         responses_with_idx = list(
             zip(
```
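`with_config()` returns a configured copy of a runnable, and the try/except keeps the batch alive when a model's attributes fail to serialize. A minimal sketch of the guarded pattern, using langchain_core's `RunnableLambda` as a stand-in for a chat model:

```python
from langchain_core.runnables import RunnableLambda

model = RunnableLambda(lambda x: x * 2)  # stand-in for a chat model

try:
    # with_config returns a copy of the runnable with tracing metadata attached
    model = model.with_config({"run_name": "BatchRun demo"})
except (TypeError, ValueError, AttributeError) as e:
    # Fall back to the unconfigured runnable rather than failing the whole batch
    print(f"Could not configure model: {e}")

print(model.invoke(21))  # -> 42
```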
lfx/components/models_and_agents/agent.py
CHANGED

```diff
@@ -619,9 +619,11 @@ class AgentComponent(ToolCallingAgentComponent):
         # Call each component class's update_build_config method
         # remove the prefix from the field_name
         if isinstance(field_name, str) and isinstance(prefix, str):
-
+            field_name_without_prefix = field_name.replace(prefix, "")
+        else:
+            field_name_without_prefix = field_name
         build_config = await update_component_build_config(
-            component_class, build_config, field_value,
+            component_class, build_config, field_value, field_name_without_prefix
         )
         return dotdict({k: v.to_dict() if hasattr(v, "to_dict") else v for k, v in build_config.items()})
```
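One subtlety in this fix: `str.replace()` removes every occurrence of the prefix substring, while `str.removeprefix()` (Python 3.9+) strips only a leading one. For the single leading prefixes this code handles they behave the same, but the difference is easy to demonstrate with a made-up field name:

```python
field_name = "model_model_name"  # hypothetical field name
prefix = "model_"

# str.replace removes every occurrence, which can over-strip repeated substrings
print(field_name.replace(prefix, ""))   # -> "name"

# str.removeprefix strips only a leading occurrence
print(field_name.removeprefix(prefix))  # -> "model_name"
```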
lfx/components/models_and_agents/embedding_model.py
CHANGED

```diff
@@ -4,7 +4,6 @@ import requests
 from ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames
 from langchain_openai import OpenAIEmbeddings

-from lfx.base.embeddings.embeddings_class import EmbeddingsWithModels
 from lfx.base.embeddings.model import LCEmbeddingsModel
 from lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url
 from lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES
@@ -152,7 +151,7 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
             logger.exception("Error fetching models")
             return WATSONX_EMBEDDING_MODEL_NAMES

-    async def build_embeddings(self) -> Embeddings:
+    def build_embeddings(self) -> Embeddings:
         provider = self.provider
         model = self.model
         api_key = self.api_key
@@ -170,9 +169,7 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
            if not api_key:
                msg = "OpenAI API key is required when using OpenAI provider"
                raise ValueError(msg)
-
-            # Create the primary embedding instance
-            embeddings_instance = OpenAIEmbeddings(
+            return OpenAIEmbeddings(
                model=model,
                dimensions=dimensions or None,
                base_url=api_base or None,
@@ -184,26 +181,6 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
                model_kwargs=model_kwargs,
            )

-            # Create dedicated instances for each available model
-            available_models_dict = {}
-            for model_name in OPENAI_EMBEDDING_MODEL_NAMES:
-                available_models_dict[model_name] = OpenAIEmbeddings(
-                    model=model_name,
-                    dimensions=dimensions or None,  # Use same dimensions config for all
-                    base_url=api_base or None,
-                    api_key=api_key,
-                    chunk_size=chunk_size,
-                    max_retries=max_retries,
-                    timeout=request_timeout or None,
-                    show_progress_bar=show_progress_bar,
-                    model_kwargs=model_kwargs,
-                )
-
-            return EmbeddingsWithModels(
-                embeddings=embeddings_instance,
-                available_models=available_models_dict,
-            )
-
         if provider == "Ollama":
             try:
                 from langchain_ollama import OllamaEmbeddings
@@ -227,38 +204,12 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
                    "Learn more at https://docs.ollama.com/openai#openai-compatibility"
                )

-
-
-            # Create the primary embedding instance
-            embeddings_instance = OllamaEmbeddings(
+            return OllamaEmbeddings(
                model=model,
-                base_url=
+                base_url=transformed_base_url or "http://localhost:11434",
                **model_kwargs,
            )

-            # Fetch available Ollama models
-            available_model_names = await get_ollama_models(
-                base_url_value=self.ollama_base_url,
-                desired_capability=DESIRED_CAPABILITY,
-                json_models_key=JSON_MODELS_KEY,
-                json_name_key=JSON_NAME_KEY,
-                json_capabilities_key=JSON_CAPABILITIES_KEY,
-            )
-
-            # Create dedicated instances for each available model
-            available_models_dict = {}
-            for model_name in available_model_names:
-                available_models_dict[model_name] = OllamaEmbeddings(
-                    model=model_name,
-                    base_url=final_base_url,
-                    **model_kwargs,
-                )
-
-            return EmbeddingsWithModels(
-                embeddings=embeddings_instance,
-                available_models=available_models_dict,
-            )
-
         if provider == "IBM watsonx.ai":
             try:
                 from langchain_ibm import WatsonxEmbeddings
@@ -278,11 +229,9 @@ class EmbeddingModelComponent(LCEmbeddingsModel):

            from ibm_watsonx_ai import APIClient, Credentials

-            final_url = base_url_ibm_watsonx or "https://us-south.ml.cloud.ibm.com"
-
            credentials = Credentials(
                api_key=self.api_key,
-                url=
+                url=base_url_ibm_watsonx or "https://us-south.ml.cloud.ibm.com",
            )

            api_client = APIClient(credentials)
@@ -292,32 +241,13 @@ class EmbeddingModelComponent(LCEmbeddingsModel):
                EmbedTextParamsMetaNames.RETURN_OPTIONS: {"input_text": self.input_text},
            }

-
-            embeddings_instance = WatsonxEmbeddings(
+            return WatsonxEmbeddings(
                model_id=model,
                params=params,
                watsonx_client=api_client,
                project_id=project_id,
            )

-            # Fetch available IBM watsonx.ai models
-            available_model_names = self.fetch_ibm_models(final_url)
-
-            # Create dedicated instances for each available model
-            available_models_dict = {}
-            for model_name in available_model_names:
-                available_models_dict[model_name] = WatsonxEmbeddings(
-                    model_id=model_name,
-                    params=params,
-                    watsonx_client=api_client,
-                    project_id=project_id,
-                )
-
-            return EmbeddingsWithModels(
-                embeddings=embeddings_instance,
-                available_models=available_models_dict,
-            )
-
         msg = f"Unknown provider: {provider}"
         raise ValueError(msg)
```
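With the `EmbeddingsWithModels` wrapper gone, `build_embeddings` now hands back a plain LangChain `Embeddings` object, so downstream code uses the standard interface directly. A hedged illustration with a placeholder model name and key:

```python
from langchain_openai import OpenAIEmbeddings

# Placeholder model name and key; mirrors the direct-return style the diff adopts
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-small",
    dimensions=256,        # optional reduced output dimensionality
    api_key="sk-example",  # hypothetical key, replace with a real one
)

# The standard Embeddings interface the rest of the pipeline consumes
vector = embeddings.embed_query("hello world")
print(len(vector))  # -> 256 with this model and dimensions setting
```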
lfx/components/ollama/ollama.py
CHANGED
```diff
@@ -350,12 +350,17 @@ class ChatOllamaComponent(LCModelComponent):
             build_config["mirostat_tau"]["value"] = 5

         if field_name in {"model_name", "base_url", "tool_model_enabled"}:
-
-
-
+            # Use field_value if base_url is being updated, otherwise use self.base_url
+            base_url_to_check = field_value if field_name == "base_url" else self.base_url
+            # Fallback to self.base_url if field_value is None or empty
+            if not base_url_to_check and field_name == "base_url":
+                base_url_to_check = self.base_url
+            logger.warning(f"Fetching Ollama models from updated URL: {base_url_to_check}")
+
+            if base_url_to_check and await self.is_valid_ollama_url(base_url_to_check):
                 tool_model_enabled = build_config["tool_model_enabled"].get("value", False) or self.tool_model_enabled
                 build_config["model_name"]["options"] = await self.get_models(
-
+                    base_url_to_check, tool_model_enabled=tool_model_enabled
                 )
             else:
                 build_config["model_name"]["options"] = []
```
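The URL-selection logic added here reduces to a small pure function: prefer the in-flight edit when the `base_url` field itself is changing, and fall back to the stored value when the edit is empty. A sketch with a hypothetical helper:

```python
def pick_base_url(field_name: str, field_value: str | None, stored_base_url: str) -> str:
    """Hypothetical helper mirroring the URL-selection logic in the diff."""
    # Prefer the in-flight edit only when the base_url field is the one changing
    candidate = field_value if field_name == "base_url" else stored_base_url
    # An empty or None edit falls back to the stored value
    return candidate or stored_base_url


assert pick_base_url("base_url", "http://10.0.0.5:11434", "http://localhost:11434") == "http://10.0.0.5:11434"
assert pick_base_url("base_url", "", "http://localhost:11434") == "http://localhost:11434"
assert pick_base_url("model_name", None, "http://localhost:11434") == "http://localhost:11434"
```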
lfx/components/processing/__init__.py
CHANGED

```diff
@@ -12,7 +12,6 @@ if TYPE_CHECKING:
     from lfx.components.processing.create_list import CreateListComponent
     from lfx.components.processing.data_operations import DataOperationsComponent
     from lfx.components.processing.dataframe_operations import DataFrameOperationsComponent
-    from lfx.components.processing.dataframe_to_toolset import DataFrameToToolsetComponent
     from lfx.components.processing.json_cleaner import JSONCleaner
     from lfx.components.processing.output_parser import OutputParserComponent
     from lfx.components.processing.parse_data import ParseDataComponent
@@ -27,7 +26,6 @@ _dynamic_imports = {
     "CreateListComponent": "create_list",
     "DataOperationsComponent": "data_operations",
     "DataFrameOperationsComponent": "dataframe_operations",
-    "DataFrameToToolsetComponent": "dataframe_to_toolset",
     "JSONCleaner": "json_cleaner",
     "OutputParserComponent": "output_parser",
     "ParseDataComponent": "parse_data",
@@ -41,7 +39,6 @@ __all__ = [
     "CombineTextComponent",
     "CreateListComponent",
     "DataFrameOperationsComponent",
-    "DataFrameToToolsetComponent",
     "DataOperationsComponent",
     "JSONCleaner",
     "MessageStoreComponent",
```
lfx/custom/directory_reader/directory_reader.py
CHANGED

```diff
@@ -72,8 +72,11 @@ class DirectoryReader:
                     if component["error"] if with_errors else not component["error"]:
                         component_tuple = (*build_component(component), component)
                         components.append(component_tuple)
-                except Exception:  # noqa: BLE001
-                    logger.debug(
+                except Exception as exc:  # noqa: BLE001
+                    logger.debug(
+                        f"Skipping component {component['name']} from {component['file']} (load error)",
+                        exc_info=exc,
+                    )
                     continue
             items.append({"name": menu["name"], "path": menu["path"], "components": components})
         filtered = [menu for menu in items if menu["components"]]
```
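Binding the exception as `exc` and passing it through `exc_info` attaches the full traceback to the debug record without interrupting the scan. The package uses its own logger wrapper; the stdlib equivalent of the pattern looks like this:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("directory_reader_demo")

try:
    import a_component_module_that_does_not_exist  # noqa: F401  (made-up name)
except Exception as exc:  # broad catch, mirroring the diff
    # exc_info attaches the traceback to the record while execution continues
    logger.debug("Skipping component (load error)", exc_info=exc)
```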
lfx/graph/graph/base.py
CHANGED
```diff
@@ -1519,10 +1519,7 @@ class Graph:
         try:
             params = ""
             should_build = False
-
-            # because they need to iterate through their data
-            is_loop_component = vertex.display_name == "Loop" or vertex.is_loop
-            if not vertex.frozen or is_loop_component:
+            if not vertex.frozen:
                 should_build = True
             else:
                 # Check the cache for the vertex
```
lfx/graph/vertex/base.py
CHANGED
```diff
@@ -735,10 +735,7 @@ class Vertex:
             self.build_inactive()
             return None

-
-        # because they need to iterate through their data
-        is_loop_component = self.display_name == "Loop" or self.is_loop
-        if self.frozen and self.built and not is_loop_component:
+        if self.frozen and self.built:
             return await self.get_requester_result(requester)
         if self.built and requester is not None:
             # This means that the vertex has already been built
```
lfx/schema/image.py
CHANGED
```diff
@@ -74,12 +74,7 @@ def get_file_paths(files: list[str | dict]):
         if not file_path_str:  # Skip empty paths
             continue

-
-        # Handle edge case where path might be just a filename without parent
-        if file_path.parent == Path():
-            flow_id, file_name = "", file_path.name
-        else:
-            flow_id, file_name = str(file_path.parent), file_path.name
+        flow_id, file_name = storage_service.parse_file_path(file_path_str)

         if not file_name:  # Skip if no filename
             continue
@@ -129,12 +124,7 @@ async def get_files(
         if not file:  # Skip empty file paths
             continue

-
-        # Handle edge case where path might be just a filename without parent
-        if file_path.parent == Path():
-            flow_id, file_name = "", file_path.name
-        else:
-            flow_id, file_name = str(file_path.parent), file_path.name
+        flow_id, file_name = storage_service.parse_file_path(file)

         if not file_name:  # Skip if no filename
             continue
```
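Both call sites now delegate the flow_id/file_name split to `storage_service.parse_file_path`. A sketch of what a local implementation might look like, reconstructed from the inline logic these hunks remove (the real implementation lives in lfx/services/storage/local.py, which this section does not show):

```python
from pathlib import Path


def parse_file_path(full_path: str) -> tuple[str, str]:
    """Sketch of a local parse_file_path, based on the inline logic the diff removes."""
    file_path = Path(full_path)
    # A bare filename has no parent component, so the flow_id is empty
    if file_path.parent == Path():
        return "", file_path.name
    return str(file_path.parent), file_path.name


assert parse_file_path("flow-123/report.csv") == ("flow-123", "report.csv")
assert parse_file_path("report.csv") == ("", "report.csv")
```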
lfx/services/interfaces.py
CHANGED
```diff
@@ -41,6 +41,11 @@ class StorageServiceProtocol(Protocol):
         """Build the full path of a file in the storage."""
         ...

+    @abstractmethod
+    def parse_file_path(self, full_path: str) -> tuple[str, str]:
+        """Parse a full storage path to extract flow_id and file_name."""
+        ...
+

 class SettingsServiceProtocol(Protocol):
     """Protocol for settings service."""
```
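Because `StorageServiceProtocol` is a `typing.Protocol`, conformance is structural: any service defining `parse_file_path` with this signature satisfies it without inheriting from it. A trimmed-down illustration with a hypothetical in-memory service:

```python
from typing import Protocol, runtime_checkable


@runtime_checkable
class StorageLike(Protocol):
    """Trimmed-down stand-in for StorageServiceProtocol."""

    def parse_file_path(self, full_path: str) -> tuple[str, str]: ...


class InMemoryStorage:  # no inheritance needed, conformance is structural
    def parse_file_path(self, full_path: str) -> tuple[str, str]:
        head, _, tail = full_path.rpartition("/")
        return head, tail


assert isinstance(InMemoryStorage(), StorageLike)
```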
lfx/services/manager.py
CHANGED
```diff
@@ -155,10 +155,11 @@ class ServiceManager:
                     factories.append(obj())
                     break

-            except Exception
-
-
-
+            except Exception:  # noqa: BLE001, S110
+                # This is expected during initial service discovery - some services
+                # may not have factories yet or depend on settings service being ready first
+                # Intentionally suppressed to avoid startup noise - not an error condition
+                pass

         return factories
```
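Where an exception is expected and deliberately ignored, `contextlib.suppress` states the intent more compactly than `except`/`pass`. This is an alternative pattern, not what the package uses; the module names below are made up:

```python
import contextlib
import importlib

factories = []
for module_name in ("existing_factory_module", "not_installed_yet"):
    # Missing or half-initialized modules are expected during discovery;
    # suppress() makes the intentional swallow explicit.
    with contextlib.suppress(Exception):
        module = importlib.import_module(module_name)
        factories.append(module)

print(f"Discovered {len(factories)} factory module(s)")
```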