lfx-nightly 0.2.0.dev26__py3-none-any.whl → 0.2.1.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +9 -4
  3. lfx/base/agents/altk_base_agent.py +16 -3
  4. lfx/base/agents/altk_tool_wrappers.py +1 -1
  5. lfx/base/agents/utils.py +4 -0
  6. lfx/base/composio/composio_base.py +78 -41
  7. lfx/base/data/base_file.py +14 -4
  8. lfx/base/data/cloud_storage_utils.py +156 -0
  9. lfx/base/data/docling_utils.py +191 -65
  10. lfx/base/data/storage_utils.py +109 -0
  11. lfx/base/datastax/astradb_base.py +75 -64
  12. lfx/base/mcp/util.py +2 -2
  13. lfx/base/models/__init__.py +11 -1
  14. lfx/base/models/anthropic_constants.py +21 -12
  15. lfx/base/models/google_generative_ai_constants.py +33 -9
  16. lfx/base/models/model_metadata.py +6 -0
  17. lfx/base/models/ollama_constants.py +196 -30
  18. lfx/base/models/openai_constants.py +37 -10
  19. lfx/base/models/unified_models.py +1123 -0
  20. lfx/base/models/watsonx_constants.py +36 -0
  21. lfx/base/tools/component_tool.py +2 -9
  22. lfx/cli/commands.py +6 -1
  23. lfx/cli/run.py +65 -409
  24. lfx/cli/script_loader.py +13 -3
  25. lfx/components/__init__.py +0 -3
  26. lfx/components/composio/github_composio.py +1 -1
  27. lfx/components/cuga/cuga_agent.py +39 -27
  28. lfx/components/data_source/api_request.py +4 -2
  29. lfx/components/docling/__init__.py +45 -11
  30. lfx/components/docling/chunk_docling_document.py +3 -1
  31. lfx/components/docling/docling_inline.py +39 -49
  32. lfx/components/docling/export_docling_document.py +3 -1
  33. lfx/components/elastic/opensearch_multimodal.py +215 -57
  34. lfx/components/files_and_knowledge/file.py +439 -39
  35. lfx/components/files_and_knowledge/ingestion.py +8 -0
  36. lfx/components/files_and_knowledge/retrieval.py +10 -0
  37. lfx/components/files_and_knowledge/save_file.py +123 -53
  38. lfx/components/ibm/watsonx.py +7 -1
  39. lfx/components/input_output/chat_output.py +7 -1
  40. lfx/components/langchain_utilities/tool_calling.py +14 -6
  41. lfx/components/llm_operations/batch_run.py +80 -25
  42. lfx/components/llm_operations/lambda_filter.py +33 -6
  43. lfx/components/llm_operations/llm_conditional_router.py +39 -7
  44. lfx/components/llm_operations/structured_output.py +38 -12
  45. lfx/components/models/__init__.py +16 -74
  46. lfx/components/models_and_agents/agent.py +51 -201
  47. lfx/components/models_and_agents/embedding_model.py +185 -339
  48. lfx/components/models_and_agents/language_model.py +54 -318
  49. lfx/components/models_and_agents/mcp_component.py +58 -9
  50. lfx/components/ollama/ollama.py +9 -4
  51. lfx/components/ollama/ollama_embeddings.py +2 -1
  52. lfx/components/openai/openai_chat_model.py +1 -1
  53. lfx/components/processing/__init__.py +0 -3
  54. lfx/components/vllm/__init__.py +37 -0
  55. lfx/components/vllm/vllm.py +141 -0
  56. lfx/components/vllm/vllm_embeddings.py +110 -0
  57. lfx/custom/custom_component/custom_component.py +8 -6
  58. lfx/custom/directory_reader/directory_reader.py +5 -2
  59. lfx/graph/utils.py +64 -18
  60. lfx/inputs/__init__.py +2 -0
  61. lfx/inputs/input_mixin.py +54 -0
  62. lfx/inputs/inputs.py +115 -0
  63. lfx/interface/initialize/loading.py +42 -12
  64. lfx/io/__init__.py +2 -0
  65. lfx/run/__init__.py +5 -0
  66. lfx/run/base.py +494 -0
  67. lfx/schema/data.py +1 -1
  68. lfx/schema/image.py +28 -19
  69. lfx/schema/message.py +19 -3
  70. lfx/services/interfaces.py +5 -0
  71. lfx/services/manager.py +5 -4
  72. lfx/services/mcp_composer/service.py +45 -13
  73. lfx/services/settings/auth.py +18 -11
  74. lfx/services/settings/base.py +12 -24
  75. lfx/services/settings/constants.py +2 -0
  76. lfx/services/storage/local.py +37 -0
  77. lfx/services/storage/service.py +19 -0
  78. lfx/utils/constants.py +1 -0
  79. lfx/utils/image.py +29 -11
  80. lfx/utils/validate_cloud.py +14 -3
  81. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/METADATA +5 -2
  82. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/RECORD +84 -78
  83. lfx/components/processing/dataframe_to_toolset.py +0 -259
  84. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/WHEEL +0 -0
  85. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/entry_points.txt +0 -0
@@ -111,15 +111,26 @@ def convert_kwargs(params):
     return params
 
 
-def load_from_env_vars(params, load_from_db_fields):
+def load_from_env_vars(params, load_from_db_fields, context=None):
     for field in load_from_db_fields:
         if field not in params or not params[field]:
             continue
-        key = os.getenv(params[field])
-        if key:
-            logger.info(f"Using environment variable {params[field]} for {field}")
-        else:
-            logger.error(f"Environment variable {params[field]} is not set.")
+        variable_name = params[field]
+        key = None
+
+        # Check request_variables in context
+        if context and "request_variables" in context:
+            request_variables = context["request_variables"]
+            if variable_name in request_variables:
+                key = request_variables[variable_name]
+                logger.debug(f"Found context override for variable '{variable_name}'")
+
+        if key is None:
+            key = os.getenv(variable_name)
+            if key:
+                logger.info(f"Using environment variable {variable_name} for {field}")
+            else:
+                logger.error(f"Environment variable {variable_name} is not set.")
         params[field] = key if key is not None else None
         if key is None:
             logger.warning(f"Could not get value for {field}. Setting it to None.")
@@ -142,6 +153,11 @@ async def update_table_params_with_load_from_db_fields(
     if not table_data or not load_from_db_columns:
         return params
 
+    # Extract context once for use throughout the function
+    context = None
+    if hasattr(custom_component, "graph") and hasattr(custom_component.graph, "context"):
+        context = custom_component.graph.context
+
     async with session_scope() as session:
         settings_service = get_settings_service()
         is_noop_session = isinstance(session, NoopSession) or (
@@ -170,11 +186,22 @@ async def update_table_params_with_load_from_db_fields(
             try:
                 if is_noop_session:
                     # Fallback to environment variables
-                    key = os.getenv(variable_name)
-                    if key:
-                        logger.info(f"Using environment variable {variable_name} for table column {column_name}")
-                    else:
-                        logger.error(f"Environment variable {variable_name} is not set.")
+                    key = None
+                    # Check request_variables first
+                    if context and "request_variables" in context:
+                        request_variables = context["request_variables"]
+                        if variable_name in request_variables:
+                            key = request_variables[variable_name]
+                            logger.debug(f"Found context override for variable '{variable_name}'")
+
+                    if key is None:
+                        key = os.getenv(variable_name)
+                        if key:
+                            logger.info(
+                                f"Using environment variable {variable_name} for table column {column_name}"
+                            )
+                        else:
+                            logger.error(f"Environment variable {variable_name} is not set.")
                 else:
                     # Load from database
                     key = await custom_component.get_variable(
@@ -222,7 +249,10 @@ async def update_params_with_load_from_db_fields(
     )
     if is_noop_session:
         logger.debug("Loading variables from environment variables because database is not available.")
-        return load_from_env_vars(params, load_from_db_fields)
+        context = None
+        if hasattr(custom_component, "graph") and hasattr(custom_component.graph, "context"):
+            context = custom_component.graph.context
+        return load_from_env_vars(params, load_from_db_fields, context=context)
     for field in load_from_db_fields:
         # Check if this is a table field (using our naming convention)
         if field.startswith("table:"):
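
Taken together, these hunks make a per-request "request_variables" mapping in the graph context take precedence over process environment variables. A minimal standalone sketch of the lookup order (resolve_variable is a hypothetical helper written for illustration, not the lfx code itself):

import os

def resolve_variable(variable_name: str, context: dict | None = None) -> str | None:
    """Resolve a variable: request_variables in the context win, then os.environ."""
    # 1. Per-request override carried in the graph context
    if context and "request_variables" in context:
        override = context["request_variables"].get(variable_name)
        if override is not None:
            return override
    # 2. Fall back to the process environment
    return os.getenv(variable_name)

os.environ["MY_API_KEY"] = "env-value"
ctx = {"request_variables": {"MY_API_KEY": "request-value"}}
assert resolve_variable("MY_API_KEY", ctx) == "request-value"
assert resolve_variable("MY_API_KEY") == "env-value"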
lfx/io/__init__.py CHANGED
@@ -14,6 +14,7 @@ from lfx.inputs import (
     McpInput,
     MessageInput,
     MessageTextInput,
+    ModelInput,
     MultilineInput,
     MultilineSecretInput,
     MultiselectInput,
@@ -47,6 +48,7 @@ __all__ = [
     "McpInput",
     "MessageInput",
     "MessageTextInput",
+    "ModelInput",
     "MultilineInput",
     "MultilineSecretInput",
     "MultiselectInput",
lfx/run/__init__.py ADDED
@@ -0,0 +1,5 @@
+"""Run module for executing Langflow graphs."""
+
+from lfx.run.base import RunError, run_flow
+
+__all__ = ["RunError", "run_flow"]
lfx/run/base.py ADDED
@@ -0,0 +1,494 @@
+"""Core run functionality for executing Langflow graphs."""
+
+import json
+import re
+import sys
+import tempfile
+import time
+from io import StringIO
+from pathlib import Path
+
+from lfx.cli.script_loader import (
+    extract_structured_result,
+    extract_text_from_result,
+    find_graph_variable,
+    load_graph_from_script,
+)
+from lfx.cli.validation import validate_global_variables_for_env
+from lfx.log.logger import logger
+from lfx.schema.schema import InputValueRequest
+
+# Verbosity level constants
+VERBOSITY_DETAILED = 2
+VERBOSITY_FULL = 3
+
+
+class RunError(Exception):
+    """Exception raised when run execution fails."""
+
+    def __init__(self, message: str, exception: Exception | None = None):
+        super().__init__(message)
+        self.original_exception = exception
+
+
+def output_error(error_message: str, *, verbose: bool, exception: Exception | None = None) -> dict:
+    """Create error response dict and optionally print to stderr when verbose."""
+    if verbose:
+        sys.stderr.write(f"{error_message}\n")
+
+    error_response = {
+        "success": False,
+        "type": "error",
+    }
+
+    # Add clean exception data if available
+    if exception:
+        error_response["exception_type"] = type(exception).__name__
+        error_response["exception_message"] = str(exception)
+    else:
+        error_response["exception_message"] = error_message
+
+    return error_response
+
+
+async def run_flow(
+    script_path: Path | None = None,
+    input_value: str | None = None,
+    input_value_option: str | None = None,
+    output_format: str = "json",
+    flow_json: str | None = None,
+    *,
+    stdin: bool = False,
+    check_variables: bool = True,
+    verbose: bool = False,
+    verbose_detailed: bool = False,
+    verbose_full: bool = False,
+    timing: bool = False,
+    global_variables: dict[str, str] | None = None,
+) -> dict:
+    """Execute a Langflow graph script or JSON flow and return the result.
+
+    This function analyzes and executes either a Python script containing a Langflow graph,
+    a JSON flow file, inline JSON, or JSON from stdin, returning the result as a dict.
+
+    Args:
+        script_path: Path to the Python script (.py) or JSON flow (.json) containing a graph
+        input_value: Input value to pass to the graph (positional argument)
+        input_value_option: Input value to pass to the graph (alternative option)
+        output_format: Format for output (json, text, message, or result)
+        flow_json: Inline JSON flow content as a string
+        stdin: Read JSON flow content from stdin
+        check_variables: Check global variables for environment compatibility
+        verbose: Show basic progress information
+        verbose_detailed: Show detailed progress and debug information
+        verbose_full: Show full debugging output including component logs
+        timing: Include detailed timing information in output
+        global_variables: Dict of global variables to inject into the graph context
+
+    Returns:
+        dict: Result data containing the execution results, logs, and optionally timing info
+
+    Raises:
+        RunError: If execution fails at any stage
+    """
+    # Configure logger based on verbosity level
+    from lfx.log.logger import configure
+
+    if verbose_full:
+        configure(log_level="DEBUG", output_file=sys.stderr)
+        verbosity = 3
+    elif verbose_detailed:
+        configure(log_level="DEBUG", output_file=sys.stderr)
+        verbosity = 2
+    elif verbose:
+        configure(log_level="INFO", output_file=sys.stderr)
+        verbosity = 1
+    else:
+        configure(log_level="CRITICAL", output_file=sys.stderr)
+        verbosity = 0
+
+    start_time = time.time() if timing else None
+
+    # Use either positional input_value or --input-value option
+    final_input_value = input_value or input_value_option
+
+    # Validate input sources - exactly one must be provided
+    input_sources = [script_path is not None, flow_json is not None, bool(stdin)]
+    if sum(input_sources) != 1:
+        if sum(input_sources) == 0:
+            error_msg = "No input source provided. Must provide either script_path, --flow-json, or --stdin"
+        else:
+            error_msg = (
+                "Multiple input sources provided. Cannot use script_path, --flow-json, and "
+                "--stdin together. Choose exactly one."
+            )
+        output_error(error_msg, verbose=verbose)
+        raise RunError(error_msg, None)
+
+    temp_file_to_cleanup = None
+
+    if flow_json is not None:
+        if verbosity > 0:
+            sys.stderr.write("Processing inline JSON content...\n")
+        try:
+            json_data = json.loads(flow_json)
+            if verbosity > 0:
+                sys.stderr.write("JSON content is valid\n")
+            with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as temp_file:
+                json.dump(json_data, temp_file, indent=2)
+                temp_file_to_cleanup = temp_file.name
+            script_path = Path(temp_file_to_cleanup)
+            if verbosity > 0:
+                sys.stderr.write(f"Created temporary file: {script_path}\n")
+        except json.JSONDecodeError as e:
+            error_msg = f"Invalid JSON content: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+        except Exception as e:
+            error_msg = f"Error processing JSON content: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+    elif stdin:
+        if verbosity > 0:
+            sys.stderr.write("Reading JSON content from stdin...\n")
+        try:
+            stdin_content = sys.stdin.read().strip()
+            if not stdin_content:
+                error_msg = "No content received from stdin"
+                output_error(error_msg, verbose=verbose)
+                raise RunError(error_msg, None)
+            json_data = json.loads(stdin_content)
+            if verbosity > 0:
+                sys.stderr.write("JSON content from stdin is valid\n")
+            with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as temp_file:
+                json.dump(json_data, temp_file, indent=2)
+                temp_file_to_cleanup = temp_file.name
+            script_path = Path(temp_file_to_cleanup)
+            if verbosity > 0:
+                sys.stderr.write(f"Created temporary file from stdin: {script_path}\n")
+        except json.JSONDecodeError as e:
+            error_msg = f"Invalid JSON content from stdin: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+        except Exception as e:
+            error_msg = f"Error reading from stdin: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+
+    try:
+        if not script_path or not script_path.exists():
+            error_msg = f"File '{script_path}' does not exist."
+            raise ValueError(error_msg)
+        if not script_path.is_file():
+            error_msg = f"'{script_path}' is not a file."
+            raise ValueError(error_msg)
+        file_extension = script_path.suffix.lower()
+        if file_extension not in [".py", ".json"]:
+            error_msg = f"'{script_path}' must be a .py or .json file."
+            raise ValueError(error_msg)
+        file_type = "Python script" if file_extension == ".py" else "JSON flow"
+        if verbosity > 0:
+            sys.stderr.write(f"Analyzing {file_type}: {script_path}\n")
+        if file_extension == ".py":
+            graph_info = find_graph_variable(script_path)
+            if not graph_info:
+                error_msg = (
+                    "No 'graph' variable found in the script. Expected to find an assignment like: graph = Graph(...)"
+                )
+                raise ValueError(error_msg)
+            if verbosity > 0:
+                sys.stderr.write(f"Found 'graph' variable at line {graph_info['line_number']}\n")
+                sys.stderr.write(f"Type: {graph_info['type']}\n")
+                sys.stderr.write(f"Source: {graph_info['source_line']}\n")
+                sys.stderr.write("Loading and executing script...\n")
+            graph = await load_graph_from_script(script_path)
+        elif file_extension == ".json":
+            if verbosity > 0:
+                sys.stderr.write("Valid JSON flow file detected\n")
+                sys.stderr.write("Loading and executing JSON flow\n")
+            from lfx.load import aload_flow_from_json
+
+            graph = await aload_flow_from_json(script_path, disable_logs=not verbose)
+
+        # Inject global variables into graph context
+        if global_variables:
+            if "request_variables" not in graph.context:
+                graph.context["request_variables"] = {}
+            graph.context["request_variables"].update(global_variables)
+            if verbosity > 0:
+                # Log keys only to avoid leaking sensitive data
+                logger.info(f"Injected global variables: {list(global_variables.keys())}")
+
+    except Exception as e:
+        error_type = type(e).__name__
+        logger.error(f"Graph loading failed with {error_type}")
+
+        if verbosity > 0:
+            # Try to identify common error patterns
+            if "ModuleNotFoundError" in str(e) or "No module named" in str(e):
+                logger.info("This appears to be a missing dependency issue")
+                if "langchain" in str(e).lower():
+                    match = re.search(r"langchain_(.*)", str(e).lower())
+                    if match:
+                        module_name = match.group(1)
+                        logger.info(
+                            f"Missing LangChain dependency detected. Try: pip install langchain-{module_name}",
+                        )
+            elif "ImportError" in str(e):
+                logger.info("This appears to be an import issue - check component dependencies")
+            elif "AttributeError" in str(e):
+                logger.info("This appears to be a component configuration issue")
+
+            # Show full traceback in debug mode
+            logger.exception("Failed to load graph.")
+
+        error_msg = f"Failed to load graph. {e}"
+        output_error(error_msg, verbose=verbose, exception=e)
+        if temp_file_to_cleanup:
+            try:
+                Path(temp_file_to_cleanup).unlink()
+                logger.info(f"Cleaned up temporary file: {temp_file_to_cleanup}")
+            except OSError:
+                pass
+        raise RunError(error_msg, e) from e
+
+    inputs = InputValueRequest(input_value=final_input_value) if final_input_value else None
+
+    # Mark end of loading phase if timing
+    load_end_time = time.time() if timing else None
+
+    if verbosity > 0:
+        sys.stderr.write("Preparing graph for execution...\n")
+    try:
+        # Add detailed preparation steps
+        if verbosity > 0:
+            logger.debug(f"Graph contains {len(graph.vertices)} vertices")
+            logger.debug(f"Graph contains {len(graph.edges)} edges")
+
+            # Show component types being used
+            component_types = set()
+            for vertex in graph.vertices:
+                if hasattr(vertex, "display_name"):
+                    component_types.add(vertex.display_name)
+            logger.debug(f"Component types in graph: {', '.join(sorted(component_types))}")
+
+        graph.prepare()
+        logger.info("Graph preparation completed")
+
+        # Validate global variables for environment compatibility
+        if check_variables:
+            logger.info("Validating global variables...")
+            validation_errors = validate_global_variables_for_env(graph)
+            if validation_errors:
+                error_details = "Global variable validation failed: " + "; ".join(validation_errors)
+                logger.info(f"Variable validation failed: {len(validation_errors)} errors")
+                for error in validation_errors:
+                    logger.debug(f"Validation error: {error}")
+                output_error(error_details, verbose=verbose)
+                if temp_file_to_cleanup:
+                    try:
+                        Path(temp_file_to_cleanup).unlink()
+                        logger.info(f"Cleaned up temporary file: {temp_file_to_cleanup}")
+                    except OSError:
+                        pass
+                if validation_errors:
+                    raise RunError(error_details, None)
+            logger.info("Global variable validation passed")
+        else:
+            logger.info("Global variable validation skipped")
+    except RunError:
+        raise
+    except Exception as e:
+        error_type = type(e).__name__
+        logger.info(f"Graph preparation failed with {error_type}")
+
+        if verbosity > 0:
+            logger.debug(f"Preparation error: {e!s}")
+            logger.exception("Failed to prepare graph - full traceback:")
+
+        error_msg = f"Failed to prepare graph: {e}"
+        output_error(error_msg, verbose=verbose, exception=e)
+        if temp_file_to_cleanup:
+            try:
+                Path(temp_file_to_cleanup).unlink()
+                logger.info(f"Cleaned up temporary file: {temp_file_to_cleanup}")
+            except OSError:
+                pass
+        raise RunError(error_msg, e) from e
+
+    logger.info("Executing graph...")
+    execution_start_time = time.time() if timing else None
+    if verbose:
+        logger.debug("Setting up execution environment")
+        if inputs:
+            logger.debug(f"Input provided: {inputs.input_value}")
+        else:
+            logger.debug("No input provided")
+
+    captured_stdout = StringIO()
+    captured_stderr = StringIO()
+    original_stdout = sys.stdout
+    original_stderr = sys.stderr
+
+    # Track component timing if requested
+    component_timings = [] if timing else None
+    execution_step_start = execution_start_time if timing else None
+    result_count = 0
+
+    try:
+        sys.stdout = captured_stdout
+        # Don't capture stderr at high verbosity levels to avoid duplication with direct logging
+        if verbosity < VERBOSITY_FULL:
+            sys.stderr = captured_stderr
+        results = []
+
+        logger.info("Starting graph execution...", level="DEBUG")
+
+        async for result in graph.async_start(inputs):
+            result_count += 1
+            if verbosity > 0:
+                logger.debug(f"Processing result #{result_count}")
+                if hasattr(result, "vertex") and hasattr(result.vertex, "display_name"):
+                    logger.debug(f"Component: {result.vertex.display_name}")
+            if timing:
+                step_end_time = time.time()
+                step_duration = step_end_time - execution_step_start
+
+                # Extract component information
+                if hasattr(result, "vertex"):
+                    component_name = getattr(result.vertex, "display_name", "Unknown")
+                    component_id = getattr(result.vertex, "id", "Unknown")
+                    component_timings.append(
+                        {
+                            "component": component_name,
+                            "component_id": component_id,
+                            "duration": step_duration,
+                            "cumulative_time": step_end_time - execution_start_time,
+                        }
+                    )
+
+                execution_step_start = step_end_time
+
+            results.append(result)
+
+        logger.info(f"Graph execution completed. Processed {result_count} results")
+
+    except Exception as e:
+        error_type = type(e).__name__
+        logger.info(f"Graph execution failed with {error_type}")
+
+        if verbosity >= VERBOSITY_DETAILED:  # Only show details at -vv and above
+            logger.debug(f"Failed after processing {result_count} results")
+
+            # Only show component output at maximum verbosity (-vvv)
+            if verbosity >= VERBOSITY_FULL:
+                # Capture any output that was generated before the error
+                # Only show captured stdout since stderr logging is already shown directly in verbose mode
+                captured_content = captured_stdout.getvalue()
+                if captured_content.strip():
+                    # Check if captured content contains the same error that will be displayed at the end
+                    error_text = str(e)
+                    captured_lines = captured_content.strip().split("\n")
+
+                    # Filter out lines that are duplicates of the final error message
+                    unique_lines = [
+                        line
+                        for line in captured_lines
+                        if not any(
+                            error_part.strip() in line for error_part in error_text.split("\n") if error_part.strip()
+                        )
+                    ]
+
+                    if unique_lines:
+                        logger.info("Component output before error:", level="DEBUG")
+                        for line in unique_lines:
+                            # Log each line directly using the logger to avoid nested formatting
+                            if verbosity > 0:
+                                # Remove any existing timestamp prefix to avoid duplication
+                                clean_line = line
+                                if "] " in line and line.startswith("2025-"):
+                                    # Extract just the log message after the timestamp and level
+                                    parts = line.split("] ", 1)
+                                    if len(parts) > 1:
+                                        clean_line = parts[1]
+                                logger.debug(clean_line)
+
+            # Provide context about common execution errors
+            if "list can't be used in 'await' expression" in str(e):
+                logger.info("This appears to be an async/await mismatch in a component")
+                logger.info("Check that async methods are properly awaited")
+            elif "AttributeError" in error_type and "NoneType" in str(e):
+                logger.info("This appears to be a null reference error")
+                logger.info("A component may be receiving unexpected None values")
+            elif "ConnectionError" in str(e) or "TimeoutError" in str(e):
+                logger.info("This appears to be a network connectivity issue")
+                logger.info("Check API keys and network connectivity")
+
+            logger.exception("Failed to execute graph - full traceback:")
+
+        if temp_file_to_cleanup:
+            try:
+                Path(temp_file_to_cleanup).unlink()
+                logger.info(f"Cleaned up temporary file: {temp_file_to_cleanup}")
+            except OSError:
+                pass
+        sys.stdout = original_stdout
+        sys.stderr = original_stderr
+        error_msg = f"Failed to execute graph: {e}"
+        output_error(error_msg, verbose=verbosity > 0, exception=e)
+        raise RunError(error_msg, e) from e
+    finally:
+        sys.stdout = original_stdout
+        sys.stderr = original_stderr
+        if temp_file_to_cleanup:
+            try:
+                Path(temp_file_to_cleanup).unlink()
+                logger.info(f"Cleaned up temporary file: {temp_file_to_cleanup}")
+            except OSError:
+                pass
+
+    execution_end_time = time.time() if timing else None
+
+    captured_logs = captured_stdout.getvalue() + captured_stderr.getvalue()
+
+    # Create timing metadata if requested
+    timing_metadata = None
+    if timing:
+        load_duration = load_end_time - start_time
+        execution_duration = execution_end_time - execution_start_time
+        total_duration = execution_end_time - start_time
+
+        timing_metadata = {
+            "load_time": round(load_duration, 3),
+            "execution_time": round(execution_duration, 3),
+            "total_time": round(total_duration, 3),
+            "component_timings": [
+                {
+                    "component": ct["component"],
+                    "component_id": ct["component_id"],
+                    "duration": round(ct["duration"], 3),
+                    "cumulative_time": round(ct["cumulative_time"], 3),
+                }
+                for ct in component_timings
+            ],
+        }
+
+    # Build result based on output format
+    if output_format == "json":
+        result_data = extract_structured_result(results)
+        result_data["logs"] = captured_logs
+        if timing_metadata:
+            result_data["timing"] = timing_metadata
+        return result_data
+    if output_format in {"text", "message"}:
+        result_data = extract_structured_result(results)
+        output_text = result_data.get("result", result_data.get("text", ""))
+        return {"output": str(output_text), "format": output_format}
+    if output_format == "result":
+        return {"output": extract_text_from_result(results), "format": "result"}
+    # Default case
+    result_data = extract_structured_result(results)
+    result_data["logs"] = captured_logs
+    if timing_metadata:
+        result_data["timing"] = timing_metadata
+    return result_data
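
The run/__init__.py module above re-exports run_flow and RunError, so the new API is reachable as lfx.run. A hedged usage sketch based only on the signature and docstring shown in this diff (my_flow.json and the input string are placeholders):

import asyncio
from pathlib import Path

from lfx.run import RunError, run_flow

async def main() -> None:
    try:
        result = await run_flow(
            script_path=Path("my_flow.json"),  # placeholder flow file
            input_value="Hello, world!",
            output_format="json",
            timing=True,
        )
    except RunError as err:
        # RunError wraps the failure and keeps the original exception around
        print(f"Run failed: {err} (cause: {err.original_exception})")
        return
    # With output_format="json" the dict carries the structured result plus
    # captured logs, and timing=True adds a "timing" key with load_time,
    # execution_time, total_time, and per-component timings.
    print(result.get("timing"))

asyncio.run(main())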
lfx/schema/data.py CHANGED
@@ -209,7 +209,7 @@ class Data(CrossModuleModel):
         """
         if key in {"data", "text_key"} or key.startswith("_"):
             super().__setattr__(key, value)
-        elif key in self.model_fields:
+        elif key in type(self).model_fields:
             self.data[key] = value
             super().__setattr__(key, value)
         else:
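
The switch from self.model_fields to type(self).model_fields is likely motivated by recent Pydantic 2.x releases (2.11 onward) deprecating instance access to model_fields; reading the attribute from the class yields the same mapping without the deprecation warning. A minimal illustration (Item is a hypothetical model, not part of lfx):

from pydantic import BaseModel

class Item(BaseModel):
    name: str = ""

item = Item()
# item.model_fields triggers a DeprecationWarning on recent Pydantic 2.x;
# class access returns the same mapping warning-free.
assert type(item).model_fields == Item.model_fields
print("name" in type(item).model_fields)  # True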