lfx-nightly 0.2.0.dev41__py3-none-any.whl → 0.3.0.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. lfx/__main__.py +137 -6
  2. lfx/_assets/component_index.json +1 -1
  3. lfx/base/agents/agent.py +10 -6
  4. lfx/base/agents/altk_base_agent.py +5 -3
  5. lfx/base/agents/altk_tool_wrappers.py +1 -1
  6. lfx/base/agents/events.py +1 -1
  7. lfx/base/agents/utils.py +4 -0
  8. lfx/base/composio/composio_base.py +78 -41
  9. lfx/base/data/cloud_storage_utils.py +156 -0
  10. lfx/base/data/docling_utils.py +130 -55
  11. lfx/base/datastax/astradb_base.py +75 -64
  12. lfx/base/embeddings/embeddings_class.py +113 -0
  13. lfx/base/models/__init__.py +11 -1
  14. lfx/base/models/google_generative_ai_constants.py +33 -9
  15. lfx/base/models/model_metadata.py +6 -0
  16. lfx/base/models/ollama_constants.py +196 -30
  17. lfx/base/models/openai_constants.py +37 -10
  18. lfx/base/models/unified_models.py +1123 -0
  19. lfx/base/models/watsonx_constants.py +43 -4
  20. lfx/base/prompts/api_utils.py +40 -5
  21. lfx/base/tools/component_tool.py +2 -9
  22. lfx/cli/__init__.py +10 -2
  23. lfx/cli/commands.py +3 -0
  24. lfx/cli/run.py +65 -409
  25. lfx/cli/script_loader.py +18 -7
  26. lfx/cli/validation.py +6 -3
  27. lfx/components/__init__.py +0 -3
  28. lfx/components/composio/github_composio.py +1 -1
  29. lfx/components/cuga/cuga_agent.py +39 -27
  30. lfx/components/data_source/api_request.py +4 -2
  31. lfx/components/datastax/astradb_assistant_manager.py +4 -2
  32. lfx/components/docling/__init__.py +45 -11
  33. lfx/components/docling/docling_inline.py +39 -49
  34. lfx/components/docling/docling_remote.py +1 -0
  35. lfx/components/elastic/opensearch_multimodal.py +1733 -0
  36. lfx/components/files_and_knowledge/file.py +384 -36
  37. lfx/components/files_and_knowledge/ingestion.py +8 -0
  38. lfx/components/files_and_knowledge/retrieval.py +10 -0
  39. lfx/components/files_and_knowledge/save_file.py +91 -88
  40. lfx/components/langchain_utilities/ibm_granite_handler.py +211 -0
  41. lfx/components/langchain_utilities/tool_calling.py +37 -6
  42. lfx/components/llm_operations/batch_run.py +64 -18
  43. lfx/components/llm_operations/lambda_filter.py +213 -101
  44. lfx/components/llm_operations/llm_conditional_router.py +39 -7
  45. lfx/components/llm_operations/structured_output.py +38 -12
  46. lfx/components/models/__init__.py +16 -74
  47. lfx/components/models_and_agents/agent.py +51 -203
  48. lfx/components/models_and_agents/embedding_model.py +171 -255
  49. lfx/components/models_and_agents/language_model.py +54 -318
  50. lfx/components/models_and_agents/mcp_component.py +96 -10
  51. lfx/components/models_and_agents/prompt.py +105 -18
  52. lfx/components/ollama/ollama_embeddings.py +111 -29
  53. lfx/components/openai/openai_chat_model.py +1 -1
  54. lfx/components/processing/text_operations.py +580 -0
  55. lfx/components/vllm/__init__.py +37 -0
  56. lfx/components/vllm/vllm.py +141 -0
  57. lfx/components/vllm/vllm_embeddings.py +110 -0
  58. lfx/custom/custom_component/component.py +65 -10
  59. lfx/custom/custom_component/custom_component.py +8 -6
  60. lfx/events/observability/__init__.py +0 -0
  61. lfx/events/observability/lifecycle_events.py +111 -0
  62. lfx/field_typing/__init__.py +57 -58
  63. lfx/graph/graph/base.py +40 -1
  64. lfx/graph/utils.py +109 -30
  65. lfx/graph/vertex/base.py +75 -23
  66. lfx/graph/vertex/vertex_types.py +0 -5
  67. lfx/inputs/__init__.py +2 -0
  68. lfx/inputs/input_mixin.py +55 -0
  69. lfx/inputs/inputs.py +120 -0
  70. lfx/interface/components.py +24 -7
  71. lfx/interface/initialize/loading.py +42 -12
  72. lfx/io/__init__.py +2 -0
  73. lfx/run/__init__.py +5 -0
  74. lfx/run/base.py +464 -0
  75. lfx/schema/__init__.py +50 -0
  76. lfx/schema/data.py +1 -1
  77. lfx/schema/image.py +26 -7
  78. lfx/schema/message.py +104 -11
  79. lfx/schema/workflow.py +171 -0
  80. lfx/services/deps.py +12 -0
  81. lfx/services/interfaces.py +43 -1
  82. lfx/services/mcp_composer/service.py +7 -1
  83. lfx/services/schema.py +1 -0
  84. lfx/services/settings/auth.py +95 -4
  85. lfx/services/settings/base.py +11 -1
  86. lfx/services/settings/constants.py +2 -0
  87. lfx/services/settings/utils.py +82 -0
  88. lfx/services/storage/local.py +13 -8
  89. lfx/services/transaction/__init__.py +5 -0
  90. lfx/services/transaction/service.py +35 -0
  91. lfx/tests/unit/components/__init__.py +0 -0
  92. lfx/utils/constants.py +2 -0
  93. lfx/utils/mustache_security.py +79 -0
  94. lfx/utils/validate_cloud.py +81 -3
  95. {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/METADATA +7 -2
  96. {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/RECORD +98 -80
  97. {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/WHEEL +0 -0
  98. {lfx_nightly-0.2.0.dev41.dist-info → lfx_nightly-0.3.0.dev3.dist-info}/entry_points.txt +0 -0
lfx/run/base.py ADDED
@@ -0,0 +1,464 @@
+"""Core run functionality for executing Langflow graphs."""
+
+import json
+import re
+import sys
+import time
+from io import StringIO
+from pathlib import Path
+
+from lfx.cli.script_loader import (
+    extract_structured_result,
+    extract_text_from_result,
+    find_graph_variable,
+    load_graph_from_script,
+)
+from lfx.cli.validation import validate_global_variables_for_env
+from lfx.log.logger import logger
+from lfx.schema.schema import InputValueRequest
+
+# Verbosity level constants
+VERBOSITY_DETAILED = 2
+VERBOSITY_FULL = 3
+
+
+class RunError(Exception):
+    """Exception raised when run execution fails."""
+
+    def __init__(self, message: str, exception: Exception | None = None):
+        super().__init__(message)
+        self.original_exception = exception
+
+
+def output_error(error_message: str, *, verbose: bool, exception: Exception | None = None) -> dict:
+    """Create error response dict and optionally print to stderr when verbose."""
+    if verbose:
+        sys.stderr.write(f"{error_message}\n")
+
+    error_response = {
+        "success": False,
+        "type": "error",
+    }
+
+    # Add clean exception data if available
+    if exception:
+        error_response["exception_type"] = type(exception).__name__
+        error_response["exception_message"] = str(exception)
+    else:
+        error_response["exception_message"] = error_message
+
+    return error_response
+
+
+async def run_flow(
+    script_path: Path | None = None,
+    input_value: str | None = None,
+    input_value_option: str | None = None,
+    output_format: str = "json",
+    flow_json: str | None = None,
+    *,
+    stdin: bool = False,
+    check_variables: bool = True,
+    verbose: bool = False,
+    verbose_detailed: bool = False,
+    verbose_full: bool = False,
+    timing: bool = False,
+    global_variables: dict[str, str] | None = None,
+) -> dict:
+    """Execute a Langflow graph script or JSON flow and return the result.
+
+    This function analyzes and executes either a Python script containing a Langflow graph,
+    a JSON flow file, inline JSON, or JSON from stdin, returning the result as a dict.
+
+    Args:
+        script_path: Path to the Python script (.py) or JSON flow (.json) containing a graph
+        input_value: Input value to pass to the graph (positional argument)
+        input_value_option: Input value to pass to the graph (alternative option)
+        output_format: Format for output (json, text, message, or result)
+        flow_json: Inline JSON flow content as a string
+        stdin: Read JSON flow content from stdin
+        check_variables: Check global variables for environment compatibility
+        verbose: Show basic progress information
+        verbose_detailed: Show detailed progress and debug information
+        verbose_full: Show full debugging output including component logs
+        timing: Include detailed timing information in output
+        global_variables: Dict of global variables to inject into the graph context
+
+    Returns:
+        dict: Result data containing the execution results, logs, and optionally timing info
+
+    Raises:
+        RunError: If execution fails at any stage
+    """
+    # Configure logger based on verbosity level
+    from lfx.log.logger import configure
+
+    if verbose_full:
+        configure(log_level="DEBUG", output_file=sys.stderr)
+        verbosity = 3
+    elif verbose_detailed:
+        configure(log_level="DEBUG", output_file=sys.stderr)
+        verbosity = 2
+    elif verbose:
+        configure(log_level="INFO", output_file=sys.stderr)
+        verbosity = 1
+    else:
+        configure(log_level="CRITICAL", output_file=sys.stderr)
+        verbosity = 0
+
+    start_time = time.time() if timing else None
+
+    # Use either positional input_value or --input-value option
+    final_input_value = input_value or input_value_option
+
+    # Validate input sources - exactly one must be provided
+    input_sources = [script_path is not None, flow_json is not None, bool(stdin)]
+    if sum(input_sources) != 1:
+        if sum(input_sources) == 0:
+            error_msg = "No input source provided. Must provide either script_path, --flow-json, or --stdin"
+        else:
+            error_msg = (
+                "Multiple input sources provided. Cannot use script_path, --flow-json, and "
+                "--stdin together. Choose exactly one."
+            )
+        output_error(error_msg, verbose=verbose)
+        raise RunError(error_msg, None)
+
+    # Store parsed JSON dict for direct loading (avoids temp file round-trip)
+    flow_dict: dict | None = None
+
+    if flow_json is not None:
+        if verbosity > 0:
+            sys.stderr.write("Processing inline JSON content...\n")
+        try:
+            flow_dict = json.loads(flow_json)
+            if verbosity > 0:
+                sys.stderr.write("JSON content is valid\n")
+        except json.JSONDecodeError as e:
+            error_msg = f"Invalid JSON content: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+        except Exception as e:
+            error_msg = f"Error processing JSON content: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+    elif stdin:
+        if verbosity > 0:
+            sys.stderr.write("Reading JSON content from stdin...\n")
+        try:
+            stdin_content = sys.stdin.read().strip()
+            if not stdin_content:
+                error_msg = "No content received from stdin"
+                output_error(error_msg, verbose=verbose)
+                raise RunError(error_msg, None)
+            flow_dict = json.loads(stdin_content)
+            if verbosity > 0:
+                sys.stderr.write("JSON content from stdin is valid\n")
+        except json.JSONDecodeError as e:
+            error_msg = f"Invalid JSON content from stdin: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+        except Exception as e:
+            error_msg = f"Error reading from stdin: {e}"
+            output_error(error_msg, verbose=verbose)
+            raise RunError(error_msg, e) from e
+
+    try:
+        # Handle direct JSON dict (from stdin or --flow-json)
+        if flow_dict is not None:
+            if verbosity > 0:
+                sys.stderr.write("Loading graph from JSON content...\n")
+            from lfx.load import aload_flow_from_json
+
+            graph = await aload_flow_from_json(flow_dict, disable_logs=not verbose)
+        # Handle file path
+        elif script_path is not None:
+            if not script_path.exists():
+                error_msg = f"File '{script_path}' does not exist."
+                raise ValueError(error_msg)
+            if not script_path.is_file():
+                error_msg = f"'{script_path}' is not a file."
+                raise ValueError(error_msg)
+            file_extension = script_path.suffix.lower()
+            if file_extension not in [".py", ".json"]:
+                error_msg = f"'{script_path}' must be a .py or .json file."
+                raise ValueError(error_msg)
+            file_type = "Python script" if file_extension == ".py" else "JSON flow"
+            if verbosity > 0:
+                sys.stderr.write(f"Analyzing {file_type}: {script_path}\n")
+            if file_extension == ".py":
+                graph_info = find_graph_variable(script_path)
+                if not graph_info:
+                    error_msg = (
+                        "No 'graph' variable found in the script. "
+                        "Expected to find an assignment like: graph = Graph(...)"
+                    )
+                    raise ValueError(error_msg)
+                if verbosity > 0:
+                    sys.stderr.write(f"Found 'graph' variable at line {graph_info['line_number']}\n")
+                    sys.stderr.write(f"Type: {graph_info['type']}\n")
+                    sys.stderr.write(f"Source: {graph_info['source_line']}\n")
+                    sys.stderr.write("Loading and executing script...\n")
+                graph = await load_graph_from_script(script_path)
+            else:  # .json file
+                if verbosity > 0:
+                    sys.stderr.write("Valid JSON flow file detected\n")
+                    sys.stderr.write("Loading and executing JSON flow\n")
+                from lfx.load import aload_flow_from_json
+
+                graph = await aload_flow_from_json(script_path, disable_logs=not verbose)
+        else:
+            error_msg = "No input source provided"
+            raise ValueError(error_msg)
+
+        # Inject global variables into graph context
+        if global_variables:
+            if "request_variables" not in graph.context:
+                graph.context["request_variables"] = {}
+            graph.context["request_variables"].update(global_variables)
+            if verbosity > 0:
+                # Log keys only to avoid leaking sensitive data
+                logger.info(f"Injected global variables: {list(global_variables.keys())}")
+
+    except Exception as e:
+        error_type = type(e).__name__
+        logger.error(f"Graph loading failed with {error_type}")
+
+        if verbosity > 0:
+            # Try to identify common error patterns
+            if "ModuleNotFoundError" in str(e) or "No module named" in str(e):
+                logger.info("This appears to be a missing dependency issue")
+                if "langchain" in str(e).lower():
+                    match = re.search(r"langchain_(.*)", str(e).lower())
+                    if match:
+                        module_name = match.group(1)
+                        logger.info(
+                            f"Missing LangChain dependency detected. Try: pip install langchain-{module_name}",
+                        )
+            elif "ImportError" in str(e):
+                logger.info("This appears to be an import issue - check component dependencies")
+            elif "AttributeError" in str(e):
+                logger.info("This appears to be a component configuration issue")
+
+            # Show full traceback in debug mode
+            logger.exception("Failed to load graph.")
+
+        error_msg = f"Failed to load graph. {e}"
+        output_error(error_msg, verbose=verbose, exception=e)
+        raise RunError(error_msg, e) from e
+
+    inputs = InputValueRequest(input_value=final_input_value) if final_input_value else None
+
+    # Mark end of loading phase if timing
+    load_end_time = time.time() if timing else None
+
+    if verbosity > 0:
+        sys.stderr.write("Preparing graph for execution...\n")
+    try:
+        # Add detailed preparation steps
+        if verbosity > 0:
+            logger.debug(f"Graph contains {len(graph.vertices)} vertices")
+            logger.debug(f"Graph contains {len(graph.edges)} edges")
+
+            # Show component types being used
+            component_types = set()
+            for vertex in graph.vertices:
+                if hasattr(vertex, "display_name"):
+                    component_types.add(vertex.display_name)
+            logger.debug(f"Component types in graph: {', '.join(sorted(component_types))}")
+
+        graph.prepare()
+        logger.info("Graph preparation completed")
+
+        # Validate global variables for environment compatibility
+        if check_variables:
+            logger.info("Validating global variables...")
+            validation_errors = validate_global_variables_for_env(graph)
+            if validation_errors:
+                error_details = "Global variable validation failed: " + "; ".join(validation_errors)
+                logger.info(f"Variable validation failed: {len(validation_errors)} errors")
+                for error in validation_errors:
+                    logger.debug(f"Validation error: {error}")
+                output_error(error_details, verbose=verbose)
+                raise RunError(error_details, None)
+            logger.info("Global variable validation passed")
+        else:
+            logger.info("Global variable validation skipped")
+    except RunError:
+        raise
+    except Exception as e:
+        error_type = type(e).__name__
+        logger.info(f"Graph preparation failed with {error_type}")
+
+        if verbosity > 0:
+            logger.debug(f"Preparation error: {e!s}")
+            logger.exception("Failed to prepare graph - full traceback:")
+
+        error_msg = f"Failed to prepare graph: {e}"
+        output_error(error_msg, verbose=verbose, exception=e)
+        raise RunError(error_msg, e) from e
+
+    logger.info("Executing graph...")
+    execution_start_time = time.time() if timing else None
+    if verbose:
+        logger.debug("Setting up execution environment")
+        if inputs:
+            logger.debug(f"Input provided: {inputs.input_value}")
+        else:
+            logger.debug("No input provided")
+
+    captured_stdout = StringIO()
+    captured_stderr = StringIO()
+    original_stdout = sys.stdout
+    original_stderr = sys.stderr
+
+    # Track component timing if requested
+    component_timings = [] if timing else None
+    execution_step_start = execution_start_time if timing else None
+    result_count = 0
+
+    try:
+        sys.stdout = captured_stdout
+        # Don't capture stderr at high verbosity levels to avoid duplication with direct logging
+        if verbosity < VERBOSITY_FULL:
+            sys.stderr = captured_stderr
+        results = []
+
+        logger.info("Starting graph execution...", level="DEBUG")
+
+        async for result in graph.async_start(inputs):
+            result_count += 1
+            if verbosity > 0:
+                logger.debug(f"Processing result #{result_count}")
+                if hasattr(result, "vertex") and hasattr(result.vertex, "display_name"):
+                    logger.debug(f"Component: {result.vertex.display_name}")
+            if timing:
+                step_end_time = time.time()
+                step_duration = step_end_time - execution_step_start
+
+                # Extract component information
+                if hasattr(result, "vertex"):
+                    component_name = getattr(result.vertex, "display_name", "Unknown")
+                    component_id = getattr(result.vertex, "id", "Unknown")
+                    component_timings.append(
+                        {
+                            "component": component_name,
+                            "component_id": component_id,
+                            "duration": step_duration,
+                            "cumulative_time": step_end_time - execution_start_time,
+                        }
+                    )
+
+                execution_step_start = step_end_time
+
+            results.append(result)
+
+        logger.info(f"Graph execution completed. Processed {result_count} results")
+
+    except Exception as e:
+        error_type = type(e).__name__
+        logger.info(f"Graph execution failed with {error_type}")
+
+        if verbosity >= VERBOSITY_DETAILED:  # Only show details at -vv and above
+            logger.debug(f"Failed after processing {result_count} results")
+
+            # Only show component output at maximum verbosity (-vvv)
+            if verbosity >= VERBOSITY_FULL:
+                # Capture any output that was generated before the error
+                # Only show captured stdout since stderr logging is already shown directly in verbose mode
+                captured_content = captured_stdout.getvalue()
+                if captured_content.strip():
+                    # Check if captured content contains the same error that will be displayed at the end
+                    error_text = str(e)
+                    captured_lines = captured_content.strip().split("\n")
+
+                    # Filter out lines that are duplicates of the final error message
+                    unique_lines = [
+                        line
+                        for line in captured_lines
+                        if not any(
+                            error_part.strip() in line for error_part in error_text.split("\n") if error_part.strip()
+                        )
+                    ]
+
+                    if unique_lines:
+                        logger.info("Component output before error:", level="DEBUG")
+                        for line in unique_lines:
+                            # Log each line directly using the logger to avoid nested formatting
+                            if verbosity > 0:
+                                # Remove any existing timestamp prefix to avoid duplication
+                                clean_line = line
+                                if "] " in line and line.startswith("2025-"):
+                                    # Extract just the log message after the timestamp and level
+                                    parts = line.split("] ", 1)
+                                    if len(parts) > 1:
+                                        clean_line = parts[1]
+                                logger.debug(clean_line)
+
+            # Provide context about common execution errors
+            if "list can't be used in 'await' expression" in str(e):
+                logger.info("This appears to be an async/await mismatch in a component")
+                logger.info("Check that async methods are properly awaited")
+            elif "AttributeError" in error_type and "NoneType" in str(e):
+                logger.info("This appears to be a null reference error")
+                logger.info("A component may be receiving unexpected None values")
+            elif "ConnectionError" in str(e) or "TimeoutError" in str(e):
+                logger.info("This appears to be a network connectivity issue")
+                logger.info("Check API keys and network connectivity")
+
+            logger.exception("Failed to execute graph - full traceback:")
+
+        sys.stdout = original_stdout
+        sys.stderr = original_stderr
+        error_msg = f"Failed to execute graph: {e}"
+        output_error(error_msg, verbose=verbosity > 0, exception=e)
+        raise RunError(error_msg, e) from e
+    finally:
+        sys.stdout = original_stdout
+        sys.stderr = original_stderr
+
+    execution_end_time = time.time() if timing else None
+
+    captured_logs = captured_stdout.getvalue() + captured_stderr.getvalue()
+
+    # Create timing metadata if requested
+    timing_metadata = None
+    if timing:
+        load_duration = load_end_time - start_time
+        execution_duration = execution_end_time - execution_start_time
+        total_duration = execution_end_time - start_time
+
+        timing_metadata = {
+            "load_time": round(load_duration, 3),
+            "execution_time": round(execution_duration, 3),
+            "total_time": round(total_duration, 3),
+            "component_timings": [
+                {
+                    "component": ct["component"],
+                    "component_id": ct["component_id"],
+                    "duration": round(ct["duration"], 3),
+                    "cumulative_time": round(ct["cumulative_time"], 3),
+                }
+                for ct in component_timings
+            ],
+        }
+
+    # Build result based on output format
+    if output_format == "json":
+        result_data = extract_structured_result(results)
+        result_data["logs"] = captured_logs
+        if timing_metadata:
+            result_data["timing"] = timing_metadata
+        return result_data
+    if output_format in {"text", "message"}:
+        result_data = extract_structured_result(results)
+        output_text = result_data.get("result", result_data.get("text", ""))
+        return {"output": str(output_text), "format": output_format}
+    if output_format == "result":
+        return {"output": extract_text_from_result(results), "format": "result"}
+    # Default case
+    result_data = extract_structured_result(results)
+    result_data["logs"] = captured_logs
+    if timing_metadata:
+        result_data["timing"] = timing_metadata
+    return result_data
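Note: run_flow is the new public entry point for programmatic execution. A minimal usage sketch, assuming a local flow file (the file name and input value below are hypothetical):

import asyncio
from pathlib import Path

from lfx.run.base import RunError, run_flow


async def main() -> None:
    try:
        # Execute a JSON flow with one input; "json" output returns a dict
        # that also carries captured logs (and timing, when requested).
        result = await run_flow(
            script_path=Path("my_flow.json"),
            input_value="Hello, world!",
            output_format="json",
            timing=True,
        )
        print(result.get("timing"), result.get("logs"))
    except RunError as exc:
        # RunError wraps whichever stage failed; the original exception is preserved.
        print(f"Run failed: {exc} (cause: {exc.original_exception!r})")


asyncio.run(main())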
lfx/schema/__init__.py CHANGED
@@ -1,9 +1,12 @@
 """Schema modules for lfx package."""
 
 __all__ = [
+    "ComponentOutput",
     "Data",
     "DataFrame",
+    "ErrorDetail",
     "InputValue",
+    "JobStatus",
     "Message",
     "OpenAIErrorResponse",
     "OpenAIResponsesRequest",
@@ -11,6 +14,13 @@ __all__ = [
     "OpenAIResponsesStreamChunk",
     "Tweaks",
     "UUIDstr",
+    "WorkflowExecutionRequest",
+    "WorkflowExecutionResponse",
+    "WorkflowJobResponse",
+    "WorkflowStatusResponse",
+    "WorkflowStopRequest",
+    "WorkflowStopResponse",
+    "WorkflowStreamEvent",
     "dotdict",
 ]
 
@@ -61,6 +71,46 @@ def __getattr__(name: str):
         from .openai_responses_schemas import OpenAIErrorResponse
 
         return OpenAIErrorResponse
+    if name == "WorkflowExecutionRequest":
+        from .workflow import WorkflowExecutionRequest
+
+        return WorkflowExecutionRequest
+    if name == "WorkflowExecutionResponse":
+        from .workflow import WorkflowExecutionResponse
+
+        return WorkflowExecutionResponse
+    if name == "WorkflowJobResponse":
+        from .workflow import WorkflowJobResponse
+
+        return WorkflowJobResponse
+    if name == "WorkflowStreamEvent":
+        from .workflow import WorkflowStreamEvent
+
+        return WorkflowStreamEvent
+    if name == "WorkflowStatusResponse":
+        from .workflow import WorkflowStatusResponse
+
+        return WorkflowStatusResponse
+    if name == "WorkflowStopRequest":
+        from .workflow import WorkflowStopRequest
+
+        return WorkflowStopRequest
+    if name == "WorkflowStopResponse":
+        from .workflow import WorkflowStopResponse
+
+        return WorkflowStopResponse
+    if name == "JobStatus":
+        from .workflow import JobStatus
+
+        return JobStatus
+    if name == "ErrorDetail":
+        from .workflow import ErrorDetail
+
+        return ErrorDetail
+    if name == "ComponentOutput":
+        from .workflow import ComponentOutput
+
+        return ComponentOutput
 
     msg = f"module '{__name__}' has no attribute '{name}'"
     raise AttributeError(msg)
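The new workflow names are wired through the module's existing PEP 562 lazy-import pattern, so lfx.schema.workflow is only imported on first attribute access. A minimal sketch of the effect:

from lfx import schema

# Resolving the attribute triggers `from .workflow import JobStatus` exactly once;
# callers that never touch workflow schemas pay no import cost.
job_status_cls = schema.JobStatus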
lfx/schema/data.py CHANGED
@@ -209,7 +209,7 @@ class Data(CrossModuleModel):
         """
         if key in {"data", "text_key"} or key.startswith("_"):
             super().__setattr__(key, value)
-        elif key in self.model_fields:
+        elif key in type(self).model_fields:
             self.data[key] = value
             super().__setattr__(key, value)
         else:
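This one-line change moves from instance-level to class-level access of model_fields; recent Pydantic releases (2.11+) deprecate reading model_fields from an instance, and type(self).model_fields is the forward-compatible form. A standalone sketch (the Example model is hypothetical):

from pydantic import BaseModel


class Example(BaseModel):
    name: str = ""


obj = Example()
# Deprecated: obj.model_fields (warns on recent Pydantic versions).
fields = type(obj).model_fields  # class-level access returns the same mapping
print(list(fields))  # ['name']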
lfx/schema/image.py CHANGED
@@ -7,6 +7,7 @@ from platformdirs import user_cache_dir
 from pydantic import BaseModel
 
 from lfx.services.deps import get_storage_service
+from lfx.utils.image import create_image_content_dict
 
 IMAGE_ENDPOINT = "/files/images/"
 
@@ -37,7 +38,14 @@ def get_file_paths(files: list[str | dict]):
         if not file:  # Skip empty/None files
             continue
 
-        file_path = file["path"] if isinstance(file, dict) and "path" in file else file
+        # Handle Image objects, dicts, and strings
+        if isinstance(file, dict) and "path" in file:
+            file_path = file["path"]
+        elif hasattr(file, "path") and file.path:
+            file_path = file.path
+        else:
+            file_path = file
+
         if not file_path:  # Skip empty paths
             continue
 
@@ -162,12 +170,23 @@ class Image(BaseModel):
             msg = "Image path is not set."
             raise ValueError(msg)
 
-    def to_content_dict(self):
-        """Convert image to content dictionary."""
-        return {
-            "type": "image_url",
-            "image_url": self.to_base64(),
-        }
+    def to_content_dict(self, flow_id: str | None = None):
+        """Convert image to content dictionary.
+
+        Args:
+            flow_id: Optional flow ID to prepend to the path if it doesn't contain one
+        """
+        if not self.path:
+            msg = "Image path is not set."
+            raise ValueError(msg)
+
+        # If the path doesn't contain a "/" and we have a flow_id, prepend it
+        image_path = self.path
+        if flow_id and "/" not in self.path:
+            image_path = f"{flow_id}/{self.path}"
+
+        # Use the utility function that properly handles the conversion
+        return create_image_content_dict(image_path, None, None)
 
     def get_url(self) -> str:
         """Get the URL for the image."""