langflow-base-nightly 0.5.0.dev39__py3-none-any.whl → 0.5.1.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. langflow/api/router.py +2 -0
  2. langflow/api/v1/__init__.py +2 -0
  3. langflow/api/v1/endpoints.py +7 -1
  4. langflow/api/v1/openai_responses.py +545 -0
  5. langflow/components/data/file.py +302 -376
  6. langflow/components/docling/docling_inline.py +56 -4
  7. langflow/components/nvidia/nvidia_ingest.py +3 -2
  8. langflow/components/youtube/channel.py +1 -1
  9. langflow/custom/custom_component/custom_component.py +11 -0
  10. langflow/graph/graph/base.py +3 -1
  11. langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +1 -1
  12. langflow/initial_setup/starter_projects/Basic Prompting.json +1 -1
  13. langflow/initial_setup/starter_projects/Blog Writer.json +2 -2
  14. langflow/initial_setup/starter_projects/Custom Component Generator.json +1 -1
  15. langflow/initial_setup/starter_projects/Document Q&A.json +2 -2
  16. langflow/initial_setup/starter_projects/Financial Report Parser.json +1 -1
  17. langflow/initial_setup/starter_projects/Hybrid Search RAG.json +2 -2
  18. langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +1 -1
  19. langflow/initial_setup/starter_projects/Instagram Copywriter.json +2 -2
  20. langflow/initial_setup/starter_projects/Invoice Summarizer.json +1 -1
  21. langflow/initial_setup/starter_projects/Knowledge Ingestion.json +2 -2
  22. langflow/initial_setup/starter_projects/Knowledge Retrieval.json +1 -1
  23. langflow/initial_setup/starter_projects/Market Research.json +2 -2
  24. langflow/initial_setup/starter_projects/Meeting Summary.json +3 -3
  25. langflow/initial_setup/starter_projects/Memory Chatbot.json +1 -1
  26. langflow/initial_setup/starter_projects/News Aggregator.json +3 -3
  27. langflow/initial_setup/starter_projects/Nvidia Remix.json +2 -2
  28. langflow/initial_setup/starter_projects/Pokédex Agent.json +2 -2
  29. langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +2 -2
  30. langflow/initial_setup/starter_projects/Price Deal Finder.json +3 -3
  31. langflow/initial_setup/starter_projects/Research Agent.json +2 -2
  32. langflow/initial_setup/starter_projects/Research Translation Loop.json +1 -1
  33. langflow/initial_setup/starter_projects/SEO Keyword Generator.json +1 -1
  34. langflow/initial_setup/starter_projects/SaaS Pricing.json +1 -1
  35. langflow/initial_setup/starter_projects/Search agent.json +2 -2
  36. langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +3 -3
  37. langflow/initial_setup/starter_projects/Simple Agent.json +2 -2
  38. langflow/initial_setup/starter_projects/Social Media Agent.json +5 -5
  39. langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +3 -3
  40. langflow/initial_setup/starter_projects/Travel Planning Agents.json +1 -1
  41. langflow/initial_setup/starter_projects/Twitter Thread Generator.json +1 -1
  42. langflow/initial_setup/starter_projects/Vector Store RAG.json +5 -5
  43. langflow/initial_setup/starter_projects/Youtube Analysis.json +2 -2
  44. langflow/schema/openai_responses_schemas.py +74 -0
  45. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/METADATA +1 -1
  46. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/RECORD +48 -46
  47. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/WHEEL +0 -0
  48. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/entry_points.txt +0 -0
--- a/langflow/components/data/file.py
+++ b/langflow/components/data/file.py
@@ -1,9 +1,21 @@
-"""Enhanced file component v2 with mypy and ruff compliance."""
+"""Enhanced file component with clearer structure and Docling isolation.
+
+Notes:
+-----
+- Functionality is preserved with minimal behavioral changes.
+- ALL Docling parsing/export runs in a separate OS process to prevent memory
+  growth and native library state from impacting the main Langflow process.
+- Standard text/structured parsing continues to use existing BaseFileComponent
+  utilities (and optional threading via `parallel_load_data`).
+"""
 
 from __future__ import annotations
 
+import json
+import subprocess
+import sys
+import textwrap
 from copy import deepcopy
-from enum import Enum
 from typing import TYPE_CHECKING, Any
 
 from langflow.base.data.base_file import BaseFileComponent
@@ -24,51 +36,8 @@ if TYPE_CHECKING:
     from langflow.schema import DataFrame
 
 
-class MockConversionStatus(Enum):
-    """Mock ConversionStatus for fallback compatibility."""
-
-    SUCCESS = "success"
-    FAILURE = "failure"
-
-
-class MockInputFormat(Enum):
-    """Mock InputFormat for fallback compatibility."""
-
-    PDF = "pdf"
-    IMAGE = "image"
-
-
-class MockImageRefMode(Enum):
-    """Mock ImageRefMode for fallback compatibility."""
-
-    PLACEHOLDER = "placeholder"
-    EMBEDDED = "embedded"
-
-
-class DoclingImports:
-    """Container for docling imports with type information."""
-
-    def __init__(
-        self,
-        conversion_status: type[Enum],
-        input_format: type[Enum],
-        document_converter: type,
-        image_ref_mode: type[Enum],
-        strategy: str,
-    ) -> None:
-        self.conversion_status = conversion_status
-        self.input_format = input_format
-        self.document_converter = document_converter
-        self.image_ref_mode = image_ref_mode
-        self.strategy = strategy
-
-
 class FileComponent(BaseFileComponent):
-    """Enhanced file component v2 that combines standard file loading with optional Docling processing and export.
-
-    This component supports all features of the standard File component, plus an advanced mode
-    that enables Docling document processing and export to various formats (Markdown, HTML, etc.).
-    """
+    """File component with optional Docling processing (isolated in a subprocess)."""
 
     display_name = "File"
     description = "Loads content from files with optional advanced document processing and export using Docling."
@@ -76,7 +45,7 @@ class FileComponent(BaseFileComponent):
     icon = "file-text"
     name = "File"
 
-    # Docling supported formats from original component
+    # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.
     VALID_EXTENSIONS = [
         "adoc",
         "asciidoc",
@@ -110,12 +79,12 @@ class FileComponent(BaseFileComponent):
         *TEXT_FILE_TYPES,
     ]
 
-    # Fixed export settings
+    # Fixed export settings used when markdown export is requested.
    EXPORT_FORMAT = "Markdown"
    IMAGE_MODE = "placeholder"
 
+    # ---- Inputs / Outputs (kept as close to original as possible) -------------------
    _base_inputs = deepcopy(BaseFileComponent._base_inputs)
-
    for input_item in _base_inputs:
        if isinstance(input_item, FileInput) and input_item.name == "path":
            input_item.real_time_refresh = True
@@ -175,6 +144,7 @@ class FileComponent(BaseFileComponent):
            advanced=True,
            show=False,
        ),
+        # Deprecated input retained for backward-compatibility.
        BoolInput(
            name="use_multithreading",
            display_name="[Deprecated] Use Multithreading",
@@ -202,8 +172,10 @@ class FileComponent(BaseFileComponent):
        Output(display_name="Raw Content", name="message", method="load_files_message"),
    ]
 
-    def _path_value(self, template) -> list[str]:
-        # Get current path value
+    # ------------------------------ UI helpers --------------------------------------
+
+    def _path_value(self, template: dict) -> list[str]:
+        """Return the list of currently selected file paths from the template."""
        return template.get("path", {}).get("file_path", [])
 
    def update_build_config(
@@ -212,65 +184,41 @@ class FileComponent(BaseFileComponent):
        field_value: Any,
        field_name: str | None = None,
    ) -> dict[str, Any]:
-        """Update build configuration to show/hide fields based on file count and advanced_mode."""
+        """Show/hide Advanced Parser and related fields based on selection context."""
        if field_name == "path":
-            # Get current path value
-            path_value = self._path_value(build_config)
-            file_path = path_value[0] if len(path_value) > 0 else ""
-
-            # Show/hide Advanced Parser based on file count (only for single files)
+            paths = self._path_value(build_config)
+            file_path = paths[0] if paths else ""
            file_count = len(field_value) if field_value else 0
-            if file_count == 1 and not file_path.endswith((".csv", ".xlsx", ".parquet")):
-                build_config["advanced_mode"]["show"] = True
-            else:
-                build_config["advanced_mode"]["show"] = False
-                build_config["advanced_mode"]["value"] = False  # Reset to False when hidden
-
-                # Hide all advanced fields when Advanced Parser is not available
-                advanced_fields = [
-                    "pipeline",
-                    "ocr_engine",
-                    "doc_key",
-                    "md_image_placeholder",
-                    "md_page_break_placeholder",
-                ]
-                for field in advanced_fields:
-                    if field in build_config:
-                        build_config[field]["show"] = False
+
+            # Advanced mode only for single (non-tabular) file
+            allow_advanced = file_count == 1 and not file_path.endswith((".csv", ".xlsx", ".parquet"))
+            build_config["advanced_mode"]["show"] = allow_advanced
+            if not allow_advanced:
+                build_config["advanced_mode"]["value"] = False
+                for f in ("pipeline", "ocr_engine", "doc_key", "md_image_placeholder", "md_page_break_placeholder"):
+                    if f in build_config:
+                        build_config[f]["show"] = False
 
        elif field_name == "advanced_mode":
-            # Show/hide advanced fields based on advanced_mode (only if single file)
-            advanced_fields = [
-                "pipeline",
-                "ocr_engine",
-                "doc_key",
-                "md_image_placeholder",
-                "md_page_break_placeholder",
-            ]
-
-            for field in advanced_fields:
-                if field in build_config:
-                    build_config[field]["show"] = field_value
+            for f in ("pipeline", "ocr_engine", "doc_key", "md_image_placeholder", "md_page_break_placeholder"):
+                if f in build_config:
+                    build_config[f]["show"] = bool(field_value)
 
        return build_config
 
    def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]:  # noqa: ARG002
-        """Dynamically show outputs based on the number of files and their types."""
+        """Dynamically show outputs based on file count/type and advanced mode."""
        if field_name not in ["path", "advanced_mode"]:
            return frontend_node
 
-        # Add outputs based on the number of files in the path
        template = frontend_node.get("template", {})
-        path_value = self._path_value(template)
-        if len(path_value) == 0:
+        paths = self._path_value(template)
+        if not paths:
            return frontend_node
 
-        # Clear existing outputs
        frontend_node["outputs"] = []
-
-        if len(path_value) == 1:
-            # We need to check if the file is structured content
-            file_path = path_value[0] if field_name == "path" else frontend_node["template"]["path"]["file_path"][0]
+        if len(paths) == 1:
+            file_path = paths[0] if field_name == "path" else frontend_node["template"]["path"]["file_path"][0]
            if file_path.endswith((".csv", ".xlsx", ".parquet")):
                frontend_node["outputs"].append(
                    Output(display_name="Structured Content", name="dataframe", method="load_files_structured"),
@@ -280,11 +228,8 @@ class FileComponent(BaseFileComponent):
                frontend_node["outputs"].append(
                    Output(display_name="Structured Content", name="json", method="load_files_json"),
                )
 
-            # Add outputs based on advanced mode
            advanced_mode = frontend_node.get("template", {}).get("advanced_mode", {}).get("value", False)
-
            if advanced_mode:
-                # Advanced mode: Structured Output, Markdown, and File Path
                frontend_node["outputs"].append(
                    Output(display_name="Structured Output", name="advanced", method="load_files_advanced"),
                )
@@ -295,7 +240,6 @@ class FileComponent(BaseFileComponent):
                    Output(display_name="File Path", name="path", method="load_files_path"),
                )
            else:
-                # Normal mode: Raw Content and File Path
                frontend_node["outputs"].append(
                    Output(display_name="Raw Content", name="message", method="load_files_message"),
                )
@@ -303,130 +247,16 @@ class FileComponent(BaseFileComponent):
                frontend_node["outputs"].append(
                    Output(display_name="File Path", name="path", method="load_files_path"),
                )
        else:
-            # For multiple files, we show the files output (DataFrame format)
-            # Advanced Parser is not available for multiple files
-            frontend_node["outputs"].append(
-                Output(display_name="Files", name="dataframe", method="load_files"),
-            )
+            # Multiple files => DataFrame output; advanced parser disabled
+            frontend_node["outputs"].append(Output(display_name="Files", name="dataframe", method="load_files"))
 
        return frontend_node
 
-    def _try_import_docling(self) -> DoclingImports | None:
-        """Try different import strategies for docling components."""
-        # Try strategy 1: Latest docling structure
-        try:
-            from docling.datamodel.base_models import ConversionStatus, InputFormat  # type: ignore[import-untyped]
-            from docling.document_converter import DocumentConverter  # type: ignore[import-untyped]
-            from docling_core.types.doc import ImageRefMode  # type: ignore[import-untyped]
-
-            self.log("Using latest docling import structure")
-            return DoclingImports(
-                conversion_status=ConversionStatus,
-                input_format=InputFormat,
-                document_converter=DocumentConverter,
-                image_ref_mode=ImageRefMode,
-                strategy="latest",
-            )
-        except ImportError as e:
-            self.log(f"Latest docling structure failed: {e}")
-
-        # Try strategy 2: Alternative import paths
-        try:
-            from docling.document_converter import DocumentConverter  # type: ignore[import-untyped]
-            from docling_core.types.doc import ImageRefMode  # type: ignore[import-untyped]
-
-            # Try to get ConversionStatus from different locations
-            conversion_status: type[Enum] = MockConversionStatus
-            input_format: type[Enum] = MockInputFormat
-
-            try:
-                from docling_core.types import ConversionStatus, InputFormat  # type: ignore[import-untyped]
-
-                conversion_status = ConversionStatus
-                input_format = InputFormat
-            except ImportError:
-                try:
-                    from docling.datamodel import ConversionStatus, InputFormat  # type: ignore[import-untyped]
-
-                    conversion_status = ConversionStatus
-                    input_format = InputFormat
-                except ImportError:
-                    # Use mock enums if we can't find them
-                    pass
-
-            self.log("Using alternative docling import structure")
-            return DoclingImports(
-                conversion_status=conversion_status,
-                input_format=input_format,
-                document_converter=DocumentConverter,
-                image_ref_mode=ImageRefMode,
-                strategy="alternative",
-            )
-        except ImportError as e:
-            self.log(f"Alternative docling structure failed: {e}")
-
-        # Try strategy 3: Basic converter only
-        try:
-            from docling.document_converter import DocumentConverter  # type: ignore[import-untyped]
-
-            self.log("Using basic docling import structure with mocks")
-            return DoclingImports(
-                conversion_status=MockConversionStatus,
-                input_format=MockInputFormat,
-                document_converter=DocumentConverter,
-                image_ref_mode=MockImageRefMode,
-                strategy="basic",
-            )
-        except ImportError as e:
-            self.log(f"Basic docling structure failed: {e}")
-
-        # Strategy 4: Complete fallback - return None to indicate failure
-        return None
-
-    def _create_advanced_converter(self, docling_imports: DoclingImports) -> Any:
-        """Create advanced converter with pipeline options if available."""
-        try:
-            from docling.datamodel.pipeline_options import PdfPipelineOptions  # type: ignore[import-untyped]
-            from docling.document_converter import PdfFormatOption  # type: ignore[import-untyped]
-
-            document_converter = docling_imports.document_converter
-            input_format = docling_imports.input_format
-
-            # Create basic pipeline options
-            pipeline_options = PdfPipelineOptions()
-
-            # Configure OCR if specified and available
-            if self.ocr_engine:
-                try:
-                    from docling.models.factories import get_ocr_factory  # type: ignore[import-untyped]
-
-                    pipeline_options.do_ocr = True
-                    ocr_factory = get_ocr_factory(allow_external_plugins=False)
-                    ocr_options = ocr_factory.create_options(kind=self.ocr_engine)
-                    pipeline_options.ocr_options = ocr_options
-                    self.log(f"Configured OCR with engine: {self.ocr_engine}")
-                except Exception as e:  # noqa: BLE001
-                    self.log(f"Could not configure OCR: {e}, proceeding without OCR")
-                    pipeline_options.do_ocr = False
-
-            # Create format options
-            pdf_format_option = PdfFormatOption(pipeline_options=pipeline_options)
-            format_options = {}
-            if hasattr(input_format, "PDF"):
-                format_options[input_format.PDF] = pdf_format_option
-            if hasattr(input_format, "IMAGE"):
-                format_options[input_format.IMAGE] = pdf_format_option
-
-            return document_converter(format_options=format_options)
-
-        except Exception as e:  # noqa: BLE001
-            self.log(f"Could not create advanced converter: {e}, using basic converter")
-            return docling_imports.document_converter()
+    # ------------------------------ Core processing ----------------------------------
 
    def _is_docling_compatible(self, file_path: str) -> bool:
-        """Check if file is compatible with Docling processing."""
-        # All VALID_EXTENSIONS are Docling compatible (except for TEXT_FILE_TYPES which may overlap)
-        docling_extensions = [
+        """Lightweight extension gate for Docling-compatible types."""
+        docling_exts = (
            ".adoc",
            ".asciidoc",
            ".asc",
@@ -456,102 +286,296 @@ class FileComponent(BaseFileComponent):
            ".xhtml",
            ".xml",
            ".webp",
-        ]
-        return any(file_path.lower().endswith(ext) for ext in docling_extensions)
+        )
+        return file_path.lower().endswith(docling_exts)
+
+    def _process_docling_in_subprocess(self, file_path: str) -> Data | None:
+        """Run Docling in a separate OS process and map the result to a Data object.
+
+        We avoid multiprocessing pickling by launching `python -c "<script>"` and
+        passing JSON config via stdin. The child prints a JSON result to stdout.
+        """
+        if not file_path:
+            return None
+
+        args: dict[str, Any] = {
+            "file_path": file_path,
+            "markdown": bool(self.markdown),
+            "image_mode": str(self.IMAGE_MODE),
+            "md_image_placeholder": str(self.md_image_placeholder),
+            "md_page_break_placeholder": str(self.md_page_break_placeholder),
+            "pipeline": str(self.pipeline),
+            "ocr_engine": str(self.ocr_engine) if getattr(self, "ocr_engine", "") else None,
+        }
+
+        # The child is a tiny, self-contained script to keep memory/state isolated.
+        child_script = textwrap.dedent(
+            r"""
+            import json, sys
+
+            def try_imports():
+                # Strategy 1: latest layout
+                try:
+                    from docling.datamodel.base_models import ConversionStatus, InputFormat  # type: ignore
+                    from docling.document_converter import DocumentConverter  # type: ignore
+                    from docling_core.types.doc import ImageRefMode  # type: ignore
+                    return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, "latest"
+                except Exception:
+                    pass
+                # Strategy 2: alternative layout
+                try:
+                    from docling.document_converter import DocumentConverter  # type: ignore
+                    try:
+                        from docling_core.types import ConversionStatus, InputFormat  # type: ignore
+                    except Exception:
+                        try:
+                            from docling.datamodel import ConversionStatus, InputFormat  # type: ignore
+                        except Exception:
+                            class ConversionStatus: SUCCESS = "success"
+                            class InputFormat:
+                                PDF = "pdf"; IMAGE = "image"
+                    try:
+                        from docling_core.types.doc import ImageRefMode  # type: ignore
+                    except Exception:
+                        class ImageRefMode:
+                            PLACEHOLDER = "placeholder"; EMBEDDED = "embedded"
+                    return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, "alternative"
+                except Exception:
+                    pass
+                # Strategy 3: basic converter only
+                try:
+                    from docling.document_converter import DocumentConverter  # type: ignore
+                    class ConversionStatus: SUCCESS = "success"
+                    class InputFormat:
+                        PDF = "pdf"; IMAGE = "image"
+                    class ImageRefMode:
+                        PLACEHOLDER = "placeholder"; EMBEDDED = "embedded"
+                    return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, "basic"
+                except Exception as e:
+                    raise ImportError(f"Docling imports failed: {e}") from e
+
+            def create_converter(strategy, input_format, DocumentConverter, pipeline, ocr_engine):
+                if strategy == "latest" and pipeline == "standard":
+                    try:
+                        from docling.datamodel.pipeline_options import PdfPipelineOptions  # type: ignore
+                        from docling.document_converter import PdfFormatOption  # type: ignore
+                        pipe = PdfPipelineOptions()
+                        if ocr_engine:
+                            try:
+                                from docling.models.factories import get_ocr_factory  # type: ignore
+                                pipe.do_ocr = True
+                                fac = get_ocr_factory(allow_external_plugins=False)
+                                pipe.ocr_options = fac.create_options(kind=ocr_engine)
+                            except Exception:
+                                pipe.do_ocr = False
+                        fmt = {}
+                        if hasattr(input_format, "PDF"):
+                            fmt[getattr(input_format, "PDF")] = PdfFormatOption(pipeline_options=pipe)
+                        if hasattr(input_format, "IMAGE"):
+                            fmt[getattr(input_format, "IMAGE")] = PdfFormatOption(pipeline_options=pipe)
+                        return DocumentConverter(format_options=fmt)
+                    except Exception:
+                        return DocumentConverter()
+                return DocumentConverter()
+
+            def export_markdown(document, ImageRefMode, image_mode, img_ph, pg_ph):
+                try:
+                    mode = getattr(ImageRefMode, image_mode.upper(), image_mode)
+                    return document.export_to_markdown(
+                        image_mode=mode,
+                        image_placeholder=img_ph,
+                        page_break_placeholder=pg_ph,
+                    )
+                except Exception:
+                    try:
+                        return document.export_to_text()
+                    except Exception:
+                        return str(document)
+
+            def to_rows(doc_dict):
+                rows = []
+                for t in doc_dict.get("texts", []):
+                    prov = t.get("prov") or []
+                    page_no = None
+                    if prov and isinstance(prov, list) and isinstance(prov[0], dict):
+                        page_no = prov[0].get("page_no")
+                    rows.append({
+                        "page_no": page_no,
+                        "label": t.get("label"),
+                        "text": t.get("text"),
+                        "level": t.get("level"),
+                    })
+                return rows
+
+            def main():
+                cfg = json.loads(sys.stdin.read())
+                file_path = cfg["file_path"]
+                markdown = cfg["markdown"]
+                image_mode = cfg["image_mode"]
+                img_ph = cfg["md_image_placeholder"]
+                pg_ph = cfg["md_page_break_placeholder"]
+                pipeline = cfg["pipeline"]
+                ocr_engine = cfg.get("ocr_engine")
+                meta = {"file_path": file_path}
+
+                try:
+                    ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, strategy = try_imports()
+                    converter = create_converter(strategy, InputFormat, DocumentConverter, pipeline, ocr_engine)
+                    try:
+                        res = converter.convert(file_path)
+                    except Exception as e:
+                        print(json.dumps({"ok": False, "error": f"Docling conversion error: {e}", "meta": meta}))
+                        return
+
+                    ok = False
+                    if hasattr(res, "status"):
+                        try:
+                            ok = (res.status == ConversionStatus.SUCCESS) or (str(res.status).lower() == "success")
+                        except Exception:
+                            ok = (str(res.status).lower() == "success")
+                    if not ok and hasattr(res, "document"):
+                        ok = getattr(res, "document", None) is not None
+                    if not ok:
+                        print(json.dumps({"ok": False, "error": "Docling conversion failed", "meta": meta}))
+                        return
+
+                    doc = getattr(res, "document", None)
+                    if doc is None:
+                        print(json.dumps({"ok": False, "error": "Docling produced no document", "meta": meta}))
+                        return
+
+                    if markdown:
+                        text = export_markdown(doc, ImageRefMode, image_mode, img_ph, pg_ph)
+                        print(json.dumps({"ok": True, "mode": "markdown", "text": text, "meta": meta}))
+                        return
+
+                    # structured
+                    try:
+                        doc_dict = doc.export_to_dict()
+                    except Exception as e:
+                        print(json.dumps({"ok": False, "error": f"Docling export_to_dict failed: {e}", "meta": meta}))
+                        return
+
+                    rows = to_rows(doc_dict)
+                    print(json.dumps({"ok": True, "mode": "structured", "doc": rows, "meta": meta}))
+                except Exception as e:
+                    print(
+                        json.dumps({
+                            "ok": False,
+                            "error": f"Docling processing error: {e}",
+                            "meta": {"file_path": file_path},
+                        })
+                    )

+            if __name__ == "__main__":
+                main()
+            """
+        )
+
+        # Validate file_path to avoid command injection or unsafe input
+        if not isinstance(args["file_path"], str) or any(c in args["file_path"] for c in [";", "|", "&", "$", "`"]):
+            return Data(data={"error": "Unsafe file path detected.", "file_path": args["file_path"]})
+
+        proc = subprocess.run(  # noqa: S603
+            [sys.executable, "-u", "-c", child_script],
+            input=json.dumps(args).encode("utf-8"),
+            capture_output=True,
+            check=False,
+        )
+
+        if not proc.stdout:
+            err_msg = proc.stderr.decode("utf-8", errors="replace") or "no output from child process"
+            return Data(data={"error": f"Docling subprocess error: {err_msg}", "file_path": file_path})
+
+        try:
+            result = json.loads(proc.stdout.decode("utf-8"))
+        except Exception as e:  # noqa: BLE001
+            err_msg = proc.stderr.decode("utf-8", errors="replace")
+            return Data(
+                data={"error": f"Invalid JSON from Docling subprocess: {e}. stderr={err_msg}", "file_path": file_path},
+            )
+
+        if not result.get("ok"):
+            return Data(data={"error": result.get("error", "Unknown Docling error"), **result.get("meta", {})})
+
+        meta = result.get("meta", {})
+        if result.get("mode") == "markdown":
+            exported_content = str(result.get("text", ""))
+            return Data(
+                text=exported_content,
+                data={"exported_content": exported_content, "export_format": self.EXPORT_FORMAT, **meta},
+            )
+
+        rows = list(result.get("doc", []))
+        return Data(data={"doc": rows, "export_format": self.EXPORT_FORMAT, **meta})
 
    def process_files(
        self,
        file_list: list[BaseFileComponent.BaseFile],
    ) -> list[BaseFileComponent.BaseFile]:
-        """Process files using standard parsing or Docling based on advanced_mode and file type."""
+        """Process input files.
+
+        - Single file + advanced_mode => Docling in a separate process.
+        - Otherwise => standard parsing in current process (optionally threaded).
+        """
+        if not file_list:
+            msg = "No files to process."
+            raise ValueError(msg)
 
        def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | None:
-            """Process a single file using standard text parsing."""
            try:
                return parse_text_file_to_data(file_path, silent_errors=silent_errors)
            except FileNotFoundError as e:
-                msg = f"File not found: {file_path}. Error: {e}"
-                self.log(msg)
+                self.log(f"File not found: {file_path}. Error: {e}")
                if not silent_errors:
                    raise
                return None
            except Exception as e:
-                msg = f"Unexpected error processing {file_path}: {e}"
-                self.log(msg)
+                self.log(f"Unexpected error processing {file_path}: {e}")
                if not silent_errors:
                    raise
                return None
 
-        def process_file_docling(file_path: str, *, silent_errors: bool = False) -> Data | None:
-            """Process a single file using Docling if compatible, otherwise standard processing."""
-            # Try Docling first if file is compatible and advanced mode is enabled
-            try:
-                return self._process_with_docling_and_export(file_path)
-            except Exception as e:  # noqa: BLE001
-                self.log(f"Docling processing failed for {file_path}: {e}, falling back to standard processing")
-                if not silent_errors:
-                    # Return error data instead of raising
-                    return Data(data={"error": f"Docling processing failed: {e}", "file_path": file_path})
-
-            return None
-
-        if not file_list:
-            msg = "No files to process."
-            raise ValueError(msg)
-
-        file_path = str(file_list[0].path)
-        if self.advanced_mode and self._is_docling_compatible(file_path):
-            processed_data = process_file_docling(file_path)
-            if not processed_data:
-                msg = f"Failed to process file with Docling: {file_path}"
-                raise ValueError(msg)
-
-            # Serialize processed data to match Data structure
-            serialized_data = processed_data.serialize_model()
-
-            # Now, if doc is nested, we need to unravel it
-            clean_data: list[Data | None] = [processed_data]
-
-            # This is where we've manually processed the data
-            try:
-                if "exported_content" not in serialized_data:
-                    clean_data = [
+        # Advanced path: only for a single Docling-compatible file
+        if len(file_list) == 1:
+            file_path = str(file_list[0].path)
+            if self.advanced_mode and self._is_docling_compatible(file_path):
+                advanced_data: Data | None = self._process_docling_in_subprocess(file_path)
+
+                # --- UNNEST: expand each element in `doc` to its own Data row
+                payload = getattr(advanced_data, "data", {}) or {}
+                doc_rows = payload.get("doc")
+                if isinstance(doc_rows, list):
+                    rows: list[Data | None] = [
                        Data(
                            data={
                                "file_path": file_path,
-                                **(
-                                    item["element"]
-                                    if "element" in item
-                                    else {k: v for k, v in item.items() if k != "file_path"}
-                                ),
-                            }
+                                **(item if isinstance(item, dict) else {"value": item}),
+                            },
                        )
-                        for item in serialized_data["doc"]
+                        for item in doc_rows
                    ]
-            except Exception as _:  # noqa: BLE001
-                raise ValueError(serialized_data) from None
+                    return self.rollup_data(file_list, rows)
 
-            # Repeat file_list to match the number of processed data elements
-            final_data: list[Data | None] = clean_data
-            return self.rollup_data(file_list, final_data)
+                # If not structured, keep as-is (e.g., markdown export or error dict)
+                return self.rollup_data(file_list, [advanced_data])
 
+        # Standard multi-file (or single non-advanced) path
        concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)
-        file_count = len(file_list)
-
-        self.log(f"Starting parallel processing of {file_count} files with concurrency: {concurrency}.")
-        file_paths = [str(file.path) for file in file_list]
+        file_paths = [str(f.path) for f in file_list]
+        self.log(f"Starting parallel processing of {len(file_paths)} files with concurrency: {concurrency}.")
        my_data = parallel_load_data(
            file_paths,
            silent_errors=self.silent_errors,
            load_function=process_file_standard,
            max_concurrency=concurrency,
        )
-
        return self.rollup_data(file_list, my_data)
 
+    # ------------------------------ Output helpers -----------------------------------
+
    def load_files_advanced(self) -> DataFrame:
        """Load files using advanced Docling processing and export to an advanced format."""
-        # TODO: Update
        self.markdown = False
        return self.load_files()
 
@@ -560,101 +584,3 @@ class FileComponent(BaseFileComponent):
        self.markdown = True
        result = self.load_files()
        return Message(text=str(result.text[0]))
-
-    def _process_with_docling_and_export(self, file_path: str) -> Data:
-        """Process a single file with Docling and export to the specified format."""
-        # Import docling components only when needed
-        docling_imports = self._try_import_docling()
-
-        if docling_imports is None:
-            msg = "Docling not available for advanced processing"
-            raise ImportError(msg)
-
-        conversion_status = docling_imports.conversion_status
-        document_converter = docling_imports.document_converter
-        image_ref_mode = docling_imports.image_ref_mode
-
-        try:
-            # Create converter based on strategy and pipeline setting
-            if docling_imports.strategy == "latest" and self.pipeline == "standard":
-                converter = self._create_advanced_converter(docling_imports)
-            else:
-                # Use basic converter for compatibility
-                converter = document_converter()
-                self.log("Using basic DocumentConverter for Docling processing")
-
-            # Process single file
-            result = converter.convert(file_path)
-
-            # Check if conversion was successful
-            success = False
-            if hasattr(result, "status"):
-                if hasattr(conversion_status, "SUCCESS"):
-                    success = result.status == conversion_status.SUCCESS
-                else:
-                    success = str(result.status).lower() == "success"
-            elif hasattr(result, "document"):
-                # If no status but has document, assume success
-                success = result.document is not None
-
-            if not success:
-                return Data(data={"error": "Docling conversion failed", "file_path": file_path})
-
-            if self.markdown:
-                self.log("Exporting document to Markdown format")
-                # Export the document to the specified format
-                exported_content = self._export_document(result.document, image_ref_mode)
-
-                return Data(
-                    text=exported_content,
-                    data={
-                        "exported_content": exported_content,
-                        "export_format": self.EXPORT_FORMAT,
-                        "file_path": file_path,
-                    },
-                )
-
-            return Data(
-                data={
-                    "doc": self.docling_to_dataframe_simple(result.document.export_to_dict()),
-                    "export_format": self.EXPORT_FORMAT,
-                    "file_path": file_path,
-                }
-            )
-
-        except Exception as e:  # noqa: BLE001
-            return Data(data={"error": f"Docling processing error: {e!s}", "file_path": file_path})
-
-    def docling_to_dataframe_simple(self, doc):
-        """Extract all text elements into a simple DataFrame."""
-        return [
-            {
-                "page_no": text["prov"][0]["page_no"] if text["prov"] else None,
-                "label": text["label"],
-                "text": text["text"],
-                "level": text.get("level", None),  # for headers
-            }
-            for text in doc["texts"]
-        ]
-
-    def _export_document(self, document: Any, image_ref_mode: type[Enum]) -> str:
-        """Export document to Markdown format with placeholder images."""
-        try:
-            image_mode = (
-                image_ref_mode(self.IMAGE_MODE) if hasattr(image_ref_mode, self.IMAGE_MODE) else self.IMAGE_MODE
-            )
-
-            # Always export to Markdown since it's fixed
-            return document.export_to_markdown(
-                image_mode=image_mode,
-                image_placeholder=self.md_image_placeholder,
-                page_break_placeholder=self.md_page_break_placeholder,
-            )
-
-        except Exception as e:  # noqa: BLE001
-            self.log(f"Markdown export failed: {e}, using basic text export")
-            # Fallback to basic text export
-            try:
-                return document.export_to_text()
-            except Exception:  # noqa: BLE001
-                return str(document)
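The core of this release is visible in the diff above: Docling no longer runs inside the Langflow process. The component serializes its settings to JSON, feeds them to a throwaway `python -c` child over stdin, and reads a single JSON result back from stdout. Below is a minimal, self-contained sketch of that parent/child pattern, independent of Docling; the `run_isolated` and `heavy_work` names and the payload shape are illustrative only, not part of Langflow.

import json
import subprocess
import sys
import textwrap

# Child script: read a JSON config from stdin, do the heavy work, print exactly
# one JSON object to stdout, then exit (returning all memory to the OS).
CHILD_SCRIPT = textwrap.dedent(
    r"""
    import json, sys

    def heavy_work(cfg):
        # Stand-in for the expensive, native-library-backed step
        # (the Docling conversion in the real component).
        return {"echo": cfg["payload"].upper()}

    cfg = json.loads(sys.stdin.read())
    try:
        print(json.dumps({"ok": True, "result": heavy_work(cfg)}))
    except Exception as e:
        print(json.dumps({"ok": False, "error": str(e)}))
    """
)

def run_isolated(payload: str) -> dict:
    """Parent side: launch `python -u -c <script>` and parse its JSON reply."""
    proc = subprocess.run(
        [sys.executable, "-u", "-c", CHILD_SCRIPT],
        input=json.dumps({"payload": payload}).encode("utf-8"),
        capture_output=True,
        check=False,
    )
    if not proc.stdout:
        # Covers crashes in the child, including failures in native code.
        return {"ok": False, "error": proc.stderr.decode("utf-8", "replace")}
    return json.loads(proc.stdout.decode("utf-8"))

if __name__ == "__main__":
    print(run_isolated("hello"))  # -> {'ok': True, 'result': {'echo': 'HELLO'}}

Because the child exits after printing its one-line reply, any memory it (or a native dependency) allocated is reclaimed by the OS when the process dies, and a hard crash in native code surfaces to the parent as empty stdout rather than taking down the main server; passing the config via stdin rather than argv also sidesteps multiprocessing pickling and shell-quoting concerns.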