langflow-base-nightly 0.5.0.dev38__py3-none-any.whl → 0.5.1.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py +122 -0
- langflow/api/router.py +2 -0
- langflow/api/v1/__init__.py +2 -0
- langflow/api/v1/endpoints.py +7 -1
- langflow/api/v1/mcp_projects.py +373 -52
- langflow/api/v1/openai_responses.py +545 -0
- langflow/api/v1/schemas.py +1 -2
- langflow/components/FAISS/__init__.py +34 -0
- langflow/components/agents/agent.py +246 -52
- langflow/components/cassandra/__init__.py +40 -0
- langflow/components/chroma/__init__.py +34 -0
- langflow/components/clickhouse/__init__.py +34 -0
- langflow/components/couchbase/__init__.py +34 -0
- langflow/components/data/file.py +302 -376
- langflow/components/datastax/__init__.py +3 -3
- langflow/components/docling/docling_inline.py +56 -4
- langflow/components/elastic/__init__.py +37 -0
- langflow/components/milvus/__init__.py +34 -0
- langflow/components/mongodb/__init__.py +34 -0
- langflow/components/nvidia/nvidia_ingest.py +3 -2
- langflow/components/ollama/ollama.py +1 -0
- langflow/components/perplexity/perplexity.py +3 -13
- langflow/components/pgvector/__init__.py +34 -0
- langflow/components/pinecone/__init__.py +34 -0
- langflow/components/qdrant/__init__.py +34 -0
- langflow/components/redis/__init__.py +36 -2
- langflow/components/redis/redis.py +75 -29
- langflow/components/redis/redis_chat.py +43 -0
- langflow/components/supabase/__init__.py +37 -0
- langflow/components/upstash/__init__.py +34 -0
- langflow/components/vectara/__init__.py +37 -0
- langflow/components/vectorstores/__init__.py +0 -69
- langflow/components/vectorstores/local_db.py +1 -0
- langflow/components/weaviate/__init__.py +34 -0
- langflow/components/youtube/channel.py +1 -1
- langflow/custom/custom_component/custom_component.py +11 -0
- langflow/custom/dependency_analyzer.py +165 -0
- langflow/custom/utils.py +34 -16
- langflow/frontend/assets/{SlackIcon-BhW6H3JR.js → SlackIcon-Cr3Q15Px.js} +1 -1
- langflow/frontend/assets/{Wikipedia-Dx5jbiy3.js → Wikipedia-GxM5sPdM.js} +1 -1
- langflow/frontend/assets/{Wolfram-CIyonzwo.js → Wolfram-BN3-VOCA.js} +1 -1
- langflow/frontend/assets/{index-DOEvKC2X.js → index-28oOcafk.js} +1 -1
- langflow/frontend/assets/{index-Bhv79Zso.js → index-2wSXqBtB.js} +1 -1
- langflow/frontend/assets/{index-BRmSeoWR.js → index-3wW7BClE.js} +1 -1
- langflow/frontend/assets/{index-eUkS6iJM.js → index-6pyH3ZJB.js} +1 -1
- langflow/frontend/assets/{index-Cr5v2ave.js → index-AWCSdofD.js} +1 -1
- langflow/frontend/assets/{index-C27Jj_26.js → index-B2Zgv_xv.js} +1 -1
- langflow/frontend/assets/{index-BKKrUElc.js → index-B2ptVQGM.js} +1 -1
- langflow/frontend/assets/{index-BnAFhkSN.js → index-B3TANVes.js} +1 -1
- langflow/frontend/assets/{index-hZUcL0MZ.js → index-B4yCvZKV.js} +1 -1
- langflow/frontend/assets/{index-BPR2mEFC.js → index-BC65VuWx.js} +1 -1
- langflow/frontend/assets/{index-CgU7KF4I.js → index-BCDSei1q.js} +1 -1
- langflow/frontend/assets/{index-CzHzeZuA.js → index-BJy50PvP.js} +1 -1
- langflow/frontend/assets/{index-DkGhPNeA.js → index-BKseQQ2I.js} +1 -1
- langflow/frontend/assets/{index-BVFaF7HW.js → index-BLTxEeTi.js} +1 -1
- langflow/frontend/assets/{index-cEXY6V06.js → index-BRg1f4Mu.js} +1 -1
- langflow/frontend/assets/{index-C2eQmQsn.js → index-BS8Vo8nc.js} +1 -1
- langflow/frontend/assets/{index-gdb7XMS8.js → index-BTKOU4xC.js} +1 -1
- langflow/frontend/assets/{index-U9GWm1eH.js → index-BVwJDmw-.js} +1 -1
- langflow/frontend/assets/{index-BWt5xGeA.js → index-BWYuQ2Sj.js} +1 -1
- langflow/frontend/assets/{index-Dx-Z87KT.js → index-BWdLILDG.js} +1 -1
- langflow/frontend/assets/{index-paQEWYGT.js → index-BZcw4827.js} +1 -1
- langflow/frontend/assets/{index-BDQrd7Tj.js → index-Bbi87Ve4.js} +1 -1
- langflow/frontend/assets/{index-vJOO5U8M.js → index-Bf0IYKLd.js} +1 -1
- langflow/frontend/assets/{index-1Q3VBqKn.js → index-Bg5nrMRh.js} +1 -1
- langflow/frontend/assets/{index-BFQ8KFK0.js → index-BiC280Nx.js} +1 -1
- langflow/frontend/assets/{index-CFNTYfFK.js → index-BiKKN6FR.js} +1 -1
- langflow/frontend/assets/{index-BPfdqCc_.js → index-Bief6eyJ.js} +1 -1
- langflow/frontend/assets/{index-Cxy9sEpy.js → index-BkXec1Yf.js} +1 -1
- langflow/frontend/assets/{index-D4tjMhfY.js → index-Bnl6QHtP.js} +1 -1
- langflow/frontend/assets/{index-BD7Io1hL.js → index-BpxbUiZD.js} +1978 -1978
- langflow/frontend/assets/{index-Ch5r0oW6.js → index-BrJV8psX.js} +1 -1
- langflow/frontend/assets/{index-DOQDkSoK.js → index-BwLWcUXL.js} +1 -1
- langflow/frontend/assets/{index-CMHpjHZl.js → index-Bx7dBY26.js} +1 -1
- langflow/frontend/assets/{index-CbnWRlYY.js → index-C-EdnFdA.js} +1 -1
- langflow/frontend/assets/{index-DljpLeCW.js → index-C-Xfg4cD.js} +1 -1
- langflow/frontend/assets/{index-Bwi4flFg.js → index-C1f2wMat.js} +1 -1
- langflow/frontend/assets/index-C1xroOlH.css +1 -0
- langflow/frontend/assets/{index-D6CSIrp1.js → index-C3KequvP.js} +1 -1
- langflow/frontend/assets/{index-BYjw7Gk3.js → index-C3ZjKdCD.js} +1 -1
- langflow/frontend/assets/{index-DIKUsGLF.js → index-C3l0zYn0.js} +1 -1
- langflow/frontend/assets/{index-CfPBgkqg.js → index-C3yvArUT.js} +1 -1
- langflow/frontend/assets/{index-CsLQiWNf.js → index-C9Cxnkl8.js} +1 -1
- langflow/frontend/assets/{index-mzl9ULw5.js → index-CBc8fEAE.js} +1 -1
- langflow/frontend/assets/{index-CEJNWPhA.js → index-CBvrGgID.js} +1 -1
- langflow/frontend/assets/{index-DwfHWnX7.js → index-CD-PqGCY.js} +1 -1
- langflow/frontend/assets/{index-dyXKnkMi.js → index-CGO1CiUr.js} +1 -1
- langflow/frontend/assets/{index-Dka_Rk4-.js → index-CH5UVA9b.js} +1 -1
- langflow/frontend/assets/{index-uiKla4UR.js → index-CLJeJYjH.js} +1 -1
- langflow/frontend/assets/{index-D9kwEzPB.js → index-CMZ79X-Y.js} +1 -1
- langflow/frontend/assets/{index-BrVhdPZb.js → index-CMzfJKiW.js} +1 -1
- langflow/frontend/assets/{index-Bct1s6__.js → index-CNw1H-Wc.js} +1 -1
- langflow/frontend/assets/{index-B7uEuOPK.js → index-CPHEscq9.js} +1 -1
- langflow/frontend/assets/{index-ekfMOqrF.js → index-CRPKJZw9.js} +1 -1
- langflow/frontend/assets/{index-G4ro0MjT.js → index-CRPyCfYy.js} +1 -1
- langflow/frontend/assets/{index-CSu8KHOi.js → index-CRcMqCIj.js} +1 -1
- langflow/frontend/assets/{index-DsoX2o1S.js → index-CUVDws8F.js} +1 -1
- langflow/frontend/assets/{index-r_8gs4nL.js → index-CVWQfRYZ.js} +1 -1
- langflow/frontend/assets/{index-7hzXChQz.js → index-CVl6MbaM.js} +1 -1
- langflow/frontend/assets/{index-B8UR8v-Q.js → index-CVwWoX99.js} +1 -1
- langflow/frontend/assets/{index-Dda2u_yz.js → index-CWPzZtSx.js} +1 -1
- langflow/frontend/assets/{index-BKeZt2hQ.js → index-CZqRL9DE.js} +1 -1
- langflow/frontend/assets/{index-DHngW1k8.js → index-CdIf07Rw.js} +1 -1
- langflow/frontend/assets/{index-C--IDAyc.js → index-Cewy7JZE.js} +1 -1
- langflow/frontend/assets/{index-DZP_SaHb.js → index-CfwLpbMM.js} +1 -1
- langflow/frontend/assets/{index-CuCM7Wu7.js → index-CiR1dxI4.js} +1 -1
- langflow/frontend/assets/{index-Xi4TplbI.js → index-CiixOzDG.js} +1 -1
- langflow/frontend/assets/{index-BLYw9MK2.js → index-ClsuDmR6.js} +1 -1
- langflow/frontend/assets/{index-DMCWDJOl.js → index-CmEYYRN1.js} +1 -1
- langflow/frontend/assets/{index-CrAF-31Y.js → index-Co20d-eQ.js} +1 -1
- langflow/frontend/assets/{index-DXAfIEvs.js → index-CpzXS6md.js} +1 -1
- langflow/frontend/assets/{index-BmYJJ5YS.js → index-Cqpzl1J4.js} +1 -1
- langflow/frontend/assets/{index-KWY77KfV.js → index-CtVIONP2.js} +1 -1
- langflow/frontend/assets/{index-B3KCdQ91.js → index-CuFXdTx4.js} +1 -1
- langflow/frontend/assets/{index-p2kStSPe.js → index-Cyd2HtHK.js} +1 -1
- langflow/frontend/assets/{index-CkjwSTSM.js → index-D-1tA8Dt.js} +1 -1
- langflow/frontend/assets/{index-BFf0HTFI.js → index-D-KY3kkq.js} +1 -1
- langflow/frontend/assets/{index-BYhcGLTV.js → index-D-_B1a8v.js} +1 -1
- langflow/frontend/assets/{index-Dr6pVDPI.js → index-D14EWPyZ.js} +1 -1
- langflow/frontend/assets/{index-BDuk0d7P.js → index-D2N3l-cw.js} +1 -1
- langflow/frontend/assets/{index-BvGQfVBD.js → index-D5ETnvJa.js} +1 -1
- langflow/frontend/assets/{index-D1oynC8a.js → index-D7kquVv2.js} +1 -1
- langflow/frontend/assets/{index-B1XqWJhG.js → index-DA6-bvgN.js} +1 -1
- langflow/frontend/assets/{index-DzIv3RyR.js → index-DDWBeudF.js} +1 -1
- langflow/frontend/assets/{index-BKlQbl-6.js → index-DDcMAaG4.js} +1 -1
- langflow/frontend/assets/{index-CkK25zZO.js → index-DHgomBdh.js} +1 -1
- langflow/frontend/assets/{index-Bj3lSwvZ.js → index-DJP-ss47.js} +1 -1
- langflow/frontend/assets/{index-DDXsm8tz.js → index-DQ7VYqQc.js} +1 -1
- langflow/frontend/assets/{index-BNQIbda3.js → index-DTqbvGC0.js} +1 -1
- langflow/frontend/assets/{index-BzoRPtTY.js → index-DUpri6zF.js} +1 -1
- langflow/frontend/assets/{index-35sspuLu.js → index-DV3utZDZ.js} +1 -1
- langflow/frontend/assets/{index-BpmqDOeZ.js → index-DXRfN4HV.js} +1 -1
- langflow/frontend/assets/{index-C0E3_MIK.js → index-Db9dYSzy.js} +1 -1
- langflow/frontend/assets/{index-C8K0r39B.js → index-DdtMEn6I.js} +1 -1
- langflow/frontend/assets/{index-BLsVo9iW.js → index-DfDhMHgQ.js} +1 -1
- langflow/frontend/assets/{index-BZFljdMa.js → index-Dfe7qfvf.js} +1 -1
- langflow/frontend/assets/{index-CyP3py8K.js → index-DhtZ5hx8.js} +1 -1
- langflow/frontend/assets/{index-w72fDjpG.js → index-DiB3CTo8.js} +1 -1
- langflow/frontend/assets/{index-CY7_TBTC.js → index-DiGWASY5.js} +1 -1
- langflow/frontend/assets/{index-CmSFKgiD.js → index-Dl5amdBz.js} +1 -1
- langflow/frontend/assets/{index-B0m53xKd.js → index-DlD4dXlZ.js} +1 -1
- langflow/frontend/assets/{index-DnVYJtVO.js → index-DmeiHnfl.js} +1 -1
- langflow/frontend/assets/index-Dmu-X5-4.js +1 -0
- langflow/frontend/assets/{index-CWYiSeWV.js → index-DpVWih90.js} +1 -1
- langflow/frontend/assets/{index-CjsommIr.js → index-DrDrcajG.js} +1 -1
- langflow/frontend/assets/{index-Un9pWxnP.js → index-Du-pc0KE.js} +1 -1
- langflow/frontend/assets/{index-oxHBZk2v.js → index-DwPkMTaY.js} +1 -1
- langflow/frontend/assets/{index-CgwykVGh.js → index-DwQEZe3C.js} +1 -1
- langflow/frontend/assets/{index-BmIx1cws.js → index-DyJFTK24.js} +1 -1
- langflow/frontend/assets/{index-0XQqYgdG.js → index-J38wh62w.js} +1 -1
- langflow/frontend/assets/{index-H7J7w7fa.js → index-Kwdl-e29.js} +1 -1
- langflow/frontend/assets/{index-CUKmGsI6.js → index-OwPvCmpW.js} +1 -1
- langflow/frontend/assets/{index-zV82kQ6k.js → index-Tw3Os-DN.js} +1 -1
- langflow/frontend/assets/{index-8cuhogZP.js → index-X0guhYF8.js} +1 -1
- langflow/frontend/assets/{index-BUse-kxM.js → index-dJWNxIRH.js} +1 -1
- langflow/frontend/assets/{index-DyqITq51.js → index-dcJ8-agu.js} +1 -1
- langflow/frontend/assets/{index-Cg53lrYh.js → index-eo2mAtL-.js} +1 -1
- langflow/frontend/assets/{index-DqbzUcI5.js → index-hG24k5xJ.js} +1 -1
- langflow/frontend/assets/{index-BQrVDjR1.js → index-h_aSZHf3.js} +1 -1
- langflow/frontend/assets/{index-kkA-qHB_.js → index-hbndqB9B.js} +1 -1
- langflow/frontend/assets/{index-DZxUIhWh.js → index-iJngutFo.js} +1 -1
- langflow/frontend/assets/{index-Dg8N3NSO.js → index-lTpteg8t.js} +1 -1
- langflow/frontend/assets/{index-DDhJVVel.js → index-lZX9AvZW.js} +1 -1
- langflow/frontend/assets/{index-BHhnpSkW.js → index-m8QA6VNM.js} +1 -1
- langflow/frontend/assets/{index-Bk4mTwnI.js → index-o0D2S7xW.js} +1 -1
- langflow/frontend/assets/{index-DJESSNJi.js → index-ovFJ_0J6.js} +1 -1
- langflow/frontend/assets/{index-DH6o91_s.js → index-pYJJOcma.js} +1 -1
- langflow/frontend/assets/{index-Bo-ww0Bb.js → index-sI75DsdM.js} +1 -1
- langflow/frontend/assets/{index-BcAgItH4.js → index-xvFOmxx4.js} +1 -1
- langflow/frontend/assets/{index-_cbGmjF4.js → index-z3SRY-mX.js} +1 -1
- langflow/frontend/assets/lazyIconImports-D97HEZkE.js +2 -0
- langflow/frontend/assets/{use-post-add-user-CvtuazTg.js → use-post-add-user-C0MdTpQ5.js} +1 -1
- langflow/frontend/index.html +2 -2
- langflow/graph/graph/base.py +4 -2
- langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +26 -0
- langflow/initial_setup/starter_projects/Basic Prompting.json +26 -0
- langflow/initial_setup/starter_projects/Blog Writer.json +56 -0
- langflow/initial_setup/starter_projects/Custom Component Generator.json +35 -0
- langflow/initial_setup/starter_projects/Document Q&A.json +27 -1
- langflow/initial_setup/starter_projects/Financial Report Parser.json +43 -0
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +83 -1
- langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +43 -0
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +49 -1
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +40 -1
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json +71 -0
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json +63 -0
- langflow/initial_setup/starter_projects/Market Research.json +57 -1
- langflow/initial_setup/starter_projects/Meeting Summary.json +95 -0
- langflow/initial_setup/starter_projects/Memory Chatbot.json +35 -0
- langflow/initial_setup/starter_projects/News Aggregator.json +61 -1
- langflow/initial_setup/starter_projects/Nvidia Remix.json +67 -2
- langflow/initial_setup/starter_projects/Pokédex Agent.json +48 -1
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +44 -1
- langflow/initial_setup/starter_projects/Price Deal Finder.json +53 -1
- langflow/initial_setup/starter_projects/Research Agent.json +40 -1
- langflow/initial_setup/starter_projects/Research Translation Loop.json +66 -0
- langflow/initial_setup/starter_projects/SEO Keyword Generator.json +17 -0
- langflow/initial_setup/starter_projects/SaaS Pricing.json +27 -1
- langflow/initial_setup/starter_projects/Search agent.json +40 -1
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +72 -3
- langflow/initial_setup/starter_projects/Simple Agent.json +57 -1
- langflow/initial_setup/starter_projects/Social Media Agent.json +77 -1
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +35 -1
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +51 -3
- langflow/initial_setup/starter_projects/Twitter Thread Generator.json +80 -0
- langflow/initial_setup/starter_projects/Vector Store RAG.json +110 -3
- langflow/initial_setup/starter_projects/Youtube Analysis.json +82 -1
- langflow/initial_setup/starter_projects/vector_store_rag.py +1 -1
- langflow/processing/process.py +3 -0
- langflow/schema/openai_responses_schemas.py +74 -0
- langflow/services/auth/mcp_encryption.py +104 -0
- langflow/services/settings/feature_flags.py +1 -1
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/METADATA +1 -1
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/RECORD +239 -219
- langflow/components/vectorstores/redis.py +0 -89
- langflow/frontend/assets/index-BWgIWfv2.js +0 -1
- langflow/frontend/assets/index-CqS7zir1.css +0 -1
- langflow/frontend/assets/lazyIconImports-DTNgvPE-.js +0 -2
- /langflow/components/{vectorstores → FAISS}/faiss.py +0 -0
- /langflow/components/{vectorstores → cassandra}/cassandra.py +0 -0
- /langflow/components/{datastax/cassandra.py → cassandra/cassandra_chat.py} +0 -0
- /langflow/components/{vectorstores → cassandra}/cassandra_graph.py +0 -0
- /langflow/components/{vectorstores → chroma}/chroma.py +0 -0
- /langflow/components/{vectorstores → clickhouse}/clickhouse.py +0 -0
- /langflow/components/{vectorstores → couchbase}/couchbase.py +0 -0
- /langflow/components/{vectorstores → datastax}/astradb.py +0 -0
- /langflow/components/{vectorstores → datastax}/astradb_graph.py +0 -0
- /langflow/components/{vectorstores → datastax}/graph_rag.py +0 -0
- /langflow/components/{vectorstores → datastax}/hcd.py +0 -0
- /langflow/components/{vectorstores → elastic}/elasticsearch.py +0 -0
- /langflow/components/{vectorstores → elastic}/opensearch.py +0 -0
- /langflow/components/{vectorstores → milvus}/milvus.py +0 -0
- /langflow/components/{vectorstores → mongodb}/mongodb_atlas.py +0 -0
- /langflow/components/{vectorstores → pgvector}/pgvector.py +0 -0
- /langflow/components/{vectorstores → pinecone}/pinecone.py +0 -0
- /langflow/components/{vectorstores → qdrant}/qdrant.py +0 -0
- /langflow/components/{vectorstores → supabase}/supabase.py +0 -0
- /langflow/components/{vectorstores → upstash}/upstash.py +0 -0
- /langflow/components/{vectorstores → vectara}/vectara.py +0 -0
- /langflow/components/{vectorstores → vectara}/vectara_rag.py +0 -0
- /langflow/components/{vectorstores → weaviate}/weaviate.py +0 -0
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/entry_points.txt +0 -0
langflow/components/data/file.py
CHANGED
@@ -1,9 +1,21 @@
-"""Enhanced file component
+"""Enhanced file component with clearer structure and Docling isolation.
+
+Notes:
+-----
+- Functionality is preserved with minimal behavioral changes.
+- ALL Docling parsing/export runs in a separate OS process to prevent memory
+  growth and native library state from impacting the main Langflow process.
+- Standard text/structured parsing continues to use existing BaseFileComponent
+  utilities (and optional threading via `parallel_load_data`).
+"""

 from __future__ import annotations

+import json
+import subprocess
+import sys
+import textwrap
 from copy import deepcopy
-from enum import Enum
 from typing import TYPE_CHECKING, Any

 from langflow.base.data.base_file import BaseFileComponent
@@ -24,51 +36,8 @@ if TYPE_CHECKING:
     from langflow.schema import DataFrame


-class MockConversionStatus(Enum):
-    """Mock ConversionStatus for fallback compatibility."""
-
-    SUCCESS = "success"
-    FAILURE = "failure"
-
-
-class MockInputFormat(Enum):
-    """Mock InputFormat for fallback compatibility."""
-
-    PDF = "pdf"
-    IMAGE = "image"
-
-
-class MockImageRefMode(Enum):
-    """Mock ImageRefMode for fallback compatibility."""
-
-    PLACEHOLDER = "placeholder"
-    EMBEDDED = "embedded"
-
-
-class DoclingImports:
-    """Container for docling imports with type information."""
-
-    def __init__(
-        self,
-        conversion_status: type[Enum],
-        input_format: type[Enum],
-        document_converter: type,
-        image_ref_mode: type[Enum],
-        strategy: str,
-    ) -> None:
-        self.conversion_status = conversion_status
-        self.input_format = input_format
-        self.document_converter = document_converter
-        self.image_ref_mode = image_ref_mode
-        self.strategy = strategy
-
-
 class FileComponent(BaseFileComponent):
-    """
-
-    This component supports all features of the standard File component, plus an advanced mode
-    that enables Docling document processing and export to various formats (Markdown, HTML, etc.).
-    """
+    """File component with optional Docling processing (isolated in a subprocess)."""

     display_name = "File"
     description = "Loads content from files with optional advanced document processing and export using Docling."
@@ -76,7 +45,7 @@ class FileComponent(BaseFileComponent):
     icon = "file-text"
     name = "File"

-    # Docling supported
+    # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.
     VALID_EXTENSIONS = [
         "adoc",
         "asciidoc",
@@ -110,12 +79,12 @@
         *TEXT_FILE_TYPES,
     ]

-    # Fixed export settings
+    # Fixed export settings used when markdown export is requested.
     EXPORT_FORMAT = "Markdown"
     IMAGE_MODE = "placeholder"

+    # ---- Inputs / Outputs (kept as close to original as possible) -------------------
     _base_inputs = deepcopy(BaseFileComponent._base_inputs)
-
     for input_item in _base_inputs:
         if isinstance(input_item, FileInput) and input_item.name == "path":
             input_item.real_time_refresh = True
@@ -175,6 +144,7 @@ class FileComponent(BaseFileComponent):
             advanced=True,
             show=False,
         ),
+        # Deprecated input retained for backward-compatibility.
         BoolInput(
             name="use_multithreading",
             display_name="[Deprecated] Use Multithreading",
@@ -202,8 +172,10 @@
         Output(display_name="Raw Content", name="message", method="load_files_message"),
     ]

-
-
+    # ------------------------------ UI helpers --------------------------------------
+
+    def _path_value(self, template: dict) -> list[str]:
+        """Return the list of currently selected file paths from the template."""
         return template.get("path", {}).get("file_path", [])

     def update_build_config(
@@ -212,65 +184,41 @@
         field_value: Any,
         field_name: str | None = None,
     ) -> dict[str, Any]:
-        """
+        """Show/hide Advanced Parser and related fields based on selection context."""
         if field_name == "path":
-
-
-            file_path = path_value[0] if len(path_value) > 0 else ""
-
-            # Show/hide Advanced Parser based on file count (only for single files)
+            paths = self._path_value(build_config)
+            file_path = paths[0] if paths else ""
             file_count = len(field_value) if field_value else 0
-
-
-
-
-
-
-
-
-
-                "ocr_engine",
-                "doc_key",
-                "md_image_placeholder",
-                "md_page_break_placeholder",
-            ]
-            for field in advanced_fields:
-                if field in build_config:
-                    build_config[field]["show"] = False
+
+            # Advanced mode only for single (non-tabular) file
+            allow_advanced = file_count == 1 and not file_path.endswith((".csv", ".xlsx", ".parquet"))
+            build_config["advanced_mode"]["show"] = allow_advanced
+            if not allow_advanced:
+                build_config["advanced_mode"]["value"] = False
+                for f in ("pipeline", "ocr_engine", "doc_key", "md_image_placeholder", "md_page_break_placeholder"):
+                    if f in build_config:
+                        build_config[f]["show"] = False

         elif field_name == "advanced_mode":
-
-
-
-                "ocr_engine",
-                "doc_key",
-                "md_image_placeholder",
-                "md_page_break_placeholder",
-            ]
-
-            for field in advanced_fields:
-                if field in build_config:
-                    build_config[field]["show"] = field_value
+            for f in ("pipeline", "ocr_engine", "doc_key", "md_image_placeholder", "md_page_break_placeholder"):
+                if f in build_config:
+                    build_config[f]["show"] = bool(field_value)

         return build_config

     def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]:  # noqa: ARG002
-        """Dynamically show outputs based on
+        """Dynamically show outputs based on file count/type and advanced mode."""
         if field_name not in ["path", "advanced_mode"]:
             return frontend_node

-        # Add outputs based on the number of files in the path
         template = frontend_node.get("template", {})
-
-        if
+        paths = self._path_value(template)
+        if not paths:
             return frontend_node

-        # Clear existing outputs
         frontend_node["outputs"] = []
-
-
-        # We need to check if the file is structured content
-        file_path = path_value[0] if field_name == "path" else frontend_node["template"]["path"]["file_path"][0]
+        if len(paths) == 1:
+            file_path = paths[0] if field_name == "path" else frontend_node["template"]["path"]["file_path"][0]
             if file_path.endswith((".csv", ".xlsx", ".parquet")):
                 frontend_node["outputs"].append(
                     Output(display_name="Structured Content", name="dataframe", method="load_files_structured"),
@@ -280,11 +228,8 @@ class FileComponent(BaseFileComponent):
                     Output(display_name="Structured Content", name="json", method="load_files_json"),
                 )

-        # Add outputs based on advanced mode
             advanced_mode = frontend_node.get("template", {}).get("advanced_mode", {}).get("value", False)
-
             if advanced_mode:
-                # Advanced mode: Structured Output, Markdown, and File Path
                 frontend_node["outputs"].append(
                     Output(display_name="Structured Output", name="advanced", method="load_files_advanced"),
                 )
@@ -295,7 +240,6 @@ class FileComponent(BaseFileComponent):
                     Output(display_name="File Path", name="path", method="load_files_path"),
                 )
             else:
-                # Normal mode: Raw Content and File Path
                 frontend_node["outputs"].append(
                     Output(display_name="Raw Content", name="message", method="load_files_message"),
                 )
@@ -303,130 +247,16 @@ class FileComponent(BaseFileComponent):
                     Output(display_name="File Path", name="path", method="load_files_path"),
                 )
         else:
-            #
-
-            frontend_node["outputs"].append(
-                Output(display_name="Files", name="dataframe", method="load_files"),
-            )
+            # Multiple files => DataFrame output; advanced parser disabled
+            frontend_node["outputs"].append(Output(display_name="Files", name="dataframe", method="load_files"))

         return frontend_node

-
-        """Try different import strategies for docling components."""
-        # Try strategy 1: Latest docling structure
-        try:
-            from docling.datamodel.base_models import ConversionStatus, InputFormat  # type: ignore[import-untyped]
-            from docling.document_converter import DocumentConverter  # type: ignore[import-untyped]
-            from docling_core.types.doc import ImageRefMode  # type: ignore[import-untyped]
-
-            self.log("Using latest docling import structure")
-            return DoclingImports(
-                conversion_status=ConversionStatus,
-                input_format=InputFormat,
-                document_converter=DocumentConverter,
-                image_ref_mode=ImageRefMode,
-                strategy="latest",
-            )
-        except ImportError as e:
-            self.log(f"Latest docling structure failed: {e}")
-
-        # Try strategy 2: Alternative import paths
-        try:
-            from docling.document_converter import DocumentConverter  # type: ignore[import-untyped]
-            from docling_core.types.doc import ImageRefMode  # type: ignore[import-untyped]
-
-            # Try to get ConversionStatus from different locations
-            conversion_status: type[Enum] = MockConversionStatus
-            input_format: type[Enum] = MockInputFormat
-
-            try:
-                from docling_core.types import ConversionStatus, InputFormat  # type: ignore[import-untyped]
-
-                conversion_status = ConversionStatus
-                input_format = InputFormat
-            except ImportError:
-                try:
-                    from docling.datamodel import ConversionStatus, InputFormat  # type: ignore[import-untyped]
-
-                    conversion_status = ConversionStatus
-                    input_format = InputFormat
-                except ImportError:
-                    # Use mock enums if we can't find them
-                    pass
-
-            self.log("Using alternative docling import structure")
-            return DoclingImports(
-                conversion_status=conversion_status,
-                input_format=input_format,
-                document_converter=DocumentConverter,
-                image_ref_mode=ImageRefMode,
-                strategy="alternative",
-            )
-        except ImportError as e:
-            self.log(f"Alternative docling structure failed: {e}")
-
-        # Try strategy 3: Basic converter only
-        try:
-            from docling.document_converter import DocumentConverter  # type: ignore[import-untyped]
-
-            self.log("Using basic docling import structure with mocks")
-            return DoclingImports(
-                conversion_status=MockConversionStatus,
-                input_format=MockInputFormat,
-                document_converter=DocumentConverter,
-                image_ref_mode=MockImageRefMode,
-                strategy="basic",
-            )
-        except ImportError as e:
-            self.log(f"Basic docling structure failed: {e}")
-
-        # Strategy 4: Complete fallback - return None to indicate failure
-        return None
-
-    def _create_advanced_converter(self, docling_imports: DoclingImports) -> Any:
-        """Create advanced converter with pipeline options if available."""
-        try:
-            from docling.datamodel.pipeline_options import PdfPipelineOptions  # type: ignore[import-untyped]
-            from docling.document_converter import PdfFormatOption  # type: ignore[import-untyped]
-
-            document_converter = docling_imports.document_converter
-            input_format = docling_imports.input_format
-
-            # Create basic pipeline options
-            pipeline_options = PdfPipelineOptions()
-
-            # Configure OCR if specified and available
-            if self.ocr_engine:
-                try:
-                    from docling.models.factories import get_ocr_factory  # type: ignore[import-untyped]
-
-                    pipeline_options.do_ocr = True
-                    ocr_factory = get_ocr_factory(allow_external_plugins=False)
-                    ocr_options = ocr_factory.create_options(kind=self.ocr_engine)
-                    pipeline_options.ocr_options = ocr_options
-                    self.log(f"Configured OCR with engine: {self.ocr_engine}")
-                except Exception as e:  # noqa: BLE001
-                    self.log(f"Could not configure OCR: {e}, proceeding without OCR")
-                    pipeline_options.do_ocr = False
-
-            # Create format options
-            pdf_format_option = PdfFormatOption(pipeline_options=pipeline_options)
-            format_options = {}
-            if hasattr(input_format, "PDF"):
-                format_options[input_format.PDF] = pdf_format_option
-            if hasattr(input_format, "IMAGE"):
-                format_options[input_format.IMAGE] = pdf_format_option
-
-            return document_converter(format_options=format_options)
-
-        except Exception as e:  # noqa: BLE001
-            self.log(f"Could not create advanced converter: {e}, using basic converter")
-            return docling_imports.document_converter()
+    # ------------------------------ Core processing ----------------------------------

     def _is_docling_compatible(self, file_path: str) -> bool:
-        """
-
-        docling_extensions = [
+        """Lightweight extension gate for Docling-compatible types."""
+        docling_exts = (
             ".adoc",
             ".asciidoc",
             ".asc",
@@ -456,102 +286,296 @@ class FileComponent(BaseFileComponent):
             ".xhtml",
             ".xml",
             ".webp",
-
-        return
+        )
+        return file_path.lower().endswith(docling_exts)
+
+    def _process_docling_in_subprocess(self, file_path: str) -> Data | None:
+        """Run Docling in a separate OS process and map the result to a Data object.
+
+        We avoid multiprocessing pickling by launching `python -c "<script>"` and
+        passing JSON config via stdin. The child prints a JSON result to stdout.
+        """
+        if not file_path:
+            return None
+
+        args: dict[str, Any] = {
+            "file_path": file_path,
+            "markdown": bool(self.markdown),
+            "image_mode": str(self.IMAGE_MODE),
+            "md_image_placeholder": str(self.md_image_placeholder),
+            "md_page_break_placeholder": str(self.md_page_break_placeholder),
+            "pipeline": str(self.pipeline),
+            "ocr_engine": str(self.ocr_engine) if getattr(self, "ocr_engine", "") else None,
+        }
+
+        # The child is a tiny, self-contained script to keep memory/state isolated.
+        child_script = textwrap.dedent(
+            r"""
+            import json, sys
+
+            def try_imports():
+                # Strategy 1: latest layout
+                try:
+                    from docling.datamodel.base_models import ConversionStatus, InputFormat  # type: ignore
+                    from docling.document_converter import DocumentConverter  # type: ignore
+                    from docling_core.types.doc import ImageRefMode  # type: ignore
+                    return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, "latest"
+                except Exception:
+                    pass
+                # Strategy 2: alternative layout
+                try:
+                    from docling.document_converter import DocumentConverter  # type: ignore
+                    try:
+                        from docling_core.types import ConversionStatus, InputFormat  # type: ignore
+                    except Exception:
+                        try:
+                            from docling.datamodel import ConversionStatus, InputFormat  # type: ignore
+                        except Exception:
+                            class ConversionStatus: SUCCESS = "success"
+                            class InputFormat:
+                                PDF="pdf"; IMAGE="image"
+                    try:
+                        from docling_core.types.doc import ImageRefMode  # type: ignore
+                    except Exception:
+                        class ImageRefMode:
+                            PLACEHOLDER="placeholder"; EMBEDDED="embedded"
+                    return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, "alternative"
+                except Exception:
+                    pass
+                # Strategy 3: basic converter only
+                try:
+                    from docling.document_converter import DocumentConverter  # type: ignore
+                    class ConversionStatus: SUCCESS = "success"
+                    class InputFormat:
+                        PDF="pdf"; IMAGE="image"
+                    class ImageRefMode:
+                        PLACEHOLDER="placeholder"; EMBEDDED="embedded"
+                    return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, "basic"
+                except Exception as e:
+                    raise ImportError(f"Docling imports failed: {e}") from e
+
+            def create_converter(strategy, input_format, DocumentConverter, pipeline, ocr_engine):
+                if strategy == "latest" and pipeline == "standard":
+                    try:
+                        from docling.datamodel.pipeline_options import PdfPipelineOptions  # type: ignore
+                        from docling.document_converter import PdfFormatOption  # type: ignore
+                        pipe = PdfPipelineOptions()
+                        if ocr_engine:
+                            try:
+                                from docling.models.factories import get_ocr_factory  # type: ignore
+                                pipe.do_ocr = True
+                                fac = get_ocr_factory(allow_external_plugins=False)
+                                pipe.ocr_options = fac.create_options(kind=ocr_engine)
+                            except Exception:
+                                pipe.do_ocr = False
+                        fmt = {}
+                        if hasattr(input_format, "PDF"):
+                            fmt[getattr(input_format, "PDF")] = PdfFormatOption(pipeline_options=pipe)
+                        if hasattr(input_format, "IMAGE"):
+                            fmt[getattr(input_format, "IMAGE")] = PdfFormatOption(pipeline_options=pipe)
+                        return DocumentConverter(format_options=fmt)
+                    except Exception:
+                        return DocumentConverter()
+                return DocumentConverter()
+
+            def export_markdown(document, ImageRefMode, image_mode, img_ph, pg_ph):
+                try:
+                    mode = getattr(ImageRefMode, image_mode.upper(), image_mode)
+                    return document.export_to_markdown(
+                        image_mode=mode,
+                        image_placeholder=img_ph,
+                        page_break_placeholder=pg_ph,
+                    )
+                except Exception:
+                    try:
+                        return document.export_to_text()
+                    except Exception:
+                        return str(document)
+
+            def to_rows(doc_dict):
+                rows = []
+                for t in doc_dict.get("texts", []):
+                    prov = t.get("prov") or []
+                    page_no = None
+                    if prov and isinstance(prov, list) and isinstance(prov[0], dict):
+                        page_no = prov[0].get("page_no")
+                    rows.append({
+                        "page_no": page_no,
+                        "label": t.get("label"),
+                        "text": t.get("text"),
+                        "level": t.get("level"),
+                    })
+                return rows
+
+            def main():
+                cfg = json.loads(sys.stdin.read())
+                file_path = cfg["file_path"]
+                markdown = cfg["markdown"]
+                image_mode = cfg["image_mode"]
+                img_ph = cfg["md_image_placeholder"]
+                pg_ph = cfg["md_page_break_placeholder"]
+                pipeline = cfg["pipeline"]
+                ocr_engine = cfg.get("ocr_engine")
+                meta = {"file_path": file_path}
+
+                try:
+                    ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, strategy = try_imports()
+                    converter = create_converter(strategy, InputFormat, DocumentConverter, pipeline, ocr_engine)
+                    try:
+                        res = converter.convert(file_path)
+                    except Exception as e:
+                        print(json.dumps({"ok": False, "error": f"Docling conversion error: {e}", "meta": meta}))
+                        return
+
+                    ok = False
+                    if hasattr(res, "status"):
+                        try:
+                            ok = (res.status == ConversionStatus.SUCCESS) or (str(res.status).lower() == "success")
+                        except Exception:
+                            ok = (str(res.status).lower() == "success")
+                    if not ok and hasattr(res, "document"):
+                        ok = getattr(res, "document", None) is not None
+                    if not ok:
+                        print(json.dumps({"ok": False, "error": "Docling conversion failed", "meta": meta}))
+                        return
+
+                    doc = getattr(res, "document", None)
+                    if doc is None:
+                        print(json.dumps({"ok": False, "error": "Docling produced no document", "meta": meta}))
+                        return
+
+                    if markdown:
+                        text = export_markdown(doc, ImageRefMode, image_mode, img_ph, pg_ph)
+                        print(json.dumps({"ok": True, "mode": "markdown", "text": text, "meta": meta}))
+                        return
+
+                    # structured
+                    try:
+                        doc_dict = doc.export_to_dict()
+                    except Exception as e:
+                        print(json.dumps({"ok": False, "error": f"Docling export_to_dict failed: {e}", "meta": meta}))
+                        return
+
+                    rows = to_rows(doc_dict)
+                    print(json.dumps({"ok": True, "mode": "structured", "doc": rows, "meta": meta}))
+                except Exception as e:
+                    print(
+                        json.dumps({
+                            "ok": False,
+                            "error": f"Docling processing error: {e}",
+                            "meta": {"file_path": file_path},
+                        })
+                    )
+
+            if __name__ == "__main__":
+                main()
+            """
+        )
+
+        # Validate file_path to avoid command injection or unsafe input
+        if not isinstance(args["file_path"], str) or any(c in args["file_path"] for c in [";", "|", "&", "$", "`"]):
+            return Data(data={"error": "Unsafe file path detected.", "file_path": args["file_path"]})
+
+        proc = subprocess.run(  # noqa: S603
+            [sys.executable, "-u", "-c", child_script],
+            input=json.dumps(args).encode("utf-8"),
+            capture_output=True,
+            check=False,
+        )
+
+        if not proc.stdout:
+            err_msg = proc.stderr.decode("utf-8", errors="replace") or "no output from child process"
+            return Data(data={"error": f"Docling subprocess error: {err_msg}", "file_path": file_path})
+
+        try:
+            result = json.loads(proc.stdout.decode("utf-8"))
+        except Exception as e:  # noqa: BLE001
+            err_msg = proc.stderr.decode("utf-8", errors="replace")
+            return Data(
+                data={"error": f"Invalid JSON from Docling subprocess: {e}. stderr={err_msg}", "file_path": file_path},
+            )
+
+        if not result.get("ok"):
+            return Data(data={"error": result.get("error", "Unknown Docling error"), **result.get("meta", {})})
+
+        meta = result.get("meta", {})
+        if result.get("mode") == "markdown":
+            exported_content = str(result.get("text", ""))
+            return Data(
+                text=exported_content,
+                data={"exported_content": exported_content, "export_format": self.EXPORT_FORMAT, **meta},
+            )
+
+        rows = list(result.get("doc", []))
+        return Data(data={"doc": rows, "export_format": self.EXPORT_FORMAT, **meta})

     def process_files(
         self,
         file_list: list[BaseFileComponent.BaseFile],
     ) -> list[BaseFileComponent.BaseFile]:
-        """Process files
+        """Process input files.
+
+        - Single file + advanced_mode => Docling in a separate process.
+        - Otherwise => standard parsing in current process (optionally threaded).
+        """
+        if not file_list:
+            msg = "No files to process."
+            raise ValueError(msg)

         def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | None:
-            """Process a single file using standard text parsing."""
             try:
                 return parse_text_file_to_data(file_path, silent_errors=silent_errors)
             except FileNotFoundError as e:
-
-                self.log(msg)
+                self.log(f"File not found: {file_path}. Error: {e}")
                 if not silent_errors:
                     raise
                 return None
             except Exception as e:
-
-                self.log(msg)
+                self.log(f"Unexpected error processing {file_path}: {e}")
                 if not silent_errors:
                     raise
                 return None

-
-
-
-
-
-
-
-
-
-
-
-            return None
-
-        if not file_list:
-            msg = "No files to process."
-            raise ValueError(msg)
-
-        file_path = str(file_list[0].path)
-        if self.advanced_mode and self._is_docling_compatible(file_path):
-            processed_data = process_file_docling(file_path)
-            if not processed_data:
-                msg = f"Failed to process file with Docling: {file_path}"
-                raise ValueError(msg)
-
-            # Serialize processed data to match Data structure
-            serialized_data = processed_data.serialize_model()
-
-            # Now, if doc is nested, we need to unravel it
-            clean_data: list[Data | None] = [processed_data]
-
-            # This is where we've manually processed the data
-            try:
-                if "exported_content" not in serialized_data:
-                    clean_data = [
+        # Advanced path: only for a single Docling-compatible file
+        if len(file_list) == 1:
+            file_path = str(file_list[0].path)
+            if self.advanced_mode and self._is_docling_compatible(file_path):
+                advanced_data: Data | None = self._process_docling_in_subprocess(file_path)
+
+                # --- UNNEST: expand each element in `doc` to its own Data row
+                payload = getattr(advanced_data, "data", {}) or {}
+                doc_rows = payload.get("doc")
+                if isinstance(doc_rows, list):
+                    rows: list[Data | None] = [
                         Data(
                             data={
                                 "file_path": file_path,
-                                **(
-
-                                    if "element" in item
-                                    else {k: v for k, v in item.items() if k != "file_path"}
-                                ),
-                            }
+                                **(item if isinstance(item, dict) else {"value": item}),
+                            },
                         )
-                        for item in
+                        for item in doc_rows
                     ]
-
-                    raise ValueError(serialized_data) from None
+                    return self.rollup_data(file_list, rows)

-
-
-            return self.rollup_data(file_list, final_data)
+                # If not structured, keep as-is (e.g., markdown export or error dict)
+                return self.rollup_data(file_list, [advanced_data])

+        # Standard multi-file (or single non-advanced) path
         concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)
-
-
-        self.log(f"Starting parallel processing of {file_count} files with concurrency: {concurrency}.")
-        file_paths = [str(file.path) for file in file_list]
+        file_paths = [str(f.path) for f in file_list]
+        self.log(f"Starting parallel processing of {len(file_paths)} files with concurrency: {concurrency}.")
         my_data = parallel_load_data(
             file_paths,
             silent_errors=self.silent_errors,
             load_function=process_file_standard,
             max_concurrency=concurrency,
         )
-
         return self.rollup_data(file_list, my_data)

+    # ------------------------------ Output helpers -----------------------------------
+
     def load_files_advanced(self) -> DataFrame:
         """Load files using advanced Docling processing and export to an advanced format."""
-        # TODO: Update
         self.markdown = False
         return self.load_files()

@@ -560,101 +584,3 @@ class FileComponent(BaseFileComponent):
         self.markdown = True
         result = self.load_files()
         return Message(text=str(result.text[0]))
-
-    def _process_with_docling_and_export(self, file_path: str) -> Data:
-        """Process a single file with Docling and export to the specified format."""
-        # Import docling components only when needed
-        docling_imports = self._try_import_docling()
-
-        if docling_imports is None:
-            msg = "Docling not available for advanced processing"
-            raise ImportError(msg)
-
-        conversion_status = docling_imports.conversion_status
-        document_converter = docling_imports.document_converter
-        image_ref_mode = docling_imports.image_ref_mode
-
-        try:
-            # Create converter based on strategy and pipeline setting
-            if docling_imports.strategy == "latest" and self.pipeline == "standard":
-                converter = self._create_advanced_converter(docling_imports)
-            else:
-                # Use basic converter for compatibility
-                converter = document_converter()
-                self.log("Using basic DocumentConverter for Docling processing")
-
-            # Process single file
-            result = converter.convert(file_path)
-
-            # Check if conversion was successful
-            success = False
-            if hasattr(result, "status"):
-                if hasattr(conversion_status, "SUCCESS"):
-                    success = result.status == conversion_status.SUCCESS
-                else:
-                    success = str(result.status).lower() == "success"
-            elif hasattr(result, "document"):
-                # If no status but has document, assume success
-                success = result.document is not None
-
-            if not success:
-                return Data(data={"error": "Docling conversion failed", "file_path": file_path})
-
-            if self.markdown:
-                self.log("Exporting document to Markdown format")
-                # Export the document to the specified format
-                exported_content = self._export_document(result.document, image_ref_mode)
-
-                return Data(
-                    text=exported_content,
-                    data={
-                        "exported_content": exported_content,
-                        "export_format": self.EXPORT_FORMAT,
-                        "file_path": file_path,
-                    },
-                )
-
-            return Data(
-                data={
-                    "doc": self.docling_to_dataframe_simple(result.document.export_to_dict()),
-                    "export_format": self.EXPORT_FORMAT,
-                    "file_path": file_path,
-                }
-            )
-
-        except Exception as e:  # noqa: BLE001
-            return Data(data={"error": f"Docling processing error: {e!s}", "file_path": file_path})
-
-    def docling_to_dataframe_simple(self, doc):
-        """Extract all text elements into a simple DataFrame."""
-        return [
-            {
-                "page_no": text["prov"][0]["page_no"] if text["prov"] else None,
-                "label": text["label"],
-                "text": text["text"],
-                "level": text.get("level", None),  # for headers
-            }
-            for text in doc["texts"]
-        ]
-
-    def _export_document(self, document: Any, image_ref_mode: type[Enum]) -> str:
-        """Export document to Markdown format with placeholder images."""
-        try:
-            image_mode = (
-                image_ref_mode(self.IMAGE_MODE) if hasattr(image_ref_mode, self.IMAGE_MODE) else self.IMAGE_MODE
-            )
-
-            # Always export to Markdown since it's fixed
-            return document.export_to_markdown(
-                image_mode=image_mode,
-                image_placeholder=self.md_image_placeholder,
-                page_break_placeholder=self.md_page_break_placeholder,
-            )
-
-        except Exception as e:  # noqa: BLE001
-            self.log(f"Markdown export failed: {e}, using basic text export")
-            # Fallback to basic text export
-            try:
-                return document.export_to_text()
-            except Exception:  # noqa: BLE001
-                return str(document)