langflow-base-nightly 0.5.0.dev36__py3-none-any.whl → 0.5.0.dev38__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langflow/__main__.py +1 -1
- langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py +24 -30
- langflow/alembic/versions/58b28437a398_modify_nullable.py +6 -6
- langflow/alembic/versions/79e675cb6752_change_datetime_type.py +24 -30
- langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py +12 -13
- langflow/api/build.py +21 -26
- langflow/api/health_check_router.py +3 -3
- langflow/api/utils.py +3 -3
- langflow/api/v1/callback.py +2 -2
- langflow/api/v1/chat.py +19 -31
- langflow/api/v1/endpoints.py +10 -10
- langflow/api/v1/flows.py +1 -1
- langflow/api/v1/knowledge_bases.py +19 -12
- langflow/api/v1/mcp.py +12 -12
- langflow/api/v1/mcp_projects.py +45 -81
- langflow/api/v1/mcp_utils.py +8 -8
- langflow/api/v1/schemas.py +1 -5
- langflow/api/v1/store.py +1 -1
- langflow/api/v1/validate.py +2 -2
- langflow/api/v1/voice_mode.py +58 -62
- langflow/api/v2/files.py +5 -3
- langflow/api/v2/mcp.py +10 -9
- langflow/base/composio/composio_base.py +21 -2
- langflow/base/data/docling_utils.py +194 -0
- langflow/base/data/kb_utils.py +33 -0
- langflow/base/embeddings/aiml_embeddings.py +1 -1
- langflow/base/flow_processing/utils.py +1 -2
- langflow/base/io/__init__.py +0 -1
- langflow/base/langwatch/utils.py +2 -1
- langflow/base/mcp/util.py +49 -47
- langflow/base/models/model.py +3 -3
- langflow/base/prompts/api_utils.py +1 -1
- langflow/base/tools/flow_tool.py +2 -2
- langflow/base/tools/run_flow.py +2 -6
- langflow/components/Notion/add_content_to_page.py +2 -2
- langflow/components/Notion/list_database_properties.py +2 -2
- langflow/components/Notion/list_pages.py +2 -2
- langflow/components/Notion/page_content_viewer.py +2 -2
- langflow/components/Notion/update_page_property.py +1 -1
- langflow/components/agentql/agentql_api.py +2 -10
- langflow/components/agents/agent.py +3 -3
- langflow/components/agents/mcp_component.py +54 -69
- langflow/components/anthropic/anthropic.py +5 -4
- langflow/components/assemblyai/assemblyai_get_subtitles.py +2 -2
- langflow/components/assemblyai/assemblyai_lemur.py +2 -2
- langflow/components/assemblyai/assemblyai_list_transcripts.py +2 -2
- langflow/components/assemblyai/assemblyai_poll_transcript.py +2 -2
- langflow/components/assemblyai/assemblyai_start_transcript.py +2 -2
- langflow/components/data/file.py +575 -55
- langflow/components/data/kb_ingest.py +116 -43
- langflow/components/data/kb_retrieval.py +24 -26
- langflow/components/data/url.py +1 -1
- langflow/components/datastax/astra_assistant_manager.py +3 -3
- langflow/components/datastax/create_assistant.py +1 -2
- langflow/components/deactivated/merge_data.py +1 -2
- langflow/components/deactivated/sub_flow.py +6 -7
- langflow/components/deactivated/vectara_self_query.py +3 -3
- langflow/components/docling/__init__.py +0 -198
- langflow/components/docling/docling_inline.py +1 -1
- langflow/components/embeddings/text_embedder.py +3 -3
- langflow/components/firecrawl/firecrawl_extract_api.py +2 -9
- langflow/components/google/gmail.py +1 -1
- langflow/components/google/google_generative_ai.py +5 -11
- langflow/components/groq/groq.py +4 -3
- langflow/components/helpers/current_date.py +2 -3
- langflow/components/helpers/memory.py +1 -1
- langflow/components/ibm/watsonx.py +1 -1
- langflow/components/ibm/watsonx_embeddings.py +1 -1
- langflow/components/langwatch/langwatch.py +3 -3
- langflow/components/logic/flow_tool.py +2 -2
- langflow/components/logic/notify.py +1 -1
- langflow/components/logic/run_flow.py +2 -3
- langflow/components/logic/sub_flow.py +4 -5
- langflow/components/mem0/mem0_chat_memory.py +2 -8
- langflow/components/nvidia/nvidia.py +3 -3
- langflow/components/olivya/olivya.py +7 -7
- langflow/components/ollama/ollama.py +8 -6
- langflow/components/processing/batch_run.py +8 -8
- langflow/components/processing/data_operations.py +2 -2
- langflow/components/processing/merge_data.py +1 -2
- langflow/components/processing/message_to_data.py +2 -3
- langflow/components/processing/parse_json_data.py +1 -1
- langflow/components/processing/save_file.py +6 -32
- langflow/components/prototypes/python_function.py +2 -3
- langflow/components/serpapi/serp.py +1 -1
- langflow/components/tavily/tavily_extract.py +1 -1
- langflow/components/tavily/tavily_search.py +1 -1
- langflow/components/tools/calculator.py +2 -2
- langflow/components/tools/python_code_structured_tool.py +3 -10
- langflow/components/tools/python_repl.py +2 -2
- langflow/components/tools/searxng.py +3 -3
- langflow/components/tools/serp_api.py +2 -2
- langflow/components/tools/tavily_search_tool.py +2 -2
- langflow/components/tools/yahoo_finance.py +1 -1
- langflow/components/twelvelabs/video_embeddings.py +4 -4
- langflow/components/vectorstores/astradb.py +30 -19
- langflow/components/vectorstores/local_db.py +1 -1
- langflow/components/yahoosearch/yahoo.py +1 -1
- langflow/components/youtube/trending.py +3 -4
- langflow/custom/attributes.py +2 -1
- langflow/custom/code_parser/code_parser.py +1 -1
- langflow/custom/custom_component/base_component.py +1 -1
- langflow/custom/custom_component/component.py +16 -2
- langflow/custom/directory_reader/directory_reader.py +7 -7
- langflow/custom/directory_reader/utils.py +1 -2
- langflow/custom/utils.py +30 -30
- langflow/events/event_manager.py +1 -1
- langflow/frontend/assets/{SlackIcon-B260Qg_R.js → SlackIcon-BhW6H3JR.js} +1 -1
- langflow/frontend/assets/{Wikipedia-BB2mbgyd.js → Wikipedia-Dx5jbiy3.js} +1 -1
- langflow/frontend/assets/{Wolfram-DytXC9hF.js → Wolfram-CIyonzwo.js} +1 -1
- langflow/frontend/assets/{index-DPX6X_bw.js → index-0XQqYgdG.js} +1 -1
- langflow/frontend/assets/{index-DtJyCbzF.js → index-1Q3VBqKn.js} +1 -1
- langflow/frontend/assets/{index-DztLFiip.js → index-35sspuLu.js} +1 -1
- langflow/frontend/assets/{index-BeNby7qF.js → index-7hzXChQz.js} +1 -1
- langflow/frontend/assets/{index-BOEf7-ty.js → index-8cuhogZP.js} +1 -1
- langflow/frontend/assets/{index-D0s9f6Re.js → index-B0m53xKd.js} +1 -1
- langflow/frontend/assets/{index-DpJiH-Rk.js → index-B1XqWJhG.js} +1 -1
- langflow/frontend/assets/{index-DuAeoC-H.js → index-B3KCdQ91.js} +1 -1
- langflow/frontend/assets/{index-Bxml6wXu.js → index-B7uEuOPK.js} +1 -1
- langflow/frontend/assets/{index-CDFLVFB4.js → index-B8UR8v-Q.js} +1 -1
- langflow/frontend/assets/{index-ci4XHjbJ.js → index-BD7Io1hL.js} +6 -6
- langflow/frontend/assets/{index-DasrI03Y.js → index-BDQrd7Tj.js} +1 -1
- langflow/frontend/assets/{index-CkQ-bJ4G.js → index-BDuk0d7P.js} +1 -1
- langflow/frontend/assets/{index-C_1RBTul.js → index-BFQ8KFK0.js} +1 -1
- langflow/frontend/assets/{index-DqSH4x-R.js → index-BFf0HTFI.js} +1 -1
- langflow/frontend/assets/{index-BXMhmvTj.js → index-BHhnpSkW.js} +1 -1
- langflow/frontend/assets/{index-Uq2ij_SS.js → index-BKKrUElc.js} +1 -1
- langflow/frontend/assets/{index-3TJWUdmx.js → index-BKeZt2hQ.js} +1 -1
- langflow/frontend/assets/{index-DHlEwAxb.js → index-BKlQbl-6.js} +1 -1
- langflow/frontend/assets/{index-Bisa4IQF.js → index-BLYw9MK2.js} +1 -1
- langflow/frontend/assets/{index-GODbXlHC.js → index-BLsVo9iW.js} +1 -1
- langflow/frontend/assets/{index-CHFO5O4g.js → index-BNQIbda3.js} +1 -1
- langflow/frontend/assets/{index-3uOAA_XX.js → index-BPR2mEFC.js} +1 -1
- langflow/frontend/assets/{index-3qMh9x6K.js → index-BPfdqCc_.js} +1 -1
- langflow/frontend/assets/{index-rcdQpNcU.js → index-BQrVDjR1.js} +1 -1
- langflow/frontend/assets/{index-4eRtaV45.js → index-BRmSeoWR.js} +1 -1
- langflow/frontend/assets/{index-Ct9_T9ox.js → index-BUse-kxM.js} +1 -1
- langflow/frontend/assets/{index-BdYgKk1d.js → index-BVFaF7HW.js} +1 -1
- langflow/frontend/assets/{index-CWWo2zOA.js → index-BWgIWfv2.js} +1 -1
- langflow/frontend/assets/{index-Du9aJK7m.js → index-BWt5xGeA.js} +1 -1
- langflow/frontend/assets/{index-Baka5dKE.js → index-BYhcGLTV.js} +1 -1
- langflow/frontend/assets/{index-BWq9GTzt.js → index-BYjw7Gk3.js} +1 -1
- langflow/frontend/assets/{index-r1LZg-PY.js → index-BZFljdMa.js} +1 -1
- langflow/frontend/assets/index-BcAgItH4.js +1 -0
- langflow/frontend/assets/{index-B8TlNgn-.js → index-Bct1s6__.js} +1 -1
- langflow/frontend/assets/{index-DZzbmg3J.js → index-Bhv79Zso.js} +1 -1
- langflow/frontend/assets/{index-CqDUqHfd.js → index-Bj3lSwvZ.js} +1 -1
- langflow/frontend/assets/{index-dkS0ek2S.js → index-Bk4mTwnI.js} +1 -1
- langflow/frontend/assets/{index-tOy_uloT.js → index-BmIx1cws.js} +1 -1
- langflow/frontend/assets/{index-BVtf6m9S.js → index-BmYJJ5YS.js} +1 -1
- langflow/frontend/assets/{index-mBjJYD9q.js → index-BnAFhkSN.js} +1 -1
- langflow/frontend/assets/{index-Ba3RTMXI.js → index-Bo-ww0Bb.js} +1 -1
- langflow/frontend/assets/{index-BsBWP-Dh.js → index-BpmqDOeZ.js} +1 -1
- langflow/frontend/assets/{index-BqUeOc7Y.js → index-BrVhdPZb.js} +1 -1
- langflow/frontend/assets/{index-DWkMJnbd.js → index-BvGQfVBD.js} +1 -1
- langflow/frontend/assets/{index-DdzVmJHE.js → index-Bwi4flFg.js} +1 -1
- langflow/frontend/assets/{index-Ccb5B8zG.js → index-BzoRPtTY.js} +1 -1
- langflow/frontend/assets/{index-Ym6gz0T6.js → index-C--IDAyc.js} +1 -1
- langflow/frontend/assets/{index-CvQ0w8Pj.js → index-C0E3_MIK.js} +1 -1
- langflow/frontend/assets/{index-DxIs8VSp.js → index-C27Jj_26.js} +1 -1
- langflow/frontend/assets/{index-BxWXWRmZ.js → index-C2eQmQsn.js} +1 -1
- langflow/frontend/assets/{index-B536IPXH.js → index-C8K0r39B.js} +1 -1
- langflow/frontend/assets/{index-BEDxAk3N.js → index-CEJNWPhA.js} +1 -1
- langflow/frontend/assets/{index-G_U_kPAd.js → index-CFNTYfFK.js} +1 -1
- langflow/frontend/assets/{index-CMGZGIx_.js → index-CMHpjHZl.js} +1 -1
- langflow/frontend/assets/{index-C76aBV_h.js → index-CSu8KHOi.js} +1 -1
- langflow/frontend/assets/{index-B-c82Fnu.js → index-CUKmGsI6.js} +1 -1
- langflow/frontend/assets/{index-DX7XsAcx.js → index-CWYiSeWV.js} +1 -1
- langflow/frontend/assets/{index-COL0eiWI.js → index-CY7_TBTC.js} +1 -1
- langflow/frontend/assets/{index-BlBl2tvQ.js → index-CbnWRlYY.js} +1 -1
- langflow/frontend/assets/{index-BQB-iDYl.js → index-CfPBgkqg.js} +1 -1
- langflow/frontend/assets/{index-DWr_zPkx.js → index-Cg53lrYh.js} +1 -1
- langflow/frontend/assets/{index-BcgB3rXH.js → index-CgU7KF4I.js} +1 -1
- langflow/frontend/assets/{index-CkSzjCqM.js → index-CgwykVGh.js} +1 -1
- langflow/frontend/assets/{index-BbsND1Qg.js → index-Ch5r0oW6.js} +1 -1
- langflow/frontend/assets/{index-AY5Dm2mG.js → index-CjsommIr.js} +1 -1
- langflow/frontend/assets/{index-BtJ2o21k.js → index-CkK25zZO.js} +1 -1
- langflow/frontend/assets/{index-BKvKC-12.js → index-CkjwSTSM.js} +1 -1
- langflow/frontend/assets/{index-BVHvIhT5.js → index-CmSFKgiD.js} +1 -1
- langflow/frontend/assets/{index-D-zkHcob.js → index-Cr5v2ave.js} +1 -1
- langflow/frontend/assets/{index-js8ceOaP.js → index-CrAF-31Y.js} +1 -1
- langflow/frontend/assets/{index-BNbWMmAV.js → index-CsLQiWNf.js} +1 -1
- langflow/frontend/assets/{index-VcXZzovW.js → index-CuCM7Wu7.js} +1 -1
- langflow/frontend/assets/{index-DzeIsaBm.js → index-Cxy9sEpy.js} +1 -1
- langflow/frontend/assets/{index-LrMzDsq9.js → index-CyP3py8K.js} +1 -1
- langflow/frontend/assets/{index-C8KD3LPb.js → index-CzHzeZuA.js} +1 -1
- langflow/frontend/assets/{index-DS1EgA10.js → index-D1oynC8a.js} +1 -1
- langflow/frontend/assets/{index-ByFXr9Iq.js → index-D4tjMhfY.js} +1 -1
- langflow/frontend/assets/{index-DyJDHm2D.js → index-D6CSIrp1.js} +1 -1
- langflow/frontend/assets/{index-DIqSyDVO.js → index-D9kwEzPB.js} +1 -1
- langflow/frontend/assets/{index-D5PeCofu.js → index-DDXsm8tz.js} +1 -1
- langflow/frontend/assets/{index-CJwYfDBz.js → index-DDhJVVel.js} +1 -1
- langflow/frontend/assets/{index-C7x9R_Yo.js → index-DH6o91_s.js} +1 -1
- langflow/frontend/assets/{index-DpQKtcXu.js → index-DHngW1k8.js} +1 -1
- langflow/frontend/assets/{index-VZnN0P6C.js → index-DIKUsGLF.js} +1 -1
- langflow/frontend/assets/{index-VHmUHUUU.js → index-DJESSNJi.js} +1 -1
- langflow/frontend/assets/{index-BdIWbCEL.js → index-DMCWDJOl.js} +1 -1
- langflow/frontend/assets/{index-DK8vNpXK.js → index-DOEvKC2X.js} +1 -1
- langflow/frontend/assets/{index-C7V5U9yH.js → index-DOQDkSoK.js} +1 -1
- langflow/frontend/assets/{index-D0HmkH0H.js → index-DXAfIEvs.js} +1 -1
- langflow/frontend/assets/{index-C9N80hP8.js → index-DZP_SaHb.js} +1 -1
- langflow/frontend/assets/{index-B2ggrBuR.js → index-DZxUIhWh.js} +1 -1
- langflow/frontend/assets/{index-DS9I4y48.js → index-Dda2u_yz.js} +1 -1
- langflow/frontend/assets/{index-BLROcaSz.js → index-Dg8N3NSO.js} +1 -1
- langflow/frontend/assets/{index-Dpz3oBf5.js → index-DkGhPNeA.js} +1 -1
- langflow/frontend/assets/{index-BnLT29qW.js → index-Dka_Rk4-.js} +1 -1
- langflow/frontend/assets/{index-B5ed-sAv.js → index-DljpLeCW.js} +1 -1
- langflow/frontend/assets/{index-Cx__T92e.js → index-DnVYJtVO.js} +1 -1
- langflow/frontend/assets/{index-hOkEW3JP.js → index-DqbzUcI5.js} +1 -1
- langflow/frontend/assets/{index-BxkZkBgQ.js → index-Dr6pVDPI.js} +1 -1
- langflow/frontend/assets/{index-BIkqesA-.js → index-DsoX2o1S.js} +1 -1
- langflow/frontend/assets/{index-Cpgkb0Q3.js → index-DwfHWnX7.js} +1 -1
- langflow/frontend/assets/{index-B9Mo3ndZ.js → index-Dx-Z87KT.js} +1 -1
- langflow/frontend/assets/{index-R7q8cAek.js → index-DyqITq51.js} +1 -1
- langflow/frontend/assets/{index-DKEXZFUO.js → index-DzIv3RyR.js} +1 -1
- langflow/frontend/assets/{index-BJrY2Fiu.js → index-G4ro0MjT.js} +1 -1
- langflow/frontend/assets/{index-IFGgPiye.js → index-H7J7w7fa.js} +1 -1
- langflow/frontend/assets/{index-lKEJpUsF.js → index-KWY77KfV.js} +1 -1
- langflow/frontend/assets/{index-DDNNv4C0.js → index-U9GWm1eH.js} +1 -1
- langflow/frontend/assets/{index-BRWNIt9F.js → index-Un9pWxnP.js} +1 -1
- langflow/frontend/assets/{index-BCK-ZyIh.js → index-Xi4TplbI.js} +1 -1
- langflow/frontend/assets/{index-BEKoRwsX.js → index-_cbGmjF4.js} +1 -1
- langflow/frontend/assets/{index-7xXgqu09.js → index-cEXY6V06.js} +1 -1
- langflow/frontend/assets/{index-D87Zw62M.js → index-dyXKnkMi.js} +1 -1
- langflow/frontend/assets/{index-CG7cp0nD.js → index-eUkS6iJM.js} +1 -1
- langflow/frontend/assets/{index-CoUlHbtg.js → index-ekfMOqrF.js} +1 -1
- langflow/frontend/assets/{index-DhzEUXfr.js → index-gdb7XMS8.js} +1 -1
- langflow/frontend/assets/{index-D9eflZfP.js → index-hZUcL0MZ.js} +1 -1
- langflow/frontend/assets/{index-CwIxqYlT.js → index-kkA-qHB_.js} +1 -1
- langflow/frontend/assets/{index-sS6XLk3j.js → index-mzl9ULw5.js} +1 -1
- langflow/frontend/assets/{index-BjENqyKe.js → index-oxHBZk2v.js} +1 -1
- langflow/frontend/assets/{index-BejHxU5W.js → index-p2kStSPe.js} +1 -1
- langflow/frontend/assets/{index-BOYTBrh9.js → index-paQEWYGT.js} +1 -1
- langflow/frontend/assets/{index-Cd5zuUUK.js → index-r_8gs4nL.js} +1 -1
- langflow/frontend/assets/{index-AlJ7td-D.js → index-uiKla4UR.js} +1 -1
- langflow/frontend/assets/{index-B8y58M9b.js → index-vJOO5U8M.js} +1 -1
- langflow/frontend/assets/{index-CF4dtI6S.js → index-w72fDjpG.js} +1 -1
- langflow/frontend/assets/{index-C2Xd7UkR.js → index-zV82kQ6k.js} +1 -1
- langflow/frontend/assets/lazyIconImports-DTNgvPE-.js +2 -0
- langflow/frontend/assets/{use-post-add-user-HN0rRnhv.js → use-post-add-user-CvtuazTg.js} +1 -1
- langflow/frontend/index.html +1 -1
- langflow/graph/edge/base.py +2 -3
- langflow/graph/graph/base.py +14 -12
- langflow/graph/graph/constants.py +3 -0
- langflow/graph/utils.py +6 -6
- langflow/graph/vertex/base.py +4 -5
- langflow/graph/vertex/param_handler.py +1 -1
- langflow/graph/vertex/vertex_types.py +2 -2
- langflow/helpers/flow.py +1 -1
- langflow/initial_setup/setup.py +32 -30
- langflow/initial_setup/starter_projects/Blog Writer.json +2 -2
- langflow/initial_setup/starter_projects/Custom Component Generator.json +2 -2
- langflow/initial_setup/starter_projects/Document Q&A.json +1 -1
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +2 -2
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +3 -3
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +1 -1
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json +4 -4
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json +2 -2
- langflow/initial_setup/starter_projects/Market Research.json +3 -3
- langflow/initial_setup/starter_projects/Meeting Summary.json +6 -6
- langflow/initial_setup/starter_projects/Memory Chatbot.json +2 -2
- langflow/initial_setup/starter_projects/News Aggregator.json +5 -22
- langflow/initial_setup/starter_projects/Nvidia Remix.json +3 -20
- langflow/initial_setup/starter_projects/Pokédex Agent.json +1 -1
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +1 -1
- langflow/initial_setup/starter_projects/Price Deal Finder.json +5 -5
- langflow/initial_setup/starter_projects/Research Agent.json +3 -3
- langflow/initial_setup/starter_projects/SaaS Pricing.json +1 -1
- langflow/initial_setup/starter_projects/Search agent.json +1 -1
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +7 -7
- langflow/initial_setup/starter_projects/Simple Agent.json +3 -3
- langflow/initial_setup/starter_projects/Social Media Agent.json +1 -1
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +1 -1
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +3 -3
- langflow/initial_setup/starter_projects/Vector Store RAG.json +5 -5
- langflow/initial_setup/starter_projects/Youtube Analysis.json +3 -3
- langflow/interface/components.py +23 -22
- langflow/interface/initialize/loading.py +5 -5
- langflow/interface/run.py +1 -1
- langflow/interface/utils.py +1 -1
- langflow/io/__init__.py +0 -1
- langflow/langflow_launcher.py +1 -1
- langflow/load/load.py +2 -7
- langflow/logging/__init__.py +0 -1
- langflow/logging/logger.py +191 -115
- langflow/logging/setup.py +1 -1
- langflow/main.py +37 -52
- langflow/memory.py +7 -7
- langflow/middleware.py +1 -1
- langflow/processing/process.py +4 -4
- langflow/schema/artifact.py +2 -2
- langflow/schema/data.py +10 -2
- langflow/schema/dataframe.py +1 -1
- langflow/schema/message.py +1 -1
- langflow/serialization/serialization.py +1 -1
- langflow/services/auth/utils.py +2 -2
- langflow/services/cache/disk.py +1 -1
- langflow/services/cache/service.py +3 -3
- langflow/services/database/models/flow/model.py +2 -7
- langflow/services/database/models/transactions/crud.py +2 -2
- langflow/services/database/models/user/crud.py +2 -2
- langflow/services/database/service.py +8 -8
- langflow/services/database/utils.py +6 -5
- langflow/services/deps.py +2 -3
- langflow/services/factory.py +1 -1
- langflow/services/flow/flow_runner.py +7 -12
- langflow/services/job_queue/service.py +16 -15
- langflow/services/manager.py +3 -4
- langflow/services/settings/auth.py +1 -1
- langflow/services/settings/base.py +3 -8
- langflow/services/settings/manager.py +1 -1
- langflow/services/settings/utils.py +1 -1
- langflow/services/socket/__init__.py +0 -1
- langflow/services/socket/service.py +3 -3
- langflow/services/socket/utils.py +4 -4
- langflow/services/state/service.py +1 -2
- langflow/services/storage/factory.py +1 -1
- langflow/services/storage/local.py +9 -8
- langflow/services/storage/s3.py +11 -10
- langflow/services/store/service.py +3 -3
- langflow/services/store/utils.py +3 -2
- langflow/services/task/temp_flow_cleanup.py +7 -7
- langflow/services/telemetry/service.py +10 -10
- langflow/services/tracing/arize_phoenix.py +2 -2
- langflow/services/tracing/langfuse.py +1 -1
- langflow/services/tracing/langsmith.py +1 -1
- langflow/services/tracing/langwatch.py +1 -1
- langflow/services/tracing/opik.py +1 -1
- langflow/services/tracing/service.py +25 -6
- langflow/services/tracing/traceloop.py +245 -0
- langflow/services/utils.py +7 -7
- langflow/services/variable/kubernetes.py +3 -3
- langflow/services/variable/kubernetes_secrets.py +2 -1
- langflow/services/variable/service.py +5 -5
- langflow/utils/component_utils.py +9 -6
- langflow/utils/util.py +5 -5
- langflow/utils/validate.py +3 -3
- langflow/utils/voice_utils.py +2 -2
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/METADATA +2 -1
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/RECORD +342 -340
- langflow/frontend/assets/lazyIconImports-Bh1TFfvH.js +0 -2
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/entry_points.txt +0 -0
langflow/initial_setup/setup.py
CHANGED
@@ -19,7 +19,6 @@ import orjson
 import sqlalchemy as sa
 from aiofile import async_open
 from emoji import demojize, purely_emoji
-from loguru import logger
 from sqlalchemy.exc import NoResultFound
 from sqlalchemy.orm import selectinload
 from sqlmodel import col, select
@@ -33,6 +32,7 @@ from langflow.base.constants import (
 SKIPPED_FIELD_ATTRIBUTES,
 )
 from langflow.initial_setup.constants import STARTER_FOLDER_DESCRIPTION, STARTER_FOLDER_NAME
+from langflow.logging.logger import logger
 from langflow.services.auth.utils import create_super_user
 from langflow.services.database.models.flow.model import Flow, FlowCreate
 from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
@@ -517,7 +517,7 @@ def log_node_changes(node_changes_log) -> None:
 async def load_starter_projects(retries=3, delay=1) -> list[tuple[anyio.Path, dict]]:
 starter_projects = []
 folder = anyio.Path(__file__).parent / "starter_projects"
-logger.
+await logger.adebug("Loading starter projects")
 async for file in folder.glob("*.json"):
 attempt = 0
 while attempt < retries:
@@ -533,7 +533,7 @@ async def load_starter_projects(retries=3, delay=1) -> list[tuple[anyio.Path, di
 msg = f"Error loading starter project {file}: {e}"
 raise ValueError(msg) from e
 await asyncio.sleep(delay) # Wait before retrying
-logger.
+await logger.adebug(f"Loaded {len(starter_projects)} starter projects")
 return starter_projects


@@ -580,7 +580,7 @@ async def copy_profile_pictures() -> None:
 await dst_file.parent.mkdir(parents=True, exist_ok=True)
 # Offload blocking I/O to a thread
 await asyncio.to_thread(shutil.copy2, str(src_file), str(dst_file))
-logger.
+await logger.adebug(f"Copied file '{rel_path}'")

 tasks = []
 async for src_file in origin.rglob("*"):
@@ -596,7 +596,7 @@ async def copy_profile_pictures() -> None:
 await asyncio.gather(*tasks)

 except Exception as exc:
-logger.
+await logger.aexception("Error copying profile pictures")
 msg = "An error occurred while copying profile pictures."
 raise RuntimeError(msg) from exc

@@ -633,7 +633,7 @@ async def update_project_file(project_path: anyio.Path, project: dict, updated_p
 project["data"] = updated_project_data
 async with async_open(str(project_path), "w", encoding="utf-8") as f:
 await f.write(orjson.dumps(project, option=ORJSON_OPTIONS).decode())
-logger.
+await logger.adebug(f"Updated starter project {project['name']} file")


 def update_existing_project(
@@ -734,7 +734,7 @@ async def load_flows_from_directory() -> None:
 if not flows_path:
 return
 if not settings_service.auth_settings.AUTO_LOGIN:
-logger.
+await logger.awarning("AUTO_LOGIN is disabled, not loading flows from directory")
 return

 async with session_scope() as session:
@@ -749,7 +749,7 @@ async def load_flows_from_directory() -> None:
 for file_path in await asyncio.to_thread(Path(flows_path).iterdir):
 if not await anyio.Path(file_path).is_file() or file_path.suffix != ".json":
 continue
-logger.
+await logger.ainfo(f"Loading flow from file: {file_path.name}")
 async with async_open(str(file_path), "r", encoding="utf-8") as f:
 content = await f.read()
 await upsert_flow_from_file(content, file_path.stem, session, user.id)
@@ -794,7 +794,7 @@ async def load_bundles_from_urls() -> tuple[list[TemporaryDirectory], list[str]]
 if not bundle_urls:
 return [], []
 if not settings_service.auth_settings.AUTO_LOGIN:
-logger.
+await logger.awarning("AUTO_LOGIN is disabled, not loading flows from URLs")

 async with session_scope() as session:
 user = await get_user_by_username(session, settings_service.auth_settings.SUPERUSER)
@@ -844,13 +844,13 @@ async def upsert_flow_from_file(file_content: AnyStr, filename: str, session: As
 try:
 flow_id = UUID(flow_id)
 except ValueError:
-logger.
+await logger.aerror(f"Invalid UUID string: {flow_id}")
 return

 existing = await find_existing_flow(session, flow_id, flow_endpoint_name)
 if existing:
-logger.
-logger.
+await logger.adebug(f"Found existing flow: {existing.name}")
+await logger.ainfo(f"Updating existing flow: {flow_id} with endpoint name {flow_endpoint_name}")
 for key, value in flow.items():
 if hasattr(existing, key):
 # flow dict from json and db representation are not 100% the same
@@ -867,12 +867,12 @@ async def upsert_flow_from_file(file_content: AnyStr, filename: str, session: As
 try:
 existing.id = UUID(existing.id)
 except ValueError:
-logger.
+await logger.aerror(f"Invalid UUID string: {existing.id}")
 return

 session.add(existing)
 else:
-logger.
+await logger.ainfo(f"Creating new flow: {flow_id} with endpoint name {flow_endpoint_name}")

 # Assign the newly created flow to the default folder
 folder = await get_or_create_default_folder(session, user_id)
@@ -886,15 +886,15 @@ async def upsert_flow_from_file(file_content: AnyStr, filename: str, session: As

 async def find_existing_flow(session, flow_id, flow_endpoint_name):
 if flow_endpoint_name:
-logger.
+await logger.adebug(f"flow_endpoint_name: {flow_endpoint_name}")
 stmt = select(Flow).where(Flow.endpoint_name == flow_endpoint_name)
 if existing := (await session.exec(stmt)).first():
-logger.
+await logger.adebug(f"Found existing flow by endpoint name: {existing.name}")
 return existing

 stmt = select(Flow).where(Flow.id == flow_id)
 if existing := (await session.exec(stmt)).first():
-logger.
+await logger.adebug(f"Found existing flow by id: {flow_id}")
 return existing
 return None

@@ -916,7 +916,7 @@ async def create_or_update_starter_projects(all_types_dict: dict) -> None:
 starter_projects = await load_starter_projects()

 if get_settings_service().settings.update_starter_projects:
-logger.
+await logger.adebug("Updating starter projects")
 # 1. Delete all existing starter projects
 successfully_updated_projects = 0
 await delete_starter_projects(session, new_folder.id)
@@ -959,13 +959,13 @@ async def create_or_update_starter_projects(all_types_dict: dict) -> None:
 new_folder_id=new_folder.id,
 )
 except Exception: # noqa: BLE001
-logger.
+await logger.aexception(f"Error while creating starter project {project_name}")

 successfully_updated_projects += 1
-logger.
+await logger.adebug(f"Successfully updated {successfully_updated_projects} starter projects")
 else:
 # Even if we're not updating starter projects, we still need to create any that don't exist
-logger.
+await logger.adebug("Creating new starter projects")
 successfully_created_projects = 0
 existing_flows = await get_all_flows_similar_to_project(session, new_folder.id)
 existing_flow_names = [existing_flow.name for existing_flow in existing_flows]
@@ -997,9 +997,9 @@ async def create_or_update_starter_projects(all_types_dict: dict) -> None:
 new_folder_id=new_folder.id,
 )
 except Exception: # noqa: BLE001
-logger.
+await logger.aexception(f"Error while creating starter project {project_name}")
 successfully_created_projects += 1
-logger.
+await logger.adebug(f"Successfully created {successfully_created_projects} starter projects")


 async def initialize_super_user_if_needed() -> None:
@@ -1016,7 +1016,7 @@ async def initialize_super_user_if_needed() -> None:
 super_user = await create_super_user(db=async_session, username=username, password=password)
 await get_variable_service().initialize_user_variables(super_user.id, async_session)
 _ = await get_or_create_default_folder(async_session, super_user.id)
-logger.
+await logger.adebug("Super user initialized")


 async def get_or_create_default_folder(session: AsyncSession, user_id: UUID) -> FolderRead:
@@ -1082,22 +1082,24 @@ async def sync_flows_from_fs():
 await session.commit()
 await session.refresh(flow)
 except Exception: # noqa: BLE001
-logger.
+await logger.aexception(
+f"Couldn't update flow {flow.id} in database from path {path}"
+)
 flow_mtimes[flow.id] = new_mtime
 except Exception: # noqa: BLE001
-logger.
+await logger.aexception(f"Error while handling flow file {path}")
 except asyncio.CancelledError:
-logger.
+await logger.adebug("Flow sync cancelled")
 break
 except (sa.exc.OperationalError, ValueError) as e:
 if "no active connection" in str(e) or "connection is closed" in str(e):
-logger.
+await logger.adebug("Database connection lost, assuming shutdown")
 break # Exit gracefully, don't error
 raise # Re-raise if it's a real connection problem
 except Exception: # noqa: BLE001
-logger.
+await logger.aexception("Error while syncing flows from database")
 break

 await asyncio.sleep(fs_flows_polling_interval)
 except asyncio.CancelledError:
-logger.
+await logger.adebug("Flow sync task cancelled")
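Every change in this file follows the same pattern, which also recurs in the starter-project component code below: the synchronous loguru logger is replaced by langflow's own logger and its awaited async methods (adebug, ainfo, awarning, aerror, aexception). A minimal before/after sketch of that pattern, assuming only the langflow.logging.logger API visible in the added lines above; the function and message names here are illustrative, not taken from the package:

# Hypothetical sketch of the logging migration in this release; not code from the package itself.

# Before (0.5.0.dev36): a module-level loguru logger with synchronous calls, e.g.
#   from loguru import logger
#   logger.debug("some message")

# After (0.5.0.dev38): langflow's own logger, with async variants awaited from coroutines.
from langflow.logging.logger import logger  # import path as added in the diff above


async def load_starter_projects_example() -> None:
    # Async methods mirror the sync levels with an "a" prefix and must be awaited.
    await logger.adebug("Loading starter projects")
    try:
        raise ValueError("example failure")
    except ValueError:
        # aexception logs the message together with the active traceback.
        await logger.aexception("Error while loading an example starter project")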
@@ -978,7 +978,7 @@
 "legacy": false,
 "lf_version": "1.4.2",
 "metadata": {
-"code_hash": "
+"code_hash": "252132357639",
 "module": "langflow.components.data.url.URLComponent"
 },
 "minimized": false,
@@ -1069,7 +1069,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-
"value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n documentation: str = \"https://docs.langflow.org/components-data#url\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. 
Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Extracted Pages\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Content\", name=\"raw_results\", method=\"fetch_content_as_message\", tool_mode=False),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # 
Allow customization of excluded directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.debug(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.debug(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.debug(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def fetch_content_as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n"
+
"value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n documentation: str = \"https://docs.langflow.org/components-data#url\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. 
Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Extracted Pages\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Content\", name=\"raw_results\", method=\"fetch_content_as_message\", tool_mode=False),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # 
Allow customization of excluded directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.debug(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.debug(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.debug(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def fetch_content_as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n"
 },
 "continue_on_failure": {
 "_input_type": "BoolInput",
@@ -237,7 +237,7 @@
 "legacy": false,
 "lf_version": "1.4.3",
 "metadata": {
-"code_hash": "
+"code_hash": "464cc8b8fdd2",
 "module": "langflow.components.helpers.memory.MemoryComponent"
 },
 "output_types": [],
@@ -290,7 +290,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-
"value": "from typing import Any, cast\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#message-history\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender_type\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender\",\n display_name=\"Sender\",\n info=\"The sender of the message. Might be Machine or User. \"\n \"If empty, the current sender parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n value=\"\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Message\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True),\n Output(display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True\n ),\n Output(\n display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n ),\n ]\n return frontend_node\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. 
Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n async def retrieve_messages(self) -> Data:\n sender_type = self.sender_type\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender_type == \"Machine and User\":\n sender_type = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender_type:\n expected_type = MESSAGE_SENDER_AI if sender_type == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n # For internal memory, we always fetch the last N messages by ordering by DESC\n stored = await aget_messages(\n sender=sender_type,\n sender_name=sender_name,\n session_id=session_id,\n limit=10000,\n order=order,\n )\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n\n # self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, await self.retrieve_messages())\n # self.status = stored_text\n return Message(text=stored_text)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
293 +
"value": "from typing import Any, cast\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#message-history\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender_type\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender\",\n display_name=\"Sender\",\n info=\"The sender of the message. Might be Machine or User. \"\n \"If empty, the current sender parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n value=\"\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Message\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True),\n Output(display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"messages_text\", method=\"retrieve_messages_as_text\", dynamic=True\n ),\n Output(\n display_name=\"Dataframe\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n ),\n ]\n return frontend_node\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. 
Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n async def retrieve_messages(self) -> Data:\n sender_type = self.sender_type\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender_type == \"Machine and User\":\n sender_type = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender_type:\n expected_type = MESSAGE_SENDER_AI if sender_type == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n # For internal memory, we always fetch the last N messages by ordering by DESC\n stored = await aget_messages(\n sender=sender_type,\n sender_name=sender_name,\n session_id=session_id,\n limit=10000,\n order=order,\n )\n if n_messages:\n stored = stored[-n_messages:] if order == \"ASC\" else stored[:n_messages]\n\n # self.status = stored\n return cast(\"Data\", stored)\n\n async def retrieve_messages_as_text(self) -> Message:\n stored_text = data_to_text(self.template, await self.retrieve_messages())\n # self.status = stored_text\n return Message(text=stored_text)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
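The only substantive change between the removed and added Message History code above is that the Retrieve output now passes the target type to cast as a string literal (cast("Data", stored) rather than cast(Data, stored)). A minimal sketch of why the two spellings behave identically at runtime; the Data stand-in below is illustrative, not the real langflow.schema.data.Data:

    from typing import cast


    class Data:  # stand-in for langflow.schema.data.Data
        pass


    stored = [Data(), Data()]

    # typing.cast does no conversion at runtime; it returns its second argument
    # unchanged. The first argument may be the class itself or a string forward
    # reference, which is the form the updated component uses.
    assert cast(Data, stored) is stored
    assert cast("Data", stored) is stored

Type checkers treat the quoted form as a forward reference, so the cast no longer requires the name to be resolvable at that point in the code.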
294 294  },
295 295  "memory": {
296 296  "_input_type": "HandleInput",
@@ -1276,7 +1276,7 @@
1276 1276  "show": true,
1277 1277  "title_case": false,
1278 1278  "type": "code",
1279 -
"value": "from copy import deepcopy\nfrom typing import Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, FileInput, IntInput, Output\nfrom langflow.schema.data import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the number of files processed.\"\"\"\n if field_name == \"path\":\n # Add outputs based on the number of files in the path\n if len(field_value) == 0:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n\n if len(field_value) == 1:\n # We need to check if the file is structured content\n file_path = frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n # All files get the raw content and path outputs\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # For multiple files, we only show the files output\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"),\n )\n\n return frontend_node\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n 
\"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
1279 +
"value": "\"\"\"Enhanced file component v2 with mypy and ruff compliance.\"\"\"\n\nfrom __future__ import annotations\n\nfrom copy import deepcopy\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n FileInput,\n IntInput,\n MessageTextInput,\n Output,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\nif TYPE_CHECKING:\n from langflow.schema import DataFrame\n\n\nclass MockConversionStatus(Enum):\n \"\"\"Mock ConversionStatus for fallback compatibility.\"\"\"\n\n SUCCESS = \"success\"\n FAILURE = \"failure\"\n\n\nclass MockInputFormat(Enum):\n \"\"\"Mock InputFormat for fallback compatibility.\"\"\"\n\n PDF = \"pdf\"\n IMAGE = \"image\"\n\n\nclass MockImageRefMode(Enum):\n \"\"\"Mock ImageRefMode for fallback compatibility.\"\"\"\n\n PLACEHOLDER = \"placeholder\"\n EMBEDDED = \"embedded\"\n\n\nclass DoclingImports:\n \"\"\"Container for docling imports with type information.\"\"\"\n\n def __init__(\n self,\n conversion_status: type[Enum],\n input_format: type[Enum],\n document_converter: type,\n image_ref_mode: type[Enum],\n strategy: str,\n ) -> None:\n self.conversion_status = conversion_status\n self.input_format = input_format\n self.document_converter = document_converter\n self.image_ref_mode = image_ref_mode\n self.strategy = strategy\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Enhanced file component v2 that combines standard file loading with optional Docling processing and export.\n\n This component supports all features of the standard File component, plus an advanced mode\n that enables Docling document processing and export to various formats (Markdown, HTML, etc.).\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling supported formats from original component\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. 
Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n def _path_value(self, template) -> list[str]:\n # Get current path value\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Update build configuration to show/hide fields based on file count and advanced_mode.\"\"\"\n if field_name == \"path\":\n # Get current path value\n path_value = self._path_value(build_config)\n file_path = path_value[0] if len(path_value) > 0 else \"\"\n\n # Show/hide Advanced Parser based on file count (only for single files)\n file_count = len(field_value) if field_value else 0\n if file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n build_config[\"advanced_mode\"][\"show\"] = True\n else:\n build_config[\"advanced_mode\"][\"show\"] = False\n build_config[\"advanced_mode\"][\"value\"] = False # Reset to False when hidden\n\n # Hide all advanced fields when Advanced Parser is not available\n advanced_fields = [\n \"pipeline\",\n \"ocr_engine\",\n \"doc_key\",\n \"md_image_placeholder\",\n \"md_page_break_placeholder\",\n ]\n for field in advanced_fields:\n if field in build_config:\n build_config[field][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n # Show/hide advanced fields based on advanced_mode (only if single file)\n advanced_fields = [\n \"pipeline\",\n \"ocr_engine\",\n \"doc_key\",\n \"md_image_placeholder\",\n \"md_page_break_placeholder\",\n ]\n\n for field in advanced_fields:\n if field in build_config:\n build_config[field][\"show\"] = field_value\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on the number of files and their types.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n # Add outputs based on the number of files in the path\n template = frontend_node.get(\"template\", {})\n 
path_value = self._path_value(template)\n if len(path_value) == 0:\n return frontend_node\n\n # Clear existing outputs\n frontend_node[\"outputs\"] = []\n\n if len(path_value) == 1:\n # We need to check if the file is structured content\n file_path = path_value[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n # Add outputs based on advanced mode\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n\n if advanced_mode:\n # Advanced mode: Structured Output, Markdown, and File Path\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Normal mode: Raw Content and File Path\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # For multiple files, we show the files output (DataFrame format)\n # Advanced Parser is not available for multiple files\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"),\n )\n\n return frontend_node\n\n def _try_import_docling(self) -> DoclingImports | None:\n \"\"\"Try different import strategies for docling components.\"\"\"\n # Try strategy 1: Latest docling structure\n try:\n from docling.datamodel.base_models import ConversionStatus, InputFormat # type: ignore[import-untyped]\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n from docling_core.types.doc import ImageRefMode # type: ignore[import-untyped]\n\n self.log(\"Using latest docling import structure\")\n return DoclingImports(\n conversion_status=ConversionStatus,\n input_format=InputFormat,\n document_converter=DocumentConverter,\n image_ref_mode=ImageRefMode,\n strategy=\"latest\",\n )\n except ImportError as e:\n self.log(f\"Latest docling structure failed: {e}\")\n\n # Try strategy 2: Alternative import paths\n try:\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n from docling_core.types.doc import ImageRefMode # type: ignore[import-untyped]\n\n # Try to get ConversionStatus from different locations\n conversion_status: type[Enum] = MockConversionStatus\n input_format: type[Enum] = MockInputFormat\n\n try:\n from docling_core.types import ConversionStatus, InputFormat # type: ignore[import-untyped]\n\n conversion_status = ConversionStatus\n input_format = InputFormat\n except ImportError:\n try:\n from docling.datamodel import ConversionStatus, InputFormat # type: ignore[import-untyped]\n\n conversion_status = ConversionStatus\n input_format = InputFormat\n except ImportError:\n # Use mock enums if we can't find them\n pass\n\n self.log(\"Using 
alternative docling import structure\")\n return DoclingImports(\n conversion_status=conversion_status,\n input_format=input_format,\n document_converter=DocumentConverter,\n image_ref_mode=ImageRefMode,\n strategy=\"alternative\",\n )\n except ImportError as e:\n self.log(f\"Alternative docling structure failed: {e}\")\n\n # Try strategy 3: Basic converter only\n try:\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n\n self.log(\"Using basic docling import structure with mocks\")\n return DoclingImports(\n conversion_status=MockConversionStatus,\n input_format=MockInputFormat,\n document_converter=DocumentConverter,\n image_ref_mode=MockImageRefMode,\n strategy=\"basic\",\n )\n except ImportError as e:\n self.log(f\"Basic docling structure failed: {e}\")\n\n # Strategy 4: Complete fallback - return None to indicate failure\n return None\n\n def _create_advanced_converter(self, docling_imports: DoclingImports) -> Any:\n \"\"\"Create advanced converter with pipeline options if available.\"\"\"\n try:\n from docling.datamodel.pipeline_options import PdfPipelineOptions # type: ignore[import-untyped]\n from docling.document_converter import PdfFormatOption # type: ignore[import-untyped]\n\n document_converter = docling_imports.document_converter\n input_format = docling_imports.input_format\n\n # Create basic pipeline options\n pipeline_options = PdfPipelineOptions()\n\n # Configure OCR if specified and available\n if self.ocr_engine:\n try:\n from docling.models.factories import get_ocr_factory # type: ignore[import-untyped]\n\n pipeline_options.do_ocr = True\n ocr_factory = get_ocr_factory(allow_external_plugins=False)\n ocr_options = ocr_factory.create_options(kind=self.ocr_engine)\n pipeline_options.ocr_options = ocr_options\n self.log(f\"Configured OCR with engine: {self.ocr_engine}\")\n except Exception as e: # noqa: BLE001\n self.log(f\"Could not configure OCR: {e}, proceeding without OCR\")\n pipeline_options.do_ocr = False\n\n # Create format options\n pdf_format_option = PdfFormatOption(pipeline_options=pipeline_options)\n format_options = {}\n if hasattr(input_format, \"PDF\"):\n format_options[input_format.PDF] = pdf_format_option\n if hasattr(input_format, \"IMAGE\"):\n format_options[input_format.IMAGE] = pdf_format_option\n\n return document_converter(format_options=format_options)\n\n except Exception as e: # noqa: BLE001\n self.log(f\"Could not create advanced converter: {e}, using basic converter\")\n return docling_imports.document_converter()\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Check if file is compatible with Docling processing.\"\"\"\n # All VALID_EXTENSIONS are Docling compatible (except for TEXT_FILE_TYPES which may overlap)\n docling_extensions = [\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n ]\n return any(file_path.lower().endswith(ext) for ext in docling_extensions)\n\n def process_files(\n self,\n file_list: list[BaseFileComponent.BaseFile],\n ) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Process files using standard parsing or Docling based on advanced_mode and file type.\"\"\"\n\n def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | 
None:\n \"\"\"Process a single file using standard text parsing.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n def process_file_docling(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Process a single file using Docling if compatible, otherwise standard processing.\"\"\"\n # Try Docling first if file is compatible and advanced mode is enabled\n try:\n return self._process_with_docling_and_export(file_path)\n except Exception as e: # noqa: BLE001\n self.log(f\"Docling processing failed for {file_path}: {e}, falling back to standard processing\")\n if not silent_errors:\n # Return error data instead of raising\n return Data(data={\"error\": f\"Docling processing failed: {e}\", \"file_path\": file_path})\n\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n file_path = str(file_list[0].path)\n if self.advanced_mode and self._is_docling_compatible(file_path):\n processed_data = process_file_docling(file_path)\n if not processed_data:\n msg = f\"Failed to process file with Docling: {file_path}\"\n raise ValueError(msg)\n\n # Serialize processed data to match Data structure\n serialized_data = processed_data.serialize_model()\n\n # Now, if doc is nested, we need to unravel it\n clean_data: list[Data | None] = [processed_data]\n\n # This is where we've manually processed the data\n try:\n if \"exported_content\" not in serialized_data:\n clean_data = [\n Data(\n data={\n \"file_path\": file_path,\n **(\n item[\"element\"]\n if \"element\" in item\n else {k: v for k, v in item.items() if k != \"file_path\"}\n ),\n }\n )\n for item in serialized_data[\"doc\"]\n ]\n except Exception as _: # noqa: BLE001\n raise ValueError(serialized_data) from None\n\n # Repeat file_list to match the number of processed data elements\n final_data: list[Data | None] = clean_data\n return self.rollup_data(file_list, final_data)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n my_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file_standard,\n max_concurrency=concurrency,\n )\n\n return self.rollup_data(file_list, my_data)\n\n def load_files_advanced(self) -> DataFrame:\n \"\"\"Load files using advanced Docling processing and export to an advanced format.\"\"\"\n # TODO: Update\n self.markdown = False\n return self.load_files()\n\n def load_files_markdown(self) -> Message:\n \"\"\"Load files using advanced Docling processing and export to Markdown format.\"\"\"\n self.markdown = True\n result = self.load_files()\n return Message(text=str(result.text[0]))\n\n def _process_with_docling_and_export(self, file_path: str) -> Data:\n \"\"\"Process a single file with Docling and export to the specified format.\"\"\"\n # Import docling components only when needed\n docling_imports = self._try_import_docling()\n\n if docling_imports is None:\n msg = \"Docling not available for advanced processing\"\n raise ImportError(msg)\n\n conversion_status = 
docling_imports.conversion_status\n document_converter = docling_imports.document_converter\n image_ref_mode = docling_imports.image_ref_mode\n\n try:\n # Create converter based on strategy and pipeline setting\n if docling_imports.strategy == \"latest\" and self.pipeline == \"standard\":\n converter = self._create_advanced_converter(docling_imports)\n else:\n # Use basic converter for compatibility\n converter = document_converter()\n self.log(\"Using basic DocumentConverter for Docling processing\")\n\n # Process single file\n result = converter.convert(file_path)\n\n # Check if conversion was successful\n success = False\n if hasattr(result, \"status\"):\n if hasattr(conversion_status, \"SUCCESS\"):\n success = result.status == conversion_status.SUCCESS\n else:\n success = str(result.status).lower() == \"success\"\n elif hasattr(result, \"document\"):\n # If no status but has document, assume success\n success = result.document is not None\n\n if not success:\n return Data(data={\"error\": \"Docling conversion failed\", \"file_path\": file_path})\n\n if self.markdown:\n self.log(\"Exporting document to Markdown format\")\n # Export the document to the specified format\n exported_content = self._export_document(result.document, image_ref_mode)\n\n return Data(\n text=exported_content,\n data={\n \"exported_content\": exported_content,\n \"export_format\": self.EXPORT_FORMAT,\n \"file_path\": file_path,\n },\n )\n\n return Data(\n data={\n \"doc\": self.docling_to_dataframe_simple(result.document.export_to_dict()),\n \"export_format\": self.EXPORT_FORMAT,\n \"file_path\": file_path,\n }\n )\n\n except Exception as e: # noqa: BLE001\n return Data(data={\"error\": f\"Docling processing error: {e!s}\", \"file_path\": file_path})\n\n def docling_to_dataframe_simple(self, doc):\n \"\"\"Extract all text elements into a simple DataFrame.\"\"\"\n return [\n {\n \"page_no\": text[\"prov\"][0][\"page_no\"] if text[\"prov\"] else None,\n \"label\": text[\"label\"],\n \"text\": text[\"text\"],\n \"level\": text.get(\"level\", None), # for headers\n }\n for text in doc[\"texts\"]\n ]\n\n def _export_document(self, document: Any, image_ref_mode: type[Enum]) -> str:\n \"\"\"Export document to Markdown format with placeholder images.\"\"\"\n try:\n image_mode = (\n image_ref_mode(self.IMAGE_MODE) if hasattr(image_ref_mode, self.IMAGE_MODE) else self.IMAGE_MODE\n )\n\n # Always export to Markdown since it's fixed\n return document.export_to_markdown(\n image_mode=image_mode,\n image_placeholder=self.md_image_placeholder,\n page_break_placeholder=self.md_page_break_placeholder,\n )\n\n except Exception as e: # noqa: BLE001\n self.log(f\"Markdown export failed: {e}, using basic text export\")\n # Fallback to basic text export\n try:\n return document.export_to_text()\n except Exception: # noqa: BLE001\n return str(document)\n"
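The added component's _try_import_docling probes progressively older docling layouts before giving up, so advanced mode degrades gracefully when only part of the library (or none of it) is installed. A condensed sketch of that layered-fallback pattern; the import paths are the ones the component tries, while the return shape here is simplified and illustrative:

    from enum import Enum


    class MockConversionStatus(Enum):
        SUCCESS = "success"
        FAILURE = "failure"


    def resolve_docling():
        """Return (converter_cls, status_enum), or None when docling is absent."""
        try:
            # Newest layout: the status enum lives next to the data models.
            from docling.datamodel.base_models import ConversionStatus
            from docling.document_converter import DocumentConverter
        except ImportError:
            pass
        else:
            return DocumentConverter, ConversionStatus
        try:
            # Older layouts: only the converter is importable, so substitute a
            # mock enum that still supports the SUCCESS comparison downstream.
            from docling.document_converter import DocumentConverter
        except ImportError:
            # Docling missing entirely; the caller falls back to plain parsing.
            return None
        return DocumentConverter, MockConversionStatus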
1280 1280  },
1281 1281  "concurrency_multithreading": {
1282 1282  "_input_type": "IntInput",