langflow-base-nightly 0.5.0.dev37__py3-none-any.whl → 0.5.0.dev39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langflow/__main__.py +1 -1
- langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py +122 -0
- langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py +24 -30
- langflow/alembic/versions/58b28437a398_modify_nullable.py +6 -6
- langflow/alembic/versions/79e675cb6752_change_datetime_type.py +24 -30
- langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py +12 -13
- langflow/api/build.py +21 -26
- langflow/api/health_check_router.py +3 -3
- langflow/api/utils.py +3 -3
- langflow/api/v1/callback.py +2 -2
- langflow/api/v1/chat.py +19 -31
- langflow/api/v1/endpoints.py +10 -10
- langflow/api/v1/flows.py +1 -1
- langflow/api/v1/knowledge_bases.py +3 -3
- langflow/api/v1/mcp.py +12 -12
- langflow/api/v1/mcp_projects.py +405 -120
- langflow/api/v1/mcp_utils.py +8 -8
- langflow/api/v1/schemas.py +2 -7
- langflow/api/v1/store.py +1 -1
- langflow/api/v1/validate.py +2 -2
- langflow/api/v1/voice_mode.py +58 -62
- langflow/api/v2/files.py +2 -2
- langflow/api/v2/mcp.py +10 -9
- langflow/base/composio/composio_base.py +21 -2
- langflow/base/data/docling_utils.py +194 -0
- langflow/base/embeddings/aiml_embeddings.py +1 -1
- langflow/base/flow_processing/utils.py +1 -2
- langflow/base/io/__init__.py +0 -1
- langflow/base/langwatch/utils.py +2 -1
- langflow/base/mcp/util.py +49 -47
- langflow/base/prompts/api_utils.py +1 -1
- langflow/base/tools/flow_tool.py +2 -2
- langflow/base/tools/run_flow.py +2 -6
- langflow/components/FAISS/__init__.py +34 -0
- langflow/components/Notion/add_content_to_page.py +2 -2
- langflow/components/Notion/list_database_properties.py +2 -2
- langflow/components/Notion/list_pages.py +2 -2
- langflow/components/Notion/page_content_viewer.py +2 -2
- langflow/components/Notion/update_page_property.py +1 -1
- langflow/components/agentql/agentql_api.py +2 -10
- langflow/components/agents/agent.py +249 -55
- langflow/components/agents/mcp_component.py +14 -14
- langflow/components/anthropic/anthropic.py +5 -4
- langflow/components/assemblyai/assemblyai_get_subtitles.py +2 -2
- langflow/components/assemblyai/assemblyai_lemur.py +2 -2
- langflow/components/assemblyai/assemblyai_list_transcripts.py +2 -2
- langflow/components/assemblyai/assemblyai_poll_transcript.py +2 -2
- langflow/components/assemblyai/assemblyai_start_transcript.py +2 -2
- langflow/components/cassandra/__init__.py +40 -0
- langflow/components/chroma/__init__.py +34 -0
- langflow/components/clickhouse/__init__.py +34 -0
- langflow/components/couchbase/__init__.py +34 -0
- langflow/components/data/file.py +575 -55
- langflow/components/data/url.py +1 -1
- langflow/components/datastax/__init__.py +3 -3
- langflow/components/datastax/astra_assistant_manager.py +3 -3
- langflow/components/datastax/create_assistant.py +1 -2
- langflow/components/deactivated/merge_data.py +1 -2
- langflow/components/deactivated/sub_flow.py +6 -7
- langflow/components/deactivated/vectara_self_query.py +3 -3
- langflow/components/docling/__init__.py +0 -198
- langflow/components/docling/docling_inline.py +1 -1
- langflow/components/elastic/__init__.py +37 -0
- langflow/components/embeddings/text_embedder.py +3 -3
- langflow/components/firecrawl/firecrawl_extract_api.py +2 -9
- langflow/components/google/gmail.py +1 -1
- langflow/components/google/google_generative_ai.py +5 -11
- langflow/components/groq/groq.py +4 -3
- langflow/components/helpers/current_date.py +2 -3
- langflow/components/helpers/memory.py +1 -1
- langflow/components/ibm/watsonx.py +1 -1
- langflow/components/ibm/watsonx_embeddings.py +1 -1
- langflow/components/langwatch/langwatch.py +3 -3
- langflow/components/logic/flow_tool.py +2 -2
- langflow/components/logic/notify.py +1 -1
- langflow/components/logic/run_flow.py +2 -3
- langflow/components/logic/sub_flow.py +4 -5
- langflow/components/mem0/mem0_chat_memory.py +2 -8
- langflow/components/milvus/__init__.py +34 -0
- langflow/components/mongodb/__init__.py +34 -0
- langflow/components/nvidia/nvidia.py +3 -3
- langflow/components/olivya/olivya.py +7 -7
- langflow/components/ollama/ollama.py +9 -6
- langflow/components/perplexity/perplexity.py +3 -13
- langflow/components/pgvector/__init__.py +34 -0
- langflow/components/pinecone/__init__.py +34 -0
- langflow/components/processing/batch_run.py +8 -8
- langflow/components/processing/data_operations.py +2 -2
- langflow/components/processing/merge_data.py +1 -2
- langflow/components/processing/message_to_data.py +2 -3
- langflow/components/processing/parse_json_data.py +1 -1
- langflow/components/prototypes/python_function.py +2 -3
- langflow/components/qdrant/__init__.py +34 -0
- langflow/components/redis/__init__.py +36 -2
- langflow/components/redis/redis.py +75 -29
- langflow/components/redis/redis_chat.py +43 -0
- langflow/components/serpapi/serp.py +1 -1
- langflow/components/supabase/__init__.py +37 -0
- langflow/components/tavily/tavily_extract.py +1 -1
- langflow/components/tavily/tavily_search.py +1 -1
- langflow/components/tools/calculator.py +2 -2
- langflow/components/tools/python_code_structured_tool.py +3 -10
- langflow/components/tools/python_repl.py +2 -2
- langflow/components/tools/searxng.py +3 -3
- langflow/components/tools/serp_api.py +2 -2
- langflow/components/tools/tavily_search_tool.py +2 -2
- langflow/components/tools/yahoo_finance.py +1 -1
- langflow/components/twelvelabs/video_embeddings.py +4 -4
- langflow/components/upstash/__init__.py +34 -0
- langflow/components/vectara/__init__.py +37 -0
- langflow/components/vectorstores/__init__.py +0 -69
- langflow/components/vectorstores/local_db.py +2 -1
- langflow/components/weaviate/__init__.py +34 -0
- langflow/components/yahoosearch/yahoo.py +1 -1
- langflow/components/youtube/trending.py +3 -4
- langflow/custom/attributes.py +2 -1
- langflow/custom/code_parser/code_parser.py +1 -1
- langflow/custom/custom_component/base_component.py +1 -1
- langflow/custom/custom_component/component.py +16 -2
- langflow/custom/dependency_analyzer.py +165 -0
- langflow/custom/directory_reader/directory_reader.py +7 -7
- langflow/custom/directory_reader/utils.py +1 -2
- langflow/custom/utils.py +63 -45
- langflow/events/event_manager.py +1 -1
- langflow/frontend/assets/{SlackIcon-CnvyOamQ.js → SlackIcon-Cr3Q15Px.js} +1 -1
- langflow/frontend/assets/{Wikipedia-nyTEXdr2.js → Wikipedia-GxM5sPdM.js} +1 -1
- langflow/frontend/assets/{Wolfram-BYMQkNSq.js → Wolfram-BN3-VOCA.js} +1 -1
- langflow/frontend/assets/{index-DZTC5pdT.js → index-28oOcafk.js} +1 -1
- langflow/frontend/assets/{index-ChXJpBz4.js → index-2wSXqBtB.js} +1 -1
- langflow/frontend/assets/{index-BB15_iOb.js → index-3wW7BClE.js} +1 -1
- langflow/frontend/assets/{index-DKHNourL.js → index-6pyH3ZJB.js} +1 -1
- langflow/frontend/assets/{index-BvwZfF2i.js → index-AWCSdofD.js} +1 -1
- langflow/frontend/assets/{index-Bvxg4_ux.js → index-B2Zgv_xv.js} +1 -1
- langflow/frontend/assets/{index-Bd6WtbKA.js → index-B2ptVQGM.js} +1 -1
- langflow/frontend/assets/{index-C7QWbnLK.js → index-B3TANVes.js} +1 -1
- langflow/frontend/assets/{index-CpvYQ0ug.js → index-B4yCvZKV.js} +1 -1
- langflow/frontend/assets/{index-Dg-63Si_.js → index-BC65VuWx.js} +1 -1
- langflow/frontend/assets/{index-C6jri9Wm.js → index-BCDSei1q.js} +1 -1
- langflow/frontend/assets/{index-OazXJdEl.js → index-BJy50PvP.js} +1 -1
- langflow/frontend/assets/{index-CWdkbVsd.js → index-BKseQQ2I.js} +1 -1
- langflow/frontend/assets/{index-CaQ_H9ww.js → index-BLTxEeTi.js} +1 -1
- langflow/frontend/assets/{index-DGRMNe9n.js → index-BRg1f4Mu.js} +1 -1
- langflow/frontend/assets/{index-D8lOi1GI.js → index-BS8Vo8nc.js} +1 -1
- langflow/frontend/assets/{index-B748uLP1.js → index-BTKOU4xC.js} +1 -1
- langflow/frontend/assets/{index-Dqd4RjYA.js → index-BVwJDmw-.js} +1 -1
- langflow/frontend/assets/{index-DbMFlnHE.js → index-BWYuQ2Sj.js} +1 -1
- langflow/frontend/assets/{index-BEMw2Np8.js → index-BWdLILDG.js} +1 -1
- langflow/frontend/assets/{index-BmX5CoED.js → index-BZcw4827.js} +1 -1
- langflow/frontend/assets/{index-CyPvTB63.js → index-Bbi87Ve4.js} +1 -1
- langflow/frontend/assets/{index-BTEW9e8P.js → index-Bf0IYKLd.js} +1 -1
- langflow/frontend/assets/{index-BZgXW854.js → index-Bg5nrMRh.js} +1 -1
- langflow/frontend/assets/{index-BBxAPk1y.js → index-BiC280Nx.js} +1 -1
- langflow/frontend/assets/{index-BR0bkVqX.js → index-BiKKN6FR.js} +1 -1
- langflow/frontend/assets/{index-CTrt1Q_j.js → index-Bief6eyJ.js} +1 -1
- langflow/frontend/assets/{index-D5_DsUJc.js → index-BkXec1Yf.js} +1 -1
- langflow/frontend/assets/{index-CZQ9rXNa.js → index-Bnl6QHtP.js} +1 -1
- langflow/frontend/assets/{index-BChjg6Az.js → index-BpxbUiZD.js} +1979 -1979
- langflow/frontend/assets/{index-BOeo01QB.js → index-BrJV8psX.js} +1 -1
- langflow/frontend/assets/{index-DysKpOuj.js → index-BwLWcUXL.js} +1 -1
- langflow/frontend/assets/{index-Bnqod3vk.js → index-Bx7dBY26.js} +1 -1
- langflow/frontend/assets/{index-D3DDfngy.js → index-C-EdnFdA.js} +1 -1
- langflow/frontend/assets/{index-Bsa0xZyL.js → index-C-Xfg4cD.js} +1 -1
- langflow/frontend/assets/{index-BTrsh9LS.js → index-C1f2wMat.js} +1 -1
- langflow/frontend/assets/index-C1xroOlH.css +1 -0
- langflow/frontend/assets/{index-B1YN7oMV.js → index-C3KequvP.js} +1 -1
- langflow/frontend/assets/{index-DzW2mfkK.js → index-C3ZjKdCD.js} +1 -1
- langflow/frontend/assets/{index-ajRge-Mg.js → index-C3l0zYn0.js} +1 -1
- langflow/frontend/assets/{index-cvZdgWHQ.js → index-C3yvArUT.js} +1 -1
- langflow/frontend/assets/{index-C-2hghRJ.js → index-C9Cxnkl8.js} +1 -1
- langflow/frontend/assets/{index-BhIOhlCH.js → index-CBc8fEAE.js} +1 -1
- langflow/frontend/assets/{index-B3Sur4Z3.js → index-CBvrGgID.js} +1 -1
- langflow/frontend/assets/{index-CCePCqkT.js → index-CD-PqGCY.js} +1 -1
- langflow/frontend/assets/{index-8yMsjVV2.js → index-CGO1CiUr.js} +1 -1
- langflow/frontend/assets/{index-DF5VwgU6.js → index-CH5UVA9b.js} +1 -1
- langflow/frontend/assets/{index-dcnYpT9N.js → index-CLJeJYjH.js} +1 -1
- langflow/frontend/assets/{index-DfxYyS3M.js → index-CMZ79X-Y.js} +1 -1
- langflow/frontend/assets/{index-ya2uXE8v.js → index-CMzfJKiW.js} +1 -1
- langflow/frontend/assets/{index-DkelbYy7.js → index-CNw1H-Wc.js} +1 -1
- langflow/frontend/assets/{index-DytJENYD.js → index-CPHEscq9.js} +1 -1
- langflow/frontend/assets/{index-Bv8h2Z-q.js → index-CRPKJZw9.js} +1 -1
- langflow/frontend/assets/{index-D-9TI74R.js → index-CRPyCfYy.js} +1 -1
- langflow/frontend/assets/{index-BLGYN-9b.js → index-CRcMqCIj.js} +1 -1
- langflow/frontend/assets/{index-tVYiABdp.js → index-CUVDws8F.js} +1 -1
- langflow/frontend/assets/{index-CpcbQZIF.js → index-CVWQfRYZ.js} +1 -1
- langflow/frontend/assets/{index-DPCzHdsC.js → index-CVl6MbaM.js} +1 -1
- langflow/frontend/assets/{index-DkXy1WFo.js → index-CVwWoX99.js} +1 -1
- langflow/frontend/assets/{index-DK1Ptcc4.js → index-CWPzZtSx.js} +1 -1
- langflow/frontend/assets/{index-DHq8TQPB.js → index-CZqRL9DE.js} +1 -1
- langflow/frontend/assets/{index-DnEGCgih.js → index-CdIf07Rw.js} +1 -1
- langflow/frontend/assets/{index-BIQQCMvz.js → index-Cewy7JZE.js} +1 -1
- langflow/frontend/assets/{index-D8GJngXa.js → index-CfwLpbMM.js} +1 -1
- langflow/frontend/assets/{index-C_TdzfAn.js → index-CiR1dxI4.js} +1 -1
- langflow/frontend/assets/{index-BzL_EoKd.js → index-CiixOzDG.js} +1 -1
- langflow/frontend/assets/{index-Boso-xEw.js → index-ClsuDmR6.js} +1 -1
- langflow/frontend/assets/{index-8WdfSTTz.js → index-CmEYYRN1.js} +1 -1
- langflow/frontend/assets/{index-FUxmznS-.js → index-Co20d-eQ.js} +1 -1
- langflow/frontend/assets/{index-C82JjCPD.js → index-CpzXS6md.js} +1 -1
- langflow/frontend/assets/{index-DIDDfmlJ.js → index-Cqpzl1J4.js} +1 -1
- langflow/frontend/assets/{index-_UcqeEjm.js → index-CtVIONP2.js} +1 -1
- langflow/frontend/assets/{index-Gkrq-vzm.js → index-CuFXdTx4.js} +1 -1
- langflow/frontend/assets/{index-WPFivmdQ.js → index-Cyd2HtHK.js} +1 -1
- langflow/frontend/assets/{index-BFp_O-c9.js → index-D-1tA8Dt.js} +1 -1
- langflow/frontend/assets/{index-BqPpO6KG.js → index-D-KY3kkq.js} +1 -1
- langflow/frontend/assets/{index-Db71w3lq.js → index-D-_B1a8v.js} +1 -1
- langflow/frontend/assets/{index-BIzTEqFh.js → index-D14EWPyZ.js} +1 -1
- langflow/frontend/assets/{index-BbJjt5m4.js → index-D2N3l-cw.js} +1 -1
- langflow/frontend/assets/{index-DCRk27Tp.js → index-D5ETnvJa.js} +1 -1
- langflow/frontend/assets/{index-CvcEzq4x.js → index-D7kquVv2.js} +1 -1
- langflow/frontend/assets/{index-Q9vDw0Xl.js → index-DA6-bvgN.js} +1 -1
- langflow/frontend/assets/{index-l7bzB8Ex.js → index-DDWBeudF.js} +1 -1
- langflow/frontend/assets/{index-BCCGvqay.js → index-DDcMAaG4.js} +1 -1
- langflow/frontend/assets/{index-pCQ_yw8m.js → index-DHgomBdh.js} +1 -1
- langflow/frontend/assets/{index-BxEuHa76.js → index-DJP-ss47.js} +1 -1
- langflow/frontend/assets/{index-BbRm7beF.js → index-DQ7VYqQc.js} +1 -1
- langflow/frontend/assets/{index-Car-zdor.js → index-DTqbvGC0.js} +1 -1
- langflow/frontend/assets/{index-BRxvproo.js → index-DUpri6zF.js} +1 -1
- langflow/frontend/assets/{index-BQ6NUdMY.js → index-DV3utZDZ.js} +1 -1
- langflow/frontend/assets/{index-DjQETUy8.js → index-DXRfN4HV.js} +1 -1
- langflow/frontend/assets/{index-DfngcQxO.js → index-Db9dYSzy.js} +1 -1
- langflow/frontend/assets/{index-rXV1G1aB.js → index-DdtMEn6I.js} +1 -1
- langflow/frontend/assets/{index-DmMDPoi0.js → index-DfDhMHgQ.js} +1 -1
- langflow/frontend/assets/{index-DJB12jIC.js → index-Dfe7qfvf.js} +1 -1
- langflow/frontend/assets/{index-C_veJlEb.js → index-DhtZ5hx8.js} +1 -1
- langflow/frontend/assets/{index-CQMoqLAu.js → index-DiB3CTo8.js} +1 -1
- langflow/frontend/assets/{index-DVlceYFD.js → index-DiGWASY5.js} +1 -1
- langflow/frontend/assets/{index-Du_18NCU.js → index-Dl5amdBz.js} +1 -1
- langflow/frontend/assets/{index-CYDAYm-i.js → index-DlD4dXlZ.js} +1 -1
- langflow/frontend/assets/{index-CLPdN-q6.js → index-DmeiHnfl.js} +1 -1
- langflow/frontend/assets/index-Dmu-X5-4.js +1 -0
- langflow/frontend/assets/{index-BzEUlaw_.js → index-DpVWih90.js} +1 -1
- langflow/frontend/assets/{index-D6PSjHxP.js → index-DrDrcajG.js} +1 -1
- langflow/frontend/assets/{index-Dq5ilsem.js → index-Du-pc0KE.js} +1 -1
- langflow/frontend/assets/{index-CYe8Ipef.js → index-DwPkMTaY.js} +1 -1
- langflow/frontend/assets/{index-BVEZDXxS.js → index-DwQEZe3C.js} +1 -1
- langflow/frontend/assets/{index-BvT7L317.js → index-DyJFTK24.js} +1 -1
- langflow/frontend/assets/{index-HK3bVMYA.js → index-J38wh62w.js} +1 -1
- langflow/frontend/assets/{index-CCxGSSTT.js → index-Kwdl-e29.js} +1 -1
- langflow/frontend/assets/{index-BOB_zsjl.js → index-OwPvCmpW.js} +1 -1
- langflow/frontend/assets/{index-Dsps-jKu.js → index-Tw3Os-DN.js} +1 -1
- langflow/frontend/assets/{index-CFDvOtKC.js → index-X0guhYF8.js} +1 -1
- langflow/frontend/assets/{index-BX5D-USa.js → index-dJWNxIRH.js} +1 -1
- langflow/frontend/assets/{index-BRYjyhAd.js → index-dcJ8-agu.js} +1 -1
- langflow/frontend/assets/{index-Ui4xUImO.js → index-eo2mAtL-.js} +1 -1
- langflow/frontend/assets/{index-CxvP91st.js → index-hG24k5xJ.js} +1 -1
- langflow/frontend/assets/{index-CVQmT7ZL.js → index-h_aSZHf3.js} +1 -1
- langflow/frontend/assets/{index-BIXaW2aY.js → index-hbndqB9B.js} +1 -1
- langflow/frontend/assets/{index-DIkNW9Cd.js → index-iJngutFo.js} +1 -1
- langflow/frontend/assets/{index-BWmPX4iQ.js → index-lTpteg8t.js} +1 -1
- langflow/frontend/assets/{index-xuIrH2Dq.js → index-lZX9AvZW.js} +1 -1
- langflow/frontend/assets/{index-yCHsaqs8.js → index-m8QA6VNM.js} +1 -1
- langflow/frontend/assets/{index-BkPYpfgw.js → index-o0D2S7xW.js} +1 -1
- langflow/frontend/assets/{index-DpClkXIV.js → index-ovFJ_0J6.js} +1 -1
- langflow/frontend/assets/{index-CmplyEaa.js → index-pYJJOcma.js} +1 -1
- langflow/frontend/assets/{index-CJo_cyWW.js → index-sI75DsdM.js} +1 -1
- langflow/frontend/assets/{index-nVwHLjuV.js → index-xvFOmxx4.js} +1 -1
- langflow/frontend/assets/{index-LbYjHKkn.js → index-z3SRY-mX.js} +1 -1
- langflow/frontend/assets/lazyIconImports-D97HEZkE.js +2 -0
- langflow/frontend/assets/{use-post-add-user-BrBYH9eR.js → use-post-add-user-C0MdTpQ5.js} +1 -1
- langflow/frontend/index.html +2 -2
- langflow/graph/edge/base.py +2 -3
- langflow/graph/graph/base.py +15 -13
- langflow/graph/graph/constants.py +3 -0
- langflow/graph/utils.py +6 -6
- langflow/graph/vertex/base.py +4 -5
- langflow/graph/vertex/param_handler.py +1 -1
- langflow/graph/vertex/vertex_types.py +2 -2
- langflow/helpers/flow.py +1 -1
- langflow/initial_setup/setup.py +32 -30
- langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +26 -0
- langflow/initial_setup/starter_projects/Basic Prompting.json +26 -0
- langflow/initial_setup/starter_projects/Blog Writer.json +58 -2
- langflow/initial_setup/starter_projects/Custom Component Generator.json +37 -2
- langflow/initial_setup/starter_projects/Document Q&A.json +27 -1
- langflow/initial_setup/starter_projects/Financial Report Parser.json +43 -0
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +83 -1
- langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +43 -0
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +51 -3
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +40 -1
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json +73 -2
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json +63 -0
- langflow/initial_setup/starter_projects/Market Research.json +59 -3
- langflow/initial_setup/starter_projects/Meeting Summary.json +101 -6
- langflow/initial_setup/starter_projects/Memory Chatbot.json +37 -2
- langflow/initial_setup/starter_projects/News Aggregator.json +63 -3
- langflow/initial_setup/starter_projects/Nvidia Remix.json +69 -4
- langflow/initial_setup/starter_projects/Pokédex Agent.json +48 -1
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +44 -1
- langflow/initial_setup/starter_projects/Price Deal Finder.json +57 -5
- langflow/initial_setup/starter_projects/Research Agent.json +42 -3
- langflow/initial_setup/starter_projects/Research Translation Loop.json +66 -0
- langflow/initial_setup/starter_projects/SEO Keyword Generator.json +17 -0
- langflow/initial_setup/starter_projects/SaaS Pricing.json +27 -1
- langflow/initial_setup/starter_projects/Search agent.json +40 -1
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +76 -7
- langflow/initial_setup/starter_projects/Simple Agent.json +59 -3
- langflow/initial_setup/starter_projects/Social Media Agent.json +77 -1
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +35 -1
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +51 -3
- langflow/initial_setup/starter_projects/Twitter Thread Generator.json +80 -0
- langflow/initial_setup/starter_projects/Vector Store RAG.json +110 -3
- langflow/initial_setup/starter_projects/Youtube Analysis.json +84 -3
- langflow/initial_setup/starter_projects/vector_store_rag.py +1 -1
- langflow/interface/components.py +23 -22
- langflow/interface/initialize/loading.py +5 -5
- langflow/interface/run.py +1 -1
- langflow/interface/utils.py +1 -1
- langflow/io/__init__.py +0 -1
- langflow/langflow_launcher.py +1 -1
- langflow/load/load.py +2 -7
- langflow/logging/__init__.py +0 -1
- langflow/logging/logger.py +191 -115
- langflow/logging/setup.py +1 -1
- langflow/main.py +37 -52
- langflow/memory.py +7 -7
- langflow/middleware.py +1 -1
- langflow/processing/process.py +6 -3
- langflow/schema/artifact.py +2 -2
- langflow/schema/data.py +10 -2
- langflow/schema/dataframe.py +1 -1
- langflow/schema/message.py +1 -1
- langflow/serialization/serialization.py +1 -1
- langflow/services/auth/mcp_encryption.py +104 -0
- langflow/services/auth/utils.py +2 -2
- langflow/services/cache/disk.py +1 -1
- langflow/services/cache/service.py +3 -3
- langflow/services/database/models/flow/model.py +2 -7
- langflow/services/database/models/transactions/crud.py +2 -2
- langflow/services/database/models/user/crud.py +2 -2
- langflow/services/database/service.py +8 -8
- langflow/services/database/utils.py +6 -5
- langflow/services/deps.py +2 -3
- langflow/services/factory.py +1 -1
- langflow/services/flow/flow_runner.py +7 -12
- langflow/services/job_queue/service.py +16 -15
- langflow/services/manager.py +3 -4
- langflow/services/settings/auth.py +1 -1
- langflow/services/settings/base.py +3 -8
- langflow/services/settings/feature_flags.py +1 -1
- langflow/services/settings/manager.py +1 -1
- langflow/services/settings/utils.py +1 -1
- langflow/services/socket/__init__.py +0 -1
- langflow/services/socket/service.py +3 -3
- langflow/services/socket/utils.py +4 -4
- langflow/services/state/service.py +1 -2
- langflow/services/storage/factory.py +1 -1
- langflow/services/storage/local.py +9 -8
- langflow/services/storage/s3.py +11 -10
- langflow/services/store/service.py +3 -3
- langflow/services/store/utils.py +3 -2
- langflow/services/task/temp_flow_cleanup.py +7 -7
- langflow/services/telemetry/service.py +10 -10
- langflow/services/tracing/arize_phoenix.py +2 -2
- langflow/services/tracing/langfuse.py +1 -1
- langflow/services/tracing/langsmith.py +1 -1
- langflow/services/tracing/langwatch.py +1 -1
- langflow/services/tracing/opik.py +1 -1
- langflow/services/tracing/service.py +25 -6
- langflow/services/tracing/traceloop.py +245 -0
- langflow/services/utils.py +7 -7
- langflow/services/variable/kubernetes.py +3 -3
- langflow/services/variable/kubernetes_secrets.py +2 -1
- langflow/services/variable/service.py +5 -5
- langflow/utils/component_utils.py +9 -6
- langflow/utils/util.py +5 -5
- langflow/utils/validate.py +3 -3
- langflow/utils/voice_utils.py +2 -2
- {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/METADATA +2 -1
- {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/RECORD +393 -374
- langflow/components/vectorstores/redis.py +0 -89
- langflow/frontend/assets/index-C26RqKWL.js +0 -1
- langflow/frontend/assets/index-CqS7zir1.css +0 -1
- langflow/frontend/assets/lazyIconImports-t6wEndt1.js +0 -2
- /langflow/components/{vectorstores → FAISS}/faiss.py +0 -0
- /langflow/components/{vectorstores → cassandra}/cassandra.py +0 -0
- /langflow/components/{datastax/cassandra.py → cassandra/cassandra_chat.py} +0 -0
- /langflow/components/{vectorstores → cassandra}/cassandra_graph.py +0 -0
- /langflow/components/{vectorstores → chroma}/chroma.py +0 -0
- /langflow/components/{vectorstores → clickhouse}/clickhouse.py +0 -0
- /langflow/components/{vectorstores → couchbase}/couchbase.py +0 -0
- /langflow/components/{vectorstores → datastax}/astradb.py +0 -0
- /langflow/components/{vectorstores → datastax}/astradb_graph.py +0 -0
- /langflow/components/{vectorstores → datastax}/graph_rag.py +0 -0
- /langflow/components/{vectorstores → datastax}/hcd.py +0 -0
- /langflow/components/{vectorstores → elastic}/elasticsearch.py +0 -0
- /langflow/components/{vectorstores → elastic}/opensearch.py +0 -0
- /langflow/components/{vectorstores → milvus}/milvus.py +0 -0
- /langflow/components/{vectorstores → mongodb}/mongodb_atlas.py +0 -0
- /langflow/components/{vectorstores → pgvector}/pgvector.py +0 -0
- /langflow/components/{vectorstores → pinecone}/pinecone.py +0 -0
- /langflow/components/{vectorstores → qdrant}/qdrant.py +0 -0
- /langflow/components/{vectorstores → supabase}/supabase.py +0 -0
- /langflow/components/{vectorstores → upstash}/upstash.py +0 -0
- /langflow/components/{vectorstores → vectara}/vectara.py +0 -0
- /langflow/components/{vectorstores → vectara}/vectara_rag.py +0 -0
- /langflow/components/{vectorstores → weaviate}/weaviate.py +0 -0
- {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/entry_points.txt +0 -0
@@ -306,6 +306,23 @@
       "lf_version": "1.1.5",
       "metadata": {
         "code_hash": "6f74e04e39d5",
+        "dependencies": {
+          "dependencies": [
+            {
+              "name": "orjson",
+              "version": "3.10.15"
+            },
+            {
+              "name": "fastapi",
+              "version": "0.115.13"
+            },
+            {
+              "name": "langflow",
+              "version": null
+            }
+          ],
+          "total_dependencies": 3
+        },
         "module": "langflow.components.input_output.chat_output.ChatOutput"
       },
       "minimized": true,
@@ -670,6 +687,19 @@
       "legacy": false,
       "metadata": {
         "code_hash": "57d868cb067b",
+        "dependencies": {
+          "dependencies": [
+            {
+              "name": "langchain_community",
+              "version": "0.3.21"
+            },
+            {
+              "name": "langflow",
+              "version": null
+            }
+          ],
+          "total_dependencies": 2
+        },
         "module": "langflow.components.needle.needle.NeedleComponent"
       },
       "minimized": false,
@@ -878,6 +908,15 @@
       "legacy": false,
       "metadata": {
         "code_hash": "192913db3453",
+        "dependencies": {
+          "dependencies": [
+            {
+              "name": "langflow",
+              "version": null
+            }
+          ],
+          "total_dependencies": 1
+        },
         "module": "langflow.components.input_output.chat.ChatInput"
       },
       "minimized": true,
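The three hunks above add the same `dependencies` block to each component's `metadata`: an object holding a `dependencies` list of `{name, version}` pairs plus a `total_dependencies` count, with `version` set to `null` when no installed distribution can be resolved (as for `langflow` itself). Below is a minimal sketch of reading these blocks back out of a flow JSON; the `data -> nodes -> data -> node -> metadata` traversal path and the file name are assumptions for illustration, and only the block's shape comes from the hunks.

```python
import json
from pathlib import Path

def iter_dependency_blocks(flow: dict):
    """Yield (module, dependencies-block) pairs from component metadata.

    The data -> nodes -> data -> node -> metadata path is an assumed layout;
    only the dependencies/total_dependencies shape is taken from the diff.
    """
    for node in flow.get("data", {}).get("nodes", []):
        metadata = node.get("data", {}).get("node", {}).get("metadata", {})
        block = metadata.get("dependencies")
        if block:
            yield metadata.get("module"), block

# Hypothetical usage against one of the starter-project files listed above.
flow = json.loads(Path("Basic Prompting.json").read_text(encoding="utf-8"))
for module, block in iter_dependency_blocks(flow):
    names = [dep["name"] for dep in block["dependencies"]]
    assert block["total_dependencies"] == len(names)  # count mirrors the list
    print(module, names)
```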
@@ -1350,7 +1389,7 @@
       "show": true,
       "title_case": false,
       "type": "code",
-
"value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST] + [{\"icon\": \"brain\"}],\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n *LCToolsAgentComponent._base_inputs,\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n ]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n 
llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n # note the tools are not required to run the agent, hence the validation removed.\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n # return result\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output.\"\"\"\n # Run the regular message response first to get the result\n if not hasattr(self, \"_agent_result\"):\n await self.message_response()\n\n result = self._agent_result\n\n # Extract content from result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n # Try to parse as JSON\n try:\n json_data = json.loads(content)\n return Data(data=json_data)\n except json.JSONDecodeError:\n # If it's not valid JSON, try to extract JSON from the content\n json_match = re.search(r\"\\{.*\\}\", content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n return Data(data=json_data)\n except json.JSONDecodeError:\n pass\n\n # If we can't extract JSON, return the raw content as data\n return Data(data={\"content\": content, \"error\": \"Could not parse as JSON\"})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. 
We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, 
fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+
"value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool\nfrom pydantic import ValidationError\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output, TableInput\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST] + [{\"icon\": \"brain\"}],\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MultilineInput(\n name=\"format_instructions\",\n display_name=\"Output Format Instructions\",\n info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). 
\"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. \"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=(\n \"Schema Validation: Define the structure and data types for structured output. \"\n \"No validation if no output schema.\"\n ),\n advanced=True,\n required=False,\n value=[],\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n ),\n *LCToolsAgentComponent._base_inputs,\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n ]\n\n async def get_agent_requirements(self):\n \"\"\"Get the agent requirements for the agent.\"\"\"\n llm_model, display_name = await self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. 
Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n return llm_model, self.chat_history, self.tools\n\n async def message_response(self) -> Message:\n try:\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n # Avoid catching blind Exception; let truly unexpected exceptions propagate\n except Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n def _preprocess_schema(self, schema):\n \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n processed_schema = []\n for field in schema:\n processed_field = {\n \"name\": str(field.get(\"name\", \"field\")),\n \"type\": str(field.get(\"type\", \"str\")),\n \"description\": str(field.get(\"description\", \"\")),\n \"multiple\": field.get(\"multiple\", False),\n }\n # Ensure multiple is handled correctly\n if isinstance(processed_field[\"multiple\"], str):\n processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\"true\", \"1\", \"t\", \"y\", \"yes\"]\n processed_schema.append(processed_field)\n return processed_schema\n\n async def build_structured_output_base(self, content: str):\n \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n json_pattern = r\"\\{.*\\}\"\n schema_error_msg = \"Try setting an output schema\"\n\n # Try to parse content as JSON first\n json_data = None\n try:\n json_data = json.loads(content)\n except json.JSONDecodeError:\n json_match = re.search(json_pattern, content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n except json.JSONDecodeError:\n return {\"content\": content, \"error\": schema_error_msg}\n else:\n return {\"content\": content, \"error\": schema_error_msg}\n\n # If no output schema provided, return parsed JSON without validation\n if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n return json_data\n\n # Use BaseModel validation with schema\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n\n # Validate against the schema\n if isinstance(json_data, list):\n # Multiple objects\n validated_objects = []\n for item in json_data:\n try:\n validated_obj = 
output_model.model_validate(item)\n validated_objects.append(validated_obj.model_dump())\n except ValidationError as e:\n await logger.aerror(f\"Validation error for item: {e}\")\n # Include invalid items with error info\n validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n return validated_objects\n\n # Single object\n try:\n validated_obj = output_model.model_validate(json_data)\n return [validated_obj.model_dump()] # Return as list for consistency\n except ValidationError as e:\n await logger.aerror(f\"Validation error: {e}\")\n return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n except (TypeError, ValueError) as e:\n await logger.aerror(f\"Error building structured output: {e}\")\n # Fallback to parsed JSON without validation\n return json_data\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n # Always use structured chat agent for JSON response mode for better JSON formatting\n try:\n system_components = []\n\n # 1. Agent Instructions (system_prompt)\n agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n if agent_instructions:\n system_components.append(f\"{agent_instructions}\")\n\n # 2. Format Instructions\n format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n if format_instructions:\n system_components.append(f\"Format instructions: {format_instructions}\")\n\n # 3. Schema Information from BaseModel\n if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n schema_dict = output_model.model_json_schema()\n schema_info = (\n \"You are given some text that may include format instructions, \"\n \"explanations, or other content alongside a JSON schema.\\n\\n\"\n \"Your task:\\n\"\n \"- Extract only the JSON schema.\\n\"\n \"- Return it as valid JSON.\\n\"\n \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n \"Input:\\n\"\n f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n \"Output (only JSON schema):\"\n )\n system_components.append(schema_info)\n except (ValidationError, ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n # Combine all components\n combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=combined_instructions,\n )\n\n # Create and run structured chat agent\n try:\n structured_agent = self.create_agent_runnable()\n except (NotImplementedError, ValueError, TypeError) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n raise\n try:\n result = await self.run_agent(structured_agent)\n except (ExceptionWithMessageError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error with structured agent result: {e}\")\n raise\n # Extract content from structured agent result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n except (ExceptionWithMessageError, ValueError, TypeError, NotImplementedError, AttributeError) as e:\n await logger.aerror(f\"Error with structured 
chat agent: {e}\")\n # Fallback to regular agent\n content_str = \"No content returned from agent\"\n return Data(data={\"content\": content_str, \"error\": str(e)})\n\n # Process with structured output validation\n try:\n structured_output = await self.build_structured_output_base(content)\n\n # Handle different output formats\n if isinstance(structured_output, list) and structured_output:\n if len(structured_output) == 1:\n return Data(data=structured_output[0])\n return Data(data={\"results\": structured_output})\n if isinstance(structured_output, dict):\n return Data(data=structured_output)\n return Data(data={\"content\": content})\n\n except (ValueError, TypeError) as e:\n await logger.aerror(f\"Error in structured output processing: {e}\")\n return Data(data={\"content\": content, \"error\": str(e)})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n async def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = 
None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = 
self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
         },
         "handle_parsing_errors": {
           "_input_type": "BoolInput",
@@ -89,6 +89,19 @@
         "lf_version": "1.5.0.post1",
         "metadata": {
           "code_hash": "dbf2e9d2319d",
+          "dependencies": {
+            "dependencies": [
+              {
+                "name": "langchain_text_splitters",
+                "version": "0.3.8"
+              },
+              {
+                "name": "langflow",
+                "version": null
+              }
+            ],
+            "total_dependencies": 2
+          },
           "module": "langflow.components.processing.split_text.SplitTextComponent"
         },
         "minimized": false,
@@ -339,7 +352,28 @@
         "legacy": false,
         "lf_version": "1.5.0.post1",
         "metadata": {
-          "code_hash": "
+          "code_hash": "252132357639",
+          "dependencies": {
+            "dependencies": [
+              {
+                "name": "requests",
+                "version": "2.32.4"
+              },
+              {
+                "name": "bs4",
+                "version": "4.12.3"
+              },
+              {
+                "name": "langchain_community",
+                "version": "0.3.21"
+              },
+              {
+                "name": "langflow",
+                "version": null
+              }
+            ],
+            "total_dependencies": 4
+          },
           "module": "langflow.components.data.url.URLComponent"
         },
         "minimized": false,
@@ -429,7 +463,7 @@
           "show": true,
           "title_case": false,
           "type": "code",
-          "value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n documentation: str = \"https://docs.langflow.org/components-data#url\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Extracted Pages\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Content\", name=\"raw_results\", method=\"fetch_content_as_message\", tool_mode=False),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # Allow customization of excluded directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.debug(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.debug(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.debug(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def fetch_content_as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n"
+          "value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n documentation: str = \"https://docs.langflow.org/components-data#url\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Extracted Pages\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Content\", name=\"raw_results\", method=\"fetch_content_as_message\", tool_mode=False),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # Allow customization of excluded directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.debug(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.debug(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.debug(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def fetch_content_as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n"
         },
         "continue_on_failure": {
           "_input_type": "BoolInput",
@@ -703,6 +737,43 @@
         "legacy": false,
         "metadata": {
           "code_hash": "6c62063f2c09",
+          "dependencies": {
+            "dependencies": [
+              {
+                "name": "pandas",
+                "version": "2.2.3"
+              },
+              {
+                "name": "cryptography",
+                "version": "43.0.3"
+              },
+              {
+                "name": "langchain_chroma",
+                "version": "0.1.4"
+              },
+              {
+                "name": "loguru",
+                "version": "0.7.3"
+              },
+              {
+                "name": "langflow",
+                "version": null
+              },
+              {
+                "name": "langchain_openai",
+                "version": "0.3.23"
+              },
+              {
+                "name": "langchain_huggingface",
+                "version": "0.3.1"
+              },
+              {
+                "name": "langchain_cohere",
+                "version": "0.3.3"
+              }
+            ],
+            "total_dependencies": 8
+          },
           "module": "langflow.components.data.kb_ingest.KBIngestionComponent"
         },
         "minimized": false,
@@ -109,6 +109,15 @@
         "lf_version": "1.5.0.post1",
         "metadata": {
           "code_hash": "efdcba3771af",
+          "dependencies": {
+            "dependencies": [
+              {
+                "name": "langflow",
+                "version": null
+              }
+            ],
+            "total_dependencies": 1
+          },
           "module": "langflow.components.input_output.text.TextInputComponent"
         },
         "minimized": false,
@@ -226,6 +235,23 @@
         "lf_version": "1.5.0.post1",
         "metadata": {
           "code_hash": "6f74e04e39d5",
+          "dependencies": {
+            "dependencies": [
+              {
+                "name": "orjson",
+                "version": "3.10.15"
+              },
+              {
+                "name": "fastapi",
+                "version": "0.115.13"
+              },
+              {
+                "name": "langflow",
+                "version": null
+              }
+            ],
+            "total_dependencies": 3
+          },
           "module": "langflow.components.input_output.chat_output.ChatOutput"
         },
         "minimized": true,
@@ -533,6 +559,43 @@
         "legacy": false,
         "metadata": {
           "code_hash": "6fcf86be1aca",
+          "dependencies": {
+            "dependencies": [
+              {
+                "name": "cryptography",
+                "version": "43.0.3"
+              },
+              {
+                "name": "langchain_chroma",
+                "version": "0.1.4"
+              },
+              {
+                "name": "loguru",
+                "version": "0.7.3"
+              },
+              {
+                "name": "pydantic",
+                "version": "2.10.6"
+              },
+              {
+                "name": "langflow",
+                "version": null
+              },
+              {
+                "name": "langchain_openai",
+                "version": "0.3.23"
+              },
+              {
+                "name": "langchain_huggingface",
+                "version": "0.3.1"
+              },
+              {
+                "name": "langchain_cohere",
+                "version": "0.3.3"
+              }
+            ],
+            "total_dependencies": 8
+          },
           "module": "langflow.components.data.kb_retrieval.KBRetrievalComponent"
         },
         "minimized": false,