langflow-base-nightly 0.5.0.dev36__py3-none-any.whl → 0.5.0.dev38__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langflow/__main__.py +1 -1
- langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py +24 -30
- langflow/alembic/versions/58b28437a398_modify_nullable.py +6 -6
- langflow/alembic/versions/79e675cb6752_change_datetime_type.py +24 -30
- langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py +12 -13
- langflow/api/build.py +21 -26
- langflow/api/health_check_router.py +3 -3
- langflow/api/utils.py +3 -3
- langflow/api/v1/callback.py +2 -2
- langflow/api/v1/chat.py +19 -31
- langflow/api/v1/endpoints.py +10 -10
- langflow/api/v1/flows.py +1 -1
- langflow/api/v1/knowledge_bases.py +19 -12
- langflow/api/v1/mcp.py +12 -12
- langflow/api/v1/mcp_projects.py +45 -81
- langflow/api/v1/mcp_utils.py +8 -8
- langflow/api/v1/schemas.py +1 -5
- langflow/api/v1/store.py +1 -1
- langflow/api/v1/validate.py +2 -2
- langflow/api/v1/voice_mode.py +58 -62
- langflow/api/v2/files.py +5 -3
- langflow/api/v2/mcp.py +10 -9
- langflow/base/composio/composio_base.py +21 -2
- langflow/base/data/docling_utils.py +194 -0
- langflow/base/data/kb_utils.py +33 -0
- langflow/base/embeddings/aiml_embeddings.py +1 -1
- langflow/base/flow_processing/utils.py +1 -2
- langflow/base/io/__init__.py +0 -1
- langflow/base/langwatch/utils.py +2 -1
- langflow/base/mcp/util.py +49 -47
- langflow/base/models/model.py +3 -3
- langflow/base/prompts/api_utils.py +1 -1
- langflow/base/tools/flow_tool.py +2 -2
- langflow/base/tools/run_flow.py +2 -6
- langflow/components/Notion/add_content_to_page.py +2 -2
- langflow/components/Notion/list_database_properties.py +2 -2
- langflow/components/Notion/list_pages.py +2 -2
- langflow/components/Notion/page_content_viewer.py +2 -2
- langflow/components/Notion/update_page_property.py +1 -1
- langflow/components/agentql/agentql_api.py +2 -10
- langflow/components/agents/agent.py +3 -3
- langflow/components/agents/mcp_component.py +54 -69
- langflow/components/anthropic/anthropic.py +5 -4
- langflow/components/assemblyai/assemblyai_get_subtitles.py +2 -2
- langflow/components/assemblyai/assemblyai_lemur.py +2 -2
- langflow/components/assemblyai/assemblyai_list_transcripts.py +2 -2
- langflow/components/assemblyai/assemblyai_poll_transcript.py +2 -2
- langflow/components/assemblyai/assemblyai_start_transcript.py +2 -2
- langflow/components/data/file.py +575 -55
- langflow/components/data/kb_ingest.py +116 -43
- langflow/components/data/kb_retrieval.py +24 -26
- langflow/components/data/url.py +1 -1
- langflow/components/datastax/astra_assistant_manager.py +3 -3
- langflow/components/datastax/create_assistant.py +1 -2
- langflow/components/deactivated/merge_data.py +1 -2
- langflow/components/deactivated/sub_flow.py +6 -7
- langflow/components/deactivated/vectara_self_query.py +3 -3
- langflow/components/docling/__init__.py +0 -198
- langflow/components/docling/docling_inline.py +1 -1
- langflow/components/embeddings/text_embedder.py +3 -3
- langflow/components/firecrawl/firecrawl_extract_api.py +2 -9
- langflow/components/google/gmail.py +1 -1
- langflow/components/google/google_generative_ai.py +5 -11
- langflow/components/groq/groq.py +4 -3
- langflow/components/helpers/current_date.py +2 -3
- langflow/components/helpers/memory.py +1 -1
- langflow/components/ibm/watsonx.py +1 -1
- langflow/components/ibm/watsonx_embeddings.py +1 -1
- langflow/components/langwatch/langwatch.py +3 -3
- langflow/components/logic/flow_tool.py +2 -2
- langflow/components/logic/notify.py +1 -1
- langflow/components/logic/run_flow.py +2 -3
- langflow/components/logic/sub_flow.py +4 -5
- langflow/components/mem0/mem0_chat_memory.py +2 -8
- langflow/components/nvidia/nvidia.py +3 -3
- langflow/components/olivya/olivya.py +7 -7
- langflow/components/ollama/ollama.py +8 -6
- langflow/components/processing/batch_run.py +8 -8
- langflow/components/processing/data_operations.py +2 -2
- langflow/components/processing/merge_data.py +1 -2
- langflow/components/processing/message_to_data.py +2 -3
- langflow/components/processing/parse_json_data.py +1 -1
- langflow/components/processing/save_file.py +6 -32
- langflow/components/prototypes/python_function.py +2 -3
- langflow/components/serpapi/serp.py +1 -1
- langflow/components/tavily/tavily_extract.py +1 -1
- langflow/components/tavily/tavily_search.py +1 -1
- langflow/components/tools/calculator.py +2 -2
- langflow/components/tools/python_code_structured_tool.py +3 -10
- langflow/components/tools/python_repl.py +2 -2
- langflow/components/tools/searxng.py +3 -3
- langflow/components/tools/serp_api.py +2 -2
- langflow/components/tools/tavily_search_tool.py +2 -2
- langflow/components/tools/yahoo_finance.py +1 -1
- langflow/components/twelvelabs/video_embeddings.py +4 -4
- langflow/components/vectorstores/astradb.py +30 -19
- langflow/components/vectorstores/local_db.py +1 -1
- langflow/components/yahoosearch/yahoo.py +1 -1
- langflow/components/youtube/trending.py +3 -4
- langflow/custom/attributes.py +2 -1
- langflow/custom/code_parser/code_parser.py +1 -1
- langflow/custom/custom_component/base_component.py +1 -1
- langflow/custom/custom_component/component.py +16 -2
- langflow/custom/directory_reader/directory_reader.py +7 -7
- langflow/custom/directory_reader/utils.py +1 -2
- langflow/custom/utils.py +30 -30
- langflow/events/event_manager.py +1 -1
- langflow/frontend/assets/{SlackIcon-B260Qg_R.js → SlackIcon-BhW6H3JR.js} +1 -1
- langflow/frontend/assets/{Wikipedia-BB2mbgyd.js → Wikipedia-Dx5jbiy3.js} +1 -1
- langflow/frontend/assets/{Wolfram-DytXC9hF.js → Wolfram-CIyonzwo.js} +1 -1
- langflow/frontend/assets/{index-DPX6X_bw.js → index-0XQqYgdG.js} +1 -1
- langflow/frontend/assets/{index-DtJyCbzF.js → index-1Q3VBqKn.js} +1 -1
- langflow/frontend/assets/{index-DztLFiip.js → index-35sspuLu.js} +1 -1
- langflow/frontend/assets/{index-BeNby7qF.js → index-7hzXChQz.js} +1 -1
- langflow/frontend/assets/{index-BOEf7-ty.js → index-8cuhogZP.js} +1 -1
- langflow/frontend/assets/{index-D0s9f6Re.js → index-B0m53xKd.js} +1 -1
- langflow/frontend/assets/{index-DpJiH-Rk.js → index-B1XqWJhG.js} +1 -1
- langflow/frontend/assets/{index-DuAeoC-H.js → index-B3KCdQ91.js} +1 -1
- langflow/frontend/assets/{index-Bxml6wXu.js → index-B7uEuOPK.js} +1 -1
- langflow/frontend/assets/{index-CDFLVFB4.js → index-B8UR8v-Q.js} +1 -1
- langflow/frontend/assets/{index-ci4XHjbJ.js → index-BD7Io1hL.js} +6 -6
- langflow/frontend/assets/{index-DasrI03Y.js → index-BDQrd7Tj.js} +1 -1
- langflow/frontend/assets/{index-CkQ-bJ4G.js → index-BDuk0d7P.js} +1 -1
- langflow/frontend/assets/{index-C_1RBTul.js → index-BFQ8KFK0.js} +1 -1
- langflow/frontend/assets/{index-DqSH4x-R.js → index-BFf0HTFI.js} +1 -1
- langflow/frontend/assets/{index-BXMhmvTj.js → index-BHhnpSkW.js} +1 -1
- langflow/frontend/assets/{index-Uq2ij_SS.js → index-BKKrUElc.js} +1 -1
- langflow/frontend/assets/{index-3TJWUdmx.js → index-BKeZt2hQ.js} +1 -1
- langflow/frontend/assets/{index-DHlEwAxb.js → index-BKlQbl-6.js} +1 -1
- langflow/frontend/assets/{index-Bisa4IQF.js → index-BLYw9MK2.js} +1 -1
- langflow/frontend/assets/{index-GODbXlHC.js → index-BLsVo9iW.js} +1 -1
- langflow/frontend/assets/{index-CHFO5O4g.js → index-BNQIbda3.js} +1 -1
- langflow/frontend/assets/{index-3uOAA_XX.js → index-BPR2mEFC.js} +1 -1
- langflow/frontend/assets/{index-3qMh9x6K.js → index-BPfdqCc_.js} +1 -1
- langflow/frontend/assets/{index-rcdQpNcU.js → index-BQrVDjR1.js} +1 -1
- langflow/frontend/assets/{index-4eRtaV45.js → index-BRmSeoWR.js} +1 -1
- langflow/frontend/assets/{index-Ct9_T9ox.js → index-BUse-kxM.js} +1 -1
- langflow/frontend/assets/{index-BdYgKk1d.js → index-BVFaF7HW.js} +1 -1
- langflow/frontend/assets/{index-CWWo2zOA.js → index-BWgIWfv2.js} +1 -1
- langflow/frontend/assets/{index-Du9aJK7m.js → index-BWt5xGeA.js} +1 -1
- langflow/frontend/assets/{index-Baka5dKE.js → index-BYhcGLTV.js} +1 -1
- langflow/frontend/assets/{index-BWq9GTzt.js → index-BYjw7Gk3.js} +1 -1
- langflow/frontend/assets/{index-r1LZg-PY.js → index-BZFljdMa.js} +1 -1
- langflow/frontend/assets/index-BcAgItH4.js +1 -0
- langflow/frontend/assets/{index-B8TlNgn-.js → index-Bct1s6__.js} +1 -1
- langflow/frontend/assets/{index-DZzbmg3J.js → index-Bhv79Zso.js} +1 -1
- langflow/frontend/assets/{index-CqDUqHfd.js → index-Bj3lSwvZ.js} +1 -1
- langflow/frontend/assets/{index-dkS0ek2S.js → index-Bk4mTwnI.js} +1 -1
- langflow/frontend/assets/{index-tOy_uloT.js → index-BmIx1cws.js} +1 -1
- langflow/frontend/assets/{index-BVtf6m9S.js → index-BmYJJ5YS.js} +1 -1
- langflow/frontend/assets/{index-mBjJYD9q.js → index-BnAFhkSN.js} +1 -1
- langflow/frontend/assets/{index-Ba3RTMXI.js → index-Bo-ww0Bb.js} +1 -1
- langflow/frontend/assets/{index-BsBWP-Dh.js → index-BpmqDOeZ.js} +1 -1
- langflow/frontend/assets/{index-BqUeOc7Y.js → index-BrVhdPZb.js} +1 -1
- langflow/frontend/assets/{index-DWkMJnbd.js → index-BvGQfVBD.js} +1 -1
- langflow/frontend/assets/{index-DdzVmJHE.js → index-Bwi4flFg.js} +1 -1
- langflow/frontend/assets/{index-Ccb5B8zG.js → index-BzoRPtTY.js} +1 -1
- langflow/frontend/assets/{index-Ym6gz0T6.js → index-C--IDAyc.js} +1 -1
- langflow/frontend/assets/{index-CvQ0w8Pj.js → index-C0E3_MIK.js} +1 -1
- langflow/frontend/assets/{index-DxIs8VSp.js → index-C27Jj_26.js} +1 -1
- langflow/frontend/assets/{index-BxWXWRmZ.js → index-C2eQmQsn.js} +1 -1
- langflow/frontend/assets/{index-B536IPXH.js → index-C8K0r39B.js} +1 -1
- langflow/frontend/assets/{index-BEDxAk3N.js → index-CEJNWPhA.js} +1 -1
- langflow/frontend/assets/{index-G_U_kPAd.js → index-CFNTYfFK.js} +1 -1
- langflow/frontend/assets/{index-CMGZGIx_.js → index-CMHpjHZl.js} +1 -1
- langflow/frontend/assets/{index-C76aBV_h.js → index-CSu8KHOi.js} +1 -1
- langflow/frontend/assets/{index-B-c82Fnu.js → index-CUKmGsI6.js} +1 -1
- langflow/frontend/assets/{index-DX7XsAcx.js → index-CWYiSeWV.js} +1 -1
- langflow/frontend/assets/{index-COL0eiWI.js → index-CY7_TBTC.js} +1 -1
- langflow/frontend/assets/{index-BlBl2tvQ.js → index-CbnWRlYY.js} +1 -1
- langflow/frontend/assets/{index-BQB-iDYl.js → index-CfPBgkqg.js} +1 -1
- langflow/frontend/assets/{index-DWr_zPkx.js → index-Cg53lrYh.js} +1 -1
- langflow/frontend/assets/{index-BcgB3rXH.js → index-CgU7KF4I.js} +1 -1
- langflow/frontend/assets/{index-CkSzjCqM.js → index-CgwykVGh.js} +1 -1
- langflow/frontend/assets/{index-BbsND1Qg.js → index-Ch5r0oW6.js} +1 -1
- langflow/frontend/assets/{index-AY5Dm2mG.js → index-CjsommIr.js} +1 -1
- langflow/frontend/assets/{index-BtJ2o21k.js → index-CkK25zZO.js} +1 -1
- langflow/frontend/assets/{index-BKvKC-12.js → index-CkjwSTSM.js} +1 -1
- langflow/frontend/assets/{index-BVHvIhT5.js → index-CmSFKgiD.js} +1 -1
- langflow/frontend/assets/{index-D-zkHcob.js → index-Cr5v2ave.js} +1 -1
- langflow/frontend/assets/{index-js8ceOaP.js → index-CrAF-31Y.js} +1 -1
- langflow/frontend/assets/{index-BNbWMmAV.js → index-CsLQiWNf.js} +1 -1
- langflow/frontend/assets/{index-VcXZzovW.js → index-CuCM7Wu7.js} +1 -1
- langflow/frontend/assets/{index-DzeIsaBm.js → index-Cxy9sEpy.js} +1 -1
- langflow/frontend/assets/{index-LrMzDsq9.js → index-CyP3py8K.js} +1 -1
- langflow/frontend/assets/{index-C8KD3LPb.js → index-CzHzeZuA.js} +1 -1
- langflow/frontend/assets/{index-DS1EgA10.js → index-D1oynC8a.js} +1 -1
- langflow/frontend/assets/{index-ByFXr9Iq.js → index-D4tjMhfY.js} +1 -1
- langflow/frontend/assets/{index-DyJDHm2D.js → index-D6CSIrp1.js} +1 -1
- langflow/frontend/assets/{index-DIqSyDVO.js → index-D9kwEzPB.js} +1 -1
- langflow/frontend/assets/{index-D5PeCofu.js → index-DDXsm8tz.js} +1 -1
- langflow/frontend/assets/{index-CJwYfDBz.js → index-DDhJVVel.js} +1 -1
- langflow/frontend/assets/{index-C7x9R_Yo.js → index-DH6o91_s.js} +1 -1
- langflow/frontend/assets/{index-DpQKtcXu.js → index-DHngW1k8.js} +1 -1
- langflow/frontend/assets/{index-VZnN0P6C.js → index-DIKUsGLF.js} +1 -1
- langflow/frontend/assets/{index-VHmUHUUU.js → index-DJESSNJi.js} +1 -1
- langflow/frontend/assets/{index-BdIWbCEL.js → index-DMCWDJOl.js} +1 -1
- langflow/frontend/assets/{index-DK8vNpXK.js → index-DOEvKC2X.js} +1 -1
- langflow/frontend/assets/{index-C7V5U9yH.js → index-DOQDkSoK.js} +1 -1
- langflow/frontend/assets/{index-D0HmkH0H.js → index-DXAfIEvs.js} +1 -1
- langflow/frontend/assets/{index-C9N80hP8.js → index-DZP_SaHb.js} +1 -1
- langflow/frontend/assets/{index-B2ggrBuR.js → index-DZxUIhWh.js} +1 -1
- langflow/frontend/assets/{index-DS9I4y48.js → index-Dda2u_yz.js} +1 -1
- langflow/frontend/assets/{index-BLROcaSz.js → index-Dg8N3NSO.js} +1 -1
- langflow/frontend/assets/{index-Dpz3oBf5.js → index-DkGhPNeA.js} +1 -1
- langflow/frontend/assets/{index-BnLT29qW.js → index-Dka_Rk4-.js} +1 -1
- langflow/frontend/assets/{index-B5ed-sAv.js → index-DljpLeCW.js} +1 -1
- langflow/frontend/assets/{index-Cx__T92e.js → index-DnVYJtVO.js} +1 -1
- langflow/frontend/assets/{index-hOkEW3JP.js → index-DqbzUcI5.js} +1 -1
- langflow/frontend/assets/{index-BxkZkBgQ.js → index-Dr6pVDPI.js} +1 -1
- langflow/frontend/assets/{index-BIkqesA-.js → index-DsoX2o1S.js} +1 -1
- langflow/frontend/assets/{index-Cpgkb0Q3.js → index-DwfHWnX7.js} +1 -1
- langflow/frontend/assets/{index-B9Mo3ndZ.js → index-Dx-Z87KT.js} +1 -1
- langflow/frontend/assets/{index-R7q8cAek.js → index-DyqITq51.js} +1 -1
- langflow/frontend/assets/{index-DKEXZFUO.js → index-DzIv3RyR.js} +1 -1
- langflow/frontend/assets/{index-BJrY2Fiu.js → index-G4ro0MjT.js} +1 -1
- langflow/frontend/assets/{index-IFGgPiye.js → index-H7J7w7fa.js} +1 -1
- langflow/frontend/assets/{index-lKEJpUsF.js → index-KWY77KfV.js} +1 -1
- langflow/frontend/assets/{index-DDNNv4C0.js → index-U9GWm1eH.js} +1 -1
- langflow/frontend/assets/{index-BRWNIt9F.js → index-Un9pWxnP.js} +1 -1
- langflow/frontend/assets/{index-BCK-ZyIh.js → index-Xi4TplbI.js} +1 -1
- langflow/frontend/assets/{index-BEKoRwsX.js → index-_cbGmjF4.js} +1 -1
- langflow/frontend/assets/{index-7xXgqu09.js → index-cEXY6V06.js} +1 -1
- langflow/frontend/assets/{index-D87Zw62M.js → index-dyXKnkMi.js} +1 -1
- langflow/frontend/assets/{index-CG7cp0nD.js → index-eUkS6iJM.js} +1 -1
- langflow/frontend/assets/{index-CoUlHbtg.js → index-ekfMOqrF.js} +1 -1
- langflow/frontend/assets/{index-DhzEUXfr.js → index-gdb7XMS8.js} +1 -1
- langflow/frontend/assets/{index-D9eflZfP.js → index-hZUcL0MZ.js} +1 -1
- langflow/frontend/assets/{index-CwIxqYlT.js → index-kkA-qHB_.js} +1 -1
- langflow/frontend/assets/{index-sS6XLk3j.js → index-mzl9ULw5.js} +1 -1
- langflow/frontend/assets/{index-BjENqyKe.js → index-oxHBZk2v.js} +1 -1
- langflow/frontend/assets/{index-BejHxU5W.js → index-p2kStSPe.js} +1 -1
- langflow/frontend/assets/{index-BOYTBrh9.js → index-paQEWYGT.js} +1 -1
- langflow/frontend/assets/{index-Cd5zuUUK.js → index-r_8gs4nL.js} +1 -1
- langflow/frontend/assets/{index-AlJ7td-D.js → index-uiKla4UR.js} +1 -1
- langflow/frontend/assets/{index-B8y58M9b.js → index-vJOO5U8M.js} +1 -1
- langflow/frontend/assets/{index-CF4dtI6S.js → index-w72fDjpG.js} +1 -1
- langflow/frontend/assets/{index-C2Xd7UkR.js → index-zV82kQ6k.js} +1 -1
- langflow/frontend/assets/lazyIconImports-DTNgvPE-.js +2 -0
- langflow/frontend/assets/{use-post-add-user-HN0rRnhv.js → use-post-add-user-CvtuazTg.js} +1 -1
- langflow/frontend/index.html +1 -1
- langflow/graph/edge/base.py +2 -3
- langflow/graph/graph/base.py +14 -12
- langflow/graph/graph/constants.py +3 -0
- langflow/graph/utils.py +6 -6
- langflow/graph/vertex/base.py +4 -5
- langflow/graph/vertex/param_handler.py +1 -1
- langflow/graph/vertex/vertex_types.py +2 -2
- langflow/helpers/flow.py +1 -1
- langflow/initial_setup/setup.py +32 -30
- langflow/initial_setup/starter_projects/Blog Writer.json +2 -2
- langflow/initial_setup/starter_projects/Custom Component Generator.json +2 -2
- langflow/initial_setup/starter_projects/Document Q&A.json +1 -1
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +2 -2
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +3 -3
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +1 -1
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json +4 -4
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json +2 -2
- langflow/initial_setup/starter_projects/Market Research.json +3 -3
- langflow/initial_setup/starter_projects/Meeting Summary.json +6 -6
- langflow/initial_setup/starter_projects/Memory Chatbot.json +2 -2
- langflow/initial_setup/starter_projects/News Aggregator.json +5 -22
- langflow/initial_setup/starter_projects/Nvidia Remix.json +3 -20
- langflow/initial_setup/starter_projects/Pokédex Agent.json +1 -1
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +1 -1
- langflow/initial_setup/starter_projects/Price Deal Finder.json +5 -5
- langflow/initial_setup/starter_projects/Research Agent.json +3 -3
- langflow/initial_setup/starter_projects/SaaS Pricing.json +1 -1
- langflow/initial_setup/starter_projects/Search agent.json +1 -1
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +7 -7
- langflow/initial_setup/starter_projects/Simple Agent.json +3 -3
- langflow/initial_setup/starter_projects/Social Media Agent.json +1 -1
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +1 -1
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +3 -3
- langflow/initial_setup/starter_projects/Vector Store RAG.json +5 -5
- langflow/initial_setup/starter_projects/Youtube Analysis.json +3 -3
- langflow/interface/components.py +23 -22
- langflow/interface/initialize/loading.py +5 -5
- langflow/interface/run.py +1 -1
- langflow/interface/utils.py +1 -1
- langflow/io/__init__.py +0 -1
- langflow/langflow_launcher.py +1 -1
- langflow/load/load.py +2 -7
- langflow/logging/__init__.py +0 -1
- langflow/logging/logger.py +191 -115
- langflow/logging/setup.py +1 -1
- langflow/main.py +37 -52
- langflow/memory.py +7 -7
- langflow/middleware.py +1 -1
- langflow/processing/process.py +4 -4
- langflow/schema/artifact.py +2 -2
- langflow/schema/data.py +10 -2
- langflow/schema/dataframe.py +1 -1
- langflow/schema/message.py +1 -1
- langflow/serialization/serialization.py +1 -1
- langflow/services/auth/utils.py +2 -2
- langflow/services/cache/disk.py +1 -1
- langflow/services/cache/service.py +3 -3
- langflow/services/database/models/flow/model.py +2 -7
- langflow/services/database/models/transactions/crud.py +2 -2
- langflow/services/database/models/user/crud.py +2 -2
- langflow/services/database/service.py +8 -8
- langflow/services/database/utils.py +6 -5
- langflow/services/deps.py +2 -3
- langflow/services/factory.py +1 -1
- langflow/services/flow/flow_runner.py +7 -12
- langflow/services/job_queue/service.py +16 -15
- langflow/services/manager.py +3 -4
- langflow/services/settings/auth.py +1 -1
- langflow/services/settings/base.py +3 -8
- langflow/services/settings/manager.py +1 -1
- langflow/services/settings/utils.py +1 -1
- langflow/services/socket/__init__.py +0 -1
- langflow/services/socket/service.py +3 -3
- langflow/services/socket/utils.py +4 -4
- langflow/services/state/service.py +1 -2
- langflow/services/storage/factory.py +1 -1
- langflow/services/storage/local.py +9 -8
- langflow/services/storage/s3.py +11 -10
- langflow/services/store/service.py +3 -3
- langflow/services/store/utils.py +3 -2
- langflow/services/task/temp_flow_cleanup.py +7 -7
- langflow/services/telemetry/service.py +10 -10
- langflow/services/tracing/arize_phoenix.py +2 -2
- langflow/services/tracing/langfuse.py +1 -1
- langflow/services/tracing/langsmith.py +1 -1
- langflow/services/tracing/langwatch.py +1 -1
- langflow/services/tracing/opik.py +1 -1
- langflow/services/tracing/service.py +25 -6
- langflow/services/tracing/traceloop.py +245 -0
- langflow/services/utils.py +7 -7
- langflow/services/variable/kubernetes.py +3 -3
- langflow/services/variable/kubernetes_secrets.py +2 -1
- langflow/services/variable/service.py +5 -5
- langflow/utils/component_utils.py +9 -6
- langflow/utils/util.py +5 -5
- langflow/utils/validate.py +3 -3
- langflow/utils/voice_utils.py +2 -2
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/METADATA +2 -1
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/RECORD +342 -340
- langflow/frontend/assets/lazyIconImports-Bh1TFfvH.js +0 -2
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-0.5.0.dev36.dist-info → langflow_base_nightly-0.5.0.dev38.dist-info}/entry_points.txt +0 -0
@@ -285,7 +285,7 @@
 "legacy": false,
 "lf_version": "1.4.3",
 "metadata": {
-"code_hash": "
+"code_hash": "ee50d5005321",
 "module": "langflow.components.processing.batch_run.BatchRunComponent"
 },
 "minimized": false,
@@ -326,7 +326,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-
"value": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, cast\n\nimport toml # type: ignore[import-untyped]\
+
"value": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, cast\n\nimport toml # type: ignore[import-untyped]\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import BoolInput, DataFrameInput, HandleInput, MessageTextInput, MultilineInput, Output\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dataframe import DataFrame\n\nif TYPE_CHECKING:\n from langchain_core.runnables import Runnable\n\n\nclass BatchRunComponent(Component):\n display_name = \"Batch Run\"\n description = \"Runs an LLM on each row of a DataFrame column. If no column is specified, all columns are used.\"\n documentation: str = \"https://docs.langflow.org/components-processing#batch-run\"\n icon = \"List\"\n\n inputs = [\n HandleInput(\n name=\"model\",\n display_name=\"Language Model\",\n info=\"Connect the 'Language Model' output from your LLM component here.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"Instructions\",\n info=\"Multi-line system instruction for all rows in the DataFrame.\",\n required=False,\n ),\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The DataFrame whose column (specified by 'column_name') we'll treat as text messages.\",\n required=True,\n ),\n MessageTextInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=(\n \"The name of the DataFrame column to treat as text messages. \"\n \"If empty, all columns will be formatted in TOML.\"\n ),\n required=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"output_column_name\",\n display_name=\"Output Column Name\",\n info=\"Name of the column where the model's response will be stored.\",\n value=\"model_response\",\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"enable_metadata\",\n display_name=\"Enable Metadata\",\n info=\"If True, add metadata to the output DataFrame.\",\n value=False,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"LLM Results\",\n name=\"batch_results\",\n method=\"run_batch\",\n info=\"A DataFrame with all original columns plus the model's response column.\",\n ),\n ]\n\n def _format_row_as_toml(self, row: dict[str, Any]) -> str:\n \"\"\"Convert a dictionary (row) into a TOML-formatted string.\"\"\"\n formatted_dict = {str(col): {\"value\": str(val)} for col, val in row.items()}\n return toml.dumps(formatted_dict)\n\n def _create_base_row(\n self, original_row: dict[str, Any], model_response: str = \"\", batch_index: int = -1\n ) -> dict[str, Any]:\n \"\"\"Create a base row with original columns and additional metadata.\"\"\"\n row = original_row.copy()\n row[self.output_column_name] = model_response\n row[\"batch_index\"] = batch_index\n return row\n\n def _add_metadata(\n self, row: dict[str, Any], *, success: bool = True, system_msg: str = \"\", error: str | None = None\n ) -> None:\n \"\"\"Add metadata to a row if enabled.\"\"\"\n if not self.enable_metadata:\n return\n\n if success:\n row[\"metadata\"] = {\n \"has_system_message\": bool(system_msg),\n \"input_length\": len(row.get(\"text_input\", \"\")),\n \"response_length\": len(row[self.output_column_name]),\n \"processing_status\": \"success\",\n }\n else:\n row[\"metadata\"] = {\n \"error\": error,\n \"processing_status\": \"failed\",\n }\n\n async def run_batch(self) -> DataFrame:\n \"\"\"Process each row in df[column_name] with the language model asynchronously.\n\n Returns:\n DataFrame: A new DataFrame 
containing:\n - All original columns\n - The model's response column (customizable name)\n - 'batch_index' column for processing order\n - 'metadata' (optional)\n\n Raises:\n ValueError: If the specified column is not found in the DataFrame\n TypeError: If the model is not compatible or input types are wrong\n \"\"\"\n model: Runnable = self.model\n system_msg = self.system_message or \"\"\n df: DataFrame = self.df\n col_name = self.column_name or \"\"\n\n # Validate inputs first\n if not isinstance(df, DataFrame):\n msg = f\"Expected DataFrame input, got {type(df)}\"\n raise TypeError(msg)\n\n if col_name and col_name not in df.columns:\n msg = f\"Column '{col_name}' not found in the DataFrame. Available columns: {', '.join(df.columns)}\"\n raise ValueError(msg)\n\n try:\n # Determine text input for each row\n if col_name:\n user_texts = df[col_name].astype(str).tolist()\n else:\n user_texts = [\n self._format_row_as_toml(cast(\"dict[str, Any]\", row)) for row in df.to_dict(orient=\"records\")\n ]\n\n total_rows = len(user_texts)\n await logger.ainfo(f\"Processing {total_rows} rows with batch run\")\n\n # Prepare the batch of conversations\n conversations = [\n [{\"role\": \"system\", \"content\": system_msg}, {\"role\": \"user\", \"content\": text}]\n if system_msg\n else [{\"role\": \"user\", \"content\": text}]\n for text in user_texts\n ]\n\n # Configure the model with project info and callbacks\n model = model.with_config(\n {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n )\n # Process batches and track progress\n responses_with_idx = list(\n zip(\n range(len(conversations)),\n await model.abatch(list(conversations)),\n strict=True,\n )\n )\n\n # Sort by index to maintain order\n responses_with_idx.sort(key=lambda x: x[0])\n\n # Build the final data with enhanced metadata\n rows: list[dict[str, Any]] = []\n for idx, (original_row, response) in enumerate(\n zip(df.to_dict(orient=\"records\"), responses_with_idx, strict=False)\n ):\n response_text = response[1].content if hasattr(response[1], \"content\") else str(response[1])\n row = self._create_base_row(\n cast(\"dict[str, Any]\", original_row), model_response=response_text, batch_index=idx\n )\n self._add_metadata(row, success=True, system_msg=system_msg)\n rows.append(row)\n\n # Log progress\n if (idx + 1) % max(1, total_rows // 10) == 0:\n await logger.ainfo(f\"Processed {idx + 1}/{total_rows} rows\")\n\n await logger.ainfo(\"Batch processing completed successfully\")\n return DataFrame(rows)\n\n except (KeyError, AttributeError) as e:\n # Handle data structure and attribute access errors\n await logger.aerror(f\"Data processing error: {e!s}\")\n error_row = self._create_base_row(dict.fromkeys(df.columns, \"\"), model_response=\"\", batch_index=-1)\n self._add_metadata(error_row, success=False, error=str(e))\n return DataFrame([error_row])\n"
 },
 "column_name": {
 "_input_type": "StrInput",
@@ -871,7 +871,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-
"value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST] + [{\"icon\": \"brain\"}],\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n *LCToolsAgentComponent._base_inputs,\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n ]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n 
llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n # note the tools are not required to run the agent, hence the validation removed.\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n # return result\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output.\"\"\"\n # Run the regular message response first to get the result\n if not hasattr(self, \"_agent_result\"):\n await self.message_response()\n\n result = self._agent_result\n\n # Extract content from result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n # Try to parse as JSON\n try:\n json_data = json.loads(content)\n return Data(data=json_data)\n except json.JSONDecodeError:\n # If it's not valid JSON, try to extract JSON from the content\n json_match = re.search(r\"\\{.*\\}\", content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n return Data(data=json_data)\n except json.JSONDecodeError:\n pass\n\n # If we can't extract JSON, return the raw content as data\n return Data(data={\"content\": content, \"error\": \"Could not parse as JSON\"})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. 
We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, 
fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+
"value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST] + [{\"icon\": \"brain\"}],\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n *LCToolsAgentComponent._base_inputs,\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n ]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n 
llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n # note the tools are not required to run the agent, hence the validation removed.\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n # return result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output.\"\"\"\n # Run the regular message response first to get the result\n if not hasattr(self, \"_agent_result\"):\n await self.message_response()\n\n result = self._agent_result\n\n # Extract content from result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n # Try to parse as JSON\n try:\n json_data = json.loads(content)\n return Data(data=json_data)\n except json.JSONDecodeError:\n # If it's not valid JSON, try to extract JSON from the content\n json_match = re.search(r\"\\{.*\\}\", content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n return Data(data=json_data)\n except json.JSONDecodeError:\n pass\n\n # If we can't extract JSON, return the raw content as data\n return Data(data={\"content\": content, \"error\": \"Could not parse as JSON\"})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. 
We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, 
fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
 },
 "handle_parsing_errors": {
 "_input_type": "BoolInput",
langflow/interface/components.py
CHANGED
@@ -7,9 +7,8 @@ import pkgutil
 from pathlib import Path
 from typing import TYPE_CHECKING, Any

-from loguru import logger
-
 from langflow.custom.utils import abuild_custom_components, create_component_template
+from langflow.logging.logger import logger
 from langflow.services.settings.base import BASE_COMPONENTS_PATH

 if TYPE_CHECKING:
@@ -49,7 +48,7 @@ async def import_langflow_components():
 try:
 import langflow.components as components_pkg
 except ImportError as e:
-logger.
+await logger.aerror(f"Failed to import langflow.components package: {e}", exc_info=True)
 return {"components": modules_dict}

 # Collect all module names to process
@@ -69,13 +68,13 @@ async def import_langflow_components():
 try:
 module_results = await asyncio.gather(*tasks, return_exceptions=True)
 except Exception as e: # noqa: BLE001
-logger.
+await logger.aerror(f"Error during parallel module processing: {e}", exc_info=True)
 return {"components": modules_dict}

 # Merge results from all modules
 for result in module_results:
 if isinstance(result, Exception):
-logger.
+await logger.awarning(f"Module processing failed: {result}")
 continue

 if result and isinstance(result, tuple) and len(result) == EXPECTED_RESULT_LENGTH:
@@ -165,7 +164,7 @@ async def _determine_loading_strategy(settings_service: SettingsService) -> dict
 component_cache.all_types_dict = {}
 if settings_service.settings.lazy_load_components:
 # Partial loading mode - just load component metadata
-logger.
+await logger.adebug("Using partial component loading")
 component_cache.all_types_dict = await aget_component_metadata(settings_service.settings.components_path)
 elif settings_service.settings.components_path:
 # Traditional full loading - filter out base components path to only load custom components
@@ -177,7 +176,9 @@ async def _determine_loading_strategy(settings_service: SettingsService) -> dict
 components_dict = component_cache.all_types_dict or {}
 component_count = sum(len(comps) for comps in components_dict.get("components", {}).values())
 if component_count > 0 and settings_service.settings.components_path:
-logger.
+await logger.adebug(
+f"Built {component_count} custom components from {settings_service.settings.components_path}"
+)

 return component_cache.all_types_dict

@@ -193,7 +194,7 @@ async def get_and_cache_all_types_dict(
 resulting dictionary.
 """
 if component_cache.all_types_dict is None:
-logger.
+await logger.adebug("Building components cache")

 langflow_components = await import_langflow_components()
 custom_components_dict = await _determine_loading_strategy(settings_service)
@@ -204,7 +205,7 @@ async def get_and_cache_all_types_dict(
 **custom_components_dict,
 }
 component_count = sum(len(comps) for comps in component_cache.all_types_dict.values())
-logger.
+await logger.adebug(f"Loaded {component_count} components")
 return component_cache.all_types_dict


@@ -235,7 +236,7 @@ async def aget_component_metadata(components_paths: list[str]):

 # Get all component types
 component_types = await discover_component_types(components_paths)
-logger.
+await logger.adebug(f"Discovered {len(component_types)} component types: {', '.join(component_types)}")

 # For each component type directory
 for component_type in component_types:
@@ -243,7 +244,7 @@ async def aget_component_metadata(components_paths: list[str]):

 # Get list of components in this type
 component_names = await discover_component_names(component_type, components_paths)
-logger.
+await logger.adebug(f"Found {len(component_names)} components for type {component_type}")

 # Create stub entries with just basic metadata
 for name in component_names:
@@ -365,7 +366,7 @@ async def ensure_component_loaded(component_type: str, component_name: str, sett

 # Check if component is marked for lazy loading
 if component_cache.all_types_dict["components"][component_type][component_name].get("lazy_loaded", False):
-logger.
+await logger.adebug(f"Fully loading component {component_type}:{component_name}")

 # Load just this specific component
 full_component = await load_single_component(
@@ -381,9 +382,9 @@ async def ensure_component_loaded(component_type: str, component_name: str, sett

 # Mark as fully loaded
 component_cache.fully_loaded_components[component_key] = True
-logger.
+await logger.adebug(f"Component {component_type}:{component_name} fully loaded")
 else:
-logger.
+await logger.awarning(f"Failed to fully load component {component_type}:{component_name}")


 async def load_single_component(component_type: str, component_name: str, components_paths: list[str]):
@@ -396,32 +397,32 @@ async def load_single_component(component_type: str, component_name: str, compon
 return await get_single_component_dict(component_type, component_name, components_paths)
 except (ImportError, ModuleNotFoundError) as e:
 # Handle issues with importing the component or its dependencies
-logger.
+await logger.aerror(f"Import error loading component {component_type}:{component_name}: {e!s}")
 return None
 except (AttributeError, TypeError) as e:
 # Handle issues with component structure or type errors
|
|
403
|
-
logger.
|
|
404
|
+
await logger.aerror(f"Component structure error for {component_type}:{component_name}: {e!s}")
|
|
404
405
|
return None
|
|
405
406
|
except FileNotFoundError as e:
|
|
406
407
|
# Handle missing files
|
|
407
|
-
logger.
|
|
408
|
+
await logger.aerror(f"File not found for component {component_type}:{component_name}: {e!s}")
|
|
408
409
|
return None
|
|
409
410
|
except ValueError as e:
|
|
410
411
|
# Handle invalid values or configurations
|
|
411
|
-
logger.
|
|
412
|
+
await logger.aerror(f"Invalid configuration for component {component_type}:{component_name}: {e!s}")
|
|
412
413
|
return None
|
|
413
414
|
except (KeyError, IndexError) as e:
|
|
414
415
|
# Handle data structure access errors
|
|
415
|
-
logger.
|
|
416
|
+
await logger.aerror(f"Data structure error for component {component_type}:{component_name}: {e!s}")
|
|
416
417
|
return None
|
|
417
418
|
except RuntimeError as e:
|
|
418
419
|
# Handle runtime errors
|
|
419
|
-
logger.
|
|
420
|
-
logger.
|
|
420
|
+
await logger.aerror(f"Runtime error loading component {component_type}:{component_name}: {e!s}")
|
|
421
|
+
await logger.adebug("Full traceback for runtime error", exc_info=True)
|
|
421
422
|
return None
|
|
422
423
|
except OSError as e:
|
|
423
424
|
# Handle OS-related errors (file system, permissions, etc.)
|
|
424
|
-
logger.
|
|
425
|
+
await logger.aerror(f"OS error loading component {component_type}:{component_name}: {e!s}")
|
|
425
426
|
return None
|
|
426
427
|
|
|
427
428
|
|
|
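The hunks above replace truncated synchronous loguru calls with awaited methods on langflow's own logger (await logger.adebug(...), await logger.aerror(..., exc_info=True)). The sketch below only illustrates that call shape with the standard library; AsyncLoggerFacade is a hypothetical name and not langflow's implementation.

# Illustrative sketch only: a stdlib-based facade mimicking the awaited logger
# calls introduced above. AsyncLoggerFacade is hypothetical, not langflow code.
import asyncio
import logging
import sys


class AsyncLoggerFacade:
    def __init__(self, name: str = "example") -> None:
        self._logger = logging.getLogger(name)

    async def _emit(self, level: int, msg: str, **kwargs) -> None:
        if kwargs.get("exc_info") is True:
            # Capture the active exception here; a worker thread would not see it.
            kwargs["exc_info"] = sys.exc_info()
        # Run the blocking logging call off the event loop.
        await asyncio.to_thread(self._logger.log, level, msg, **kwargs)

    async def adebug(self, msg: str, **kwargs) -> None:
        await self._emit(logging.DEBUG, msg, **kwargs)

    async def awarning(self, msg: str, **kwargs) -> None:
        await self._emit(logging.WARNING, msg, **kwargs)

    async def aerror(self, msg: str, **kwargs) -> None:
        await self._emit(logging.ERROR, msg, **kwargs)


async def main() -> None:
    logging.basicConfig(level=logging.DEBUG)
    logger = AsyncLoggerFacade()
    await logger.adebug("Building components cache")
    try:
        raise RuntimeError("boom")
    except RuntimeError as e:
        await logger.aerror(f"Runtime error loading component: {e!s}", exc_info=True)


if __name__ == "__main__":
    asyncio.run(main())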
@@ -6,10 +6,10 @@ import warnings
 from typing import TYPE_CHECKING, Any
 
 import orjson
-from loguru import logger
 from pydantic import PydanticDeprecatedSince20
 
 from langflow.custom.eval import eval_custom_component_code
+from langflow.logging.logger import logger
 from langflow.schema.artifact import get_artifact_type, post_process_raw
 from langflow.schema.data import Data
 from langflow.services.deps import get_tracing_service, session_scope
@@ -126,19 +126,19 @@ async def update_params_with_load_from_db_fields(
                 raise
             if "variable not found." in str(e) and not fallback_to_env_vars:
                 raise
-            logger.
+            await logger.adebug(str(e))
             key = None
 
         if fallback_to_env_vars and key is None:
             key = os.getenv(params[field])
             if key:
-                logger.
+                await logger.ainfo(f"Using environment variable {params[field]} for {field}")
             else:
-                logger.
+                await logger.aerror(f"Environment variable {params[field]} is not set.")
 
         params[field] = key if key is not None else None
         if key is None:
-            logger.
+            await logger.awarning(f"Could not get value for {field}. Setting it to None.")
 
     return params
 
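The second hunk above falls back to os.getenv when a stored variable cannot be resolved. A minimal standalone sketch of that fallback pattern follows; resolve_field and the variable name are hypothetical and not part of langflow's API.

# Minimal sketch of the env-var fallback shown above (hypothetical helper).
from __future__ import annotations

import os


def resolve_field(variable_name: str, *, fallback_to_env_vars: bool = True) -> str | None:
    key: str | None = None  # stand-in for a failed variable-store lookup
    if fallback_to_env_vars and key is None:
        key = os.getenv(variable_name)
        if key:
            print(f"Using environment variable {variable_name}")
        else:
            print(f"Environment variable {variable_name} is not set.")
    if key is None:
        print(f"Could not get value for {variable_name}. Setting it to None.")
    return key


if __name__ == "__main__":
    os.environ.setdefault("EXAMPLE_API_KEY", "sk-example")
    print(resolve_field("EXAMPLE_API_KEY"))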
langflow/interface/run.py
CHANGED
langflow/interface/utils.py
CHANGED
@@ -7,9 +7,9 @@ from string import Formatter
 
 import yaml
 from langchain_core.language_models import BaseLanguageModel
-from loguru import logger
 from PIL.Image import Image
 
+from langflow.logging.logger import logger
 from langflow.services.chat.config import ChatConfig
 from langflow.services.deps import get_settings_service
 
langflow/io/__init__.py
CHANGED
langflow/langflow_launcher.py
CHANGED
@@ -44,7 +44,7 @@ def _launch_with_exec():
     os.environ["no_proxy"] = "*"
 
     try:
-        os.execv(sys.executable, [sys.executable, "-m", "langflow.__main__"
+        os.execv(sys.executable, [sys.executable, "-m", "langflow.__main__", *sys.argv[1:]])  # noqa: S606
     except OSError as e:
         # If exec fails, we need to exit since the process replacement failed
         typer.echo(f"Failed to exec langflow: {e}", file=sys.stderr)
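The launcher fix above forwards the caller's CLI arguments through os.execv; without *sys.argv[1:], the relaunched process would drop whatever flags the user originally passed. A generic sketch of the same exec-and-forward pattern, with a placeholder module name:

# Generic sketch of the exec-and-forward pattern; "some.module" is a placeholder,
# not a langflow module.
import os
import sys


def relaunch(module: str = "some.module") -> None:
    try:
        # os.execv replaces the current process image; *sys.argv[1:] forwards the
        # arguments the user originally passed to the new interpreter invocation.
        os.execv(sys.executable, [sys.executable, "-m", module, *sys.argv[1:]])
    except OSError as e:
        print(f"Failed to exec {module}: {e}", file=sys.stderr)
        sys.exit(1)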
langflow/load/load.py
CHANGED
@@ -4,7 +4,6 @@ from pathlib import Path
 
 from aiofile import async_open
 from dotenv import dotenv_values
-from loguru import logger
 
 from langflow.graph.graph.base import Graph
 from langflow.graph.schema import RunOutputs
@@ -49,9 +48,7 @@ async def aload_flow_from_json(
     """
     # If input is a file path, load JSON from the file
     log_file_path = Path(log_file) if log_file else None
-    configure(
-        log_level=log_level, log_file=log_file_path, disable=disable_logs, async_file=True, log_rotation=log_rotation
-    )
+    configure(log_level=log_level, log_file=log_file_path, disable=disable_logs, log_rotation=log_rotation)
 
     # override env variables with .env file
     if env_file and tweaks is not None:
@@ -179,7 +176,7 @@ async def arun_flow_from_json(
         cache=cache,
         disable_logs=disable_logs,
     )
-
+    return await run_graph(
         graph=graph,
         session_id=session_id,
         input_value=input_value,
@@ -188,8 +185,6 @@ async def arun_flow_from_json(
         output_component=output_component,
         fallback_to_env_vars=fallback_to_env_vars,
     )
-    await logger.complete()
-    return result
 
 
 def run_flow_from_json(
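For context, aload_flow_from_json and arun_flow_from_json above sit behind the public loader helpers. A hedged usage sketch is below; the flow path is a placeholder and the exact keyword arguments accepted can vary between langflow versions.

# Hedged usage sketch for the loader refactored above; "flow.json" is a
# placeholder path and keyword arguments may differ across langflow versions.
from langflow.load import run_flow_from_json

results = run_flow_from_json(
    "flow.json",
    input_value="Hello, world!",
    fallback_to_env_vars=True,  # mirrors the env-var fallback used during loading
)
print(results)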
langflow/logging/__init__.py
CHANGED