langflow-base-nightly 0.5.0.dev37__py3-none-any.whl → 0.5.0.dev39__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (397)
  1. langflow/__main__.py +1 -1
  2. langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py +122 -0
  3. langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py +24 -30
  4. langflow/alembic/versions/58b28437a398_modify_nullable.py +6 -6
  5. langflow/alembic/versions/79e675cb6752_change_datetime_type.py +24 -30
  6. langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py +12 -13
  7. langflow/api/build.py +21 -26
  8. langflow/api/health_check_router.py +3 -3
  9. langflow/api/utils.py +3 -3
  10. langflow/api/v1/callback.py +2 -2
  11. langflow/api/v1/chat.py +19 -31
  12. langflow/api/v1/endpoints.py +10 -10
  13. langflow/api/v1/flows.py +1 -1
  14. langflow/api/v1/knowledge_bases.py +3 -3
  15. langflow/api/v1/mcp.py +12 -12
  16. langflow/api/v1/mcp_projects.py +405 -120
  17. langflow/api/v1/mcp_utils.py +8 -8
  18. langflow/api/v1/schemas.py +2 -7
  19. langflow/api/v1/store.py +1 -1
  20. langflow/api/v1/validate.py +2 -2
  21. langflow/api/v1/voice_mode.py +58 -62
  22. langflow/api/v2/files.py +2 -2
  23. langflow/api/v2/mcp.py +10 -9
  24. langflow/base/composio/composio_base.py +21 -2
  25. langflow/base/data/docling_utils.py +194 -0
  26. langflow/base/embeddings/aiml_embeddings.py +1 -1
  27. langflow/base/flow_processing/utils.py +1 -2
  28. langflow/base/io/__init__.py +0 -1
  29. langflow/base/langwatch/utils.py +2 -1
  30. langflow/base/mcp/util.py +49 -47
  31. langflow/base/prompts/api_utils.py +1 -1
  32. langflow/base/tools/flow_tool.py +2 -2
  33. langflow/base/tools/run_flow.py +2 -6
  34. langflow/components/FAISS/__init__.py +34 -0
  35. langflow/components/Notion/add_content_to_page.py +2 -2
  36. langflow/components/Notion/list_database_properties.py +2 -2
  37. langflow/components/Notion/list_pages.py +2 -2
  38. langflow/components/Notion/page_content_viewer.py +2 -2
  39. langflow/components/Notion/update_page_property.py +1 -1
  40. langflow/components/agentql/agentql_api.py +2 -10
  41. langflow/components/agents/agent.py +249 -55
  42. langflow/components/agents/mcp_component.py +14 -14
  43. langflow/components/anthropic/anthropic.py +5 -4
  44. langflow/components/assemblyai/assemblyai_get_subtitles.py +2 -2
  45. langflow/components/assemblyai/assemblyai_lemur.py +2 -2
  46. langflow/components/assemblyai/assemblyai_list_transcripts.py +2 -2
  47. langflow/components/assemblyai/assemblyai_poll_transcript.py +2 -2
  48. langflow/components/assemblyai/assemblyai_start_transcript.py +2 -2
  49. langflow/components/cassandra/__init__.py +40 -0
  50. langflow/components/chroma/__init__.py +34 -0
  51. langflow/components/clickhouse/__init__.py +34 -0
  52. langflow/components/couchbase/__init__.py +34 -0
  53. langflow/components/data/file.py +575 -55
  54. langflow/components/data/url.py +1 -1
  55. langflow/components/datastax/__init__.py +3 -3
  56. langflow/components/datastax/astra_assistant_manager.py +3 -3
  57. langflow/components/datastax/create_assistant.py +1 -2
  58. langflow/components/deactivated/merge_data.py +1 -2
  59. langflow/components/deactivated/sub_flow.py +6 -7
  60. langflow/components/deactivated/vectara_self_query.py +3 -3
  61. langflow/components/docling/__init__.py +0 -198
  62. langflow/components/docling/docling_inline.py +1 -1
  63. langflow/components/elastic/__init__.py +37 -0
  64. langflow/components/embeddings/text_embedder.py +3 -3
  65. langflow/components/firecrawl/firecrawl_extract_api.py +2 -9
  66. langflow/components/google/gmail.py +1 -1
  67. langflow/components/google/google_generative_ai.py +5 -11
  68. langflow/components/groq/groq.py +4 -3
  69. langflow/components/helpers/current_date.py +2 -3
  70. langflow/components/helpers/memory.py +1 -1
  71. langflow/components/ibm/watsonx.py +1 -1
  72. langflow/components/ibm/watsonx_embeddings.py +1 -1
  73. langflow/components/langwatch/langwatch.py +3 -3
  74. langflow/components/logic/flow_tool.py +2 -2
  75. langflow/components/logic/notify.py +1 -1
  76. langflow/components/logic/run_flow.py +2 -3
  77. langflow/components/logic/sub_flow.py +4 -5
  78. langflow/components/mem0/mem0_chat_memory.py +2 -8
  79. langflow/components/milvus/__init__.py +34 -0
  80. langflow/components/mongodb/__init__.py +34 -0
  81. langflow/components/nvidia/nvidia.py +3 -3
  82. langflow/components/olivya/olivya.py +7 -7
  83. langflow/components/ollama/ollama.py +9 -6
  84. langflow/components/perplexity/perplexity.py +3 -13
  85. langflow/components/pgvector/__init__.py +34 -0
  86. langflow/components/pinecone/__init__.py +34 -0
  87. langflow/components/processing/batch_run.py +8 -8
  88. langflow/components/processing/data_operations.py +2 -2
  89. langflow/components/processing/merge_data.py +1 -2
  90. langflow/components/processing/message_to_data.py +2 -3
  91. langflow/components/processing/parse_json_data.py +1 -1
  92. langflow/components/prototypes/python_function.py +2 -3
  93. langflow/components/qdrant/__init__.py +34 -0
  94. langflow/components/redis/__init__.py +36 -2
  95. langflow/components/redis/redis.py +75 -29
  96. langflow/components/redis/redis_chat.py +43 -0
  97. langflow/components/serpapi/serp.py +1 -1
  98. langflow/components/supabase/__init__.py +37 -0
  99. langflow/components/tavily/tavily_extract.py +1 -1
  100. langflow/components/tavily/tavily_search.py +1 -1
  101. langflow/components/tools/calculator.py +2 -2
  102. langflow/components/tools/python_code_structured_tool.py +3 -10
  103. langflow/components/tools/python_repl.py +2 -2
  104. langflow/components/tools/searxng.py +3 -3
  105. langflow/components/tools/serp_api.py +2 -2
  106. langflow/components/tools/tavily_search_tool.py +2 -2
  107. langflow/components/tools/yahoo_finance.py +1 -1
  108. langflow/components/twelvelabs/video_embeddings.py +4 -4
  109. langflow/components/upstash/__init__.py +34 -0
  110. langflow/components/vectara/__init__.py +37 -0
  111. langflow/components/vectorstores/__init__.py +0 -69
  112. langflow/components/vectorstores/local_db.py +2 -1
  113. langflow/components/weaviate/__init__.py +34 -0
  114. langflow/components/yahoosearch/yahoo.py +1 -1
  115. langflow/components/youtube/trending.py +3 -4
  116. langflow/custom/attributes.py +2 -1
  117. langflow/custom/code_parser/code_parser.py +1 -1
  118. langflow/custom/custom_component/base_component.py +1 -1
  119. langflow/custom/custom_component/component.py +16 -2
  120. langflow/custom/dependency_analyzer.py +165 -0
  121. langflow/custom/directory_reader/directory_reader.py +7 -7
  122. langflow/custom/directory_reader/utils.py +1 -2
  123. langflow/custom/utils.py +63 -45
  124. langflow/events/event_manager.py +1 -1
  125. langflow/frontend/assets/{SlackIcon-CnvyOamQ.js → SlackIcon-Cr3Q15Px.js} +1 -1
  126. langflow/frontend/assets/{Wikipedia-nyTEXdr2.js → Wikipedia-GxM5sPdM.js} +1 -1
  127. langflow/frontend/assets/{Wolfram-BYMQkNSq.js → Wolfram-BN3-VOCA.js} +1 -1
  128. langflow/frontend/assets/{index-DZTC5pdT.js → index-28oOcafk.js} +1 -1
  129. langflow/frontend/assets/{index-ChXJpBz4.js → index-2wSXqBtB.js} +1 -1
  130. langflow/frontend/assets/{index-BB15_iOb.js → index-3wW7BClE.js} +1 -1
  131. langflow/frontend/assets/{index-DKHNourL.js → index-6pyH3ZJB.js} +1 -1
  132. langflow/frontend/assets/{index-BvwZfF2i.js → index-AWCSdofD.js} +1 -1
  133. langflow/frontend/assets/{index-Bvxg4_ux.js → index-B2Zgv_xv.js} +1 -1
  134. langflow/frontend/assets/{index-Bd6WtbKA.js → index-B2ptVQGM.js} +1 -1
  135. langflow/frontend/assets/{index-C7QWbnLK.js → index-B3TANVes.js} +1 -1
  136. langflow/frontend/assets/{index-CpvYQ0ug.js → index-B4yCvZKV.js} +1 -1
  137. langflow/frontend/assets/{index-Dg-63Si_.js → index-BC65VuWx.js} +1 -1
  138. langflow/frontend/assets/{index-C6jri9Wm.js → index-BCDSei1q.js} +1 -1
  139. langflow/frontend/assets/{index-OazXJdEl.js → index-BJy50PvP.js} +1 -1
  140. langflow/frontend/assets/{index-CWdkbVsd.js → index-BKseQQ2I.js} +1 -1
  141. langflow/frontend/assets/{index-CaQ_H9ww.js → index-BLTxEeTi.js} +1 -1
  142. langflow/frontend/assets/{index-DGRMNe9n.js → index-BRg1f4Mu.js} +1 -1
  143. langflow/frontend/assets/{index-D8lOi1GI.js → index-BS8Vo8nc.js} +1 -1
  144. langflow/frontend/assets/{index-B748uLP1.js → index-BTKOU4xC.js} +1 -1
  145. langflow/frontend/assets/{index-Dqd4RjYA.js → index-BVwJDmw-.js} +1 -1
  146. langflow/frontend/assets/{index-DbMFlnHE.js → index-BWYuQ2Sj.js} +1 -1
  147. langflow/frontend/assets/{index-BEMw2Np8.js → index-BWdLILDG.js} +1 -1
  148. langflow/frontend/assets/{index-BmX5CoED.js → index-BZcw4827.js} +1 -1
  149. langflow/frontend/assets/{index-CyPvTB63.js → index-Bbi87Ve4.js} +1 -1
  150. langflow/frontend/assets/{index-BTEW9e8P.js → index-Bf0IYKLd.js} +1 -1
  151. langflow/frontend/assets/{index-BZgXW854.js → index-Bg5nrMRh.js} +1 -1
  152. langflow/frontend/assets/{index-BBxAPk1y.js → index-BiC280Nx.js} +1 -1
  153. langflow/frontend/assets/{index-BR0bkVqX.js → index-BiKKN6FR.js} +1 -1
  154. langflow/frontend/assets/{index-CTrt1Q_j.js → index-Bief6eyJ.js} +1 -1
  155. langflow/frontend/assets/{index-D5_DsUJc.js → index-BkXec1Yf.js} +1 -1
  156. langflow/frontend/assets/{index-CZQ9rXNa.js → index-Bnl6QHtP.js} +1 -1
  157. langflow/frontend/assets/{index-BChjg6Az.js → index-BpxbUiZD.js} +1979 -1979
  158. langflow/frontend/assets/{index-BOeo01QB.js → index-BrJV8psX.js} +1 -1
  159. langflow/frontend/assets/{index-DysKpOuj.js → index-BwLWcUXL.js} +1 -1
  160. langflow/frontend/assets/{index-Bnqod3vk.js → index-Bx7dBY26.js} +1 -1
  161. langflow/frontend/assets/{index-D3DDfngy.js → index-C-EdnFdA.js} +1 -1
  162. langflow/frontend/assets/{index-Bsa0xZyL.js → index-C-Xfg4cD.js} +1 -1
  163. langflow/frontend/assets/{index-BTrsh9LS.js → index-C1f2wMat.js} +1 -1
  164. langflow/frontend/assets/index-C1xroOlH.css +1 -0
  165. langflow/frontend/assets/{index-B1YN7oMV.js → index-C3KequvP.js} +1 -1
  166. langflow/frontend/assets/{index-DzW2mfkK.js → index-C3ZjKdCD.js} +1 -1
  167. langflow/frontend/assets/{index-ajRge-Mg.js → index-C3l0zYn0.js} +1 -1
  168. langflow/frontend/assets/{index-cvZdgWHQ.js → index-C3yvArUT.js} +1 -1
  169. langflow/frontend/assets/{index-C-2hghRJ.js → index-C9Cxnkl8.js} +1 -1
  170. langflow/frontend/assets/{index-BhIOhlCH.js → index-CBc8fEAE.js} +1 -1
  171. langflow/frontend/assets/{index-B3Sur4Z3.js → index-CBvrGgID.js} +1 -1
  172. langflow/frontend/assets/{index-CCePCqkT.js → index-CD-PqGCY.js} +1 -1
  173. langflow/frontend/assets/{index-8yMsjVV2.js → index-CGO1CiUr.js} +1 -1
  174. langflow/frontend/assets/{index-DF5VwgU6.js → index-CH5UVA9b.js} +1 -1
  175. langflow/frontend/assets/{index-dcnYpT9N.js → index-CLJeJYjH.js} +1 -1
  176. langflow/frontend/assets/{index-DfxYyS3M.js → index-CMZ79X-Y.js} +1 -1
  177. langflow/frontend/assets/{index-ya2uXE8v.js → index-CMzfJKiW.js} +1 -1
  178. langflow/frontend/assets/{index-DkelbYy7.js → index-CNw1H-Wc.js} +1 -1
  179. langflow/frontend/assets/{index-DytJENYD.js → index-CPHEscq9.js} +1 -1
  180. langflow/frontend/assets/{index-Bv8h2Z-q.js → index-CRPKJZw9.js} +1 -1
  181. langflow/frontend/assets/{index-D-9TI74R.js → index-CRPyCfYy.js} +1 -1
  182. langflow/frontend/assets/{index-BLGYN-9b.js → index-CRcMqCIj.js} +1 -1
  183. langflow/frontend/assets/{index-tVYiABdp.js → index-CUVDws8F.js} +1 -1
  184. langflow/frontend/assets/{index-CpcbQZIF.js → index-CVWQfRYZ.js} +1 -1
  185. langflow/frontend/assets/{index-DPCzHdsC.js → index-CVl6MbaM.js} +1 -1
  186. langflow/frontend/assets/{index-DkXy1WFo.js → index-CVwWoX99.js} +1 -1
  187. langflow/frontend/assets/{index-DK1Ptcc4.js → index-CWPzZtSx.js} +1 -1
  188. langflow/frontend/assets/{index-DHq8TQPB.js → index-CZqRL9DE.js} +1 -1
  189. langflow/frontend/assets/{index-DnEGCgih.js → index-CdIf07Rw.js} +1 -1
  190. langflow/frontend/assets/{index-BIQQCMvz.js → index-Cewy7JZE.js} +1 -1
  191. langflow/frontend/assets/{index-D8GJngXa.js → index-CfwLpbMM.js} +1 -1
  192. langflow/frontend/assets/{index-C_TdzfAn.js → index-CiR1dxI4.js} +1 -1
  193. langflow/frontend/assets/{index-BzL_EoKd.js → index-CiixOzDG.js} +1 -1
  194. langflow/frontend/assets/{index-Boso-xEw.js → index-ClsuDmR6.js} +1 -1
  195. langflow/frontend/assets/{index-8WdfSTTz.js → index-CmEYYRN1.js} +1 -1
  196. langflow/frontend/assets/{index-FUxmznS-.js → index-Co20d-eQ.js} +1 -1
  197. langflow/frontend/assets/{index-C82JjCPD.js → index-CpzXS6md.js} +1 -1
  198. langflow/frontend/assets/{index-DIDDfmlJ.js → index-Cqpzl1J4.js} +1 -1
  199. langflow/frontend/assets/{index-_UcqeEjm.js → index-CtVIONP2.js} +1 -1
  200. langflow/frontend/assets/{index-Gkrq-vzm.js → index-CuFXdTx4.js} +1 -1
  201. langflow/frontend/assets/{index-WPFivmdQ.js → index-Cyd2HtHK.js} +1 -1
  202. langflow/frontend/assets/{index-BFp_O-c9.js → index-D-1tA8Dt.js} +1 -1
  203. langflow/frontend/assets/{index-BqPpO6KG.js → index-D-KY3kkq.js} +1 -1
  204. langflow/frontend/assets/{index-Db71w3lq.js → index-D-_B1a8v.js} +1 -1
  205. langflow/frontend/assets/{index-BIzTEqFh.js → index-D14EWPyZ.js} +1 -1
  206. langflow/frontend/assets/{index-BbJjt5m4.js → index-D2N3l-cw.js} +1 -1
  207. langflow/frontend/assets/{index-DCRk27Tp.js → index-D5ETnvJa.js} +1 -1
  208. langflow/frontend/assets/{index-CvcEzq4x.js → index-D7kquVv2.js} +1 -1
  209. langflow/frontend/assets/{index-Q9vDw0Xl.js → index-DA6-bvgN.js} +1 -1
  210. langflow/frontend/assets/{index-l7bzB8Ex.js → index-DDWBeudF.js} +1 -1
  211. langflow/frontend/assets/{index-BCCGvqay.js → index-DDcMAaG4.js} +1 -1
  212. langflow/frontend/assets/{index-pCQ_yw8m.js → index-DHgomBdh.js} +1 -1
  213. langflow/frontend/assets/{index-BxEuHa76.js → index-DJP-ss47.js} +1 -1
  214. langflow/frontend/assets/{index-BbRm7beF.js → index-DQ7VYqQc.js} +1 -1
  215. langflow/frontend/assets/{index-Car-zdor.js → index-DTqbvGC0.js} +1 -1
  216. langflow/frontend/assets/{index-BRxvproo.js → index-DUpri6zF.js} +1 -1
  217. langflow/frontend/assets/{index-BQ6NUdMY.js → index-DV3utZDZ.js} +1 -1
  218. langflow/frontend/assets/{index-DjQETUy8.js → index-DXRfN4HV.js} +1 -1
  219. langflow/frontend/assets/{index-DfngcQxO.js → index-Db9dYSzy.js} +1 -1
  220. langflow/frontend/assets/{index-rXV1G1aB.js → index-DdtMEn6I.js} +1 -1
  221. langflow/frontend/assets/{index-DmMDPoi0.js → index-DfDhMHgQ.js} +1 -1
  222. langflow/frontend/assets/{index-DJB12jIC.js → index-Dfe7qfvf.js} +1 -1
  223. langflow/frontend/assets/{index-C_veJlEb.js → index-DhtZ5hx8.js} +1 -1
  224. langflow/frontend/assets/{index-CQMoqLAu.js → index-DiB3CTo8.js} +1 -1
  225. langflow/frontend/assets/{index-DVlceYFD.js → index-DiGWASY5.js} +1 -1
  226. langflow/frontend/assets/{index-Du_18NCU.js → index-Dl5amdBz.js} +1 -1
  227. langflow/frontend/assets/{index-CYDAYm-i.js → index-DlD4dXlZ.js} +1 -1
  228. langflow/frontend/assets/{index-CLPdN-q6.js → index-DmeiHnfl.js} +1 -1
  229. langflow/frontend/assets/index-Dmu-X5-4.js +1 -0
  230. langflow/frontend/assets/{index-BzEUlaw_.js → index-DpVWih90.js} +1 -1
  231. langflow/frontend/assets/{index-D6PSjHxP.js → index-DrDrcajG.js} +1 -1
  232. langflow/frontend/assets/{index-Dq5ilsem.js → index-Du-pc0KE.js} +1 -1
  233. langflow/frontend/assets/{index-CYe8Ipef.js → index-DwPkMTaY.js} +1 -1
  234. langflow/frontend/assets/{index-BVEZDXxS.js → index-DwQEZe3C.js} +1 -1
  235. langflow/frontend/assets/{index-BvT7L317.js → index-DyJFTK24.js} +1 -1
  236. langflow/frontend/assets/{index-HK3bVMYA.js → index-J38wh62w.js} +1 -1
  237. langflow/frontend/assets/{index-CCxGSSTT.js → index-Kwdl-e29.js} +1 -1
  238. langflow/frontend/assets/{index-BOB_zsjl.js → index-OwPvCmpW.js} +1 -1
  239. langflow/frontend/assets/{index-Dsps-jKu.js → index-Tw3Os-DN.js} +1 -1
  240. langflow/frontend/assets/{index-CFDvOtKC.js → index-X0guhYF8.js} +1 -1
  241. langflow/frontend/assets/{index-BX5D-USa.js → index-dJWNxIRH.js} +1 -1
  242. langflow/frontend/assets/{index-BRYjyhAd.js → index-dcJ8-agu.js} +1 -1
  243. langflow/frontend/assets/{index-Ui4xUImO.js → index-eo2mAtL-.js} +1 -1
  244. langflow/frontend/assets/{index-CxvP91st.js → index-hG24k5xJ.js} +1 -1
  245. langflow/frontend/assets/{index-CVQmT7ZL.js → index-h_aSZHf3.js} +1 -1
  246. langflow/frontend/assets/{index-BIXaW2aY.js → index-hbndqB9B.js} +1 -1
  247. langflow/frontend/assets/{index-DIkNW9Cd.js → index-iJngutFo.js} +1 -1
  248. langflow/frontend/assets/{index-BWmPX4iQ.js → index-lTpteg8t.js} +1 -1
  249. langflow/frontend/assets/{index-xuIrH2Dq.js → index-lZX9AvZW.js} +1 -1
  250. langflow/frontend/assets/{index-yCHsaqs8.js → index-m8QA6VNM.js} +1 -1
  251. langflow/frontend/assets/{index-BkPYpfgw.js → index-o0D2S7xW.js} +1 -1
  252. langflow/frontend/assets/{index-DpClkXIV.js → index-ovFJ_0J6.js} +1 -1
  253. langflow/frontend/assets/{index-CmplyEaa.js → index-pYJJOcma.js} +1 -1
  254. langflow/frontend/assets/{index-CJo_cyWW.js → index-sI75DsdM.js} +1 -1
  255. langflow/frontend/assets/{index-nVwHLjuV.js → index-xvFOmxx4.js} +1 -1
  256. langflow/frontend/assets/{index-LbYjHKkn.js → index-z3SRY-mX.js} +1 -1
  257. langflow/frontend/assets/lazyIconImports-D97HEZkE.js +2 -0
  258. langflow/frontend/assets/{use-post-add-user-BrBYH9eR.js → use-post-add-user-C0MdTpQ5.js} +1 -1
  259. langflow/frontend/index.html +2 -2
  260. langflow/graph/edge/base.py +2 -3
  261. langflow/graph/graph/base.py +15 -13
  262. langflow/graph/graph/constants.py +3 -0
  263. langflow/graph/utils.py +6 -6
  264. langflow/graph/vertex/base.py +4 -5
  265. langflow/graph/vertex/param_handler.py +1 -1
  266. langflow/graph/vertex/vertex_types.py +2 -2
  267. langflow/helpers/flow.py +1 -1
  268. langflow/initial_setup/setup.py +32 -30
  269. langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +26 -0
  270. langflow/initial_setup/starter_projects/Basic Prompting.json +26 -0
  271. langflow/initial_setup/starter_projects/Blog Writer.json +58 -2
  272. langflow/initial_setup/starter_projects/Custom Component Generator.json +37 -2
  273. langflow/initial_setup/starter_projects/Document Q&A.json +27 -1
  274. langflow/initial_setup/starter_projects/Financial Report Parser.json +43 -0
  275. langflow/initial_setup/starter_projects/Hybrid Search RAG.json +83 -1
  276. langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +43 -0
  277. langflow/initial_setup/starter_projects/Instagram Copywriter.json +51 -3
  278. langflow/initial_setup/starter_projects/Invoice Summarizer.json +40 -1
  279. langflow/initial_setup/starter_projects/Knowledge Ingestion.json +73 -2
  280. langflow/initial_setup/starter_projects/Knowledge Retrieval.json +63 -0
  281. langflow/initial_setup/starter_projects/Market Research.json +59 -3
  282. langflow/initial_setup/starter_projects/Meeting Summary.json +101 -6
  283. langflow/initial_setup/starter_projects/Memory Chatbot.json +37 -2
  284. langflow/initial_setup/starter_projects/News Aggregator.json +63 -3
  285. langflow/initial_setup/starter_projects/Nvidia Remix.json +69 -4
  286. langflow/initial_setup/starter_projects/Pokédex Agent.json +48 -1
  287. langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +44 -1
  288. langflow/initial_setup/starter_projects/Price Deal Finder.json +57 -5
  289. langflow/initial_setup/starter_projects/Research Agent.json +42 -3
  290. langflow/initial_setup/starter_projects/Research Translation Loop.json +66 -0
  291. langflow/initial_setup/starter_projects/SEO Keyword Generator.json +17 -0
  292. langflow/initial_setup/starter_projects/SaaS Pricing.json +27 -1
  293. langflow/initial_setup/starter_projects/Search agent.json +40 -1
  294. langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +76 -7
  295. langflow/initial_setup/starter_projects/Simple Agent.json +59 -3
  296. langflow/initial_setup/starter_projects/Social Media Agent.json +77 -1
  297. langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +35 -1
  298. langflow/initial_setup/starter_projects/Travel Planning Agents.json +51 -3
  299. langflow/initial_setup/starter_projects/Twitter Thread Generator.json +80 -0
  300. langflow/initial_setup/starter_projects/Vector Store RAG.json +110 -3
  301. langflow/initial_setup/starter_projects/Youtube Analysis.json +84 -3
  302. langflow/initial_setup/starter_projects/vector_store_rag.py +1 -1
  303. langflow/interface/components.py +23 -22
  304. langflow/interface/initialize/loading.py +5 -5
  305. langflow/interface/run.py +1 -1
  306. langflow/interface/utils.py +1 -1
  307. langflow/io/__init__.py +0 -1
  308. langflow/langflow_launcher.py +1 -1
  309. langflow/load/load.py +2 -7
  310. langflow/logging/__init__.py +0 -1
  311. langflow/logging/logger.py +191 -115
  312. langflow/logging/setup.py +1 -1
  313. langflow/main.py +37 -52
  314. langflow/memory.py +7 -7
  315. langflow/middleware.py +1 -1
  316. langflow/processing/process.py +6 -3
  317. langflow/schema/artifact.py +2 -2
  318. langflow/schema/data.py +10 -2
  319. langflow/schema/dataframe.py +1 -1
  320. langflow/schema/message.py +1 -1
  321. langflow/serialization/serialization.py +1 -1
  322. langflow/services/auth/mcp_encryption.py +104 -0
  323. langflow/services/auth/utils.py +2 -2
  324. langflow/services/cache/disk.py +1 -1
  325. langflow/services/cache/service.py +3 -3
  326. langflow/services/database/models/flow/model.py +2 -7
  327. langflow/services/database/models/transactions/crud.py +2 -2
  328. langflow/services/database/models/user/crud.py +2 -2
  329. langflow/services/database/service.py +8 -8
  330. langflow/services/database/utils.py +6 -5
  331. langflow/services/deps.py +2 -3
  332. langflow/services/factory.py +1 -1
  333. langflow/services/flow/flow_runner.py +7 -12
  334. langflow/services/job_queue/service.py +16 -15
  335. langflow/services/manager.py +3 -4
  336. langflow/services/settings/auth.py +1 -1
  337. langflow/services/settings/base.py +3 -8
  338. langflow/services/settings/feature_flags.py +1 -1
  339. langflow/services/settings/manager.py +1 -1
  340. langflow/services/settings/utils.py +1 -1
  341. langflow/services/socket/__init__.py +0 -1
  342. langflow/services/socket/service.py +3 -3
  343. langflow/services/socket/utils.py +4 -4
  344. langflow/services/state/service.py +1 -2
  345. langflow/services/storage/factory.py +1 -1
  346. langflow/services/storage/local.py +9 -8
  347. langflow/services/storage/s3.py +11 -10
  348. langflow/services/store/service.py +3 -3
  349. langflow/services/store/utils.py +3 -2
  350. langflow/services/task/temp_flow_cleanup.py +7 -7
  351. langflow/services/telemetry/service.py +10 -10
  352. langflow/services/tracing/arize_phoenix.py +2 -2
  353. langflow/services/tracing/langfuse.py +1 -1
  354. langflow/services/tracing/langsmith.py +1 -1
  355. langflow/services/tracing/langwatch.py +1 -1
  356. langflow/services/tracing/opik.py +1 -1
  357. langflow/services/tracing/service.py +25 -6
  358. langflow/services/tracing/traceloop.py +245 -0
  359. langflow/services/utils.py +7 -7
  360. langflow/services/variable/kubernetes.py +3 -3
  361. langflow/services/variable/kubernetes_secrets.py +2 -1
  362. langflow/services/variable/service.py +5 -5
  363. langflow/utils/component_utils.py +9 -6
  364. langflow/utils/util.py +5 -5
  365. langflow/utils/validate.py +3 -3
  366. langflow/utils/voice_utils.py +2 -2
  367. {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/METADATA +2 -1
  368. {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/RECORD +393 -374
  369. langflow/components/vectorstores/redis.py +0 -89
  370. langflow/frontend/assets/index-C26RqKWL.js +0 -1
  371. langflow/frontend/assets/index-CqS7zir1.css +0 -1
  372. langflow/frontend/assets/lazyIconImports-t6wEndt1.js +0 -2
  373. /langflow/components/{vectorstores → FAISS}/faiss.py +0 -0
  374. /langflow/components/{vectorstores → cassandra}/cassandra.py +0 -0
  375. /langflow/components/{datastax/cassandra.py → cassandra/cassandra_chat.py} +0 -0
  376. /langflow/components/{vectorstores → cassandra}/cassandra_graph.py +0 -0
  377. /langflow/components/{vectorstores → chroma}/chroma.py +0 -0
  378. /langflow/components/{vectorstores → clickhouse}/clickhouse.py +0 -0
  379. /langflow/components/{vectorstores → couchbase}/couchbase.py +0 -0
  380. /langflow/components/{vectorstores → datastax}/astradb.py +0 -0
  381. /langflow/components/{vectorstores → datastax}/astradb_graph.py +0 -0
  382. /langflow/components/{vectorstores → datastax}/graph_rag.py +0 -0
  383. /langflow/components/{vectorstores → datastax}/hcd.py +0 -0
  384. /langflow/components/{vectorstores → elastic}/elasticsearch.py +0 -0
  385. /langflow/components/{vectorstores → elastic}/opensearch.py +0 -0
  386. /langflow/components/{vectorstores → milvus}/milvus.py +0 -0
  387. /langflow/components/{vectorstores → mongodb}/mongodb_atlas.py +0 -0
  388. /langflow/components/{vectorstores → pgvector}/pgvector.py +0 -0
  389. /langflow/components/{vectorstores → pinecone}/pinecone.py +0 -0
  390. /langflow/components/{vectorstores → qdrant}/qdrant.py +0 -0
  391. /langflow/components/{vectorstores → supabase}/supabase.py +0 -0
  392. /langflow/components/{vectorstores → upstash}/upstash.py +0 -0
  393. /langflow/components/{vectorstores → vectara}/vectara.py +0 -0
  394. /langflow/components/{vectorstores → vectara}/vectara_rag.py +0 -0
  395. /langflow/components/{vectorstores → weaviate}/weaviate.py +0 -0
  396. {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/WHEEL +0 -0
  397. {langflow_base_nightly-0.5.0.dev37.dist-info → langflow_base_nightly-0.5.0.dev39.dist-info}/entry_points.txt +0 -0
@@ -193,6 +193,15 @@
   "lf_version": "1.2.0",
   "metadata": {
     "code_hash": "efdcba3771af",
+    "dependencies": {
+      "dependencies": [
+        {
+          "name": "langflow",
+          "version": null
+        }
+      ],
+      "total_dependencies": 1
+    },
     "module": "langflow.components.input_output.text.TextInputComponent"
   },
   "minimized": false,
@@ -312,6 +321,23 @@
   "lf_version": "1.2.0",
   "metadata": {
     "code_hash": "6f74e04e39d5",
+    "dependencies": {
+      "dependencies": [
+        {
+          "name": "orjson",
+          "version": "3.10.15"
+        },
+        {
+          "name": "fastapi",
+          "version": "0.115.13"
+        },
+        {
+          "name": "langflow",
+          "version": null
+        }
+      ],
+      "total_dependencies": 3
+    },
     "module": "langflow.components.input_output.chat_output.ChatOutput"
   },
   "minimized": true,
@@ -767,6 +793,23 @@
   "lf_version": "1.2.0",
   "metadata": {
     "code_hash": "ad2a6f4552c0",
+    "dependencies": {
+      "dependencies": [
+        {
+          "name": "pydantic",
+          "version": "2.10.6"
+        },
+        {
+          "name": "trustcall",
+          "version": "0.0.39"
+        },
+        {
+          "name": "langflow",
+          "version": null
+        }
+      ],
+      "total_dependencies": 3
+    },
     "module": "langflow.components.processing.structured_output.StructuredOutputComponent"
   },
   "minimized": false,
@@ -1336,7 +1379,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "from copy import deepcopy\nfrom typing import Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, FileInput, IntInput, Output\nfrom langflow.schema.data import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the number of files processed.\"\"\"\n if field_name == \"path\":\n # Add outputs based on the number of files in the path\n if len(field_value) == 0:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n\n if len(field_value) == 1:\n # We need to check if the file is structured content\n file_path = frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n # All files get the raw content and path outputs\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # For multiple files, we only show the files output\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"),\n )\n\n return frontend_node\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n 
\"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
+ "value": "\"\"\"Enhanced file component v2 with mypy and ruff compliance.\"\"\"\n\nfrom __future__ import annotations\n\nfrom copy import deepcopy\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n FileInput,\n IntInput,\n MessageTextInput,\n Output,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\nif TYPE_CHECKING:\n from langflow.schema import DataFrame\n\n\nclass MockConversionStatus(Enum):\n \"\"\"Mock ConversionStatus for fallback compatibility.\"\"\"\n\n SUCCESS = \"success\"\n FAILURE = \"failure\"\n\n\nclass MockInputFormat(Enum):\n \"\"\"Mock InputFormat for fallback compatibility.\"\"\"\n\n PDF = \"pdf\"\n IMAGE = \"image\"\n\n\nclass MockImageRefMode(Enum):\n \"\"\"Mock ImageRefMode for fallback compatibility.\"\"\"\n\n PLACEHOLDER = \"placeholder\"\n EMBEDDED = \"embedded\"\n\n\nclass DoclingImports:\n \"\"\"Container for docling imports with type information.\"\"\"\n\n def __init__(\n self,\n conversion_status: type[Enum],\n input_format: type[Enum],\n document_converter: type,\n image_ref_mode: type[Enum],\n strategy: str,\n ) -> None:\n self.conversion_status = conversion_status\n self.input_format = input_format\n self.document_converter = document_converter\n self.image_ref_mode = image_ref_mode\n self.strategy = strategy\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Enhanced file component v2 that combines standard file loading with optional Docling processing and export.\n\n This component supports all features of the standard File component, plus an advanced mode\n that enables Docling document processing and export to various formats (Markdown, HTML, etc.).\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling supported formats from original component\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. 
Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n def _path_value(self, template) -> list[str]:\n # Get current path value\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Update build configuration to show/hide fields based on file count and advanced_mode.\"\"\"\n if field_name == \"path\":\n # Get current path value\n path_value = self._path_value(build_config)\n file_path = path_value[0] if len(path_value) > 0 else \"\"\n\n # Show/hide Advanced Parser based on file count (only for single files)\n file_count = len(field_value) if field_value else 0\n if file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n build_config[\"advanced_mode\"][\"show\"] = True\n else:\n build_config[\"advanced_mode\"][\"show\"] = False\n build_config[\"advanced_mode\"][\"value\"] = False # Reset to False when hidden\n\n # Hide all advanced fields when Advanced Parser is not available\n advanced_fields = [\n \"pipeline\",\n \"ocr_engine\",\n \"doc_key\",\n \"md_image_placeholder\",\n \"md_page_break_placeholder\",\n ]\n for field in advanced_fields:\n if field in build_config:\n build_config[field][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n # Show/hide advanced fields based on advanced_mode (only if single file)\n advanced_fields = [\n \"pipeline\",\n \"ocr_engine\",\n \"doc_key\",\n \"md_image_placeholder\",\n \"md_page_break_placeholder\",\n ]\n\n for field in advanced_fields:\n if field in build_config:\n build_config[field][\"show\"] = field_value\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on the number of files and their types.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n # Add outputs based on the number of files in the path\n template = frontend_node.get(\"template\", {})\n 
path_value = self._path_value(template)\n if len(path_value) == 0:\n return frontend_node\n\n # Clear existing outputs\n frontend_node[\"outputs\"] = []\n\n if len(path_value) == 1:\n # We need to check if the file is structured content\n file_path = path_value[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n # Add outputs based on advanced mode\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n\n if advanced_mode:\n # Advanced mode: Structured Output, Markdown, and File Path\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Normal mode: Raw Content and File Path\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # For multiple files, we show the files output (DataFrame format)\n # Advanced Parser is not available for multiple files\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"),\n )\n\n return frontend_node\n\n def _try_import_docling(self) -> DoclingImports | None:\n \"\"\"Try different import strategies for docling components.\"\"\"\n # Try strategy 1: Latest docling structure\n try:\n from docling.datamodel.base_models import ConversionStatus, InputFormat # type: ignore[import-untyped]\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n from docling_core.types.doc import ImageRefMode # type: ignore[import-untyped]\n\n self.log(\"Using latest docling import structure\")\n return DoclingImports(\n conversion_status=ConversionStatus,\n input_format=InputFormat,\n document_converter=DocumentConverter,\n image_ref_mode=ImageRefMode,\n strategy=\"latest\",\n )\n except ImportError as e:\n self.log(f\"Latest docling structure failed: {e}\")\n\n # Try strategy 2: Alternative import paths\n try:\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n from docling_core.types.doc import ImageRefMode # type: ignore[import-untyped]\n\n # Try to get ConversionStatus from different locations\n conversion_status: type[Enum] = MockConversionStatus\n input_format: type[Enum] = MockInputFormat\n\n try:\n from docling_core.types import ConversionStatus, InputFormat # type: ignore[import-untyped]\n\n conversion_status = ConversionStatus\n input_format = InputFormat\n except ImportError:\n try:\n from docling.datamodel import ConversionStatus, InputFormat # type: ignore[import-untyped]\n\n conversion_status = ConversionStatus\n input_format = InputFormat\n except ImportError:\n # Use mock enums if we can't find them\n pass\n\n self.log(\"Using 
alternative docling import structure\")\n return DoclingImports(\n conversion_status=conversion_status,\n input_format=input_format,\n document_converter=DocumentConverter,\n image_ref_mode=ImageRefMode,\n strategy=\"alternative\",\n )\n except ImportError as e:\n self.log(f\"Alternative docling structure failed: {e}\")\n\n # Try strategy 3: Basic converter only\n try:\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n\n self.log(\"Using basic docling import structure with mocks\")\n return DoclingImports(\n conversion_status=MockConversionStatus,\n input_format=MockInputFormat,\n document_converter=DocumentConverter,\n image_ref_mode=MockImageRefMode,\n strategy=\"basic\",\n )\n except ImportError as e:\n self.log(f\"Basic docling structure failed: {e}\")\n\n # Strategy 4: Complete fallback - return None to indicate failure\n return None\n\n def _create_advanced_converter(self, docling_imports: DoclingImports) -> Any:\n \"\"\"Create advanced converter with pipeline options if available.\"\"\"\n try:\n from docling.datamodel.pipeline_options import PdfPipelineOptions # type: ignore[import-untyped]\n from docling.document_converter import PdfFormatOption # type: ignore[import-untyped]\n\n document_converter = docling_imports.document_converter\n input_format = docling_imports.input_format\n\n # Create basic pipeline options\n pipeline_options = PdfPipelineOptions()\n\n # Configure OCR if specified and available\n if self.ocr_engine:\n try:\n from docling.models.factories import get_ocr_factory # type: ignore[import-untyped]\n\n pipeline_options.do_ocr = True\n ocr_factory = get_ocr_factory(allow_external_plugins=False)\n ocr_options = ocr_factory.create_options(kind=self.ocr_engine)\n pipeline_options.ocr_options = ocr_options\n self.log(f\"Configured OCR with engine: {self.ocr_engine}\")\n except Exception as e: # noqa: BLE001\n self.log(f\"Could not configure OCR: {e}, proceeding without OCR\")\n pipeline_options.do_ocr = False\n\n # Create format options\n pdf_format_option = PdfFormatOption(pipeline_options=pipeline_options)\n format_options = {}\n if hasattr(input_format, \"PDF\"):\n format_options[input_format.PDF] = pdf_format_option\n if hasattr(input_format, \"IMAGE\"):\n format_options[input_format.IMAGE] = pdf_format_option\n\n return document_converter(format_options=format_options)\n\n except Exception as e: # noqa: BLE001\n self.log(f\"Could not create advanced converter: {e}, using basic converter\")\n return docling_imports.document_converter()\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Check if file is compatible with Docling processing.\"\"\"\n # All VALID_EXTENSIONS are Docling compatible (except for TEXT_FILE_TYPES which may overlap)\n docling_extensions = [\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n ]\n return any(file_path.lower().endswith(ext) for ext in docling_extensions)\n\n def process_files(\n self,\n file_list: list[BaseFileComponent.BaseFile],\n ) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Process files using standard parsing or Docling based on advanced_mode and file type.\"\"\"\n\n def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | 
None:\n \"\"\"Process a single file using standard text parsing.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n def process_file_docling(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Process a single file using Docling if compatible, otherwise standard processing.\"\"\"\n # Try Docling first if file is compatible and advanced mode is enabled\n try:\n return self._process_with_docling_and_export(file_path)\n except Exception as e: # noqa: BLE001\n self.log(f\"Docling processing failed for {file_path}: {e}, falling back to standard processing\")\n if not silent_errors:\n # Return error data instead of raising\n return Data(data={\"error\": f\"Docling processing failed: {e}\", \"file_path\": file_path})\n\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n file_path = str(file_list[0].path)\n if self.advanced_mode and self._is_docling_compatible(file_path):\n processed_data = process_file_docling(file_path)\n if not processed_data:\n msg = f\"Failed to process file with Docling: {file_path}\"\n raise ValueError(msg)\n\n # Serialize processed data to match Data structure\n serialized_data = processed_data.serialize_model()\n\n # Now, if doc is nested, we need to unravel it\n clean_data: list[Data | None] = [processed_data]\n\n # This is where we've manually processed the data\n try:\n if \"exported_content\" not in serialized_data:\n clean_data = [\n Data(\n data={\n \"file_path\": file_path,\n **(\n item[\"element\"]\n if \"element\" in item\n else {k: v for k, v in item.items() if k != \"file_path\"}\n ),\n }\n )\n for item in serialized_data[\"doc\"]\n ]\n except Exception as _: # noqa: BLE001\n raise ValueError(serialized_data) from None\n\n # Repeat file_list to match the number of processed data elements\n final_data: list[Data | None] = clean_data\n return self.rollup_data(file_list, final_data)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n my_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file_standard,\n max_concurrency=concurrency,\n )\n\n return self.rollup_data(file_list, my_data)\n\n def load_files_advanced(self) -> DataFrame:\n \"\"\"Load files using advanced Docling processing and export to an advanced format.\"\"\"\n # TODO: Update\n self.markdown = False\n return self.load_files()\n\n def load_files_markdown(self) -> Message:\n \"\"\"Load files using advanced Docling processing and export to Markdown format.\"\"\"\n self.markdown = True\n result = self.load_files()\n return Message(text=str(result.text[0]))\n\n def _process_with_docling_and_export(self, file_path: str) -> Data:\n \"\"\"Process a single file with Docling and export to the specified format.\"\"\"\n # Import docling components only when needed\n docling_imports = self._try_import_docling()\n\n if docling_imports is None:\n msg = \"Docling not available for advanced processing\"\n raise ImportError(msg)\n\n conversion_status = 
docling_imports.conversion_status\n document_converter = docling_imports.document_converter\n image_ref_mode = docling_imports.image_ref_mode\n\n try:\n # Create converter based on strategy and pipeline setting\n if docling_imports.strategy == \"latest\" and self.pipeline == \"standard\":\n converter = self._create_advanced_converter(docling_imports)\n else:\n # Use basic converter for compatibility\n converter = document_converter()\n self.log(\"Using basic DocumentConverter for Docling processing\")\n\n # Process single file\n result = converter.convert(file_path)\n\n # Check if conversion was successful\n success = False\n if hasattr(result, \"status\"):\n if hasattr(conversion_status, \"SUCCESS\"):\n success = result.status == conversion_status.SUCCESS\n else:\n success = str(result.status).lower() == \"success\"\n elif hasattr(result, \"document\"):\n # If no status but has document, assume success\n success = result.document is not None\n\n if not success:\n return Data(data={\"error\": \"Docling conversion failed\", \"file_path\": file_path})\n\n if self.markdown:\n self.log(\"Exporting document to Markdown format\")\n # Export the document to the specified format\n exported_content = self._export_document(result.document, image_ref_mode)\n\n return Data(\n text=exported_content,\n data={\n \"exported_content\": exported_content,\n \"export_format\": self.EXPORT_FORMAT,\n \"file_path\": file_path,\n },\n )\n\n return Data(\n data={\n \"doc\": self.docling_to_dataframe_simple(result.document.export_to_dict()),\n \"export_format\": self.EXPORT_FORMAT,\n \"file_path\": file_path,\n }\n )\n\n except Exception as e: # noqa: BLE001\n return Data(data={\"error\": f\"Docling processing error: {e!s}\", \"file_path\": file_path})\n\n def docling_to_dataframe_simple(self, doc):\n \"\"\"Extract all text elements into a simple DataFrame.\"\"\"\n return [\n {\n \"page_no\": text[\"prov\"][0][\"page_no\"] if text[\"prov\"] else None,\n \"label\": text[\"label\"],\n \"text\": text[\"text\"],\n \"level\": text.get(\"level\", None), # for headers\n }\n for text in doc[\"texts\"]\n ]\n\n def _export_document(self, document: Any, image_ref_mode: type[Enum]) -> str:\n \"\"\"Export document to Markdown format with placeholder images.\"\"\"\n try:\n image_mode = (\n image_ref_mode(self.IMAGE_MODE) if hasattr(image_ref_mode, self.IMAGE_MODE) else self.IMAGE_MODE\n )\n\n # Always export to Markdown since it's fixed\n return document.export_to_markdown(\n image_mode=image_mode,\n image_placeholder=self.md_image_placeholder,\n page_break_placeholder=self.md_page_break_placeholder,\n )\n\n except Exception as e: # noqa: BLE001\n self.log(f\"Markdown export failed: {e}, using basic text export\")\n # Fallback to basic text export\n try:\n return document.export_to_text()\n except Exception: # noqa: BLE001\n return str(document)\n"
  },
  "concurrency_multithreading": {
  "_input_type": "IntInput",
@@ -138,6 +138,15 @@
   "lf_version": "1.3.2",
   "metadata": {
     "code_hash": "192913db3453",
+    "dependencies": {
+      "dependencies": [
+        {
+          "name": "langflow",
+          "version": null
+        }
+      ],
+      "total_dependencies": 1
+    },
     "module": "langflow.components.input_output.chat.ChatInput"
   },
   "minimized": true,
@@ -454,6 +463,23 @@
   "lf_version": "1.3.2",
   "metadata": {
     "code_hash": "6f74e04e39d5",
+    "dependencies": {
+      "dependencies": [
+        {
+          "name": "orjson",
+          "version": "3.10.15"
+        },
+        {
+          "name": "fastapi",
+          "version": "0.115.13"
+        },
+        {
+          "name": "langflow",
+          "version": null
+        }
+      ],
+      "total_dependencies": 3
+    },
     "module": "langflow.components.input_output.chat_output.ChatOutput"
   },
   "minimized": true,
@@ -767,7 +793,20 @@
   "legacy": false,
   "lf_version": "1.3.2",
   "metadata": {
-    "code_hash": "6843645056d9",
+    "code_hash": "4c76fb76d395",
+    "dependencies": {
+      "dependencies": [
+        {
+          "name": "httpx",
+          "version": "0.27.2"
+        },
+        {
+          "name": "langflow",
+          "version": null
+        }
+      ],
+      "total_dependencies": 2
+    },
     "module": "langflow.components.tavily.tavily_search.TavilySearchComponent"
   },
   "minimized": false,
@@ -845,7 +884,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import httpx\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
  },
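Note on the hunk above: the Tavily component's embedded code now imports `logger` from `langflow.logging.logger` instead of `loguru`; the search request itself is unchanged. Below is a minimal standalone sketch of the same Tavily call, assuming a `TAVILY_API_KEY` environment variable and an illustrative query (neither is part of the package):

```python
import os

import httpx

# Sketch of the request TavilySearchComponent.fetch_content builds.
# TAVILY_API_KEY and the query are illustrative assumptions.
payload = {
    "api_key": os.environ["TAVILY_API_KEY"],
    "query": "retrieval-augmented generation best practices",
    "search_depth": "advanced",
    "topic": "general",
    "max_results": 5,
    "include_answer": True,
    "include_images": False,
    "include_raw_content": False,
}

with httpx.Client(timeout=90.0) as client:
    response = client.post(
        "https://api.tavily.com/search",
        json=payload,
        headers={"content-type": "application/json", "accept": "application/json"},
    )
    response.raise_for_status()
    for result in response.json().get("results", []):
        print(result.get("score"), result.get("title"), result.get("url"))
```

The component adds `include_domains`, `exclude_domains`, `chunks_per_source`, `days`, and `time_range` to this payload only when they are set.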
  "days": {
  "_input_type": "IntInput",
@@ -1168,7 +1207,20 @@
  "legacy": false,
  "lf_version": "1.3.2",
  "metadata": {
- "code_hash": "ce845cc47ae8",
+ "code_hash": "ab828f4cdff2",
+ "dependencies": {
+ "dependencies": [
+ {
+ "name": "httpx",
+ "version": "0.27.2"
+ },
+ {
+ "name": "langflow",
+ "version": null
+ }
+ ],
+ "total_dependencies": 2
+ },
  "module": "langflow.components.agentql.agentql_api.AgentQL"
  },
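The metadata hunk above replaces the component's `code_hash` and adds a `dependencies` block listing name/version pairs plus a `total_dependencies` count. A small inspection sketch, assuming an exported flow saved as the illustrative `flow.json`; it walks the JSON without relying on a fixed nesting and prints any metadata blocks that carry the new fields:

```python
import json
from pathlib import Path


def iter_metadata(obj):
    # Yield any "metadata" dicts that carry the new code_hash field,
    # regardless of where they sit in the exported flow JSON.
    if isinstance(obj, dict):
        meta = obj.get("metadata")
        if isinstance(meta, dict) and "code_hash" in meta:
            yield meta
        for value in obj.values():
            yield from iter_metadata(value)
    elif isinstance(obj, list):
        for item in obj:
            yield from iter_metadata(item)


# "flow.json" is an illustrative path to an exported flow.
flow = json.loads(Path("flow.json").read_text(encoding="utf-8"))
for meta in iter_metadata(flow):
    deps = meta.get("dependencies", {}).get("dependencies", [])
    names = [dep.get("name") for dep in deps]
    print(meta.get("module"), meta.get("code_hash"), names)
```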
  "minimized": false,
@@ -1228,7 +1280,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n IntInput,\n MessageTextInput,\n MultilineInput,\n Output,\n SecretStrInput,\n)\nfrom langflow.schema.data import Data\n\n\nclass AgentQL(Component):\n display_name = \"Extract Web Data\"\n description = \"Extracts structured data from a web page using an AgentQL query or a Natural Language description.\"\n documentation: str = \"https://docs.agentql.com/rest-api/api-reference\"\n icon = \"AgentQL\"\n name = \"AgentQL\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n required=True,\n password=True,\n info=\"Your AgentQL API key from dev.agentql.com\",\n ),\n MessageTextInput(\n name=\"url\",\n display_name=\"URL\",\n required=True,\n info=\"The URL of the public web page you want to extract data from.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"query\",\n display_name=\"AgentQL Query\",\n required=False,\n info=\"The AgentQL query to execute. Learn more at https://docs.agentql.com/agentql-query or use a prompt.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Prompt\",\n required=False,\n info=\"A Natural Language description of the data to extract from the page. Alternative to AgentQL query.\",\n tool_mode=True,\n ),\n BoolInput(\n name=\"is_stealth_mode_enabled\",\n display_name=\"Enable Stealth Mode (Beta)\",\n info=\"Enable experimental anti-bot evasion strategies. May not work for all websites at all times.\",\n value=False,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Seconds to wait for a request.\",\n value=900,\n advanced=True,\n ),\n DropdownInput(\n name=\"mode\",\n display_name=\"Request Mode\",\n info=\"'standard' uses deep data analysis, while 'fast' trades some depth of analysis for speed.\",\n options=[\"fast\", \"standard\"],\n value=\"fast\",\n advanced=True,\n ),\n IntInput(\n name=\"wait_for\",\n display_name=\"Wait For\",\n info=\"Seconds to wait for the page to load before extracting data.\",\n value=0,\n range_spec=RangeSpec(min=0, max=10, step_type=\"int\"),\n advanced=True,\n ),\n BoolInput(\n name=\"is_scroll_to_bottom_enabled\",\n display_name=\"Enable scroll to bottom\",\n info=\"Scroll to bottom of the page before extracting data.\",\n value=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_screenshot_enabled\",\n display_name=\"Enable screenshot\",\n info=\"Take a screenshot before extracting data. 
Returned in 'metadata' as a Base64 string.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Data:\n endpoint = \"https://api.agentql.com/v1/query-data\"\n headers = {\n \"X-API-Key\": self.api_key,\n \"Content-Type\": \"application/json\",\n \"X-TF-Request-Origin\": \"langflow\",\n }\n\n payload = {\n \"url\": self.url,\n \"query\": self.query,\n \"prompt\": self.prompt,\n \"params\": {\n \"mode\": self.mode,\n \"wait_for\": self.wait_for,\n \"is_scroll_to_bottom_enabled\": self.is_scroll_to_bottom_enabled,\n \"is_screenshot_enabled\": self.is_screenshot_enabled,\n },\n \"metadata\": {\n \"experimental_stealth_mode_enabled\": self.is_stealth_mode_enabled,\n },\n }\n\n if not self.prompt and not self.query:\n self.status = \"Either Query or Prompt must be provided.\"\n raise ValueError(self.status)\n if self.prompt and self.query:\n self.status = \"Both Query and Prompt can't be provided at the same time.\"\n raise ValueError(self.status)\n\n try:\n response = httpx.post(endpoint, headers=headers, json=payload, timeout=self.timeout)\n response.raise_for_status()\n\n json = response.json()\n data = Data(result=json[\"data\"], metadata=json[\"metadata\"])\n\n except httpx.HTTPStatusError as e:\n response = e.response\n if response.status_code == httpx.codes.UNAUTHORIZED:\n self.status = \"Please, provide a valid API Key. You can create one at https://dev.agentql.com.\"\n else:\n try:\n error_json = response.json()\n logger.error(\n f\"Failure response: '{response.status_code} {response.reason_phrase}' with body: {error_json}\"\n )\n msg = error_json[\"error_info\"] if \"error_info\" in error_json else error_json[\"detail\"]\n except (ValueError, TypeError):\n msg = f\"HTTP {e}.\"\n self.status = msg\n raise ValueError(self.status) from e\n\n else:\n self.status = data\n return data\n"
+ "value": "import httpx\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, SecretStrInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.data import Data\n\n\nclass AgentQL(Component):\n display_name = \"Extract Web Data\"\n description = \"Extracts structured data from a web page using an AgentQL query or a Natural Language description.\"\n documentation: str = \"https://docs.agentql.com/rest-api/api-reference\"\n icon = \"AgentQL\"\n name = \"AgentQL\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n required=True,\n password=True,\n info=\"Your AgentQL API key from dev.agentql.com\",\n ),\n MessageTextInput(\n name=\"url\",\n display_name=\"URL\",\n required=True,\n info=\"The URL of the public web page you want to extract data from.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"query\",\n display_name=\"AgentQL Query\",\n required=False,\n info=\"The AgentQL query to execute. Learn more at https://docs.agentql.com/agentql-query or use a prompt.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Prompt\",\n required=False,\n info=\"A Natural Language description of the data to extract from the page. Alternative to AgentQL query.\",\n tool_mode=True,\n ),\n BoolInput(\n name=\"is_stealth_mode_enabled\",\n display_name=\"Enable Stealth Mode (Beta)\",\n info=\"Enable experimental anti-bot evasion strategies. May not work for all websites at all times.\",\n value=False,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Seconds to wait for a request.\",\n value=900,\n advanced=True,\n ),\n DropdownInput(\n name=\"mode\",\n display_name=\"Request Mode\",\n info=\"'standard' uses deep data analysis, while 'fast' trades some depth of analysis for speed.\",\n options=[\"fast\", \"standard\"],\n value=\"fast\",\n advanced=True,\n ),\n IntInput(\n name=\"wait_for\",\n display_name=\"Wait For\",\n info=\"Seconds to wait for the page to load before extracting data.\",\n value=0,\n range_spec=RangeSpec(min=0, max=10, step_type=\"int\"),\n advanced=True,\n ),\n BoolInput(\n name=\"is_scroll_to_bottom_enabled\",\n display_name=\"Enable scroll to bottom\",\n info=\"Scroll to bottom of the page before extracting data.\",\n value=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_screenshot_enabled\",\n display_name=\"Enable screenshot\",\n info=\"Take a screenshot before extracting data. 
Returned in 'metadata' as a Base64 string.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Data:\n endpoint = \"https://api.agentql.com/v1/query-data\"\n headers = {\n \"X-API-Key\": self.api_key,\n \"Content-Type\": \"application/json\",\n \"X-TF-Request-Origin\": \"langflow\",\n }\n\n payload = {\n \"url\": self.url,\n \"query\": self.query,\n \"prompt\": self.prompt,\n \"params\": {\n \"mode\": self.mode,\n \"wait_for\": self.wait_for,\n \"is_scroll_to_bottom_enabled\": self.is_scroll_to_bottom_enabled,\n \"is_screenshot_enabled\": self.is_screenshot_enabled,\n },\n \"metadata\": {\n \"experimental_stealth_mode_enabled\": self.is_stealth_mode_enabled,\n },\n }\n\n if not self.prompt and not self.query:\n self.status = \"Either Query or Prompt must be provided.\"\n raise ValueError(self.status)\n if self.prompt and self.query:\n self.status = \"Both Query and Prompt can't be provided at the same time.\"\n raise ValueError(self.status)\n\n try:\n response = httpx.post(endpoint, headers=headers, json=payload, timeout=self.timeout)\n response.raise_for_status()\n\n json = response.json()\n data = Data(result=json[\"data\"], metadata=json[\"metadata\"])\n\n except httpx.HTTPStatusError as e:\n response = e.response\n if response.status_code == httpx.codes.UNAUTHORIZED:\n self.status = \"Please, provide a valid API Key. You can create one at https://dev.agentql.com.\"\n else:\n try:\n error_json = response.json()\n logger.error(\n f\"Failure response: '{response.status_code} {response.reason_phrase}' with body: {error_json}\"\n )\n msg = error_json[\"error_info\"] if \"error_info\" in error_json else error_json[\"detail\"]\n except (ValueError, TypeError):\n msg = f\"HTTP {e}.\"\n self.status = msg\n raise ValueError(self.status) from e\n\n else:\n self.status = data\n return data\n"
  },
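As with the Tavily component, the AgentQL hunk above swaps the `loguru` logger for `langflow.logging.logger` and collapses the multi-line `langflow.io` import into a single line; the extraction request is otherwise untouched. A standalone sketch of that request, assuming an `AGENTQL_API_KEY` environment variable and an illustrative target URL and prompt:

```python
import os

import httpx

# Sketch of the request AgentQL.build_output issues. The API key, URL,
# and prompt below are illustrative assumptions, not package defaults.
payload = {
    "url": "https://example.com/products",
    "query": None,
    "prompt": "Extract product names and prices.",
    "params": {
        "mode": "fast",
        "wait_for": 0,
        "is_scroll_to_bottom_enabled": False,
        "is_screenshot_enabled": False,
    },
    "metadata": {"experimental_stealth_mode_enabled": False},
}

response = httpx.post(
    "https://api.agentql.com/v1/query-data",
    headers={
        "X-API-Key": os.environ["AGENTQL_API_KEY"],
        "Content-Type": "application/json",
        "X-TF-Request-Origin": "langflow",
    },
    json=payload,
    timeout=900,
)
response.raise_for_status()
body = response.json()
print(body["data"])
```

The component enforces that exactly one of `query` or `prompt` is provided before posting.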
  "is_screenshot_enabled": {
  "_input_type": "BoolInput",
@@ -1789,7 +1841,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST] + [{\"icon\": \"brain\"}],\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n *LCToolsAgentComponent._base_inputs,\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n ]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n 
llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n # note the tools are not required to run the agent, hence the validation removed.\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n # return result\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output.\"\"\"\n # Run the regular message response first to get the result\n if not hasattr(self, \"_agent_result\"):\n await self.message_response()\n\n result = self._agent_result\n\n # Extract content from result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n # Try to parse as JSON\n try:\n json_data = json.loads(content)\n return Data(data=json_data)\n except json.JSONDecodeError:\n # If it's not valid JSON, try to extract JSON from the content\n json_match = re.search(r\"\\{.*\\}\", content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n return Data(data=json_data)\n except json.JSONDecodeError:\n pass\n\n # If we can't extract JSON, return the raw content as data\n return Data(data={\"content\": content, \"error\": \"Could not parse as JSON\"})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. 
We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, 
fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool\nfrom pydantic import ValidationError\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output, TableInput\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST] + [{\"icon\": \"brain\"}],\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MultilineInput(\n name=\"format_instructions\",\n display_name=\"Output Format Instructions\",\n info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). 
\"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. \"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=(\n \"Schema Validation: Define the structure and data types for structured output. \"\n \"No validation if no output schema.\"\n ),\n advanced=True,\n required=False,\n value=[],\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n ),\n *LCToolsAgentComponent._base_inputs,\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n ]\n\n async def get_agent_requirements(self):\n \"\"\"Get the agent requirements for the agent.\"\"\"\n llm_model, display_name = await self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. 
Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n return llm_model, self.chat_history, self.tools\n\n async def message_response(self) -> Message:\n try:\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n # Avoid catching blind Exception; let truly unexpected exceptions propagate\n except Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n def _preprocess_schema(self, schema):\n \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n processed_schema = []\n for field in schema:\n processed_field = {\n \"name\": str(field.get(\"name\", \"field\")),\n \"type\": str(field.get(\"type\", \"str\")),\n \"description\": str(field.get(\"description\", \"\")),\n \"multiple\": field.get(\"multiple\", False),\n }\n # Ensure multiple is handled correctly\n if isinstance(processed_field[\"multiple\"], str):\n processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\"true\", \"1\", \"t\", \"y\", \"yes\"]\n processed_schema.append(processed_field)\n return processed_schema\n\n async def build_structured_output_base(self, content: str):\n \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n json_pattern = r\"\\{.*\\}\"\n schema_error_msg = \"Try setting an output schema\"\n\n # Try to parse content as JSON first\n json_data = None\n try:\n json_data = json.loads(content)\n except json.JSONDecodeError:\n json_match = re.search(json_pattern, content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n except json.JSONDecodeError:\n return {\"content\": content, \"error\": schema_error_msg}\n else:\n return {\"content\": content, \"error\": schema_error_msg}\n\n # If no output schema provided, return parsed JSON without validation\n if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n return json_data\n\n # Use BaseModel validation with schema\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n\n # Validate against the schema\n if isinstance(json_data, list):\n # Multiple objects\n validated_objects = []\n for item in json_data:\n try:\n validated_obj = 
output_model.model_validate(item)\n validated_objects.append(validated_obj.model_dump())\n except ValidationError as e:\n await logger.aerror(f\"Validation error for item: {e}\")\n # Include invalid items with error info\n validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n return validated_objects\n\n # Single object\n try:\n validated_obj = output_model.model_validate(json_data)\n return [validated_obj.model_dump()] # Return as list for consistency\n except ValidationError as e:\n await logger.aerror(f\"Validation error: {e}\")\n return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n except (TypeError, ValueError) as e:\n await logger.aerror(f\"Error building structured output: {e}\")\n # Fallback to parsed JSON without validation\n return json_data\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n # Always use structured chat agent for JSON response mode for better JSON formatting\n try:\n system_components = []\n\n # 1. Agent Instructions (system_prompt)\n agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n if agent_instructions:\n system_components.append(f\"{agent_instructions}\")\n\n # 2. Format Instructions\n format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n if format_instructions:\n system_components.append(f\"Format instructions: {format_instructions}\")\n\n # 3. Schema Information from BaseModel\n if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n schema_dict = output_model.model_json_schema()\n schema_info = (\n \"You are given some text that may include format instructions, \"\n \"explanations, or other content alongside a JSON schema.\\n\\n\"\n \"Your task:\\n\"\n \"- Extract only the JSON schema.\\n\"\n \"- Return it as valid JSON.\\n\"\n \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n \"Input:\\n\"\n f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n \"Output (only JSON schema):\"\n )\n system_components.append(schema_info)\n except (ValidationError, ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n # Combine all components\n combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=combined_instructions,\n )\n\n # Create and run structured chat agent\n try:\n structured_agent = self.create_agent_runnable()\n except (NotImplementedError, ValueError, TypeError) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n raise\n try:\n result = await self.run_agent(structured_agent)\n except (ExceptionWithMessageError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error with structured agent result: {e}\")\n raise\n # Extract content from structured agent result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n except (ExceptionWithMessageError, ValueError, TypeError, NotImplementedError, AttributeError) as e:\n await logger.aerror(f\"Error with structured 
chat agent: {e}\")\n # Fallback to regular agent\n content_str = \"No content returned from agent\"\n return Data(data={\"content\": content_str, \"error\": str(e)})\n\n # Process with structured output validation\n try:\n structured_output = await self.build_structured_output_base(content)\n\n # Handle different output formats\n if isinstance(structured_output, list) and structured_output:\n if len(structured_output) == 1:\n return Data(data=structured_output[0])\n return Data(data={\"results\": structured_output})\n if isinstance(structured_output, dict):\n return Data(data=structured_output)\n return Data(data={\"content\": content})\n\n except (ValueError, TypeError) as e:\n await logger.aerror(f\"Error in structured output processing: {e}\")\n return Data(data={\"content\": content, \"error\": str(e)})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n async def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = 
None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = 
self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
  },
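The Agent hunk above carries the substantive change in this diff: `message_response` is refactored around a new `get_agent_requirements` helper, error logging moves to `logger.aerror`, and `json_response` gains schema-driven structured output through `format_instructions`, an `output_schema` table, and pydantic validation via `build_model_from_schema`. A condensed sketch of the parse-then-validate pattern that `build_structured_output_base` follows, using a hand-written pydantic model as a stand-in for the schema-built one (the `Product` model and sample reply are illustrative):

```python
import json
import re

from pydantic import BaseModel, ValidationError


class Product(BaseModel):
    # Stand-in for the model Langflow builds from the output_schema table.
    name: str
    price: float | None = None


def parse_structured(content: str):
    # Mirror build_structured_output_base: try json.loads, fall back to a
    # regex-extracted JSON object, then validate each item against the schema.
    try:
        data = json.loads(content)
    except json.JSONDecodeError:
        match = re.search(r"\{.*\}", content, re.DOTALL)
        if not match:
            return {"content": content, "error": "Try setting an output schema"}
        try:
            data = json.loads(match.group())
        except json.JSONDecodeError:
            return {"content": content, "error": "Try setting an output schema"}

    items = data if isinstance(data, list) else [data]
    validated = []
    for item in items:
        try:
            validated.append(Product.model_validate(item).model_dump())
        except ValidationError as exc:
            # Keep invalid items, annotated with the validation error.
            validated.append({"data": item, "validation_error": str(exc)})
    return validated


print(parse_structured('The agent said: {"name": "Widget", "price": 9.5}'))
```

In the component, validation only runs when an `output_schema` is defined; otherwise the parsed JSON is returned as-is.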
  "handle_parsing_errors": {
  "_input_type": "BoolInput",