langflow-base-nightly 0.5.1.dev3__py3-none-any.whl → 0.5.1.dev4__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- langflow/__init__.py +215 -0
- langflow/__main__.py +16 -2
- langflow/alembic/versions/006b3990db50_add_unique_constraints.py +4 -7
- langflow/alembic/versions/012fb73ac359_add_folder_table.py +4 -5
- langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py +11 -20
- langflow/alembic/versions/0b8757876a7c_.py +4 -7
- langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py +4 -6
- langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py +4 -5
- langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py +32 -27
- langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py +4 -5
- langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py +4 -5
- langflow/alembic/versions/1eab2c3eb45e_event_error.py +14 -15
- langflow/alembic/versions/1ef9c4f3765d_.py +5 -10
- langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py +4 -5
- langflow/alembic/versions/260dbcc8b680_adds_tables.py +4 -5
- langflow/alembic/versions/29fe8f1f806b_add_missing_index.py +4 -5
- langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py +4 -7
- langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py +4 -5
- langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py +1 -2
- langflow/alembic/versions/58b28437a398_modify_nullable.py +1 -2
- langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py +6 -12
- langflow/alembic/versions/631faacf5da2_add_webhook_columns.py +4 -5
- langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py +4 -5
- langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py +21 -23
- langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py +4 -5
- langflow/alembic/versions/6e7b581b5648_fix_nullable.py +4 -5
- langflow/alembic/versions/7843803a87b5_store_updates.py +4 -6
- langflow/alembic/versions/79e675cb6752_change_datetime_type.py +1 -2
- langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py +4 -10
- langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py +4 -6
- langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py +7 -9
- langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py +4 -5
- langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py +1 -2
- langflow/alembic/versions/bc2f01c40e4a_new_fixes.py +4 -5
- langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py +4 -5
- langflow/alembic/versions/d066bfd22890_add_message_table.py +4 -4
- langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py +12 -13
- langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py +12 -12
- langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py +7 -10
- langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py +8 -7
- langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py +10 -10
- langflow/alembic/versions/e3bc869fa272_fix_nullable.py +4 -5
- langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py +13 -14
- langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py +4 -5
- langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py +4 -5
- langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py +4 -5
- langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py +19 -15
- langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py +4 -6
- langflow/alembic/versions/fd531f8868b1_fix_credential_table.py +5 -8
- langflow/api/build.py +5 -4
- langflow/api/health_check_router.py +1 -1
- langflow/api/limited_background_tasks.py +1 -1
- langflow/api/log_router.py +1 -2
- langflow/api/utils.py +2 -2
- langflow/api/v1/base.py +1 -2
- langflow/api/v1/callback.py +4 -9
- langflow/api/v1/chat.py +6 -7
- langflow/api/v1/endpoints.py +15 -15
- langflow/api/v1/files.py +1 -1
- langflow/api/v1/flows.py +1 -1
- langflow/api/v1/knowledge_bases.py +1 -1
- langflow/api/v1/mcp.py +1 -1
- langflow/api/v1/mcp_projects.py +14 -5
- langflow/api/v1/mcp_utils.py +3 -3
- langflow/api/v1/openai_responses.py +4 -4
- langflow/api/v1/schemas.py +3 -38
- langflow/api/v1/starter_projects.py +61 -3
- langflow/api/v1/store.py +1 -1
- langflow/api/v1/validate.py +3 -3
- langflow/api/v1/voice_mode.py +2 -2
- langflow/api/v2/files.py +1 -1
- langflow/api/v2/mcp.py +2 -2
- langflow/base/__init__.py +11 -0
- langflow/base/agents/__init__.py +3 -0
- langflow/base/data/__init__.py +2 -4
- langflow/base/data/utils.py +2 -197
- langflow/base/embeddings/__init__.py +3 -0
- langflow/base/io/__init__.py +7 -0
- langflow/base/io/chat.py +5 -18
- langflow/base/io/text.py +2 -21
- langflow/base/knowledge_bases/__init__.py +3 -0
- langflow/base/memory/__init__.py +3 -0
- langflow/base/models/__init__.py +2 -2
- langflow/base/models/openai_constants.py +6 -120
- langflow/base/prompts/__init__.py +3 -0
- langflow/base/prompts/api_utils.py +2 -223
- langflow/base/textsplitters/__init__.py +3 -0
- langflow/base/tools/__init__.py +3 -0
- langflow/base/vectorstores/__init__.py +3 -0
- langflow/components/__init__.py +7 -259
- langflow/components/agents.py +6 -0
- langflow/components/anthropic.py +6 -0
- langflow/components/data.py +6 -0
- langflow/components/helpers.py +6 -0
- langflow/components/knowledge_bases/ingestion.py +13 -14
- langflow/components/knowledge_bases/retrieval.py +8 -7
- langflow/components/openai.py +6 -0
- langflow/components/processing/__init__.py +1 -117
- langflow/components/processing/converter.py +3 -149
- langflow/custom/__init__.py +26 -3
- langflow/custom/custom_component/__init__.py +4 -0
- langflow/custom/custom_component/component.py +20 -1738
- langflow/custom/custom_component/component_with_cache.py +1 -8
- langflow/custom/custom_component/custom_component.py +1 -552
- langflow/custom/utils.py +1 -872
- langflow/custom/validate.py +1 -0
- langflow/events/event_manager.py +18 -108
- langflow/field_typing/__init__.py +6 -6
- langflow/field_typing/constants.py +87 -122
- langflow/field_typing/range_spec.py +2 -32
- langflow/frontend/assets/{SlackIcon-Cc7Qnzki.js → SlackIcon-v88osOTA.js} +1 -1
- langflow/frontend/assets/{Wikipedia-7ulMZY46.js → Wikipedia-DD_S2k00.js} +1 -1
- langflow/frontend/assets/{Wolfram-By9PGsHS.js → Wolfram-EO2C5noN.js} +1 -1
- langflow/frontend/assets/{index-DVLIDc2_.js → index-1Gv1mfvk.js} +1 -1
- langflow/frontend/assets/{index-MVW4HTEk.js → index-7v-bzlzf.js} +1 -1
- langflow/frontend/assets/{index-CUzlcce2.js → index-9CbMazbV.js} +1 -1
- langflow/frontend/assets/{index-CU16NJD7.js → index-B8ZHP8g2.js} +1 -1
- langflow/frontend/assets/{index-v8eXbWlM.js → index-B8y2e6vN.js} +1 -1
- langflow/frontend/assets/{index-BX_asvRB.js → index-BBRUGsyr.js} +1 -1
- langflow/frontend/assets/{index-9FL5xjkL.js → index-BGwqQwlh.js} +1 -1
- langflow/frontend/assets/{index-BAn-AzCS.js → index-BIq-k-FG.js} +1 -1
- langflow/frontend/assets/{index-D5c2nNvp.js → index-BSN73YP8.js} +1 -1
- langflow/frontend/assets/{index-DMCerPJM.js → index-BU8R8jRn.js} +1 -1
- langflow/frontend/assets/{index-CvSoff-8.js → index-BV6yx8ey.js} +1 -1
- langflow/frontend/assets/{index-BISPW-f6.js → index-BYIsg-Eh.js} +1 -1
- langflow/frontend/assets/{index-GzOGB_fo.js → index-B_ksDBSQ.js} +1 -1
- langflow/frontend/assets/{index-BIqEYjNT.js → index-Ba1UOZ9A.js} +1 -1
- langflow/frontend/assets/{index-ByxGmq5p.js → index-Ba9tKRQg.js} +1 -1
- langflow/frontend/assets/{index-BLEWsL1U.js → index-Bbfaw8ca.js} +1 -1
- langflow/frontend/assets/{index-C_MhBX6R.js → index-BbuGqvAx.js} +1 -1
- langflow/frontend/assets/{index-RH_I78z_.js → index-BeoXu1YX.js} +1 -1
- langflow/frontend/assets/{index-cYFKmtmg.js → index-BfjZmOnH.js} +1 -1
- langflow/frontend/assets/{index-Bm9i8F4W.js → index-Bjzy_HZB.js} +1 -1
- langflow/frontend/assets/{index-_szO7sta.js → index-BofEkpYB.js} +1 -1
- langflow/frontend/assets/{index-DP1oE6QB.js → index-Bp7Mty2H.js} +1 -1
- langflow/frontend/assets/{index-CeswGUz3.js → index-BqX1H6yK.js} +1 -1
- langflow/frontend/assets/{index-C8pI0lzi.js → index-BqtBAJAN.js} +1 -1
- langflow/frontend/assets/{index-BusCv3bR.js → index-Bsfraj7A.js} +1 -1
- langflow/frontend/assets/{index-BWnKMRFJ.js → index-BtFl7fER.js} +1 -1
- langflow/frontend/assets/{index-DnlVWWU8.js → index-BvX993Sv.js} +1 -1
- langflow/frontend/assets/{index-C676MS3I.js → index-BvgQ2vzM.js} +1 -1
- langflow/frontend/assets/{index-DJ6HD14g.js → index-BwY98u8n.js} +1 -1
- langflow/frontend/assets/{index-C51yNvIL.js → index-C-RIJAOS.js} +1 -1
- langflow/frontend/assets/{index-DiblXWmk.js → index-C1K6A38P.js} +1 -1
- langflow/frontend/assets/{index-Co__gFM1.js → index-C3Vwhx0t.js} +1 -1
- langflow/frontend/assets/{index-Coi86oqP.js → index-C5XUG_gr.js} +1 -1
- langflow/frontend/assets/{index-jwzN3Jd_.js → index-C6ouLG9o.js} +1 -1
- langflow/frontend/assets/{index-CQQ-4XMS.js → index-C7ZJ_Z6f.js} +1 -1
- langflow/frontend/assets/{index-Bl7RpmrB.js → index-CCOGIwGY.js} +1 -1
- langflow/frontend/assets/{index-CVkIdc6y.js → index-CCcye2rt.js} +1 -1
- langflow/frontend/assets/{index-bMhyLtgS.js → index-CFR4yJQB.js} +1 -1
- langflow/frontend/assets/{index-aAgSKWb3.js → index-CIGmPP0H.js} +1 -1
- langflow/frontend/assets/{index-BGt6jQ4x.js → index-CJmMEa6d.js} +1 -1
- langflow/frontend/assets/{index-DX7JcSMz.js → index-CJxD7lyU.js} +1 -1
- langflow/frontend/assets/{index-BZ-A4K98.js → index-CL_vu6ut.js} +1 -1
- langflow/frontend/assets/{index-BMpKFGhI.js → index-COf3UnBn.js} +1 -1
- langflow/frontend/assets/{index-xN8ogFdo.js → index-CV9650h_.js} +1 -1
- langflow/frontend/assets/{index-OsUvqIUr.js → index-CVDzych0.js} +1 -1
- langflow/frontend/assets/{index-BH7AyHxp.js → index-CWIHsC4D.js} +1 -1
- langflow/frontend/assets/{index-mjwtJmkP.js → index-CXCnFZ0L.js} +1 -1
- langflow/frontend/assets/{index-3jlSQi5Y.js → index-Ca_Pw_Dn.js} +1 -1
- langflow/frontend/assets/{index-D-SnFlhU.js → index-Cbb3bX9e.js} +1 -1
- langflow/frontend/assets/{index--e0oQqZh.js → index-CcJtOz-Z.js} +1 -1
- langflow/frontend/assets/{index-S-sc0Cm9.js → index-CfTbTHEv.js} +1 -1
- langflow/frontend/assets/{index-Deu8rlaZ.js → index-ChoxDAgX.js} +1 -1
- langflow/frontend/assets/{index-lnF9Eqr2.js → index-Cn4gw8aE.js} +1 -1
- langflow/frontend/assets/{index-C_NwzK6j.js → index-CnpLg4zX.js} +1 -1
- langflow/frontend/assets/{index-DznH7Jbq.js → index-Cpao2omG.js} +1 -1
- langflow/frontend/assets/{index-DpWrk8mA.js → index-CqoxM01j.js} +1 -1
- langflow/frontend/assets/{index-Bw-TIIC6.js → index-CrHf2Ic1.js} +1 -1
- langflow/frontend/assets/{index-DmYLDQag.js → index-CrV0uIjp.js} +1 -1
- langflow/frontend/assets/{index-Dp7ZQyL3.js → index-CssADaak.js} +1 -1
- langflow/frontend/assets/{index-CNh0rwur.js → index-CtJdNLy9.js} +1 -1
- langflow/frontend/assets/{index-Ca1b7Iag.js → index-CyeWD2dh.js} +1 -1
- langflow/frontend/assets/{index-DcApTyZ7.js → index-D1xzD7uc.js} +1 -1
- langflow/frontend/assets/{index-B3GvPjhD.js → index-D6MuXC4L.js} +1 -1
- langflow/frontend/assets/{index-Cw0UComa.js → index-D8w9zvIF.js} +1 -1
- langflow/frontend/assets/{index-C-2MRYoJ.js → index-D98Gn0A6.js} +1 -1
- langflow/frontend/assets/{index-aWnZIwHd.js → index-DBhjpWkf.js} +1 -1
- langflow/frontend/assets/{index-nw3WF9lY.js → index-DCCRJzcY.js} +1 -1
- langflow/frontend/assets/{index-RjeC0kaX.js → index-DCTRSkEW.js} +1 -1
- langflow/frontend/assets/{index-B_kBTgxV.js → index-DCUfitVj.js} +1 -1
- langflow/frontend/assets/{index-ChsGhZn3.js → index-DDdz-Xcl.js} +1 -1
- langflow/frontend/assets/{index-7yAHPRxv.js → index-DGdMwZjG.js} +1 -1
- langflow/frontend/assets/{index-DjQElpEg.js → index-DGtl2vMw.js} +1 -1
- langflow/frontend/assets/{index-BCXhKCOK.js → index-DHVdkrni.js} +1 -1
- langflow/frontend/assets/{index-S8uJXTOq.js → index-DJBWwjgl.js} +1 -1
- langflow/frontend/assets/{index-qiVTWUuf.js → index-DMAkJ_qX.js} +1 -1
- langflow/frontend/assets/{index-D-WStJI6.js → index-DMEvEQI5.js} +1 -1
- langflow/frontend/assets/{index-BhqVw9WQ.js → index-DNGRoOsp.js} +1 -1
- langflow/frontend/assets/{index-Cu7vC48Y.js → index-DNT_TUTa.js} +1 -1
- langflow/frontend/assets/{index-Bhcv5M0n.js → index-DQKOH_9K.js} +1 -1
- langflow/frontend/assets/{index-CLcaktde.js → index-DQhqqtqQ.js} +1 -1
- langflow/frontend/assets/{index-DZVgPCio.js → index-DRM7KKnG.js} +1 -1
- langflow/frontend/assets/{index-uybez8MR.js → index-DSCtl3a5.js} +1 -1
- langflow/frontend/assets/{index-CJ5A6STv.js → index-DSLNlm0Z.js} +1 -1
- langflow/frontend/assets/{index-Drg8me2a.js → index-DT-PspE-.js} +1 -1
- langflow/frontend/assets/{index-DsEZjOcp.js → index-DTpbH-p8.js} +1 -1
- langflow/frontend/assets/{index-DrXXKzpD.js → index-DWV6MsIq.js} +1 -1
- langflow/frontend/assets/{index-4JIEdyIM.js → index-DWeL4US_.js} +1 -1
- langflow/frontend/assets/{index-BlDsBQ_1.js → index-DYKZHhpU.js} +1 -1
- langflow/frontend/assets/{index-DFY8YFbC.js → index-DZyQHiMR.js} +1 -1
- langflow/frontend/assets/{index-CKPZpkQk.js → index-Dc6qVuSa.js} +1 -1
- langflow/frontend/assets/{index-yyAaYjLR.js → index-DkYuicnC.js} +1 -1
- langflow/frontend/assets/{index-DmVt5Jlx.js → index-Dlj_2mMs.js} +1 -1
- langflow/frontend/assets/{index-BvRIG6P5.js → index-DmGJUrEp.js} +1 -1
- langflow/frontend/assets/{index-BWFIrwW1.js → index-Dn6hpCAZ.js} +1 -1
- langflow/frontend/assets/{index-Cb5G9Ifd.js → index-DrJU8Fgb.js} +1 -1
- langflow/frontend/assets/{index-COoTCxvs.js → index-DsWfdCzp.js} +1 -1
- langflow/frontend/assets/{index-ZjeocHyu.js → index-DvCPWs2_.js} +1 -1
- langflow/frontend/assets/{index-B5LHnuQR.js → index-DvPVq7OP.js} +1 -1
- langflow/frontend/assets/{index-BnCnYnao.js → index-Dw71ufW4.js} +1 -1
- langflow/frontend/assets/{index-AALDfCyt.js → index-DxkJactf.js} +1 -1
- langflow/frontend/assets/{index-k9jP5chN.js → index-Dz2GTphU.js} +1 -1
- langflow/frontend/assets/{index-BdjfHsrf.js → index-Fvd524_c.js} +1 -1
- langflow/frontend/assets/{index-AKVkmT4S.js → index-GAQ0Mk2M.js} +1 -1
- langflow/frontend/assets/{index-BZSa2qz7.js → index-Hm5-4ItD.js} +1 -1
- langflow/frontend/assets/{index-DbfS_UH-.js → index-IT67FzsK.js} +1 -1
- langflow/frontend/assets/{index-BLXN681C.js → index-ItYiij1i.js} +1 -1
- langflow/frontend/assets/{index-CiklyQU3.js → index-IuR_FEdB.js} +1 -1
- langflow/frontend/assets/{index-xV6ystWy.js → index-Jj60FQkv.js} +1 -1
- langflow/frontend/assets/{index-C_157Mb-.js → index-LlvshmVz.js} +1 -1
- langflow/frontend/assets/{index-CDphUsa3.js → index-LwKh3I_W.js} +1 -1
- langflow/frontend/assets/{index-BrDz-PxE.js → index-N-xxmKKH.js} +1 -1
- langflow/frontend/assets/{index-BsdLyYMY.js → index-RwpaHIAH.js} +1 -1
- langflow/frontend/assets/{index-Cu2Xr6_j.js → index-TVvsp-xh.js} +1 -1
- langflow/frontend/assets/{index-CPiM2oyj.js → index-TdE2u9zP.js} +1 -1
- langflow/frontend/assets/{index-DOj_QWqG.js → index-_x-NkYeW.js} +1 -1
- langflow/frontend/assets/{index-YJsAl7vm.js → index-a-YclEbW.js} +1 -1
- langflow/frontend/assets/{index-5-CSw2-z.js → index-e9MFKUCo.js} +1 -1
- langflow/frontend/assets/{index-BSwBVwyF.js → index-krPr8f2F.js} +1 -1
- langflow/frontend/assets/{index-Df6psZEj.js → index-kveiUWuL.js} +1 -1
- langflow/frontend/assets/{index-CF4_Og1m.js → index-lE3oSjJi.js} +1 -1
- langflow/frontend/assets/{index-C6nzdeYx.js → index-lM3UYg7F.js} +1 -1
- langflow/frontend/assets/{index-C-wnbBBY.js → index-nsRk3qgA.js} +1 -1
- langflow/frontend/assets/{index-D234yKNJ.js → index-pBO0SZLD.js} +4 -4
- langflow/frontend/assets/{index-BMvp94tO.js → index-pbZHsbuE.js} +1 -1
- langflow/frontend/assets/{index-hg2y9OAt.js → index-sfX3aWyp.js} +1 -1
- langflow/frontend/assets/{index-DTCrijba.js → index-xQz-VJ0-.js} +1 -1
- langflow/frontend/assets/{index-SB4rw8D5.js → index-yfcsaHS6.js} +1 -1
- langflow/frontend/assets/{index-C-bjC2sz.js → index-zcGjo9fx.js} +1 -1
- langflow/frontend/assets/lazyIconImports-BjqDmNYG.js +2 -0
- langflow/frontend/assets/{use-post-add-user-JUeLDErC.js → use-post-add-user-w3vpKSOB.js} +1 -1
- langflow/frontend/index.html +1 -1
- langflow/graph/__init__.py +4 -4
- langflow/helpers/data.py +2 -2
- langflow/helpers/flow.py +9 -7
- langflow/helpers/user.py +2 -2
- langflow/initial_setup/setup.py +9 -9
- langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +119 -41
- langflow/initial_setup/starter_projects/Basic Prompting.json +45 -19
- langflow/initial_setup/starter_projects/Blog Writer.json +53 -21
- langflow/initial_setup/starter_projects/Custom Component Generator.json +121 -97
- langflow/initial_setup/starter_projects/Document Q&A.json +46 -18
- langflow/initial_setup/starter_projects/Financial Report Parser.json +49 -17
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +89 -50
- langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +86 -22
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +210 -57
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +132 -35
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json +8 -8
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json +8 -8
- langflow/initial_setup/starter_projects/Market Research.json +174 -48
- langflow/initial_setup/starter_projects/Meeting Summary.json +102 -38
- langflow/initial_setup/starter_projects/Memory Chatbot.json +49 -21
- langflow/initial_setup/starter_projects/News Aggregator.json +140 -39
- langflow/initial_setup/starter_projects/Nvidia Remix.json +153 -181
- langflow/initial_setup/starter_projects/Pokédex Agent.json +132 -35
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +106 -43
- langflow/initial_setup/starter_projects/Price Deal Finder.json +136 -39
- langflow/initial_setup/starter_projects/Research Agent.json +206 -53
- langflow/initial_setup/starter_projects/Research Translation Loop.json +66 -34
- langflow/initial_setup/starter_projects/SEO Keyword Generator.json +41 -15
- langflow/initial_setup/starter_projects/SaaS Pricing.json +128 -31
- langflow/initial_setup/starter_projects/Search agent.json +132 -35
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +422 -98
- langflow/initial_setup/starter_projects/Simple Agent.json +150 -42
- langflow/initial_setup/starter_projects/Social Media Agent.json +150 -42
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +120 -24
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +418 -94
- langflow/initial_setup/starter_projects/Twitter Thread Generator.json +69 -37
- langflow/initial_setup/starter_projects/Vector Store RAG.json +66 -38
- langflow/initial_setup/starter_projects/Youtube Analysis.json +191 -51
- langflow/initial_setup/starter_projects/basic_prompting.py +4 -4
- langflow/initial_setup/starter_projects/blog_writer.py +5 -5
- langflow/initial_setup/starter_projects/complex_agent.py +8 -8
- langflow/initial_setup/starter_projects/document_qa.py +5 -5
- langflow/initial_setup/starter_projects/hierarchical_tasks_agent.py +8 -8
- langflow/initial_setup/starter_projects/memory_chatbot.py +6 -6
- langflow/initial_setup/starter_projects/sequential_tasks_agent.py +7 -7
- langflow/initial_setup/starter_projects/vector_store_rag.py +8 -8
- langflow/inputs/__init__.py +3 -2
- langflow/inputs/constants.py +3 -2
- langflow/inputs/input_mixin.py +49 -310
- langflow/inputs/inputs.py +72 -703
- langflow/inputs/validators.py +2 -18
- langflow/interface/__init__.py +4 -0
- langflow/interface/components.py +3 -491
- langflow/interface/initialize/loading.py +7 -6
- langflow/interface/listing.py +3 -25
- langflow/interface/run.py +1 -1
- langflow/interface/utils.py +3 -111
- langflow/io/__init__.py +2 -2
- langflow/io/schema.py +11 -302
- langflow/load/__init__.py +4 -2
- langflow/load/utils.py +2 -96
- langflow/logging/__init__.py +2 -1
- langflow/logging/setup.py +1 -1
- langflow/main.py +8 -5
- langflow/memory.py +12 -6
- langflow/middleware.py +1 -1
- langflow/processing/process.py +7 -7
- langflow/schema/__init__.py +22 -5
- langflow/schema/artifact.py +1 -1
- langflow/schema/data.py +5 -303
- langflow/schema/dataframe.py +2 -205
- langflow/schema/graph.py +4 -45
- langflow/schema/image.py +2 -67
- langflow/schema/message.py +6 -470
- langflow/schema/playground_events.py +5 -6
- langflow/schema/schema.py +24 -117
- langflow/serialization/constants.py +3 -2
- langflow/serialization/serialization.py +1 -1
- langflow/server.py +1 -2
- langflow/services/__init__.py +1 -2
- langflow/services/auth/mcp_encryption.py +1 -1
- langflow/services/auth/service.py +1 -1
- langflow/services/auth/utils.py +5 -5
- langflow/services/cache/disk.py +2 -2
- langflow/services/cache/factory.py +2 -2
- langflow/services/cache/service.py +2 -2
- langflow/services/cache/utils.py +0 -11
- langflow/services/database/factory.py +1 -1
- langflow/services/database/models/flow/model.py +1 -1
- langflow/services/database/models/message/crud.py +2 -1
- langflow/services/database/models/transactions/crud.py +1 -1
- langflow/services/database/models/user/crud.py +1 -1
- langflow/services/database/service.py +2 -2
- langflow/services/database/utils.py +1 -2
- langflow/services/deps.py +12 -17
- langflow/services/enhanced_manager.py +71 -0
- langflow/services/factory.py +14 -7
- langflow/services/flow/flow_runner.py +4 -4
- langflow/services/job_queue/service.py +2 -1
- langflow/services/manager.py +14 -130
- langflow/services/schema.py +0 -1
- langflow/services/session/service.py +3 -2
- langflow/services/settings/__init__.py +0 -3
- langflow/services/settings/base.py +16 -549
- langflow/services/settings/factory.py +2 -21
- langflow/services/settings/feature_flags.py +2 -11
- langflow/services/settings/service.py +2 -31
- langflow/services/shared_component_cache/factory.py +1 -1
- langflow/services/socket/service.py +1 -1
- langflow/services/socket/utils.py +1 -8
- langflow/services/state/factory.py +1 -1
- langflow/services/state/service.py +3 -2
- langflow/services/storage/factory.py +2 -2
- langflow/services/storage/local.py +1 -2
- langflow/services/storage/s3.py +1 -2
- langflow/services/storage/service.py +2 -1
- langflow/services/store/factory.py +1 -1
- langflow/services/store/service.py +2 -2
- langflow/services/store/utils.py +1 -2
- langflow/services/task/service.py +2 -1
- langflow/services/task/temp_flow_cleanup.py +1 -1
- langflow/services/telemetry/factory.py +1 -1
- langflow/services/telemetry/service.py +2 -3
- langflow/services/tracing/arize_phoenix.py +3 -3
- langflow/services/tracing/base.py +1 -1
- langflow/services/tracing/factory.py +1 -1
- langflow/services/tracing/langfuse.py +2 -2
- langflow/services/tracing/langsmith.py +2 -2
- langflow/services/tracing/langwatch.py +4 -4
- langflow/services/tracing/opik.py +2 -2
- langflow/services/tracing/service.py +17 -11
- langflow/services/tracing/traceloop.py +2 -2
- langflow/services/tracing/utils.py +1 -1
- langflow/services/utils.py +54 -9
- langflow/services/variable/factory.py +1 -1
- langflow/services/variable/kubernetes.py +2 -3
- langflow/services/variable/kubernetes_secrets.py +1 -2
- langflow/services/variable/service.py +2 -3
- langflow/template/__init__.py +2 -9
- langflow/template/field/__init__.py +3 -0
- langflow/template/field/base.py +2 -256
- langflow/template/frontend_node.py +3 -0
- langflow/template/utils.py +2 -216
- langflow/utils/constants.py +28 -204
- langflow/utils/lazy_load.py +3 -14
- langflow/utils/schemas.py +2 -3
- langflow/utils/template_validation.py +2 -2
- langflow/utils/util.py +59 -479
- langflow/utils/validate.py +2 -488
- langflow/utils/voice_utils.py +1 -2
- langflow/worker.py +1 -1
- {langflow_base_nightly-0.5.1.dev3.dist-info → langflow_base_nightly-0.5.1.dev4.dist-info}/METADATA +2 -1
- langflow_base_nightly-0.5.1.dev4.dist-info/RECORD +633 -0
- langflow/base/agents/agent.py +0 -267
- langflow/base/agents/callback.py +0 -130
- langflow/base/agents/context.py +0 -109
- langflow/base/agents/crewai/__init__.py +0 -0
- langflow/base/agents/crewai/crew.py +0 -231
- langflow/base/agents/crewai/tasks.py +0 -12
- langflow/base/agents/default_prompts.py +0 -23
- langflow/base/agents/errors.py +0 -15
- langflow/base/agents/events.py +0 -346
- langflow/base/agents/utils.py +0 -205
- langflow/base/astra_assistants/__init__.py +0 -0
- langflow/base/astra_assistants/util.py +0 -171
- langflow/base/chains/__init__.py +0 -0
- langflow/base/chains/model.py +0 -19
- langflow/base/composio/__init__.py +0 -0
- langflow/base/composio/composio_base.py +0 -1297
- langflow/base/compressors/__init__.py +0 -0
- langflow/base/compressors/model.py +0 -60
- langflow/base/constants.py +0 -46
- langflow/base/curl/__init__.py +0 -0
- langflow/base/curl/parse.py +0 -188
- langflow/base/data/base_file.py +0 -685
- langflow/base/data/docling_utils.py +0 -245
- langflow/base/document_transformers/__init__.py +0 -0
- langflow/base/document_transformers/model.py +0 -43
- langflow/base/embeddings/aiml_embeddings.py +0 -62
- langflow/base/embeddings/model.py +0 -26
- langflow/base/flow_processing/__init__.py +0 -0
- langflow/base/flow_processing/utils.py +0 -86
- langflow/base/huggingface/__init__.py +0 -0
- langflow/base/huggingface/model_bridge.py +0 -133
- langflow/base/langchain_utilities/__init__.py +0 -0
- langflow/base/langchain_utilities/model.py +0 -35
- langflow/base/langchain_utilities/spider_constants.py +0 -1
- langflow/base/langwatch/__init__.py +0 -0
- langflow/base/langwatch/utils.py +0 -18
- langflow/base/mcp/__init__.py +0 -0
- langflow/base/mcp/constants.py +0 -2
- langflow/base/mcp/util.py +0 -1524
- langflow/base/memory/memory.py +0 -49
- langflow/base/memory/model.py +0 -38
- langflow/base/models/aiml_constants.py +0 -51
- langflow/base/models/anthropic_constants.py +0 -47
- langflow/base/models/aws_constants.py +0 -151
- langflow/base/models/chat_result.py +0 -76
- langflow/base/models/google_generative_ai_constants.py +0 -70
- langflow/base/models/groq_constants.py +0 -134
- langflow/base/models/model.py +0 -375
- langflow/base/models/model_input_constants.py +0 -299
- langflow/base/models/model_metadata.py +0 -41
- langflow/base/models/model_utils.py +0 -8
- langflow/base/models/novita_constants.py +0 -35
- langflow/base/models/ollama_constants.py +0 -49
- langflow/base/models/sambanova_constants.py +0 -18
- langflow/base/processing/__init__.py +0 -0
- langflow/base/prompts/utils.py +0 -61
- langflow/base/textsplitters/model.py +0 -28
- langflow/base/tools/base.py +0 -26
- langflow/base/tools/component_tool.py +0 -324
- langflow/base/tools/constants.py +0 -49
- langflow/base/tools/flow_tool.py +0 -131
- langflow/base/tools/run_flow.py +0 -227
- langflow/base/vectorstores/model.py +0 -193
- langflow/base/vectorstores/utils.py +0 -22
- langflow/base/vectorstores/vector_store_connection_decorator.py +0 -52
- langflow/components/FAISS/__init__.py +0 -34
- langflow/components/FAISS/faiss.py +0 -111
- langflow/components/Notion/__init__.py +0 -19
- langflow/components/Notion/add_content_to_page.py +0 -269
- langflow/components/Notion/create_page.py +0 -94
- langflow/components/Notion/list_database_properties.py +0 -68
- langflow/components/Notion/list_pages.py +0 -122
- langflow/components/Notion/list_users.py +0 -77
- langflow/components/Notion/page_content_viewer.py +0 -93
- langflow/components/Notion/search.py +0 -111
- langflow/components/Notion/update_page_property.py +0 -114
- langflow/components/_importing.py +0 -37
- langflow/components/agentql/__init__.py +0 -3
- langflow/components/agentql/agentql_api.py +0 -151
- langflow/components/agents/__init__.py +0 -4
- langflow/components/agents/agent.py +0 -554
- langflow/components/agents/mcp_component.py +0 -501
- langflow/components/aiml/__init__.py +0 -37
- langflow/components/aiml/aiml.py +0 -112
- langflow/components/aiml/aiml_embeddings.py +0 -37
- langflow/components/amazon/__init__.py +0 -36
- langflow/components/amazon/amazon_bedrock_embedding.py +0 -109
- langflow/components/amazon/amazon_bedrock_model.py +0 -124
- langflow/components/amazon/s3_bucket_uploader.py +0 -211
- langflow/components/anthropic/__init__.py +0 -34
- langflow/components/anthropic/anthropic.py +0 -187
- langflow/components/apify/__init__.py +0 -5
- langflow/components/apify/apify_actor.py +0 -325
- langflow/components/arxiv/__init__.py +0 -3
- langflow/components/arxiv/arxiv.py +0 -163
- langflow/components/assemblyai/__init__.py +0 -46
- langflow/components/assemblyai/assemblyai_get_subtitles.py +0 -83
- langflow/components/assemblyai/assemblyai_lemur.py +0 -183
- langflow/components/assemblyai/assemblyai_list_transcripts.py +0 -95
- langflow/components/assemblyai/assemblyai_poll_transcript.py +0 -72
- langflow/components/assemblyai/assemblyai_start_transcript.py +0 -188
- langflow/components/azure/__init__.py +0 -37
- langflow/components/azure/azure_openai.py +0 -95
- langflow/components/azure/azure_openai_embeddings.py +0 -83
- langflow/components/baidu/__init__.py +0 -32
- langflow/components/baidu/baidu_qianfan_chat.py +0 -113
- langflow/components/bing/__init__.py +0 -3
- langflow/components/bing/bing_search_api.py +0 -61
- langflow/components/cassandra/__init__.py +0 -40
- langflow/components/cassandra/cassandra.py +0 -264
- langflow/components/cassandra/cassandra_chat.py +0 -92
- langflow/components/cassandra/cassandra_graph.py +0 -238
- langflow/components/chains/__init__.py +0 -0
- langflow/components/chroma/__init__.py +0 -34
- langflow/components/chroma/chroma.py +0 -167
- langflow/components/cleanlab/__init__.py +0 -40
- langflow/components/cleanlab/cleanlab_evaluator.py +0 -157
- langflow/components/cleanlab/cleanlab_rag_evaluator.py +0 -254
- langflow/components/cleanlab/cleanlab_remediator.py +0 -131
- langflow/components/clickhouse/__init__.py +0 -34
- langflow/components/clickhouse/clickhouse.py +0 -135
- langflow/components/cloudflare/__init__.py +0 -32
- langflow/components/cloudflare/cloudflare.py +0 -81
- langflow/components/cohere/__init__.py +0 -40
- langflow/components/cohere/cohere_embeddings.py +0 -81
- langflow/components/cohere/cohere_models.py +0 -46
- langflow/components/cohere/cohere_rerank.py +0 -51
- langflow/components/composio/__init__.py +0 -73
- langflow/components/composio/composio_api.py +0 -268
- langflow/components/composio/dropbox_compnent.py +0 -11
- langflow/components/composio/github_composio.py +0 -11
- langflow/components/composio/gmail_composio.py +0 -38
- langflow/components/composio/googlecalendar_composio.py +0 -11
- langflow/components/composio/googlemeet_composio.py +0 -11
- langflow/components/composio/googletasks_composio.py +0 -8
- langflow/components/composio/linear_composio.py +0 -11
- langflow/components/composio/outlook_composio.py +0 -11
- langflow/components/composio/reddit_composio.py +0 -11
- langflow/components/composio/slack_composio.py +0 -11
- langflow/components/composio/slackbot_composio.py +0 -11
- langflow/components/composio/supabase_composio.py +0 -11
- langflow/components/composio/todoist_composio.py +0 -11
- langflow/components/composio/youtube_composio.py +0 -11
- langflow/components/confluence/__init__.py +0 -3
- langflow/components/confluence/confluence.py +0 -84
- langflow/components/couchbase/__init__.py +0 -34
- langflow/components/couchbase/couchbase.py +0 -102
- langflow/components/crewai/__init__.py +0 -49
- langflow/components/crewai/crewai.py +0 -107
- langflow/components/crewai/hierarchical_crew.py +0 -46
- langflow/components/crewai/hierarchical_task.py +0 -44
- langflow/components/crewai/sequential_crew.py +0 -52
- langflow/components/crewai/sequential_task.py +0 -73
- langflow/components/crewai/sequential_task_agent.py +0 -143
- langflow/components/custom_component/__init__.py +0 -34
- langflow/components/custom_component/custom_component.py +0 -31
- langflow/components/data/__init__.py +0 -25
- langflow/components/data/api_request.py +0 -545
- langflow/components/data/csv_to_data.py +0 -95
- langflow/components/data/directory.py +0 -113
- langflow/components/data/file.py +0 -586
- langflow/components/data/json_to_data.py +0 -98
- langflow/components/data/news_search.py +0 -164
- langflow/components/data/rss.py +0 -69
- langflow/components/data/sql_executor.py +0 -99
- langflow/components/data/url.py +0 -299
- langflow/components/data/web_search.py +0 -112
- langflow/components/data/webhook.py +0 -56
- langflow/components/datastax/__init__.py +0 -70
- langflow/components/datastax/astra_assistant_manager.py +0 -306
- langflow/components/datastax/astra_db.py +0 -69
- langflow/components/datastax/astra_vectorize.py +0 -124
- langflow/components/datastax/astradb_cql.py +0 -314
- langflow/components/datastax/astradb_graph.py +0 -319
- langflow/components/datastax/astradb_tool.py +0 -414
- langflow/components/datastax/astradb_vectorstore.py +0 -1285
- langflow/components/datastax/create_assistant.py +0 -58
- langflow/components/datastax/create_thread.py +0 -32
- langflow/components/datastax/dotenv.py +0 -35
- langflow/components/datastax/get_assistant.py +0 -37
- langflow/components/datastax/getenvvar.py +0 -30
- langflow/components/datastax/graph_rag.py +0 -141
- langflow/components/datastax/hcd.py +0 -314
- langflow/components/datastax/list_assistants.py +0 -25
- langflow/components/datastax/run.py +0 -89
- langflow/components/deactivated/__init__.py +0 -19
- langflow/components/deactivated/amazon_kendra.py +0 -66
- langflow/components/deactivated/chat_litellm_model.py +0 -158
- langflow/components/deactivated/code_block_extractor.py +0 -26
- langflow/components/deactivated/documents_to_data.py +0 -22
- langflow/components/deactivated/embed.py +0 -16
- langflow/components/deactivated/extract_key_from_data.py +0 -46
- langflow/components/deactivated/json_document_builder.py +0 -59
- langflow/components/deactivated/list_flows.py +0 -20
- langflow/components/deactivated/mcp_sse.py +0 -61
- langflow/components/deactivated/mcp_stdio.py +0 -62
- langflow/components/deactivated/merge_data.py +0 -93
- langflow/components/deactivated/message.py +0 -37
- langflow/components/deactivated/metal.py +0 -54
- langflow/components/deactivated/multi_query.py +0 -59
- langflow/components/deactivated/retriever.py +0 -43
- langflow/components/deactivated/selective_passthrough.py +0 -77
- langflow/components/deactivated/should_run_next.py +0 -40
- langflow/components/deactivated/split_text.py +0 -63
- langflow/components/deactivated/store_message.py +0 -24
- langflow/components/deactivated/sub_flow.py +0 -124
- langflow/components/deactivated/vectara_self_query.py +0 -76
- langflow/components/deactivated/vector_store.py +0 -24
- langflow/components/deepseek/__init__.py +0 -34
- langflow/components/deepseek/deepseek.py +0 -136
- langflow/components/docling/__init__.py +0 -43
- langflow/components/docling/chunk_docling_document.py +0 -186
- langflow/components/docling/docling_inline.py +0 -235
- langflow/components/docling/docling_remote.py +0 -193
- langflow/components/docling/export_docling_document.py +0 -117
- langflow/components/documentloaders/__init__.py +0 -0
- langflow/components/duckduckgo/__init__.py +0 -3
- langflow/components/duckduckgo/duck_duck_go_search_run.py +0 -92
- langflow/components/elastic/__init__.py +0 -37
- langflow/components/elastic/elasticsearch.py +0 -267
- langflow/components/elastic/opensearch.py +0 -243
- langflow/components/embeddings/__init__.py +0 -37
- langflow/components/embeddings/similarity.py +0 -76
- langflow/components/embeddings/text_embedder.py +0 -64
- langflow/components/exa/__init__.py +0 -3
- langflow/components/exa/exa_search.py +0 -68
- langflow/components/firecrawl/__init__.py +0 -43
- langflow/components/firecrawl/firecrawl_crawl_api.py +0 -88
- langflow/components/firecrawl/firecrawl_extract_api.py +0 -136
- langflow/components/firecrawl/firecrawl_map_api.py +0 -89
- langflow/components/firecrawl/firecrawl_scrape_api.py +0 -73
- langflow/components/git/__init__.py +0 -4
- langflow/components/git/git.py +0 -262
- langflow/components/git/gitextractor.py +0 -196
- langflow/components/glean/__init__.py +0 -3
- langflow/components/glean/glean_search_api.py +0 -173
- langflow/components/google/__init__.py +0 -17
- langflow/components/google/gmail.py +0 -192
- langflow/components/google/google_bq_sql_executor.py +0 -157
- langflow/components/google/google_drive.py +0 -92
- langflow/components/google/google_drive_search.py +0 -152
- langflow/components/google/google_generative_ai.py +0 -147
- langflow/components/google/google_generative_ai_embeddings.py +0 -141
- langflow/components/google/google_oauth_token.py +0 -89
- langflow/components/google/google_search_api_core.py +0 -68
- langflow/components/google/google_serper_api_core.py +0 -74
- langflow/components/groq/__init__.py +0 -34
- langflow/components/groq/groq.py +0 -140
- langflow/components/helpers/__init__.py +0 -52
- langflow/components/helpers/calculator_core.py +0 -89
- langflow/components/helpers/create_list.py +0 -40
- langflow/components/helpers/current_date.py +0 -42
- langflow/components/helpers/id_generator.py +0 -42
- langflow/components/helpers/memory.py +0 -251
- langflow/components/helpers/output_parser.py +0 -45
- langflow/components/helpers/store_message.py +0 -90
- langflow/components/homeassistant/__init__.py +0 -7
- langflow/components/homeassistant/home_assistant_control.py +0 -152
- langflow/components/homeassistant/list_home_assistant_states.py +0 -137
- langflow/components/huggingface/__init__.py +0 -37
- langflow/components/huggingface/huggingface.py +0 -197
- langflow/components/huggingface/huggingface_inference_api.py +0 -106
- langflow/components/ibm/__init__.py +0 -34
- langflow/components/ibm/watsonx.py +0 -203
- langflow/components/ibm/watsonx_embeddings.py +0 -135
- langflow/components/icosacomputing/__init__.py +0 -5
- langflow/components/icosacomputing/combinatorial_reasoner.py +0 -84
- langflow/components/input_output/__init__.py +0 -38
- langflow/components/input_output/chat.py +0 -120
- langflow/components/input_output/chat_output.py +0 -200
- langflow/components/input_output/text.py +0 -27
- langflow/components/input_output/text_output.py +0 -29
- langflow/components/jigsawstack/__init__.py +0 -23
- langflow/components/jigsawstack/ai_scrape.py +0 -126
- langflow/components/jigsawstack/ai_web_search.py +0 -136
- langflow/components/jigsawstack/file_read.py +0 -115
- langflow/components/jigsawstack/file_upload.py +0 -94
- langflow/components/jigsawstack/image_generation.py +0 -205
- langflow/components/jigsawstack/nsfw.py +0 -60
- langflow/components/jigsawstack/object_detection.py +0 -124
- langflow/components/jigsawstack/sentiment.py +0 -112
- langflow/components/jigsawstack/text_to_sql.py +0 -90
- langflow/components/jigsawstack/text_translate.py +0 -77
- langflow/components/jigsawstack/vocr.py +0 -107
- langflow/components/langchain_utilities/__init__.py +0 -109
- langflow/components/langchain_utilities/character.py +0 -53
- langflow/components/langchain_utilities/conversation.py +0 -52
- langflow/components/langchain_utilities/csv_agent.py +0 -107
- langflow/components/langchain_utilities/fake_embeddings.py +0 -26
- langflow/components/langchain_utilities/html_link_extractor.py +0 -35
- langflow/components/langchain_utilities/json_agent.py +0 -45
- langflow/components/langchain_utilities/langchain_hub.py +0 -126
- langflow/components/langchain_utilities/language_recursive.py +0 -49
- langflow/components/langchain_utilities/language_semantic.py +0 -138
- langflow/components/langchain_utilities/llm_checker.py +0 -39
- langflow/components/langchain_utilities/llm_math.py +0 -42
- langflow/components/langchain_utilities/natural_language.py +0 -61
- langflow/components/langchain_utilities/openai_tools.py +0 -53
- langflow/components/langchain_utilities/openapi.py +0 -48
- langflow/components/langchain_utilities/recursive_character.py +0 -60
- langflow/components/langchain_utilities/retrieval_qa.py +0 -83
- langflow/components/langchain_utilities/runnable_executor.py +0 -137
- langflow/components/langchain_utilities/self_query.py +0 -80
- langflow/components/langchain_utilities/spider.py +0 -142
- langflow/components/langchain_utilities/sql.py +0 -40
- langflow/components/langchain_utilities/sql_database.py +0 -35
- langflow/components/langchain_utilities/sql_generator.py +0 -78
- langflow/components/langchain_utilities/tool_calling.py +0 -59
- langflow/components/langchain_utilities/vector_store_info.py +0 -49
- langflow/components/langchain_utilities/vector_store_router.py +0 -33
- langflow/components/langchain_utilities/xml_agent.py +0 -71
- langflow/components/langwatch/__init__.py +0 -3
- langflow/components/langwatch/langwatch.py +0 -278
- langflow/components/link_extractors/__init__.py +0 -0
- langflow/components/lmstudio/__init__.py +0 -34
- langflow/components/lmstudio/lmstudioembeddings.py +0 -89
- langflow/components/lmstudio/lmstudiomodel.py +0 -129
- langflow/components/logic/__init__.py +0 -52
- langflow/components/logic/conditional_router.py +0 -171
- langflow/components/logic/data_conditional_router.py +0 -125
- langflow/components/logic/flow_tool.py +0 -110
- langflow/components/logic/listen.py +0 -29
- langflow/components/logic/loop.py +0 -125
- langflow/components/logic/notify.py +0 -88
- langflow/components/logic/pass_message.py +0 -35
- langflow/components/logic/run_flow.py +0 -71
- langflow/components/logic/sub_flow.py +0 -114
- langflow/components/maritalk/__init__.py +0 -32
- langflow/components/maritalk/maritalk.py +0 -52
- langflow/components/mem0/__init__.py +0 -3
- langflow/components/mem0/mem0_chat_memory.py +0 -136
- langflow/components/milvus/__init__.py +0 -34
- langflow/components/milvus/milvus.py +0 -115
- langflow/components/mistral/__init__.py +0 -37
- langflow/components/mistral/mistral.py +0 -114
- langflow/components/mistral/mistral_embeddings.py +0 -58
- langflow/components/models/__init__.py +0 -34
- langflow/components/models/embedding_model.py +0 -114
- langflow/components/models/language_model.py +0 -144
- langflow/components/mongodb/__init__.py +0 -34
- langflow/components/mongodb/mongodb_atlas.py +0 -213
- langflow/components/needle/__init__.py +0 -3
- langflow/components/needle/needle.py +0 -104
- langflow/components/notdiamond/__init__.py +0 -36
- langflow/components/notdiamond/notdiamond.py +0 -228
- langflow/components/novita/__init__.py +0 -32
- langflow/components/novita/novita.py +0 -130
- langflow/components/nvidia/__init__.py +0 -57
- langflow/components/nvidia/nvidia.py +0 -157
- langflow/components/nvidia/nvidia_embedding.py +0 -77
- langflow/components/nvidia/nvidia_ingest.py +0 -317
- langflow/components/nvidia/nvidia_rerank.py +0 -63
- langflow/components/nvidia/system_assist.py +0 -65
- langflow/components/olivya/__init__.py +0 -3
- langflow/components/olivya/olivya.py +0 -116
- langflow/components/ollama/__init__.py +0 -37
- langflow/components/ollama/ollama.py +0 -330
- langflow/components/ollama/ollama_embeddings.py +0 -106
- langflow/components/openai/__init__.py +0 -37
- langflow/components/openai/openai.py +0 -100
- langflow/components/openai/openai_chat_model.py +0 -158
- langflow/components/openrouter/__init__.py +0 -32
- langflow/components/openrouter/openrouter.py +0 -202
- langflow/components/output_parsers/__init__.py +0 -0
- langflow/components/perplexity/__init__.py +0 -34
- langflow/components/perplexity/perplexity.py +0 -75
- langflow/components/pgvector/__init__.py +0 -34
- langflow/components/pgvector/pgvector.py +0 -72
- langflow/components/pinecone/__init__.py +0 -34
- langflow/components/pinecone/pinecone.py +0 -134
- langflow/components/processing/alter_metadata.py +0 -108
- langflow/components/processing/batch_run.py +0 -205
- langflow/components/processing/combine_text.py +0 -39
- langflow/components/processing/create_data.py +0 -110
- langflow/components/processing/data_operations.py +0 -438
- langflow/components/processing/data_to_dataframe.py +0 -70
- langflow/components/processing/dataframe_operations.py +0 -321
- langflow/components/processing/extract_key.py +0 -53
- langflow/components/processing/filter_data.py +0 -42
- langflow/components/processing/filter_data_values.py +0 -88
- langflow/components/processing/json_cleaner.py +0 -103
- langflow/components/processing/lambda_filter.py +0 -154
- langflow/components/processing/llm_router.py +0 -499
- langflow/components/processing/merge_data.py +0 -90
- langflow/components/processing/message_to_data.py +0 -36
- langflow/components/processing/parse_data.py +0 -70
- langflow/components/processing/parse_dataframe.py +0 -68
- langflow/components/processing/parse_json_data.py +0 -90
- langflow/components/processing/parser.py +0 -143
- langflow/components/processing/prompt.py +0 -67
- langflow/components/processing/python_repl_core.py +0 -98
- langflow/components/processing/regex.py +0 -82
- langflow/components/processing/save_file.py +0 -208
- langflow/components/processing/select_data.py +0 -48
- langflow/components/processing/split_text.py +0 -141
- langflow/components/processing/structured_output.py +0 -202
- langflow/components/processing/update_data.py +0 -160
- langflow/components/prototypes/__init__.py +0 -34
- langflow/components/prototypes/python_function.py +0 -73
- langflow/components/qdrant/__init__.py +0 -34
- langflow/components/qdrant/qdrant.py +0 -109
- langflow/components/redis/__init__.py +0 -37
- langflow/components/redis/redis.py +0 -89
- langflow/components/redis/redis_chat.py +0 -43
- langflow/components/sambanova/__init__.py +0 -32
- langflow/components/sambanova/sambanova.py +0 -84
- langflow/components/scrapegraph/__init__.py +0 -40
- langflow/components/scrapegraph/scrapegraph_markdownify_api.py +0 -64
- langflow/components/scrapegraph/scrapegraph_search_api.py +0 -64
- langflow/components/scrapegraph/scrapegraph_smart_scraper_api.py +0 -71
- langflow/components/searchapi/__init__.py +0 -36
- langflow/components/searchapi/search.py +0 -79
- langflow/components/serpapi/__init__.py +0 -3
- langflow/components/serpapi/serp.py +0 -115
- langflow/components/serper/__init__.py +0 -3
- langflow/components/serper/google_serper_api_core.py +0 -74
- langflow/components/supabase/__init__.py +0 -37
- langflow/components/supabase/supabase.py +0 -76
- langflow/components/tavily/__init__.py +0 -4
- langflow/components/tavily/tavily_extract.py +0 -117
- langflow/components/tavily/tavily_search.py +0 -212
- langflow/components/textsplitters/__init__.py +0 -0
- langflow/components/toolkits/__init__.py +0 -0
- langflow/components/tools/__init__.py +0 -72
- langflow/components/tools/calculator.py +0 -103
- langflow/components/tools/google_search_api.py +0 -45
- langflow/components/tools/google_serper_api.py +0 -115
- langflow/components/tools/python_code_structured_tool.py +0 -327
- langflow/components/tools/python_repl.py +0 -97
- langflow/components/tools/search_api.py +0 -87
- langflow/components/tools/searxng.py +0 -145
- langflow/components/tools/serp_api.py +0 -119
- langflow/components/tools/tavily_search_tool.py +0 -344
- langflow/components/tools/wikidata_api.py +0 -102
- langflow/components/tools/wikipedia_api.py +0 -49
- langflow/components/tools/yahoo_finance.py +0 -124
- langflow/components/twelvelabs/__init__.py +0 -52
- langflow/components/twelvelabs/convert_astra_results.py +0 -84
- langflow/components/twelvelabs/pegasus_index.py +0 -311
- langflow/components/twelvelabs/split_video.py +0 -291
- langflow/components/twelvelabs/text_embeddings.py +0 -57
- langflow/components/twelvelabs/twelvelabs_pegasus.py +0 -408
- langflow/components/twelvelabs/video_embeddings.py +0 -100
- langflow/components/twelvelabs/video_file.py +0 -179
- langflow/components/unstructured/__init__.py +0 -3
- langflow/components/unstructured/unstructured.py +0 -121
- langflow/components/upstash/__init__.py +0 -34
- langflow/components/upstash/upstash.py +0 -124
- langflow/components/vectara/__init__.py +0 -37
- langflow/components/vectara/vectara.py +0 -97
- langflow/components/vectara/vectara_rag.py +0 -164
- langflow/components/vectorstores/__init__.py +0 -34
- langflow/components/vectorstores/local_db.py +0 -261
- langflow/components/vertexai/__init__.py +0 -37
- langflow/components/vertexai/vertexai.py +0 -71
- langflow/components/vertexai/vertexai_embeddings.py +0 -67
- langflow/components/weaviate/__init__.py +0 -34
- langflow/components/weaviate/weaviate.py +0 -89
- langflow/components/wikipedia/__init__.py +0 -4
- langflow/components/wikipedia/wikidata.py +0 -86
- langflow/components/wikipedia/wikipedia.py +0 -53
- langflow/components/wolframalpha/__init__.py +0 -3
- langflow/components/wolframalpha/wolfram_alpha_api.py +0 -54
- langflow/components/xai/__init__.py +0 -32
- langflow/components/xai/xai.py +0 -167
- langflow/components/yahoosearch/__init__.py +0 -3
- langflow/components/yahoosearch/yahoo.py +0 -137
- langflow/components/youtube/__init__.py +0 -52
- langflow/components/youtube/channel.py +0 -227
- langflow/components/youtube/comments.py +0 -231
- langflow/components/youtube/playlist.py +0 -33
- langflow/components/youtube/search.py +0 -120
- langflow/components/youtube/trending.py +0 -285
- langflow/components/youtube/video_details.py +0 -263
- langflow/components/youtube/youtube_transcripts.py +0 -118
- langflow/components/zep/__init__.py +0 -3
- langflow/components/zep/zep.py +0 -44
- langflow/custom/attributes.py +0 -86
- langflow/custom/code_parser/__init__.py +0 -3
- langflow/custom/code_parser/code_parser.py +0 -361
- langflow/custom/custom_component/base_component.py +0 -118
- langflow/custom/dependency_analyzer.py +0 -165
- langflow/custom/directory_reader/__init__.py +0 -3
- langflow/custom/directory_reader/directory_reader.py +0 -359
- langflow/custom/directory_reader/utils.py +0 -171
- langflow/custom/eval.py +0 -12
- langflow/custom/schema.py +0 -32
- langflow/custom/tree_visitor.py +0 -21
- langflow/frontend/assets/lazyIconImports-Ci-S9xBA.js +0 -2
- langflow/graph/edge/__init__.py +0 -0
- langflow/graph/edge/base.py +0 -277
- langflow/graph/edge/schema.py +0 -119
- langflow/graph/edge/utils.py +0 -0
- langflow/graph/graph/__init__.py +0 -0
- langflow/graph/graph/ascii.py +0 -202
- langflow/graph/graph/base.py +0 -2185
- langflow/graph/graph/constants.py +0 -58
- langflow/graph/graph/runnable_vertices_manager.py +0 -133
- langflow/graph/graph/schema.py +0 -53
- langflow/graph/graph/state_model.py +0 -66
- langflow/graph/graph/utils.py +0 -1024
- langflow/graph/schema.py +0 -75
- langflow/graph/state/__init__.py +0 -0
- langflow/graph/state/model.py +0 -237
- langflow/graph/utils.py +0 -229
- langflow/graph/vertex/__init__.py +0 -0
- langflow/graph/vertex/base.py +0 -811
- langflow/graph/vertex/constants.py +0 -0
- langflow/graph/vertex/exceptions.py +0 -4
- langflow/graph/vertex/param_handler.py +0 -255
- langflow/graph/vertex/schema.py +0 -26
- langflow/graph/vertex/utils.py +0 -19
- langflow/graph/vertex/vertex_types.py +0 -489
- langflow/legacy_custom/__init__.py +0 -0
- langflow/legacy_custom/customs.py +0 -16
- langflow/load/load.py +0 -250
- langflow/logging/logger.py +0 -369
- langflow/processing/utils.py +0 -25
- langflow/schema/openai_responses_schemas.py +0 -74
- langflow/schema/serialize.py +0 -13
- langflow/services/chat/config.py +0 -2
- langflow/services/settings/auth.py +0 -130
- langflow/services/settings/constants.py +0 -31
- langflow/services/settings/manager.py +0 -49
- langflow/services/settings/utils.py +0 -40
- langflow/template/field/prompt.py +0 -2
- langflow/template/frontend_node/__init__.py +0 -6
- langflow/template/frontend_node/base.py +0 -212
- langflow/template/frontend_node/constants.py +0 -65
- langflow/template/frontend_node/custom_components.py +0 -97
- langflow/template/template/__init__.py +0 -0
- langflow/template/template/base.py +0 -99
- langflow/utils/async_helpers.py +0 -42
- langflow/utils/concurrency.py +0 -60
- langflow/utils/util_strings.py +0 -56
- langflow_base_nightly-0.5.1.dev3.dist-info/RECORD +0 -1159
- {langflow_base_nightly-0.5.1.dev3.dist-info → langflow_base_nightly-0.5.1.dev4.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-0.5.1.dev3.dist-info → langflow_base_nightly-0.5.1.dev4.dist-info}/entry_points.txt +0 -0
|
@@ -192,17 +192,17 @@
|
|
|
192
192
|
"legacy": false,
|
|
193
193
|
"lf_version": "1.2.0",
|
|
194
194
|
"metadata": {
|
|
195
|
-
"code_hash": "
|
|
195
|
+
"code_hash": "3dd28ea591b9",
|
|
196
196
|
"dependencies": {
|
|
197
197
|
"dependencies": [
|
|
198
198
|
{
|
|
199
|
-
"name": "
|
|
199
|
+
"name": "lfx",
|
|
200
200
|
"version": null
|
|
201
201
|
}
|
|
202
202
|
],
|
|
203
203
|
"total_dependencies": 1
|
|
204
204
|
},
|
|
205
|
-
"module": "
|
|
205
|
+
"module": "lfx.components.input_output.text.TextInputComponent"
|
|
206
206
|
},
|
|
207
207
|
"minimized": false,
|
|
208
208
|
"output_types": [],
|
|
@@ -242,7 +242,7 @@
|
|
|
242
242
|
"show": true,
|
|
243
243
|
"title_case": false,
|
|
244
244
|
"type": "code",
|
|
245
|
-
"value": "from
|
|
245
|
+
"value": "from lfx.base.io.text import TextComponent\nfrom lfx.io import MultilineInput, Output\nfrom lfx.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get user text inputs.\"\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\n icon = \"type\"\n name = \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n"
|
|
246
246
|
},
|
|
247
247
|
"input_value": {
|
|
248
248
|
"_input_type": "MultilineInput",
|
|
@@ -320,7 +320,7 @@
|
|
|
320
320
|
"legacy": false,
|
|
321
321
|
"lf_version": "1.2.0",
|
|
322
322
|
"metadata": {
|
|
323
|
-
"code_hash": "
|
|
323
|
+
"code_hash": "9619107fecd1",
|
|
324
324
|
"dependencies": {
|
|
325
325
|
"dependencies": [
|
|
326
326
|
{
|
|
@@ -332,13 +332,13 @@
|
|
|
332
332
|
"version": "0.116.1"
|
|
333
333
|
},
|
|
334
334
|
{
|
|
335
|
-
"name": "
|
|
335
|
+
"name": "lfx",
|
|
336
336
|
"version": null
|
|
337
337
|
}
|
|
338
338
|
],
|
|
339
339
|
"total_dependencies": 3
|
|
340
340
|
},
|
|
341
|
-
"module": "
|
|
341
|
+
"module": "lfx.components.input_output.chat_output.ChatOutput"
|
|
342
342
|
},
|
|
343
343
|
"minimized": true,
|
|
344
344
|
"output_types": [],
|
|
@@ -442,7 +442,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
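The `_serialize_data` helper in the ChatOutput code above pretty-prints a Data payload with orjson after normalizing it through FastAPI's encoder. A standalone reproduction of just that formatting step, runnable with only `orjson` and `fastapi` installed:

```python
# Standalone reproduction of the formatting step in ChatOutput._serialize_data
# above: encode with fastapi's jsonable_encoder, pretty-print with orjson.
# (The component additionally wraps the result in a fenced ```json block.)
import orjson
from fastapi.encoders import jsonable_encoder

payload = {"text": "hello", "score": 0.9}
json_str = orjson.dumps(jsonable_encoder(payload), option=orjson.OPT_INDENT_2).decode("utf-8")
print(json_str)
```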
@@ -792,7 +792,7 @@
"legacy": false,
"lf_version": "1.2.0",
"metadata": {
- "code_hash": "
+ "code_hash": "6fb55f08b295",
"dependencies": {
"dependencies": [
{
@@ -804,13 +804,13 @@
"version": "0.0.39"
},
{
- "name": "
+ "name": "lfx",
"version": null
}
],
"total_dependencies": 3
},
- "module": "
+ "module": "lfx.components.processing.structured_output.StructuredOutputComponent"
},
"minimized": false,
"output_types": [],
@@ -863,7 +863,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom
+ "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom lfx.base.models.chat_result import get_chat_result\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.io import (\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = \"Uses an LLM to generate structured data. Ideal for extraction and consistency.\"\n documentation: str = \"https://docs.langflow.org/components-processing#structured-output\"\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. \"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_output\",\n ),\n Output(\n name=\"dataframe_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self):\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n\n # OPTIMIZATION NOTE: Simplified processing based on trustcall response structure\n # Handle non-dict responses (shouldn't happen with trustcall, but defensive)\n if not isinstance(result, dict):\n return result\n\n # Extract first response and convert BaseModel to dict\n responses = result.get(\"responses\", [])\n if not responses:\n return result\n\n # Convert BaseModel to dict (creates the \"objects\" key)\n first_response = responses[0]\n structured_data = first_response.model_dump() if isinstance(first_response, BaseModel) else first_response\n\n # Extract the objects array (guaranteed to exist due to our Pydantic model structure)\n return structured_data.get(\"objects\", structured_data)\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n if not isinstance(output, list) or not output:\n # handle empty or unexpected type case\n msg = \"No structured output returned\"\n raise ValueError(msg)\n if len(output) == 1:\n return Data(data=output[0])\n if len(output) > 1:\n # Multiple outputs - wrap them in a results container\n return Data(data={\"results\": output})\n return Data()\n\n def build_structured_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if not isinstance(output, list) or not output:\n # handle empty or unexpected type case\n msg = \"No structured output returned\"\n raise ValueError(msg)\n data_list = [Data(data=output[0])] if len(output) == 1 else [Data(data=item) for item in output]\n\n return DataFrame(data_list)\n"
},
"input_value": {
"_input_type": "MessageTextInput",
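The StructuredOutput code above wraps the user-defined row model in an outer model whose single `objects` field is a list, so the extractor can return any number of matches in one response. A minimal pydantic-only sketch of that wrapper pattern, with `Row` standing in for the model that `build_model_from_schema` would produce:

```python
# Minimal sketch of the create_model wrapper pattern used above; `Row` is a
# stand-in for the dynamically built schema model.
from pydantic import BaseModel, Field, create_model

class Row(BaseModel):
    field: str = "field"

OutputModel = create_model(
    "OutputModel",
    __doc__="A list of OutputModel.",
    objects=(list[Row], Field(description="A list of OutputModel.")),
)

parsed = OutputModel(objects=[{"field": "a"}, {"field": "b"}])
print(parsed.model_dump()["objects"])  # [{'field': 'a'}, {'field': 'b'}]
```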
@@ -1379,7 +1379,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "\"\"\"Enhanced file component with clearer structure and Docling isolation.\n\nNotes:\n-----\n- Functionality is preserved with minimal behavioral changes.\n- ALL Docling parsing/export runs in a separate OS process to prevent memory\n growth and native library state from impacting the main Langflow process.\n- Standard text/structured parsing continues to use existing BaseFileComponent\n utilities (and optional threading via `parallel_load_data`).\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport subprocess\nimport sys\nimport textwrap\nfrom copy import deepcopy\nfrom typing import TYPE_CHECKING, Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n FileInput,\n IntInput,\n MessageTextInput,\n Output,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\nif TYPE_CHECKING:\n from langflow.schema import DataFrame\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"File component with optional Docling processing (isolated in a subprocess).\"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings used when markdown export is requested.\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n # ---- Inputs / Outputs (kept as close to original as possible) -------------------\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n # Deprecated input retained for backward-compatibility.\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n # ------------------------------ UI helpers --------------------------------------\n\n def _path_value(self, template: dict) -> list[str]:\n \"\"\"Return the list of currently selected file paths from the template.\"\"\"\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Show/hide Advanced Parser and related fields based on selection context.\"\"\"\n if field_name == \"path\":\n paths = self._path_value(build_config)\n file_path = paths[0] if paths else \"\"\n file_count = len(field_value) if field_value else 0\n\n # Advanced mode only for single (non-tabular) file\n allow_advanced = file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\"))\n build_config[\"advanced_mode\"][\"show\"] = allow_advanced\n if not allow_advanced:\n build_config[\"advanced_mode\"][\"value\"] = False\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = bool(field_value)\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on file count/type and advanced mode.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n template = frontend_node.get(\"template\", {})\n paths = self._path_value(template)\n if not paths:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n if len(paths) == 1:\n file_path = paths[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n if advanced_mode:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Multiple files => DataFrame output; advanced parser disabled\n frontend_node[\"outputs\"].append(Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"))\n\n return frontend_node\n\n # ------------------------------ Core processing ----------------------------------\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Lightweight extension gate for Docling-compatible types.\"\"\"\n docling_exts = (\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n )\n return file_path.lower().endswith(docling_exts)\n\n def _process_docling_in_subprocess(self, file_path: str) -> Data | None:\n \"\"\"Run Docling in a separate OS process and map the result to a Data object.\n\n We avoid multiprocessing pickling by launching `python -c \"<script>\"` and\n passing JSON config via stdin. The child prints a JSON result to stdout.\n \"\"\"\n if not file_path:\n return None\n\n args: dict[str, Any] = {\n \"file_path\": file_path,\n \"markdown\": bool(self.markdown),\n \"image_mode\": str(self.IMAGE_MODE),\n \"md_image_placeholder\": str(self.md_image_placeholder),\n \"md_page_break_placeholder\": str(self.md_page_break_placeholder),\n \"pipeline\": str(self.pipeline),\n \"ocr_engine\": str(self.ocr_engine) if getattr(self, \"ocr_engine\", \"\") else None,\n }\n\n # The child is a tiny, self-contained script to keep memory/state isolated.\n child_script = textwrap.dedent(\n r\"\"\"\n import json, sys\n\n def try_imports():\n # Strategy 1: latest layout\n try:\n from docling.datamodel.base_models import ConversionStatus, InputFormat # type: ignore\n from docling.document_converter import DocumentConverter # type: ignore\n from docling_core.types.doc import ImageRefMode # type: ignore\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"latest\"\n except Exception:\n pass\n # Strategy 2: alternative layout\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n try:\n from docling_core.types import ConversionStatus, InputFormat # type: ignore\n except Exception:\n try:\n from docling.datamodel import ConversionStatus, InputFormat # type: ignore\n except Exception:\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n try:\n from docling_core.types.doc import ImageRefMode # type: ignore\n except Exception:\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"alternative\"\n except Exception:\n pass\n # Strategy 3: basic converter only\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"basic\"\n except Exception as e:\n raise ImportError(f\"Docling imports failed: {e}\") from e\n\n def create_converter(strategy, input_format, DocumentConverter, pipeline, ocr_engine):\n if strategy == \"latest\" and pipeline == \"standard\":\n try:\n from docling.datamodel.pipeline_options import PdfPipelineOptions # type: ignore\n from docling.document_converter import PdfFormatOption # type: ignore\n pipe = PdfPipelineOptions()\n if ocr_engine:\n try:\n from docling.models.factories import get_ocr_factory # type: ignore\n pipe.do_ocr = True\n fac = get_ocr_factory(allow_external_plugins=False)\n pipe.ocr_options = fac.create_options(kind=ocr_engine)\n except Exception:\n pipe.do_ocr = False\n fmt = {}\n if hasattr(input_format, \"PDF\"):\n fmt[getattr(input_format, \"PDF\")] = PdfFormatOption(pipeline_options=pipe)\n if hasattr(input_format, \"IMAGE\"):\n fmt[getattr(input_format, \"IMAGE\")] = PdfFormatOption(pipeline_options=pipe)\n return DocumentConverter(format_options=fmt)\n except Exception:\n return DocumentConverter()\n return DocumentConverter()\n\n def export_markdown(document, ImageRefMode, image_mode, img_ph, pg_ph):\n try:\n mode = getattr(ImageRefMode, image_mode.upper(), image_mode)\n return document.export_to_markdown(\n image_mode=mode,\n image_placeholder=img_ph,\n page_break_placeholder=pg_ph,\n )\n except Exception:\n try:\n return document.export_to_text()\n except Exception:\n return str(document)\n\n def to_rows(doc_dict):\n rows = []\n for t in doc_dict.get(\"texts\", []):\n prov = t.get(\"prov\") or []\n page_no = None\n if prov and isinstance(prov, list) and isinstance(prov[0], dict):\n page_no = prov[0].get(\"page_no\")\n rows.append({\n \"page_no\": page_no,\n \"label\": t.get(\"label\"),\n \"text\": t.get(\"text\"),\n \"level\": t.get(\"level\"),\n })\n return rows\n\n def main():\n cfg = json.loads(sys.stdin.read())\n file_path = cfg[\"file_path\"]\n markdown = cfg[\"markdown\"]\n image_mode = cfg[\"image_mode\"]\n img_ph = cfg[\"md_image_placeholder\"]\n pg_ph = cfg[\"md_page_break_placeholder\"]\n pipeline = cfg[\"pipeline\"]\n ocr_engine = cfg.get(\"ocr_engine\")\n meta = {\"file_path\": file_path}\n\n try:\n ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, strategy = try_imports()\n converter = create_converter(strategy, InputFormat, DocumentConverter, pipeline, ocr_engine)\n try:\n res = converter.convert(file_path)\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling conversion error: {e}\", \"meta\": meta}))\n return\n\n ok = False\n if hasattr(res, \"status\"):\n try:\n ok = (res.status == ConversionStatus.SUCCESS) or (str(res.status).lower() == \"success\")\n except Exception:\n ok = (str(res.status).lower() == \"success\")\n if not ok and hasattr(res, \"document\"):\n ok = getattr(res, \"document\", None) is not None\n if not ok:\n print(json.dumps({\"ok\": False, \"error\": \"Docling conversion failed\", \"meta\": meta}))\n return\n\n doc = getattr(res, \"document\", None)\n if doc is None:\n print(json.dumps({\"ok\": False, \"error\": \"Docling produced no document\", \"meta\": meta}))\n return\n\n if markdown:\n text = export_markdown(doc, ImageRefMode, image_mode, img_ph, pg_ph)\n print(json.dumps({\"ok\": True, \"mode\": \"markdown\", \"text\": text, \"meta\": meta}))\n return\n\n # structured\n try:\n doc_dict = doc.export_to_dict()\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling export_to_dict failed: {e}\", \"meta\": meta}))\n return\n\n rows = to_rows(doc_dict)\n print(json.dumps({\"ok\": True, \"mode\": \"structured\", \"doc\": rows, \"meta\": meta}))\n except Exception as e:\n print(\n json.dumps({\n \"ok\": False,\n \"error\": f\"Docling processing error: {e}\",\n \"meta\": {\"file_path\": file_path},\n })\n )\n\n if __name__ == \"__main__\":\n main()\n \"\"\"\n )\n\n # Validate file_path to avoid command injection or unsafe input\n if not isinstance(args[\"file_path\"], str) or any(c in args[\"file_path\"] for c in [\";\", \"|\", \"&\", \"$\", \"`\"]):\n return Data(data={\"error\": \"Unsafe file path detected.\", \"file_path\": args[\"file_path\"]})\n\n proc = subprocess.run( # noqa: S603\n [sys.executable, \"-u\", \"-c\", child_script],\n input=json.dumps(args).encode(\"utf-8\"),\n capture_output=True,\n check=False,\n )\n\n if not proc.stdout:\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\") or \"no output from child process\"\n return Data(data={\"error\": f\"Docling subprocess error: {err_msg}\", \"file_path\": file_path})\n\n try:\n result = json.loads(proc.stdout.decode(\"utf-8\"))\n except Exception as e: # noqa: BLE001\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\")\n return Data(\n data={\"error\": f\"Invalid JSON from Docling subprocess: {e}. stderr={err_msg}\", \"file_path\": file_path},\n )\n\n if not result.get(\"ok\"):\n return Data(data={\"error\": result.get(\"error\", \"Unknown Docling error\"), **result.get(\"meta\", {})})\n\n meta = result.get(\"meta\", {})\n if result.get(\"mode\") == \"markdown\":\n exported_content = str(result.get(\"text\", \"\"))\n return Data(\n text=exported_content,\n data={\"exported_content\": exported_content, \"export_format\": self.EXPORT_FORMAT, **meta},\n )\n\n rows = list(result.get(\"doc\", []))\n return Data(data={\"doc\": rows, \"export_format\": self.EXPORT_FORMAT, **meta})\n\n def process_files(\n self,\n file_list: list[BaseFileComponent.BaseFile],\n ) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Process input files.\n\n - Single file + advanced_mode => Docling in a separate process.\n - Otherwise => standard parsing in current process (optionally threaded).\n \"\"\"\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | None:\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n self.log(f\"File not found: {file_path}. Error: {e}\")\n if not silent_errors:\n raise\n return None\n except Exception as e:\n self.log(f\"Unexpected error processing {file_path}: {e}\")\n if not silent_errors:\n raise\n return None\n\n # Advanced path: only for a single Docling-compatible file\n if len(file_list) == 1:\n file_path = str(file_list[0].path)\n if self.advanced_mode and self._is_docling_compatible(file_path):\n advanced_data: Data | None = self._process_docling_in_subprocess(file_path)\n\n # --- UNNEST: expand each element in `doc` to its own Data row\n payload = getattr(advanced_data, \"data\", {}) or {}\n doc_rows = payload.get(\"doc\")\n if isinstance(doc_rows, list):\n rows: list[Data | None] = [\n Data(\n data={\n \"file_path\": file_path,\n **(item if isinstance(item, dict) else {\"value\": item}),\n },\n )\n for item in doc_rows\n ]\n return self.rollup_data(file_list, rows)\n\n # If not structured, keep as-is (e.g., markdown export or error dict)\n return self.rollup_data(file_list, [advanced_data])\n\n # Standard multi-file (or single non-advanced) path\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_paths = [str(f.path) for f in file_list]\n self.log(f\"Starting parallel processing of {len(file_paths)} files with concurrency: {concurrency}.\")\n my_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file_standard,\n max_concurrency=concurrency,\n )\n return self.rollup_data(file_list, my_data)\n\n # ------------------------------ Output helpers -----------------------------------\n\n def load_files_advanced(self) -> DataFrame:\n \"\"\"Load files using advanced Docling processing and export to an advanced format.\"\"\"\n self.markdown = False\n return self.load_files()\n\n def load_files_markdown(self) -> Message:\n \"\"\"Load files using advanced Docling processing and export to Markdown format.\"\"\"\n self.markdown = True\n result = self.load_files()\n return Message(text=str(result.text[0]))\n"
+ "value": "\"\"\"Enhanced file component with clearer structure and Docling isolation.\n\nNotes:\n-----\n- Functionality is preserved with minimal behavioral changes.\n- ALL Docling parsing/export runs in a separate OS process to prevent memory\n growth and native library state from impacting the main Langflow process.\n- Standard text/structured parsing continues to use existing BaseFileComponent\n utilities (and optional threading via `parallel_load_data`).\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport subprocess\nimport sys\nimport textwrap\nfrom copy import deepcopy\nfrom typing import Any\n\nfrom lfx.base.data.base_file import BaseFileComponent\nfrom lfx.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom lfx.inputs.inputs import DropdownInput, MessageTextInput, StrInput\nfrom lfx.io import BoolInput, FileInput, IntInput, Output\nfrom lfx.schema import DataFrame # noqa: TC001\nfrom lfx.schema.data import Data\nfrom lfx.schema.message import Message\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"File component with optional Docling processing (isolated in a subprocess).\"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings used when markdown export is requested.\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n _base_inputs = deepcopy(BaseFileComponent.get_base_inputs())\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n # Deprecated input retained for backward-compatibility.\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n # ------------------------------ UI helpers --------------------------------------\n\n def _path_value(self, template: dict) -> list[str]:\n \"\"\"Return the list of currently selected file paths from the template.\"\"\"\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Show/hide Advanced Parser and related fields based on selection context.\"\"\"\n if field_name == \"path\":\n paths = self._path_value(build_config)\n file_path = paths[0] if paths else \"\"\n file_count = len(field_value) if field_value else 0\n\n # Advanced mode only for single (non-tabular) file\n allow_advanced = file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\"))\n build_config[\"advanced_mode\"][\"show\"] = allow_advanced\n if not allow_advanced:\n build_config[\"advanced_mode\"][\"value\"] = False\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = bool(field_value)\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on file count/type and advanced mode.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n template = frontend_node.get(\"template\", {})\n paths = self._path_value(template)\n if not paths:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n if len(paths) == 1:\n file_path = paths[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n if advanced_mode:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Multiple files => DataFrame output; advanced parser disabled\n frontend_node[\"outputs\"].append(Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"))\n\n return frontend_node\n\n # ------------------------------ Core processing ----------------------------------\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Lightweight extension gate for Docling-compatible types.\"\"\"\n docling_exts = (\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n )\n return file_path.lower().endswith(docling_exts)\n\n def _process_docling_in_subprocess(self, file_path: str) -> Data | None:\n \"\"\"Run Docling in a separate OS process and map the result to a Data object.\n\n We avoid multiprocessing pickling by launching `python -c \"<script>\"` and\n passing JSON config via stdin. The child prints a JSON result to stdout.\n \"\"\"\n if not file_path:\n return None\n\n args: dict[str, Any] = {\n \"file_path\": file_path,\n \"markdown\": bool(self.markdown),\n \"image_mode\": str(self.IMAGE_MODE),\n \"md_image_placeholder\": str(self.md_image_placeholder),\n \"md_page_break_placeholder\": str(self.md_page_break_placeholder),\n \"pipeline\": str(self.pipeline),\n \"ocr_engine\": str(self.ocr_engine) if getattr(self, \"ocr_engine\", \"\") else None,\n }\n\n # The child is a tiny, self-contained script to keep memory/state isolated.\n child_script = textwrap.dedent(\n r\"\"\"\n import json, sys\n\n def try_imports():\n # Strategy 1: latest layout\n try:\n from docling.datamodel.base_models import ConversionStatus, InputFormat # type: ignore\n from docling.document_converter import DocumentConverter # type: ignore\n from docling_core.types.doc import ImageRefMode # type: ignore\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"latest\"\n except Exception:\n pass\n # Strategy 2: alternative layout\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n try:\n from docling_core.types import ConversionStatus, InputFormat # type: ignore\n except Exception:\n try:\n from docling.datamodel import ConversionStatus, InputFormat # type: ignore\n except Exception:\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n try:\n from docling_core.types.doc import ImageRefMode # type: ignore\n except Exception:\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"alternative\"\n except Exception:\n pass\n # Strategy 3: basic converter only\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"basic\"\n except Exception as e:\n raise ImportError(f\"Docling imports failed: {e}\") from e\n\n def create_converter(strategy, input_format, DocumentConverter, pipeline, ocr_engine):\n if strategy == \"latest\" and pipeline == \"standard\":\n try:\n from docling.datamodel.pipeline_options import PdfPipelineOptions # type: ignore\n from docling.document_converter import PdfFormatOption # type: ignore\n pipe = PdfPipelineOptions()\n if ocr_engine:\n try:\n from docling.models.factories import get_ocr_factory # type: ignore\n pipe.do_ocr = True\n fac = get_ocr_factory(allow_external_plugins=False)\n pipe.ocr_options = fac.create_options(kind=ocr_engine)\n except Exception:\n pipe.do_ocr = False\n fmt = {}\n if hasattr(input_format, \"PDF\"):\n fmt[getattr(input_format, \"PDF\")] = PdfFormatOption(pipeline_options=pipe)\n if hasattr(input_format, \"IMAGE\"):\n fmt[getattr(input_format, \"IMAGE\")] = PdfFormatOption(pipeline_options=pipe)\n return DocumentConverter(format_options=fmt)\n except Exception:\n return DocumentConverter()\n return DocumentConverter()\n\n def export_markdown(document, ImageRefMode, image_mode, img_ph, pg_ph):\n try:\n mode = getattr(ImageRefMode, image_mode.upper(), image_mode)\n return document.export_to_markdown(\n image_mode=mode,\n image_placeholder=img_ph,\n page_break_placeholder=pg_ph,\n )\n except Exception:\n try:\n return document.export_to_text()\n except Exception:\n return str(document)\n\n def to_rows(doc_dict):\n rows = []\n for t in doc_dict.get(\"texts\", []):\n prov = t.get(\"prov\") or []\n page_no = None\n if prov and isinstance(prov, list) and isinstance(prov[0], dict):\n page_no = prov[0].get(\"page_no\")\n rows.append({\n \"page_no\": page_no,\n \"label\": t.get(\"label\"),\n \"text\": t.get(\"text\"),\n \"level\": t.get(\"level\"),\n })\n return rows\n\n def main():\n cfg = json.loads(sys.stdin.read())\n file_path = cfg[\"file_path\"]\n markdown = cfg[\"markdown\"]\n image_mode = cfg[\"image_mode\"]\n img_ph = cfg[\"md_image_placeholder\"]\n pg_ph = cfg[\"md_page_break_placeholder\"]\n pipeline = cfg[\"pipeline\"]\n ocr_engine = cfg.get(\"ocr_engine\")\n meta = {\"file_path\": file_path}\n\n try:\n ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, strategy = try_imports()\n converter = create_converter(strategy, InputFormat, DocumentConverter, pipeline, ocr_engine)\n try:\n res = converter.convert(file_path)\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling conversion error: {e}\", \"meta\": meta}))\n return\n\n ok = False\n if hasattr(res, \"status\"):\n try:\n ok = (res.status == ConversionStatus.SUCCESS) or (str(res.status).lower() == \"success\")\n except Exception:\n ok = (str(res.status).lower() == \"success\")\n if not ok and hasattr(res, \"document\"):\n ok = getattr(res, \"document\", None) is not None\n if not ok:\n print(json.dumps({\"ok\": False, \"error\": \"Docling conversion failed\", \"meta\": meta}))\n return\n\n doc = getattr(res, \"document\", None)\n if doc is None:\n print(json.dumps({\"ok\": False, \"error\": \"Docling produced no document\", \"meta\": meta}))\n return\n\n if markdown:\n text = export_markdown(doc, ImageRefMode, image_mode, img_ph, pg_ph)\n print(json.dumps({\"ok\": True, \"mode\": \"markdown\", \"text\": text, \"meta\": meta}))\n return\n\n # structured\n try:\n doc_dict = doc.export_to_dict()\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling export_to_dict failed: {e}\", \"meta\": meta}))\n return\n\n rows = to_rows(doc_dict)\n print(json.dumps({\"ok\": True, \"mode\": \"structured\", \"doc\": rows, \"meta\": meta}))\n except Exception as e:\n print(\n json.dumps({\n \"ok\": False,\n \"error\": f\"Docling processing error: {e}\",\n \"meta\": {\"file_path\": file_path},\n })\n )\n\n if __name__ == \"__main__\":\n main()\n \"\"\"\n )\n\n # Validate file_path to avoid command injection or unsafe input\n if not isinstance(args[\"file_path\"], str) or any(c in args[\"file_path\"] for c in [\";\", \"|\", \"&\", \"$\", \"`\"]):\n return Data(data={\"error\": \"Unsafe file path detected.\", \"file_path\": args[\"file_path\"]})\n\n proc = subprocess.run( # noqa: S603\n [sys.executable, \"-u\", \"-c\", child_script],\n input=json.dumps(args).encode(\"utf-8\"),\n capture_output=True,\n check=False,\n )\n\n if not proc.stdout:\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\") or \"no output from child process\"\n return Data(data={\"error\": f\"Docling subprocess error: {err_msg}\", \"file_path\": file_path})\n\n try:\n result = json.loads(proc.stdout.decode(\"utf-8\"))\n except Exception as e: # noqa: BLE001\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\")\n return Data(\n data={\"error\": f\"Invalid JSON from Docling subprocess: {e}. stderr={err_msg}\", \"file_path\": file_path},\n )\n\n if not result.get(\"ok\"):\n return Data(data={\"error\": result.get(\"error\", \"Unknown Docling error\"), **result.get(\"meta\", {})})\n\n meta = result.get(\"meta\", {})\n if result.get(\"mode\") == \"markdown\":\n exported_content = str(result.get(\"text\", \"\"))\n return Data(\n text=exported_content,\n data={\"exported_content\": exported_content, \"export_format\": self.EXPORT_FORMAT, **meta},\n )\n\n rows = list(result.get(\"doc\", []))\n return Data(data={\"doc\": rows, \"export_format\": self.EXPORT_FORMAT, **meta})\n\n def process_files(\n self,\n file_list: list[BaseFileComponent.BaseFile],\n ) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Process input files.\n\n - Single file + advanced_mode => Docling in a separate process.\n - Otherwise => standard parsing in current process (optionally threaded).\n \"\"\"\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | None:\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n self.log(f\"File not found: {file_path}. Error: {e}\")\n if not silent_errors:\n raise\n return None\n except Exception as e:\n self.log(f\"Unexpected error processing {file_path}: {e}\")\n if not silent_errors:\n raise\n return None\n\n # Advanced path: only for a single Docling-compatible file\n if len(file_list) == 1:\n file_path = str(file_list[0].path)\n if self.advanced_mode and self._is_docling_compatible(file_path):\n advanced_data: Data | None = self._process_docling_in_subprocess(file_path)\n\n # --- UNNEST: expand each element in `doc` to its own Data row\n payload = getattr(advanced_data, \"data\", {}) or {}\n doc_rows = payload.get(\"doc\")\n if isinstance(doc_rows, list):\n rows: list[Data | None] = [\n Data(\n data={\n \"file_path\": file_path,\n **(item if isinstance(item, dict) else {\"value\": item}),\n },\n )\n for item in doc_rows\n ]\n return self.rollup_data(file_list, rows)\n\n # If not structured, keep as-is (e.g., markdown export or error dict)\n return self.rollup_data(file_list, [advanced_data])\n\n # Standard multi-file (or single non-advanced) path\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_paths = [str(f.path) for f in file_list]\n self.log(f\"Starting parallel processing of {len(file_paths)} files with concurrency: {concurrency}.\")\n my_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file_standard,\n max_concurrency=concurrency,\n )\n return self.rollup_data(file_list, my_data)\n\n # ------------------------------ Output helpers -----------------------------------\n\n def load_files_advanced(self) -> DataFrame:\n \"\"\"Load files using advanced Docling processing and export to an advanced format.\"\"\"\n self.markdown = False\n return self.load_files()\n\n def load_files_markdown(self) -> Message:\n \"\"\"Load files using advanced Docling processing and export to Markdown format.\"\"\"\n self.markdown = True\n result = self.load_files()\n return Message(text=str(result.text[0]))\n"
},
"concurrency_multithreading": {
"_input_type": "IntInput",
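The File component above isolates Docling in a child process: it launches `python -c` with a self-contained script, writes a JSON config to the child's stdin, and reads a single JSON result from its stdout. A stdlib-only sketch of that same protocol, with a trivial echo script standing in for the real Docling worker:

```python
# Stdlib-only sketch of the isolation pattern used above: spawn a child with
# `python -c`, pass JSON config on stdin, read one JSON result from stdout.
# The child script here is a toy stand-in for the Docling worker.
import json
import subprocess
import sys

child_script = """
import json, sys
cfg = json.loads(sys.stdin.read())
print(json.dumps({"ok": True, "echo": cfg["file_path"]}))
"""

proc = subprocess.run(
    [sys.executable, "-u", "-c", child_script],
    input=json.dumps({"file_path": "/tmp/report.pdf"}).encode("utf-8"),
    capture_output=True,
    check=False,
)
print(json.loads(proc.stdout.decode("utf-8")))  # {'ok': True, 'echo': '/tmp/report.pdf'}
```

Because the child is a separate OS process, any memory growth or native-library state from the parsing work dies with it instead of accumulating in the main Langflow process.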
@@ -1604,7 +1604,7 @@
"beta": false,
"conditional_paths": [],
"custom_fields": {},
- "description": "Runs a language model given a specified provider.
+ "description": "Runs a language model given a specified provider.",
"display_name": "Language Model",
"documentation": "",
"edited": false,
@@ -1621,12 +1621,35 @@
"icon": "brain-circuit",
"legacy": false,
"metadata": {
+ "code_hash": "bb5f8714781b",
+ "dependencies": {
+ "dependencies": [
+ {
+ "name": "langchain_anthropic",
+ "version": "0.3.14"
+ },
+ {
+ "name": "langchain_google_genai",
+ "version": "2.0.6"
+ },
+ {
+ "name": "langchain_openai",
+ "version": "0.3.23"
+ },
+ {
+ "name": "lfx",
+ "version": null
+ }
+ ],
+ "total_dependencies": 4
+ },
"keywords": [
"model",
"llm",
"language model",
"large language model"
- ]
+ ],
+ "module": "lfx.components.models.language_model.LanguageModelComponent"
},
"minimized": false,
"output_types": [],
@@ -1638,8 +1661,7 @@
"group_outputs": false,
"method": "text_response",
"name": "text_output",
- "
- "required_inputs": null,
+ "selected": "Message",
"tool_mode": true,
"types": [
"Message"
@@ -1653,8 +1675,6 @@
"group_outputs": false,
"method": "build_model",
"name": "model_output",
- "options": null,
- "required_inputs": null,
"selected": "LanguageModel",
"tool_mode": true,
"types": [
@@ -1670,7 +1690,7 @@
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
- "display_name": "
+ "display_name": "OpenAI API Key",
"dynamic": false,
"info": "Model Provider API key",
"input_types": [],
@@ -1701,7 +1721,7 @@
|
|
|
1701
1721
|
"show": true,
|
|
1702
1722
|
"title_case": false,
|
|
1703
1723
|
"type": "code",
|
|
1704
|
-
"value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom
|
|
1724
|
+
"value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n 
streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n"
|
|
1705
1725
|
},
|
|
1706
1726
|
"input_value": {
|
|
1707
1727
|
"_input_type": "MessageInput",
|
|
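Note: the component code carried in the "value" field above uses Langflow's dynamic-field pattern, in which update_build_config rewrites the model_name dropdown and the API-key label whenever the provider dropdown changes. A minimal standalone sketch of that pattern follows; plain dicts stand in for lfx's dotdict, and the model lists are illustrative stand-ins (assumptions for this sketch), not the real constants.

# Sketch of the provider-switch pattern from the embedded component code.
# Plain dicts replace lfx's dotdict; the lists below are hypothetical subsets
# of OPENAI_CHAT_MODEL_NAMES / ANTHROPIC_MODELS, used only for illustration.
OPENAI_MODELS = ["gpt-4o-mini", "gpt-4o", "o1"]
ANTHROPIC_MODELS = ["claude-placeholder-model"]  # hypothetical entry

def update_build_config(build_config: dict, field_value: str, field_name: str | None = None) -> dict:
    # When the provider changes, swap the dropdown options, reset the
    # selected value, and relabel the API-key field, as the component does.
    if field_name == "provider":
        if field_value == "OpenAI":
            build_config["model_name"]["options"] = OPENAI_MODELS
            build_config["model_name"]["value"] = OPENAI_MODELS[0]
            build_config["api_key"]["display_name"] = "OpenAI API Key"
        elif field_value == "Anthropic":
            build_config["model_name"]["options"] = ANTHROPIC_MODELS
            build_config["model_name"]["value"] = ANTHROPIC_MODELS[0]
            build_config["api_key"]["display_name"] = "Anthropic API Key"
    return build_config

config = {"model_name": {"options": [], "value": ""}, "api_key": {"display_name": ""}}
update_build_config(config, "Anthropic", "provider")
assert config["api_key"]["display_name"] == "Anthropic API Key"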
@@ -1736,13 +1756,25 @@
 "info": "Select the model to use",
 "name": "model_name",
 "options": [
-"
-"
-"
-"
-"
-"
-"
+"gpt-4o-mini",
+"gpt-4o",
+"gpt-4.1",
+"gpt-4.1-mini",
+"gpt-4.1-nano",
+"gpt-4-turbo",
+"gpt-4-turbo-preview",
+"gpt-4",
+"gpt-3.5-turbo",
+"gpt-5",
+"gpt-5-mini",
+"gpt-5-nano",
+"gpt-5-chat-latest",
+"o1",
+"o3-mini",
+"o3",
+"o3-pro",
+"o4-mini",
+"o4-mini-high"
 ],
 "options_metadata": [],
 "placeholder": "",
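The expanded options list above is what the component's OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES concatenation serializes to. A hedged sketch of that assembly follows; the list contents are copied from the hunk, but the split between the two constants is an inference, not confirmed by the diff.

# Reconstruction of the serialized dropdown options. The exact boundary
# between chat and reasoning names is assumed (o-series as reasoning models).
OPENAI_CHAT_MODEL_NAMES = [
    "gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano",
    "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4", "gpt-3.5-turbo",
    "gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-5-chat-latest",
]
OPENAI_REASONING_MODEL_NAMES = ["o1", "o3-mini", "o3", "o3-pro", "o4-mini", "o4-mini-high"]

options = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES
assert options[0] == "gpt-4o-mini"  # matches the DropdownInput default value
assert len(options) == 19           # matches the 19 added lines in this hunk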
@@ -1893,7 +1925,7 @@
 "beta": false,
 "conditional_paths": [],
 "custom_fields": {},
-"description": "Runs a language model given a specified provider.
+"description": "Runs a language model given a specified provider.",
 "display_name": "Language Model",
 "documentation": "",
 "edited": false,
@@ -1910,12 +1942,35 @@
 "icon": "brain-circuit",
 "legacy": false,
 "metadata": {
+"code_hash": "bb5f8714781b",
+"dependencies": {
+"dependencies": [
+{
+"name": "langchain_anthropic",
+"version": "0.3.14"
+},
+{
+"name": "langchain_google_genai",
+"version": "2.0.6"
+},
+{
+"name": "langchain_openai",
+"version": "0.3.23"
+},
+{
+"name": "lfx",
+"version": null
+}
+],
+"total_dependencies": 4
+},
 "keywords": [
 "model",
 "llm",
 "language model",
 "large language model"
-]
+],
+"module": "lfx.components.models.language_model.LanguageModelComponent"
 },
 "minimized": false,
 "output_types": [],
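The new metadata block records a short code hash, a pinned dependency snapshot, and the component's import path. A sketch of how such a 12-character hash could be derived follows; the actual hashing scheme is not specified in this diff, so the SHA-256-truncation approach and the helper name are assumptions.

import hashlib

def short_code_hash(source: str, length: int = 12) -> str:
    # Hypothetical: hash the component source text and keep the first
    # `length` hex chars, mirroring the shape of "bb5f8714781b" above.
    return hashlib.sha256(source.encode("utf-8")).hexdigest()[:length]

print(short_code_hash("class LanguageModelComponent: ..."))  # 12 hex chars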
@@ -1927,8 +1982,6 @@
 "group_outputs": false,
 "method": "text_response",
 "name": "text_output",
-"options": null,
-"required_inputs": null,
 "selected": "Message",
 "tool_mode": true,
 "types": [
@@ -1943,8 +1996,6 @@
 "group_outputs": false,
 "method": "build_model",
 "name": "model_output",
-"options": null,
-"required_inputs": null,
 "selected": "LanguageModel",
 "tool_mode": true,
 "types": [
@@ -1960,7 +2011,7 @@
 "api_key": {
 "_input_type": "SecretStrInput",
 "advanced": false,
-"display_name": "
+"display_name": "OpenAI API Key",
 "dynamic": false,
 "info": "Model Provider API key",
 "input_types": [],
@@ -1991,7 +2042,7 @@
 "show": true,
 "title_case": false,
 "type": "code",
-"value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom
+"value": "from typing import Any\n\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langchain_openai import ChatOpenAI\n\nfrom lfx.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom lfx.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.openai_constants import OPENAI_CHAT_MODEL_NAMES, OPENAI_REASONING_MODEL_NAMES\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import DropdownInput, MessageInput, MultilineInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass LanguageModelComponent(LCModelComponent):\n display_name = \"Language Model\"\n description = \"Runs a language model given a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-models\"\n icon = \"brain-circuit\"\n category = \"models\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Anthropic\", \"Google\"],\n value=\"OpenAI\",\n info=\"Select the model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Anthropic\"}, {\"icon\": \"GoogleGenerativeAI\"}],\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_CHAT_MODEL_NAMES[0],\n info=\"Select the model to use\",\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=False,\n show=True,\n real_time_refresh=True,\n ),\n MessageInput(\n name=\"input_value\",\n display_name=\"Input\",\n info=\"The input text to send to the model\",\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"System Message\",\n info=\"A system message that helps set the behavior of the assistant\",\n advanced=False,\n ),\n BoolInput(\n name=\"stream\",\n display_name=\"Stream\",\n info=\"Whether to stream the response\",\n value=False,\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Controls randomness in responses\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n ]\n\n def build_model(self) -> LanguageModel:\n provider = self.provider\n model_name = self.model_name\n temperature = self.temperature\n stream = self.stream\n\n if provider == \"OpenAI\":\n if not self.api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n if model_name in OPENAI_REASONING_MODEL_NAMES:\n # reasoning models do not support temperature (yet)\n temperature = None\n\n return ChatOpenAI(\n model_name=model_name,\n temperature=temperature,\n streaming=stream,\n openai_api_key=self.api_key,\n )\n if provider == \"Anthropic\":\n if not self.api_key:\n msg = \"Anthropic API key is required when using Anthropic provider\"\n raise ValueError(msg)\n return ChatAnthropic(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n anthropic_api_key=self.api_key,\n )\n if provider == \"Google\":\n if not self.api_key:\n msg = \"Google API key is required when using Google provider\"\n raise ValueError(msg)\n return ChatGoogleGenerativeAI(\n model=model_name,\n temperature=temperature,\n streaming=stream,\n google_api_key=self.api_key,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model_name\"][\"options\"] = OPENAI_CHAT_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES\n build_config[\"model_name\"][\"value\"] = OPENAI_CHAT_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n elif field_value == \"Anthropic\":\n build_config[\"model_name\"][\"options\"] = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"value\"] = ANTHROPIC_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Anthropic API Key\"\n elif field_value == \"Google\":\n build_config[\"model_name\"][\"options\"] = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"value\"] = GOOGLE_GENERATIVE_AI_MODELS[0]\n build_config[\"api_key\"][\"display_name\"] = \"Google API Key\"\n elif field_name == \"model_name\" and field_value.startswith(\"o1\") and self.provider == \"OpenAI\":\n # Hide system_message for o1 models - currently unsupported\n if \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = False\n elif field_name == \"model_name\" and not field_value.startswith(\"o1\") and \"system_message\" in build_config:\n build_config[\"system_message\"][\"show\"] = True\n return build_config\n"
 },
 "input_value": {
 "_input_type": "MessageInput",
@@ -2026,13 +2077,25 @@
 "info": "Select the model to use",
 "name": "model_name",
 "options": [
-"
-"
-"
-"
-"
-"
-"
+"gpt-4o-mini",
+"gpt-4o",
+"gpt-4.1",
+"gpt-4.1-mini",
+"gpt-4.1-nano",
+"gpt-4-turbo",
+"gpt-4-turbo-preview",
+"gpt-4",
+"gpt-3.5-turbo",
+"gpt-5",
+"gpt-5-mini",
+"gpt-5-nano",
+"gpt-5-chat-latest",
+"o1",
+"o3-mini",
+"o3",
+"o3-pro",
+"o4-mini",
+"o4-mini-high"
 ],
 "options_metadata": [],
 "placeholder": "",