langflow-base-nightly 0.5.1.dev3__py3-none-any.whl → 0.5.1.dev5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langflow/__init__.py +215 -0
- langflow/__main__.py +16 -2
- langflow/alembic/versions/006b3990db50_add_unique_constraints.py +4 -7
- langflow/alembic/versions/012fb73ac359_add_folder_table.py +4 -5
- langflow/alembic/versions/0ae3a2674f32_update_the_columns_that_need_to_change_.py +11 -20
- langflow/alembic/versions/0b8757876a7c_.py +4 -7
- langflow/alembic/versions/0d60fcbd4e8e_create_vertex_builds_table.py +4 -6
- langflow/alembic/versions/1a110b568907_replace_credential_table_with_variable.py +4 -5
- langflow/alembic/versions/1b8b740a6fa3_remove_fk_constraint_in_message_.py +32 -27
- langflow/alembic/versions/1c79524817ed_add_unique_constraints_per_user_in_.py +4 -5
- langflow/alembic/versions/1d90f8a0efe1_update_description_columns_type.py +4 -5
- langflow/alembic/versions/1eab2c3eb45e_event_error.py +14 -15
- langflow/alembic/versions/1ef9c4f3765d_.py +5 -10
- langflow/alembic/versions/1f4d6df60295_add_default_fields_column.py +4 -5
- langflow/alembic/versions/260dbcc8b680_adds_tables.py +4 -5
- langflow/alembic/versions/29fe8f1f806b_add_missing_index.py +4 -5
- langflow/alembic/versions/2ac71eb9c3ae_adds_credential_table.py +4 -7
- langflow/alembic/versions/3bb0ddf32dfb_add_unique_constraints_per_user_in_flow_.py +4 -5
- langflow/alembic/versions/4e5980a44eaa_fix_date_times_again.py +1 -2
- langflow/alembic/versions/58b28437a398_modify_nullable.py +1 -2
- langflow/alembic/versions/5ace73a7f223_new_remove_table_upgrade_op.py +6 -12
- langflow/alembic/versions/631faacf5da2_add_webhook_columns.py +4 -5
- langflow/alembic/versions/63b9c451fd30_add_icon_and_icon_bg_color_to_flow.py +4 -5
- langflow/alembic/versions/66f72f04a1de_add_mcp_support_with_project_settings_.py +21 -23
- langflow/alembic/versions/67cc006d50bf_add_profile_image_column.py +4 -5
- langflow/alembic/versions/6e7b581b5648_fix_nullable.py +4 -5
- langflow/alembic/versions/7843803a87b5_store_updates.py +4 -6
- langflow/alembic/versions/79e675cb6752_change_datetime_type.py +1 -2
- langflow/alembic/versions/7d2162acc8b2_adds_updated_at_and_folder_cols.py +4 -10
- langflow/alembic/versions/90be8e2ed91e_create_transactions_table.py +4 -6
- langflow/alembic/versions/93e2705fa8d6_add_column_save_path_to_flow.py +7 -9
- langflow/alembic/versions/a72f5cf9c2f9_add_endpoint_name_col.py +4 -5
- langflow/alembic/versions/b2fa308044b5_add_unique_constraints.py +1 -2
- langflow/alembic/versions/bc2f01c40e4a_new_fixes.py +4 -5
- langflow/alembic/versions/c153816fd85f_set_name_and_value_to_not_nullable.py +4 -5
- langflow/alembic/versions/d066bfd22890_add_message_table.py +4 -4
- langflow/alembic/versions/d2d475a1f7c0_add_tags_column_to_flow.py +12 -13
- langflow/alembic/versions/d3dbf656a499_add_gradient_column_in_flow.py +12 -12
- langflow/alembic/versions/d9a6ea21edcd_rename_default_folder.py +7 -10
- langflow/alembic/versions/dd9e0804ebd1_add_v2_file_table.py +8 -7
- langflow/alembic/versions/e3162c1804e6_add_persistent_locked_state.py +10 -10
- langflow/alembic/versions/e3bc869fa272_fix_nullable.py +4 -5
- langflow/alembic/versions/e56d87f8994a_add_optins_column_to_user.py +13 -14
- langflow/alembic/versions/e5a65ecff2cd_nullable_in_vertex_build.py +4 -5
- langflow/alembic/versions/eb5866d51fd2_change_columns_to_be_nullable.py +4 -5
- langflow/alembic/versions/eb5e72293a8e_add_error_and_edit_flags_to_message.py +4 -5
- langflow/alembic/versions/f3b2d1f1002d_add_column_access_type_to_flow.py +19 -15
- langflow/alembic/versions/f5ee9749d1a6_user_id_can_be_null_in_flow.py +4 -6
- langflow/alembic/versions/fd531f8868b1_fix_credential_table.py +5 -8
- langflow/api/build.py +5 -4
- langflow/api/health_check_router.py +1 -1
- langflow/api/limited_background_tasks.py +1 -1
- langflow/api/log_router.py +1 -2
- langflow/api/utils.py +2 -2
- langflow/api/v1/base.py +1 -2
- langflow/api/v1/callback.py +4 -9
- langflow/api/v1/chat.py +6 -7
- langflow/api/v1/endpoints.py +15 -15
- langflow/api/v1/files.py +1 -1
- langflow/api/v1/flows.py +1 -1
- langflow/api/v1/knowledge_bases.py +1 -1
- langflow/api/v1/mcp.py +1 -1
- langflow/api/v1/mcp_projects.py +14 -5
- langflow/api/v1/mcp_utils.py +3 -3
- langflow/api/v1/openai_responses.py +4 -4
- langflow/api/v1/schemas.py +3 -38
- langflow/api/v1/starter_projects.py +61 -3
- langflow/api/v1/store.py +1 -1
- langflow/api/v1/validate.py +3 -3
- langflow/api/v1/voice_mode.py +2 -2
- langflow/api/v2/files.py +1 -1
- langflow/api/v2/mcp.py +2 -2
- langflow/base/__init__.py +11 -0
- langflow/base/agents/__init__.py +3 -0
- langflow/base/data/__init__.py +2 -4
- langflow/base/data/utils.py +2 -197
- langflow/base/embeddings/__init__.py +3 -0
- langflow/base/io/__init__.py +7 -0
- langflow/base/io/chat.py +5 -18
- langflow/base/io/text.py +2 -21
- langflow/base/knowledge_bases/__init__.py +3 -0
- langflow/base/memory/__init__.py +3 -0
- langflow/base/models/__init__.py +2 -2
- langflow/base/models/openai_constants.py +6 -120
- langflow/base/prompts/__init__.py +3 -0
- langflow/base/prompts/api_utils.py +2 -223
- langflow/base/textsplitters/__init__.py +3 -0
- langflow/base/tools/__init__.py +3 -0
- langflow/base/vectorstores/__init__.py +3 -0
- langflow/components/__init__.py +7 -259
- langflow/components/agents.py +6 -0
- langflow/components/anthropic.py +6 -0
- langflow/components/data.py +6 -0
- langflow/components/helpers.py +6 -0
- langflow/components/knowledge_bases/ingestion.py +13 -14
- langflow/components/knowledge_bases/retrieval.py +8 -7
- langflow/components/openai.py +6 -0
- langflow/components/processing/__init__.py +1 -117
- langflow/components/processing/converter.py +3 -149
- langflow/custom/__init__.py +26 -3
- langflow/custom/custom_component/__init__.py +4 -0
- langflow/custom/custom_component/component.py +20 -1738
- langflow/custom/custom_component/component_with_cache.py +1 -8
- langflow/custom/custom_component/custom_component.py +1 -552
- langflow/custom/utils.py +1 -872
- langflow/custom/validate.py +1 -0
- langflow/events/event_manager.py +18 -108
- langflow/field_typing/__init__.py +6 -6
- langflow/field_typing/constants.py +87 -122
- langflow/field_typing/range_spec.py +2 -32
- langflow/frontend/assets/{SlackIcon-Cc7Qnzki.js → SlackIcon-v88osOTA.js} +1 -1
- langflow/frontend/assets/{Wikipedia-7ulMZY46.js → Wikipedia-DD_S2k00.js} +1 -1
- langflow/frontend/assets/{Wolfram-By9PGsHS.js → Wolfram-EO2C5noN.js} +1 -1
- langflow/frontend/assets/{index-DVLIDc2_.js → index-1Gv1mfvk.js} +1 -1
- langflow/frontend/assets/{index-MVW4HTEk.js → index-7v-bzlzf.js} +1 -1
- langflow/frontend/assets/{index-CUzlcce2.js → index-9CbMazbV.js} +1 -1
- langflow/frontend/assets/{index-CU16NJD7.js → index-B8ZHP8g2.js} +1 -1
- langflow/frontend/assets/{index-v8eXbWlM.js → index-B8y2e6vN.js} +1 -1
- langflow/frontend/assets/{index-BX_asvRB.js → index-BBRUGsyr.js} +1 -1
- langflow/frontend/assets/{index-9FL5xjkL.js → index-BGwqQwlh.js} +1 -1
- langflow/frontend/assets/{index-BAn-AzCS.js → index-BIq-k-FG.js} +1 -1
- langflow/frontend/assets/{index-D5c2nNvp.js → index-BSN73YP8.js} +1 -1
- langflow/frontend/assets/{index-DMCerPJM.js → index-BU8R8jRn.js} +1 -1
- langflow/frontend/assets/{index-CvSoff-8.js → index-BV6yx8ey.js} +1 -1
- langflow/frontend/assets/{index-BISPW-f6.js → index-BYIsg-Eh.js} +1 -1
- langflow/frontend/assets/{index-GzOGB_fo.js → index-B_ksDBSQ.js} +1 -1
- langflow/frontend/assets/{index-BIqEYjNT.js → index-Ba1UOZ9A.js} +1 -1
- langflow/frontend/assets/{index-ByxGmq5p.js → index-Ba9tKRQg.js} +1 -1
- langflow/frontend/assets/{index-BLEWsL1U.js → index-Bbfaw8ca.js} +1 -1
- langflow/frontend/assets/{index-C_MhBX6R.js → index-BbuGqvAx.js} +1 -1
- langflow/frontend/assets/{index-RH_I78z_.js → index-BeoXu1YX.js} +1 -1
- langflow/frontend/assets/{index-cYFKmtmg.js → index-BfjZmOnH.js} +1 -1
- langflow/frontend/assets/{index-Bm9i8F4W.js → index-Bjzy_HZB.js} +1 -1
- langflow/frontend/assets/{index-_szO7sta.js → index-BofEkpYB.js} +1 -1
- langflow/frontend/assets/{index-DP1oE6QB.js → index-Bp7Mty2H.js} +1 -1
- langflow/frontend/assets/{index-CeswGUz3.js → index-BqX1H6yK.js} +1 -1
- langflow/frontend/assets/{index-C8pI0lzi.js → index-BqtBAJAN.js} +1 -1
- langflow/frontend/assets/{index-BusCv3bR.js → index-Bsfraj7A.js} +1 -1
- langflow/frontend/assets/{index-BWnKMRFJ.js → index-BtFl7fER.js} +1 -1
- langflow/frontend/assets/{index-DnlVWWU8.js → index-BvX993Sv.js} +1 -1
- langflow/frontend/assets/{index-C676MS3I.js → index-BvgQ2vzM.js} +1 -1
- langflow/frontend/assets/{index-DJ6HD14g.js → index-BwY98u8n.js} +1 -1
- langflow/frontend/assets/{index-C51yNvIL.js → index-C-RIJAOS.js} +1 -1
- langflow/frontend/assets/{index-DiblXWmk.js → index-C1K6A38P.js} +1 -1
- langflow/frontend/assets/{index-Co__gFM1.js → index-C3Vwhx0t.js} +1 -1
- langflow/frontend/assets/{index-Coi86oqP.js → index-C5XUG_gr.js} +1 -1
- langflow/frontend/assets/{index-jwzN3Jd_.js → index-C6ouLG9o.js} +1 -1
- langflow/frontend/assets/{index-CQQ-4XMS.js → index-C7ZJ_Z6f.js} +1 -1
- langflow/frontend/assets/{index-Bl7RpmrB.js → index-CCOGIwGY.js} +1 -1
- langflow/frontend/assets/{index-CVkIdc6y.js → index-CCcye2rt.js} +1 -1
- langflow/frontend/assets/{index-bMhyLtgS.js → index-CFR4yJQB.js} +1 -1
- langflow/frontend/assets/{index-aAgSKWb3.js → index-CIGmPP0H.js} +1 -1
- langflow/frontend/assets/{index-BGt6jQ4x.js → index-CJmMEa6d.js} +1 -1
- langflow/frontend/assets/{index-DX7JcSMz.js → index-CJxD7lyU.js} +1 -1
- langflow/frontend/assets/{index-BZ-A4K98.js → index-CL_vu6ut.js} +1 -1
- langflow/frontend/assets/{index-BMpKFGhI.js → index-COf3UnBn.js} +1 -1
- langflow/frontend/assets/{index-xN8ogFdo.js → index-CV9650h_.js} +1 -1
- langflow/frontend/assets/{index-OsUvqIUr.js → index-CVDzych0.js} +1 -1
- langflow/frontend/assets/{index-BH7AyHxp.js → index-CWIHsC4D.js} +1 -1
- langflow/frontend/assets/{index-mjwtJmkP.js → index-CXCnFZ0L.js} +1 -1
- langflow/frontend/assets/{index-3jlSQi5Y.js → index-Ca_Pw_Dn.js} +1 -1
- langflow/frontend/assets/{index-D-SnFlhU.js → index-Cbb3bX9e.js} +1 -1
- langflow/frontend/assets/{index--e0oQqZh.js → index-CcJtOz-Z.js} +1 -1
- langflow/frontend/assets/{index-S-sc0Cm9.js → index-CfTbTHEv.js} +1 -1
- langflow/frontend/assets/{index-Deu8rlaZ.js → index-ChoxDAgX.js} +1 -1
- langflow/frontend/assets/{index-lnF9Eqr2.js → index-Cn4gw8aE.js} +1 -1
- langflow/frontend/assets/{index-C_NwzK6j.js → index-CnpLg4zX.js} +1 -1
- langflow/frontend/assets/{index-DznH7Jbq.js → index-Cpao2omG.js} +1 -1
- langflow/frontend/assets/{index-DpWrk8mA.js → index-CqoxM01j.js} +1 -1
- langflow/frontend/assets/{index-Bw-TIIC6.js → index-CrHf2Ic1.js} +1 -1
- langflow/frontend/assets/{index-DmYLDQag.js → index-CrV0uIjp.js} +1 -1
- langflow/frontend/assets/{index-Dp7ZQyL3.js → index-CssADaak.js} +1 -1
- langflow/frontend/assets/{index-CNh0rwur.js → index-CtJdNLy9.js} +1 -1
- langflow/frontend/assets/{index-Ca1b7Iag.js → index-CyeWD2dh.js} +1 -1
- langflow/frontend/assets/{index-DcApTyZ7.js → index-D1xzD7uc.js} +1 -1
- langflow/frontend/assets/{index-B3GvPjhD.js → index-D6MuXC4L.js} +1 -1
- langflow/frontend/assets/{index-Cw0UComa.js → index-D8w9zvIF.js} +1 -1
- langflow/frontend/assets/{index-C-2MRYoJ.js → index-D98Gn0A6.js} +1 -1
- langflow/frontend/assets/{index-aWnZIwHd.js → index-DBhjpWkf.js} +1 -1
- langflow/frontend/assets/{index-nw3WF9lY.js → index-DCCRJzcY.js} +1 -1
- langflow/frontend/assets/{index-RjeC0kaX.js → index-DCTRSkEW.js} +1 -1
- langflow/frontend/assets/{index-B_kBTgxV.js → index-DCUfitVj.js} +1 -1
- langflow/frontend/assets/{index-ChsGhZn3.js → index-DDdz-Xcl.js} +1 -1
- langflow/frontend/assets/{index-7yAHPRxv.js → index-DGdMwZjG.js} +1 -1
- langflow/frontend/assets/{index-DjQElpEg.js → index-DGtl2vMw.js} +1 -1
- langflow/frontend/assets/{index-BCXhKCOK.js → index-DHVdkrni.js} +1 -1
- langflow/frontend/assets/{index-S8uJXTOq.js → index-DJBWwjgl.js} +1 -1
- langflow/frontend/assets/{index-qiVTWUuf.js → index-DMAkJ_qX.js} +1 -1
- langflow/frontend/assets/{index-D-WStJI6.js → index-DMEvEQI5.js} +1 -1
- langflow/frontend/assets/{index-BhqVw9WQ.js → index-DNGRoOsp.js} +1 -1
- langflow/frontend/assets/{index-Cu7vC48Y.js → index-DNT_TUTa.js} +1 -1
- langflow/frontend/assets/{index-Bhcv5M0n.js → index-DQKOH_9K.js} +1 -1
- langflow/frontend/assets/{index-CLcaktde.js → index-DQhqqtqQ.js} +1 -1
- langflow/frontend/assets/{index-DZVgPCio.js → index-DRM7KKnG.js} +1 -1
- langflow/frontend/assets/{index-uybez8MR.js → index-DSCtl3a5.js} +1 -1
- langflow/frontend/assets/{index-CJ5A6STv.js → index-DSLNlm0Z.js} +1 -1
- langflow/frontend/assets/{index-Drg8me2a.js → index-DT-PspE-.js} +1 -1
- langflow/frontend/assets/{index-DsEZjOcp.js → index-DTpbH-p8.js} +1 -1
- langflow/frontend/assets/{index-DrXXKzpD.js → index-DWV6MsIq.js} +1 -1
- langflow/frontend/assets/{index-4JIEdyIM.js → index-DWeL4US_.js} +1 -1
- langflow/frontend/assets/{index-BlDsBQ_1.js → index-DYKZHhpU.js} +1 -1
- langflow/frontend/assets/{index-DFY8YFbC.js → index-DZyQHiMR.js} +1 -1
- langflow/frontend/assets/{index-CKPZpkQk.js → index-Dc6qVuSa.js} +1 -1
- langflow/frontend/assets/{index-yyAaYjLR.js → index-DkYuicnC.js} +1 -1
- langflow/frontend/assets/{index-DmVt5Jlx.js → index-Dlj_2mMs.js} +1 -1
- langflow/frontend/assets/{index-BvRIG6P5.js → index-DmGJUrEp.js} +1 -1
- langflow/frontend/assets/{index-BWFIrwW1.js → index-Dn6hpCAZ.js} +1 -1
- langflow/frontend/assets/{index-Cb5G9Ifd.js → index-DrJU8Fgb.js} +1 -1
- langflow/frontend/assets/{index-COoTCxvs.js → index-DsWfdCzp.js} +1 -1
- langflow/frontend/assets/{index-ZjeocHyu.js → index-DvCPWs2_.js} +1 -1
- langflow/frontend/assets/{index-B5LHnuQR.js → index-DvPVq7OP.js} +1 -1
- langflow/frontend/assets/{index-BnCnYnao.js → index-Dw71ufW4.js} +1 -1
- langflow/frontend/assets/{index-AALDfCyt.js → index-DxkJactf.js} +1 -1
- langflow/frontend/assets/{index-k9jP5chN.js → index-Dz2GTphU.js} +1 -1
- langflow/frontend/assets/{index-BdjfHsrf.js → index-Fvd524_c.js} +1 -1
- langflow/frontend/assets/{index-AKVkmT4S.js → index-GAQ0Mk2M.js} +1 -1
- langflow/frontend/assets/{index-BZSa2qz7.js → index-Hm5-4ItD.js} +1 -1
- langflow/frontend/assets/{index-DbfS_UH-.js → index-IT67FzsK.js} +1 -1
- langflow/frontend/assets/{index-BLXN681C.js → index-ItYiij1i.js} +1 -1
- langflow/frontend/assets/{index-CiklyQU3.js → index-IuR_FEdB.js} +1 -1
- langflow/frontend/assets/{index-xV6ystWy.js → index-Jj60FQkv.js} +1 -1
- langflow/frontend/assets/{index-C_157Mb-.js → index-LlvshmVz.js} +1 -1
- langflow/frontend/assets/{index-CDphUsa3.js → index-LwKh3I_W.js} +1 -1
- langflow/frontend/assets/{index-BrDz-PxE.js → index-N-xxmKKH.js} +1 -1
- langflow/frontend/assets/{index-BsdLyYMY.js → index-RwpaHIAH.js} +1 -1
- langflow/frontend/assets/{index-Cu2Xr6_j.js → index-TVvsp-xh.js} +1 -1
- langflow/frontend/assets/{index-CPiM2oyj.js → index-TdE2u9zP.js} +1 -1
- langflow/frontend/assets/{index-DOj_QWqG.js → index-_x-NkYeW.js} +1 -1
- langflow/frontend/assets/{index-YJsAl7vm.js → index-a-YclEbW.js} +1 -1
- langflow/frontend/assets/{index-5-CSw2-z.js → index-e9MFKUCo.js} +1 -1
- langflow/frontend/assets/{index-BSwBVwyF.js → index-krPr8f2F.js} +1 -1
- langflow/frontend/assets/{index-Df6psZEj.js → index-kveiUWuL.js} +1 -1
- langflow/frontend/assets/{index-CF4_Og1m.js → index-lE3oSjJi.js} +1 -1
- langflow/frontend/assets/{index-C6nzdeYx.js → index-lM3UYg7F.js} +1 -1
- langflow/frontend/assets/{index-C-wnbBBY.js → index-nsRk3qgA.js} +1 -1
- langflow/frontend/assets/{index-D234yKNJ.js → index-pBO0SZLD.js} +4 -4
- langflow/frontend/assets/{index-BMvp94tO.js → index-pbZHsbuE.js} +1 -1
- langflow/frontend/assets/{index-hg2y9OAt.js → index-sfX3aWyp.js} +1 -1
- langflow/frontend/assets/{index-DTCrijba.js → index-xQz-VJ0-.js} +1 -1
- langflow/frontend/assets/{index-SB4rw8D5.js → index-yfcsaHS6.js} +1 -1
- langflow/frontend/assets/{index-C-bjC2sz.js → index-zcGjo9fx.js} +1 -1
- langflow/frontend/assets/lazyIconImports-BjqDmNYG.js +2 -0
- langflow/frontend/assets/{use-post-add-user-JUeLDErC.js → use-post-add-user-w3vpKSOB.js} +1 -1
- langflow/frontend/index.html +1 -1
- langflow/graph/__init__.py +4 -4
- langflow/helpers/data.py +2 -2
- langflow/helpers/flow.py +9 -7
- langflow/helpers/user.py +2 -2
- langflow/initial_setup/setup.py +9 -9
- langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +119 -41
- langflow/initial_setup/starter_projects/Basic Prompting.json +45 -19
- langflow/initial_setup/starter_projects/Blog Writer.json +53 -21
- langflow/initial_setup/starter_projects/Custom Component Generator.json +121 -97
- langflow/initial_setup/starter_projects/Document Q&A.json +46 -18
- langflow/initial_setup/starter_projects/Financial Report Parser.json +49 -17
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +89 -50
- langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +86 -22
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +210 -57
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +132 -35
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json +8 -8
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json +8 -8
- langflow/initial_setup/starter_projects/Market Research.json +174 -48
- langflow/initial_setup/starter_projects/Meeting Summary.json +102 -38
- langflow/initial_setup/starter_projects/Memory Chatbot.json +49 -21
- langflow/initial_setup/starter_projects/News Aggregator.json +140 -39
- langflow/initial_setup/starter_projects/Nvidia Remix.json +153 -181
- langflow/initial_setup/starter_projects/Pokédex Agent.json +132 -35
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +106 -43
- langflow/initial_setup/starter_projects/Price Deal Finder.json +136 -39
- langflow/initial_setup/starter_projects/Research Agent.json +206 -53
- langflow/initial_setup/starter_projects/Research Translation Loop.json +66 -34
- langflow/initial_setup/starter_projects/SEO Keyword Generator.json +41 -15
- langflow/initial_setup/starter_projects/SaaS Pricing.json +128 -31
- langflow/initial_setup/starter_projects/Search agent.json +132 -35
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +422 -98
- langflow/initial_setup/starter_projects/Simple Agent.json +150 -42
- langflow/initial_setup/starter_projects/Social Media Agent.json +150 -42
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +120 -24
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +418 -94
- langflow/initial_setup/starter_projects/Twitter Thread Generator.json +69 -37
- langflow/initial_setup/starter_projects/Vector Store RAG.json +66 -38
- langflow/initial_setup/starter_projects/Youtube Analysis.json +191 -51
- langflow/initial_setup/starter_projects/basic_prompting.py +4 -4
- langflow/initial_setup/starter_projects/blog_writer.py +5 -5
- langflow/initial_setup/starter_projects/complex_agent.py +8 -8
- langflow/initial_setup/starter_projects/document_qa.py +5 -5
- langflow/initial_setup/starter_projects/hierarchical_tasks_agent.py +8 -8
- langflow/initial_setup/starter_projects/memory_chatbot.py +6 -6
- langflow/initial_setup/starter_projects/sequential_tasks_agent.py +7 -7
- langflow/initial_setup/starter_projects/vector_store_rag.py +8 -8
- langflow/inputs/__init__.py +3 -2
- langflow/inputs/constants.py +3 -2
- langflow/inputs/input_mixin.py +49 -310
- langflow/inputs/inputs.py +72 -703
- langflow/inputs/validators.py +2 -18
- langflow/interface/__init__.py +4 -0
- langflow/interface/components.py +3 -491
- langflow/interface/initialize/loading.py +7 -6
- langflow/interface/listing.py +3 -25
- langflow/interface/run.py +1 -1
- langflow/interface/utils.py +3 -111
- langflow/io/__init__.py +2 -2
- langflow/io/schema.py +11 -302
- langflow/load/__init__.py +4 -2
- langflow/load/utils.py +2 -96
- langflow/logging/__init__.py +2 -1
- langflow/logging/setup.py +1 -1
- langflow/main.py +8 -5
- langflow/memory.py +12 -6
- langflow/middleware.py +1 -1
- langflow/processing/process.py +7 -7
- langflow/schema/__init__.py +22 -5
- langflow/schema/artifact.py +1 -1
- langflow/schema/data.py +5 -303
- langflow/schema/dataframe.py +2 -205
- langflow/schema/graph.py +4 -45
- langflow/schema/image.py +2 -67
- langflow/schema/message.py +6 -470
- langflow/schema/playground_events.py +5 -6
- langflow/schema/schema.py +24 -117
- langflow/serialization/constants.py +3 -2
- langflow/serialization/serialization.py +1 -1
- langflow/server.py +1 -2
- langflow/services/__init__.py +1 -2
- langflow/services/auth/mcp_encryption.py +1 -1
- langflow/services/auth/service.py +1 -1
- langflow/services/auth/utils.py +5 -5
- langflow/services/cache/disk.py +2 -2
- langflow/services/cache/factory.py +2 -2
- langflow/services/cache/service.py +2 -2
- langflow/services/cache/utils.py +0 -11
- langflow/services/database/factory.py +1 -1
- langflow/services/database/models/flow/model.py +1 -1
- langflow/services/database/models/message/crud.py +2 -1
- langflow/services/database/models/transactions/crud.py +1 -1
- langflow/services/database/models/user/crud.py +1 -1
- langflow/services/database/service.py +2 -2
- langflow/services/database/utils.py +1 -2
- langflow/services/deps.py +12 -17
- langflow/services/enhanced_manager.py +71 -0
- langflow/services/factory.py +14 -7
- langflow/services/flow/flow_runner.py +4 -4
- langflow/services/job_queue/service.py +2 -1
- langflow/services/manager.py +14 -130
- langflow/services/schema.py +0 -1
- langflow/services/session/service.py +3 -2
- langflow/services/settings/__init__.py +0 -3
- langflow/services/settings/base.py +16 -549
- langflow/services/settings/factory.py +2 -21
- langflow/services/settings/feature_flags.py +2 -11
- langflow/services/settings/service.py +2 -31
- langflow/services/shared_component_cache/factory.py +1 -1
- langflow/services/socket/service.py +1 -1
- langflow/services/socket/utils.py +1 -8
- langflow/services/state/factory.py +1 -1
- langflow/services/state/service.py +3 -2
- langflow/services/storage/factory.py +2 -2
- langflow/services/storage/local.py +1 -2
- langflow/services/storage/s3.py +1 -2
- langflow/services/storage/service.py +2 -1
- langflow/services/store/factory.py +1 -1
- langflow/services/store/service.py +2 -2
- langflow/services/store/utils.py +1 -2
- langflow/services/task/service.py +2 -1
- langflow/services/task/temp_flow_cleanup.py +1 -1
- langflow/services/telemetry/factory.py +1 -1
- langflow/services/telemetry/service.py +2 -3
- langflow/services/tracing/arize_phoenix.py +3 -3
- langflow/services/tracing/base.py +1 -1
- langflow/services/tracing/factory.py +1 -1
- langflow/services/tracing/langfuse.py +2 -2
- langflow/services/tracing/langsmith.py +2 -2
- langflow/services/tracing/langwatch.py +4 -4
- langflow/services/tracing/opik.py +2 -2
- langflow/services/tracing/service.py +17 -11
- langflow/services/tracing/traceloop.py +2 -2
- langflow/services/tracing/utils.py +1 -1
- langflow/services/utils.py +54 -9
- langflow/services/variable/factory.py +1 -1
- langflow/services/variable/kubernetes.py +2 -3
- langflow/services/variable/kubernetes_secrets.py +1 -2
- langflow/services/variable/service.py +2 -3
- langflow/template/__init__.py +2 -9
- langflow/template/field/__init__.py +3 -0
- langflow/template/field/base.py +2 -256
- langflow/template/frontend_node.py +3 -0
- langflow/template/utils.py +2 -216
- langflow/utils/constants.py +28 -204
- langflow/utils/lazy_load.py +3 -14
- langflow/utils/schemas.py +2 -3
- langflow/utils/template_validation.py +2 -2
- langflow/utils/util.py +59 -479
- langflow/utils/validate.py +2 -488
- langflow/utils/voice_utils.py +1 -2
- langflow/worker.py +1 -1
- {langflow_base_nightly-0.5.1.dev3.dist-info → langflow_base_nightly-0.5.1.dev5.dist-info}/METADATA +2 -1
- langflow_base_nightly-0.5.1.dev5.dist-info/RECORD +633 -0
- langflow/base/agents/agent.py +0 -267
- langflow/base/agents/callback.py +0 -130
- langflow/base/agents/context.py +0 -109
- langflow/base/agents/crewai/__init__.py +0 -0
- langflow/base/agents/crewai/crew.py +0 -231
- langflow/base/agents/crewai/tasks.py +0 -12
- langflow/base/agents/default_prompts.py +0 -23
- langflow/base/agents/errors.py +0 -15
- langflow/base/agents/events.py +0 -346
- langflow/base/agents/utils.py +0 -205
- langflow/base/astra_assistants/__init__.py +0 -0
- langflow/base/astra_assistants/util.py +0 -171
- langflow/base/chains/__init__.py +0 -0
- langflow/base/chains/model.py +0 -19
- langflow/base/composio/__init__.py +0 -0
- langflow/base/composio/composio_base.py +0 -1297
- langflow/base/compressors/__init__.py +0 -0
- langflow/base/compressors/model.py +0 -60
- langflow/base/constants.py +0 -46
- langflow/base/curl/__init__.py +0 -0
- langflow/base/curl/parse.py +0 -188
- langflow/base/data/base_file.py +0 -685
- langflow/base/data/docling_utils.py +0 -245
- langflow/base/document_transformers/__init__.py +0 -0
- langflow/base/document_transformers/model.py +0 -43
- langflow/base/embeddings/aiml_embeddings.py +0 -62
- langflow/base/embeddings/model.py +0 -26
- langflow/base/flow_processing/__init__.py +0 -0
- langflow/base/flow_processing/utils.py +0 -86
- langflow/base/huggingface/__init__.py +0 -0
- langflow/base/huggingface/model_bridge.py +0 -133
- langflow/base/langchain_utilities/__init__.py +0 -0
- langflow/base/langchain_utilities/model.py +0 -35
- langflow/base/langchain_utilities/spider_constants.py +0 -1
- langflow/base/langwatch/__init__.py +0 -0
- langflow/base/langwatch/utils.py +0 -18
- langflow/base/mcp/__init__.py +0 -0
- langflow/base/mcp/constants.py +0 -2
- langflow/base/mcp/util.py +0 -1524
- langflow/base/memory/memory.py +0 -49
- langflow/base/memory/model.py +0 -38
- langflow/base/models/aiml_constants.py +0 -51
- langflow/base/models/anthropic_constants.py +0 -47
- langflow/base/models/aws_constants.py +0 -151
- langflow/base/models/chat_result.py +0 -76
- langflow/base/models/google_generative_ai_constants.py +0 -70
- langflow/base/models/groq_constants.py +0 -134
- langflow/base/models/model.py +0 -375
- langflow/base/models/model_input_constants.py +0 -299
- langflow/base/models/model_metadata.py +0 -41
- langflow/base/models/model_utils.py +0 -8
- langflow/base/models/novita_constants.py +0 -35
- langflow/base/models/ollama_constants.py +0 -49
- langflow/base/models/sambanova_constants.py +0 -18
- langflow/base/processing/__init__.py +0 -0
- langflow/base/prompts/utils.py +0 -61
- langflow/base/textsplitters/model.py +0 -28
- langflow/base/tools/base.py +0 -26
- langflow/base/tools/component_tool.py +0 -324
- langflow/base/tools/constants.py +0 -49
- langflow/base/tools/flow_tool.py +0 -131
- langflow/base/tools/run_flow.py +0 -227
- langflow/base/vectorstores/model.py +0 -193
- langflow/base/vectorstores/utils.py +0 -22
- langflow/base/vectorstores/vector_store_connection_decorator.py +0 -52
- langflow/components/FAISS/__init__.py +0 -34
- langflow/components/FAISS/faiss.py +0 -111
- langflow/components/Notion/__init__.py +0 -19
- langflow/components/Notion/add_content_to_page.py +0 -269
- langflow/components/Notion/create_page.py +0 -94
- langflow/components/Notion/list_database_properties.py +0 -68
- langflow/components/Notion/list_pages.py +0 -122
- langflow/components/Notion/list_users.py +0 -77
- langflow/components/Notion/page_content_viewer.py +0 -93
- langflow/components/Notion/search.py +0 -111
- langflow/components/Notion/update_page_property.py +0 -114
- langflow/components/_importing.py +0 -37
- langflow/components/agentql/__init__.py +0 -3
- langflow/components/agentql/agentql_api.py +0 -151
- langflow/components/agents/__init__.py +0 -4
- langflow/components/agents/agent.py +0 -554
- langflow/components/agents/mcp_component.py +0 -501
- langflow/components/aiml/__init__.py +0 -37
- langflow/components/aiml/aiml.py +0 -112
- langflow/components/aiml/aiml_embeddings.py +0 -37
- langflow/components/amazon/__init__.py +0 -36
- langflow/components/amazon/amazon_bedrock_embedding.py +0 -109
- langflow/components/amazon/amazon_bedrock_model.py +0 -124
- langflow/components/amazon/s3_bucket_uploader.py +0 -211
- langflow/components/anthropic/__init__.py +0 -34
- langflow/components/anthropic/anthropic.py +0 -187
- langflow/components/apify/__init__.py +0 -5
- langflow/components/apify/apify_actor.py +0 -325
- langflow/components/arxiv/__init__.py +0 -3
- langflow/components/arxiv/arxiv.py +0 -163
- langflow/components/assemblyai/__init__.py +0 -46
- langflow/components/assemblyai/assemblyai_get_subtitles.py +0 -83
- langflow/components/assemblyai/assemblyai_lemur.py +0 -183
- langflow/components/assemblyai/assemblyai_list_transcripts.py +0 -95
- langflow/components/assemblyai/assemblyai_poll_transcript.py +0 -72
- langflow/components/assemblyai/assemblyai_start_transcript.py +0 -188
- langflow/components/azure/__init__.py +0 -37
- langflow/components/azure/azure_openai.py +0 -95
- langflow/components/azure/azure_openai_embeddings.py +0 -83
- langflow/components/baidu/__init__.py +0 -32
- langflow/components/baidu/baidu_qianfan_chat.py +0 -113
- langflow/components/bing/__init__.py +0 -3
- langflow/components/bing/bing_search_api.py +0 -61
- langflow/components/cassandra/__init__.py +0 -40
- langflow/components/cassandra/cassandra.py +0 -264
- langflow/components/cassandra/cassandra_chat.py +0 -92
- langflow/components/cassandra/cassandra_graph.py +0 -238
- langflow/components/chains/__init__.py +0 -0
- langflow/components/chroma/__init__.py +0 -34
- langflow/components/chroma/chroma.py +0 -167
- langflow/components/cleanlab/__init__.py +0 -40
- langflow/components/cleanlab/cleanlab_evaluator.py +0 -157
- langflow/components/cleanlab/cleanlab_rag_evaluator.py +0 -254
- langflow/components/cleanlab/cleanlab_remediator.py +0 -131
- langflow/components/clickhouse/__init__.py +0 -34
- langflow/components/clickhouse/clickhouse.py +0 -135
- langflow/components/cloudflare/__init__.py +0 -32
- langflow/components/cloudflare/cloudflare.py +0 -81
- langflow/components/cohere/__init__.py +0 -40
- langflow/components/cohere/cohere_embeddings.py +0 -81
- langflow/components/cohere/cohere_models.py +0 -46
- langflow/components/cohere/cohere_rerank.py +0 -51
- langflow/components/composio/__init__.py +0 -73
- langflow/components/composio/composio_api.py +0 -268
- langflow/components/composio/dropbox_compnent.py +0 -11
- langflow/components/composio/github_composio.py +0 -11
- langflow/components/composio/gmail_composio.py +0 -38
- langflow/components/composio/googlecalendar_composio.py +0 -11
- langflow/components/composio/googlemeet_composio.py +0 -11
- langflow/components/composio/googletasks_composio.py +0 -8
- langflow/components/composio/linear_composio.py +0 -11
- langflow/components/composio/outlook_composio.py +0 -11
- langflow/components/composio/reddit_composio.py +0 -11
- langflow/components/composio/slack_composio.py +0 -11
- langflow/components/composio/slackbot_composio.py +0 -11
- langflow/components/composio/supabase_composio.py +0 -11
- langflow/components/composio/todoist_composio.py +0 -11
- langflow/components/composio/youtube_composio.py +0 -11
- langflow/components/confluence/__init__.py +0 -3
- langflow/components/confluence/confluence.py +0 -84
- langflow/components/couchbase/__init__.py +0 -34
- langflow/components/couchbase/couchbase.py +0 -102
- langflow/components/crewai/__init__.py +0 -49
- langflow/components/crewai/crewai.py +0 -107
- langflow/components/crewai/hierarchical_crew.py +0 -46
- langflow/components/crewai/hierarchical_task.py +0 -44
- langflow/components/crewai/sequential_crew.py +0 -52
- langflow/components/crewai/sequential_task.py +0 -73
- langflow/components/crewai/sequential_task_agent.py +0 -143
- langflow/components/custom_component/__init__.py +0 -34
- langflow/components/custom_component/custom_component.py +0 -31
- langflow/components/data/__init__.py +0 -25
- langflow/components/data/api_request.py +0 -545
- langflow/components/data/csv_to_data.py +0 -95
- langflow/components/data/directory.py +0 -113
- langflow/components/data/file.py +0 -586
- langflow/components/data/json_to_data.py +0 -98
- langflow/components/data/news_search.py +0 -164
- langflow/components/data/rss.py +0 -69
- langflow/components/data/sql_executor.py +0 -99
- langflow/components/data/url.py +0 -299
- langflow/components/data/web_search.py +0 -112
- langflow/components/data/webhook.py +0 -56
- langflow/components/datastax/__init__.py +0 -70
- langflow/components/datastax/astra_assistant_manager.py +0 -306
- langflow/components/datastax/astra_db.py +0 -69
- langflow/components/datastax/astra_vectorize.py +0 -124
- langflow/components/datastax/astradb_cql.py +0 -314
- langflow/components/datastax/astradb_graph.py +0 -319
- langflow/components/datastax/astradb_tool.py +0 -414
- langflow/components/datastax/astradb_vectorstore.py +0 -1285
- langflow/components/datastax/create_assistant.py +0 -58
- langflow/components/datastax/create_thread.py +0 -32
- langflow/components/datastax/dotenv.py +0 -35
- langflow/components/datastax/get_assistant.py +0 -37
- langflow/components/datastax/getenvvar.py +0 -30
- langflow/components/datastax/graph_rag.py +0 -141
- langflow/components/datastax/hcd.py +0 -314
- langflow/components/datastax/list_assistants.py +0 -25
- langflow/components/datastax/run.py +0 -89
- langflow/components/deactivated/__init__.py +0 -19
- langflow/components/deactivated/amazon_kendra.py +0 -66
- langflow/components/deactivated/chat_litellm_model.py +0 -158
- langflow/components/deactivated/code_block_extractor.py +0 -26
- langflow/components/deactivated/documents_to_data.py +0 -22
- langflow/components/deactivated/embed.py +0 -16
- langflow/components/deactivated/extract_key_from_data.py +0 -46
- langflow/components/deactivated/json_document_builder.py +0 -59
- langflow/components/deactivated/list_flows.py +0 -20
- langflow/components/deactivated/mcp_sse.py +0 -61
- langflow/components/deactivated/mcp_stdio.py +0 -62
- langflow/components/deactivated/merge_data.py +0 -93
- langflow/components/deactivated/message.py +0 -37
- langflow/components/deactivated/metal.py +0 -54
- langflow/components/deactivated/multi_query.py +0 -59
- langflow/components/deactivated/retriever.py +0 -43
- langflow/components/deactivated/selective_passthrough.py +0 -77
- langflow/components/deactivated/should_run_next.py +0 -40
- langflow/components/deactivated/split_text.py +0 -63
- langflow/components/deactivated/store_message.py +0 -24
- langflow/components/deactivated/sub_flow.py +0 -124
- langflow/components/deactivated/vectara_self_query.py +0 -76
- langflow/components/deactivated/vector_store.py +0 -24
- langflow/components/deepseek/__init__.py +0 -34
- langflow/components/deepseek/deepseek.py +0 -136
- langflow/components/docling/__init__.py +0 -43
- langflow/components/docling/chunk_docling_document.py +0 -186
- langflow/components/docling/docling_inline.py +0 -235
- langflow/components/docling/docling_remote.py +0 -193
- langflow/components/docling/export_docling_document.py +0 -117
- langflow/components/documentloaders/__init__.py +0 -0
- langflow/components/duckduckgo/__init__.py +0 -3
- langflow/components/duckduckgo/duck_duck_go_search_run.py +0 -92
- langflow/components/elastic/__init__.py +0 -37
- langflow/components/elastic/elasticsearch.py +0 -267
- langflow/components/elastic/opensearch.py +0 -243
- langflow/components/embeddings/__init__.py +0 -37
- langflow/components/embeddings/similarity.py +0 -76
- langflow/components/embeddings/text_embedder.py +0 -64
- langflow/components/exa/__init__.py +0 -3
- langflow/components/exa/exa_search.py +0 -68
- langflow/components/firecrawl/__init__.py +0 -43
- langflow/components/firecrawl/firecrawl_crawl_api.py +0 -88
- langflow/components/firecrawl/firecrawl_extract_api.py +0 -136
- langflow/components/firecrawl/firecrawl_map_api.py +0 -89
- langflow/components/firecrawl/firecrawl_scrape_api.py +0 -73
- langflow/components/git/__init__.py +0 -4
- langflow/components/git/git.py +0 -262
- langflow/components/git/gitextractor.py +0 -196
- langflow/components/glean/__init__.py +0 -3
- langflow/components/glean/glean_search_api.py +0 -173
- langflow/components/google/__init__.py +0 -17
- langflow/components/google/gmail.py +0 -192
- langflow/components/google/google_bq_sql_executor.py +0 -157
- langflow/components/google/google_drive.py +0 -92
- langflow/components/google/google_drive_search.py +0 -152
- langflow/components/google/google_generative_ai.py +0 -147
- langflow/components/google/google_generative_ai_embeddings.py +0 -141
- langflow/components/google/google_oauth_token.py +0 -89
- langflow/components/google/google_search_api_core.py +0 -68
- langflow/components/google/google_serper_api_core.py +0 -74
- langflow/components/groq/__init__.py +0 -34
- langflow/components/groq/groq.py +0 -140
- langflow/components/helpers/__init__.py +0 -52
- langflow/components/helpers/calculator_core.py +0 -89
- langflow/components/helpers/create_list.py +0 -40
- langflow/components/helpers/current_date.py +0 -42
- langflow/components/helpers/id_generator.py +0 -42
- langflow/components/helpers/memory.py +0 -251
- langflow/components/helpers/output_parser.py +0 -45
- langflow/components/helpers/store_message.py +0 -90
- langflow/components/homeassistant/__init__.py +0 -7
- langflow/components/homeassistant/home_assistant_control.py +0 -152
- langflow/components/homeassistant/list_home_assistant_states.py +0 -137
- langflow/components/huggingface/__init__.py +0 -37
- langflow/components/huggingface/huggingface.py +0 -197
- langflow/components/huggingface/huggingface_inference_api.py +0 -106
- langflow/components/ibm/__init__.py +0 -34
- langflow/components/ibm/watsonx.py +0 -203
- langflow/components/ibm/watsonx_embeddings.py +0 -135
- langflow/components/icosacomputing/__init__.py +0 -5
- langflow/components/icosacomputing/combinatorial_reasoner.py +0 -84
- langflow/components/input_output/__init__.py +0 -38
- langflow/components/input_output/chat.py +0 -120
- langflow/components/input_output/chat_output.py +0 -200
- langflow/components/input_output/text.py +0 -27
- langflow/components/input_output/text_output.py +0 -29
- langflow/components/jigsawstack/__init__.py +0 -23
- langflow/components/jigsawstack/ai_scrape.py +0 -126
- langflow/components/jigsawstack/ai_web_search.py +0 -136
- langflow/components/jigsawstack/file_read.py +0 -115
- langflow/components/jigsawstack/file_upload.py +0 -94
- langflow/components/jigsawstack/image_generation.py +0 -205
- langflow/components/jigsawstack/nsfw.py +0 -60
- langflow/components/jigsawstack/object_detection.py +0 -124
- langflow/components/jigsawstack/sentiment.py +0 -112
- langflow/components/jigsawstack/text_to_sql.py +0 -90
- langflow/components/jigsawstack/text_translate.py +0 -77
- langflow/components/jigsawstack/vocr.py +0 -107
- langflow/components/langchain_utilities/__init__.py +0 -109
- langflow/components/langchain_utilities/character.py +0 -53
- langflow/components/langchain_utilities/conversation.py +0 -52
- langflow/components/langchain_utilities/csv_agent.py +0 -107
- langflow/components/langchain_utilities/fake_embeddings.py +0 -26
- langflow/components/langchain_utilities/html_link_extractor.py +0 -35
- langflow/components/langchain_utilities/json_agent.py +0 -45
- langflow/components/langchain_utilities/langchain_hub.py +0 -126
- langflow/components/langchain_utilities/language_recursive.py +0 -49
- langflow/components/langchain_utilities/language_semantic.py +0 -138
- langflow/components/langchain_utilities/llm_checker.py +0 -39
- langflow/components/langchain_utilities/llm_math.py +0 -42
- langflow/components/langchain_utilities/natural_language.py +0 -61
- langflow/components/langchain_utilities/openai_tools.py +0 -53
- langflow/components/langchain_utilities/openapi.py +0 -48
- langflow/components/langchain_utilities/recursive_character.py +0 -60
- langflow/components/langchain_utilities/retrieval_qa.py +0 -83
- langflow/components/langchain_utilities/runnable_executor.py +0 -137
- langflow/components/langchain_utilities/self_query.py +0 -80
- langflow/components/langchain_utilities/spider.py +0 -142
- langflow/components/langchain_utilities/sql.py +0 -40
- langflow/components/langchain_utilities/sql_database.py +0 -35
- langflow/components/langchain_utilities/sql_generator.py +0 -78
- langflow/components/langchain_utilities/tool_calling.py +0 -59
- langflow/components/langchain_utilities/vector_store_info.py +0 -49
- langflow/components/langchain_utilities/vector_store_router.py +0 -33
- langflow/components/langchain_utilities/xml_agent.py +0 -71
- langflow/components/langwatch/__init__.py +0 -3
- langflow/components/langwatch/langwatch.py +0 -278
- langflow/components/link_extractors/__init__.py +0 -0
- langflow/components/lmstudio/__init__.py +0 -34
- langflow/components/lmstudio/lmstudioembeddings.py +0 -89
- langflow/components/lmstudio/lmstudiomodel.py +0 -129
- langflow/components/logic/__init__.py +0 -52
- langflow/components/logic/conditional_router.py +0 -171
- langflow/components/logic/data_conditional_router.py +0 -125
- langflow/components/logic/flow_tool.py +0 -110
- langflow/components/logic/listen.py +0 -29
- langflow/components/logic/loop.py +0 -125
- langflow/components/logic/notify.py +0 -88
- langflow/components/logic/pass_message.py +0 -35
- langflow/components/logic/run_flow.py +0 -71
- langflow/components/logic/sub_flow.py +0 -114
- langflow/components/maritalk/__init__.py +0 -32
- langflow/components/maritalk/maritalk.py +0 -52
- langflow/components/mem0/__init__.py +0 -3
- langflow/components/mem0/mem0_chat_memory.py +0 -136
- langflow/components/milvus/__init__.py +0 -34
- langflow/components/milvus/milvus.py +0 -115
- langflow/components/mistral/__init__.py +0 -37
- langflow/components/mistral/mistral.py +0 -114
- langflow/components/mistral/mistral_embeddings.py +0 -58
- langflow/components/models/__init__.py +0 -34
- langflow/components/models/embedding_model.py +0 -114
- langflow/components/models/language_model.py +0 -144
- langflow/components/mongodb/__init__.py +0 -34
- langflow/components/mongodb/mongodb_atlas.py +0 -213
- langflow/components/needle/__init__.py +0 -3
- langflow/components/needle/needle.py +0 -104
- langflow/components/notdiamond/__init__.py +0 -36
- langflow/components/notdiamond/notdiamond.py +0 -228
- langflow/components/novita/__init__.py +0 -32
- langflow/components/novita/novita.py +0 -130
- langflow/components/nvidia/__init__.py +0 -57
- langflow/components/nvidia/nvidia.py +0 -157
- langflow/components/nvidia/nvidia_embedding.py +0 -77
- langflow/components/nvidia/nvidia_ingest.py +0 -317
- langflow/components/nvidia/nvidia_rerank.py +0 -63
- langflow/components/nvidia/system_assist.py +0 -65
- langflow/components/olivya/__init__.py +0 -3
- langflow/components/olivya/olivya.py +0 -116
- langflow/components/ollama/__init__.py +0 -37
- langflow/components/ollama/ollama.py +0 -330
- langflow/components/ollama/ollama_embeddings.py +0 -106
- langflow/components/openai/__init__.py +0 -37
- langflow/components/openai/openai.py +0 -100
- langflow/components/openai/openai_chat_model.py +0 -158
- langflow/components/openrouter/__init__.py +0 -32
- langflow/components/openrouter/openrouter.py +0 -202
- langflow/components/output_parsers/__init__.py +0 -0
- langflow/components/perplexity/__init__.py +0 -34
- langflow/components/perplexity/perplexity.py +0 -75
- langflow/components/pgvector/__init__.py +0 -34
- langflow/components/pgvector/pgvector.py +0 -72
- langflow/components/pinecone/__init__.py +0 -34
- langflow/components/pinecone/pinecone.py +0 -134
- langflow/components/processing/alter_metadata.py +0 -108
- langflow/components/processing/batch_run.py +0 -205
- langflow/components/processing/combine_text.py +0 -39
- langflow/components/processing/create_data.py +0 -110
- langflow/components/processing/data_operations.py +0 -438
- langflow/components/processing/data_to_dataframe.py +0 -70
- langflow/components/processing/dataframe_operations.py +0 -321
- langflow/components/processing/extract_key.py +0 -53
- langflow/components/processing/filter_data.py +0 -42
- langflow/components/processing/filter_data_values.py +0 -88
- langflow/components/processing/json_cleaner.py +0 -103
- langflow/components/processing/lambda_filter.py +0 -154
- langflow/components/processing/llm_router.py +0 -499
- langflow/components/processing/merge_data.py +0 -90
- langflow/components/processing/message_to_data.py +0 -36
- langflow/components/processing/parse_data.py +0 -70
- langflow/components/processing/parse_dataframe.py +0 -68
- langflow/components/processing/parse_json_data.py +0 -90
- langflow/components/processing/parser.py +0 -143
- langflow/components/processing/prompt.py +0 -67
- langflow/components/processing/python_repl_core.py +0 -98
- langflow/components/processing/regex.py +0 -82
- langflow/components/processing/save_file.py +0 -208
- langflow/components/processing/select_data.py +0 -48
- langflow/components/processing/split_text.py +0 -141
- langflow/components/processing/structured_output.py +0 -202
- langflow/components/processing/update_data.py +0 -160
- langflow/components/prototypes/__init__.py +0 -34
- langflow/components/prototypes/python_function.py +0 -73
- langflow/components/qdrant/__init__.py +0 -34
- langflow/components/qdrant/qdrant.py +0 -109
- langflow/components/redis/__init__.py +0 -37
- langflow/components/redis/redis.py +0 -89
- langflow/components/redis/redis_chat.py +0 -43
- langflow/components/sambanova/__init__.py +0 -32
- langflow/components/sambanova/sambanova.py +0 -84
- langflow/components/scrapegraph/__init__.py +0 -40
- langflow/components/scrapegraph/scrapegraph_markdownify_api.py +0 -64
- langflow/components/scrapegraph/scrapegraph_search_api.py +0 -64
- langflow/components/scrapegraph/scrapegraph_smart_scraper_api.py +0 -71
- langflow/components/searchapi/__init__.py +0 -36
- langflow/components/searchapi/search.py +0 -79
- langflow/components/serpapi/__init__.py +0 -3
- langflow/components/serpapi/serp.py +0 -115
- langflow/components/serper/__init__.py +0 -3
- langflow/components/serper/google_serper_api_core.py +0 -74
- langflow/components/supabase/__init__.py +0 -37
- langflow/components/supabase/supabase.py +0 -76
- langflow/components/tavily/__init__.py +0 -4
- langflow/components/tavily/tavily_extract.py +0 -117
- langflow/components/tavily/tavily_search.py +0 -212
- langflow/components/textsplitters/__init__.py +0 -0
- langflow/components/toolkits/__init__.py +0 -0
- langflow/components/tools/__init__.py +0 -72
- langflow/components/tools/calculator.py +0 -103
- langflow/components/tools/google_search_api.py +0 -45
- langflow/components/tools/google_serper_api.py +0 -115
- langflow/components/tools/python_code_structured_tool.py +0 -327
- langflow/components/tools/python_repl.py +0 -97
- langflow/components/tools/search_api.py +0 -87
- langflow/components/tools/searxng.py +0 -145
- langflow/components/tools/serp_api.py +0 -119
- langflow/components/tools/tavily_search_tool.py +0 -344
- langflow/components/tools/wikidata_api.py +0 -102
- langflow/components/tools/wikipedia_api.py +0 -49
- langflow/components/tools/yahoo_finance.py +0 -124
- langflow/components/twelvelabs/__init__.py +0 -52
- langflow/components/twelvelabs/convert_astra_results.py +0 -84
- langflow/components/twelvelabs/pegasus_index.py +0 -311
- langflow/components/twelvelabs/split_video.py +0 -291
- langflow/components/twelvelabs/text_embeddings.py +0 -57
- langflow/components/twelvelabs/twelvelabs_pegasus.py +0 -408
- langflow/components/twelvelabs/video_embeddings.py +0 -100
- langflow/components/twelvelabs/video_file.py +0 -179
- langflow/components/unstructured/__init__.py +0 -3
- langflow/components/unstructured/unstructured.py +0 -121
- langflow/components/upstash/__init__.py +0 -34
- langflow/components/upstash/upstash.py +0 -124
- langflow/components/vectara/__init__.py +0 -37
- langflow/components/vectara/vectara.py +0 -97
- langflow/components/vectara/vectara_rag.py +0 -164
- langflow/components/vectorstores/__init__.py +0 -34
- langflow/components/vectorstores/local_db.py +0 -261
- langflow/components/vertexai/__init__.py +0 -37
- langflow/components/vertexai/vertexai.py +0 -71
- langflow/components/vertexai/vertexai_embeddings.py +0 -67
- langflow/components/weaviate/__init__.py +0 -34
- langflow/components/weaviate/weaviate.py +0 -89
- langflow/components/wikipedia/__init__.py +0 -4
- langflow/components/wikipedia/wikidata.py +0 -86
- langflow/components/wikipedia/wikipedia.py +0 -53
- langflow/components/wolframalpha/__init__.py +0 -3
- langflow/components/wolframalpha/wolfram_alpha_api.py +0 -54
- langflow/components/xai/__init__.py +0 -32
- langflow/components/xai/xai.py +0 -167
- langflow/components/yahoosearch/__init__.py +0 -3
- langflow/components/yahoosearch/yahoo.py +0 -137
- langflow/components/youtube/__init__.py +0 -52
- langflow/components/youtube/channel.py +0 -227
- langflow/components/youtube/comments.py +0 -231
- langflow/components/youtube/playlist.py +0 -33
- langflow/components/youtube/search.py +0 -120
- langflow/components/youtube/trending.py +0 -285
- langflow/components/youtube/video_details.py +0 -263
- langflow/components/youtube/youtube_transcripts.py +0 -118
- langflow/components/zep/__init__.py +0 -3
- langflow/components/zep/zep.py +0 -44
- langflow/custom/attributes.py +0 -86
- langflow/custom/code_parser/__init__.py +0 -3
- langflow/custom/code_parser/code_parser.py +0 -361
- langflow/custom/custom_component/base_component.py +0 -118
- langflow/custom/dependency_analyzer.py +0 -165
- langflow/custom/directory_reader/__init__.py +0 -3
- langflow/custom/directory_reader/directory_reader.py +0 -359
- langflow/custom/directory_reader/utils.py +0 -171
- langflow/custom/eval.py +0 -12
- langflow/custom/schema.py +0 -32
- langflow/custom/tree_visitor.py +0 -21
- langflow/frontend/assets/lazyIconImports-Ci-S9xBA.js +0 -2
- langflow/graph/edge/__init__.py +0 -0
- langflow/graph/edge/base.py +0 -277
- langflow/graph/edge/schema.py +0 -119
- langflow/graph/edge/utils.py +0 -0
- langflow/graph/graph/__init__.py +0 -0
- langflow/graph/graph/ascii.py +0 -202
- langflow/graph/graph/base.py +0 -2185
- langflow/graph/graph/constants.py +0 -58
- langflow/graph/graph/runnable_vertices_manager.py +0 -133
- langflow/graph/graph/schema.py +0 -53
- langflow/graph/graph/state_model.py +0 -66
- langflow/graph/graph/utils.py +0 -1024
- langflow/graph/schema.py +0 -75
- langflow/graph/state/__init__.py +0 -0
- langflow/graph/state/model.py +0 -237
- langflow/graph/utils.py +0 -229
- langflow/graph/vertex/__init__.py +0 -0
- langflow/graph/vertex/base.py +0 -811
- langflow/graph/vertex/constants.py +0 -0
- langflow/graph/vertex/exceptions.py +0 -4
- langflow/graph/vertex/param_handler.py +0 -255
- langflow/graph/vertex/schema.py +0 -26
- langflow/graph/vertex/utils.py +0 -19
- langflow/graph/vertex/vertex_types.py +0 -489
- langflow/legacy_custom/__init__.py +0 -0
- langflow/legacy_custom/customs.py +0 -16
- langflow/load/load.py +0 -250
- langflow/logging/logger.py +0 -369
- langflow/processing/utils.py +0 -25
- langflow/schema/openai_responses_schemas.py +0 -74
- langflow/schema/serialize.py +0 -13
- langflow/services/chat/config.py +0 -2
- langflow/services/settings/auth.py +0 -130
- langflow/services/settings/constants.py +0 -31
- langflow/services/settings/manager.py +0 -49
- langflow/services/settings/utils.py +0 -40
- langflow/template/field/prompt.py +0 -2
- langflow/template/frontend_node/__init__.py +0 -6
- langflow/template/frontend_node/base.py +0 -212
- langflow/template/frontend_node/constants.py +0 -65
- langflow/template/frontend_node/custom_components.py +0 -97
- langflow/template/template/__init__.py +0 -0
- langflow/template/template/base.py +0 -99
- langflow/utils/async_helpers.py +0 -42
- langflow/utils/concurrency.py +0 -60
- langflow/utils/util_strings.py +0 -56
- langflow_base_nightly-0.5.1.dev3.dist-info/RECORD +0 -1159
- {langflow_base_nightly-0.5.1.dev3.dist-info → langflow_base_nightly-0.5.1.dev5.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-0.5.1.dev3.dist-info → langflow_base_nightly-0.5.1.dev5.dist-info}/entry_points.txt +0 -0
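Many of the deletions above remove whole implementations from `langflow.*` while the surviving modules shrink to a few lines (for example, `langflow/components/agents.py` is a new 6-line file, and `langflow/components/__init__.py` has 259 lines removed and 7 added), and the metadata hunks below repoint component modules at the external `lfx` package. A plausible reading is that these small files are thin re-export shims; the sketch below is a hypothetical illustration under that assumption, not the actual file contents, which this diff does not show.

```python
# Hypothetical shim for langflow/components/agents.py: keep the old
# langflow import path working by re-exporting from lfx.
# Assumes lfx.components.agents exists and hosts the moved implementations.
from lfx.components.agents import *  # noqa: F401,F403
```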
@@ -144,7 +144,7 @@
         "legacy": false,
         "lf_version": "1.4.2",
         "metadata": {
-          "code_hash": "
+          "code_hash": "3bc6aee68a53",
           "dependencies": {
             "dependencies": [
               {
@@ -164,13 +164,13 @@
                 "version": "2.10.6"
               },
               {
-                "name": "
+                "name": "lfx",
                 "version": null
               }
             ],
             "total_dependencies": 5
           },
-          "module": "
+          "module": "lfx.components.apify.apify_actor.ApifyActorsComponent"
         },
         "minimized": false,
         "output_types": [],
@@ -260,7 +260,7 @@
         "show": true,
         "title_case": false,
         "type": "code",
-        "value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing import Tool\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom langflow.schema.data import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. \"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. \"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"<n/a>\")\n\n @staticmethod\n def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
|
|
263
|
+
"value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.field_typing import Tool\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom lfx.schema.data import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. 
\"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. 
\"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"<n/a>\")\n\n 
@staticmethod\n def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
|
|
264
264
|
},
|
|
265
265
|
"dataset_fields": {
|
|
266
266
|
"_input_type": "MultilineInput",
|
|
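The component code above ships two small pure helpers, flatten and get_nested_value, whose behavior the input descriptions reference ('text, metadata.title' becomes 'metadata_title'). A minimal standalone sketch, with the function bodies lifted from the component and an invented sample record:

    # Standalone sketch of the flatten/get_nested_value helpers from the
    # ApifyActorsComponent above; the sample record is invented.
    from typing import Any

    def flatten(d: dict) -> dict:
        """Flatten a nested dictionary, joining keys with underscores."""
        def items():
            for key, value in d.items():
                if isinstance(value, dict):
                    for subkey, subvalue in flatten(value).items():
                        yield key + "_" + subkey, subvalue
                else:
                    yield key, value
        return dict(items())

    def get_nested_value(data: dict[str, Any], key: str) -> Any:
        """Walk a dotted key path, returning None if any segment is missing."""
        value = data
        for k in key.split("."):
            if not isinstance(value, dict) or k not in value:
                return None
            value = value[k]
        return value

    record = {"text": "page content", "metadata": {"title": "page title"}}
    assert flatten(record) == {"text": "page content", "metadata_title": "page title"}
    assert get_nested_value(record, "metadata.title") == "page title"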
@@ -375,7 +375,7 @@
  "legacy": false,
  "lf_version": "1.4.2",
  "metadata": {
- "code_hash": "
+ "code_hash": "3bc6aee68a53",
  "dependencies": {
  "dependencies": [
  {
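This and the following hunks fill in the previously truncated code_hash metadata with 12-hex-character digests. The hashing scheme itself is not part of this diff; a hypothetical sketch, assuming a truncated SHA-256 over the component's source text:

    # Hypothetical sketch: the diff records a 12-hex-char "code_hash" per
    # component, but how it is computed is not shown in this diff.
    # One plausible implementation is a truncated SHA-256 of the source.
    import hashlib

    def short_code_hash(source: str, length: int = 12) -> str:
        return hashlib.sha256(source.encode("utf-8")).hexdigest()[:length]

    print(short_code_hash("class ApifyActorsComponent: ..."))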
@@ -395,13 +395,13 @@
  "version": "2.10.6"
  },
  {
- "name": "
+ "name": "lfx",
  "version": null
  }
  ],
  "total_dependencies": 5
  },
- "module": "
+ "module": "lfx.components.apify.apify_actor.ApifyActorsComponent"
  },
  "minimized": false,
  "output_types": [],
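The new module metadata records a fully qualified class path for each node. How Langflow consumes it is not shown in this diff; a minimal sketch of resolving such a path with importlib, under that assumption:

    # Sketch of resolving a "module" metadata path to a class; the error
    # handling and usage are illustrative, not Langflow's actual loader.
    import importlib

    def resolve_component(path: str) -> type:
        module_name, _, class_name = path.rpartition(".")
        module = importlib.import_module(module_name)
        return getattr(module, class_name)

    # Requires the lfx package to be installed:
    # cls = resolve_component("lfx.components.apify.apify_actor.ApifyActorsComponent")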
@@ -491,7 +491,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing import Tool\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom langflow.schema.data import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. \"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. \"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"<n/a>\")\n\n @staticmethod\n def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
+ "value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.field_typing import Tool\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom lfx.schema.data import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. \"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. \"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"<n/a>\")\n\n @staticmethod\n def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
  },
  "dataset_fields": {
  "_input_type": "MultilineInput",
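One detail of the component code repeated above: tool names derived from Actor IDs may only contain letters, digits, underscores, and dashes. A standalone sketch of that sanitization, with the body copied from actor_id_to_tool_name:

    # Sketch of the tool-name sanitization used by the component above:
    # any character outside [A-Za-z0-9_-] is replaced with an underscore.
    import string

    def actor_id_to_tool_name(actor_id: str) -> str:
        valid_chars = string.ascii_letters + string.digits + "_-"
        return "".join(c if c in valid_chars else "_" for c in actor_id)

    assert actor_id_to_tool_name("apify/website-content-crawler") == "apify_website-content-crawler"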
@@ -693,17 +693,17 @@
  "legacy": false,
  "lf_version": "1.4.2",
  "metadata": {
- "code_hash": "
+ "code_hash": "715a37648834",
  "dependencies": {
  "dependencies": [
  {
- "name": "
+ "name": "lfx",
  "version": null
  }
  ],
  "total_dependencies": 1
  },
- "module": "
+ "module": "lfx.components.input_output.chat.ChatInput"
  },
  "minimized": true,
  "output_types": [],
@@ -788,7 +788,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "from
+ "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
  },
  "files": {
  "_input_type": "FileInput",
@@ -1017,7 +1017,7 @@
  "legacy": false,
  "lf_version": "1.4.2",
  "metadata": {
- "code_hash": "
+ "code_hash": "9619107fecd1",
  "dependencies": {
  "dependencies": [
  {
@@ -1029,13 +1029,13 @@
  "version": "0.116.1"
  },
  {
- "name": "
+ "name": "lfx",
  "version": null
  }
  ],
  "total_dependencies": 3
  },
- "module": "
+ "module": "lfx.components.input_output.chat_output.ChatOutput"
  },
  "minimized": true,
  "output_types": [],
@@ -1138,7 +1138,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
  },
  "data_template": {
  "_input_type": "MessageTextInput",
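The updated ChatOutput code includes _serialize_data, which pretty-prints a payload with orjson and wraps it in a Markdown code block. A standalone sketch of the same steps; orjson and fastapi's jsonable_encoder are the imports the component itself uses, while the sample payload is invented:

    # Mirrors ChatOutput._serialize_data above: encode a payload with
    # orjson (2-space indent) and wrap it in a Markdown json block.
    import orjson
    from fastapi.encoders import jsonable_encoder

    def serialize_payload(payload: dict) -> str:
        serializable = jsonable_encoder(payload)
        json_bytes = orjson.dumps(serializable, option=orjson.OPT_INDENT_2)
        return "```json\n" + json_bytes.decode("utf-8") + "\n```"

    print(serialize_payload({"text": "hello", "score": 0.9}))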
@@ -1379,7 +1379,27 @@
  "frozen": false,
  "icon": "bot",
  "legacy": false,
- "metadata": {
+ "metadata": {
+ "code_hash": "1a4bc0f629fe",
+ "dependencies": {
+ "dependencies": [
+ {
+ "name": "langchain_core",
+ "version": "0.3.75"
+ },
+ {
+ "name": "pydantic",
+ "version": "2.10.6"
+ },
+ {
+ "name": "lfx",
+ "version": null
+ }
+ ],
+ "total_dependencies": 3
+ },
+ "module": "lfx.components.agents.agent.AgentComponent"
+ },
  "minimized": false,
  "output_types": [],
  "outputs": [
@@ -1388,17 +1408,28 @@
  "cache": true,
  "display_name": "Response",
  "group_outputs": false,
- "hidden": null,
  "method": "message_response",
  "name": "response",
- "options": null,
- "required_inputs": null,
  "selected": "Message",
  "tool_mode": true,
  "types": [
  "Message"
  ],
  "value": "__UNDEFINED__"
+ },
+ {
+ "allows_loop": false,
+ "cache": true,
+ "display_name": "Structured Response",
+ "group_outputs": false,
+ "method": "json_response",
+ "name": "structured_response",
+ "selected": "Data",
+ "tool_mode": false,
+ "types": [
+ "Data"
+ ],
+ "value": "__UNDEFINED__"
  }
  ],
  "pinned": false,
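This hunk adds a second "Structured Response" output backed by a json_response method. The Agent component code later in this diff parses the model's text with json.loads and falls back to a regex search for a JSON object; a minimal standalone sketch of that parse-then-fallback pattern, with error handling simplified:

    # Sketch of the JSON-extraction fallback used by the Agent component
    # below: try a direct parse, then search for an embedded {...} object.
    import json
    import re

    def extract_json(content: str):
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            match = re.search(r"\{.*\}", content, re.DOTALL)
            if match:
                try:
                    return json.loads(match.group())
                except json.JSONDecodeError:
                    pass
        return None

    assert extract_json('prefix {"a": 1} suffix') == {"a": 1}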
@@ -1504,7 +1535,7 @@
  "password": true,
  "placeholder": "",
  "real_time_refresh": true,
- "required":
+ "required": false,
  "show": true,
  "title_case": false,
  "type": "str",
@@ -1526,7 +1557,32 @@
|
|
|
1526
1557
|
"show": true,
|
|
1527
1558
|
"title_case": false,
|
|
1528
1559
|
"type": "code",
|
|
- "value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool\nfrom pydantic import ValidationError\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n    ALL_PROVIDER_FIELDS,\n    MODEL_DYNAMIC_UPDATE_FIELDS,\n    MODEL_PROVIDERS,\n    MODEL_PROVIDERS_DICT,\n    MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output, TableInput\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n    component_input.advanced = True\n    return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n    display_name: str = \"Agent\"\n    description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n    documentation: str = \"https://docs.langflow.org/agents\"\n    icon = \"bot\"\n    beta = False\n    name = \"Agent\"\n\n    memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n    # Filter out json_mode from OpenAI inputs since we handle structured output differently\n    openai_inputs_filtered = [\n        input_field\n        for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n        if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n    ]\n\n    inputs = [\n        DropdownInput(\n            name=\"agent_llm\",\n            display_name=\"Model Provider\",\n            info=\"The provider of the language model that the agent will use to generate responses.\",\n            options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n            value=\"OpenAI\",\n            real_time_refresh=True,\n            input_types=[],\n            options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST] + [{\"icon\": \"brain\"}],\n        ),\n        *openai_inputs_filtered,\n        MultilineInput(\n            name=\"system_prompt\",\n            display_name=\"Agent Instructions\",\n            info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n            value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n            advanced=False,\n        ),\n        IntInput(\n            name=\"n_messages\",\n            display_name=\"Number of Chat History Messages\",\n            value=100,\n            info=\"Number of chat history messages to retrieve.\",\n            advanced=True,\n            show=True,\n        ),\n        MultilineInput(\n            name=\"format_instructions\",\n            display_name=\"Output Format Instructions\",\n            info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n            value=(\n                \"You are an AI that extracts structured JSON objects from unstructured text. \"\n                \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n                \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n                \"Fill missing or ambiguous values with defaults: null for missing values. \"\n                \"Remove exact duplicates but keep variations that have different field values. \"\n                \"Always return valid JSON in the expected format, never throw errors. \"\n                \"If multiple objects can be extracted, return them all in the structured format.\"\n            ),\n            advanced=True,\n        ),\n        TableInput(\n            name=\"output_schema\",\n            display_name=\"Output Schema\",\n            info=(\n                \"Schema Validation: Define the structure and data types for structured output. \"\n                \"No validation if no output schema.\"\n            ),\n            advanced=True,\n            required=False,\n            value=[],\n            table_schema=[\n                {\n                    \"name\": \"name\",\n                    \"display_name\": \"Name\",\n                    \"type\": \"str\",\n                    \"description\": \"Specify the name of the output field.\",\n                    \"default\": \"field\",\n                    \"edit_mode\": EditMode.INLINE,\n                },\n                {\n                    \"name\": \"description\",\n                    \"display_name\": \"Description\",\n                    \"type\": \"str\",\n                    \"description\": \"Describe the purpose of the output field.\",\n                    \"default\": \"description of field\",\n                    \"edit_mode\": EditMode.POPOVER,\n                },\n                {\n                    \"name\": \"type\",\n                    \"display_name\": \"Type\",\n                    \"type\": \"str\",\n                    \"edit_mode\": EditMode.INLINE,\n                    \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n                    \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n                    \"default\": \"str\",\n                },\n                {\n                    \"name\": \"multiple\",\n                    \"display_name\": \"As List\",\n                    \"type\": \"boolean\",\n                    \"description\": \"Set to True if this output field should be a list of the specified type.\",\n                    \"default\": \"False\",\n                    \"edit_mode\": EditMode.INLINE,\n                },\n            ],\n        ),\n        *LCToolsAgentComponent._base_inputs,\n        # removed memory inputs from agent component\n        # *memory_inputs,\n        BoolInput(\n            name=\"add_current_date_tool\",\n            display_name=\"Current Date\",\n            advanced=True,\n            info=\"If true, will add a tool to the agent that returns the current date.\",\n            value=True,\n        ),\n    ]\n    outputs = [\n        Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n        Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n    ]\n\n    async def get_agent_requirements(self):\n        \"\"\"Get the agent requirements for the agent.\"\"\"\n        llm_model, display_name = await self.get_llm()\n        if llm_model is None:\n            msg = \"No language model selected. Please choose a model to proceed.\"\n            raise ValueError(msg)\n        self.model_name = get_model_name(llm_model, display_name=display_name)\n\n        # Get memory data\n        self.chat_history = await self.get_memory_data()\n        if isinstance(self.chat_history, Message):\n            self.chat_history = [self.chat_history]\n\n        # Add current date tool if enabled\n        if self.add_current_date_tool:\n            if not isinstance(self.tools, list):  # type: ignore[has-type]\n                self.tools = []\n            current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n            if not isinstance(current_date_tool, StructuredTool):\n                msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n                raise TypeError(msg)\n            self.tools.append(current_date_tool)\n        return llm_model, self.chat_history, self.tools\n\n    async def message_response(self) -> Message:\n        try:\n            llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n            # Set up and run agent\n            self.set(\n                llm=llm_model,\n                tools=self.tools or [],\n                chat_history=self.chat_history,\n                input_value=self.input_value,\n                system_prompt=self.system_prompt,\n            )\n            agent = self.create_agent_runnable()\n            result = await self.run_agent(agent)\n\n            # Store result for potential JSON output\n            self._agent_result = result\n\n        except (ValueError, TypeError, KeyError) as e:\n            await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n            raise\n        except ExceptionWithMessageError as e:\n            await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n            raise\n        # Avoid catching blind Exception; let truly unexpected exceptions propagate\n        except Exception as e:\n            await logger.aerror(f\"Unexpected error: {e!s}\")\n            raise\n        else:\n            return result\n\n    def _preprocess_schema(self, schema):\n        \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n        processed_schema = []\n        for field in schema:\n            processed_field = {\n                \"name\": str(field.get(\"name\", \"field\")),\n                \"type\": str(field.get(\"type\", \"str\")),\n                \"description\": str(field.get(\"description\", \"\")),\n                \"multiple\": field.get(\"multiple\", False),\n            }\n            # Ensure multiple is handled correctly\n            if isinstance(processed_field[\"multiple\"], str):\n                processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\"true\", \"1\", \"t\", \"y\", \"yes\"]\n            processed_schema.append(processed_field)\n        return processed_schema\n\n    async def build_structured_output_base(self, content: str):\n        \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n        json_pattern = r\"\\{.*\\}\"\n        schema_error_msg = \"Try setting an output schema\"\n\n        # Try to parse content as JSON first\n        json_data = None\n        try:\n            json_data = json.loads(content)\n        except json.JSONDecodeError:\n            json_match = re.search(json_pattern, content, re.DOTALL)\n            if json_match:\n                try:\n                    json_data = json.loads(json_match.group())\n                except json.JSONDecodeError:\n                    return {\"content\": content, \"error\": schema_error_msg}\n            else:\n                return {\"content\": content, \"error\": schema_error_msg}\n\n        # If no output schema provided, return parsed JSON without validation\n        if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n            return json_data\n\n        # Use BaseModel validation with schema\n        try:\n            processed_schema = self._preprocess_schema(self.output_schema)\n            output_model = build_model_from_schema(processed_schema)\n\n            # Validate against the schema\n            if isinstance(json_data, list):\n                # Multiple objects\n                validated_objects = []\n                for item in json_data:\n                    try:\n                        validated_obj = output_model.model_validate(item)\n                        validated_objects.append(validated_obj.model_dump())\n                    except ValidationError as e:\n                        await logger.aerror(f\"Validation error for item: {e}\")\n                        # Include invalid items with error info\n                        validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n                return validated_objects\n\n            # Single object\n            try:\n                validated_obj = output_model.model_validate(json_data)\n                return [validated_obj.model_dump()]  # Return as list for consistency\n            except ValidationError as e:\n                await logger.aerror(f\"Validation error: {e}\")\n                return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n        except (TypeError, ValueError) as e:\n            await logger.aerror(f\"Error building structured output: {e}\")\n            # Fallback to parsed JSON without validation\n            return json_data\n\n    async def json_response(self) -> Data:\n        \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n        # Always use structured chat agent for JSON response mode for better JSON formatting\n        try:\n            system_components = []\n\n            # 1. Agent Instructions (system_prompt)\n            agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n            if agent_instructions:\n                system_components.append(f\"{agent_instructions}\")\n\n            # 2. Format Instructions\n            format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n            if format_instructions:\n                system_components.append(f\"Format instructions: {format_instructions}\")\n\n            # 3. Schema Information from BaseModel\n            if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n                try:\n                    processed_schema = self._preprocess_schema(self.output_schema)\n                    output_model = build_model_from_schema(processed_schema)\n                    schema_dict = output_model.model_json_schema()\n                    schema_info = (\n                        \"You are given some text that may include format instructions, \"\n                        \"explanations, or other content alongside a JSON schema.\\n\\n\"\n                        \"Your task:\\n\"\n                        \"- Extract only the JSON schema.\\n\"\n                        \"- Return it as valid JSON.\\n\"\n                        \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n                        \"Input:\\n\"\n                        f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n                        \"Output (only JSON schema):\"\n                    )\n                    system_components.append(schema_info)\n                except (ValidationError, ValueError, TypeError, KeyError) as e:\n                    await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n            # Combine all components\n            combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n            llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n            self.set(\n                llm=llm_model,\n                tools=self.tools or [],\n                chat_history=self.chat_history,\n                input_value=self.input_value,\n                system_prompt=combined_instructions,\n            )\n\n            # Create and run structured chat agent\n            try:\n                structured_agent = self.create_agent_runnable()\n            except (NotImplementedError, ValueError, TypeError) as e:\n                await logger.aerror(f\"Error with structured chat agent: {e}\")\n                raise\n            try:\n                result = await self.run_agent(structured_agent)\n            except (ExceptionWithMessageError, ValueError, TypeError, RuntimeError) as e:\n                await logger.aerror(f\"Error with structured agent result: {e}\")\n                raise\n            # Extract content from structured agent result\n            if hasattr(result, \"content\"):\n                content = result.content\n            elif hasattr(result, \"text\"):\n                content = result.text\n            else:\n                content = str(result)\n\n        except (ExceptionWithMessageError, ValueError, TypeError, NotImplementedError, AttributeError) as e:\n            await logger.aerror(f\"Error with structured chat agent: {e}\")\n            # Fallback to regular agent\n            content_str = \"No content returned from agent\"\n            return Data(data={\"content\": content_str, \"error\": str(e)})\n\n        # Process with structured output validation\n        try:\n            structured_output = await self.build_structured_output_base(content)\n\n            # Handle different output formats\n            if isinstance(structured_output, list) and structured_output:\n                if len(structured_output) == 1:\n                    return Data(data=structured_output[0])\n                return Data(data={\"results\": structured_output})\n            if isinstance(structured_output, dict):\n                return Data(data=structured_output)\n            return Data(data={\"content\": content})\n\n        except (ValueError, TypeError) as e:\n            await logger.aerror(f\"Error in structured output processing: {e}\")\n            return Data(data={\"content\": content, \"error\": str(e)})\n\n    async def get_memory_data(self):\n        # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n        messages = (\n            await MemoryComponent(**self.get_base_args())\n            .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n            .retrieve_messages()\n        )\n        return [\n            message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n        ]\n\n    async def get_llm(self):\n        if not isinstance(self.agent_llm, str):\n            return self.agent_llm, None\n\n        try:\n            provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n            if not provider_info:\n                msg = f\"Invalid model provider: {self.agent_llm}\"\n                raise ValueError(msg)\n\n            component_class = provider_info.get(\"component_class\")\n            display_name = component_class.display_name\n            inputs = provider_info.get(\"inputs\")\n            prefix = provider_info.get(\"prefix\", \"\")\n\n            return self._build_llm_model(component_class, inputs, prefix), display_name\n\n        except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n            await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n            msg = f\"Failed to initialize language model: {e!s}\"\n            raise ValueError(msg) from e\n\n    def _build_llm_model(self, component, inputs, prefix=\"\"):\n        model_kwargs = {}\n        for input_ in inputs:\n            if hasattr(self, f\"{prefix}{input_.name}\"):\n                model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n        return component.set(**model_kwargs).build_model()\n\n    def set_component_params(self, component):\n        provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n        if provider_info:\n            inputs = provider_info.get(\"inputs\")\n            prefix = provider_info.get(\"prefix\")\n            # Filter out json_mode and only use attributes that exist on this component\n            model_kwargs = {}\n            for input_ in inputs:\n                if hasattr(self, f\"{prefix}{input_.name}\"):\n                    model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n            return component.set(**model_kwargs)\n        return component\n\n    def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n        \"\"\"Delete specified fields from build_config.\"\"\"\n        for field in fields:\n            build_config.pop(field, None)\n\n    def update_input_types(self, build_config: dotdict) -> dotdict:\n        \"\"\"Update input types for all fields in build_config.\"\"\"\n        for key, value in build_config.items():\n            if isinstance(value, dict):\n                if value.get(\"input_types\") is None:\n                    build_config[key][\"input_types\"] = []\n            elif hasattr(value, \"input_types\") and value.input_types is None:\n                value.input_types = []\n        return build_config\n\n    async def update_build_config(\n        self, build_config: dotdict, field_value: str, field_name: str | None = None\n    ) -> dotdict:\n        # Iterate over all providers in the MODEL_PROVIDERS_DICT\n        # Existing logic for updating build_config\n        if field_name in (\"agent_llm\",):\n            build_config[\"agent_llm\"][\"value\"] = field_value\n            provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n            if provider_info:\n                component_class = provider_info.get(\"component_class\")\n                if component_class and hasattr(component_class, \"update_build_config\"):\n                    # Call the component class's update_build_config method\n                    build_config = await update_component_build_config(\n                        component_class, build_config, field_value, \"model_name\"\n                    )\n\n            provider_configs: dict[str, tuple[dict, list[dict]]] = {\n                provider: (\n                    MODEL_PROVIDERS_DICT[provider][\"fields\"],\n                    [\n                        MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n                        for other_provider in MODEL_PROVIDERS_DICT\n                        if other_provider != provider\n                    ],\n                )\n                for provider in MODEL_PROVIDERS_DICT\n            }\n            if field_value in provider_configs:\n                fields_to_add, fields_to_delete = provider_configs[field_value]\n\n                # Delete fields from other providers\n                for fields in fields_to_delete:\n                    self.delete_fields(build_config, fields)\n\n                # Add provider-specific fields\n                if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n                    build_config.update(fields_to_add)\n                else:\n                    build_config.update(fields_to_add)\n                # Reset input types for agent_llm\n                build_config[\"agent_llm\"][\"input_types\"] = []\n                build_config[\"agent_llm\"][\"display_name\"] = \"Model Provider\"\n            elif field_value == \"Custom\":\n                # Delete all provider fields\n                self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n                # Update with custom component\n                custom_component = DropdownInput(\n                    name=\"agent_llm\",\n                    display_name=\"Language Model\",\n                    options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n                    value=\"Custom\",\n                    real_time_refresh=True,\n                    input_types=[\"LanguageModel\"],\n                    options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n                    + [{\"icon\": \"brain\"}],\n                )\n                build_config.update({\"agent_llm\": custom_component.to_dict()})\n            # Update input types for all fields\n            build_config = self.update_input_types(build_config)\n\n            # Validate required keys\n            default_keys = [\n                \"code\",\n                \"_type\",\n                \"agent_llm\",\n                \"tools\",\n                \"input_value\",\n                \"add_current_date_tool\",\n                \"system_prompt\",\n                \"agent_description\",\n                \"max_iterations\",\n                \"handle_parsing_errors\",\n                \"verbose\",\n            ]\n            missing_keys = [key for key in default_keys if key not in build_config]\n            if missing_keys:\n                msg = f\"Missing required keys in build_config: {missing_keys}\"\n                raise ValueError(msg)\n        if (\n            isinstance(self.agent_llm, str)\n            and self.agent_llm in MODEL_PROVIDERS_DICT\n            and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n        ):\n            provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n            if provider_info:\n                component_class = provider_info.get(\"component_class\")\n                component_class = self.set_component_params(component_class)\n                prefix = provider_info.get(\"prefix\")\n                if component_class and hasattr(component_class, \"update_build_config\"):\n                    # Call each component class's update_build_config method\n                    # remove the prefix from the field_name\n                    if isinstance(field_name, str) and isinstance(prefix, str):\n                        field_name = field_name.replace(prefix, \"\")\n                    build_config = await update_component_build_config(\n                        component_class, build_config, field_value, \"model_name\"\n                    )\n        return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n    async def _get_tools(self) -> list[Tool]:\n        component_toolkit = _get_component_toolkit()\n        tools_names = self._build_tools_names()\n        agent_description = self.get_tool_description()\n        # TODO: Agent Description Depreciated Feature to be removed\n        description = f\"{agent_description}{tools_names}\"\n        tools = component_toolkit(component=self).get_tools(\n            tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n        )\n        if hasattr(self, \"tools_metadata\"):\n            tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n        return tools\n"
+ "value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool, Tool\nfrom pydantic import ValidationError\n\nfrom lfx.base.agents.agent import LCToolsAgentComponent\nfrom lfx.base.agents.events import ExceptionWithMessageError\nfrom lfx.base.models.model_input_constants import (\n    ALL_PROVIDER_FIELDS,\n    MODEL_DYNAMIC_UPDATE_FIELDS,\n    MODEL_PROVIDERS,\n    MODEL_PROVIDERS_DICT,\n    MODELS_METADATA,\n)\nfrom lfx.base.models.model_utils import get_model_name\nfrom lfx.components.helpers.current_date import CurrentDateComponent\nfrom lfx.components.helpers.memory import MemoryComponent\nfrom lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom lfx.custom.custom_component.component import get_component_toolkit\nfrom lfx.custom.utils import update_component_build_config\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.inputs.inputs import TableInput\nfrom lfx.io import BoolInput, DropdownInput, IntInput, MultilineInput, Output\nfrom lfx.log.logger import logger\nfrom lfx.schema.data import Data\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n    component_input.advanced = True\n    return component_input\n\n\nMODEL_PROVIDERS_LIST = [\"Anthropic\", \"Google Generative AI\", \"Groq\", \"OpenAI\"]\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n    display_name: str = \"Agent\"\n    description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n    documentation: str = \"https://docs.langflow.org/agents\"\n    icon = \"bot\"\n    beta = False\n    name = \"Agent\"\n\n    memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n    # Filter out json_mode from OpenAI inputs since we handle structured output differently\n    if \"OpenAI\" in MODEL_PROVIDERS_DICT:\n        openai_inputs_filtered = [\n            input_field\n            for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n            if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n        ]\n    else:\n        openai_inputs_filtered = []\n\n    inputs = [\n        DropdownInput(\n            name=\"agent_llm\",\n            display_name=\"Model Provider\",\n            info=\"The provider of the language model that the agent will use to generate responses.\",\n            options=[*MODEL_PROVIDERS_LIST, \"Custom\"],\n            value=\"OpenAI\",\n            real_time_refresh=True,\n            input_types=[],\n            options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA]\n            + [{\"icon\": \"brain\"}],\n        ),\n        *openai_inputs_filtered,\n        MultilineInput(\n            name=\"system_prompt\",\n            display_name=\"Agent Instructions\",\n            info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n            value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n            advanced=False,\n        ),\n        IntInput(\n            name=\"n_messages\",\n            display_name=\"Number of Chat History Messages\",\n            value=100,\n            info=\"Number of chat history messages to retrieve.\",\n            advanced=True,\n            show=True,\n        ),\n        MultilineInput(\n            name=\"format_instructions\",\n            display_name=\"Output Format Instructions\",\n            info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n            value=(\n                \"You are an AI that extracts structured JSON objects from unstructured text. \"\n                \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n                \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n                \"Fill missing or ambiguous values with defaults: null for missing values. \"\n                \"Remove exact duplicates but keep variations that have different field values. \"\n                \"Always return valid JSON in the expected format, never throw errors. \"\n                \"If multiple objects can be extracted, return them all in the structured format.\"\n            ),\n            advanced=True,\n        ),\n        TableInput(\n            name=\"output_schema\",\n            display_name=\"Output Schema\",\n            info=(\n                \"Schema Validation: Define the structure and data types for structured output. \"\n                \"No validation if no output schema.\"\n            ),\n            advanced=True,\n            required=False,\n            value=[],\n            table_schema=[\n                {\n                    \"name\": \"name\",\n                    \"display_name\": \"Name\",\n                    \"type\": \"str\",\n                    \"description\": \"Specify the name of the output field.\",\n                    \"default\": \"field\",\n                    \"edit_mode\": EditMode.INLINE,\n                },\n                {\n                    \"name\": \"description\",\n                    \"display_name\": \"Description\",\n                    \"type\": \"str\",\n                    \"description\": \"Describe the purpose of the output field.\",\n                    \"default\": \"description of field\",\n                    \"edit_mode\": EditMode.POPOVER,\n                },\n                {\n                    \"name\": \"type\",\n                    \"display_name\": \"Type\",\n                    \"type\": \"str\",\n                    \"edit_mode\": EditMode.INLINE,\n                    \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n                    \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n                    \"default\": \"str\",\n                },\n                {\n                    \"name\": \"multiple\",\n                    \"display_name\": \"As List\",\n                    \"type\": \"boolean\",\n                    \"description\": \"Set to True if this output field should be a list of the specified type.\",\n                    \"default\": \"False\",\n                    \"edit_mode\": EditMode.INLINE,\n                },\n            ],\n        ),\n        *LCToolsAgentComponent.get_base_inputs(),\n        # removed memory inputs from agent component\n        # *memory_inputs,\n        BoolInput(\n            name=\"add_current_date_tool\",\n            display_name=\"Current Date\",\n            advanced=True,\n            info=\"If true, will add a tool to the agent that returns the current date.\",\n            value=True,\n        ),\n    ]\n    outputs = [\n        Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n        Output(name=\"structured_response\", display_name=\"Structured Response\", method=\"json_response\", tool_mode=False),\n    ]\n\n    async def get_agent_requirements(self):\n        \"\"\"Get the agent requirements for the agent.\"\"\"\n        llm_model, display_name = await self.get_llm()\n        if llm_model is None:\n            msg = \"No language model selected. Please choose a model to proceed.\"\n            raise ValueError(msg)\n        self.model_name = get_model_name(llm_model, display_name=display_name)\n\n        # Get memory data\n        self.chat_history = await self.get_memory_data()\n        if isinstance(self.chat_history, Message):\n            self.chat_history = [self.chat_history]\n\n        # Add current date tool if enabled\n        if self.add_current_date_tool:\n            if not isinstance(self.tools, list):  # type: ignore[has-type]\n                self.tools = []\n            current_date_tool = (CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n            if not isinstance(current_date_tool, StructuredTool):\n                msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n                raise TypeError(msg)\n            self.tools.append(current_date_tool)\n        return llm_model, self.chat_history, self.tools\n\n    async def message_response(self) -> Message:\n        try:\n            llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n            # Set up and run agent\n            self.set(\n                llm=llm_model,\n                tools=self.tools or [],\n                chat_history=self.chat_history,\n                input_value=self.input_value,\n                system_prompt=self.system_prompt,\n            )\n            agent = self.create_agent_runnable()\n            result = await self.run_agent(agent)\n\n            # Store result for potential JSON output\n            self._agent_result = result\n\n        except (ValueError, TypeError, KeyError) as e:\n            await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n            raise\n        except ExceptionWithMessageError as e:\n            await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n            raise\n        # Avoid catching blind Exception; let truly unexpected exceptions propagate\n        except Exception as e:\n            await logger.aerror(f\"Unexpected error: {e!s}\")\n            raise\n        else:\n            return result\n\n    def _preprocess_schema(self, schema):\n        \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n        processed_schema = []\n        for field in schema:\n            processed_field = {\n                \"name\": str(field.get(\"name\", \"field\")),\n                \"type\": str(field.get(\"type\", \"str\")),\n                \"description\": str(field.get(\"description\", \"\")),\n                \"multiple\": field.get(\"multiple\", False),\n            }\n            # Ensure multiple is handled correctly\n            if isinstance(processed_field[\"multiple\"], str):\n                processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\"true\", \"1\", \"t\", \"y\", \"yes\"]\n            processed_schema.append(processed_field)\n        return processed_schema\n\n    async def build_structured_output_base(self, content: str):\n        \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n        json_pattern = r\"\\{.*\\}\"\n        schema_error_msg = \"Try setting an output schema\"\n\n        # Try to parse content as JSON first\n        json_data = None\n        try:\n            json_data = json.loads(content)\n        except json.JSONDecodeError:\n            json_match = re.search(json_pattern, content, re.DOTALL)\n            if json_match:\n                try:\n                    json_data = json.loads(json_match.group())\n                except json.JSONDecodeError:\n                    return {\"content\": content, \"error\": schema_error_msg}\n            else:\n                return {\"content\": content, \"error\": schema_error_msg}\n\n        # If no output schema provided, return parsed JSON without validation\n        if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n            return json_data\n\n        # Use BaseModel validation with schema\n        try:\n            processed_schema = self._preprocess_schema(self.output_schema)\n            output_model = build_model_from_schema(processed_schema)\n\n            # Validate against the schema\n            if isinstance(json_data, list):\n                # Multiple objects\n                validated_objects = []\n                for item in json_data:\n                    try:\n                        validated_obj = output_model.model_validate(item)\n                        validated_objects.append(validated_obj.model_dump())\n                    except ValidationError as e:\n                        await logger.aerror(f\"Validation error for item: {e}\")\n                        # Include invalid items with error info\n                        validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n                return validated_objects\n\n            # Single object\n            try:\n                validated_obj = output_model.model_validate(json_data)\n                return [validated_obj.model_dump()]  # Return as list for consistency\n            except ValidationError as e:\n                await logger.aerror(f\"Validation error: {e}\")\n                return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n        except (TypeError, ValueError) as e:\n            await logger.aerror(f\"Error building structured output: {e}\")\n            # Fallback to parsed JSON without validation\n            return json_data\n\n    async def json_response(self) -> Data:\n        \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n        # Always use structured chat agent for JSON response mode for better JSON formatting\n        try:\n            system_components = []\n\n            # 1. Agent Instructions (system_prompt)\n            agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n            if agent_instructions:\n                system_components.append(f\"{agent_instructions}\")\n\n            # 2. Format Instructions\n            format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n            if format_instructions:\n                system_components.append(f\"Format instructions: {format_instructions}\")\n\n            # 3. Schema Information from BaseModel\n            if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n                try:\n                    processed_schema = self._preprocess_schema(self.output_schema)\n                    output_model = build_model_from_schema(processed_schema)\n                    schema_dict = output_model.model_json_schema()\n                    schema_info = (\n                        \"You are given some text that may include format instructions, \"\n                        \"explanations, or other content alongside a JSON schema.\\n\\n\"\n                        \"Your task:\\n\"\n                        \"- Extract only the JSON schema.\\n\"\n                        \"- Return it as valid JSON.\\n\"\n                        \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n                        \"Input:\\n\"\n                        f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n                        \"Output (only JSON schema):\"\n                    )\n                    system_components.append(schema_info)\n                except (ValidationError, ValueError, TypeError, KeyError) as e:\n                    await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n            # Combine all components\n            combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n            llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n            self.set(\n                llm=llm_model,\n                tools=self.tools or [],\n                chat_history=self.chat_history,\n                input_value=self.input_value,\n                system_prompt=combined_instructions,\n            )\n\n            # Create and run structured chat agent\n            try:\n                structured_agent = self.create_agent_runnable()\n            except (NotImplementedError, ValueError, TypeError) as e:\n                await logger.aerror(f\"Error with structured chat agent: {e}\")\n                raise\n            try:\n                result = await self.run_agent(structured_agent)\n            except (ExceptionWithMessageError, ValueError, TypeError, RuntimeError) as e:\n                await logger.aerror(f\"Error with structured agent result: {e}\")\n                raise\n            # Extract content from structured agent result\n            if hasattr(result, \"content\"):\n                content = result.content\n            elif hasattr(result, \"text\"):\n                content = result.text\n            else:\n                content = str(result)\n\n        except (ExceptionWithMessageError, ValueError, TypeError, NotImplementedError, AttributeError) as e:\n            await logger.aerror(f\"Error with structured chat agent: {e}\")\n            # Fallback to regular agent\n            content_str = \"No content returned from agent\"\n            return Data(data={\"content\": content_str, \"error\": str(e)})\n\n        # Process with structured output validation\n        try:\n            structured_output = await self.build_structured_output_base(content)\n\n            # Handle different output formats\n            if isinstance(structured_output, list) and structured_output:\n                if len(structured_output) == 1:\n                    return Data(data=structured_output[0])\n                return Data(data={\"results\": structured_output})\n            if isinstance(structured_output, dict):\n                return Data(data=structured_output)\n            return Data(data={\"content\": content})\n\n        except (ValueError, TypeError) as e:\n            await logger.aerror(f\"Error in structured output processing: {e}\")\n            return Data(data={\"content\": content, \"error\": str(e)})\n\n    async def get_memory_data(self):\n        # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n        messages = (\n            await MemoryComponent(**self.get_base_args())\n            .set(session_id=self.graph.session_id, order=\"Ascending\", n_messages=self.n_messages)\n            .retrieve_messages()\n        )\n        return [\n            message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n        ]\n\n    async def get_llm(self):\n        if not isinstance(self.agent_llm, str):\n            return self.agent_llm, None\n\n        try:\n            provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n            if not provider_info:\n                msg = f\"Invalid model provider: {self.agent_llm}\"\n                raise ValueError(msg)\n\n            component_class = provider_info.get(\"component_class\")\n            display_name = component_class.display_name\n            inputs = provider_info.get(\"inputs\")\n            prefix = provider_info.get(\"prefix\", \"\")\n\n            return self._build_llm_model(component_class, inputs, prefix), display_name\n\n        except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n            await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n            msg = f\"Failed to initialize language model: {e!s}\"\n            raise ValueError(msg) from e\n\n    def _build_llm_model(self, component, inputs, prefix=\"\"):\n        model_kwargs = {}\n        for input_ in inputs:\n            if hasattr(self, f\"{prefix}{input_.name}\"):\n                model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n        return component.set(**model_kwargs).build_model()\n\n    def set_component_params(self, component):\n        provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n        if provider_info:\n            inputs = provider_info.get(\"inputs\")\n            prefix = provider_info.get(\"prefix\")\n            # Filter out json_mode and only use attributes that exist on this component\n            model_kwargs = {}\n            for input_ in inputs:\n                if hasattr(self, f\"{prefix}{input_.name}\"):\n                    model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n            return component.set(**model_kwargs)\n        return component\n\n    def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n        \"\"\"Delete specified fields from build_config.\"\"\"\n        for field in fields:\n            build_config.pop(field, None)\n\n    def update_input_types(self, build_config: dotdict) -> dotdict:\n        \"\"\"Update input types for all fields in build_config.\"\"\"\n        for key, value in build_config.items():\n            if isinstance(value, dict):\n                if value.get(\"input_types\") is None:\n                    build_config[key][\"input_types\"] = []\n            elif hasattr(value, \"input_types\") and value.input_types is None:\n                value.input_types = []\n        return build_config\n\n    async def update_build_config(\n        self, build_config: dotdict, field_value: str, field_name: str | None = None\n    ) -> dotdict:\n        # Iterate over all providers in the MODEL_PROVIDERS_DICT\n        # Existing logic for updating build_config\n        if field_name in (\"agent_llm\",):\n            build_config[\"agent_llm\"][\"value\"] = field_value\n            provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n            if provider_info:\n                component_class = provider_info.get(\"component_class\")\n                if component_class and hasattr(component_class, \"update_build_config\"):\n                    # Call the component class's update_build_config method\n                    build_config = await update_component_build_config(\n                        component_class, build_config, field_value, \"model_name\"\n                    )\n\n            provider_configs: dict[str, tuple[dict, list[dict]]] = {\n                provider: (\n                    MODEL_PROVIDERS_DICT[provider][\"fields\"],\n                    [\n                        MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n                        for other_provider in MODEL_PROVIDERS_DICT\n                        if other_provider != provider\n                    ],\n                )\n                for provider in MODEL_PROVIDERS_DICT\n            }\n            if field_value in provider_configs:\n                fields_to_add, fields_to_delete = provider_configs[field_value]\n\n                # Delete fields from other providers\n                for fields in fields_to_delete:\n                    self.delete_fields(build_config, fields)\n\n                # Add provider-specific fields\n                if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n                    build_config.update(fields_to_add)\n                else:\n                    build_config.update(fields_to_add)\n                # Reset input types for agent_llm\n                build_config[\"agent_llm\"][\"input_types\"] = []\n                build_config[\"agent_llm\"][\"display_name\"] = \"Model Provider\"\n            elif field_value == \"Custom\":\n                # Delete all provider fields\n                self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n                # Update with custom component\n                custom_component = DropdownInput(\n                    name=\"agent_llm\",\n                    display_name=\"Language Model\",\n                    options=[*sorted(MODEL_PROVIDERS), \"Custom\"],\n                    value=\"Custom\",\n                    real_time_refresh=True,\n                    input_types=[\"LanguageModel\"],\n                    options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n                    + [{\"icon\": \"brain\"}],\n                )\n                build_config.update({\"agent_llm\": custom_component.to_dict()})\n            # Update input types for all fields\n            build_config = self.update_input_types(build_config)\n\n            # Validate required keys\n            default_keys = [\n                \"code\",\n                \"_type\",\n                \"agent_llm\",\n                \"tools\",\n                \"input_value\",\n                \"add_current_date_tool\",\n                \"system_prompt\",\n                \"agent_description\",\n                \"max_iterations\",\n                \"handle_parsing_errors\",\n                \"verbose\",\n            ]\n            missing_keys = [key for key in default_keys if key not in build_config]\n            if missing_keys:\n                msg = f\"Missing required keys in build_config: {missing_keys}\"\n                raise ValueError(msg)\n        if (\n            isinstance(self.agent_llm, str)\n            and self.agent_llm in MODEL_PROVIDERS_DICT\n            and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n        ):\n            provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n            if provider_info:\n                component_class = provider_info.get(\"component_class\")\n                component_class = self.set_component_params(component_class)\n                prefix = provider_info.get(\"prefix\")\n                if component_class and hasattr(component_class, \"update_build_config\"):\n                    # Call each component class's update_build_config method\n                    # remove the prefix from the field_name\n                    if isinstance(field_name, str) and isinstance(prefix, str):\n                        field_name = field_name.replace(prefix, \"\")\n                    build_config = await update_component_build_config(\n                        component_class, build_config, field_value, \"model_name\"\n                    )\n        return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n    def _get_tools(self) -> list[Tool]:\n        component_toolkit = get_component_toolkit()\n        tools_names = self._build_tools_names()\n        agent_description = self.get_tool_description()\n        # TODO: Agent Description Depreciated Feature to be removed\n        description = f\"{agent_description}{tools_names}\"\n        tools = component_toolkit(component=self).get_tools(\n            tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n        )\n        if hasattr(self, \"tools_metadata\"):\n            tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n        return tools\n"
+ },
+ "format_instructions": {
+ "_input_type": "MultilineInput",
+ "advanced": true,
+ "copy_field": false,
+ "display_name": "Output Format Instructions",
+ "dynamic": false,
+ "info": "Generic Template for structured output formatting. Valid only with Structured response.",
+ "input_types": [
+ "Message"
+ ],
+ "list": false,
+ "list_add_label": "Add More",
+ "load_from_db": false,
+ "multiline": true,
+ "name": "format_instructions",
+ "placeholder": "",
+ "required": false,
+ "show": true,
+ "title_case": false,
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "type": "str",
+ "value": "You are an AI that extracts structured JSON objects from unstructured text. Use a predefined schema with expected types (str, int, float, bool, dict). Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. Fill missing or ambiguous values with defaults: null for missing values. Remove exact duplicates but keep variations that have different field values. Always return valid JSON in the expected format, never throw errors. If multiple objects can be extracted, return them all in the structured format."
  },
  "handle_parsing_errors": {
  "_input_type": "BoolInput",
@@ -1569,24 +1625,6 @@
  "type": "str",
  "value": ""
  },
- "json_mode": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "JSON Mode",
- "dynamic": false,
- "info": "If True, it will output JSON regardless of passing a schema.",
- "list": false,
- "list_add_label": "Add More",
- "name": "json_mode",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": false
- },
  "max_iterations": {
  "_input_type": "IntInput",
  "advanced": true,
@@ -1681,12 +1719,20 @@
  "gpt-4.1",
  "gpt-4.1-mini",
  "gpt-4.1-nano",
- "gpt-4.5-preview",
  "gpt-4-turbo",
  "gpt-4-turbo-preview",
  "gpt-4",
  "gpt-3.5-turbo",
- "
+ "gpt-5",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-5-chat-latest",
+ "o1",
+ "o3-mini",
+ "o3",
+ "o3-pro",
+ "o4-mini",
+ "o4-mini-high"
  ],
  "options_metadata": [],
  "placeholder": "",
@@ -1737,6 +1783,68 @@
  "type": "str",
  "value": ""
  },
+ "output_schema": {
+ "_input_type": "TableInput",
+ "advanced": true,
+ "display_name": "Output Schema",
+ "dynamic": false,
+ "info": "Schema Validation: Define the structure and data types for structured output. No validation if no output schema.",
+ "is_list": true,
+ "list_add_label": "Add More",
+ "name": "output_schema",
+ "placeholder": "",
+ "required": false,
+ "show": true,
+ "table_icon": "Table",
+ "table_schema": [
+ {
+ "default": "field",
+ "description": "Specify the name of the output field.",
+ "display_name": "Name",
+ "edit_mode": "inline",
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "default": "description of field",
+ "description": "Describe the purpose of the output field.",
+ "display_name": "Description",
+ "edit_mode": "popover",
+ "name": "description",
+ "type": "str"
+ },
+ {
+ "default": "str",
+ "description": "Indicate the data type of the output field (e.g., str, int, float, bool, dict).",
+ "display_name": "Type",
+ "edit_mode": "inline",
+ "name": "type",
+ "options": [
+ "str",
+ "int",
+ "float",
+ "bool",
+ "dict"
+ ],
+ "type": "str"
+ },
+ {
+ "default": "False",
+ "description": "Set to True if this output field should be a list of the specified type.",
+ "display_name": "As List",
+ "edit_mode": "inline",
+ "name": "multiple",
+ "type": "boolean"
+ }
+ ],
+ "title_case": false,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "trigger_icon": "Table",
+ "trigger_text": "Open table",
+ "type": "table",
+ "value": []
+ },
  "seed": {
  "_input_type": "IntInput",
  "advanced": true,