lfx-nightly 0.2.0.dev25 (lfx_nightly-0.2.0.dev25-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lfx-nightly might be problematic.
- lfx/__init__.py +0 -0
- lfx/__main__.py +25 -0
- lfx/_assets/component_index.json +1 -0
- lfx/base/__init__.py +0 -0
- lfx/base/agents/__init__.py +0 -0
- lfx/base/agents/agent.py +375 -0
- lfx/base/agents/altk_base_agent.py +380 -0
- lfx/base/agents/altk_tool_wrappers.py +565 -0
- lfx/base/agents/callback.py +130 -0
- lfx/base/agents/context.py +109 -0
- lfx/base/agents/crewai/__init__.py +0 -0
- lfx/base/agents/crewai/crew.py +231 -0
- lfx/base/agents/crewai/tasks.py +12 -0
- lfx/base/agents/default_prompts.py +23 -0
- lfx/base/agents/errors.py +15 -0
- lfx/base/agents/events.py +430 -0
- lfx/base/agents/utils.py +237 -0
- lfx/base/astra_assistants/__init__.py +0 -0
- lfx/base/astra_assistants/util.py +171 -0
- lfx/base/chains/__init__.py +0 -0
- lfx/base/chains/model.py +19 -0
- lfx/base/composio/__init__.py +0 -0
- lfx/base/composio/composio_base.py +2584 -0
- lfx/base/compressors/__init__.py +0 -0
- lfx/base/compressors/model.py +60 -0
- lfx/base/constants.py +46 -0
- lfx/base/curl/__init__.py +0 -0
- lfx/base/curl/parse.py +188 -0
- lfx/base/data/__init__.py +5 -0
- lfx/base/data/base_file.py +810 -0
- lfx/base/data/docling_utils.py +338 -0
- lfx/base/data/storage_utils.py +192 -0
- lfx/base/data/utils.py +362 -0
- lfx/base/datastax/__init__.py +5 -0
- lfx/base/datastax/astradb_base.py +896 -0
- lfx/base/document_transformers/__init__.py +0 -0
- lfx/base/document_transformers/model.py +43 -0
- lfx/base/embeddings/__init__.py +0 -0
- lfx/base/embeddings/aiml_embeddings.py +62 -0
- lfx/base/embeddings/embeddings_class.py +113 -0
- lfx/base/embeddings/model.py +26 -0
- lfx/base/flow_processing/__init__.py +0 -0
- lfx/base/flow_processing/utils.py +86 -0
- lfx/base/huggingface/__init__.py +0 -0
- lfx/base/huggingface/model_bridge.py +133 -0
- lfx/base/io/__init__.py +0 -0
- lfx/base/io/chat.py +21 -0
- lfx/base/io/text.py +22 -0
- lfx/base/knowledge_bases/__init__.py +3 -0
- lfx/base/knowledge_bases/knowledge_base_utils.py +137 -0
- lfx/base/langchain_utilities/__init__.py +0 -0
- lfx/base/langchain_utilities/model.py +35 -0
- lfx/base/langchain_utilities/spider_constants.py +1 -0
- lfx/base/langwatch/__init__.py +0 -0
- lfx/base/langwatch/utils.py +18 -0
- lfx/base/mcp/__init__.py +0 -0
- lfx/base/mcp/constants.py +2 -0
- lfx/base/mcp/util.py +1659 -0
- lfx/base/memory/__init__.py +0 -0
- lfx/base/memory/memory.py +49 -0
- lfx/base/memory/model.py +38 -0
- lfx/base/models/__init__.py +3 -0
- lfx/base/models/aiml_constants.py +51 -0
- lfx/base/models/anthropic_constants.py +51 -0
- lfx/base/models/aws_constants.py +151 -0
- lfx/base/models/chat_result.py +76 -0
- lfx/base/models/cometapi_constants.py +54 -0
- lfx/base/models/google_generative_ai_constants.py +70 -0
- lfx/base/models/google_generative_ai_model.py +38 -0
- lfx/base/models/groq_constants.py +150 -0
- lfx/base/models/groq_model_discovery.py +265 -0
- lfx/base/models/model.py +375 -0
- lfx/base/models/model_input_constants.py +378 -0
- lfx/base/models/model_metadata.py +41 -0
- lfx/base/models/model_utils.py +108 -0
- lfx/base/models/novita_constants.py +35 -0
- lfx/base/models/ollama_constants.py +52 -0
- lfx/base/models/openai_constants.py +129 -0
- lfx/base/models/sambanova_constants.py +18 -0
- lfx/base/models/watsonx_constants.py +36 -0
- lfx/base/processing/__init__.py +0 -0
- lfx/base/prompts/__init__.py +0 -0
- lfx/base/prompts/api_utils.py +224 -0
- lfx/base/prompts/utils.py +61 -0
- lfx/base/textsplitters/__init__.py +0 -0
- lfx/base/textsplitters/model.py +28 -0
- lfx/base/tools/__init__.py +0 -0
- lfx/base/tools/base.py +26 -0
- lfx/base/tools/component_tool.py +325 -0
- lfx/base/tools/constants.py +49 -0
- lfx/base/tools/flow_tool.py +132 -0
- lfx/base/tools/run_flow.py +698 -0
- lfx/base/vectorstores/__init__.py +0 -0
- lfx/base/vectorstores/model.py +193 -0
- lfx/base/vectorstores/utils.py +22 -0
- lfx/base/vectorstores/vector_store_connection_decorator.py +52 -0
- lfx/cli/__init__.py +5 -0
- lfx/cli/commands.py +327 -0
- lfx/cli/common.py +650 -0
- lfx/cli/run.py +506 -0
- lfx/cli/script_loader.py +289 -0
- lfx/cli/serve_app.py +546 -0
- lfx/cli/validation.py +69 -0
- lfx/components/FAISS/__init__.py +34 -0
- lfx/components/FAISS/faiss.py +111 -0
- lfx/components/Notion/__init__.py +19 -0
- lfx/components/Notion/add_content_to_page.py +269 -0
- lfx/components/Notion/create_page.py +94 -0
- lfx/components/Notion/list_database_properties.py +68 -0
- lfx/components/Notion/list_pages.py +122 -0
- lfx/components/Notion/list_users.py +77 -0
- lfx/components/Notion/page_content_viewer.py +93 -0
- lfx/components/Notion/search.py +111 -0
- lfx/components/Notion/update_page_property.py +114 -0
- lfx/components/__init__.py +428 -0
- lfx/components/_importing.py +42 -0
- lfx/components/agentql/__init__.py +3 -0
- lfx/components/agentql/agentql_api.py +151 -0
- lfx/components/aiml/__init__.py +37 -0
- lfx/components/aiml/aiml.py +115 -0
- lfx/components/aiml/aiml_embeddings.py +37 -0
- lfx/components/altk/__init__.py +34 -0
- lfx/components/altk/altk_agent.py +193 -0
- lfx/components/amazon/__init__.py +36 -0
- lfx/components/amazon/amazon_bedrock_converse.py +195 -0
- lfx/components/amazon/amazon_bedrock_embedding.py +109 -0
- lfx/components/amazon/amazon_bedrock_model.py +130 -0
- lfx/components/amazon/s3_bucket_uploader.py +211 -0
- lfx/components/anthropic/__init__.py +34 -0
- lfx/components/anthropic/anthropic.py +187 -0
- lfx/components/apify/__init__.py +5 -0
- lfx/components/apify/apify_actor.py +325 -0
- lfx/components/arxiv/__init__.py +3 -0
- lfx/components/arxiv/arxiv.py +169 -0
- lfx/components/assemblyai/__init__.py +46 -0
- lfx/components/assemblyai/assemblyai_get_subtitles.py +83 -0
- lfx/components/assemblyai/assemblyai_lemur.py +183 -0
- lfx/components/assemblyai/assemblyai_list_transcripts.py +95 -0
- lfx/components/assemblyai/assemblyai_poll_transcript.py +72 -0
- lfx/components/assemblyai/assemblyai_start_transcript.py +188 -0
- lfx/components/azure/__init__.py +37 -0
- lfx/components/azure/azure_openai.py +95 -0
- lfx/components/azure/azure_openai_embeddings.py +83 -0
- lfx/components/baidu/__init__.py +32 -0
- lfx/components/baidu/baidu_qianfan_chat.py +113 -0
- lfx/components/bing/__init__.py +3 -0
- lfx/components/bing/bing_search_api.py +61 -0
- lfx/components/cassandra/__init__.py +40 -0
- lfx/components/cassandra/cassandra.py +264 -0
- lfx/components/cassandra/cassandra_chat.py +92 -0
- lfx/components/cassandra/cassandra_graph.py +238 -0
- lfx/components/chains/__init__.py +3 -0
- lfx/components/chroma/__init__.py +34 -0
- lfx/components/chroma/chroma.py +169 -0
- lfx/components/cleanlab/__init__.py +40 -0
- lfx/components/cleanlab/cleanlab_evaluator.py +155 -0
- lfx/components/cleanlab/cleanlab_rag_evaluator.py +254 -0
- lfx/components/cleanlab/cleanlab_remediator.py +131 -0
- lfx/components/clickhouse/__init__.py +34 -0
- lfx/components/clickhouse/clickhouse.py +135 -0
- lfx/components/cloudflare/__init__.py +32 -0
- lfx/components/cloudflare/cloudflare.py +81 -0
- lfx/components/cohere/__init__.py +40 -0
- lfx/components/cohere/cohere_embeddings.py +81 -0
- lfx/components/cohere/cohere_models.py +46 -0
- lfx/components/cohere/cohere_rerank.py +51 -0
- lfx/components/cometapi/__init__.py +32 -0
- lfx/components/cometapi/cometapi.py +166 -0
- lfx/components/composio/__init__.py +222 -0
- lfx/components/composio/agentql_composio.py +11 -0
- lfx/components/composio/agiled_composio.py +11 -0
- lfx/components/composio/airtable_composio.py +11 -0
- lfx/components/composio/apollo_composio.py +11 -0
- lfx/components/composio/asana_composio.py +11 -0
- lfx/components/composio/attio_composio.py +11 -0
- lfx/components/composio/bitbucket_composio.py +11 -0
- lfx/components/composio/bolna_composio.py +11 -0
- lfx/components/composio/brightdata_composio.py +11 -0
- lfx/components/composio/calendly_composio.py +11 -0
- lfx/components/composio/canva_composio.py +11 -0
- lfx/components/composio/canvas_composio.py +11 -0
- lfx/components/composio/coda_composio.py +11 -0
- lfx/components/composio/composio_api.py +278 -0
- lfx/components/composio/contentful_composio.py +11 -0
- lfx/components/composio/digicert_composio.py +11 -0
- lfx/components/composio/discord_composio.py +11 -0
- lfx/components/composio/dropbox_compnent.py +11 -0
- lfx/components/composio/elevenlabs_composio.py +11 -0
- lfx/components/composio/exa_composio.py +11 -0
- lfx/components/composio/figma_composio.py +11 -0
- lfx/components/composio/finage_composio.py +11 -0
- lfx/components/composio/firecrawl_composio.py +11 -0
- lfx/components/composio/fireflies_composio.py +11 -0
- lfx/components/composio/fixer_composio.py +11 -0
- lfx/components/composio/flexisign_composio.py +11 -0
- lfx/components/composio/freshdesk_composio.py +11 -0
- lfx/components/composio/github_composio.py +11 -0
- lfx/components/composio/gmail_composio.py +38 -0
- lfx/components/composio/googlebigquery_composio.py +11 -0
- lfx/components/composio/googlecalendar_composio.py +11 -0
- lfx/components/composio/googleclassroom_composio.py +11 -0
- lfx/components/composio/googledocs_composio.py +11 -0
- lfx/components/composio/googlemeet_composio.py +11 -0
- lfx/components/composio/googlesheets_composio.py +11 -0
- lfx/components/composio/googletasks_composio.py +8 -0
- lfx/components/composio/heygen_composio.py +11 -0
- lfx/components/composio/instagram_composio.py +11 -0
- lfx/components/composio/jira_composio.py +11 -0
- lfx/components/composio/jotform_composio.py +11 -0
- lfx/components/composio/klaviyo_composio.py +11 -0
- lfx/components/composio/linear_composio.py +11 -0
- lfx/components/composio/listennotes_composio.py +11 -0
- lfx/components/composio/mem0_composio.py +11 -0
- lfx/components/composio/miro_composio.py +11 -0
- lfx/components/composio/missive_composio.py +11 -0
- lfx/components/composio/notion_composio.py +11 -0
- lfx/components/composio/onedrive_composio.py +11 -0
- lfx/components/composio/outlook_composio.py +11 -0
- lfx/components/composio/pandadoc_composio.py +11 -0
- lfx/components/composio/peopledatalabs_composio.py +11 -0
- lfx/components/composio/perplexityai_composio.py +11 -0
- lfx/components/composio/reddit_composio.py +11 -0
- lfx/components/composio/serpapi_composio.py +11 -0
- lfx/components/composio/slack_composio.py +11 -0
- lfx/components/composio/slackbot_composio.py +11 -0
- lfx/components/composio/snowflake_composio.py +11 -0
- lfx/components/composio/supabase_composio.py +11 -0
- lfx/components/composio/tavily_composio.py +11 -0
- lfx/components/composio/timelinesai_composio.py +11 -0
- lfx/components/composio/todoist_composio.py +11 -0
- lfx/components/composio/wrike_composio.py +11 -0
- lfx/components/composio/youtube_composio.py +11 -0
- lfx/components/confluence/__init__.py +3 -0
- lfx/components/confluence/confluence.py +84 -0
- lfx/components/couchbase/__init__.py +34 -0
- lfx/components/couchbase/couchbase.py +102 -0
- lfx/components/crewai/__init__.py +49 -0
- lfx/components/crewai/crewai.py +108 -0
- lfx/components/crewai/hierarchical_crew.py +47 -0
- lfx/components/crewai/hierarchical_task.py +45 -0
- lfx/components/crewai/sequential_crew.py +53 -0
- lfx/components/crewai/sequential_task.py +74 -0
- lfx/components/crewai/sequential_task_agent.py +144 -0
- lfx/components/cuga/__init__.py +34 -0
- lfx/components/cuga/cuga_agent.py +730 -0
- lfx/components/custom_component/__init__.py +34 -0
- lfx/components/custom_component/custom_component.py +31 -0
- lfx/components/data/__init__.py +114 -0
- lfx/components/data_source/__init__.py +58 -0
- lfx/components/data_source/api_request.py +577 -0
- lfx/components/data_source/csv_to_data.py +101 -0
- lfx/components/data_source/json_to_data.py +106 -0
- lfx/components/data_source/mock_data.py +398 -0
- lfx/components/data_source/news_search.py +166 -0
- lfx/components/data_source/rss.py +71 -0
- lfx/components/data_source/sql_executor.py +101 -0
- lfx/components/data_source/url.py +311 -0
- lfx/components/data_source/web_search.py +326 -0
- lfx/components/datastax/__init__.py +76 -0
- lfx/components/datastax/astradb_assistant_manager.py +307 -0
- lfx/components/datastax/astradb_chatmemory.py +40 -0
- lfx/components/datastax/astradb_cql.py +288 -0
- lfx/components/datastax/astradb_graph.py +217 -0
- lfx/components/datastax/astradb_tool.py +378 -0
- lfx/components/datastax/astradb_vectorize.py +122 -0
- lfx/components/datastax/astradb_vectorstore.py +449 -0
- lfx/components/datastax/create_assistant.py +59 -0
- lfx/components/datastax/create_thread.py +33 -0
- lfx/components/datastax/dotenv.py +36 -0
- lfx/components/datastax/get_assistant.py +38 -0
- lfx/components/datastax/getenvvar.py +31 -0
- lfx/components/datastax/graph_rag.py +141 -0
- lfx/components/datastax/hcd.py +315 -0
- lfx/components/datastax/list_assistants.py +26 -0
- lfx/components/datastax/run.py +90 -0
- lfx/components/deactivated/__init__.py +15 -0
- lfx/components/deactivated/amazon_kendra.py +66 -0
- lfx/components/deactivated/chat_litellm_model.py +158 -0
- lfx/components/deactivated/code_block_extractor.py +26 -0
- lfx/components/deactivated/documents_to_data.py +22 -0
- lfx/components/deactivated/embed.py +16 -0
- lfx/components/deactivated/extract_key_from_data.py +46 -0
- lfx/components/deactivated/json_document_builder.py +57 -0
- lfx/components/deactivated/list_flows.py +20 -0
- lfx/components/deactivated/mcp_sse.py +61 -0
- lfx/components/deactivated/mcp_stdio.py +62 -0
- lfx/components/deactivated/merge_data.py +93 -0
- lfx/components/deactivated/message.py +37 -0
- lfx/components/deactivated/metal.py +54 -0
- lfx/components/deactivated/multi_query.py +59 -0
- lfx/components/deactivated/retriever.py +43 -0
- lfx/components/deactivated/selective_passthrough.py +77 -0
- lfx/components/deactivated/should_run_next.py +40 -0
- lfx/components/deactivated/split_text.py +63 -0
- lfx/components/deactivated/store_message.py +24 -0
- lfx/components/deactivated/sub_flow.py +124 -0
- lfx/components/deactivated/vectara_self_query.py +76 -0
- lfx/components/deactivated/vector_store.py +24 -0
- lfx/components/deepseek/__init__.py +34 -0
- lfx/components/deepseek/deepseek.py +136 -0
- lfx/components/docling/__init__.py +43 -0
- lfx/components/docling/chunk_docling_document.py +186 -0
- lfx/components/docling/docling_inline.py +238 -0
- lfx/components/docling/docling_remote.py +195 -0
- lfx/components/docling/export_docling_document.py +117 -0
- lfx/components/documentloaders/__init__.py +3 -0
- lfx/components/duckduckgo/__init__.py +3 -0
- lfx/components/duckduckgo/duck_duck_go_search_run.py +92 -0
- lfx/components/elastic/__init__.py +37 -0
- lfx/components/elastic/elasticsearch.py +267 -0
- lfx/components/elastic/opensearch.py +789 -0
- lfx/components/elastic/opensearch_multimodal.py +1575 -0
- lfx/components/embeddings/__init__.py +37 -0
- lfx/components/embeddings/similarity.py +77 -0
- lfx/components/embeddings/text_embedder.py +65 -0
- lfx/components/exa/__init__.py +3 -0
- lfx/components/exa/exa_search.py +68 -0
- lfx/components/files_and_knowledge/__init__.py +47 -0
- lfx/components/files_and_knowledge/directory.py +113 -0
- lfx/components/files_and_knowledge/file.py +841 -0
- lfx/components/files_and_knowledge/ingestion.py +694 -0
- lfx/components/files_and_knowledge/retrieval.py +264 -0
- lfx/components/files_and_knowledge/save_file.py +746 -0
- lfx/components/firecrawl/__init__.py +43 -0
- lfx/components/firecrawl/firecrawl_crawl_api.py +88 -0
- lfx/components/firecrawl/firecrawl_extract_api.py +136 -0
- lfx/components/firecrawl/firecrawl_map_api.py +89 -0
- lfx/components/firecrawl/firecrawl_scrape_api.py +73 -0
- lfx/components/flow_controls/__init__.py +58 -0
- lfx/components/flow_controls/conditional_router.py +208 -0
- lfx/components/flow_controls/data_conditional_router.py +126 -0
- lfx/components/flow_controls/flow_tool.py +111 -0
- lfx/components/flow_controls/listen.py +29 -0
- lfx/components/flow_controls/loop.py +163 -0
- lfx/components/flow_controls/notify.py +88 -0
- lfx/components/flow_controls/pass_message.py +36 -0
- lfx/components/flow_controls/run_flow.py +108 -0
- lfx/components/flow_controls/sub_flow.py +115 -0
- lfx/components/git/__init__.py +4 -0
- lfx/components/git/git.py +262 -0
- lfx/components/git/gitextractor.py +196 -0
- lfx/components/glean/__init__.py +3 -0
- lfx/components/glean/glean_search_api.py +173 -0
- lfx/components/google/__init__.py +17 -0
- lfx/components/google/gmail.py +193 -0
- lfx/components/google/google_bq_sql_executor.py +157 -0
- lfx/components/google/google_drive.py +92 -0
- lfx/components/google/google_drive_search.py +152 -0
- lfx/components/google/google_generative_ai.py +144 -0
- lfx/components/google/google_generative_ai_embeddings.py +141 -0
- lfx/components/google/google_oauth_token.py +89 -0
- lfx/components/google/google_search_api_core.py +68 -0
- lfx/components/google/google_serper_api_core.py +74 -0
- lfx/components/groq/__init__.py +34 -0
- lfx/components/groq/groq.py +143 -0
- lfx/components/helpers/__init__.py +154 -0
- lfx/components/homeassistant/__init__.py +7 -0
- lfx/components/homeassistant/home_assistant_control.py +152 -0
- lfx/components/homeassistant/list_home_assistant_states.py +137 -0
- lfx/components/huggingface/__init__.py +37 -0
- lfx/components/huggingface/huggingface.py +199 -0
- lfx/components/huggingface/huggingface_inference_api.py +106 -0
- lfx/components/ibm/__init__.py +34 -0
- lfx/components/ibm/watsonx.py +207 -0
- lfx/components/ibm/watsonx_embeddings.py +135 -0
- lfx/components/icosacomputing/__init__.py +5 -0
- lfx/components/icosacomputing/combinatorial_reasoner.py +84 -0
- lfx/components/input_output/__init__.py +40 -0
- lfx/components/input_output/chat.py +109 -0
- lfx/components/input_output/chat_output.py +184 -0
- lfx/components/input_output/text.py +27 -0
- lfx/components/input_output/text_output.py +29 -0
- lfx/components/input_output/webhook.py +56 -0
- lfx/components/jigsawstack/__init__.py +23 -0
- lfx/components/jigsawstack/ai_scrape.py +126 -0
- lfx/components/jigsawstack/ai_web_search.py +136 -0
- lfx/components/jigsawstack/file_read.py +115 -0
- lfx/components/jigsawstack/file_upload.py +94 -0
- lfx/components/jigsawstack/image_generation.py +205 -0
- lfx/components/jigsawstack/nsfw.py +60 -0
- lfx/components/jigsawstack/object_detection.py +124 -0
- lfx/components/jigsawstack/sentiment.py +112 -0
- lfx/components/jigsawstack/text_to_sql.py +90 -0
- lfx/components/jigsawstack/text_translate.py +77 -0
- lfx/components/jigsawstack/vocr.py +107 -0
- lfx/components/knowledge_bases/__init__.py +89 -0
- lfx/components/langchain_utilities/__init__.py +109 -0
- lfx/components/langchain_utilities/character.py +53 -0
- lfx/components/langchain_utilities/conversation.py +59 -0
- lfx/components/langchain_utilities/csv_agent.py +175 -0
- lfx/components/langchain_utilities/fake_embeddings.py +26 -0
- lfx/components/langchain_utilities/html_link_extractor.py +35 -0
- lfx/components/langchain_utilities/json_agent.py +100 -0
- lfx/components/langchain_utilities/langchain_hub.py +126 -0
- lfx/components/langchain_utilities/language_recursive.py +49 -0
- lfx/components/langchain_utilities/language_semantic.py +138 -0
- lfx/components/langchain_utilities/llm_checker.py +39 -0
- lfx/components/langchain_utilities/llm_math.py +42 -0
- lfx/components/langchain_utilities/natural_language.py +61 -0
- lfx/components/langchain_utilities/openai_tools.py +53 -0
- lfx/components/langchain_utilities/openapi.py +48 -0
- lfx/components/langchain_utilities/recursive_character.py +60 -0
- lfx/components/langchain_utilities/retrieval_qa.py +83 -0
- lfx/components/langchain_utilities/runnable_executor.py +137 -0
- lfx/components/langchain_utilities/self_query.py +80 -0
- lfx/components/langchain_utilities/spider.py +142 -0
- lfx/components/langchain_utilities/sql.py +40 -0
- lfx/components/langchain_utilities/sql_database.py +35 -0
- lfx/components/langchain_utilities/sql_generator.py +78 -0
- lfx/components/langchain_utilities/tool_calling.py +59 -0
- lfx/components/langchain_utilities/vector_store_info.py +49 -0
- lfx/components/langchain_utilities/vector_store_router.py +33 -0
- lfx/components/langchain_utilities/xml_agent.py +71 -0
- lfx/components/langwatch/__init__.py +3 -0
- lfx/components/langwatch/langwatch.py +278 -0
- lfx/components/link_extractors/__init__.py +3 -0
- lfx/components/llm_operations/__init__.py +46 -0
- lfx/components/llm_operations/batch_run.py +205 -0
- lfx/components/llm_operations/lambda_filter.py +218 -0
- lfx/components/llm_operations/llm_conditional_router.py +421 -0
- lfx/components/llm_operations/llm_selector.py +499 -0
- lfx/components/llm_operations/structured_output.py +244 -0
- lfx/components/lmstudio/__init__.py +34 -0
- lfx/components/lmstudio/lmstudioembeddings.py +89 -0
- lfx/components/lmstudio/lmstudiomodel.py +133 -0
- lfx/components/logic/__init__.py +181 -0
- lfx/components/maritalk/__init__.py +32 -0
- lfx/components/maritalk/maritalk.py +52 -0
- lfx/components/mem0/__init__.py +3 -0
- lfx/components/mem0/mem0_chat_memory.py +147 -0
- lfx/components/milvus/__init__.py +34 -0
- lfx/components/milvus/milvus.py +115 -0
- lfx/components/mistral/__init__.py +37 -0
- lfx/components/mistral/mistral.py +114 -0
- lfx/components/mistral/mistral_embeddings.py +58 -0
- lfx/components/models/__init__.py +89 -0
- lfx/components/models_and_agents/__init__.py +49 -0
- lfx/components/models_and_agents/agent.py +644 -0
- lfx/components/models_and_agents/embedding_model.py +423 -0
- lfx/components/models_and_agents/language_model.py +398 -0
- lfx/components/models_and_agents/mcp_component.py +594 -0
- lfx/components/models_and_agents/memory.py +268 -0
- lfx/components/models_and_agents/prompt.py +67 -0
- lfx/components/mongodb/__init__.py +34 -0
- lfx/components/mongodb/mongodb_atlas.py +213 -0
- lfx/components/needle/__init__.py +3 -0
- lfx/components/needle/needle.py +104 -0
- lfx/components/notdiamond/__init__.py +34 -0
- lfx/components/notdiamond/notdiamond.py +228 -0
- lfx/components/novita/__init__.py +32 -0
- lfx/components/novita/novita.py +130 -0
- lfx/components/nvidia/__init__.py +57 -0
- lfx/components/nvidia/nvidia.py +151 -0
- lfx/components/nvidia/nvidia_embedding.py +77 -0
- lfx/components/nvidia/nvidia_ingest.py +317 -0
- lfx/components/nvidia/nvidia_rerank.py +63 -0
- lfx/components/nvidia/system_assist.py +65 -0
- lfx/components/olivya/__init__.py +3 -0
- lfx/components/olivya/olivya.py +116 -0
- lfx/components/ollama/__init__.py +37 -0
- lfx/components/ollama/ollama.py +548 -0
- lfx/components/ollama/ollama_embeddings.py +103 -0
- lfx/components/openai/__init__.py +37 -0
- lfx/components/openai/openai.py +100 -0
- lfx/components/openai/openai_chat_model.py +176 -0
- lfx/components/openrouter/__init__.py +32 -0
- lfx/components/openrouter/openrouter.py +104 -0
- lfx/components/output_parsers/__init__.py +3 -0
- lfx/components/perplexity/__init__.py +34 -0
- lfx/components/perplexity/perplexity.py +75 -0
- lfx/components/pgvector/__init__.py +34 -0
- lfx/components/pgvector/pgvector.py +72 -0
- lfx/components/pinecone/__init__.py +34 -0
- lfx/components/pinecone/pinecone.py +134 -0
- lfx/components/processing/__init__.py +72 -0
- lfx/components/processing/alter_metadata.py +109 -0
- lfx/components/processing/combine_text.py +40 -0
- lfx/components/processing/converter.py +248 -0
- lfx/components/processing/create_data.py +111 -0
- lfx/components/processing/create_list.py +40 -0
- lfx/components/processing/data_operations.py +528 -0
- lfx/components/processing/data_to_dataframe.py +71 -0
- lfx/components/processing/dataframe_operations.py +313 -0
- lfx/components/processing/dataframe_to_toolset.py +259 -0
- lfx/components/processing/dynamic_create_data.py +357 -0
- lfx/components/processing/extract_key.py +54 -0
- lfx/components/processing/filter_data.py +43 -0
- lfx/components/processing/filter_data_values.py +89 -0
- lfx/components/processing/json_cleaner.py +104 -0
- lfx/components/processing/merge_data.py +91 -0
- lfx/components/processing/message_to_data.py +37 -0
- lfx/components/processing/output_parser.py +46 -0
- lfx/components/processing/parse_data.py +71 -0
- lfx/components/processing/parse_dataframe.py +69 -0
- lfx/components/processing/parse_json_data.py +91 -0
- lfx/components/processing/parser.py +148 -0
- lfx/components/processing/regex.py +83 -0
- lfx/components/processing/select_data.py +49 -0
- lfx/components/processing/split_text.py +141 -0
- lfx/components/processing/store_message.py +91 -0
- lfx/components/processing/update_data.py +161 -0
- lfx/components/prototypes/__init__.py +35 -0
- lfx/components/prototypes/python_function.py +73 -0
- lfx/components/qdrant/__init__.py +34 -0
- lfx/components/qdrant/qdrant.py +109 -0
- lfx/components/redis/__init__.py +37 -0
- lfx/components/redis/redis.py +89 -0
- lfx/components/redis/redis_chat.py +43 -0
- lfx/components/sambanova/__init__.py +32 -0
- lfx/components/sambanova/sambanova.py +84 -0
- lfx/components/scrapegraph/__init__.py +40 -0
- lfx/components/scrapegraph/scrapegraph_markdownify_api.py +64 -0
- lfx/components/scrapegraph/scrapegraph_search_api.py +64 -0
- lfx/components/scrapegraph/scrapegraph_smart_scraper_api.py +71 -0
- lfx/components/searchapi/__init__.py +34 -0
- lfx/components/searchapi/search.py +79 -0
- lfx/components/serpapi/__init__.py +3 -0
- lfx/components/serpapi/serp.py +115 -0
- lfx/components/supabase/__init__.py +34 -0
- lfx/components/supabase/supabase.py +76 -0
- lfx/components/tavily/__init__.py +4 -0
- lfx/components/tavily/tavily_extract.py +117 -0
- lfx/components/tavily/tavily_search.py +212 -0
- lfx/components/textsplitters/__init__.py +3 -0
- lfx/components/toolkits/__init__.py +3 -0
- lfx/components/tools/__init__.py +66 -0
- lfx/components/tools/calculator.py +109 -0
- lfx/components/tools/google_search_api.py +45 -0
- lfx/components/tools/google_serper_api.py +115 -0
- lfx/components/tools/python_code_structured_tool.py +328 -0
- lfx/components/tools/python_repl.py +98 -0
- lfx/components/tools/search_api.py +88 -0
- lfx/components/tools/searxng.py +145 -0
- lfx/components/tools/serp_api.py +120 -0
- lfx/components/tools/tavily_search_tool.py +345 -0
- lfx/components/tools/wikidata_api.py +103 -0
- lfx/components/tools/wikipedia_api.py +50 -0
- lfx/components/tools/yahoo_finance.py +130 -0
- lfx/components/twelvelabs/__init__.py +52 -0
- lfx/components/twelvelabs/convert_astra_results.py +84 -0
- lfx/components/twelvelabs/pegasus_index.py +311 -0
- lfx/components/twelvelabs/split_video.py +301 -0
- lfx/components/twelvelabs/text_embeddings.py +57 -0
- lfx/components/twelvelabs/twelvelabs_pegasus.py +408 -0
- lfx/components/twelvelabs/video_embeddings.py +100 -0
- lfx/components/twelvelabs/video_file.py +191 -0
- lfx/components/unstructured/__init__.py +3 -0
- lfx/components/unstructured/unstructured.py +121 -0
- lfx/components/upstash/__init__.py +34 -0
- lfx/components/upstash/upstash.py +124 -0
- lfx/components/utilities/__init__.py +43 -0
- lfx/components/utilities/calculator_core.py +89 -0
- lfx/components/utilities/current_date.py +42 -0
- lfx/components/utilities/id_generator.py +42 -0
- lfx/components/utilities/python_repl_core.py +98 -0
- lfx/components/vectara/__init__.py +37 -0
- lfx/components/vectara/vectara.py +97 -0
- lfx/components/vectara/vectara_rag.py +164 -0
- lfx/components/vectorstores/__init__.py +34 -0
- lfx/components/vectorstores/local_db.py +270 -0
- lfx/components/vertexai/__init__.py +37 -0
- lfx/components/vertexai/vertexai.py +71 -0
- lfx/components/vertexai/vertexai_embeddings.py +67 -0
- lfx/components/vlmrun/__init__.py +34 -0
- lfx/components/vlmrun/vlmrun_transcription.py +224 -0
- lfx/components/weaviate/__init__.py +34 -0
- lfx/components/weaviate/weaviate.py +89 -0
- lfx/components/wikipedia/__init__.py +4 -0
- lfx/components/wikipedia/wikidata.py +86 -0
- lfx/components/wikipedia/wikipedia.py +53 -0
- lfx/components/wolframalpha/__init__.py +3 -0
- lfx/components/wolframalpha/wolfram_alpha_api.py +54 -0
- lfx/components/xai/__init__.py +32 -0
- lfx/components/xai/xai.py +167 -0
- lfx/components/yahoosearch/__init__.py +3 -0
- lfx/components/yahoosearch/yahoo.py +137 -0
- lfx/components/youtube/__init__.py +52 -0
- lfx/components/youtube/channel.py +227 -0
- lfx/components/youtube/comments.py +231 -0
- lfx/components/youtube/playlist.py +33 -0
- lfx/components/youtube/search.py +120 -0
- lfx/components/youtube/trending.py +285 -0
- lfx/components/youtube/video_details.py +263 -0
- lfx/components/youtube/youtube_transcripts.py +206 -0
- lfx/components/zep/__init__.py +3 -0
- lfx/components/zep/zep.py +45 -0
- lfx/constants.py +6 -0
- lfx/custom/__init__.py +7 -0
- lfx/custom/attributes.py +87 -0
- lfx/custom/code_parser/__init__.py +3 -0
- lfx/custom/code_parser/code_parser.py +361 -0
- lfx/custom/custom_component/__init__.py +0 -0
- lfx/custom/custom_component/base_component.py +128 -0
- lfx/custom/custom_component/component.py +1890 -0
- lfx/custom/custom_component/component_with_cache.py +8 -0
- lfx/custom/custom_component/custom_component.py +650 -0
- lfx/custom/dependency_analyzer.py +165 -0
- lfx/custom/directory_reader/__init__.py +3 -0
- lfx/custom/directory_reader/directory_reader.py +359 -0
- lfx/custom/directory_reader/utils.py +171 -0
- lfx/custom/eval.py +12 -0
- lfx/custom/schema.py +32 -0
- lfx/custom/tree_visitor.py +21 -0
- lfx/custom/utils.py +877 -0
- lfx/custom/validate.py +523 -0
- lfx/events/__init__.py +1 -0
- lfx/events/event_manager.py +110 -0
- lfx/exceptions/__init__.py +0 -0
- lfx/exceptions/component.py +15 -0
- lfx/field_typing/__init__.py +91 -0
- lfx/field_typing/constants.py +216 -0
- lfx/field_typing/range_spec.py +35 -0
- lfx/graph/__init__.py +6 -0
- lfx/graph/edge/__init__.py +0 -0
- lfx/graph/edge/base.py +300 -0
- lfx/graph/edge/schema.py +119 -0
- lfx/graph/edge/utils.py +0 -0
- lfx/graph/graph/__init__.py +0 -0
- lfx/graph/graph/ascii.py +202 -0
- lfx/graph/graph/base.py +2298 -0
- lfx/graph/graph/constants.py +63 -0
- lfx/graph/graph/runnable_vertices_manager.py +133 -0
- lfx/graph/graph/schema.py +53 -0
- lfx/graph/graph/state_model.py +66 -0
- lfx/graph/graph/utils.py +1024 -0
- lfx/graph/schema.py +75 -0
- lfx/graph/state/__init__.py +0 -0
- lfx/graph/state/model.py +250 -0
- lfx/graph/utils.py +206 -0
- lfx/graph/vertex/__init__.py +0 -0
- lfx/graph/vertex/base.py +826 -0
- lfx/graph/vertex/constants.py +0 -0
- lfx/graph/vertex/exceptions.py +4 -0
- lfx/graph/vertex/param_handler.py +316 -0
- lfx/graph/vertex/schema.py +26 -0
- lfx/graph/vertex/utils.py +19 -0
- lfx/graph/vertex/vertex_types.py +489 -0
- lfx/helpers/__init__.py +141 -0
- lfx/helpers/base_model.py +71 -0
- lfx/helpers/custom.py +13 -0
- lfx/helpers/data.py +167 -0
- lfx/helpers/flow.py +308 -0
- lfx/inputs/__init__.py +68 -0
- lfx/inputs/constants.py +2 -0
- lfx/inputs/input_mixin.py +352 -0
- lfx/inputs/inputs.py +718 -0
- lfx/inputs/validators.py +19 -0
- lfx/interface/__init__.py +6 -0
- lfx/interface/components.py +897 -0
- lfx/interface/importing/__init__.py +5 -0
- lfx/interface/importing/utils.py +39 -0
- lfx/interface/initialize/__init__.py +3 -0
- lfx/interface/initialize/loading.py +317 -0
- lfx/interface/listing.py +26 -0
- lfx/interface/run.py +16 -0
- lfx/interface/utils.py +111 -0
- lfx/io/__init__.py +63 -0
- lfx/io/schema.py +295 -0
- lfx/load/__init__.py +8 -0
- lfx/load/load.py +256 -0
- lfx/load/utils.py +99 -0
- lfx/log/__init__.py +5 -0
- lfx/log/logger.py +411 -0
- lfx/logging/__init__.py +11 -0
- lfx/logging/logger.py +24 -0
- lfx/memory/__init__.py +70 -0
- lfx/memory/stubs.py +302 -0
- lfx/processing/__init__.py +1 -0
- lfx/processing/process.py +238 -0
- lfx/processing/utils.py +25 -0
- lfx/py.typed +0 -0
- lfx/schema/__init__.py +66 -0
- lfx/schema/artifact.py +83 -0
- lfx/schema/content_block.py +62 -0
- lfx/schema/content_types.py +91 -0
- lfx/schema/cross_module.py +80 -0
- lfx/schema/data.py +309 -0
- lfx/schema/dataframe.py +210 -0
- lfx/schema/dotdict.py +74 -0
- lfx/schema/encoders.py +13 -0
- lfx/schema/graph.py +47 -0
- lfx/schema/image.py +184 -0
- lfx/schema/json_schema.py +186 -0
- lfx/schema/log.py +62 -0
- lfx/schema/message.py +493 -0
- lfx/schema/openai_responses_schemas.py +74 -0
- lfx/schema/properties.py +41 -0
- lfx/schema/schema.py +180 -0
- lfx/schema/serialize.py +13 -0
- lfx/schema/table.py +142 -0
- lfx/schema/validators.py +114 -0
- lfx/serialization/__init__.py +5 -0
- lfx/serialization/constants.py +2 -0
- lfx/serialization/serialization.py +314 -0
- lfx/services/__init__.py +26 -0
- lfx/services/base.py +28 -0
- lfx/services/cache/__init__.py +6 -0
- lfx/services/cache/base.py +183 -0
- lfx/services/cache/service.py +166 -0
- lfx/services/cache/utils.py +169 -0
- lfx/services/chat/__init__.py +1 -0
- lfx/services/chat/config.py +2 -0
- lfx/services/chat/schema.py +10 -0
- lfx/services/database/__init__.py +5 -0
- lfx/services/database/service.py +25 -0
- lfx/services/deps.py +194 -0
- lfx/services/factory.py +19 -0
- lfx/services/initialize.py +19 -0
- lfx/services/interfaces.py +103 -0
- lfx/services/manager.py +185 -0
- lfx/services/mcp_composer/__init__.py +6 -0
- lfx/services/mcp_composer/factory.py +16 -0
- lfx/services/mcp_composer/service.py +1441 -0
- lfx/services/schema.py +21 -0
- lfx/services/session.py +87 -0
- lfx/services/settings/__init__.py +3 -0
- lfx/services/settings/auth.py +133 -0
- lfx/services/settings/base.py +668 -0
- lfx/services/settings/constants.py +43 -0
- lfx/services/settings/factory.py +23 -0
- lfx/services/settings/feature_flags.py +11 -0
- lfx/services/settings/service.py +35 -0
- lfx/services/settings/utils.py +40 -0
- lfx/services/shared_component_cache/__init__.py +1 -0
- lfx/services/shared_component_cache/factory.py +30 -0
- lfx/services/shared_component_cache/service.py +9 -0
- lfx/services/storage/__init__.py +5 -0
- lfx/services/storage/local.py +185 -0
- lfx/services/storage/service.py +177 -0
- lfx/services/tracing/__init__.py +1 -0
- lfx/services/tracing/service.py +21 -0
- lfx/settings.py +6 -0
- lfx/template/__init__.py +6 -0
- lfx/template/field/__init__.py +0 -0
- lfx/template/field/base.py +260 -0
- lfx/template/field/prompt.py +15 -0
- lfx/template/frontend_node/__init__.py +6 -0
- lfx/template/frontend_node/base.py +214 -0
- lfx/template/frontend_node/constants.py +65 -0
- lfx/template/frontend_node/custom_components.py +79 -0
- lfx/template/template/__init__.py +0 -0
- lfx/template/template/base.py +100 -0
- lfx/template/utils.py +217 -0
- lfx/type_extraction/__init__.py +19 -0
- lfx/type_extraction/type_extraction.py +75 -0
- lfx/type_extraction.py +80 -0
- lfx/utils/__init__.py +1 -0
- lfx/utils/async_helpers.py +42 -0
- lfx/utils/component_utils.py +154 -0
- lfx/utils/concurrency.py +60 -0
- lfx/utils/connection_string_parser.py +11 -0
- lfx/utils/constants.py +233 -0
- lfx/utils/data_structure.py +212 -0
- lfx/utils/exceptions.py +22 -0
- lfx/utils/helpers.py +34 -0
- lfx/utils/image.py +79 -0
- lfx/utils/langflow_utils.py +52 -0
- lfx/utils/lazy_load.py +15 -0
- lfx/utils/request_utils.py +18 -0
- lfx/utils/schemas.py +139 -0
- lfx/utils/ssrf_protection.py +384 -0
- lfx/utils/util.py +626 -0
- lfx/utils/util_strings.py +56 -0
- lfx/utils/validate_cloud.py +26 -0
- lfx/utils/version.py +24 -0
- lfx_nightly-0.2.0.dev25.dist-info/METADATA +312 -0
- lfx_nightly-0.2.0.dev25.dist-info/RECORD +769 -0
- lfx_nightly-0.2.0.dev25.dist-info/WHEEL +4 -0
- lfx_nightly-0.2.0.dev25.dist-info/entry_points.txt +2 -0
lfx/components/langchain_utilities/language_semantic.py
@@ -0,0 +1,138 @@
+from langchain.docstore.document import Document
+from langchain_experimental.text_splitter import SemanticChunker
+
+from lfx.base.textsplitters.model import LCTextSplitterComponent
+from lfx.io import (
+    DropdownInput,
+    FloatInput,
+    HandleInput,
+    IntInput,
+    MessageTextInput,
+    Output,
+)
+from lfx.schema.data import Data
+
+
+class SemanticTextSplitterComponent(LCTextSplitterComponent):
+    """Split text into semantically meaningful chunks using semantic similarity."""
+
+    display_name: str = "Semantic Text Splitter"
+    name: str = "SemanticTextSplitter"
+    description: str = "Split text into semantically meaningful chunks using semantic similarity."
+    documentation = "https://python.langchain.com/docs/how_to/semantic-chunker/"
+    beta = True  # this component is beta because it is imported from langchain_experimental
+    icon = "LangChain"
+
+    inputs = [
+        HandleInput(
+            name="data_inputs",
+            display_name="Data Inputs",
+            info="List of Data objects containing text and metadata to split.",
+            input_types=["Data"],
+            is_list=True,
+            required=True,
+        ),
+        HandleInput(
+            name="embeddings",
+            display_name="Embeddings",
+            info="Embeddings model to use for semantic similarity. Required.",
+            input_types=["Embeddings"],
+            is_list=False,
+            required=True,
+        ),
+        DropdownInput(
+            name="breakpoint_threshold_type",
+            display_name="Breakpoint Threshold Type",
+            info=(
+                "Method to determine breakpoints. Options: 'percentile', "
+                "'standard_deviation', 'interquartile'. Defaults to 'percentile'."
+            ),
+            value="percentile",
+            options=["percentile", "standard_deviation", "interquartile"],
+        ),
+        FloatInput(
+            name="breakpoint_threshold_amount",
+            display_name="Breakpoint Threshold Amount",
+            info="Numerical amount for the breakpoint threshold.",
+            value=0.5,
+        ),
+        IntInput(
+            name="number_of_chunks",
+            display_name="Number of Chunks",
+            info="Number of chunks to split the text into.",
+            value=5,
+        ),
+        MessageTextInput(
+            name="sentence_split_regex",
+            display_name="Sentence Split Regex",
+            info="Regular expression to split sentences. Optional.",
+            value="",
+            advanced=True,
+        ),
+        IntInput(
+            name="buffer_size",
+            display_name="Buffer Size",
+            info="Size of the buffer.",
+            value=0,
+            advanced=True,
+        ),
+    ]
+
+    outputs = [
+        Output(display_name="Chunks", name="chunks", method="split_text"),
+    ]
+
+    def _docs_to_data(self, docs: list[Document]) -> list[Data]:
+        """Convert a list of Document objects to Data objects."""
+        return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]
+
+    def split_text(self) -> list[Data]:
+        """Split the input data into semantically meaningful chunks."""
+        try:
+            embeddings = getattr(self, "embeddings", None)
+            if embeddings is None:
+                error_msg = "An embeddings model is required for SemanticTextSplitter."
+                raise ValueError(error_msg)
+
+            if not self.data_inputs:
+                error_msg = "Data inputs cannot be empty."
+                raise ValueError(error_msg)
+
+            documents = []
+            for _input in self.data_inputs:
+                if isinstance(_input, Data):
+                    documents.append(_input.to_lc_document())
+                else:
+                    error_msg = f"Invalid data input type: {_input}"
+                    raise TypeError(error_msg)
+
+            if not documents:
+                error_msg = "No valid Data objects found in data_inputs."
+                raise ValueError(error_msg)
+
+            texts = [doc.page_content for doc in documents]
+            metadatas = [doc.metadata for doc in documents]
+
+            splitter_params = {
+                "embeddings": embeddings,
+                "breakpoint_threshold_type": self.breakpoint_threshold_type or "percentile",
+                "breakpoint_threshold_amount": self.breakpoint_threshold_amount,
+                "number_of_chunks": self.number_of_chunks,
+                "buffer_size": self.buffer_size,
+            }
+
+            if self.sentence_split_regex:
+                splitter_params["sentence_split_regex"] = self.sentence_split_regex
+
+            splitter = SemanticChunker(**splitter_params)
+            docs = splitter.create_documents(texts, metadatas=metadatas)
+
+            data = self._docs_to_data(docs)
+            self.status = data
+
+        except Exception as e:
+            error_msg = f"An error occurred during semantic splitting: {e}"
+            raise RuntimeError(error_msg) from e
+
+        else:
+            return data
lfx/components/langchain_utilities/llm_checker.py
@@ -0,0 +1,39 @@
+from langchain.chains import LLMCheckerChain
+
+from lfx.base.chains.model import LCChainComponent
+from lfx.inputs.inputs import HandleInput, MultilineInput
+from lfx.schema import Message
+
+
+class LLMCheckerChainComponent(LCChainComponent):
+    display_name = "LLMCheckerChain"
+    description = "Chain for question-answering with self-verification."
+    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_checker"
+    name = "LLMCheckerChain"
+    legacy: bool = True
+    icon = "LangChain"
+    inputs = [
+        MultilineInput(
+            name="input_value",
+            display_name="Input",
+            info="The input value to pass to the chain.",
+            required=True,
+        ),
+        HandleInput(
+            name="llm",
+            display_name="Language Model",
+            input_types=["LanguageModel"],
+            required=True,
+        ),
+    ]
+
+    def invoke_chain(self) -> Message:
+        chain = LLMCheckerChain.from_llm(llm=self.llm)
+        response = chain.invoke(
+            {chain.input_key: self.input_value},
+            config={"callbacks": self.get_langchain_callbacks()},
+        )
+        result = response.get(chain.output_key, "")
+        result = str(result)
+        self.status = result
+        return Message(text=result)
lfx/components/langchain_utilities/llm_math.py
@@ -0,0 +1,42 @@
+from langchain.chains import LLMMathChain
+
+from lfx.base.chains.model import LCChainComponent
+from lfx.inputs.inputs import HandleInput, MultilineInput
+from lfx.schema import Message
+from lfx.template.field.base import Output
+
+
+class LLMMathChainComponent(LCChainComponent):
+    display_name = "LLMMathChain"
+    description = "Chain that interprets a prompt and executes python code to do math."
+    documentation = "https://python.langchain.com/docs/modules/chains/additional/llm_math"
+    name = "LLMMathChain"
+    legacy: bool = True
+    icon = "LangChain"
+    inputs = [
+        MultilineInput(
+            name="input_value",
+            display_name="Input",
+            info="The input value to pass to the chain.",
+            required=True,
+        ),
+        HandleInput(
+            name="llm",
+            display_name="Language Model",
+            input_types=["LanguageModel"],
+            required=True,
+        ),
+    ]
+
+    outputs = [Output(display_name="Message", name="text", method="invoke_chain")]
+
+    def invoke_chain(self) -> Message:
+        chain = LLMMathChain.from_llm(llm=self.llm)
+        response = chain.invoke(
+            {chain.input_key: self.input_value},
+            config={"callbacks": self.get_langchain_callbacks()},
+        )
+        result = response.get(chain.output_key, "")
+        result = str(result)
+        self.status = result
+        return Message(text=result)
lfx/components/langchain_utilities/natural_language.py
@@ -0,0 +1,61 @@
+from typing import Any
+
+from langchain_text_splitters import NLTKTextSplitter, TextSplitter
+
+from lfx.base.textsplitters.model import LCTextSplitterComponent
+from lfx.inputs.inputs import DataInput, IntInput, MessageTextInput
+from lfx.utils.util import unescape_string
+
+
+class NaturalLanguageTextSplitterComponent(LCTextSplitterComponent):
+    display_name = "Natural Language Text Splitter"
+    description = "Split text based on natural language boundaries, optimized for a specified language."
+    documentation = (
+        "https://python.langchain.com/v0.1/docs/modules/data_connection/document_transformers/split_by_token/#nltk"
+    )
+    name = "NaturalLanguageTextSplitter"
+    icon = "LangChain"
+    inputs = [
+        IntInput(
+            name="chunk_size",
+            display_name="Chunk Size",
+            info="The maximum number of characters in each chunk after splitting.",
+            value=1000,
+        ),
+        IntInput(
+            name="chunk_overlap",
+            display_name="Chunk Overlap",
+            info="The number of characters that overlap between consecutive chunks.",
+            value=200,
+        ),
+        DataInput(
+            name="data_input",
+            display_name="Input",
+            info="The text data to be split.",
+            input_types=["Document", "Data"],
+            required=True,
+        ),
+        MessageTextInput(
+            name="separator",
+            display_name="Separator",
+            info='The character(s) to use as a delimiter when splitting text.\nDefaults to "\\n\\n" if left empty.',
+        ),
+        MessageTextInput(
+            name="language",
+            display_name="Language",
+            info='The language of the text. Default is "English". '
+            "Supports multiple languages for better text boundary recognition.",
+        ),
+    ]
+
+    def get_data_input(self) -> Any:
+        return self.data_input
+
+    def build_text_splitter(self) -> TextSplitter:
+        separator = unescape_string(self.separator) if self.separator else "\n\n"
+        return NLTKTextSplitter(
+            language=self.language.lower() if self.language else "english",
+            separator=separator,
+            chunk_size=self.chunk_size,
+            chunk_overlap=self.chunk_overlap,
+        )
lfx/components/langchain_utilities/openai_tools.py
@@ -0,0 +1,53 @@
+from langchain.agents import create_openai_tools_agent
+from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
+
+from lfx.base.agents.agent import LCToolsAgentComponent
+from lfx.inputs.inputs import (
+    DataInput,
+    HandleInput,
+    MultilineInput,
+)
+from lfx.schema.data import Data
+
+
+class OpenAIToolsAgentComponent(LCToolsAgentComponent):
+    display_name: str = "OpenAI Tools Agent"
+    description: str = "Agent that uses tools via openai-tools."
+    icon = "LangChain"
+    name = "OpenAIToolsAgent"
+
+    inputs = [
+        *LCToolsAgentComponent.get_base_inputs(),
+        HandleInput(
+            name="llm",
+            display_name="Language Model",
+            input_types=["LanguageModel", "ToolEnabledLanguageModel"],
+            required=True,
+        ),
+        MultilineInput(
+            name="system_prompt",
+            display_name="System Prompt",
+            info="System prompt for the agent.",
+            value="You are a helpful assistant",
+        ),
+        MultilineInput(
+            name="user_prompt", display_name="Prompt", info="This prompt must contain 'input' key.", value="{input}"
+        ),
+        DataInput(name="chat_history", display_name="Chat History", is_list=True, advanced=True),
+    ]
+
+    def get_chat_history_data(self) -> list[Data] | None:
+        return self.chat_history
+
+    def create_agent_runnable(self):
+        if "input" not in self.user_prompt:
+            msg = "Prompt must contain 'input' key."
+            raise ValueError(msg)
+        messages = [
+            ("system", self.system_prompt),
+            ("placeholder", "{chat_history}"),
+            HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=["input"], template=self.user_prompt)),
+            ("placeholder", "{agent_scratchpad}"),
+        ]
+        prompt = ChatPromptTemplate.from_messages(messages)
+        return create_openai_tools_agent(self.llm, self.tools, prompt)
lfx/components/langchain_utilities/openapi.py
@@ -0,0 +1,48 @@
+from pathlib import Path
+
+import yaml
+from langchain.agents import AgentExecutor
+from langchain_community.agent_toolkits import create_openapi_agent
+from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
+from langchain_community.tools.json.tool import JsonSpec
+from langchain_community.utilities.requests import TextRequestsWrapper
+
+from lfx.base.agents.agent import LCAgentComponent
+from lfx.inputs.inputs import BoolInput, FileInput, HandleInput
+
+
+class OpenAPIAgentComponent(LCAgentComponent):
+    display_name = "OpenAPI Agent"
+    description = "Agent to interact with OpenAPI API."
+    name = "OpenAPIAgent"
+    icon = "LangChain"
+    inputs = [
+        *LCAgentComponent.get_base_inputs(),
+        HandleInput(name="llm", display_name="Language Model", input_types=["LanguageModel"], required=True),
+        FileInput(name="path", display_name="File Path", file_types=["json", "yaml", "yml"], required=True),
+        BoolInput(name="allow_dangerous_requests", display_name="Allow Dangerous Requests", value=False, required=True),
+    ]
+
+    def build_agent(self) -> AgentExecutor:
+        path = Path(self.path)
+        if path.suffix in {"yaml", "yml"}:
+            with path.open(encoding="utf-8") as file:
+                yaml_dict = yaml.safe_load(file)
+            spec = JsonSpec(dict_=yaml_dict)
+        else:
+            spec = JsonSpec.from_file(path)
+        requests_wrapper = TextRequestsWrapper()
+        toolkit = OpenAPIToolkit.from_llm(
+            llm=self.llm,
+            json_spec=spec,
+            requests_wrapper=requests_wrapper,
+            allow_dangerous_requests=self.allow_dangerous_requests,
+        )
+
+        agent_args = self.get_agent_kwargs()
+
+        # This is bit weird - generally other create_*_agent functions have max_iterations in the
+        # `agent_executor_kwargs`, but openai has this parameter passed directly.
+        agent_args["max_iterations"] = agent_args["agent_executor_kwargs"]["max_iterations"]
+        del agent_args["agent_executor_kwargs"]["max_iterations"]
+        return create_openapi_agent(llm=self.llm, toolkit=toolkit, **agent_args)
lfx/components/langchain_utilities/recursive_character.py
@@ -0,0 +1,60 @@
+from typing import Any
+
+from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
+
+from lfx.base.textsplitters.model import LCTextSplitterComponent
+from lfx.inputs.inputs import DataInput, IntInput, MessageTextInput
+from lfx.utils.util import unescape_string
+
+
+class RecursiveCharacterTextSplitterComponent(LCTextSplitterComponent):
+    display_name: str = "Recursive Character Text Splitter"
+    description: str = "Split text trying to keep all related text together."
+    documentation: str = "https://docs.langflow.org/components-processing"
+    name = "RecursiveCharacterTextSplitter"
+    icon = "LangChain"
+
+    inputs = [
+        IntInput(
+            name="chunk_size",
+            display_name="Chunk Size",
+            info="The maximum length of each chunk.",
+            value=1000,
+        ),
+        IntInput(
+            name="chunk_overlap",
+            display_name="Chunk Overlap",
+            info="The amount of overlap between chunks.",
+            value=200,
+        ),
+        DataInput(
+            name="data_input",
+            display_name="Input",
+            info="The texts to split.",
+            input_types=["Document", "Data"],
+            required=True,
+        ),
+        MessageTextInput(
+            name="separators",
+            display_name="Separators",
+            info='The characters to split on.\nIf left empty defaults to ["\\n\\n", "\\n", " ", ""].',
+            is_list=True,
+        ),
+    ]
+
+    def get_data_input(self) -> Any:
+        return self.data_input
+
+    def build_text_splitter(self) -> TextSplitter:
+        if not self.separators:
+            separators: list[str] | None = None
+        else:
+            # check if the separators list has escaped characters
+            # if there are escaped characters, unescape them
+            separators = [unescape_string(x) for x in self.separators]
+
+        return RecursiveCharacterTextSplitter(
+            separators=separators,
+            chunk_size=self.chunk_size,
+            chunk_overlap=self.chunk_overlap,
+        )
lfx/components/langchain_utilities/retrieval_qa.py
@@ -0,0 +1,83 @@
+from typing import cast
+
+from langchain.chains import RetrievalQA
+
+from lfx.base.chains.model import LCChainComponent
+from lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MultilineInput
+from lfx.schema import Message
+
+
+class RetrievalQAComponent(LCChainComponent):
+    display_name = "Retrieval QA"
+    description = "Chain for question-answering querying sources from a retriever."
+    name = "RetrievalQA"
+    legacy: bool = True
+    icon = "LangChain"
+    inputs = [
+        MultilineInput(
+            name="input_value",
+            display_name="Input",
+            info="The input value to pass to the chain.",
+            required=True,
+        ),
+        DropdownInput(
+            name="chain_type",
+            display_name="Chain Type",
+            info="Chain type to use.",
+            options=["Stuff", "Map Reduce", "Refine", "Map Rerank"],
+            value="Stuff",
+            advanced=True,
+        ),
+        HandleInput(
+            name="llm",
+            display_name="Language Model",
+            input_types=["LanguageModel"],
+            required=True,
+        ),
+        HandleInput(
+            name="retriever",
+            display_name="Retriever",
+            input_types=["Retriever"],
+            required=True,
+        ),
+        HandleInput(
+            name="memory",
+            display_name="Memory",
+            input_types=["BaseChatMemory"],
+        ),
+        BoolInput(
+            name="return_source_documents",
+            display_name="Return Source Documents",
+            value=False,
+        ),
+    ]
+
+    def invoke_chain(self) -> Message:
+        chain_type = self.chain_type.lower().replace(" ", "_")
+        if self.memory:
+            self.memory.input_key = "query"
+            self.memory.output_key = "result"
+
+        runnable = RetrievalQA.from_chain_type(
+            llm=self.llm,
+            chain_type=chain_type,
+            retriever=self.retriever,
+            memory=self.memory,
+            # always include to help debugging
+            #
+            return_source_documents=True,
+        )
+
+        result = runnable.invoke(
+            {"query": self.input_value},
+            config={"callbacks": self.get_langchain_callbacks()},
+        )
+
+        source_docs = self.to_data(result.get("source_documents", keys=[]))
+        result_str = str(result.get("result", ""))
+        if self.return_source_documents and len(source_docs):
+            references_str = self.create_references_from_data(source_docs)
+            result_str = f"{result_str}\n{references_str}"
+        # put the entire result to debug history, query and content
+        self.status = {**result, "source_documents": source_docs, "output": result_str}
+        return cast("Message", result_str)
lfx/components/langchain_utilities/runnable_executor.py
@@ -0,0 +1,137 @@
+from langchain.agents import AgentExecutor
+
+from lfx.custom.custom_component.component import Component
+from lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput
+from lfx.schema.message import Message
+from lfx.template.field.base import Output
+
+
+class RunnableExecComponent(Component):
+    description = "Execute a runnable. It will try to guess the input and output keys."
+    display_name = "Runnable Executor"
+    name = "RunnableExecutor"
+    beta: bool = True
+    icon = "LangChain"
+
+    inputs = [
+        MessageTextInput(name="input_value", display_name="Input", required=True),
+        HandleInput(
+            name="runnable",
+            display_name="Agent Executor",
+            input_types=["Chain", "AgentExecutor", "Agent", "Runnable"],
+            required=True,
+        ),
+        MessageTextInput(
+            name="input_key",
+            display_name="Input Key",
+            value="input",
+            advanced=True,
+        ),
+        MessageTextInput(
+            name="output_key",
+            display_name="Output Key",
+            value="output",
+            advanced=True,
+        ),
+        BoolInput(
+            name="use_stream",
+            display_name="Stream",
+            value=False,
+        ),
+    ]
+
+    outputs = [
+        Output(
+            display_name="Message",
+            name="text",
+            method="build_executor",
+        ),
+    ]
+
+    def get_output(self, result, input_key, output_key):
+        """Retrieves the output value from the given result dictionary based on the specified input and output keys.
+
+        Args:
+            result (dict): The result dictionary containing the output value.
+            input_key (str): The key used to retrieve the input value from the result dictionary.
+            output_key (str): The key used to retrieve the output value from the result dictionary.
+
+        Returns:
+            tuple: A tuple containing the output value and the status message.
+
+        """
+        possible_output_keys = ["answer", "response", "output", "result", "text"]
+        status = ""
+        result_value = None
+
+        if output_key in result:
+            result_value = result.get(output_key)
+        elif len(result) == 2 and input_key in result:  # noqa: PLR2004
+            # get the other key from the result dict
+            other_key = next(k for k in result if k != input_key)
+            if other_key == output_key:
+                result_value = result.get(output_key)
+            else:
+                status += f"Warning: The output key is not '{output_key}'. The output key is '{other_key}'."
+                result_value = result.get(other_key)
+        elif len(result) == 1:
+            result_value = next(iter(result.values()))
+        elif any(k in result for k in possible_output_keys):
+            for key in possible_output_keys:
+                if key in result:
+                    result_value = result.get(key)
+                    status += f"Output key: '{key}'."
+                    break
+            if result_value is None:
+                result_value = result
+                status += f"Warning: The output key is not '{output_key}'."
+        else:
+            result_value = result
+            status += f"Warning: The output key is not '{output_key}'."
+
+        return result_value, status
+
+    def get_input_dict(self, runnable, input_key, input_value):
+        """Returns a dictionary containing the input key-value pair for the given runnable.
+
+        Args:
+            runnable: The runnable object.
+            input_key: The key for the input value.
+            input_value: The value for the input key.
+
+        Returns:
+            input_dict: A dictionary containing the input key-value pair.
+            status: A status message indicating if the input key is not in the runnable's input keys.
+        """
+        input_dict = {}
+        status = ""
+        if hasattr(runnable, "input_keys"):
+            # Check if input_key is in the runnable's input_keys
+            if input_key in runnable.input_keys:
+                input_dict[input_key] = input_value
+            else:
+                input_dict = dict.fromkeys(runnable.input_keys, input_value)
+                status = f"Warning: The input key is not '{input_key}'. The input key is '{runnable.input_keys}'."
+        return input_dict, status
+
+    async def build_executor(self) -> Message:
+        input_dict, status = self.get_input_dict(self.runnable, self.input_key, self.input_value)
+        if not isinstance(self.runnable, AgentExecutor):
+            msg = "The runnable must be an AgentExecutor"
+            raise TypeError(msg)
+
+        if self.use_stream:
+            return self.astream_events(input_dict)
+        result = await self.runnable.ainvoke(input_dict)
+        result_value, status_ = self.get_output(result, self.input_key, self.output_key)
+        status += status_
+        status += f"\n\nOutput: {result_value}\n\nRaw Output: {result}"
+        self.status = status
+        return result_value
+
+    async def astream_events(self, runnable_input):
+        async for event in self.runnable.astream_events(runnable_input, version="v1"):
+            if event.get("event") != "on_chat_model_stream":
+                continue
+
+            yield event.get("data").get("chunk")