lfx-nightly 0.1.11.dev0 (lfx_nightly-0.1.11.dev0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lfx/__init__.py +0 -0
- lfx/__main__.py +25 -0
- lfx/base/__init__.py +0 -0
- lfx/base/agents/__init__.py +0 -0
- lfx/base/agents/agent.py +268 -0
- lfx/base/agents/callback.py +130 -0
- lfx/base/agents/context.py +109 -0
- lfx/base/agents/crewai/__init__.py +0 -0
- lfx/base/agents/crewai/crew.py +231 -0
- lfx/base/agents/crewai/tasks.py +12 -0
- lfx/base/agents/default_prompts.py +23 -0
- lfx/base/agents/errors.py +15 -0
- lfx/base/agents/events.py +346 -0
- lfx/base/agents/utils.py +205 -0
- lfx/base/astra_assistants/__init__.py +0 -0
- lfx/base/astra_assistants/util.py +171 -0
- lfx/base/chains/__init__.py +0 -0
- lfx/base/chains/model.py +19 -0
- lfx/base/composio/__init__.py +0 -0
- lfx/base/composio/composio_base.py +1291 -0
- lfx/base/compressors/__init__.py +0 -0
- lfx/base/compressors/model.py +60 -0
- lfx/base/constants.py +46 -0
- lfx/base/curl/__init__.py +0 -0
- lfx/base/curl/parse.py +188 -0
- lfx/base/data/__init__.py +5 -0
- lfx/base/data/base_file.py +685 -0
- lfx/base/data/docling_utils.py +245 -0
- lfx/base/data/utils.py +198 -0
- lfx/base/document_transformers/__init__.py +0 -0
- lfx/base/document_transformers/model.py +43 -0
- lfx/base/embeddings/__init__.py +0 -0
- lfx/base/embeddings/aiml_embeddings.py +62 -0
- lfx/base/embeddings/model.py +26 -0
- lfx/base/flow_processing/__init__.py +0 -0
- lfx/base/flow_processing/utils.py +86 -0
- lfx/base/huggingface/__init__.py +0 -0
- lfx/base/huggingface/model_bridge.py +133 -0
- lfx/base/io/__init__.py +0 -0
- lfx/base/io/chat.py +20 -0
- lfx/base/io/text.py +22 -0
- lfx/base/langchain_utilities/__init__.py +0 -0
- lfx/base/langchain_utilities/model.py +35 -0
- lfx/base/langchain_utilities/spider_constants.py +1 -0
- lfx/base/langwatch/__init__.py +0 -0
- lfx/base/langwatch/utils.py +18 -0
- lfx/base/mcp/__init__.py +0 -0
- lfx/base/mcp/constants.py +2 -0
- lfx/base/mcp/util.py +1398 -0
- lfx/base/memory/__init__.py +0 -0
- lfx/base/memory/memory.py +49 -0
- lfx/base/memory/model.py +38 -0
- lfx/base/models/__init__.py +3 -0
- lfx/base/models/aiml_constants.py +51 -0
- lfx/base/models/anthropic_constants.py +47 -0
- lfx/base/models/aws_constants.py +151 -0
- lfx/base/models/chat_result.py +76 -0
- lfx/base/models/google_generative_ai_constants.py +70 -0
- lfx/base/models/groq_constants.py +134 -0
- lfx/base/models/model.py +375 -0
- lfx/base/models/model_input_constants.py +307 -0
- lfx/base/models/model_metadata.py +41 -0
- lfx/base/models/model_utils.py +8 -0
- lfx/base/models/novita_constants.py +35 -0
- lfx/base/models/ollama_constants.py +49 -0
- lfx/base/models/openai_constants.py +122 -0
- lfx/base/models/sambanova_constants.py +18 -0
- lfx/base/processing/__init__.py +0 -0
- lfx/base/prompts/__init__.py +0 -0
- lfx/base/prompts/api_utils.py +224 -0
- lfx/base/prompts/utils.py +61 -0
- lfx/base/textsplitters/__init__.py +0 -0
- lfx/base/textsplitters/model.py +28 -0
- lfx/base/tools/__init__.py +0 -0
- lfx/base/tools/base.py +26 -0
- lfx/base/tools/component_tool.py +325 -0
- lfx/base/tools/constants.py +49 -0
- lfx/base/tools/flow_tool.py +132 -0
- lfx/base/tools/run_flow.py +224 -0
- lfx/base/vectorstores/__init__.py +0 -0
- lfx/base/vectorstores/model.py +193 -0
- lfx/base/vectorstores/utils.py +22 -0
- lfx/base/vectorstores/vector_store_connection_decorator.py +52 -0
- lfx/cli/__init__.py +5 -0
- lfx/cli/commands.py +319 -0
- lfx/cli/common.py +650 -0
- lfx/cli/run.py +441 -0
- lfx/cli/script_loader.py +247 -0
- lfx/cli/serve_app.py +546 -0
- lfx/cli/validation.py +69 -0
- lfx/components/FAISS/__init__.py +34 -0
- lfx/components/FAISS/faiss.py +111 -0
- lfx/components/Notion/__init__.py +19 -0
- lfx/components/Notion/add_content_to_page.py +269 -0
- lfx/components/Notion/create_page.py +94 -0
- lfx/components/Notion/list_database_properties.py +68 -0
- lfx/components/Notion/list_pages.py +122 -0
- lfx/components/Notion/list_users.py +77 -0
- lfx/components/Notion/page_content_viewer.py +93 -0
- lfx/components/Notion/search.py +111 -0
- lfx/components/Notion/update_page_property.py +114 -0
- lfx/components/__init__.py +411 -0
- lfx/components/_importing.py +42 -0
- lfx/components/agentql/__init__.py +3 -0
- lfx/components/agentql/agentql_api.py +151 -0
- lfx/components/agents/__init__.py +34 -0
- lfx/components/agents/agent.py +558 -0
- lfx/components/agents/mcp_component.py +501 -0
- lfx/components/aiml/__init__.py +37 -0
- lfx/components/aiml/aiml.py +112 -0
- lfx/components/aiml/aiml_embeddings.py +37 -0
- lfx/components/amazon/__init__.py +36 -0
- lfx/components/amazon/amazon_bedrock_embedding.py +109 -0
- lfx/components/amazon/amazon_bedrock_model.py +124 -0
- lfx/components/amazon/s3_bucket_uploader.py +211 -0
- lfx/components/anthropic/__init__.py +34 -0
- lfx/components/anthropic/anthropic.py +187 -0
- lfx/components/apify/__init__.py +5 -0
- lfx/components/apify/apify_actor.py +325 -0
- lfx/components/arxiv/__init__.py +3 -0
- lfx/components/arxiv/arxiv.py +163 -0
- lfx/components/assemblyai/__init__.py +46 -0
- lfx/components/assemblyai/assemblyai_get_subtitles.py +83 -0
- lfx/components/assemblyai/assemblyai_lemur.py +183 -0
- lfx/components/assemblyai/assemblyai_list_transcripts.py +95 -0
- lfx/components/assemblyai/assemblyai_poll_transcript.py +72 -0
- lfx/components/assemblyai/assemblyai_start_transcript.py +188 -0
- lfx/components/azure/__init__.py +37 -0
- lfx/components/azure/azure_openai.py +95 -0
- lfx/components/azure/azure_openai_embeddings.py +83 -0
- lfx/components/baidu/__init__.py +32 -0
- lfx/components/baidu/baidu_qianfan_chat.py +113 -0
- lfx/components/bing/__init__.py +3 -0
- lfx/components/bing/bing_search_api.py +61 -0
- lfx/components/cassandra/__init__.py +40 -0
- lfx/components/cassandra/cassandra.py +264 -0
- lfx/components/cassandra/cassandra_chat.py +92 -0
- lfx/components/cassandra/cassandra_graph.py +238 -0
- lfx/components/chains/__init__.py +3 -0
- lfx/components/chroma/__init__.py +34 -0
- lfx/components/chroma/chroma.py +167 -0
- lfx/components/cleanlab/__init__.py +40 -0
- lfx/components/cleanlab/cleanlab_evaluator.py +155 -0
- lfx/components/cleanlab/cleanlab_rag_evaluator.py +254 -0
- lfx/components/cleanlab/cleanlab_remediator.py +131 -0
- lfx/components/clickhouse/__init__.py +34 -0
- lfx/components/clickhouse/clickhouse.py +135 -0
- lfx/components/cloudflare/__init__.py +32 -0
- lfx/components/cloudflare/cloudflare.py +81 -0
- lfx/components/cohere/__init__.py +40 -0
- lfx/components/cohere/cohere_embeddings.py +81 -0
- lfx/components/cohere/cohere_models.py +46 -0
- lfx/components/cohere/cohere_rerank.py +51 -0
- lfx/components/composio/__init__.py +74 -0
- lfx/components/composio/composio_api.py +268 -0
- lfx/components/composio/dropbox_compnent.py +11 -0
- lfx/components/composio/github_composio.py +11 -0
- lfx/components/composio/gmail_composio.py +38 -0
- lfx/components/composio/googlecalendar_composio.py +11 -0
- lfx/components/composio/googlemeet_composio.py +11 -0
- lfx/components/composio/googletasks_composio.py +8 -0
- lfx/components/composio/linear_composio.py +11 -0
- lfx/components/composio/outlook_composio.py +11 -0
- lfx/components/composio/reddit_composio.py +11 -0
- lfx/components/composio/slack_composio.py +582 -0
- lfx/components/composio/slackbot_composio.py +11 -0
- lfx/components/composio/supabase_composio.py +11 -0
- lfx/components/composio/todoist_composio.py +11 -0
- lfx/components/composio/youtube_composio.py +11 -0
- lfx/components/confluence/__init__.py +3 -0
- lfx/components/confluence/confluence.py +84 -0
- lfx/components/couchbase/__init__.py +34 -0
- lfx/components/couchbase/couchbase.py +102 -0
- lfx/components/crewai/__init__.py +49 -0
- lfx/components/crewai/crewai.py +107 -0
- lfx/components/crewai/hierarchical_crew.py +46 -0
- lfx/components/crewai/hierarchical_task.py +44 -0
- lfx/components/crewai/sequential_crew.py +52 -0
- lfx/components/crewai/sequential_task.py +73 -0
- lfx/components/crewai/sequential_task_agent.py +143 -0
- lfx/components/custom_component/__init__.py +34 -0
- lfx/components/custom_component/custom_component.py +31 -0
- lfx/components/data/__init__.py +64 -0
- lfx/components/data/api_request.py +544 -0
- lfx/components/data/csv_to_data.py +95 -0
- lfx/components/data/directory.py +113 -0
- lfx/components/data/file.py +577 -0
- lfx/components/data/json_to_data.py +98 -0
- lfx/components/data/news_search.py +164 -0
- lfx/components/data/rss.py +69 -0
- lfx/components/data/sql_executor.py +101 -0
- lfx/components/data/url.py +311 -0
- lfx/components/data/web_search.py +112 -0
- lfx/components/data/webhook.py +56 -0
- lfx/components/datastax/__init__.py +70 -0
- lfx/components/datastax/astra_assistant_manager.py +306 -0
- lfx/components/datastax/astra_db.py +75 -0
- lfx/components/datastax/astra_vectorize.py +124 -0
- lfx/components/datastax/astradb.py +1285 -0
- lfx/components/datastax/astradb_cql.py +314 -0
- lfx/components/datastax/astradb_graph.py +330 -0
- lfx/components/datastax/astradb_tool.py +414 -0
- lfx/components/datastax/astradb_vectorstore.py +1285 -0
- lfx/components/datastax/cassandra.py +92 -0
- lfx/components/datastax/create_assistant.py +58 -0
- lfx/components/datastax/create_thread.py +32 -0
- lfx/components/datastax/dotenv.py +35 -0
- lfx/components/datastax/get_assistant.py +37 -0
- lfx/components/datastax/getenvvar.py +30 -0
- lfx/components/datastax/graph_rag.py +141 -0
- lfx/components/datastax/hcd.py +314 -0
- lfx/components/datastax/list_assistants.py +25 -0
- lfx/components/datastax/run.py +89 -0
- lfx/components/deactivated/__init__.py +15 -0
- lfx/components/deactivated/amazon_kendra.py +66 -0
- lfx/components/deactivated/chat_litellm_model.py +158 -0
- lfx/components/deactivated/code_block_extractor.py +26 -0
- lfx/components/deactivated/documents_to_data.py +22 -0
- lfx/components/deactivated/embed.py +16 -0
- lfx/components/deactivated/extract_key_from_data.py +46 -0
- lfx/components/deactivated/json_document_builder.py +57 -0
- lfx/components/deactivated/list_flows.py +20 -0
- lfx/components/deactivated/mcp_sse.py +61 -0
- lfx/components/deactivated/mcp_stdio.py +62 -0
- lfx/components/deactivated/merge_data.py +93 -0
- lfx/components/deactivated/message.py +37 -0
- lfx/components/deactivated/metal.py +54 -0
- lfx/components/deactivated/multi_query.py +59 -0
- lfx/components/deactivated/retriever.py +43 -0
- lfx/components/deactivated/selective_passthrough.py +77 -0
- lfx/components/deactivated/should_run_next.py +40 -0
- lfx/components/deactivated/split_text.py +63 -0
- lfx/components/deactivated/store_message.py +24 -0
- lfx/components/deactivated/sub_flow.py +124 -0
- lfx/components/deactivated/vectara_self_query.py +76 -0
- lfx/components/deactivated/vector_store.py +24 -0
- lfx/components/deepseek/__init__.py +34 -0
- lfx/components/deepseek/deepseek.py +136 -0
- lfx/components/docling/__init__.py +43 -0
- lfx/components/docling/chunk_docling_document.py +186 -0
- lfx/components/docling/docling_inline.py +231 -0
- lfx/components/docling/docling_remote.py +193 -0
- lfx/components/docling/export_docling_document.py +117 -0
- lfx/components/documentloaders/__init__.py +3 -0
- lfx/components/duckduckgo/__init__.py +3 -0
- lfx/components/duckduckgo/duck_duck_go_search_run.py +92 -0
- lfx/components/elastic/__init__.py +37 -0
- lfx/components/elastic/elasticsearch.py +267 -0
- lfx/components/elastic/opensearch.py +243 -0
- lfx/components/embeddings/__init__.py +37 -0
- lfx/components/embeddings/similarity.py +76 -0
- lfx/components/embeddings/text_embedder.py +64 -0
- lfx/components/exa/__init__.py +3 -0
- lfx/components/exa/exa_search.py +68 -0
- lfx/components/firecrawl/__init__.py +43 -0
- lfx/components/firecrawl/firecrawl_crawl_api.py +88 -0
- lfx/components/firecrawl/firecrawl_extract_api.py +136 -0
- lfx/components/firecrawl/firecrawl_map_api.py +89 -0
- lfx/components/firecrawl/firecrawl_scrape_api.py +73 -0
- lfx/components/git/__init__.py +4 -0
- lfx/components/git/git.py +262 -0
- lfx/components/git/gitextractor.py +196 -0
- lfx/components/glean/__init__.py +3 -0
- lfx/components/glean/glean_search_api.py +173 -0
- lfx/components/google/__init__.py +17 -0
- lfx/components/google/gmail.py +192 -0
- lfx/components/google/google_bq_sql_executor.py +157 -0
- lfx/components/google/google_drive.py +92 -0
- lfx/components/google/google_drive_search.py +152 -0
- lfx/components/google/google_generative_ai.py +147 -0
- lfx/components/google/google_generative_ai_embeddings.py +141 -0
- lfx/components/google/google_oauth_token.py +89 -0
- lfx/components/google/google_search_api_core.py +68 -0
- lfx/components/google/google_serper_api_core.py +74 -0
- lfx/components/groq/__init__.py +34 -0
- lfx/components/groq/groq.py +136 -0
- lfx/components/helpers/__init__.py +52 -0
- lfx/components/helpers/calculator_core.py +89 -0
- lfx/components/helpers/create_list.py +40 -0
- lfx/components/helpers/current_date.py +42 -0
- lfx/components/helpers/id_generator.py +42 -0
- lfx/components/helpers/memory.py +251 -0
- lfx/components/helpers/output_parser.py +45 -0
- lfx/components/helpers/store_message.py +90 -0
- lfx/components/homeassistant/__init__.py +7 -0
- lfx/components/homeassistant/home_assistant_control.py +152 -0
- lfx/components/homeassistant/list_home_assistant_states.py +137 -0
- lfx/components/huggingface/__init__.py +37 -0
- lfx/components/huggingface/huggingface.py +197 -0
- lfx/components/huggingface/huggingface_inference_api.py +106 -0
- lfx/components/ibm/__init__.py +34 -0
- lfx/components/ibm/watsonx.py +203 -0
- lfx/components/ibm/watsonx_embeddings.py +135 -0
- lfx/components/icosacomputing/__init__.py +5 -0
- lfx/components/icosacomputing/combinatorial_reasoner.py +84 -0
- lfx/components/input_output/__init__.py +38 -0
- lfx/components/input_output/chat.py +120 -0
- lfx/components/input_output/chat_output.py +200 -0
- lfx/components/input_output/text.py +27 -0
- lfx/components/input_output/text_output.py +29 -0
- lfx/components/jigsawstack/__init__.py +23 -0
- lfx/components/jigsawstack/ai_scrape.py +126 -0
- lfx/components/jigsawstack/ai_web_search.py +136 -0
- lfx/components/jigsawstack/file_read.py +115 -0
- lfx/components/jigsawstack/file_upload.py +94 -0
- lfx/components/jigsawstack/image_generation.py +205 -0
- lfx/components/jigsawstack/nsfw.py +60 -0
- lfx/components/jigsawstack/object_detection.py +124 -0
- lfx/components/jigsawstack/sentiment.py +112 -0
- lfx/components/jigsawstack/text_to_sql.py +90 -0
- lfx/components/jigsawstack/text_translate.py +77 -0
- lfx/components/jigsawstack/vocr.py +107 -0
- lfx/components/langchain_utilities/__init__.py +109 -0
- lfx/components/langchain_utilities/character.py +53 -0
- lfx/components/langchain_utilities/conversation.py +59 -0
- lfx/components/langchain_utilities/csv_agent.py +107 -0
- lfx/components/langchain_utilities/fake_embeddings.py +26 -0
- lfx/components/langchain_utilities/html_link_extractor.py +35 -0
- lfx/components/langchain_utilities/json_agent.py +45 -0
- lfx/components/langchain_utilities/langchain_hub.py +126 -0
- lfx/components/langchain_utilities/language_recursive.py +49 -0
- lfx/components/langchain_utilities/language_semantic.py +138 -0
- lfx/components/langchain_utilities/llm_checker.py +39 -0
- lfx/components/langchain_utilities/llm_math.py +42 -0
- lfx/components/langchain_utilities/natural_language.py +61 -0
- lfx/components/langchain_utilities/openai_tools.py +53 -0
- lfx/components/langchain_utilities/openapi.py +48 -0
- lfx/components/langchain_utilities/recursive_character.py +60 -0
- lfx/components/langchain_utilities/retrieval_qa.py +83 -0
- lfx/components/langchain_utilities/runnable_executor.py +137 -0
- lfx/components/langchain_utilities/self_query.py +80 -0
- lfx/components/langchain_utilities/spider.py +142 -0
- lfx/components/langchain_utilities/sql.py +40 -0
- lfx/components/langchain_utilities/sql_database.py +35 -0
- lfx/components/langchain_utilities/sql_generator.py +78 -0
- lfx/components/langchain_utilities/tool_calling.py +59 -0
- lfx/components/langchain_utilities/vector_store_info.py +49 -0
- lfx/components/langchain_utilities/vector_store_router.py +33 -0
- lfx/components/langchain_utilities/xml_agent.py +71 -0
- lfx/components/langwatch/__init__.py +3 -0
- lfx/components/langwatch/langwatch.py +278 -0
- lfx/components/link_extractors/__init__.py +3 -0
- lfx/components/lmstudio/__init__.py +34 -0
- lfx/components/lmstudio/lmstudioembeddings.py +89 -0
- lfx/components/lmstudio/lmstudiomodel.py +129 -0
- lfx/components/logic/__init__.py +52 -0
- lfx/components/logic/conditional_router.py +171 -0
- lfx/components/logic/data_conditional_router.py +125 -0
- lfx/components/logic/flow_tool.py +110 -0
- lfx/components/logic/listen.py +29 -0
- lfx/components/logic/loop.py +125 -0
- lfx/components/logic/notify.py +88 -0
- lfx/components/logic/pass_message.py +35 -0
- lfx/components/logic/run_flow.py +71 -0
- lfx/components/logic/sub_flow.py +114 -0
- lfx/components/maritalk/__init__.py +32 -0
- lfx/components/maritalk/maritalk.py +52 -0
- lfx/components/mem0/__init__.py +3 -0
- lfx/components/mem0/mem0_chat_memory.py +136 -0
- lfx/components/milvus/__init__.py +34 -0
- lfx/components/milvus/milvus.py +115 -0
- lfx/components/mistral/__init__.py +37 -0
- lfx/components/mistral/mistral.py +114 -0
- lfx/components/mistral/mistral_embeddings.py +58 -0
- lfx/components/models/__init__.py +34 -0
- lfx/components/models/embedding_model.py +114 -0
- lfx/components/models/language_model.py +144 -0
- lfx/components/mongodb/__init__.py +34 -0
- lfx/components/mongodb/mongodb_atlas.py +213 -0
- lfx/components/needle/__init__.py +3 -0
- lfx/components/needle/needle.py +104 -0
- lfx/components/notdiamond/__init__.py +34 -0
- lfx/components/notdiamond/notdiamond.py +228 -0
- lfx/components/novita/__init__.py +32 -0
- lfx/components/novita/novita.py +130 -0
- lfx/components/nvidia/__init__.py +57 -0
- lfx/components/nvidia/nvidia.py +157 -0
- lfx/components/nvidia/nvidia_embedding.py +77 -0
- lfx/components/nvidia/nvidia_ingest.py +317 -0
- lfx/components/nvidia/nvidia_rerank.py +63 -0
- lfx/components/nvidia/system_assist.py +65 -0
- lfx/components/olivya/__init__.py +3 -0
- lfx/components/olivya/olivya.py +116 -0
- lfx/components/ollama/__init__.py +37 -0
- lfx/components/ollama/ollama.py +330 -0
- lfx/components/ollama/ollama_embeddings.py +106 -0
- lfx/components/openai/__init__.py +37 -0
- lfx/components/openai/openai.py +100 -0
- lfx/components/openai/openai_chat_model.py +176 -0
- lfx/components/openrouter/__init__.py +32 -0
- lfx/components/openrouter/openrouter.py +202 -0
- lfx/components/output_parsers/__init__.py +3 -0
- lfx/components/perplexity/__init__.py +34 -0
- lfx/components/perplexity/perplexity.py +75 -0
- lfx/components/pgvector/__init__.py +34 -0
- lfx/components/pgvector/pgvector.py +72 -0
- lfx/components/pinecone/__init__.py +34 -0
- lfx/components/pinecone/pinecone.py +134 -0
- lfx/components/processing/__init__.py +117 -0
- lfx/components/processing/alter_metadata.py +108 -0
- lfx/components/processing/batch_run.py +205 -0
- lfx/components/processing/combine_text.py +39 -0
- lfx/components/processing/converter.py +159 -0
- lfx/components/processing/create_data.py +110 -0
- lfx/components/processing/data_operations.py +438 -0
- lfx/components/processing/data_to_dataframe.py +70 -0
- lfx/components/processing/dataframe_operations.py +313 -0
- lfx/components/processing/extract_key.py +53 -0
- lfx/components/processing/filter_data.py +42 -0
- lfx/components/processing/filter_data_values.py +88 -0
- lfx/components/processing/json_cleaner.py +103 -0
- lfx/components/processing/lambda_filter.py +154 -0
- lfx/components/processing/llm_router.py +499 -0
- lfx/components/processing/merge_data.py +90 -0
- lfx/components/processing/message_to_data.py +36 -0
- lfx/components/processing/parse_data.py +70 -0
- lfx/components/processing/parse_dataframe.py +68 -0
- lfx/components/processing/parse_json_data.py +90 -0
- lfx/components/processing/parser.py +143 -0
- lfx/components/processing/prompt.py +67 -0
- lfx/components/processing/python_repl_core.py +98 -0
- lfx/components/processing/regex.py +82 -0
- lfx/components/processing/save_file.py +225 -0
- lfx/components/processing/select_data.py +48 -0
- lfx/components/processing/split_text.py +141 -0
- lfx/components/processing/structured_output.py +202 -0
- lfx/components/processing/update_data.py +160 -0
- lfx/components/prototypes/__init__.py +34 -0
- lfx/components/prototypes/python_function.py +73 -0
- lfx/components/qdrant/__init__.py +34 -0
- lfx/components/qdrant/qdrant.py +109 -0
- lfx/components/redis/__init__.py +37 -0
- lfx/components/redis/redis.py +89 -0
- lfx/components/redis/redis_chat.py +43 -0
- lfx/components/sambanova/__init__.py +32 -0
- lfx/components/sambanova/sambanova.py +84 -0
- lfx/components/scrapegraph/__init__.py +40 -0
- lfx/components/scrapegraph/scrapegraph_markdownify_api.py +64 -0
- lfx/components/scrapegraph/scrapegraph_search_api.py +64 -0
- lfx/components/scrapegraph/scrapegraph_smart_scraper_api.py +71 -0
- lfx/components/searchapi/__init__.py +34 -0
- lfx/components/searchapi/search.py +79 -0
- lfx/components/serpapi/__init__.py +3 -0
- lfx/components/serpapi/serp.py +115 -0
- lfx/components/supabase/__init__.py +34 -0
- lfx/components/supabase/supabase.py +76 -0
- lfx/components/tavily/__init__.py +4 -0
- lfx/components/tavily/tavily_extract.py +117 -0
- lfx/components/tavily/tavily_search.py +212 -0
- lfx/components/textsplitters/__init__.py +3 -0
- lfx/components/toolkits/__init__.py +3 -0
- lfx/components/tools/__init__.py +72 -0
- lfx/components/tools/calculator.py +108 -0
- lfx/components/tools/google_search_api.py +45 -0
- lfx/components/tools/google_serper_api.py +115 -0
- lfx/components/tools/python_code_structured_tool.py +327 -0
- lfx/components/tools/python_repl.py +97 -0
- lfx/components/tools/search_api.py +87 -0
- lfx/components/tools/searxng.py +145 -0
- lfx/components/tools/serp_api.py +119 -0
- lfx/components/tools/tavily_search_tool.py +344 -0
- lfx/components/tools/wikidata_api.py +102 -0
- lfx/components/tools/wikipedia_api.py +49 -0
- lfx/components/tools/yahoo_finance.py +129 -0
- lfx/components/twelvelabs/__init__.py +52 -0
- lfx/components/twelvelabs/convert_astra_results.py +84 -0
- lfx/components/twelvelabs/pegasus_index.py +311 -0
- lfx/components/twelvelabs/split_video.py +291 -0
- lfx/components/twelvelabs/text_embeddings.py +57 -0
- lfx/components/twelvelabs/twelvelabs_pegasus.py +408 -0
- lfx/components/twelvelabs/video_embeddings.py +100 -0
- lfx/components/twelvelabs/video_file.py +179 -0
- lfx/components/unstructured/__init__.py +3 -0
- lfx/components/unstructured/unstructured.py +121 -0
- lfx/components/upstash/__init__.py +34 -0
- lfx/components/upstash/upstash.py +124 -0
- lfx/components/vectara/__init__.py +37 -0
- lfx/components/vectara/vectara.py +97 -0
- lfx/components/vectara/vectara_rag.py +164 -0
- lfx/components/vectorstores/__init__.py +40 -0
- lfx/components/vectorstores/astradb.py +1285 -0
- lfx/components/vectorstores/astradb_graph.py +319 -0
- lfx/components/vectorstores/cassandra.py +264 -0
- lfx/components/vectorstores/cassandra_graph.py +238 -0
- lfx/components/vectorstores/chroma.py +167 -0
- lfx/components/vectorstores/clickhouse.py +135 -0
- lfx/components/vectorstores/couchbase.py +102 -0
- lfx/components/vectorstores/elasticsearch.py +267 -0
- lfx/components/vectorstores/faiss.py +111 -0
- lfx/components/vectorstores/graph_rag.py +141 -0
- lfx/components/vectorstores/hcd.py +314 -0
- lfx/components/vectorstores/local_db.py +261 -0
- lfx/components/vectorstores/milvus.py +115 -0
- lfx/components/vectorstores/mongodb_atlas.py +213 -0
- lfx/components/vectorstores/opensearch.py +243 -0
- lfx/components/vectorstores/pgvector.py +72 -0
- lfx/components/vectorstores/pinecone.py +134 -0
- lfx/components/vectorstores/qdrant.py +109 -0
- lfx/components/vectorstores/supabase.py +76 -0
- lfx/components/vectorstores/upstash.py +124 -0
- lfx/components/vectorstores/vectara.py +97 -0
- lfx/components/vectorstores/vectara_rag.py +164 -0
- lfx/components/vectorstores/weaviate.py +89 -0
- lfx/components/vertexai/__init__.py +37 -0
- lfx/components/vertexai/vertexai.py +71 -0
- lfx/components/vertexai/vertexai_embeddings.py +67 -0
- lfx/components/weaviate/__init__.py +34 -0
- lfx/components/weaviate/weaviate.py +89 -0
- lfx/components/wikipedia/__init__.py +4 -0
- lfx/components/wikipedia/wikidata.py +86 -0
- lfx/components/wikipedia/wikipedia.py +53 -0
- lfx/components/wolframalpha/__init__.py +3 -0
- lfx/components/wolframalpha/wolfram_alpha_api.py +54 -0
- lfx/components/xai/__init__.py +32 -0
- lfx/components/xai/xai.py +167 -0
- lfx/components/yahoosearch/__init__.py +3 -0
- lfx/components/yahoosearch/yahoo.py +137 -0
- lfx/components/youtube/__init__.py +52 -0
- lfx/components/youtube/channel.py +227 -0
- lfx/components/youtube/comments.py +231 -0
- lfx/components/youtube/playlist.py +33 -0
- lfx/components/youtube/search.py +120 -0
- lfx/components/youtube/trending.py +285 -0
- lfx/components/youtube/video_details.py +263 -0
- lfx/components/youtube/youtube_transcripts.py +118 -0
- lfx/components/zep/__init__.py +3 -0
- lfx/components/zep/zep.py +44 -0
- lfx/constants.py +6 -0
- lfx/custom/__init__.py +7 -0
- lfx/custom/attributes.py +86 -0
- lfx/custom/code_parser/__init__.py +3 -0
- lfx/custom/code_parser/code_parser.py +361 -0
- lfx/custom/custom_component/__init__.py +0 -0
- lfx/custom/custom_component/base_component.py +128 -0
- lfx/custom/custom_component/component.py +1808 -0
- lfx/custom/custom_component/component_with_cache.py +8 -0
- lfx/custom/custom_component/custom_component.py +588 -0
- lfx/custom/dependency_analyzer.py +165 -0
- lfx/custom/directory_reader/__init__.py +3 -0
- lfx/custom/directory_reader/directory_reader.py +359 -0
- lfx/custom/directory_reader/utils.py +171 -0
- lfx/custom/eval.py +12 -0
- lfx/custom/schema.py +32 -0
- lfx/custom/tree_visitor.py +21 -0
- lfx/custom/utils.py +877 -0
- lfx/custom/validate.py +488 -0
- lfx/events/__init__.py +1 -0
- lfx/events/event_manager.py +110 -0
- lfx/exceptions/__init__.py +0 -0
- lfx/exceptions/component.py +15 -0
- lfx/field_typing/__init__.py +91 -0
- lfx/field_typing/constants.py +215 -0
- lfx/field_typing/range_spec.py +35 -0
- lfx/graph/__init__.py +6 -0
- lfx/graph/edge/__init__.py +0 -0
- lfx/graph/edge/base.py +277 -0
- lfx/graph/edge/schema.py +119 -0
- lfx/graph/edge/utils.py +0 -0
- lfx/graph/graph/__init__.py +0 -0
- lfx/graph/graph/ascii.py +202 -0
- lfx/graph/graph/base.py +2238 -0
- lfx/graph/graph/constants.py +63 -0
- lfx/graph/graph/runnable_vertices_manager.py +133 -0
- lfx/graph/graph/schema.py +52 -0
- lfx/graph/graph/state_model.py +66 -0
- lfx/graph/graph/utils.py +1024 -0
- lfx/graph/schema.py +75 -0
- lfx/graph/state/__init__.py +0 -0
- lfx/graph/state/model.py +237 -0
- lfx/graph/utils.py +200 -0
- lfx/graph/vertex/__init__.py +0 -0
- lfx/graph/vertex/base.py +823 -0
- lfx/graph/vertex/constants.py +0 -0
- lfx/graph/vertex/exceptions.py +4 -0
- lfx/graph/vertex/param_handler.py +264 -0
- lfx/graph/vertex/schema.py +26 -0
- lfx/graph/vertex/utils.py +19 -0
- lfx/graph/vertex/vertex_types.py +489 -0
- lfx/helpers/__init__.py +1 -0
- lfx/helpers/base_model.py +71 -0
- lfx/helpers/custom.py +13 -0
- lfx/helpers/data.py +167 -0
- lfx/helpers/flow.py +194 -0
- lfx/inputs/__init__.py +68 -0
- lfx/inputs/constants.py +2 -0
- lfx/inputs/input_mixin.py +328 -0
- lfx/inputs/inputs.py +714 -0
- lfx/inputs/validators.py +19 -0
- lfx/interface/__init__.py +6 -0
- lfx/interface/components.py +489 -0
- lfx/interface/importing/__init__.py +5 -0
- lfx/interface/importing/utils.py +39 -0
- lfx/interface/initialize/__init__.py +3 -0
- lfx/interface/initialize/loading.py +224 -0
- lfx/interface/listing.py +26 -0
- lfx/interface/run.py +16 -0
- lfx/interface/utils.py +111 -0
- lfx/io/__init__.py +63 -0
- lfx/io/schema.py +289 -0
- lfx/load/__init__.py +8 -0
- lfx/load/load.py +256 -0
- lfx/load/utils.py +99 -0
- lfx/log/__init__.py +5 -0
- lfx/log/logger.py +385 -0
- lfx/memory/__init__.py +90 -0
- lfx/memory/stubs.py +283 -0
- lfx/processing/__init__.py +1 -0
- lfx/processing/process.py +238 -0
- lfx/processing/utils.py +25 -0
- lfx/py.typed +0 -0
- lfx/schema/__init__.py +66 -0
- lfx/schema/artifact.py +83 -0
- lfx/schema/content_block.py +62 -0
- lfx/schema/content_types.py +91 -0
- lfx/schema/data.py +308 -0
- lfx/schema/dataframe.py +210 -0
- lfx/schema/dotdict.py +74 -0
- lfx/schema/encoders.py +13 -0
- lfx/schema/graph.py +47 -0
- lfx/schema/image.py +131 -0
- lfx/schema/json_schema.py +141 -0
- lfx/schema/log.py +61 -0
- lfx/schema/message.py +473 -0
- lfx/schema/openai_responses_schemas.py +74 -0
- lfx/schema/properties.py +41 -0
- lfx/schema/schema.py +171 -0
- lfx/schema/serialize.py +13 -0
- lfx/schema/table.py +140 -0
- lfx/schema/validators.py +114 -0
- lfx/serialization/__init__.py +5 -0
- lfx/serialization/constants.py +2 -0
- lfx/serialization/serialization.py +314 -0
- lfx/services/__init__.py +23 -0
- lfx/services/base.py +28 -0
- lfx/services/cache/__init__.py +6 -0
- lfx/services/cache/base.py +183 -0
- lfx/services/cache/service.py +166 -0
- lfx/services/cache/utils.py +169 -0
- lfx/services/chat/__init__.py +1 -0
- lfx/services/chat/config.py +2 -0
- lfx/services/chat/schema.py +10 -0
- lfx/services/deps.py +129 -0
- lfx/services/factory.py +19 -0
- lfx/services/initialize.py +19 -0
- lfx/services/interfaces.py +103 -0
- lfx/services/manager.py +172 -0
- lfx/services/schema.py +20 -0
- lfx/services/session.py +82 -0
- lfx/services/settings/__init__.py +3 -0
- lfx/services/settings/auth.py +130 -0
- lfx/services/settings/base.py +539 -0
- lfx/services/settings/constants.py +31 -0
- lfx/services/settings/factory.py +23 -0
- lfx/services/settings/feature_flags.py +12 -0
- lfx/services/settings/service.py +35 -0
- lfx/services/settings/utils.py +40 -0
- lfx/services/shared_component_cache/__init__.py +1 -0
- lfx/services/shared_component_cache/factory.py +30 -0
- lfx/services/shared_component_cache/service.py +9 -0
- lfx/services/storage/__init__.py +5 -0
- lfx/services/storage/local.py +155 -0
- lfx/services/storage/service.py +54 -0
- lfx/services/tracing/__init__.py +1 -0
- lfx/services/tracing/service.py +21 -0
- lfx/settings.py +6 -0
- lfx/template/__init__.py +6 -0
- lfx/template/field/__init__.py +0 -0
- lfx/template/field/base.py +257 -0
- lfx/template/field/prompt.py +15 -0
- lfx/template/frontend_node/__init__.py +6 -0
- lfx/template/frontend_node/base.py +212 -0
- lfx/template/frontend_node/constants.py +65 -0
- lfx/template/frontend_node/custom_components.py +79 -0
- lfx/template/template/__init__.py +0 -0
- lfx/template/template/base.py +100 -0
- lfx/template/utils.py +217 -0
- lfx/type_extraction/__init__.py +19 -0
- lfx/type_extraction/type_extraction.py +75 -0
- lfx/type_extraction.py +80 -0
- lfx/utils/__init__.py +1 -0
- lfx/utils/async_helpers.py +42 -0
- lfx/utils/component_utils.py +154 -0
- lfx/utils/concurrency.py +60 -0
- lfx/utils/connection_string_parser.py +11 -0
- lfx/utils/constants.py +205 -0
- lfx/utils/data_structure.py +212 -0
- lfx/utils/exceptions.py +22 -0
- lfx/utils/helpers.py +28 -0
- lfx/utils/image.py +73 -0
- lfx/utils/lazy_load.py +15 -0
- lfx/utils/request_utils.py +18 -0
- lfx/utils/schemas.py +139 -0
- lfx/utils/util.py +481 -0
- lfx/utils/util_strings.py +56 -0
- lfx/utils/version.py +24 -0
- lfx_nightly-0.1.11.dev0.dist-info/METADATA +293 -0
- lfx_nightly-0.1.11.dev0.dist-info/RECORD +699 -0
- lfx_nightly-0.1.11.dev0.dist-info/WHEEL +4 -0
- lfx_nightly-0.1.11.dev0.dist-info/entry_points.txt +2 -0
lfx/components/processing/lambda_filter.py
@@ -0,0 +1,154 @@
from __future__ import annotations

import json
import re
from typing import TYPE_CHECKING, Any

from lfx.custom.custom_component.component import Component
from lfx.io import DataInput, HandleInput, IntInput, MultilineInput, Output
from lfx.schema.data import Data
from lfx.utils.data_structure import get_data_structure

if TYPE_CHECKING:
    from collections.abc import Callable


class LambdaFilterComponent(Component):
    display_name = "Smart Function"
    description = "Uses an LLM to generate a function for filtering or transforming structured data."
    documentation: str = "https://docs.langflow.org/components-processing#smart-function"
    icon = "square-function"
    name = "Smart Function"

    inputs = [
        DataInput(
            name="data",
            display_name="Data",
            info="The structured data to filter or transform using a lambda function.",
            is_list=True,
            required=True,
        ),
        HandleInput(
            name="llm",
            display_name="Language Model",
            info="Connect the 'Language Model' output from your LLM component here.",
            input_types=["LanguageModel"],
            required=True,
        ),
        MultilineInput(
            name="filter_instruction",
            display_name="Instructions",
            info=(
                "Natural language instructions for how to filter or transform the data using a lambda function. "
                "Example: Filter the data to only include items where the 'status' is 'active'."
            ),
            value="Filter the data to...",
            required=True,
        ),
        IntInput(
            name="sample_size",
            display_name="Sample Size",
            info="For large datasets, number of items to sample from head/tail.",
            value=1000,
            advanced=True,
        ),
        IntInput(
            name="max_size",
            display_name="Max Size",
            info="Number of characters for the data to be considered large.",
            value=30000,
            advanced=True,
        ),
    ]

    outputs = [
        Output(
            display_name="Filtered Data",
            name="filtered_data",
            method="filter_data",
        ),
    ]

    def get_data_structure(self, data):
        """Extract the structure of a dictionary, replacing values with their types."""
        return {k: get_data_structure(v) for k, v in data.items()}

    def _validate_lambda(self, lambda_text: str) -> bool:
        """Validate the provided lambda function text."""
        # Return False if the lambda function does not start with 'lambda' or does not contain a colon
        return lambda_text.strip().startswith("lambda") and ":" in lambda_text

    async def filter_data(self) -> list[Data]:
        self.log(str(self.data))
        data = self.data[0].data if isinstance(self.data, list) else self.data.data

        dump = json.dumps(data)
        self.log(str(data))

        llm = self.llm
        instruction = self.filter_instruction
        sample_size = self.sample_size

        # Get data structure and samples
        data_structure = self.get_data_structure(data)
        dump_structure = json.dumps(data_structure)
        self.log(dump_structure)

        # For large datasets, sample from head and tail
        if len(dump) > self.max_size:
            data_sample = (
                f"Data is too long to display... \n\n First lines (head): {dump[:sample_size]} \n\n"
                f" Last lines (tail): {dump[-sample_size:]})"
            )
        else:
            data_sample = dump

        self.log(data_sample)

        prompt = f"""Given this data structure and examples, create a Python lambda function that
        implements the following instruction:

        Data Structure:
        {dump_structure}

        Example Items:
        {data_sample}

        Instruction: {instruction}

        Return ONLY the lambda function and nothing else. No need for ```python or whatever.
        Just a string starting with lambda.
        """

        response = await llm.ainvoke(prompt)
        response_text = response.content if hasattr(response, "content") else str(response)
        self.log(response_text)

        # Extract lambda using regex
        lambda_match = re.search(r"lambda\s+\w+\s*:.*?(?=\n|$)", response_text)
        if not lambda_match:
            msg = f"Could not find lambda in response: {response_text}"
            raise ValueError(msg)

        lambda_text = lambda_match.group().strip()
        self.log(lambda_text)

        # Validate the generated lambda before evaluating it
        if not self._validate_lambda(lambda_text):
            msg = f"Invalid lambda format: {lambda_text}"
            raise ValueError(msg)

        # Create and apply the function
        fn: Callable[[Any], Any] = eval(lambda_text)  # noqa: S307

        # Apply the lambda function to the data
        processed_data = fn(data)

        # If it's a dict, wrap it in a Data object
        if isinstance(processed_data, dict):
            return [Data(**processed_data)]
        # If it's a list, convert each item to a Data object
        if isinstance(processed_data, list):
            return [Data(**item) if isinstance(item, dict) else Data(text=str(item)) for item in processed_data]
        # If it's anything else, convert to string and wrap in a Data object
        return [Data(text=str(processed_data))]
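The core behavior of filter_data in the hunk above is: ask the LLM for a bare lambda, pull it out with a regex, sanity-check it, then eval and apply it to the data. The following is a minimal standalone sketch of that extract, validate, and eval flow, using a made-up LLM response and sample dict (it is an illustration, not code from the package):

import re

response_text = "lambda d: {k: v for k, v in d.items() if v['status'] == 'active'}"  # hypothetical LLM output
data = {"a": {"status": "active"}, "b": {"status": "archived"}}  # hypothetical input data

# Same regex and format check as LambdaFilterComponent.filter_data
match = re.search(r"lambda\s+\w+\s*:.*?(?=\n|$)", response_text)
if not match:
    raise ValueError(f"Could not find lambda in response: {response_text}")

lambda_text = match.group().strip()
if not (lambda_text.startswith("lambda") and ":" in lambda_text):
    raise ValueError(f"Invalid lambda format: {lambda_text}")

fn = eval(lambda_text)  # noqa: S307 - mirrors the component's use of eval on LLM output
print(fn(data))  # {'a': {'status': 'active'}}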
lfx/components/processing/llm_router.py
@@ -0,0 +1,499 @@
import asyncio
import http  # Added for HTTPStatus
import json
from typing import Any

import aiohttp

from lfx.base.models.chat_result import get_chat_result
from lfx.base.models.model_utils import get_model_name
from lfx.custom.custom_component.component import Component
from lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput
from lfx.schema.data import Data
from lfx.schema.message import Message
from lfx.template.field.base import Output


class LLMRouterComponent(Component):
    display_name = "LLM Router"
    description = "Routes the input to the most appropriate LLM based on OpenRouter model specifications"
    documentation: str = "https://docs.langflow.org/components-processing#llm-router"
    icon = "git-branch"

    # Constants for magic values
    MAX_DESCRIPTION_LENGTH = 500
    QUERY_PREVIEW_MAX_LENGTH = 1000

    inputs = [
        HandleInput(
            name="models",
            display_name="Language Models",
            input_types=["LanguageModel"],
            required=True,
            is_list=True,
            info="List of LLMs to route between",
        ),
        MultilineInput(
            name="input_value",
            display_name="Input",
            required=True,
            info="The input message to be routed",
        ),
        HandleInput(
            name="judge_llm",
            display_name="Judge LLM",
            input_types=["LanguageModel"],
            required=True,
            info="LLM that will evaluate and select the most appropriate model",
        ),
        DropdownInput(
            name="optimization",
            display_name="Optimization",
            options=["quality", "speed", "cost", "balanced"],
            value="balanced",
            info="Optimization preference for model selection",
        ),
        BoolInput(
            name="use_openrouter_specs",
            display_name="Use OpenRouter Specs",
            value=True,
            info=(
                "Fetch model specifications from OpenRouter API for enhanced routing decisions. "
                "If false, only model names will be used."
            ),
            advanced=True,
        ),
        IntInput(
            name="timeout",
            display_name="API Timeout",
            value=10,
            info="Timeout for API requests in seconds",
            advanced=True,
        ),
        BoolInput(
            name="fallback_to_first",
            display_name="Fallback to First Model",
            value=True,
            info="Use first model as fallback when routing fails",
            advanced=True,
        ),
    ]

    outputs = [
        Output(display_name="Output", name="output", method="route_to_model"),
        Output(
            display_name="Selected Model Info",
            name="selected_model_info",
            method="get_selected_model_info",
            types=["Data"],
        ),
        Output(
            display_name="Routing Decision",
            name="routing_decision",
            method="get_routing_decision",
        ),
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._selected_model_name: str | None = None
        self._selected_api_model_id: str | None = None
        self._routing_decision: str = ""
        self._models_api_cache: dict[str, dict[str, Any]] = {}
        self._model_name_to_api_id: dict[str, str] = {}

    def _simplify_model_name(self, name: str) -> str:
        """Simplify model name for matching by lowercasing and removing non-alphanumerics."""
        return "".join(c.lower() for c in name if c.isalnum())

    async def _fetch_openrouter_models_data(self) -> None:
        """Fetch all models from OpenRouter API and cache them along with name mappings."""
        if self._models_api_cache and self._model_name_to_api_id:
            return

        if not self.use_openrouter_specs:
            self.log("OpenRouter specs are disabled. Skipping fetch.")
            return

        try:
            self.status = "Fetching OpenRouter model specifications..."
            self.log("Fetching all model specifications from OpenRouter API: https://openrouter.ai/api/v1/models")
            async with (
                aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout)) as session,
                session.get("https://openrouter.ai/api/v1/models") as response,
            ):
                if response.status == http.HTTPStatus.OK:
                    data = await response.json()
                    models_list = data.get("data", [])

                    _models_api_cache_temp = {}
                    _model_name_to_api_id_temp = {}

                    for model_data in models_list:
                        api_model_id = model_data.get("id")
                        if not api_model_id:
                            continue

                        _models_api_cache_temp[api_model_id] = model_data
                        _model_name_to_api_id_temp[api_model_id] = api_model_id

                        api_model_name = model_data.get("name")
                        if api_model_name:
                            _model_name_to_api_id_temp[api_model_name] = api_model_id
                            simplified_api_name = self._simplify_model_name(api_model_name)
                            _model_name_to_api_id_temp[simplified_api_name] = api_model_id

                        hugging_face_id = model_data.get("hugging_face_id")
                        if hugging_face_id:
                            _model_name_to_api_id_temp[hugging_face_id] = api_model_id
                            simplified_hf_id = self._simplify_model_name(hugging_face_id)
                            _model_name_to_api_id_temp[simplified_hf_id] = api_model_id

                        if "/" in api_model_id:
                            try:
                                model_name_part_of_id = api_model_id.split("/", 1)[1]
                                if model_name_part_of_id:
                                    _model_name_to_api_id_temp[model_name_part_of_id] = api_model_id
                                    simplified_part_id = self._simplify_model_name(model_name_part_of_id)
                                    _model_name_to_api_id_temp[simplified_part_id] = api_model_id
                            except IndexError:
                                pass  # Should not happen if '/' is present

                    self._models_api_cache = _models_api_cache_temp
                    self._model_name_to_api_id = _model_name_to_api_id_temp
                    log_msg = (
                        f"Successfully fetched and cached {len(self._models_api_cache)} "
                        f"model specifications from OpenRouter."
                    )
                    self.log(log_msg)
                else:
                    err_text = await response.text()
                    self.log(f"Failed to fetch OpenRouter models: HTTP {response.status} - {err_text}")
                    self._models_api_cache = {}
                    self._model_name_to_api_id = {}
        except aiohttp.ClientError as e:
            self.log(f"AIOHTTP ClientError fetching OpenRouter models: {e!s}", "error")
            self._models_api_cache = {}
            self._model_name_to_api_id = {}
        except asyncio.TimeoutError:
            self.log("Timeout fetching OpenRouter model specifications.", "error")
            self._models_api_cache = {}
            self._model_name_to_api_id = {}
        except json.JSONDecodeError as e:
            self.log(f"JSON decode error fetching OpenRouter models: {e!s}", "error")
            self._models_api_cache = {}
            self._model_name_to_api_id = {}
        finally:
            self.status = ""

    def _get_api_model_id_for_langflow_model(self, langflow_model_name: str) -> str | None:
        """Attempt to find the OpenRouter API ID for a given Langflow model name."""
        if not langflow_model_name:
            return None

        potential_names_to_check = [langflow_model_name, self._simplify_model_name(langflow_model_name)]

        if langflow_model_name.startswith("models/"):
            name_without_prefix = langflow_model_name[len("models/") :]
            potential_names_to_check.append(name_without_prefix)
            potential_names_to_check.append(self._simplify_model_name(name_without_prefix))

        elif langflow_model_name.startswith("community_models/"):
            name_without_prefix = langflow_model_name[len("community_models/") :]
            potential_names_to_check.append(name_without_prefix)
            simplified_no_prefix = self._simplify_model_name(name_without_prefix)
            potential_names_to_check.append(simplified_no_prefix)

        elif langflow_model_name.startswith("community_models/"):
            name_without_prefix = langflow_model_name[len("community_models/") :]
            potential_names_to_check.append(name_without_prefix)
            simplified_no_prefix_comm = self._simplify_model_name(name_without_prefix)
            potential_names_to_check.append(simplified_no_prefix_comm)

        unique_names_to_check = list(dict.fromkeys(potential_names_to_check))

        for name_variant in unique_names_to_check:
            if name_variant in self._model_name_to_api_id:
                return self._model_name_to_api_id[name_variant]

        self.log(
            f"Could not map Langflow model name '{langflow_model_name}' "
            f"(tried variants: {unique_names_to_check}) to an OpenRouter API ID."
        )
        return None

    def _get_model_specs_dict(self, langflow_model_name: str) -> dict[str, Any]:
        """Get a dictionary of relevant model specifications for a given Langflow model name."""
        if not self.use_openrouter_specs or not self._models_api_cache:
            return {
                "id": langflow_model_name,
                "name": langflow_model_name,
                "description": "Specifications not available.",
            }

        api_model_id = self._get_api_model_id_for_langflow_model(langflow_model_name)

        if not api_model_id or api_model_id not in self._models_api_cache:
            log_msg = (
                f"No cached API data found for Langflow model '{langflow_model_name}' "
                f"(mapped API ID: {api_model_id}). Returning basic info."
            )
            self.log(log_msg)
            return {
                "id": langflow_model_name,
                "name": langflow_model_name,
                "description": "Full specifications not found in cache.",
            }

        model_data = self._models_api_cache[api_model_id]
        top_provider_data = model_data.get("top_provider", {})
        architecture_data = model_data.get("architecture", {})
        pricing_data = model_data.get("pricing", {})
        description = model_data.get("description", "No description available")
        truncated_description = (
            description[: self.MAX_DESCRIPTION_LENGTH - 3] + "..."
            if len(description) > self.MAX_DESCRIPTION_LENGTH
            else description
        )

        specs = {
            "id": model_data.get("id"),
            "name": model_data.get("name"),
            "description": truncated_description,
            "context_length": top_provider_data.get("context_length") or model_data.get("context_length"),
            "max_completion_tokens": (
                top_provider_data.get("max_completion_tokens") or model_data.get("max_completion_tokens")
            ),
            "tokenizer": architecture_data.get("tokenizer"),
            "input_modalities": architecture_data.get("input_modalities", []),
            "output_modalities": architecture_data.get("output_modalities", []),
            "pricing_prompt": pricing_data.get("prompt"),
            "pricing_completion": pricing_data.get("completion"),
            "is_moderated": top_provider_data.get("is_moderated"),
            "supported_parameters": model_data.get("supported_parameters", []),
        }
        return {k: v for k, v in specs.items() if v is not None}

    def _create_system_prompt(self) -> str:
        """Create system prompt for the judge LLM."""
        return """\
You are an expert AI model selection specialist. Your task is to analyze the user's input query,
their optimization preference, and a list of available models with their specifications,
then select the most appropriate model.

Each model will be presented as a JSON object with its capabilities and characteristics.

Your decision should be based on:
1. Task complexity and requirements derived from the user's query.
2. Context length needed for the input.
3. Model capabilities (e.g., context window, input/output modalities, tokenizer).
4. Pricing considerations, if relevant to the optimization preference.
5. User's stated optimization preference (quality, speed, cost, balanced).

Return ONLY the index number (0, 1, 2, etc.) of the best model from the provided list.
Do not provide any explanation or reasoning, just the index number.
If multiple models seem equally suitable according to the preference, you may pick the first one that matches.
If no model seems suitable, pick the first model in the list (index 0) as a fallback."""

    async def route_to_model(self) -> Message:
        """Main routing method."""
        if not self.models or not self.input_value or not self.judge_llm:
            error_msg = "Missing required inputs: models, input_value, or judge_llm"
            self.status = error_msg
            self.log(f"Validation Error: {error_msg}", "error")
            raise ValueError(error_msg)

        successful_result: Message | None = None
        try:
            self.log(f"Starting model routing with {len(self.models)} available Langflow models.")
            self.log(f"Optimization preference: {self.optimization}")
            self.log(f"Input length: {len(self.input_value)} characters")

            if self.use_openrouter_specs and not self._models_api_cache:
                await self._fetch_openrouter_models_data()

            system_prompt_content = self._create_system_prompt()
            system_message = {"role": "system", "content": system_prompt_content}

            self.status = "Analyzing available models and preparing specifications..."
            model_specs_for_judge = []
            for i, langflow_model_instance in enumerate(self.models):
                langflow_model_name = get_model_name(langflow_model_instance)
                if not langflow_model_name:
                    self.log(f"Warning: Could not determine name for model at index {i}. Using placeholder.", "warning")
                    spec_dict = {
                        "id": f"unknown_model_{i}",
                        "name": f"Unknown Model {i}",
                        "description": "Name could not be determined.",
                    }
                else:
                    spec_dict = self._get_model_specs_dict(langflow_model_name)

                model_specs_for_judge.append({"index": i, "langflow_name": langflow_model_name, "specs": spec_dict})
                self.log(
                    f"Prepared specs for Langflow model {i} ('{langflow_model_name}'): {spec_dict.get('name', 'N/A')}"
                )

            estimated_tokens = len(self.input_value.split()) * 1.3
            self.log(f"Estimated input tokens: {int(estimated_tokens)}")

            query_preview = self.input_value[: self.QUERY_PREVIEW_MAX_LENGTH]
            if len(self.input_value) > self.QUERY_PREVIEW_MAX_LENGTH:
                query_preview += "..."

            user_message_content = f"""User Query: "{query_preview}"
Optimization Preference: {self.optimization}
Estimated Input Tokens: ~{int(estimated_tokens)}

Available Models (JSON list):
{json.dumps(model_specs_for_judge, indent=2)}

Based on the user query, optimization preference, and the detailed model specifications,
select the index of the most appropriate model.
Return ONLY the index number:"""

            user_message = {"role": "user", "content": user_message_content}

            self.log("Requesting model selection from judge LLM...")
            self.status = "Judge LLM analyzing options..."

            response = await self.judge_llm.ainvoke([system_message, user_message])
            selected_index, chosen_model_instance = self._parse_judge_response(response.content.strip())
            self._selected_model_name = get_model_name(chosen_model_instance)
            if self._selected_model_name:
                self._selected_api_model_id = (
                    self._get_api_model_id_for_langflow_model(self._selected_model_name) or self._selected_model_name
                )
            else:
                self._selected_api_model_id = "unknown_model"

            specs_source = (
                "OpenRouter API"
                if self.use_openrouter_specs and self._models_api_cache
                else "Basic (Langflow model names only)"
            )
            self._routing_decision = f"""Model Selection Decision:
- Selected Model Index: {selected_index}
- Selected Langflow Model Name: {self._selected_model_name}
- Selected API Model ID (if resolved): {self._selected_api_model_id}
- Optimization Preference: {self.optimization}
- Input Query Length: {len(self.input_value)} characters (~{int(estimated_tokens)} tokens)
- Number of Models Considered: {len(self.models)}
- Specifications Source: {specs_source}"""

            log_msg = (
                f"DECISION by Judge LLM: Selected model index {selected_index} -> "
                f"Langflow Name: '{self._selected_model_name}', API ID: '{self._selected_api_model_id}'"
            )
            self.log(log_msg)

            self.status = f"Generating response with: {self._selected_model_name}"
            input_message_obj = Message(text=self.input_value)

            raw_result = get_chat_result(
                runnable=chosen_model_instance,
                input_value=input_message_obj,
            )
            result = Message(text=str(raw_result)) if not isinstance(raw_result, Message) else raw_result

            self.status = f"Successfully routed to: {self._selected_model_name}"
            successful_result = result

        except (ValueError, TypeError, AttributeError, KeyError, RuntimeError) as e:
            error_msg = f"Routing error: {type(e).__name__} - {e!s}"
            self.log(f"{error_msg}", "error")
            self.log("Detailed routing error occurred. Check logs for details.", "error")
            self.status = error_msg

            if self.fallback_to_first and self.models:
                self.log("Activating fallback to first model due to error.", "warning")
                chosen_model_instance = self.models[0]
                self._selected_model_name = get_model_name(chosen_model_instance)
                if self._selected_model_name:
                    mapped_id = self._get_api_model_id_for_langflow_model(self._selected_model_name)
                    self._selected_api_model_id = mapped_id or self._selected_model_name
                else:
                    self._selected_api_model_id = "fallback_model"
                self._routing_decision = f"""Fallback Decision:
- Error During Routing: {error_msg}
- Fallback Model Langflow Name: {self._selected_model_name}
- Fallback Model API ID (if resolved): {self._selected_api_model_id}
- Reason: Automatic fallback enabled"""

                self.status = f"Fallback: Using {self._selected_model_name}"
                input_message_obj = Message(text=self.input_value)

                raw_fallback_result = get_chat_result(
                    runnable=chosen_model_instance,
                    input_value=input_message_obj,
                )
                if not isinstance(raw_fallback_result, Message):
                    successful_result = Message(text=str(raw_fallback_result))
                else:
                    successful_result = raw_fallback_result
            else:
                self.log("No fallback model available or fallback disabled. Raising error.", "error")
                raise

        if successful_result is None:
            error_message = "Unexpected state in route_to_model: No result produced."
            self.log(f"Error: {error_message}", "error")
            raise RuntimeError(error_message)
        return successful_result

    def _parse_judge_response(self, response_content: str) -> tuple[int, Any]:
        """Parse the judge's response to extract model index."""
        try:
            cleaned_response = "".join(filter(str.isdigit, response_content.strip()))
            if not cleaned_response:
                self.log(f"Judge LLM response was non-numeric: '{response_content}'. Defaulting to index 0.", "warning")
                return 0, self.models[0]

            selected_index = int(cleaned_response)

            if 0 <= selected_index < len(self.models):
                self.log(f"Judge LLM selected index: {selected_index}")
                return selected_index, self.models[selected_index]
            log_msg = (
                f"Judge LLM selected index {selected_index} is out of bounds "
                f"(0-{len(self.models) - 1}). Defaulting to index 0."
            )
            self.log(log_msg, "warning")
            return 0, self.models[0]

        except ValueError:
            self.log(
                f"Could not parse judge LLM response to integer: '{response_content}'. Defaulting to index 0.",
                "warning",
            )
            return 0, self.models[0]
        except (AttributeError, IndexError) as e:
            self.log(f"Error parsing judge response '{response_content}': {e!s}. Defaulting to index 0.", "error")
            return 0, self.models[0]

    def get_selected_model_info(self) -> list[Data]:
        """Return detailed information about the selected model as a list of Data objects."""
        if self._selected_model_name:
            specs_dict = self._get_model_specs_dict(self._selected_model_name)
            if "langflow_name" not in specs_dict:
                specs_dict["langflow_model_name_used_for_lookup"] = self._selected_model_name
            if self._selected_api_model_id and specs_dict.get("id") != self._selected_api_model_id:
                specs_dict["resolved_api_model_id"] = self._selected_api_model_id
            data_output = [Data(data=specs_dict)]
            self.status = data_output
            return data_output

        data_output = [Data(data={"info": "No model selected yet - run the router first."})]
        self.status = data_output
        return data_output

    def get_routing_decision(self) -> Message:
        """Return the comprehensive routing decision explanation."""
        if self._routing_decision:
            message_output = Message(text=f"{self._routing_decision}")
            self.status = message_output
            return message_output

        message_output = Message(text="No routing decision made yet - run the router first.")
        self.status = message_output
        return message_output
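One detail in the routing hunk above worth calling out: _simplify_model_name lowercases a name and strips non-alphanumerics, and _fetch_openrouter_models_data registers several aliases per model (the API id, display name, Hugging Face id, the part after the provider prefix, plus their simplified forms), so whatever name Langflow reports for a model can usually be matched back to an OpenRouter id. A minimal sketch of that alias mapping with a made-up one-entry catalog (an illustration, not code from the package):

def simplify(name: str) -> str:
    # Same normalization as LLMRouterComponent._simplify_model_name
    return "".join(c.lower() for c in name if c.isalnum())

catalog = [{"id": "openai/gpt-4o", "name": "OpenAI: GPT-4o"}]  # hypothetical API payload

name_to_id: dict[str, str] = {}
for model in catalog:
    api_id = model["id"]
    # Register the raw aliases and their simplified forms, all pointing at the API id.
    for alias in (api_id, model["name"], api_id.split("/", 1)[1]):
        name_to_id[alias] = api_id
        name_to_id[simplify(alias)] = api_id

# A Langflow-side name such as "gpt-4o" resolves through its simplified form.
print(name_to_id[simplify("gpt-4o")])  # -> openai/gpt-4o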