langflow-base-nightly 0.5.0.dev38__py3-none-any.whl → 0.5.1.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langflow/alembic/versions/0882f9657f22_encrypt_existing_mcp_auth_settings_.py +122 -0
- langflow/api/router.py +2 -0
- langflow/api/v1/__init__.py +2 -0
- langflow/api/v1/endpoints.py +7 -1
- langflow/api/v1/mcp_projects.py +373 -52
- langflow/api/v1/openai_responses.py +545 -0
- langflow/api/v1/schemas.py +1 -2
- langflow/components/FAISS/__init__.py +34 -0
- langflow/components/agents/agent.py +246 -52
- langflow/components/cassandra/__init__.py +40 -0
- langflow/components/chroma/__init__.py +34 -0
- langflow/components/clickhouse/__init__.py +34 -0
- langflow/components/couchbase/__init__.py +34 -0
- langflow/components/data/file.py +302 -376
- langflow/components/datastax/__init__.py +3 -3
- langflow/components/docling/docling_inline.py +56 -4
- langflow/components/elastic/__init__.py +37 -0
- langflow/components/milvus/__init__.py +34 -0
- langflow/components/mongodb/__init__.py +34 -0
- langflow/components/nvidia/nvidia_ingest.py +3 -2
- langflow/components/ollama/ollama.py +1 -0
- langflow/components/perplexity/perplexity.py +3 -13
- langflow/components/pgvector/__init__.py +34 -0
- langflow/components/pinecone/__init__.py +34 -0
- langflow/components/qdrant/__init__.py +34 -0
- langflow/components/redis/__init__.py +36 -2
- langflow/components/redis/redis.py +75 -29
- langflow/components/redis/redis_chat.py +43 -0
- langflow/components/supabase/__init__.py +37 -0
- langflow/components/upstash/__init__.py +34 -0
- langflow/components/vectara/__init__.py +37 -0
- langflow/components/vectorstores/__init__.py +0 -69
- langflow/components/vectorstores/local_db.py +1 -0
- langflow/components/weaviate/__init__.py +34 -0
- langflow/components/youtube/channel.py +1 -1
- langflow/custom/custom_component/custom_component.py +11 -0
- langflow/custom/dependency_analyzer.py +165 -0
- langflow/custom/utils.py +34 -16
- langflow/frontend/assets/{SlackIcon-BhW6H3JR.js → SlackIcon-Cr3Q15Px.js} +1 -1
- langflow/frontend/assets/{Wikipedia-Dx5jbiy3.js → Wikipedia-GxM5sPdM.js} +1 -1
- langflow/frontend/assets/{Wolfram-CIyonzwo.js → Wolfram-BN3-VOCA.js} +1 -1
- langflow/frontend/assets/{index-DOEvKC2X.js → index-28oOcafk.js} +1 -1
- langflow/frontend/assets/{index-Bhv79Zso.js → index-2wSXqBtB.js} +1 -1
- langflow/frontend/assets/{index-BRmSeoWR.js → index-3wW7BClE.js} +1 -1
- langflow/frontend/assets/{index-eUkS6iJM.js → index-6pyH3ZJB.js} +1 -1
- langflow/frontend/assets/{index-Cr5v2ave.js → index-AWCSdofD.js} +1 -1
- langflow/frontend/assets/{index-C27Jj_26.js → index-B2Zgv_xv.js} +1 -1
- langflow/frontend/assets/{index-BKKrUElc.js → index-B2ptVQGM.js} +1 -1
- langflow/frontend/assets/{index-BnAFhkSN.js → index-B3TANVes.js} +1 -1
- langflow/frontend/assets/{index-hZUcL0MZ.js → index-B4yCvZKV.js} +1 -1
- langflow/frontend/assets/{index-BPR2mEFC.js → index-BC65VuWx.js} +1 -1
- langflow/frontend/assets/{index-CgU7KF4I.js → index-BCDSei1q.js} +1 -1
- langflow/frontend/assets/{index-CzHzeZuA.js → index-BJy50PvP.js} +1 -1
- langflow/frontend/assets/{index-DkGhPNeA.js → index-BKseQQ2I.js} +1 -1
- langflow/frontend/assets/{index-BVFaF7HW.js → index-BLTxEeTi.js} +1 -1
- langflow/frontend/assets/{index-cEXY6V06.js → index-BRg1f4Mu.js} +1 -1
- langflow/frontend/assets/{index-C2eQmQsn.js → index-BS8Vo8nc.js} +1 -1
- langflow/frontend/assets/{index-gdb7XMS8.js → index-BTKOU4xC.js} +1 -1
- langflow/frontend/assets/{index-U9GWm1eH.js → index-BVwJDmw-.js} +1 -1
- langflow/frontend/assets/{index-BWt5xGeA.js → index-BWYuQ2Sj.js} +1 -1
- langflow/frontend/assets/{index-Dx-Z87KT.js → index-BWdLILDG.js} +1 -1
- langflow/frontend/assets/{index-paQEWYGT.js → index-BZcw4827.js} +1 -1
- langflow/frontend/assets/{index-BDQrd7Tj.js → index-Bbi87Ve4.js} +1 -1
- langflow/frontend/assets/{index-vJOO5U8M.js → index-Bf0IYKLd.js} +1 -1
- langflow/frontend/assets/{index-1Q3VBqKn.js → index-Bg5nrMRh.js} +1 -1
- langflow/frontend/assets/{index-BFQ8KFK0.js → index-BiC280Nx.js} +1 -1
- langflow/frontend/assets/{index-CFNTYfFK.js → index-BiKKN6FR.js} +1 -1
- langflow/frontend/assets/{index-BPfdqCc_.js → index-Bief6eyJ.js} +1 -1
- langflow/frontend/assets/{index-Cxy9sEpy.js → index-BkXec1Yf.js} +1 -1
- langflow/frontend/assets/{index-D4tjMhfY.js → index-Bnl6QHtP.js} +1 -1
- langflow/frontend/assets/{index-BD7Io1hL.js → index-BpxbUiZD.js} +1978 -1978
- langflow/frontend/assets/{index-Ch5r0oW6.js → index-BrJV8psX.js} +1 -1
- langflow/frontend/assets/{index-DOQDkSoK.js → index-BwLWcUXL.js} +1 -1
- langflow/frontend/assets/{index-CMHpjHZl.js → index-Bx7dBY26.js} +1 -1
- langflow/frontend/assets/{index-CbnWRlYY.js → index-C-EdnFdA.js} +1 -1
- langflow/frontend/assets/{index-DljpLeCW.js → index-C-Xfg4cD.js} +1 -1
- langflow/frontend/assets/{index-Bwi4flFg.js → index-C1f2wMat.js} +1 -1
- langflow/frontend/assets/index-C1xroOlH.css +1 -0
- langflow/frontend/assets/{index-D6CSIrp1.js → index-C3KequvP.js} +1 -1
- langflow/frontend/assets/{index-BYjw7Gk3.js → index-C3ZjKdCD.js} +1 -1
- langflow/frontend/assets/{index-DIKUsGLF.js → index-C3l0zYn0.js} +1 -1
- langflow/frontend/assets/{index-CfPBgkqg.js → index-C3yvArUT.js} +1 -1
- langflow/frontend/assets/{index-CsLQiWNf.js → index-C9Cxnkl8.js} +1 -1
- langflow/frontend/assets/{index-mzl9ULw5.js → index-CBc8fEAE.js} +1 -1
- langflow/frontend/assets/{index-CEJNWPhA.js → index-CBvrGgID.js} +1 -1
- langflow/frontend/assets/{index-DwfHWnX7.js → index-CD-PqGCY.js} +1 -1
- langflow/frontend/assets/{index-dyXKnkMi.js → index-CGO1CiUr.js} +1 -1
- langflow/frontend/assets/{index-Dka_Rk4-.js → index-CH5UVA9b.js} +1 -1
- langflow/frontend/assets/{index-uiKla4UR.js → index-CLJeJYjH.js} +1 -1
- langflow/frontend/assets/{index-D9kwEzPB.js → index-CMZ79X-Y.js} +1 -1
- langflow/frontend/assets/{index-BrVhdPZb.js → index-CMzfJKiW.js} +1 -1
- langflow/frontend/assets/{index-Bct1s6__.js → index-CNw1H-Wc.js} +1 -1
- langflow/frontend/assets/{index-B7uEuOPK.js → index-CPHEscq9.js} +1 -1
- langflow/frontend/assets/{index-ekfMOqrF.js → index-CRPKJZw9.js} +1 -1
- langflow/frontend/assets/{index-G4ro0MjT.js → index-CRPyCfYy.js} +1 -1
- langflow/frontend/assets/{index-CSu8KHOi.js → index-CRcMqCIj.js} +1 -1
- langflow/frontend/assets/{index-DsoX2o1S.js → index-CUVDws8F.js} +1 -1
- langflow/frontend/assets/{index-r_8gs4nL.js → index-CVWQfRYZ.js} +1 -1
- langflow/frontend/assets/{index-7hzXChQz.js → index-CVl6MbaM.js} +1 -1
- langflow/frontend/assets/{index-B8UR8v-Q.js → index-CVwWoX99.js} +1 -1
- langflow/frontend/assets/{index-Dda2u_yz.js → index-CWPzZtSx.js} +1 -1
- langflow/frontend/assets/{index-BKeZt2hQ.js → index-CZqRL9DE.js} +1 -1
- langflow/frontend/assets/{index-DHngW1k8.js → index-CdIf07Rw.js} +1 -1
- langflow/frontend/assets/{index-C--IDAyc.js → index-Cewy7JZE.js} +1 -1
- langflow/frontend/assets/{index-DZP_SaHb.js → index-CfwLpbMM.js} +1 -1
- langflow/frontend/assets/{index-CuCM7Wu7.js → index-CiR1dxI4.js} +1 -1
- langflow/frontend/assets/{index-Xi4TplbI.js → index-CiixOzDG.js} +1 -1
- langflow/frontend/assets/{index-BLYw9MK2.js → index-ClsuDmR6.js} +1 -1
- langflow/frontend/assets/{index-DMCWDJOl.js → index-CmEYYRN1.js} +1 -1
- langflow/frontend/assets/{index-CrAF-31Y.js → index-Co20d-eQ.js} +1 -1
- langflow/frontend/assets/{index-DXAfIEvs.js → index-CpzXS6md.js} +1 -1
- langflow/frontend/assets/{index-BmYJJ5YS.js → index-Cqpzl1J4.js} +1 -1
- langflow/frontend/assets/{index-KWY77KfV.js → index-CtVIONP2.js} +1 -1
- langflow/frontend/assets/{index-B3KCdQ91.js → index-CuFXdTx4.js} +1 -1
- langflow/frontend/assets/{index-p2kStSPe.js → index-Cyd2HtHK.js} +1 -1
- langflow/frontend/assets/{index-CkjwSTSM.js → index-D-1tA8Dt.js} +1 -1
- langflow/frontend/assets/{index-BFf0HTFI.js → index-D-KY3kkq.js} +1 -1
- langflow/frontend/assets/{index-BYhcGLTV.js → index-D-_B1a8v.js} +1 -1
- langflow/frontend/assets/{index-Dr6pVDPI.js → index-D14EWPyZ.js} +1 -1
- langflow/frontend/assets/{index-BDuk0d7P.js → index-D2N3l-cw.js} +1 -1
- langflow/frontend/assets/{index-BvGQfVBD.js → index-D5ETnvJa.js} +1 -1
- langflow/frontend/assets/{index-D1oynC8a.js → index-D7kquVv2.js} +1 -1
- langflow/frontend/assets/{index-B1XqWJhG.js → index-DA6-bvgN.js} +1 -1
- langflow/frontend/assets/{index-DzIv3RyR.js → index-DDWBeudF.js} +1 -1
- langflow/frontend/assets/{index-BKlQbl-6.js → index-DDcMAaG4.js} +1 -1
- langflow/frontend/assets/{index-CkK25zZO.js → index-DHgomBdh.js} +1 -1
- langflow/frontend/assets/{index-Bj3lSwvZ.js → index-DJP-ss47.js} +1 -1
- langflow/frontend/assets/{index-DDXsm8tz.js → index-DQ7VYqQc.js} +1 -1
- langflow/frontend/assets/{index-BNQIbda3.js → index-DTqbvGC0.js} +1 -1
- langflow/frontend/assets/{index-BzoRPtTY.js → index-DUpri6zF.js} +1 -1
- langflow/frontend/assets/{index-35sspuLu.js → index-DV3utZDZ.js} +1 -1
- langflow/frontend/assets/{index-BpmqDOeZ.js → index-DXRfN4HV.js} +1 -1
- langflow/frontend/assets/{index-C0E3_MIK.js → index-Db9dYSzy.js} +1 -1
- langflow/frontend/assets/{index-C8K0r39B.js → index-DdtMEn6I.js} +1 -1
- langflow/frontend/assets/{index-BLsVo9iW.js → index-DfDhMHgQ.js} +1 -1
- langflow/frontend/assets/{index-BZFljdMa.js → index-Dfe7qfvf.js} +1 -1
- langflow/frontend/assets/{index-CyP3py8K.js → index-DhtZ5hx8.js} +1 -1
- langflow/frontend/assets/{index-w72fDjpG.js → index-DiB3CTo8.js} +1 -1
- langflow/frontend/assets/{index-CY7_TBTC.js → index-DiGWASY5.js} +1 -1
- langflow/frontend/assets/{index-CmSFKgiD.js → index-Dl5amdBz.js} +1 -1
- langflow/frontend/assets/{index-B0m53xKd.js → index-DlD4dXlZ.js} +1 -1
- langflow/frontend/assets/{index-DnVYJtVO.js → index-DmeiHnfl.js} +1 -1
- langflow/frontend/assets/index-Dmu-X5-4.js +1 -0
- langflow/frontend/assets/{index-CWYiSeWV.js → index-DpVWih90.js} +1 -1
- langflow/frontend/assets/{index-CjsommIr.js → index-DrDrcajG.js} +1 -1
- langflow/frontend/assets/{index-Un9pWxnP.js → index-Du-pc0KE.js} +1 -1
- langflow/frontend/assets/{index-oxHBZk2v.js → index-DwPkMTaY.js} +1 -1
- langflow/frontend/assets/{index-CgwykVGh.js → index-DwQEZe3C.js} +1 -1
- langflow/frontend/assets/{index-BmIx1cws.js → index-DyJFTK24.js} +1 -1
- langflow/frontend/assets/{index-0XQqYgdG.js → index-J38wh62w.js} +1 -1
- langflow/frontend/assets/{index-H7J7w7fa.js → index-Kwdl-e29.js} +1 -1
- langflow/frontend/assets/{index-CUKmGsI6.js → index-OwPvCmpW.js} +1 -1
- langflow/frontend/assets/{index-zV82kQ6k.js → index-Tw3Os-DN.js} +1 -1
- langflow/frontend/assets/{index-8cuhogZP.js → index-X0guhYF8.js} +1 -1
- langflow/frontend/assets/{index-BUse-kxM.js → index-dJWNxIRH.js} +1 -1
- langflow/frontend/assets/{index-DyqITq51.js → index-dcJ8-agu.js} +1 -1
- langflow/frontend/assets/{index-Cg53lrYh.js → index-eo2mAtL-.js} +1 -1
- langflow/frontend/assets/{index-DqbzUcI5.js → index-hG24k5xJ.js} +1 -1
- langflow/frontend/assets/{index-BQrVDjR1.js → index-h_aSZHf3.js} +1 -1
- langflow/frontend/assets/{index-kkA-qHB_.js → index-hbndqB9B.js} +1 -1
- langflow/frontend/assets/{index-DZxUIhWh.js → index-iJngutFo.js} +1 -1
- langflow/frontend/assets/{index-Dg8N3NSO.js → index-lTpteg8t.js} +1 -1
- langflow/frontend/assets/{index-DDhJVVel.js → index-lZX9AvZW.js} +1 -1
- langflow/frontend/assets/{index-BHhnpSkW.js → index-m8QA6VNM.js} +1 -1
- langflow/frontend/assets/{index-Bk4mTwnI.js → index-o0D2S7xW.js} +1 -1
- langflow/frontend/assets/{index-DJESSNJi.js → index-ovFJ_0J6.js} +1 -1
- langflow/frontend/assets/{index-DH6o91_s.js → index-pYJJOcma.js} +1 -1
- langflow/frontend/assets/{index-Bo-ww0Bb.js → index-sI75DsdM.js} +1 -1
- langflow/frontend/assets/{index-BcAgItH4.js → index-xvFOmxx4.js} +1 -1
- langflow/frontend/assets/{index-_cbGmjF4.js → index-z3SRY-mX.js} +1 -1
- langflow/frontend/assets/lazyIconImports-D97HEZkE.js +2 -0
- langflow/frontend/assets/{use-post-add-user-CvtuazTg.js → use-post-add-user-C0MdTpQ5.js} +1 -1
- langflow/frontend/index.html +2 -2
- langflow/graph/graph/base.py +4 -2
- langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +26 -0
- langflow/initial_setup/starter_projects/Basic Prompting.json +26 -0
- langflow/initial_setup/starter_projects/Blog Writer.json +56 -0
- langflow/initial_setup/starter_projects/Custom Component Generator.json +35 -0
- langflow/initial_setup/starter_projects/Document Q&A.json +27 -1
- langflow/initial_setup/starter_projects/Financial Report Parser.json +43 -0
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json +83 -1
- langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +43 -0
- langflow/initial_setup/starter_projects/Instagram Copywriter.json +49 -1
- langflow/initial_setup/starter_projects/Invoice Summarizer.json +40 -1
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json +71 -0
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json +63 -0
- langflow/initial_setup/starter_projects/Market Research.json +57 -1
- langflow/initial_setup/starter_projects/Meeting Summary.json +95 -0
- langflow/initial_setup/starter_projects/Memory Chatbot.json +35 -0
- langflow/initial_setup/starter_projects/News Aggregator.json +61 -1
- langflow/initial_setup/starter_projects/Nvidia Remix.json +67 -2
- langflow/initial_setup/starter_projects/Pok/303/251dex Agent.json" +48 -1
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +44 -1
- langflow/initial_setup/starter_projects/Price Deal Finder.json +53 -1
- langflow/initial_setup/starter_projects/Research Agent.json +40 -1
- langflow/initial_setup/starter_projects/Research Translation Loop.json +66 -0
- langflow/initial_setup/starter_projects/SEO Keyword Generator.json +17 -0
- langflow/initial_setup/starter_projects/SaaS Pricing.json +27 -1
- langflow/initial_setup/starter_projects/Search agent.json +40 -1
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +72 -3
- langflow/initial_setup/starter_projects/Simple Agent.json +57 -1
- langflow/initial_setup/starter_projects/Social Media Agent.json +77 -1
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +35 -1
- langflow/initial_setup/starter_projects/Travel Planning Agents.json +51 -3
- langflow/initial_setup/starter_projects/Twitter Thread Generator.json +80 -0
- langflow/initial_setup/starter_projects/Vector Store RAG.json +110 -3
- langflow/initial_setup/starter_projects/Youtube Analysis.json +82 -1
- langflow/initial_setup/starter_projects/vector_store_rag.py +1 -1
- langflow/processing/process.py +3 -0
- langflow/schema/openai_responses_schemas.py +74 -0
- langflow/services/auth/mcp_encryption.py +104 -0
- langflow/services/settings/feature_flags.py +1 -1
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/METADATA +1 -1
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/RECORD +239 -219
- langflow/components/vectorstores/redis.py +0 -89
- langflow/frontend/assets/index-BWgIWfv2.js +0 -1
- langflow/frontend/assets/index-CqS7zir1.css +0 -1
- langflow/frontend/assets/lazyIconImports-DTNgvPE-.js +0 -2
- /langflow/components/{vectorstores → FAISS}/faiss.py +0 -0
- /langflow/components/{vectorstores → cassandra}/cassandra.py +0 -0
- /langflow/components/{datastax/cassandra.py → cassandra/cassandra_chat.py} +0 -0
- /langflow/components/{vectorstores → cassandra}/cassandra_graph.py +0 -0
- /langflow/components/{vectorstores → chroma}/chroma.py +0 -0
- /langflow/components/{vectorstores → clickhouse}/clickhouse.py +0 -0
- /langflow/components/{vectorstores → couchbase}/couchbase.py +0 -0
- /langflow/components/{vectorstores → datastax}/astradb.py +0 -0
- /langflow/components/{vectorstores → datastax}/astradb_graph.py +0 -0
- /langflow/components/{vectorstores → datastax}/graph_rag.py +0 -0
- /langflow/components/{vectorstores → datastax}/hcd.py +0 -0
- /langflow/components/{vectorstores → elastic}/elasticsearch.py +0 -0
- /langflow/components/{vectorstores → elastic}/opensearch.py +0 -0
- /langflow/components/{vectorstores → milvus}/milvus.py +0 -0
- /langflow/components/{vectorstores → mongodb}/mongodb_atlas.py +0 -0
- /langflow/components/{vectorstores → pgvector}/pgvector.py +0 -0
- /langflow/components/{vectorstores → pinecone}/pinecone.py +0 -0
- /langflow/components/{vectorstores → qdrant}/qdrant.py +0 -0
- /langflow/components/{vectorstores → supabase}/supabase.py +0 -0
- /langflow/components/{vectorstores → upstash}/upstash.py +0 -0
- /langflow/components/{vectorstores → vectara}/vectara.py +0 -0
- /langflow/components/{vectorstores → vectara}/vectara_rag.py +0 -0
- /langflow/components/{vectorstores → weaviate}/weaviate.py +0 -0
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/WHEEL +0 -0
- {langflow_base_nightly-0.5.0.dev38.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,545 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
import time
|
|
4
|
+
import uuid
|
|
5
|
+
from collections.abc import AsyncGenerator
|
|
6
|
+
from typing import Annotated, Any
|
|
7
|
+
|
|
8
|
+
from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Request
|
|
9
|
+
from fastapi.responses import StreamingResponse
|
|
10
|
+
from loguru import logger
|
|
11
|
+
|
|
12
|
+
from langflow.api.v1.endpoints import consume_and_yield, run_flow_generator, simple_run_flow
|
|
13
|
+
from langflow.api.v1.schemas import SimplifiedAPIRequest
|
|
14
|
+
from langflow.events.event_manager import create_stream_tokens_event_manager
|
|
15
|
+
from langflow.helpers.flow import get_flow_by_id_or_endpoint_name
|
|
16
|
+
from langflow.schema.content_types import ToolContent
|
|
17
|
+
from langflow.schema.openai_responses_schemas import (
|
|
18
|
+
OpenAIErrorResponse,
|
|
19
|
+
OpenAIResponsesRequest,
|
|
20
|
+
OpenAIResponsesResponse,
|
|
21
|
+
OpenAIResponsesStreamChunk,
|
|
22
|
+
create_openai_error,
|
|
23
|
+
)
|
|
24
|
+
from langflow.services.auth.utils import api_key_security
|
|
25
|
+
from langflow.services.database.models.flow.model import FlowRead
|
|
26
|
+
from langflow.services.database.models.user.model import UserRead
|
|
27
|
+
from langflow.services.deps import get_telemetry_service
|
|
28
|
+
from langflow.services.telemetry.schema import RunPayload
|
|
29
|
+
from langflow.services.telemetry.service import TelemetryService
|
|
30
|
+
|
|
31
|
+
router = APIRouter(tags=["OpenAI Responses API"])
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def has_chat_input(flow_data: dict | None) -> bool:
|
|
35
|
+
"""Check if the flow has a chat input component."""
|
|
36
|
+
if not flow_data or "nodes" not in flow_data:
|
|
37
|
+
return False
|
|
38
|
+
|
|
39
|
+
return any(node.get("data", {}).get("type") in ["ChatInput", "Chat Input"] for node in flow_data["nodes"])
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def has_chat_output(flow_data: dict | None) -> bool:
|
|
43
|
+
"""Check if the flow has a chat input component."""
|
|
44
|
+
if not flow_data or "nodes" not in flow_data:
|
|
45
|
+
return False
|
|
46
|
+
|
|
47
|
+
return any(node.get("data", {}).get("type") in ["ChatOutput", "Chat Output"] for node in flow_data["nodes"])
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
async def run_flow_for_openai_responses(
|
|
51
|
+
flow: FlowRead,
|
|
52
|
+
request: OpenAIResponsesRequest,
|
|
53
|
+
api_key_user: UserRead,
|
|
54
|
+
*,
|
|
55
|
+
stream: bool = False,
|
|
56
|
+
variables: dict[str, str] | None = None,
|
|
57
|
+
) -> OpenAIResponsesResponse | StreamingResponse:
|
|
58
|
+
"""Run a flow for OpenAI Responses API compatibility."""
|
|
59
|
+
# Check if flow has chat input
|
|
60
|
+
if not has_chat_input(flow.data):
|
|
61
|
+
msg = "Flow must have a ChatInput component to be compatible with OpenAI Responses API"
|
|
62
|
+
raise ValueError(msg)
|
|
63
|
+
|
|
64
|
+
if not has_chat_output(flow.data):
|
|
65
|
+
msg = "Flow must have a ChatOutput component to be compatible with OpenAI Responses API"
|
|
66
|
+
raise ValueError(msg)
|
|
67
|
+
|
|
68
|
+
# Use previous_response_id as session_id for conversation continuity
|
|
69
|
+
# If no previous_response_id, create a new session_id
|
|
70
|
+
session_id = request.previous_response_id or str(uuid.uuid4())
|
|
71
|
+
|
|
72
|
+
# Store header variables in context for global variable override
|
|
73
|
+
context = {}
|
|
74
|
+
if variables:
|
|
75
|
+
context["request_variables"] = variables
|
|
76
|
+
logger.debug(f"Added request variables to context: {variables}")
|
|
77
|
+
|
|
78
|
+
# Convert OpenAI request to SimplifiedAPIRequest
|
|
79
|
+
# Note: We're moving away from tweaks to a context-based approach
|
|
80
|
+
simplified_request = SimplifiedAPIRequest(
|
|
81
|
+
input_value=request.input,
|
|
82
|
+
input_type="chat", # Use chat input type for better compatibility
|
|
83
|
+
output_type="chat", # Use chat output type for better compatibility
|
|
84
|
+
tweaks={}, # Empty tweaks, using context instead
|
|
85
|
+
session_id=session_id,
|
|
86
|
+
)
|
|
87
|
+
|
|
88
|
+
# Context will be passed separately to simple_run_flow
|
|
89
|
+
|
|
90
|
+
logger.debug(f"SimplifiedAPIRequest created with context: {context}")
|
|
91
|
+
|
|
92
|
+
# Use session_id as response_id for OpenAI compatibility
|
|
93
|
+
response_id = session_id
|
|
94
|
+
created_timestamp = int(time.time())
|
|
95
|
+
|
|
96
|
+
if stream:
|
|
97
|
+
# Handle streaming response
|
|
98
|
+
asyncio_queue: asyncio.Queue = asyncio.Queue()
|
|
99
|
+
asyncio_queue_client_consumed: asyncio.Queue = asyncio.Queue()
|
|
100
|
+
event_manager = create_stream_tokens_event_manager(queue=asyncio_queue)
|
|
101
|
+
|
|
102
|
+
async def openai_stream_generator() -> AsyncGenerator[str, None]:
|
|
103
|
+
"""Convert Langflow events to OpenAI Responses API streaming format."""
|
|
104
|
+
main_task = asyncio.create_task(
|
|
105
|
+
run_flow_generator(
|
|
106
|
+
flow=flow,
|
|
107
|
+
input_request=simplified_request,
|
|
108
|
+
api_key_user=api_key_user,
|
|
109
|
+
event_manager=event_manager,
|
|
110
|
+
client_consumed_queue=asyncio_queue_client_consumed,
|
|
111
|
+
context=context,
|
|
112
|
+
)
|
|
113
|
+
)
|
|
114
|
+
|
|
115
|
+
try:
|
|
116
|
+
# Send initial chunk to establish connection
|
|
117
|
+
initial_chunk = OpenAIResponsesStreamChunk(
|
|
118
|
+
id=response_id,
|
|
119
|
+
created=created_timestamp,
|
|
120
|
+
model=request.model,
|
|
121
|
+
delta={"content": ""},
|
|
122
|
+
)
|
|
123
|
+
yield f"data: {initial_chunk.model_dump_json()}\n\n"
|
|
124
|
+
|
|
125
|
+
tool_call_counter = 0
|
|
126
|
+
processed_tools = set() # Track processed tool calls to avoid duplicates
|
|
127
|
+
previous_content = "" # Track content already sent to calculate deltas
|
|
128
|
+
|
|
129
|
+
async for event_data in consume_and_yield(asyncio_queue, asyncio_queue_client_consumed):
|
|
130
|
+
if event_data is None:
|
|
131
|
+
break
|
|
132
|
+
|
|
133
|
+
content = ""
|
|
134
|
+
|
|
135
|
+
# Parse byte string events as JSON
|
|
136
|
+
if isinstance(event_data, bytes):
|
|
137
|
+
try:
|
|
138
|
+
import json
|
|
139
|
+
|
|
140
|
+
event_str = event_data.decode("utf-8")
|
|
141
|
+
parsed_event = json.loads(event_str)
|
|
142
|
+
|
|
143
|
+
if isinstance(parsed_event, dict):
|
|
144
|
+
event_type = parsed_event.get("event")
|
|
145
|
+
data = parsed_event.get("data", {})
|
|
146
|
+
|
|
147
|
+
# Handle add_message events
|
|
148
|
+
if event_type == "add_message":
|
|
149
|
+
sender_name = data.get("sender_name", "")
|
|
150
|
+
text = data.get("text", "")
|
|
151
|
+
sender = data.get("sender", "")
|
|
152
|
+
content_blocks = data.get("content_blocks", [])
|
|
153
|
+
|
|
154
|
+
# Look for Agent Steps in content_blocks
|
|
155
|
+
for block in content_blocks:
|
|
156
|
+
if block.get("title") == "Agent Steps":
|
|
157
|
+
contents = block.get("contents", [])
|
|
158
|
+
for step in contents:
|
|
159
|
+
# Look for tool_use type items
|
|
160
|
+
if step.get("type") == "tool_use":
|
|
161
|
+
tool_name = step.get("name", "")
|
|
162
|
+
tool_input = step.get("tool_input", {})
|
|
163
|
+
tool_output = step.get("output")
|
|
164
|
+
|
|
165
|
+
# Only emit tool calls with explicit tool names and
|
|
166
|
+
# meaningful arguments
|
|
167
|
+
if tool_name and tool_input is not None and tool_output is not None:
|
|
168
|
+
# Create unique identifier for this tool call
|
|
169
|
+
tool_signature = (
|
|
170
|
+
f"{tool_name}:{hash(str(sorted(tool_input.items())))}"
|
|
171
|
+
)
|
|
172
|
+
|
|
173
|
+
# Skip if we've already processed this tool call
|
|
174
|
+
if tool_signature in processed_tools:
|
|
175
|
+
continue
|
|
176
|
+
|
|
177
|
+
processed_tools.add(tool_signature)
|
|
178
|
+
tool_call_counter += 1
|
|
179
|
+
call_id = f"call_{tool_call_counter}"
|
|
180
|
+
tool_id = f"fc_{tool_call_counter}"
|
|
181
|
+
tool_call_event = {
|
|
182
|
+
"type": "response.output_item.added",
|
|
183
|
+
"item": {
|
|
184
|
+
"id": tool_id,
|
|
185
|
+
"type": "function_call", # OpenAI uses "function_call"
|
|
186
|
+
"status": "in_progress", # OpenAI includes status
|
|
187
|
+
"name": tool_name,
|
|
188
|
+
"arguments": "", # Start with empty, build via deltas
|
|
189
|
+
"call_id": call_id,
|
|
190
|
+
},
|
|
191
|
+
}
|
|
192
|
+
yield (
|
|
193
|
+
f"event: response.output_item.added\n"
|
|
194
|
+
f"data: {json.dumps(tool_call_event)}\n\n"
|
|
195
|
+
)
|
|
196
|
+
|
|
197
|
+
# Send function call arguments as delta events (like OpenAI)
|
|
198
|
+
arguments_str = json.dumps(tool_input)
|
|
199
|
+
arg_delta_event = {
|
|
200
|
+
"type": "response.function_call_arguments.delta",
|
|
201
|
+
"delta": arguments_str,
|
|
202
|
+
"item_id": tool_id,
|
|
203
|
+
"output_index": 0,
|
|
204
|
+
}
|
|
205
|
+
yield (
|
|
206
|
+
f"event: response.function_call_arguments.delta\n"
|
|
207
|
+
f"data: {json.dumps(arg_delta_event)}\n\n"
|
|
208
|
+
)
|
|
209
|
+
|
|
210
|
+
# Send function call arguments done event
|
|
211
|
+
arg_done_event = {
|
|
212
|
+
"type": "response.function_call_arguments.done",
|
|
213
|
+
"arguments": arguments_str,
|
|
214
|
+
"item_id": tool_id,
|
|
215
|
+
"output_index": 0,
|
|
216
|
+
}
|
|
217
|
+
yield (
|
|
218
|
+
f"event: response.function_call_arguments.done\n"
|
|
219
|
+
f"data: {json.dumps(arg_done_event)}\n\n"
|
|
220
|
+
)
|
|
221
|
+
|
|
222
|
+
# If there's output, send completion event
|
|
223
|
+
if tool_output is not None:
|
|
224
|
+
# Check if include parameter requests tool_call.results
|
|
225
|
+
include_results = (
|
|
226
|
+
request.include
|
|
227
|
+
and "tool_call.results" in request.include
|
|
228
|
+
)
|
|
229
|
+
|
|
230
|
+
if include_results:
|
|
231
|
+
# Format with detailed results
|
|
232
|
+
tool_done_event = {
|
|
233
|
+
"type": "response.output_item.done",
|
|
234
|
+
"item": {
|
|
235
|
+
"id": f"{tool_name}_{tool_id}",
|
|
236
|
+
"inputs": tool_input, # Raw inputs as-is
|
|
237
|
+
"status": "completed",
|
|
238
|
+
"type": "tool_call",
|
|
239
|
+
"tool_name": f"{tool_name}",
|
|
240
|
+
"results": tool_output, # Raw output as-is
|
|
241
|
+
},
|
|
242
|
+
"output_index": 0,
|
|
243
|
+
"sequence_number": tool_call_counter + 5,
|
|
244
|
+
}
|
|
245
|
+
else:
|
|
246
|
+
# Regular function call format
|
|
247
|
+
tool_done_event = {
|
|
248
|
+
"type": "response.output_item.done",
|
|
249
|
+
"item": {
|
|
250
|
+
"id": tool_id,
|
|
251
|
+
"type": "function_call", # Match OpenAI format
|
|
252
|
+
"status": "completed",
|
|
253
|
+
"arguments": arguments_str,
|
|
254
|
+
"call_id": call_id,
|
|
255
|
+
"name": tool_name,
|
|
256
|
+
},
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
yield (
|
|
260
|
+
f"event: response.output_item.done\n"
|
|
261
|
+
f"data: {json.dumps(tool_done_event)}\n\n"
|
|
262
|
+
)
|
|
263
|
+
|
|
264
|
+
# Extract text content for streaming (only AI responses)
|
|
265
|
+
if (
|
|
266
|
+
sender in ["Machine", "AI", "Agent"]
|
|
267
|
+
and text != request.input
|
|
268
|
+
and sender_name == "Agent"
|
|
269
|
+
):
|
|
270
|
+
# Calculate delta: only send newly generated content
|
|
271
|
+
if text.startswith(previous_content):
|
|
272
|
+
content = text[len(previous_content) :]
|
|
273
|
+
previous_content = text
|
|
274
|
+
else:
|
|
275
|
+
# If text doesn't start with previous content, send full text
|
|
276
|
+
# This handles cases where the content might be reset
|
|
277
|
+
content = text
|
|
278
|
+
previous_content = text
|
|
279
|
+
|
|
280
|
+
except (json.JSONDecodeError, UnicodeDecodeError):
|
|
281
|
+
continue
|
|
282
|
+
|
|
283
|
+
# Only send chunks with actual content
|
|
284
|
+
if content:
|
|
285
|
+
chunk = OpenAIResponsesStreamChunk(
|
|
286
|
+
id=response_id,
|
|
287
|
+
created=created_timestamp,
|
|
288
|
+
model=request.model,
|
|
289
|
+
delta={"content": content},
|
|
290
|
+
)
|
|
291
|
+
yield f"data: {chunk.model_dump_json()}\n\n"
|
|
292
|
+
|
|
293
|
+
# Send final completion chunk
|
|
294
|
+
final_chunk = OpenAIResponsesStreamChunk(
|
|
295
|
+
id=response_id,
|
|
296
|
+
created=created_timestamp,
|
|
297
|
+
model=request.model,
|
|
298
|
+
delta={},
|
|
299
|
+
status="completed",
|
|
300
|
+
)
|
|
301
|
+
yield f"data: {final_chunk.model_dump_json()}\n\n"
|
|
302
|
+
yield "data: [DONE]\n\n"
|
|
303
|
+
|
|
304
|
+
except Exception as e: # noqa: BLE001
|
|
305
|
+
logger.error(f"Error in stream generator: {e}")
|
|
306
|
+
error_response = create_openai_error(
|
|
307
|
+
message=str(e),
|
|
308
|
+
type_="processing_error",
|
|
309
|
+
)
|
|
310
|
+
yield f"data: {error_response}\n\n"
|
|
311
|
+
finally:
|
|
312
|
+
if not main_task.done():
|
|
313
|
+
main_task.cancel()
|
|
314
|
+
|
|
315
|
+
return StreamingResponse(
|
|
316
|
+
openai_stream_generator(),
|
|
317
|
+
media_type="text/event-stream",
|
|
318
|
+
headers={
|
|
319
|
+
"Cache-Control": "no-cache",
|
|
320
|
+
"Connection": "keep-alive",
|
|
321
|
+
"Access-Control-Allow-Origin": "*",
|
|
322
|
+
},
|
|
323
|
+
)
|
|
324
|
+
|
|
325
|
+
# Handle non-streaming response
|
|
326
|
+
result = await simple_run_flow(
|
|
327
|
+
flow=flow,
|
|
328
|
+
input_request=simplified_request,
|
|
329
|
+
stream=False,
|
|
330
|
+
api_key_user=api_key_user,
|
|
331
|
+
context=context,
|
|
332
|
+
)
|
|
333
|
+
|
|
334
|
+
# Extract output text and tool calls from result
|
|
335
|
+
output_text = ""
|
|
336
|
+
tool_calls: list[dict[str, Any]] = []
|
|
337
|
+
|
|
338
|
+
if result.outputs:
|
|
339
|
+
for run_output in result.outputs:
|
|
340
|
+
if run_output and run_output.outputs:
|
|
341
|
+
for component_output in run_output.outputs:
|
|
342
|
+
if component_output:
|
|
343
|
+
# Handle messages (final chat outputs)
|
|
344
|
+
if hasattr(component_output, "messages") and component_output.messages:
|
|
345
|
+
for msg in component_output.messages:
|
|
346
|
+
if hasattr(msg, "message"):
|
|
347
|
+
output_text = msg.message
|
|
348
|
+
break
|
|
349
|
+
# Handle results
|
|
350
|
+
if not output_text and hasattr(component_output, "results") and component_output.results:
|
|
351
|
+
for value in component_output.results.values():
|
|
352
|
+
if hasattr(value, "get_text"):
|
|
353
|
+
output_text = value.get_text()
|
|
354
|
+
break
|
|
355
|
+
if isinstance(value, str):
|
|
356
|
+
output_text = value
|
|
357
|
+
break
|
|
358
|
+
|
|
359
|
+
if hasattr(component_output, "results") and component_output.results:
|
|
360
|
+
for blocks in component_output.results.get("message", {}).content_blocks:
|
|
361
|
+
tool_calls.extend(
|
|
362
|
+
{
|
|
363
|
+
"name": content.name,
|
|
364
|
+
"input": content.tool_input,
|
|
365
|
+
"output": content.output,
|
|
366
|
+
}
|
|
367
|
+
for content in blocks.contents
|
|
368
|
+
if isinstance(content, ToolContent)
|
|
369
|
+
)
|
|
370
|
+
if output_text:
|
|
371
|
+
break
|
|
372
|
+
if output_text:
|
|
373
|
+
break
|
|
374
|
+
|
|
375
|
+
# Build output array
|
|
376
|
+
output_items = []
|
|
377
|
+
|
|
378
|
+
# Add tool calls if includes parameter requests them
|
|
379
|
+
include_results = request.include and "tool_call.results" in request.include
|
|
380
|
+
|
|
381
|
+
tool_call_id_counter = 1
|
|
382
|
+
for tool_call in tool_calls:
|
|
383
|
+
if include_results:
|
|
384
|
+
# Format as detailed tool call with results (like file_search_call in sample)
|
|
385
|
+
tool_call_item = {
|
|
386
|
+
"id": f"{tool_call['name']}_{tool_call_id_counter}",
|
|
387
|
+
"queries": list(tool_call["input"].values())
|
|
388
|
+
if isinstance(tool_call["input"], dict)
|
|
389
|
+
else [str(tool_call["input"])],
|
|
390
|
+
"status": "completed",
|
|
391
|
+
"tool_name": f"{tool_call['name']}",
|
|
392
|
+
"type": "tool_call",
|
|
393
|
+
"results": tool_call["output"] if tool_call["output"] is not None else [],
|
|
394
|
+
}
|
|
395
|
+
else:
|
|
396
|
+
# Format as basic function call
|
|
397
|
+
tool_call_item = {
|
|
398
|
+
"id": f"fc_{tool_call_id_counter}",
|
|
399
|
+
"type": "function_call",
|
|
400
|
+
"status": "completed",
|
|
401
|
+
"name": tool_call["name"],
|
|
402
|
+
"arguments": json.dumps(tool_call["input"]) if tool_call["input"] is not None else "{}",
|
|
403
|
+
}
|
|
404
|
+
|
|
405
|
+
output_items.append(tool_call_item)
|
|
406
|
+
tool_call_id_counter += 1
|
|
407
|
+
|
|
408
|
+
# Add the message output
|
|
409
|
+
output_message = {
|
|
410
|
+
"type": "message",
|
|
411
|
+
"id": f"msg_{response_id}",
|
|
412
|
+
"status": "completed",
|
|
413
|
+
"role": "assistant",
|
|
414
|
+
"content": [{"type": "output_text", "text": output_text, "annotations": []}],
|
|
415
|
+
}
|
|
416
|
+
output_items.append(output_message)
|
|
417
|
+
|
|
418
|
+
return OpenAIResponsesResponse(
|
|
419
|
+
id=response_id,
|
|
420
|
+
created_at=created_timestamp,
|
|
421
|
+
model=request.model,
|
|
422
|
+
output=output_items,
|
|
423
|
+
previous_response_id=request.previous_response_id,
|
|
424
|
+
)
|
|
425
|
+
|
|
426
|
+
|
|
427
|
+
@router.post("/responses", response_model=None)
async def create_response(
    request: OpenAIResponsesRequest,
    background_tasks: BackgroundTasks,
    api_key_user: Annotated[UserRead, Depends(api_key_security)],
    telemetry_service: Annotated[TelemetryService, Depends(get_telemetry_service)],
    http_request: Request,
) -> OpenAIResponsesResponse | StreamingResponse | OpenAIErrorResponse:
    """Create a response using OpenAI Responses API format.

    This endpoint accepts a flow_id in the model parameter and processes
    the input through the specified Langflow flow.

    Args:
        request: OpenAI Responses API request with model (flow_id) and input
        background_tasks: FastAPI background task manager
        api_key_user: Authenticated user from API key
        telemetry_service: Telemetry service for logging
        http_request: The incoming HTTP request

    Returns:
        OpenAI-compatible response or streaming response

    Raises:
        HTTPException: For validation errors or flow execution issues
    """
    start_time = time.perf_counter()

    # Extract global variables from X-LANGFLOW-GLOBAL-VAR-* headers
    variables = {}
    header_prefix = "x-langflow-global-var-"

    logger.debug(f"All headers received: {list(http_request.headers.keys())}")
    logger.debug(f"Looking for headers starting with: {header_prefix}")

    for header_name, header_value in http_request.headers.items():
        header_lower = header_name.lower()
        if header_lower.startswith(header_prefix):
            # Extract variable name from header (remove prefix) and convert to uppercase
            var_name = header_lower[len(header_prefix) :].upper()
            variables[var_name] = header_value
            # SECURITY: log only the variable NAME, never the value — these
            # headers commonly carry API keys and other secrets.
            logger.debug(f"Found global variable: {var_name} (converted to uppercase from header: {header_name})")

    # Names only; variable values are deliberately kept out of the logs.
    logger.debug(f"Extracted global variables from headers: {list(variables.keys())}")

    # Validate tools parameter - error out if tools are provided
    if request.tools is not None:
        error_response = create_openai_error(
            message="Tools are not supported yet",
            type_="invalid_request_error",
            code="tools_not_supported",
        )
        return OpenAIErrorResponse(error=error_response["error"])

    # Get flow using the model field (which contains flow_id)
    try:
        flow = await get_flow_by_id_or_endpoint_name(request.model, str(api_key_user.id))
    except HTTPException:
        # Normalize "not found"/lookup failures into the OpenAI-style error below.
        flow = None

    if flow is None:
        error_response = create_openai_error(
            message=f"Flow with id '{request.model}' not found",
            type_="invalid_request_error",
            code="flow_not_found",
        )
        return OpenAIErrorResponse(error=error_response["error"])

    try:
        # Process the request
        result = await run_flow_for_openai_responses(
            flow=flow,
            request=request,
            api_key_user=api_key_user,
            stream=request.stream,
            variables=variables,
        )

        # Log telemetry for successful completion
        if not request.stream:  # Only log for non-streaming responses
            end_time = time.perf_counter()
            background_tasks.add_task(
                telemetry_service.log_package_run,
                RunPayload(
                    run_is_webhook=False,
                    run_seconds=int(end_time - start_time),
                    run_success=True,
                    run_error_message="",
                ),
            )

    except Exception as exc:  # noqa: BLE001
        logger.error(f"Error processing OpenAI Responses request: {exc}")

        # Log telemetry for failed completion
        background_tasks.add_task(
            telemetry_service.log_package_run,
            RunPayload(
                run_is_webhook=False,
                run_seconds=int(time.perf_counter() - start_time),
                run_success=False,
                run_error_message=str(exc),
            ),
        )

        # Return OpenAI-compatible error
        error_response = create_openai_error(
            message=str(exc),
            type_="processing_error",
        )
        return OpenAIErrorResponse(error=error_response["error"])
    return result
|
langflow/api/v1/schemas.py
CHANGED
|
@@ -445,13 +445,12 @@ class AuthSettings(BaseModel):
|
|
|
445
445
|
"""Model representing authentication settings for MCP."""
|
|
446
446
|
|
|
447
447
|
auth_type: Literal["none", "apikey", "oauth"] = "none"
|
|
448
|
-
api_key: SecretStr | None = None
|
|
449
448
|
oauth_host: str | None = None
|
|
450
449
|
oauth_port: str | None = None
|
|
451
450
|
oauth_server_url: str | None = None
|
|
452
451
|
oauth_callback_path: str | None = None
|
|
453
452
|
oauth_client_id: str | None = None
|
|
454
|
-
oauth_client_secret:
|
|
453
|
+
oauth_client_secret: SecretStr | None = None
|
|
455
454
|
oauth_auth_url: str | None = None
|
|
456
455
|
oauth_token_url: str | None = None
|
|
457
456
|
oauth_mcp_scope: str | None = None
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""Package init for the FAISS vector store component; imports are deferred."""

from __future__ import annotations

from typing import TYPE_CHECKING, Any

from langflow.components._importing import import_mod

if TYPE_CHECKING:
    # Import only for static type checkers; at runtime the component is
    # loaded lazily via __getattr__ below.
    from .faiss import FaissVectorStoreComponent

# Maps each public attribute name to the submodule that defines it;
# consumed by __getattr__ to perform the deferred import.
_dynamic_imports = {
    "FaissVectorStoreComponent": "faiss",
}

__all__ = [
    "FaissVectorStoreComponent",
]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def __getattr__(attr_name: str) -> Any:
    """Resolve FAISS component names lazily the first time they are accessed."""
    if attr_name not in _dynamic_imports:
        msg = f"module '{__name__}' has no attribute '{attr_name}'"
        raise AttributeError(msg)
    try:
        value = import_mod(attr_name, _dynamic_imports[attr_name], __spec__.parent)
    except (ModuleNotFoundError, ImportError, AttributeError) as e:
        msg = f"Could not import '{attr_name}' from '{__name__}': {e}"
        raise AttributeError(msg) from e
    # Cache in module globals so later lookups bypass __getattr__ entirely.
    globals()[attr_name] = value
    return value
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def __dir__() -> list[str]:
    """Advertise the lazily-importable public names for dir() / completion."""
    return [*__all__]
|