langflow-base-nightly 0.5.0.dev39__py3-none-any.whl → 0.5.1.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. langflow/api/router.py +2 -0
  2. langflow/api/v1/__init__.py +2 -0
  3. langflow/api/v1/endpoints.py +7 -1
  4. langflow/api/v1/openai_responses.py +545 -0
  5. langflow/components/data/file.py +302 -376
  6. langflow/components/docling/docling_inline.py +56 -4
  7. langflow/components/nvidia/nvidia_ingest.py +3 -2
  8. langflow/components/youtube/channel.py +1 -1
  9. langflow/custom/custom_component/custom_component.py +11 -0
  10. langflow/graph/graph/base.py +3 -1
  11. langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +1 -1
  12. langflow/initial_setup/starter_projects/Basic Prompting.json +1 -1
  13. langflow/initial_setup/starter_projects/Blog Writer.json +2 -2
  14. langflow/initial_setup/starter_projects/Custom Component Generator.json +1 -1
  15. langflow/initial_setup/starter_projects/Document Q&A.json +2 -2
  16. langflow/initial_setup/starter_projects/Financial Report Parser.json +1 -1
  17. langflow/initial_setup/starter_projects/Hybrid Search RAG.json +2 -2
  18. langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +1 -1
  19. langflow/initial_setup/starter_projects/Instagram Copywriter.json +2 -2
  20. langflow/initial_setup/starter_projects/Invoice Summarizer.json +1 -1
  21. langflow/initial_setup/starter_projects/Knowledge Ingestion.json +2 -2
  22. langflow/initial_setup/starter_projects/Knowledge Retrieval.json +1 -1
  23. langflow/initial_setup/starter_projects/Market Research.json +2 -2
  24. langflow/initial_setup/starter_projects/Meeting Summary.json +3 -3
  25. langflow/initial_setup/starter_projects/Memory Chatbot.json +1 -1
  26. langflow/initial_setup/starter_projects/News Aggregator.json +3 -3
  27. langflow/initial_setup/starter_projects/Nvidia Remix.json +2 -2
  28. langflow/initial_setup/starter_projects/Pokédex Agent.json +2 -2
  29. langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +2 -2
  30. langflow/initial_setup/starter_projects/Price Deal Finder.json +3 -3
  31. langflow/initial_setup/starter_projects/Research Agent.json +2 -2
  32. langflow/initial_setup/starter_projects/Research Translation Loop.json +1 -1
  33. langflow/initial_setup/starter_projects/SEO Keyword Generator.json +1 -1
  34. langflow/initial_setup/starter_projects/SaaS Pricing.json +1 -1
  35. langflow/initial_setup/starter_projects/Search agent.json +2 -2
  36. langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +3 -3
  37. langflow/initial_setup/starter_projects/Simple Agent.json +2 -2
  38. langflow/initial_setup/starter_projects/Social Media Agent.json +5 -5
  39. langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +3 -3
  40. langflow/initial_setup/starter_projects/Travel Planning Agents.json +1 -1
  41. langflow/initial_setup/starter_projects/Twitter Thread Generator.json +1 -1
  42. langflow/initial_setup/starter_projects/Vector Store RAG.json +5 -5
  43. langflow/initial_setup/starter_projects/Youtube Analysis.json +2 -2
  44. langflow/schema/openai_responses_schemas.py +74 -0
  45. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/METADATA +1 -1
  46. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/RECORD +48 -46
  47. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/WHEEL +0 -0
  48. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/entry_points.txt +0 -0
langflow/api/router.py CHANGED
@@ -13,6 +13,7 @@ from langflow.api.v1 import (
     mcp_projects_router,
     mcp_router,
     monitor_router,
+    openai_responses_router,
     projects_router,
     starter_projects_router,
     store_router,
@@ -50,6 +51,7 @@ router_v1.include_router(knowledge_bases_router)
 router_v1.include_router(mcp_router)
 router_v1.include_router(voice_mode_router)
 router_v1.include_router(mcp_projects_router)
+router_v1.include_router(openai_responses_router)
 
 router_v2.include_router(files_router_v2)
 router_v2.include_router(mcp_router_v2)
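Since router_v1 is served under the /api/v1 prefix on a default Langflow install, including openai_responses_router here is what ultimately exposes the new endpoint (defined later in this diff) at POST /api/v1/responses.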
langflow/api/v1/__init__.py CHANGED
@@ -9,6 +9,7 @@ from langflow.api.v1.login import router as login_router
 from langflow.api.v1.mcp import router as mcp_router
 from langflow.api.v1.mcp_projects import router as mcp_projects_router
 from langflow.api.v1.monitor import router as monitor_router
+from langflow.api.v1.openai_responses import router as openai_responses_router
 from langflow.api.v1.projects import router as projects_router
 from langflow.api.v1.starter_projects import router as starter_projects_router
 from langflow.api.v1.store import router as store_router
@@ -29,6 +30,7 @@ __all__ = [
     "mcp_projects_router",
     "mcp_router",
     "monitor_router",
+    "openai_responses_router",
     "projects_router",
     "starter_projects_router",
     "store_router",
langflow/api/v1/endpoints.py CHANGED
@@ -116,6 +116,7 @@ async def simple_run_flow(
     stream: bool = False,
     api_key_user: User | None = None,
     event_manager: EventManager | None = None,
+    context: dict | None = None,
 ):
     validate_input_and_tweaks(input_request)
     try:
@@ -127,7 +128,9 @@
             raise ValueError(msg)
         graph_data = flow.data.copy()
         graph_data = process_tweaks(graph_data, input_request.tweaks or {}, stream=stream)
-        graph = Graph.from_payload(graph_data, flow_id=flow_id_str, user_id=str(user_id), flow_name=flow.name)
+        graph = Graph.from_payload(
+            graph_data, flow_id=flow_id_str, user_id=str(user_id), flow_name=flow.name, context=context
+        )
         inputs = None
         if input_request.input_value is not None:
             inputs = [
@@ -228,6 +231,7 @@ async def run_flow_generator(
     api_key_user: User | None,
     event_manager: EventManager,
     client_consumed_queue: asyncio.Queue,
+    context: dict | None = None,
 ) -> None:
     """Executes a flow asynchronously and manages event streaming to the client.
 
@@ -240,6 +244,7 @@
         api_key_user (User | None): Optional authenticated user running the flow
         event_manager (EventManager): Manages the streaming of events to the client
         client_consumed_queue (asyncio.Queue): Tracks client consumption of events
+        context (dict | None): Optional context to pass to the flow
 
     Events Generated:
         - "add_message": Sent when new messages are added during flow execution
@@ -260,6 +265,7 @@
             stream=True,
             api_key_user=api_key_user,
             event_manager=event_manager,
+            context=context,
         )
         event_manager.on_end(data={"result": result.model_dump()})
         await client_consumed_queue.get()
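Taken together, these endpoints.py changes thread an optional context dict from the API layer down to Graph.from_payload. A minimal sketch of how a caller might use the new parameter, with hypothetical flow and user fixtures (the request_variables key is the one consumed by the new OpenAI Responses endpoint below):

# Hedged sketch, not part of the diff: exercises the new context parameter.
# "flow" and "api_key_user" are assumed to be a loaded Flow and User.
from langflow.api.v1.endpoints import simple_run_flow
from langflow.api.v1.schemas import SimplifiedAPIRequest


async def run_with_request_variables(flow, api_key_user):
    return await simple_run_flow(
        flow=flow,
        input_request=SimplifiedAPIRequest(input_value="Hello", input_type="chat", output_type="chat"),
        api_key_user=api_key_user,
        context={"request_variables": {"OPENAI_API_KEY": "sk-..."}},  # hypothetical variable
    )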
langflow/api/v1/openai_responses.py ADDED
@@ -0,0 +1,545 @@
+import asyncio
+import json
+import time
+import uuid
+from collections.abc import AsyncGenerator
+from typing import Annotated, Any
+
+from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Request
+from fastapi.responses import StreamingResponse
+from loguru import logger
+
+from langflow.api.v1.endpoints import consume_and_yield, run_flow_generator, simple_run_flow
+from langflow.api.v1.schemas import SimplifiedAPIRequest
+from langflow.events.event_manager import create_stream_tokens_event_manager
+from langflow.helpers.flow import get_flow_by_id_or_endpoint_name
+from langflow.schema.content_types import ToolContent
+from langflow.schema.openai_responses_schemas import (
+    OpenAIErrorResponse,
+    OpenAIResponsesRequest,
+    OpenAIResponsesResponse,
+    OpenAIResponsesStreamChunk,
+    create_openai_error,
+)
+from langflow.services.auth.utils import api_key_security
+from langflow.services.database.models.flow.model import FlowRead
+from langflow.services.database.models.user.model import UserRead
+from langflow.services.deps import get_telemetry_service
+from langflow.services.telemetry.schema import RunPayload
+from langflow.services.telemetry.service import TelemetryService
+
+router = APIRouter(tags=["OpenAI Responses API"])
+
+
+def has_chat_input(flow_data: dict | None) -> bool:
+    """Check if the flow has a chat input component."""
+    if not flow_data or "nodes" not in flow_data:
+        return False
+
+    return any(node.get("data", {}).get("type") in ["ChatInput", "Chat Input"] for node in flow_data["nodes"])
+
+
+def has_chat_output(flow_data: dict | None) -> bool:
+    """Check if the flow has a chat output component."""
+    if not flow_data or "nodes" not in flow_data:
+        return False
+
+    return any(node.get("data", {}).get("type") in ["ChatOutput", "Chat Output"] for node in flow_data["nodes"])
+
+
+async def run_flow_for_openai_responses(
+    flow: FlowRead,
+    request: OpenAIResponsesRequest,
+    api_key_user: UserRead,
+    *,
+    stream: bool = False,
+    variables: dict[str, str] | None = None,
+) -> OpenAIResponsesResponse | StreamingResponse:
+    """Run a flow for OpenAI Responses API compatibility."""
+    # Check that the flow has chat input and chat output components
+    if not has_chat_input(flow.data):
+        msg = "Flow must have a ChatInput component to be compatible with OpenAI Responses API"
+        raise ValueError(msg)
+
+    if not has_chat_output(flow.data):
+        msg = "Flow must have a ChatOutput component to be compatible with OpenAI Responses API"
+        raise ValueError(msg)
+
+    # Use previous_response_id as session_id for conversation continuity;
+    # if there is no previous_response_id, create a new session_id
+    session_id = request.previous_response_id or str(uuid.uuid4())
+
+    # Store header variables in context for global variable override
+    context = {}
+    if variables:
+        context["request_variables"] = variables
+        logger.debug(f"Added request variables to context: {variables}")
+
+    # Convert the OpenAI request to a SimplifiedAPIRequest.
+    # Note: we're moving away from tweaks to a context-based approach.
+    simplified_request = SimplifiedAPIRequest(
+        input_value=request.input,
+        input_type="chat",  # Use chat input type for better compatibility
+        output_type="chat",  # Use chat output type for better compatibility
+        tweaks={},  # Empty tweaks, using context instead
+        session_id=session_id,
+    )
+
+    # Context will be passed separately to simple_run_flow
+    logger.debug(f"SimplifiedAPIRequest created with context: {context}")
+
+    # Use session_id as response_id for OpenAI compatibility
+    response_id = session_id
+    created_timestamp = int(time.time())
+
+    if stream:
+        # Handle streaming response
+        asyncio_queue: asyncio.Queue = asyncio.Queue()
+        asyncio_queue_client_consumed: asyncio.Queue = asyncio.Queue()
+        event_manager = create_stream_tokens_event_manager(queue=asyncio_queue)
+
+        async def openai_stream_generator() -> AsyncGenerator[str, None]:
+            """Convert Langflow events to the OpenAI Responses API streaming format."""
+            main_task = asyncio.create_task(
+                run_flow_generator(
+                    flow=flow,
+                    input_request=simplified_request,
+                    api_key_user=api_key_user,
+                    event_manager=event_manager,
+                    client_consumed_queue=asyncio_queue_client_consumed,
+                    context=context,
+                )
+            )
+
+            try:
+                # Send an initial chunk to establish the connection
+                initial_chunk = OpenAIResponsesStreamChunk(
+                    id=response_id,
+                    created=created_timestamp,
+                    model=request.model,
+                    delta={"content": ""},
+                )
+                yield f"data: {initial_chunk.model_dump_json()}\n\n"
+
+                tool_call_counter = 0
+                processed_tools = set()  # Track processed tool calls to avoid duplicates
+                previous_content = ""  # Track content already sent to calculate deltas
+
+                async for event_data in consume_and_yield(asyncio_queue, asyncio_queue_client_consumed):
+                    if event_data is None:
+                        break
+
+                    content = ""
+
+                    # Parse byte-string events as JSON
+                    if isinstance(event_data, bytes):
+                        try:
+                            event_str = event_data.decode("utf-8")
+                            parsed_event = json.loads(event_str)
+
+                            if isinstance(parsed_event, dict):
+                                event_type = parsed_event.get("event")
+                                data = parsed_event.get("data", {})
+
+                                # Handle add_message events
+                                if event_type == "add_message":
+                                    sender_name = data.get("sender_name", "")
+                                    text = data.get("text", "")
+                                    sender = data.get("sender", "")
+                                    content_blocks = data.get("content_blocks", [])
+
+                                    # Look for Agent Steps in content_blocks
+                                    for block in content_blocks:
+                                        if block.get("title") == "Agent Steps":
+                                            contents = block.get("contents", [])
+                                            for step in contents:
+                                                # Look for tool_use type items
+                                                if step.get("type") == "tool_use":
+                                                    tool_name = step.get("name", "")
+                                                    tool_input = step.get("tool_input", {})
+                                                    tool_output = step.get("output")
+
+                                                    # Only emit tool calls with explicit tool names and
+                                                    # meaningful arguments
+                                                    if tool_name and tool_input is not None and tool_output is not None:
+                                                        # Create a unique identifier for this tool call
+                                                        tool_signature = (
+                                                            f"{tool_name}:{hash(str(sorted(tool_input.items())))}"
+                                                        )
+
+                                                        # Skip if we've already processed this tool call
+                                                        if tool_signature in processed_tools:
+                                                            continue
+
+                                                        processed_tools.add(tool_signature)
+                                                        tool_call_counter += 1
+                                                        call_id = f"call_{tool_call_counter}"
+                                                        tool_id = f"fc_{tool_call_counter}"
+                                                        tool_call_event = {
+                                                            "type": "response.output_item.added",
+                                                            "item": {
+                                                                "id": tool_id,
+                                                                "type": "function_call",  # OpenAI uses "function_call"
+                                                                "status": "in_progress",  # OpenAI includes status
+                                                                "name": tool_name,
+                                                                "arguments": "",  # Start with empty, build via deltas
+                                                                "call_id": call_id,
+                                                            },
+                                                        }
+                                                        yield (
+                                                            f"event: response.output_item.added\n"
+                                                            f"data: {json.dumps(tool_call_event)}\n\n"
+                                                        )
+
+                                                        # Send function call arguments as delta events (like OpenAI)
+                                                        arguments_str = json.dumps(tool_input)
+                                                        arg_delta_event = {
+                                                            "type": "response.function_call_arguments.delta",
+                                                            "delta": arguments_str,
+                                                            "item_id": tool_id,
+                                                            "output_index": 0,
+                                                        }
+                                                        yield (
+                                                            f"event: response.function_call_arguments.delta\n"
+                                                            f"data: {json.dumps(arg_delta_event)}\n\n"
+                                                        )
+
+                                                        # Send the function call arguments done event
+                                                        arg_done_event = {
+                                                            "type": "response.function_call_arguments.done",
+                                                            "arguments": arguments_str,
+                                                            "item_id": tool_id,
+                                                            "output_index": 0,
+                                                        }
+                                                        yield (
+                                                            f"event: response.function_call_arguments.done\n"
+                                                            f"data: {json.dumps(arg_done_event)}\n\n"
+                                                        )
+
+                                                        # If there's output, send a completion event
+                                                        if tool_output is not None:
+                                                            # Check if the include parameter requests tool_call.results
+                                                            include_results = (
+                                                                request.include
+                                                                and "tool_call.results" in request.include
+                                                            )
+
+                                                            if include_results:
+                                                                # Format with detailed results
+                                                                tool_done_event = {
+                                                                    "type": "response.output_item.done",
+                                                                    "item": {
+                                                                        "id": f"{tool_name}_{tool_id}",
+                                                                        "inputs": tool_input,  # Raw inputs as-is
+                                                                        "status": "completed",
+                                                                        "type": "tool_call",
+                                                                        "tool_name": tool_name,
+                                                                        "results": tool_output,  # Raw output as-is
+                                                                    },
+                                                                    "output_index": 0,
+                                                                    "sequence_number": tool_call_counter + 5,
+                                                                }
+                                                            else:
+                                                                # Regular function call format
+                                                                tool_done_event = {
+                                                                    "type": "response.output_item.done",
+                                                                    "item": {
+                                                                        "id": tool_id,
+                                                                        "type": "function_call",  # Match OpenAI format
+                                                                        "status": "completed",
+                                                                        "arguments": arguments_str,
+                                                                        "call_id": call_id,
+                                                                        "name": tool_name,
+                                                                    },
+                                                                }
+
+                                                            yield (
+                                                                f"event: response.output_item.done\n"
+                                                                f"data: {json.dumps(tool_done_event)}\n\n"
+                                                            )
+
+                                    # Extract text content for streaming (only AI responses)
+                                    if (
+                                        sender in ["Machine", "AI", "Agent"]
+                                        and text != request.input
+                                        and sender_name == "Agent"
+                                    ):
+                                        # Calculate the delta: only send newly generated content
+                                        if text.startswith(previous_content):
+                                            content = text[len(previous_content) :]
+                                            previous_content = text
+                                        else:
+                                            # If the text doesn't start with the previous content, send the
+                                            # full text; this handles cases where the content might be reset
+                                            content = text
+                                            previous_content = text
+
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue
+
+                    # Only send chunks with actual content
+                    if content:
+                        chunk = OpenAIResponsesStreamChunk(
+                            id=response_id,
+                            created=created_timestamp,
+                            model=request.model,
+                            delta={"content": content},
+                        )
+                        yield f"data: {chunk.model_dump_json()}\n\n"
+
+                # Send the final completion chunk
+                final_chunk = OpenAIResponsesStreamChunk(
+                    id=response_id,
+                    created=created_timestamp,
+                    model=request.model,
+                    delta={},
+                    status="completed",
+                )
+                yield f"data: {final_chunk.model_dump_json()}\n\n"
+                yield "data: [DONE]\n\n"
+
+            except Exception as e:  # noqa: BLE001
+                logger.error(f"Error in stream generator: {e}")
+                error_response = create_openai_error(
+                    message=str(e),
+                    type_="processing_error",
+                )
+                yield f"data: {json.dumps(error_response)}\n\n"
+            finally:
+                if not main_task.done():
+                    main_task.cancel()
+
+        return StreamingResponse(
+            openai_stream_generator(),
+            media_type="text/event-stream",
+            headers={
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "Access-Control-Allow-Origin": "*",
+            },
+        )
+
+    # Handle non-streaming response
+    result = await simple_run_flow(
+        flow=flow,
+        input_request=simplified_request,
+        stream=False,
+        api_key_user=api_key_user,
+        context=context,
+    )
+
+    # Extract output text and tool calls from the result
+    output_text = ""
+    tool_calls: list[dict[str, Any]] = []
+
+    if result.outputs:
+        for run_output in result.outputs:
+            if run_output and run_output.outputs:
+                for component_output in run_output.outputs:
+                    if component_output:
+                        # Handle messages (final chat outputs)
+                        if hasattr(component_output, "messages") and component_output.messages:
+                            for msg in component_output.messages:
+                                if hasattr(msg, "message"):
+                                    output_text = msg.message
+                                    break
+                        # Handle results
+                        if not output_text and hasattr(component_output, "results") and component_output.results:
+                            for value in component_output.results.values():
+                                if hasattr(value, "get_text"):
+                                    output_text = value.get_text()
+                                    break
+                                if isinstance(value, str):
+                                    output_text = value
+                                    break
+
+                        if hasattr(component_output, "results") and component_output.results:
+                            message = component_output.results.get("message")
+                            for blocks in getattr(message, "content_blocks", None) or []:
+                                tool_calls.extend(
+                                    {
+                                        "name": content.name,
+                                        "input": content.tool_input,
+                                        "output": content.output,
+                                    }
+                                    for content in blocks.contents
+                                    if isinstance(content, ToolContent)
+                                )
+                    if output_text:
+                        break
+            if output_text:
+                break
+
+    # Build the output array
+    output_items = []
+
+    # Add tool calls if the include parameter requests them
+    include_results = request.include and "tool_call.results" in request.include
+
+    tool_call_id_counter = 1
+    for tool_call in tool_calls:
+        if include_results:
+            # Format as a detailed tool call with results (like file_search_call in the sample)
+            tool_call_item = {
+                "id": f"{tool_call['name']}_{tool_call_id_counter}",
+                "queries": list(tool_call["input"].values())
+                if isinstance(tool_call["input"], dict)
+                else [str(tool_call["input"])],
+                "status": "completed",
+                "tool_name": tool_call["name"],
+                "type": "tool_call",
+                "results": tool_call["output"] if tool_call["output"] is not None else [],
+            }
+        else:
+            # Format as a basic function call
+            tool_call_item = {
+                "id": f"fc_{tool_call_id_counter}",
+                "type": "function_call",
+                "status": "completed",
+                "name": tool_call["name"],
+                "arguments": json.dumps(tool_call["input"]) if tool_call["input"] is not None else "{}",
+            }
+
+        output_items.append(tool_call_item)
+        tool_call_id_counter += 1
+
+    # Add the message output
+    output_message = {
+        "type": "message",
+        "id": f"msg_{response_id}",
+        "status": "completed",
+        "role": "assistant",
+        "content": [{"type": "output_text", "text": output_text, "annotations": []}],
+    }
+    output_items.append(output_message)
+
+    return OpenAIResponsesResponse(
+        id=response_id,
+        created_at=created_timestamp,
+        model=request.model,
+        output=output_items,
+        previous_response_id=request.previous_response_id,
+    )
+
+
+@router.post("/responses", response_model=None)
+async def create_response(
+    request: OpenAIResponsesRequest,
+    background_tasks: BackgroundTasks,
+    api_key_user: Annotated[UserRead, Depends(api_key_security)],
+    telemetry_service: Annotated[TelemetryService, Depends(get_telemetry_service)],
+    http_request: Request,
+) -> OpenAIResponsesResponse | StreamingResponse | OpenAIErrorResponse:
+    """Create a response using the OpenAI Responses API format.
+
+    This endpoint accepts a flow_id in the model parameter and processes
+    the input through the specified Langflow flow.
+
+    Args:
+        request: OpenAI Responses API request with model (flow_id) and input
+        background_tasks: FastAPI background task manager
+        api_key_user: Authenticated user from API key
+        telemetry_service: Telemetry service for logging
+        http_request: The incoming HTTP request
+
+    Returns:
+        OpenAI-compatible response or streaming response
+
+    Raises:
+        HTTPException: For validation errors or flow execution issues
+    """
+    start_time = time.perf_counter()
+
+    # Extract global variables from X-LANGFLOW-GLOBAL-VAR-* headers
+    variables = {}
+    header_prefix = "x-langflow-global-var-"
+
+    logger.debug(f"All headers received: {list(http_request.headers.keys())}")
+    logger.debug(f"Looking for headers starting with: {header_prefix}")
+
+    for header_name, header_value in http_request.headers.items():
+        header_lower = header_name.lower()
+        logger.debug(f"Checking header: '{header_lower}' (original: '{header_name}')")
+        if header_lower.startswith(header_prefix):
+            # Extract the variable name from the header (remove the prefix) and convert to uppercase
+            var_name_lower = header_lower[len(header_prefix) :]
+            var_name = var_name_lower.upper()  # Default to uppercase
+
+            variables[var_name] = header_value
+            logger.debug(
+                f"Found global variable: {var_name} = {header_value} "
+                f"(converted to uppercase from header: {header_name})"
+            )
+
+    logger.debug(f"Extracted global variables from headers: {list(variables.keys())}")
+    logger.debug(f"Variables dict: {variables}")
+
+    # Validate the tools parameter - error out if tools are provided
+    if request.tools is not None:
+        error_response = create_openai_error(
+            message="Tools are not supported yet",
+            type_="invalid_request_error",
+            code="tools_not_supported",
+        )
+        return OpenAIErrorResponse(error=error_response["error"])
+
+    # Get the flow using the model field (which contains the flow_id)
+    try:
+        flow = await get_flow_by_id_or_endpoint_name(request.model, str(api_key_user.id))
+    except HTTPException:
+        flow = None
+
+    if flow is None:
+        error_response = create_openai_error(
+            message=f"Flow with id '{request.model}' not found",
+            type_="invalid_request_error",
+            code="flow_not_found",
+        )
+        return OpenAIErrorResponse(error=error_response["error"])
+
+    try:
+        # Process the request
+        result = await run_flow_for_openai_responses(
+            flow=flow,
+            request=request,
+            api_key_user=api_key_user,
+            stream=request.stream,
+            variables=variables,
+        )
+
+        # Log telemetry for successful completion
+        if not request.stream:  # Only log for non-streaming responses
+            end_time = time.perf_counter()
+            background_tasks.add_task(
+                telemetry_service.log_package_run,
+                RunPayload(
+                    run_is_webhook=False,
+                    run_seconds=int(end_time - start_time),
+                    run_success=True,
+                    run_error_message="",
+                ),
+            )
+
+    except Exception as exc:  # noqa: BLE001
+        logger.error(f"Error processing OpenAI Responses request: {exc}")
+
+        # Log telemetry for failed completion
+        background_tasks.add_task(
+            telemetry_service.log_package_run,
+            RunPayload(
+                run_is_webhook=False,
+                run_seconds=int(time.perf_counter() - start_time),
+                run_success=False,
+                run_error_message=str(exc),
+            ),
+        )
+
+        # Return an OpenAI-compatible error
+        error_response = create_openai_error(
+            message=str(exc),
+            type_="processing_error",
+        )
+        return OpenAIErrorResponse(error=error_response["error"])
+    return result
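For reference, a hedged end-to-end sketch of calling the new endpoint from a client. It assumes a local Langflow instance on the default port, API-key auth via the x-api-key header, and environment variables LANGFLOW_API_KEY and FLOW_ID; adjust to your deployment:

import os

import requests

resp = requests.post(
    "http://localhost:7860/api/v1/responses",  # assumption: default local install
    headers={
        "x-api-key": os.environ["LANGFLOW_API_KEY"],  # assumption: API-key header name
        # Forwarded to the flow as context["request_variables"]["MY_VAR"] (names are upper-cased):
        "X-LANGFLOW-GLOBAL-VAR-my_var": "some-value",
    },
    json={
        "model": os.environ["FLOW_ID"],  # the flow id (or endpoint name) to run
        "input": "What is Langflow?",
        "stream": False,
        "include": ["tool_call.results"],  # opt in to detailed tool_call output items
    },
    timeout=120,
)
data = resp.json()
# The assistant message is the last item in "output"; tool calls, if any, precede it.
print(data["output"][-1]["content"][0]["text"])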