appkit-assistant 0.17.3__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. appkit_assistant/backend/{models.py → database/models.py} +32 -132
  2. appkit_assistant/backend/{repositories.py → database/repositories.py} +93 -1
  3. appkit_assistant/backend/model_manager.py +5 -5
  4. appkit_assistant/backend/models/__init__.py +28 -0
  5. appkit_assistant/backend/models/anthropic.py +31 -0
  6. appkit_assistant/backend/models/google.py +27 -0
  7. appkit_assistant/backend/models/openai.py +50 -0
  8. appkit_assistant/backend/models/perplexity.py +56 -0
  9. appkit_assistant/backend/processors/__init__.py +29 -0
  10. appkit_assistant/backend/processors/claude_responses_processor.py +205 -387
  11. appkit_assistant/backend/processors/gemini_responses_processor.py +290 -352
  12. appkit_assistant/backend/processors/lorem_ipsum_processor.py +6 -4
  13. appkit_assistant/backend/processors/mcp_mixin.py +297 -0
  14. appkit_assistant/backend/processors/openai_base.py +11 -125
  15. appkit_assistant/backend/processors/openai_chat_completion_processor.py +5 -3
  16. appkit_assistant/backend/processors/openai_responses_processor.py +480 -402
  17. appkit_assistant/backend/processors/perplexity_processor.py +156 -79
  18. appkit_assistant/backend/{processor.py → processors/processor_base.py} +7 -2
  19. appkit_assistant/backend/processors/streaming_base.py +188 -0
  20. appkit_assistant/backend/schemas.py +138 -0
  21. appkit_assistant/backend/services/auth_error_detector.py +99 -0
  22. appkit_assistant/backend/services/chunk_factory.py +273 -0
  23. appkit_assistant/backend/services/citation_handler.py +292 -0
  24. appkit_assistant/backend/services/file_cleanup_service.py +316 -0
  25. appkit_assistant/backend/services/file_upload_service.py +903 -0
  26. appkit_assistant/backend/services/file_validation.py +138 -0
  27. appkit_assistant/backend/{mcp_auth_service.py → services/mcp_auth_service.py} +4 -2
  28. appkit_assistant/backend/services/mcp_token_service.py +61 -0
  29. appkit_assistant/backend/services/message_converter.py +289 -0
  30. appkit_assistant/backend/services/openai_client_service.py +120 -0
  31. appkit_assistant/backend/{response_accumulator.py → services/response_accumulator.py} +163 -1
  32. appkit_assistant/backend/services/system_prompt_builder.py +89 -0
  33. appkit_assistant/backend/services/thread_service.py +5 -3
  34. appkit_assistant/backend/system_prompt_cache.py +3 -3
  35. appkit_assistant/components/__init__.py +8 -4
  36. appkit_assistant/components/composer.py +59 -24
  37. appkit_assistant/components/file_manager.py +623 -0
  38. appkit_assistant/components/mcp_server_dialogs.py +12 -20
  39. appkit_assistant/components/mcp_server_table.py +12 -2
  40. appkit_assistant/components/message.py +119 -2
  41. appkit_assistant/components/thread.py +1 -1
  42. appkit_assistant/components/threadlist.py +4 -2
  43. appkit_assistant/components/tools_modal.py +37 -20
  44. appkit_assistant/configuration.py +12 -0
  45. appkit_assistant/state/file_manager_state.py +697 -0
  46. appkit_assistant/state/mcp_oauth_state.py +3 -3
  47. appkit_assistant/state/mcp_server_state.py +47 -2
  48. appkit_assistant/state/system_prompt_state.py +1 -1
  49. appkit_assistant/state/thread_list_state.py +99 -5
  50. appkit_assistant/state/thread_state.py +88 -9
  51. {appkit_assistant-0.17.3.dist-info → appkit_assistant-1.0.1.dist-info}/METADATA +8 -6
  52. appkit_assistant-1.0.1.dist-info/RECORD +58 -0
  53. appkit_assistant/backend/processors/claude_base.py +0 -178
  54. appkit_assistant/backend/processors/gemini_base.py +0 -84
  55. appkit_assistant-0.17.3.dist-info/RECORD +0 -39
  56. /appkit_assistant/backend/{file_manager.py → services/file_manager.py} +0 -0
  57. {appkit_assistant-0.17.3.dist-info → appkit_assistant-1.0.1.dist-info}/WHEEL +0 -0
appkit_assistant/backend/processors/openai_responses_processor.py
@@ -1,32 +1,39 @@
 import asyncio
-import json
 import logging
 from collections.abc import AsyncGenerator
 from typing import Any, Final

-import reflex as rx
+from openai import AsyncOpenAI
+from sqlalchemy import select

-from appkit_assistant.backend.mcp_auth_service import MCPAuthService
-from appkit_assistant.backend.models import (
+from appkit_assistant.backend.database.models import (
+    AssistantThread,
+    MCPServer,
+)
+from appkit_assistant.backend.processors.mcp_mixin import MCPCapabilities
+from appkit_assistant.backend.processors.processor_base import mcp_oauth_redirect_uri
+from appkit_assistant.backend.processors.streaming_base import StreamingProcessorBase
+from appkit_assistant.backend.schemas import (
     AIModel,
-    AssistantMCPUserToken,
     Chunk,
     ChunkType,
     MCPAuthType,
-    MCPServer,
     Message,
     MessageType,
 )
-from appkit_assistant.backend.processor import mcp_oauth_redirect_uri
-from appkit_assistant.backend.processors.openai_base import BaseOpenAIProcessor
-from appkit_assistant.backend.system_prompt_cache import get_system_prompt
-from appkit_commons.database.session import get_session_manager
+from appkit_assistant.backend.services.file_upload_service import (
+    FileUploadError,
+    FileUploadService,
+)
+from appkit_assistant.backend.services.system_prompt_builder import SystemPromptBuilder
+from appkit_assistant.configuration import FileUploadConfig
+from appkit_commons.database.session import get_asyncdb_session

 logger = logging.getLogger(__name__)
 default_oauth_redirect_uri: Final[str] = mcp_oauth_redirect_uri()


-class OpenAIResponsesProcessor(BaseOpenAIProcessor):
+class OpenAIResponsesProcessor(StreamingProcessorBase, MCPCapabilities):
     """Simplified processor using content accumulator pattern."""

     def __init__(
@@ -36,20 +43,58 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         base_url: str | None = None,
         is_azure: bool = False,
         oauth_redirect_uri: str = default_oauth_redirect_uri,
+        file_upload_config: FileUploadConfig | None = None,
     ) -> None:
-        super().__init__(models, api_key, base_url, is_azure)
-        self._current_reasoning_session: str | None = None
-        self._current_user_id: int | None = None
-        self._mcp_auth_service = MCPAuthService(redirect_uri=oauth_redirect_uri)
-        self._pending_auth_servers: list[MCPServer] = []
+        StreamingProcessorBase.__init__(self, models, "openai_responses")
+        MCPCapabilities.__init__(self, oauth_redirect_uri, "openai_responses")
+
+        self.api_key = api_key
+        self.base_url = base_url
+        self.is_azure = is_azure
+        self.client = self._create_client()
+
+        # Services
+        self._system_prompt_builder = SystemPromptBuilder()
+        self._file_upload_config = file_upload_config or FileUploadConfig()
+        self._file_upload_service: FileUploadService | None = None
+
+        # Tool name tracking: tool_id -> tool_name for MCP streaming events
+        self._tool_name_map: dict[str, str] = {}
+
+        # Store available MCP servers for lookup during error handling
+        self._available_mcp_servers: list[MCPServer] = []
+
+        # Initialize file upload service if client is available
+        if self.client:
+            self._file_upload_service = FileUploadService(
+                client=self.client,
+                config=self._file_upload_config,
+            )

-        logger.debug("Using redirect URI for MCP OAuth: %s", oauth_redirect_uri)
+    def get_supported_models(self) -> dict[str, AIModel]:
+        """Return supported models if API key is available."""
+        return self.models if self.api_key else {}

-    async def process(
+    def _create_client(self) -> AsyncOpenAI | None:
+        """Create OpenAI client based on configuration."""
+        if not self.api_key:
+            logger.warning("No API key found. Processor will not work.")
+            return None
+        if self.base_url and self.is_azure:
+            return AsyncOpenAI(
+                api_key=self.api_key,
+                base_url=f"{self.base_url}/openai/v1",
+                default_query={"api-version": "preview"},
+            )
+        if self.base_url:
+            return AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
+        return AsyncOpenAI(api_key=self.api_key)
+
+    async def process(  # noqa: PLR0912
         self,
         messages: list[Message],
         model_id: str,
-        files: list[str] | None = None,  # noqa: ARG002
+        files: list[str] | None = None,
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
@@ -64,12 +109,24 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             raise ValueError(msg)

         model = self.models[model_id]
-        self._current_user_id = user_id
-        self._pending_auth_servers = []
+        self.current_user_id = user_id
+        self.clear_pending_auth_servers()
+
+        # Process file uploads and yield progress in real-time
+        vector_store_id: str | None = None
+        async for chunk in self._process_file_uploads_streaming(
+            files=files,
+            payload=payload,
+            user_id=user_id,
+        ):
+            # Extract vector_store_id from final chunk metadata
+            if chunk.chunk_metadata and "vector_store_id" in chunk.chunk_metadata:
+                vector_store_id = chunk.chunk_metadata["vector_store_id"]
+            yield chunk

         try:
             session = await self._create_responses_request(
-                messages, model, mcp_servers, payload, user_id
+                messages, model, mcp_servers, payload, user_id, vector_store_id
             )

             try:
@@ -84,13 +141,8 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
                 else:  # Non-streaming
                     content = self._extract_responses_content(session)
                     if content:
-                        yield Chunk(
-                            type=ChunkType.TEXT,
-                            text=content,
-                            chunk_metadata={
-                                "source": "responses_api",
-                                "streaming": "false",
-                            },
+                        yield self.chunk_factory.text(
+                            content, {"source": "responses_api", "streaming": "false"}
                        )
             except Exception as e:
                 error_msg = str(e)
@@ -98,111 +150,250 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
                 # Only yield error chunk if NOT an auth error
                 # and no auth servers are pending (they'll show auth card instead)
                 is_auth_related = (
-                    self._is_auth_error(error_msg) or self._pending_auth_servers
+                    self.auth_detector.is_auth_error(error_msg)
+                    or self.pending_auth_servers
                 )
                 if not is_auth_related:
-                    yield Chunk(
-                        type=ChunkType.ERROR,
-                        text=f"Ein Fehler ist aufgetreten: {error_msg}",
-                        chunk_metadata={
-                            "source": "responses_api",
-                            "error_type": type(e).__name__,
-                        },
+                    yield self.chunk_factory.error(
+                        f"Ein Fehler ist aufgetreten: {error_msg}",
+                        error_type=type(e).__name__,
                     )

             # After processing (or on error), yield any pending auth requirements
-            logger.debug(
-                "Processing pending auth servers: %d", len(self._pending_auth_servers)
-            )
-            for server in self._pending_auth_servers:
-                logger.debug("Yielding auth chunk for server: %s", server.name)
-                yield await self._create_auth_required_chunk(server)
+            async for auth_chunk in self.yield_pending_auth_chunks():
+                yield auth_chunk

         except Exception as e:
             logger.error("Critical error in OpenAI processor: %s", e)
             raise e

+    def _get_event_handlers(self) -> dict[str, Any]:
+        """Get the event handler mapping for OpenAI events."""
+        # OpenAI uses a different event dispatch pattern with multiple handlers
+        # This returns an empty dict as we use _handle_event directly
+        return {}
+
+    def _processing_chunk(
+        self, status: str, vector_store_id: str | None = None, **extra: Any
+    ) -> Chunk:
+        """Create a processing chunk with standard metadata."""
+        metadata = {"status": status, "vector_store_id": vector_store_id, **extra}
+        return Chunk(type=ChunkType.PROCESSING, text="", chunk_metadata=metadata)
+
+    async def _process_file_uploads_streaming(
+        self,
+        files: list[str] | None,
+        payload: dict[str, Any] | None,
+        user_id: int | None,
+    ) -> AsyncGenerator[Chunk, None]:
+        """Process file uploads and yield progress chunks in real-time."""
+        thread_uuid = payload.get("thread_uuid") if payload else None
+
+        if not thread_uuid:
+            if files:
+                logger.warning(
+                    "Files provided but no thread_uuid in payload, skipping upload"
+                )
+            yield self._processing_chunk("skipped")
+            return
+
+        if not user_id:
+            if files:
+                logger.warning("Files provided but no user_id, skipping upload")
+            yield self._processing_chunk("skipped")
+            return
+
+        # Look up thread to get database ID and existing vector store
+        async with get_asyncdb_session() as session:
+            result = await session.execute(
+                select(AssistantThread).where(AssistantThread.thread_id == thread_uuid)
+            )
+            thread = result.scalar_one_or_none()
+
+        if not thread:
+            if files:
+                logger.warning(
+                    "Thread %s not found in database, cannot upload files",
+                    thread_uuid,
+                )
+            yield self._processing_chunk("skipped")
+            return
+
+        thread_db_id = thread.id
+        existing_vector_store_id = thread.vector_store_id
+
+        # If no files but thread has existing vector store, validate and use it
+        if not files:
+            if existing_vector_store_id and self._file_upload_service:
+                logger.debug(
+                    "Validating existing vector store %s for thread %s",
+                    existing_vector_store_id,
+                    thread_uuid,
+                )
+                validated_id, _ = await self._file_upload_service.get_vector_store(
+                    thread_id=thread_db_id,
+                    thread_uuid=thread_uuid,
+                )
+                yield self._processing_chunk("completed", validated_id)
+                return
+            yield self._processing_chunk("completed", existing_vector_store_id)
+            return
+
+        # Process file uploads with streaming progress
+        if not self._file_upload_service:
+            logger.warning("File upload service not available")
+            yield self._processing_chunk("completed", existing_vector_store_id)
+            return
+
+        try:
+            async for chunk in self._file_upload_service.process_files(
+                file_paths=files,
+                thread_db_id=thread_db_id,
+                thread_uuid=thread_uuid,
+                user_id=user_id,
+            ):
+                yield chunk
+
+            logger.info("Processed %d files for thread %s", len(files), thread_uuid)
+        except FileUploadError as e:
+            logger.error("File upload failed: %s", e)
+            yield self._processing_chunk(
+                "failed", existing_vector_store_id, error=str(e)
+            )
+
     def _handle_event(self, event: Any) -> Chunk | None:
         """Simplified event handler returning actual event content in chunks."""
         if not hasattr(event, "type"):
             return None

         event_type = event.type
-        handlers = [
+        for handler in (
             self._handle_lifecycle_events,
-            lambda et: self._handle_text_events(et, event),
-            lambda et: self._handle_item_events(et, event),
-            lambda et: self._handle_mcp_events(et, event),
-            lambda et: self._handle_content_events(et, event),
-            lambda et: self._handle_completion_events(et, event),
-            lambda et: self._handle_image_events(et, event),
-        ]
-
-        for handler in handlers:
-            result = handler(event_type)
-            if result:
-                # content_preview = result.text[:50] if result.text else ""
-                # logger.debug(
-                #     "Event %s → Chunk: type=%s, content=%s",
-                #     event_type,
-                #     result.type,
-                #     content_preview,
-                # )
+            self._handle_text_events,
+            self._handle_item_events,
+            self._handle_search_events,  # Handle file/web search specifically
+            self._handle_mcp_events,
+            self._handle_content_events,
+            self._handle_completion_events,
+            self._handle_image_events,
+        ):
+            if result := handler(event_type, event):
                 return result

         logger.debug("Unhandled event type: %s", event_type)
         return None

-    def _handle_lifecycle_events(self, event_type: str) -> Chunk | None:
-        """Handle lifecycle events."""
-        lifecycle_events = {
-            "response.created": ("created", {"stage": "created"}),
-            "response.in_progress": ("in_progress", {"stage": "in_progress"}),
-            "response.done": ("done", {"stage": "done"}),
-        }
+    def _handle_search_events(self, event_type: str, event: Any) -> Chunk | None:
+        """Handle file_search and web_search specific events."""
+        if "file_search_call" in event_type:
+            return self._handle_file_search_event(event_type, event)
+
+        if "web_search_call" in event_type:
+            return self._handle_web_search_event(event_type, event)
+
+        return None
+
+    def _handle_file_search_event(self, event_type: str, event: Any) -> Chunk | None:
+        call_id = getattr(event, "call_id", "unknown_id")
+
+        if event_type == "response.file_search_call.searching":
+            return self.chunk_factory.tool_call(
+                "Durchsuche Dateien...",
+                tool_name="file_search",
+                tool_id=call_id,
+                status="searching",
+                reasoning_session=self.current_reasoning_session,
+            )
+
+        if event_type == "response.file_search_call.completed":
+            return self.chunk_factory.tool_result(
+                "Dateisuche abgeschlossen.",
+                tool_id=call_id,
+                status="completed",
+                reasoning_session=self.current_reasoning_session,
+            )
+        return None
+
+    def _handle_web_search_event(self, event_type: str, event: Any) -> Chunk | None:
+        call_id = getattr(event, "call_id", "unknown_id")
+
+        if event_type == "response.web_search_call.searching":
+            query_set = getattr(event, "query_set", None)
+            query_text = "Durchsuche das Web..."
+            if query_set and hasattr(query_set, "queries") and query_set.queries:
+                query_text = f"Suche nach: {query_set.queries[0]}"
+
+            return self.chunk_factory.tool_call(
+                query_text,
+                tool_name="web_search",
+                tool_id=call_id,
+                status="searching",
+                reasoning_session=self.current_reasoning_session,
+            )

-        if event_type in lifecycle_events:
-            content, metadata = lifecycle_events[event_type]
-            chunk_type = (
-                ChunkType.LIFECYCLE
-                if event_type != "response.done"
-                else ChunkType.COMPLETION
+        if event_type == "response.web_search_call.completed":
+            return self.chunk_factory.tool_result(
+                "Websuche abgeschlossen.",
+                tool_id=call_id,
+                status="completed",
+                reasoning_session=self.current_reasoning_session,
             )
-            return self._create_chunk(chunk_type, content, metadata)
+        return None
+
+    def _handle_lifecycle_events(self, event_type: str, event: Any) -> Chunk | None:  # noqa: ARG002
+        """Handle lifecycle events."""
+        if event_type == "response.created":
+            return self.chunk_factory.lifecycle("created", {"stage": "created"})
+        if event_type == "response.in_progress":
+            return self.chunk_factory.lifecycle("in_progress", {"stage": "in_progress"})
+        if event_type == "response.done":
+            return self.chunk_factory.completion(status="done")
         return None

     def _handle_text_events(self, event_type: str, event: Any) -> Chunk | None:
         """Handle text-related events."""
         if event_type == "response.output_text.delta":
-            return self._create_chunk(
-                ChunkType.TEXT, event.delta, {"delta": event.delta}
-            )
-
+            return self.chunk_factory.text(event.delta, {"delta": event.delta})
         if event_type == "response.output_text.annotation.added":
-            return self._create_chunk(
-                ChunkType.ANNOTATION,
-                event.annotation,
-                {"annotation": event.annotation},
+            annotation = event.annotation
+            annotation_text = self._extract_annotation_text(annotation)
+            logger.debug(
+                "Annotation added: type=%s, text=%s",
+                getattr(annotation, "type", type(annotation).__name__),
+                annotation_text[:50] if annotation_text else "None",
+            )
+            return self.chunk_factory.annotation(
+                annotation_text, {"annotation": str(annotation)}
             )
-
         return None

+    def _extract_annotation_text(self, annotation: Any) -> str:
+        """Extract display text from an annotation (dict or SDK object)."""
+
+        def get_val(key: str) -> Any:
+            if isinstance(annotation, dict):
+                return annotation.get(key)
+            return getattr(annotation, key, None)
+
+        # First try to get the display text (e.g. [1] or similar citation mark)
+        if text := get_val("text"):
+            return text
+
+        ann_type = get_val("type")
+        if ann_type == "url_citation":
+            return get_val("url") or str(annotation)
+        if ann_type == "file_citation" or not ann_type:
+            return get_val("filename") or str(annotation)
+        return str(annotation)
+
     def _handle_item_events(self, event_type: str, event: Any) -> Chunk | None:
         """Handle item added/done events for MCP calls and reasoning."""
-        if (
-            event_type == "response.output_item.added"
-            and hasattr(event, "item")
-            and hasattr(event.item, "type")
-        ):
+        if not hasattr(event, "item") or not hasattr(event.item, "type"):
+            return None
+        if event_type == "response.output_item.added":
             return self._handle_item_added(event.item)
-
-        if (
-            event_type == "response.output_item.done"
-            and hasattr(event, "item")
-            and hasattr(event.item, "type")
-        ):
+        if event_type == "response.output_item.done":
             return self._handle_item_done(event.item)
-
         return None

     def _handle_item_added(self, item: Any) -> Chunk | None:
@@ -211,32 +402,48 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             tool_name = getattr(item, "name", "unknown_tool")
             tool_id = getattr(item, "id", "unknown_id")
             server_label = getattr(item, "server_label", "unknown_server")
+            # Store tool name mapping for streaming events
+            self._tool_name_map[tool_id] = f"{server_label}.{tool_name}"
             logger.debug(
                 "MCP call started: %s.%s (id=%s)",
                 server_label,
                 tool_name,
                 tool_id,
             )
-            return self._create_chunk(
-                ChunkType.TOOL_CALL,
+            return self.chunk_factory.tool_call(
                 f"Benutze Werkzeug: {server_label}.{tool_name}",
-                {
-                    "tool_name": tool_name,
-                    "tool_id": tool_id,
-                    "server_label": server_label,
-                    "status": "starting",
-                    "reasoning_session": self._current_reasoning_session,
-                },
+                tool_name=tool_name,
+                tool_id=tool_id,
+                server_label=server_label,
+                status="starting",
+                reasoning_session=self.current_reasoning_session,
+            )
+
+        if item.type == "function_call":
+            tool_name = getattr(item, "name", "function")
+            tool_id = getattr(item, "call_id", "unknown_id")
+            return self.chunk_factory.tool_call(
+                f"Benutze Funktion: {tool_name}",
+                tool_name=tool_name,
+                tool_id=tool_id,
+                status="starting",
+                reasoning_session=self.current_reasoning_session,
+            )
+
+        if item.type in ("file_search_call", "web_search_call"):
+            tool_name = (
+                "file_search" if item.type == "file_search_call" else "web_search"
             )
+            # Actual searching happens in sub-events, just log start here
+            logger.debug("%s started", tool_name)
+            return None

         if item.type == "reasoning":
             reasoning_id = getattr(item, "id", "unknown_id")
             # Track the current reasoning session
-            self._current_reasoning_session = reasoning_id
-            return self._create_chunk(
-                ChunkType.THINKING,
-                "Denke nach...",
-                {"reasoning_id": reasoning_id, "status": "starting"},
+            self.current_reasoning_session = reasoning_id
+            return self.chunk_factory.thinking(
+                "Denke nach...", reasoning_id=reasoning_id, status="starting"
             )
         return None

@@ -245,11 +452,25 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         if item.type == "mcp_call":
             return self._handle_mcp_call_done(item)

+        if item.type == "function_call":
+            tool_id = getattr(item, "call_id", "unknown_id")
+            output = getattr(item, "output", "")
+            return self.chunk_factory.tool_result(
+                str(output),
+                tool_id=tool_id,
+                status="completed",
+                reasoning_session=self.current_reasoning_session,
+            )
+
+        # file_search_call / web_search_call done events are handled in _handle_search_events
+        if item.type in ("file_search_call", "web_search_call"):
+            return None
+
         if item.type == "reasoning":
             reasoning_id = getattr(item, "id", "unknown_id")
             summary = getattr(item, "summary", [])
             summary_text = str(summary) if summary else "beendet."
-            return self._create_chunk(
+            return self.chunk_factory.create(
                 ChunkType.THINKING_RESULT,
                 summary_text,
                 {"reasoning_id": reasoning_id, "status": "completed"},
@@ -271,136 +492,87 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             error_text = self._extract_error_text(error)

             # Check for authentication errors (401/403)
-            if self._is_auth_error(error):
+            if self.auth_detector.is_auth_error(error):
                 # Find the server config and queue for auth flow
-                return self._create_chunk(
-                    ChunkType.TOOL_RESULT,
-                    f"Authentifizierung erforderlich für {server_label}",
-                    {
-                        "tool_id": tool_id,
-                        "tool_name": tool_name,
-                        "server_label": server_label,
-                        "status": "auth_required",
-                        "error": True,
-                        "auth_required": True,
-                        "reasoning_session": self._current_reasoning_session,
-                    },
+                return self.chunk_factory.tool_result(
+                    f"Authentifizierung erforderlich für {server_label}.{tool_name}",
+                    tool_id=tool_id,
+                    status="auth_required",
+                    is_error=True,
+                    reasoning_session=self.current_reasoning_session,
                 )

-            return self._create_chunk(
-                ChunkType.TOOL_RESULT,
-                f"Werkzeugfehler: {error_text}",
-                {
-                    "tool_id": tool_id,
-                    "tool_name": tool_name,
-                    "status": "error",
-                    "error": True,
-                    "error_details": str(error),
-                    "reasoning_session": self._current_reasoning_session,
-                },
+            return self.chunk_factory.tool_result(
+                f"Werkzeugfehler bei {tool_name}: {error_text}",
+                tool_id=tool_id,
+                status="error",
+                is_error=True,
+                reasoning_session=self.current_reasoning_session,
             )

         output_text = str(output) if output else "Werkzeug erfolgreich aufgerufen"
-        return self._create_chunk(
-            ChunkType.TOOL_RESULT,
+        return self.chunk_factory.tool_result(
             output_text,
-            {
-                "tool_id": tool_id,
-                "tool_name": tool_name,
-                "status": "completed",
-                "reasoning_session": self._current_reasoning_session,
-            },
+            tool_id=tool_id,
+            status="completed",
+            reasoning_session=self.current_reasoning_session,
         )

-    def _is_auth_error(self, error: Any) -> bool:
-        """Check if an error indicates authentication failure (401/403)."""
-        error_str = str(error).lower()
-        auth_indicators = [
-            "401",
-            "403",
-            "unauthorized",
-            "forbidden",
-            "authentication required",
-            "access denied",
-            "invalid token",
-            "token expired",
-        ]
-        return any(indicator in error_str for indicator in auth_indicators)
-
     def _extract_error_text(self, error: Any) -> str:
         """Extract readable error text from error object."""
-        if isinstance(error, dict) and "content" in error:
-            content = error["content"]
+        if isinstance(error, dict):
+            content = error.get("content", [])
             if isinstance(content, list) and content:
                 return content[0].get("text", str(error))
         return "Unknown error"

-    def _handle_mcp_events(self, event_type: str, event: Any) -> Chunk | None:  # noqa: PLR0911, PLR0912
+    def _handle_mcp_events(  # noqa: PLR0911, PLR0912, PLR0915
+        self, event_type: str, event: Any
+    ) -> Chunk | None:
         """Handle MCP-specific events."""
         if event_type == "response.mcp_call_arguments.delta":
             tool_id = getattr(event, "item_id", "unknown_id")
             arguments_delta = getattr(event, "delta", "")
-            return self._create_chunk(
-                ChunkType.TOOL_CALL,
+            tool_name = self._tool_name_map.get(tool_id, "mcp_tool")
+            return self.chunk_factory.tool_call(
                 arguments_delta,
-                {
-                    "tool_id": tool_id,
-                    "status": "arguments_streaming",
-                    "delta": arguments_delta,
-                    "reasoning_session": self._current_reasoning_session,
-                },
+                tool_name=tool_name,
+                tool_id=tool_id,
+                status="arguments_streaming",
+                reasoning_session=self.current_reasoning_session,
             )

         if event_type == "response.mcp_call_arguments.done":
             tool_id = getattr(event, "item_id", "unknown_id")
             arguments = getattr(event, "arguments", "")
-            return self._create_chunk(
-                ChunkType.TOOL_CALL,
+            tool_name = self._tool_name_map.get(tool_id, "mcp_tool")
+            return self.chunk_factory.tool_call(
                 f"Parameter: {arguments}",
-                {
-                    "tool_id": tool_id,
-                    "status": "arguments_complete",
-                    "arguments": arguments,
-                    "reasoning_session": self._current_reasoning_session,
-                },
+                tool_name=tool_name,
+                tool_id=tool_id,
+                status="arguments_complete",
+                reasoning_session=self.current_reasoning_session,
             )

         if event_type == "response.mcp_call.failed":
             tool_id = getattr(event, "item_id", "unknown_id")
-            return self._create_chunk(
-                ChunkType.TOOL_RESULT,
-                f"Werkzeugnutzung abgebrochen: {tool_id}",
-                {
-                    "tool_id": tool_id,
-                    "status": "failed",
-                    "error": True,
-                    "reasoning_session": self._current_reasoning_session,
-                },
+            tool_name = self._tool_name_map.get(tool_id, tool_id)
+            return self.chunk_factory.tool_result(
+                f"Werkzeugnutzung abgebrochen: {tool_name}",
+                tool_id=tool_id,
+                status="failed",
+                is_error=True,
+                reasoning_session=self.current_reasoning_session,
             )

-        if event_type == "response.mcp_call.in_progress":
-            tool_id = getattr(event, "item_id", "unknown_id")
-            # This event doesn't have tool details, just acknowledge it
-            logger.debug("MCP call in progress: %s", tool_id)
-            return None
-
-        if event_type == "response.mcp_call.completed":
-            # MCP call completed successfully - handled via response.output_item.done
-            # but we can log for debugging
-            tool_id = getattr(event, "item_id", "unknown_id")
-            logger.debug("MCP call completed: %s", tool_id)
-            return None
-
-        if event_type == "response.mcp_list_tools.in_progress":
-            # This is a setup event, not a tool call - just log and return None
-            tool_id = getattr(event, "item_id", "unknown_id")
-            logger.debug("MCP list_tools in progress: %s", tool_id)
-            return None
-
-        if event_type == "response.mcp_list_tools.completed":
-            # This is a setup event, not a tool call - just log and return None
+        if event_type in {
+            "response.mcp_call.in_progress",
+            "response.mcp_call.completed",
+            "response.mcp_list_tools.in_progress",
+            "response.mcp_list_tools.completed",
+        }:
             tool_id = getattr(event, "item_id", "unknown_id")
-            logger.debug("MCP list_tools completed: %s", tool_id)
+            logger.debug("MCP event %s: %s", event_type, tool_id)
             return None

         if event_type == "response.mcp_list_tools.failed":
@@ -422,127 +594,94 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             error_str = str(error)

         # Check for authentication errors (401/403)
-        # OR if we have pending auth servers (strong signal we missed a token)
-        is_auth_error = self._is_auth_error(error_str)
-        pending_server = None
+        is_auth_error = self.auth_detector.is_auth_error(error_str)
+        auth_server = None

-        # 1. Try to find matching server by name in error message
-        for server in self._pending_auth_servers:
+        # 1. Find matching server by name in error message from pending servers
+        for server in self.pending_auth_servers:
             if server.name.lower() in error_str.lower():
-                pending_server = server
+                auth_server = server
                 break

-        # 2. If no match but we have pending servers and it looks like an auth error
-        #    OR if we have pending servers and likely one of them failed (len=1)
-        #    We assume the failure belongs to the pending server if we can't be sure
+        # 2. If not found in pending, search available servers by name
+        if not auth_server and is_auth_error:
+            for server in self._available_mcp_servers:
+                if server.name.lower() in error_str.lower():
+                    auth_server = server
+                    logger.debug(
+                        "Found server %s in available servers for auth error",
+                        server.name,
+                    )
+                    break
+
+        # 3. Fallback: if we have pending servers and it looks like an auth error,
+        #    assume the failure belongs to the first pending server
         if (
-            not pending_server
-            and self._pending_auth_servers
-            and (is_auth_error or len(self._pending_auth_servers) == 1)
+            not auth_server
+            and self.pending_auth_servers
+            and (is_auth_error or len(self.pending_auth_servers) == 1)
         ):
-            pending_server = self._pending_auth_servers[0]
+            auth_server = self.pending_auth_servers[0]
             logger.debug(
                 "Assuming pending server %s for list_tools failure '%s'",
-                pending_server.name,
+                auth_server.name,
                 error_str,
             )

-        if pending_server:
+        if auth_server:
             logger.debug(
                 "Queuing Auth Card for server: %s (Error: %s)",
-                pending_server.name,
+                auth_server.name,
                 error_str,
             )
             # Queue for async processing in the main process loop
             # The auth chunk will be yielded after event processing completes
-            if pending_server not in self._pending_auth_servers:
-                self._pending_auth_servers.append(pending_server)
-            return self._create_chunk(
-                ChunkType.TOOL_RESULT,
-                f"Authentifizierung erforderlich für {pending_server.name}",
-                {
-                    "tool_id": tool_id,
-                    "status": "auth_required",
-                    "server_name": pending_server.name,
-                    "auth_pending": True,
-                    "reasoning_session": self._current_reasoning_session,
-                },
+            self.add_pending_auth_server(auth_server)
+            return self.chunk_factory.tool_result(
+                f"Authentifizierung erforderlich für {auth_server.name}",
+                tool_id=tool_id,
+                status="auth_required",
+                is_error=True,
+                reasoning_session=self.current_reasoning_session,
             )

         logger.error("MCP tool listing failed for tool_id: %s", str(event))
-        return self._create_chunk(
-            ChunkType.TOOL_RESULT,
+        return self.chunk_factory.tool_result(
             f"Werkzeugliste konnte nicht geladen werden: {tool_id}",
-            {
-                "tool_id": tool_id,
-                "status": "listing_failed",
-                "error": True,
-                "reasoning_session": self._current_reasoning_session,
-            },
+            tool_id=tool_id,
+            status="listing_failed",
+            is_error=True,
+            reasoning_session=self.current_reasoning_session,
         )

         return None

     def _handle_content_events(self, event_type: str, event: Any) -> Chunk | None:  # noqa: ARG002
-        """Handle content-related events."""
-        if event_type == "response.content_part.added":
-            # Content part added - this typically starts text streaming
-            return None  # No need to show this as a separate chunk
-
-        if event_type == "response.content_part.done":
-            # Content part completed - this typically ends text streaming
-            return None  # No need to show this as a separate chunk
-
-        if event_type == "response.output_text.done":
-            # Text output completed - already received via delta events
-            # Skip to avoid duplicate content
-            return None
-
+        """Handle content-related events (no-op for streaming events)."""
+        # These events are handled elsewhere or don't need chunks:
+        # - response.content_part.added/done: streaming lifecycle
+        # - response.output_text.done: already received via delta events
        return None

     def _handle_completion_events(self, event_type: str, event: Any) -> Chunk | None:  # noqa: ARG002
         """Handle completion-related events."""
-        if event_type == "response.completed":
-            return self._create_chunk(
-                ChunkType.COMPLETION,
-                "Response generation completed",
-                {"status": "response_complete"},
-            )
-        return None
+        return (
+            self.chunk_factory.completion(status="response_complete")
+            if event_type == "response.completed"
+            else None
+        )

     def _handle_image_events(self, event_type: str, event: Any) -> Chunk | None:
         """Handle image-related events."""
-        if "image" in event_type and (hasattr(event, "url") or hasattr(event, "data")):
-            image_data = {
-                "url": getattr(event, "url", ""),
-                "data": getattr(event, "data", ""),
-            }
-            image_str = str(image_data)
-            return self._create_chunk(ChunkType.IMAGE, image_str, image_data)
-        return None
-
-    def _create_chunk(
-        self,
-        chunk_type: ChunkType,
-        content: str,
-        extra_metadata: dict[str, str] | None = None,
-    ) -> Chunk:
-        """Create a Chunk with actual content from the event"""
-        metadata = {
-            "processor": "openai_responses_simplified",
+        if "image" not in event_type:
+            return None
+        if not (hasattr(event, "url") or hasattr(event, "data")):
+            return None
+        image_data = {
+            "url": getattr(event, "url", ""),
+            "data": getattr(event, "data", ""),
         }
-
-        if extra_metadata:
-            # Ensure all metadata values are strings
-            for key, value in extra_metadata.items():
-                if value is not None:
-                    metadata[key] = str(value)
-
-        return Chunk(
-            type=chunk_type,
-            text=content,
-            chunk_metadata=metadata,
-        )
+        return self.chunk_factory.create(ChunkType.IMAGE, str(image_data), image_data)

     async def _create_responses_request(
         self,
@@ -551,6 +690,7 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         mcp_servers: list[MCPServer] | None = None,
         payload: dict[str, Any] | None = None,
         user_id: int | None = None,
+        vector_store_id: str | None = None,
     ) -> Any:
         """Create a simplified responses API request."""
         # Configure MCP tools if provided (now async for token lookup)
@@ -560,11 +700,38 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             else ([], "")
         )

+        # Add file_search tool if vector store is available
+        if vector_store_id:
+            file_search_tool = {
+                "type": "file_search",
+                "vector_store_ids": [vector_store_id],
+                "max_num_results": 20,
+            }
+            tools.append(file_search_tool)
+            logger.debug(
+                "Added file_search tool with vector_store: %s",
+                vector_store_id,
+            )
+
+        # Add web_search tool if enabled and supported
+        if model.supports_search and payload and payload.get("web_search_enabled"):
+            tools.append({"type": "web_search"})
+            payload.pop("web_search_enabled", None)
+            logger.debug("Added web_search tool")
+
         # Convert messages to responses format with system message
         input_messages = await self._convert_messages_to_responses_format(
             messages, mcp_prompt=mcp_prompt
         )

+        # Filter out internal payload keys that shouldn't go to OpenAI
+        filtered_payload = {}
+        if payload:
+            internal_keys = {"thread_uuid"}
+            filtered_payload = {
+                k: v for k, v in payload.items() if k not in internal_keys
+            }
+
         params = {
             "model": model.model,
             "input": input_messages,
@@ -572,7 +739,7 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             "temperature": model.temperature,
             "tools": tools,
             "reasoning": {"effort": "medium"},
-            **(payload or {}),
+            **filtered_payload,
         }

         return await self.client.responses.create(**params)
@@ -590,8 +757,12 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             tuple: (tools list, concatenated prompts string)
         """
         if not mcp_servers:
+            self._available_mcp_servers = []
             return [], ""

+        # Store for lookup during error handling (e.g., 401 errors)
+        self._available_mcp_servers = mcp_servers
+
         tools = []
         prompts = []

@@ -604,20 +775,18 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
             }

             # Start with existing headers
-            headers = {}
-            if server.headers and server.headers != "{}":
-                headers = json.loads(server.headers)
+            headers = self.parse_mcp_headers(server)

             # Inject OAuth token if server requires OAuth and user is authenticated
             if server.auth_type == MCPAuthType.OAUTH_DISCOVERY and user_id is not None:
-                token = await self._get_valid_token_for_server(server, user_id)
+                token = await self.get_valid_token(server, user_id)
                 if token:
                     headers["Authorization"] = f"Bearer {token.access_token}"
                     logger.debug("Injected OAuth token for server %s", server.name)
                 else:
                     # No valid token - server will likely fail with 401
                     # Track this server for potential auth flow
-                    self._pending_auth_servers.append(server)
+                    self.add_pending_auth_server(server)
                     logger.debug(
                         "No valid token for OAuth server %s, auth may be required",
                         server.name,
@@ -646,18 +815,8 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):
         """
         input_messages = []

-        # Always add system message as first message
-        if mcp_prompt:
-            mcp_prompt = (
-                "### Tool-Auswahlrichtlinien (Einbettung externer Beschreibungen)\n"
-                f"{mcp_prompt}"
-            )
-        else:
-            mcp_prompt = ""
-
         if use_system_prompt:
-            system_prompt_template = await get_system_prompt()
-            system_text = system_prompt_template.format(mcp_prompts=mcp_prompt)
+            system_text = await self._system_prompt_builder.build(mcp_prompt)
             input_messages.append(
                 {
                     "role": "system",
@@ -680,93 +839,12 @@ class OpenAIResponsesProcessor(BaseOpenAIProcessor):

     def _extract_responses_content(self, session: Any) -> str | None:
         """Extract content from non-streaming responses."""
-        if (
-            hasattr(session, "output")
-            and session.output
-            and isinstance(session.output, list)
-            and session.output
-        ):
-            first_output = session.output[0]
-            if hasattr(first_output, "content") and first_output.content:
-                if isinstance(first_output.content, list):
-                    return first_output.content[0].get("text", "")
-                return str(first_output.content)
-        return None
-
-    async def _get_valid_token_for_server(
-        self,
-        server: MCPServer,
-        user_id: int,
-    ) -> AssistantMCPUserToken | None:
-        """Get a valid OAuth token for the given server and user.
-
-        Refreshes the token if expired and refresh token is available.
-
-        Args:
-            server: The MCP server configuration.
-            user_id: The user's ID.
-
-        Returns:
-            A valid token or None if not available.
-        """
-        if server.id is None:
+        output = getattr(session, "output", None)
+        if not output or not isinstance(output, list):
             return None
-
-        with rx.session() as session:
-            token = self._mcp_auth_service.get_user_token(session, user_id, server.id)
-
-        if token is None:
-            return None
-
-        # Check if token is valid or can be refreshed
-        return await self._mcp_auth_service.ensure_valid_token(
-            session, server, token
-        )
-
-    async def _create_auth_required_chunk(self, server: MCPServer) -> Chunk:
-        """Create an AUTH_REQUIRED chunk for a server that needs authentication.
-
-        Args:
-            server: The MCP server requiring authentication.
-
-        Returns:
-            A chunk signaling auth is required with the auth URL.
-        """
-        # Build the authorization URL
-        try:
-            # We use a session to store the PKCE state
-            # NOTE: rx.session() is for Reflex user session, not DB session.
-            # We use get_session_manager().session() for DB access required by PKCE.
-            with get_session_manager().session() as session:
-                # Use the async method that supports DCR
-                auth_service = self._mcp_auth_service
-                (
-                    auth_url,
-                    state,
-                ) = await auth_service.build_authorization_url_with_registration(
-                    server,
-                    session=session,
-                    user_id=self._current_user_id,
-                )
-                logger.info(
-                    "Built auth URL for server %s, state=%s, url=%s",
-                    server.name,
-                    state,
-                    auth_url[:100] if auth_url else "None",
-                )
-        except (ValueError, Exception) as e:
-            logger.error("Cannot build auth URL for server %s: %s", server.name, str(e))
-            auth_url = ""
-            state = ""
-
-        return Chunk(
-            type=ChunkType.AUTH_REQUIRED,
-            text=f"{server.name} benötigt Ihre Autorisierung",
-            chunk_metadata={
-                "server_id": str(server.id) if server.id else "",
-                "server_name": server.name,
-                "auth_url": auth_url,
-                "state": state,
-                "processor": "openai_responses",
-            },
-        )
+        content = getattr(output[0], "content", None)
+        if not content:
+            return None
+        if isinstance(content, list):
+            return content[0].get("text", "")
+        return str(content)
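
The diff above replaces single inheritance from BaseOpenAIProcessor with the StreamingProcessorBase + MCPCapabilities pair, routes all chunk construction through self.chunk_factory, and adds a file-upload pre-phase that yields PROCESSING chunks before the Responses API call. A minimal consumer sketch of the resulting interface, assuming the constructor still takes models and api_key as its leading arguments (as the removed super().__init__ call suggests) and that the AIModel/Message field names beyond those read in the diff (model.model, model.temperature, model.supports_search) are illustrative assumptions, not taken from the package:

import asyncio

from appkit_assistant.backend.processors.openai_responses_processor import (
    OpenAIResponsesProcessor,
)
from appkit_assistant.backend.schemas import AIModel, Message, MessageType


async def main() -> None:
    # Hypothetical model registry keyed by model_id; the exact AIModel
    # constructor fields are an assumption based on the attributes the
    # diff reads (model.model, model.temperature, model.supports_search).
    models = {
        "gpt-4o": AIModel(model="gpt-4o", temperature=0.2, supports_search=True),
    }
    processor = OpenAIResponsesProcessor(
        models,
        api_key="sk-...",  # without a key, _create_client() returns None
        base_url=None,     # pair base_url with is_azure=True for Azure endpoints
        is_azure=False,
    )

    # Message construction is illustrative; field names are assumptions.
    messages = [Message(type=MessageType.HUMAN, text="Hallo!")]

    # process() is an async generator: PROCESSING chunks from the file-upload
    # pre-phase arrive first, then TEXT/TOOL_CALL/... chunks from streaming.
    async for chunk in processor.process(messages, model_id="gpt-4o"):
        print(chunk.type, (chunk.text or "")[:80])


asyncio.run(main())

Note that per the diff, a missing API key does not raise at construction time: _create_client() returns None, get_supported_models() returns {}, and the FileUploadService is never initialized, so file uploads are silently skipped.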