massgen 0.0.3-py3-none-any.whl → 0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of massgen might be problematic.

Files changed (268)
  1. massgen/__init__.py +142 -8
  2. massgen/adapters/__init__.py +29 -0
  3. massgen/adapters/ag2_adapter.py +483 -0
  4. massgen/adapters/base.py +183 -0
  5. massgen/adapters/tests/__init__.py +0 -0
  6. massgen/adapters/tests/test_ag2_adapter.py +439 -0
  7. massgen/adapters/tests/test_agent_adapter.py +128 -0
  8. massgen/adapters/utils/__init__.py +2 -0
  9. massgen/adapters/utils/ag2_utils.py +236 -0
  10. massgen/adapters/utils/tests/__init__.py +0 -0
  11. massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
  12. massgen/agent_config.py +329 -55
  13. massgen/api_params_handler/__init__.py +10 -0
  14. massgen/api_params_handler/_api_params_handler_base.py +99 -0
  15. massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
  16. massgen/api_params_handler/_claude_api_params_handler.py +113 -0
  17. massgen/api_params_handler/_response_api_params_handler.py +130 -0
  18. massgen/backend/__init__.py +39 -4
  19. massgen/backend/azure_openai.py +385 -0
  20. massgen/backend/base.py +341 -69
  21. massgen/backend/base_with_mcp.py +1102 -0
  22. massgen/backend/capabilities.py +386 -0
  23. massgen/backend/chat_completions.py +577 -130
  24. massgen/backend/claude.py +1033 -537
  25. massgen/backend/claude_code.py +1203 -0
  26. massgen/backend/cli_base.py +209 -0
  27. massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
  28. massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
  29. massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
  30. massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
  31. massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
  32. massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
  33. massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
  34. massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
  35. massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
  36. massgen/backend/docs/inference_backend.md +257 -0
  37. massgen/backend/docs/permissions_and_context_files.md +1085 -0
  38. massgen/backend/external.py +126 -0
  39. massgen/backend/gemini.py +1850 -241
  40. massgen/backend/grok.py +40 -156
  41. massgen/backend/inference.py +156 -0
  42. massgen/backend/lmstudio.py +171 -0
  43. massgen/backend/response.py +1095 -322
  44. massgen/chat_agent.py +131 -113
  45. massgen/cli.py +1560 -275
  46. massgen/config_builder.py +2396 -0
  47. massgen/configs/BACKEND_CONFIGURATION.md +458 -0
  48. massgen/configs/README.md +559 -216
  49. massgen/configs/ag2/ag2_case_study.yaml +27 -0
  50. massgen/configs/ag2/ag2_coder.yaml +34 -0
  51. massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
  52. massgen/configs/ag2/ag2_gemini.yaml +27 -0
  53. massgen/configs/ag2/ag2_groupchat.yaml +108 -0
  54. massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
  55. massgen/configs/ag2/ag2_single_agent.yaml +21 -0
  56. massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
  57. massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
  58. massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
  59. massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
  60. massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
  61. massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
  62. massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
  63. massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
  64. massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
  65. massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
  66. massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
  67. massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
  68. massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
  69. massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
  70. massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
  71. massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
  72. massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
  73. massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
  74. massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
  75. massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
  76. massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
  77. massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
  78. massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
  79. massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
  80. massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
  81. massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
  82. massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
  83. massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
  84. massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
  85. massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
  86. massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
  87. massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
  88. massgen/configs/debug/skip_coordination_test.yaml +27 -0
  89. massgen/configs/debug/test_sdk_migration.yaml +17 -0
  90. massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
  91. massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
  92. massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
  93. massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
  94. massgen/configs/providers/claude/claude.yaml +14 -0
  95. massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
  96. massgen/configs/providers/local/lmstudio.yaml +11 -0
  97. massgen/configs/providers/openai/gpt5.yaml +46 -0
  98. massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
  99. massgen/configs/providers/others/grok_single_agent.yaml +19 -0
  100. massgen/configs/providers/others/zai_coding_team.yaml +108 -0
  101. massgen/configs/providers/others/zai_glm45.yaml +12 -0
  102. massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
  103. massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
  104. massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
  105. massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
  106. massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
  107. massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
  108. massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
  109. massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
  110. massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
  111. massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
  112. massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
  113. massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
  114. massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
  115. massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
  116. massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
  117. massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
  118. massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
  119. massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
  120. massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
  121. massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
  122. massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
  123. massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
  124. massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
  125. massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
  126. massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
  127. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
  128. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
  129. massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
  130. massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
  131. massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
  132. massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
  133. massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
  134. massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
  135. massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
  136. massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
  137. massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
  138. massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
  139. massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
  140. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
  141. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
  142. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
  143. massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
  144. massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
  145. massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
  146. massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
  147. massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
  148. massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
  149. massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
  150. massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
  151. massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
  152. massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
  153. massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
  154. massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
  155. massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
  156. massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
  157. massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
  158. massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
  159. massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
  160. massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
  161. massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
  162. massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
  163. massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
  164. massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
  165. massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
  166. massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
  167. massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
  168. massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
  169. massgen/coordination_tracker.py +708 -0
  170. massgen/docker/README.md +462 -0
  171. massgen/filesystem_manager/__init__.py +21 -0
  172. massgen/filesystem_manager/_base.py +9 -0
  173. massgen/filesystem_manager/_code_execution_server.py +545 -0
  174. massgen/filesystem_manager/_docker_manager.py +477 -0
  175. massgen/filesystem_manager/_file_operation_tracker.py +248 -0
  176. massgen/filesystem_manager/_filesystem_manager.py +813 -0
  177. massgen/filesystem_manager/_path_permission_manager.py +1261 -0
  178. massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
  179. massgen/formatter/__init__.py +10 -0
  180. massgen/formatter/_chat_completions_formatter.py +284 -0
  181. massgen/formatter/_claude_formatter.py +235 -0
  182. massgen/formatter/_formatter_base.py +156 -0
  183. massgen/formatter/_response_formatter.py +263 -0
  184. massgen/frontend/__init__.py +1 -2
  185. massgen/frontend/coordination_ui.py +471 -286
  186. massgen/frontend/displays/base_display.py +56 -11
  187. massgen/frontend/displays/create_coordination_table.py +1956 -0
  188. massgen/frontend/displays/rich_terminal_display.py +1259 -619
  189. massgen/frontend/displays/simple_display.py +9 -4
  190. massgen/frontend/displays/terminal_display.py +27 -68
  191. massgen/logger_config.py +681 -0
  192. massgen/mcp_tools/README.md +232 -0
  193. massgen/mcp_tools/__init__.py +105 -0
  194. massgen/mcp_tools/backend_utils.py +1035 -0
  195. massgen/mcp_tools/circuit_breaker.py +195 -0
  196. massgen/mcp_tools/client.py +894 -0
  197. massgen/mcp_tools/config_validator.py +138 -0
  198. massgen/mcp_tools/docs/circuit_breaker.md +646 -0
  199. massgen/mcp_tools/docs/client.md +950 -0
  200. massgen/mcp_tools/docs/config_validator.md +478 -0
  201. massgen/mcp_tools/docs/exceptions.md +1165 -0
  202. massgen/mcp_tools/docs/security.md +854 -0
  203. massgen/mcp_tools/exceptions.py +338 -0
  204. massgen/mcp_tools/hooks.py +212 -0
  205. massgen/mcp_tools/security.py +780 -0
  206. massgen/message_templates.py +342 -64
  207. massgen/orchestrator.py +1515 -241
  208. massgen/stream_chunk/__init__.py +35 -0
  209. massgen/stream_chunk/base.py +92 -0
  210. massgen/stream_chunk/multimodal.py +237 -0
  211. massgen/stream_chunk/text.py +162 -0
  212. massgen/tests/mcp_test_server.py +150 -0
  213. massgen/tests/multi_turn_conversation_design.md +0 -8
  214. massgen/tests/test_azure_openai_backend.py +156 -0
  215. massgen/tests/test_backend_capabilities.py +262 -0
  216. massgen/tests/test_backend_event_loop_all.py +179 -0
  217. massgen/tests/test_chat_completions_refactor.py +142 -0
  218. massgen/tests/test_claude_backend.py +15 -28
  219. massgen/tests/test_claude_code.py +268 -0
  220. massgen/tests/test_claude_code_context_sharing.py +233 -0
  221. massgen/tests/test_claude_code_orchestrator.py +175 -0
  222. massgen/tests/test_cli_backends.py +180 -0
  223. massgen/tests/test_code_execution.py +679 -0
  224. massgen/tests/test_external_agent_backend.py +134 -0
  225. massgen/tests/test_final_presentation_fallback.py +237 -0
  226. massgen/tests/test_gemini_planning_mode.py +351 -0
  227. massgen/tests/test_grok_backend.py +7 -10
  228. massgen/tests/test_http_mcp_server.py +42 -0
  229. massgen/tests/test_integration_simple.py +198 -0
  230. massgen/tests/test_mcp_blocking.py +125 -0
  231. massgen/tests/test_message_context_building.py +29 -47
  232. massgen/tests/test_orchestrator_final_presentation.py +48 -0
  233. massgen/tests/test_path_permission_manager.py +2087 -0
  234. massgen/tests/test_rich_terminal_display.py +14 -13
  235. massgen/tests/test_timeout.py +133 -0
  236. massgen/tests/test_v3_3agents.py +11 -12
  237. massgen/tests/test_v3_simple.py +8 -13
  238. massgen/tests/test_v3_three_agents.py +11 -18
  239. massgen/tests/test_v3_two_agents.py +8 -13
  240. massgen/token_manager/__init__.py +7 -0
  241. massgen/token_manager/token_manager.py +400 -0
  242. massgen/utils.py +52 -16
  243. massgen/v1/agent.py +45 -91
  244. massgen/v1/agents.py +18 -53
  245. massgen/v1/backends/gemini.py +50 -153
  246. massgen/v1/backends/grok.py +21 -54
  247. massgen/v1/backends/oai.py +39 -111
  248. massgen/v1/cli.py +36 -93
  249. massgen/v1/config.py +8 -12
  250. massgen/v1/logging.py +43 -127
  251. massgen/v1/main.py +18 -32
  252. massgen/v1/orchestrator.py +68 -209
  253. massgen/v1/streaming_display.py +62 -163
  254. massgen/v1/tools.py +8 -12
  255. massgen/v1/types.py +9 -23
  256. massgen/v1/utils.py +5 -23
  257. massgen-0.1.0.dist-info/METADATA +1245 -0
  258. massgen-0.1.0.dist-info/RECORD +273 -0
  259. massgen-0.1.0.dist-info/entry_points.txt +2 -0
  260. massgen/frontend/logging/__init__.py +0 -9
  261. massgen/frontend/logging/realtime_logger.py +0 -197
  262. massgen-0.0.3.dist-info/METADATA +0 -568
  263. massgen-0.0.3.dist-info/RECORD +0 -76
  264. massgen-0.0.3.dist-info/entry_points.txt +0 -2
  265. /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
  266. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
  267. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
  268. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
@@ -1,341 +1,1132 @@
1
- from __future__ import annotations
2
-
1
+ # -*- coding: utf-8 -*-
3
2
  """
4
- Response API backend implementation.
3
+ Response API backend implementation with multimodal support.
5
4
  Standalone implementation optimized for the standard Response API format (originated by OpenAI).
5
+ Supports image input (URL and base64) and image generation via tools.
6
6
  """
7
+ from __future__ import annotations
7
8
 
9
+ import asyncio
8
10
  import os
9
- from typing import Dict, List, Any, AsyncGenerator, Optional
10
- from .base import LLMBackend, StreamChunk
11
+ from datetime import datetime, timezone
12
+ from io import BytesIO
13
+ from pathlib import Path
14
+ from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
15
+ from urllib.parse import urlparse
11
16
 
17
+ import httpx
18
+ import openai
19
+ from openai import AsyncOpenAI
12
20
 
13
- class ResponseBackend(LLMBackend):
14
- """Backend using the standard Response API format."""
21
+ from ..api_params_handler import ResponseAPIParamsHandler
22
+ from ..formatter import ResponseFormatter
23
+ from ..logger_config import log_backend_agent_message, log_stream_chunk, logger
24
+ from ..stream_chunk import ChunkType, TextStreamChunk
25
+ from .base import FilesystemSupport, StreamChunk
26
+ from .base_with_mcp import MCPBackend, UploadFileError
27
+
28
+
29
+ class ResponseBackend(MCPBackend):
30
+ """Backend using the standard Response API format with multimodal support."""
15
31
 
16
32
  def __init__(self, api_key: Optional[str] = None, **kwargs):
17
33
  super().__init__(api_key, **kwargs)
18
34
  self.api_key = api_key or os.getenv("OPENAI_API_KEY")
35
+ self.formatter = ResponseFormatter()
36
+ self.api_params_handler = ResponseAPIParamsHandler(self)
37
+
38
+ # Queue for pending image saves
39
+ self._pending_image_saves = []
19
40
 
20
- def convert_tools_to_response_api_format(
21
- self, tools: List[Dict[str, Any]]
22
- ) -> List[Dict[str, Any]]:
23
- """Convert tools from Chat Completions format to Response API format if needed.
41
+ # File Search tracking for cleanup
42
+ self._vector_store_ids: List[str] = []
43
+ self._uploaded_file_ids: List[str] = []
24
44
 
25
- Chat Completions format: {"type": "function", "function": {"name": ..., "description": ..., "parameters": ...}}
26
- Response API format: {"type": "function", "name": ..., "description": ..., "parameters": ...}
45
+ def supports_upload_files(self) -> bool:
46
+ return True
47
+
48
+ async def stream_with_tools(
49
+ self,
50
+ messages: List[Dict[str, Any]],
51
+ tools: List[Dict[str, Any]],
52
+ **kwargs,
53
+ ) -> AsyncGenerator[StreamChunk, None]:
54
+ """Stream response using OpenAI Response API with unified MCP/non-MCP processing.
55
+
56
+ Wraps parent implementation to ensure File Search cleanup happens after streaming completes.
27
57
  """
28
- if not tools:
29
- return tools
58
+ try:
59
+ async for chunk in super().stream_with_tools(messages, tools, **kwargs):
60
+ yield chunk
61
+ finally:
62
+ # Cleanup File Search resources after stream completes
63
+ await self._cleanup_file_search_if_needed(**kwargs)
30
64
 
31
- converted_tools = []
32
- for tool in tools:
33
- if tool.get("type") == "function" and "function" in tool:
34
- # Chat Completions format - convert to Response API format
35
- func = tool["function"]
36
- converted_tools.append(
37
- {
38
- "type": "function",
39
- "name": func["name"],
40
- "description": func["description"],
41
- "parameters": func.get("parameters", {}),
65
+ async def _cleanup_file_search_if_needed(self, **kwargs) -> None:
66
+ """Cleanup File Search resources if needed."""
67
+ if not (self._vector_store_ids or self._uploaded_file_ids):
68
+ return
69
+
70
+ agent_id = kwargs.get("agent_id")
71
+ logger.info("Cleaning up File Search resources...")
72
+
73
+ client = None
74
+ try:
75
+ # Create a client for cleanup
76
+ client = self._create_client(**kwargs)
77
+ await self._cleanup_file_search_resources(client, agent_id)
78
+ except Exception as cleanup_error:
79
+ logger.error(
80
+ f"Error during File Search cleanup: {cleanup_error}",
81
+ extra={"agent_id": agent_id},
82
+ )
83
+ finally:
84
+ # Close the client if it has an aclose method
85
+ if client and hasattr(client, "aclose"):
86
+ try:
87
+ await client.aclose()
88
+ except Exception:
89
+ pass
90
+
91
+ async def _stream_without_mcp_tools(
92
+ self,
93
+ messages: List[Dict[str, Any]],
94
+ tools: List[Dict[str, Any]],
95
+ client,
96
+ **kwargs,
97
+ ) -> AsyncGenerator[StreamChunk, None]:
98
+ agent_id = kwargs.get("agent_id")
99
+ all_params = {**self.config, **kwargs}
100
+
101
+ processed_messages = await self._process_upload_files(messages, all_params)
102
+
103
+ if all_params.get("_has_file_search_files"):
104
+ logger.info("Processing File Search uploads...")
105
+ processed_messages, vector_store_id = await self._upload_files_and_create_vector_store(
106
+ processed_messages,
107
+ client,
108
+ agent_id,
109
+ )
110
+ if vector_store_id:
111
+ existing_ids = list(all_params.get("_file_search_vector_store_ids", []))
112
+ existing_ids.append(vector_store_id)
113
+ all_params["_file_search_vector_store_ids"] = existing_ids
114
+ logger.info(f"File Search enabled with vector store: {vector_store_id}")
115
+ all_params.pop("_has_file_search_files", None)
116
+
117
+ api_params = await self.api_params_handler.build_api_params(processed_messages, tools, all_params)
118
+
119
+ if "tools" in api_params:
120
+ non_mcp_tools = []
121
+ for tool in api_params.get("tools", []):
122
+ if tool.get("type") == "function":
123
+ name = tool.get("function", {}).get("name") if "function" in tool else tool.get("name")
124
+ if name and name in self._mcp_function_names:
125
+ continue
126
+ elif tool.get("type") == "mcp":
127
+ continue
128
+ non_mcp_tools.append(tool)
129
+ api_params["tools"] = non_mcp_tools
130
+
131
+ stream = await client.responses.create(**api_params)
132
+
133
+ async for chunk in self._process_stream(stream, all_params, agent_id):
134
+ yield chunk
135
+
136
+ async def _stream_with_mcp_tools(
137
+ self,
138
+ current_messages: List[Dict[str, Any]],
139
+ tools: List[Dict[str, Any]],
140
+ client,
141
+ **kwargs,
142
+ ) -> AsyncGenerator[StreamChunk, None]:
143
+ """Recursively stream MCP responses, executing function calls as needed."""
144
+ agent_id = kwargs.get("agent_id")
145
+
146
+ # Build API params for this iteration
147
+ all_params = {**self.config, **kwargs}
148
+
149
+ if all_params.get("_has_file_search_files"):
150
+ logger.info("Processing File Search uploads...")
151
+ current_messages, vector_store_id = await self._upload_files_and_create_vector_store(
152
+ current_messages,
153
+ client,
154
+ agent_id,
155
+ )
156
+ if vector_store_id:
157
+ existing_ids = list(all_params.get("_file_search_vector_store_ids", []))
158
+ existing_ids.append(vector_store_id)
159
+ all_params["_file_search_vector_store_ids"] = existing_ids
160
+ logger.info(f"File Search enabled with vector store: {vector_store_id}")
161
+ all_params.pop("_has_file_search_files", None)
162
+
163
+ api_params = await self.api_params_handler.build_api_params(current_messages, tools, all_params)
164
+
165
+ # Start streaming
166
+ stream = await client.responses.create(**api_params)
167
+
168
+ # Track function calls in this iteration
169
+ captured_function_calls = []
170
+ current_function_call = None
171
+ response_completed = False
172
+
173
+ async for chunk in stream:
174
+ if hasattr(chunk, "type"):
175
+ # Detect function call start
176
+ if chunk.type == "response.output_item.added" and hasattr(chunk, "item") and chunk.item and getattr(chunk.item, "type", None) == "function_call":
177
+ current_function_call = {
178
+ "call_id": getattr(chunk.item, "call_id", ""),
179
+ "name": getattr(chunk.item, "name", ""),
180
+ "arguments": "",
42
181
  }
182
+ logger.info(f"Function call detected: {current_function_call['name']}")
183
+
184
+ # Accumulate function arguments
185
+ elif chunk.type == "response.function_call_arguments.delta" and current_function_call is not None:
186
+ delta = getattr(chunk, "delta", "")
187
+ current_function_call["arguments"] += delta
188
+
189
+ # Function call completed
190
+ elif chunk.type == "response.output_item.done" and current_function_call is not None:
191
+ captured_function_calls.append(current_function_call)
192
+ current_function_call = None
193
+
194
+ # Handle regular content and other events
195
+ elif chunk.type == "response.output_text.delta":
196
+ delta = getattr(chunk, "delta", "")
197
+ yield TextStreamChunk(
198
+ type=ChunkType.CONTENT,
199
+ content=delta,
200
+ source="response_api",
201
+ )
202
+
203
+ # Handle other streaming events (reasoning, provider tools, etc.)
204
+ else:
205
+ result = self._process_stream_chunk(chunk, agent_id)
206
+ yield result
207
+
208
+ # Response completed
209
+ if chunk.type == "response.completed":
210
+ response_completed = True
211
+ if captured_function_calls:
212
+ # Execute captured function calls and recurse
213
+ break # Exit chunk loop to execute functions
214
+ else:
215
+ # No function calls, we're done (base case)
216
+ yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
217
+ return
218
+
219
+ # Execute any captured function calls
220
+ if captured_function_calls and response_completed:
221
+ # Check if any of the function calls are NOT MCP functions
222
+ non_mcp_functions = [call for call in captured_function_calls if call["name"] not in self._mcp_functions]
223
+
224
+ if non_mcp_functions:
225
+ logger.info(f"Non-MCP function calls detected: {[call['name'] for call in non_mcp_functions]}. Ending MCP processing.")
226
+ yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
227
+ return
228
+
229
+ # Check circuit breaker status before executing MCP functions
230
+ if not await super()._check_circuit_breaker_before_execution():
231
+ logger.warning("All MCP servers blocked by circuit breaker")
232
+ yield TextStreamChunk(
233
+ type=ChunkType.MCP_STATUS,
234
+ status="mcp_blocked",
235
+ content="⚠️ [MCP] All servers blocked by circuit breaker",
236
+ source="circuit_breaker",
43
237
  )
44
- else:
45
- # Already in Response API format or non-function tool
46
- converted_tools.append(tool)
238
+ yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
239
+ return
47
240
 
48
- return converted_tools
241
+ # Execute only MCP function calls
242
+ mcp_functions_executed = False
243
+ updated_messages = current_messages.copy()
49
244
 
50
- def convert_messages_to_response_api_format(
51
- self, messages: List[Dict[str, Any]]
52
- ) -> List[Dict[str, Any]]:
53
- """Convert messages from Chat Completions format to Response API format.
245
+ # Check if planning mode is enabled - block MCP tool execution during planning
246
+ if self.is_planning_mode_enabled():
247
+ logger.info("[MCP] Planning mode enabled - blocking all MCP tool execution")
248
+ yield StreamChunk(
249
+ type="mcp_status",
250
+ status="planning_mode_blocked",
251
+ content="🚫 [MCP] Planning mode active - MCP tools blocked during coordination",
252
+ source="planning_mode",
253
+ )
254
+ # Skip all MCP tool execution but still continue with workflow
255
+ yield StreamChunk(type="done")
256
+ return
54
257
 
55
- Chat Completions tool message: {"role": "tool", "tool_call_id": "...", "content": "..."}
56
- Response API tool message: {"type": "function_call_output", "call_id": "...", "output": "..."}
258
+ # Ensure every captured function call gets a result to prevent hanging
259
+ processed_call_ids = set()
57
260
 
58
- Note: Assistant messages with tool_calls should not be in input - they're generated by the backend.
59
- """
60
- converted_messages = []
261
+ for call in captured_function_calls:
262
+ function_name = call["name"]
263
+ if function_name in self._mcp_functions:
264
+ # Yield MCP tool call status
265
+ yield TextStreamChunk(
266
+ type=ChunkType.MCP_STATUS,
267
+ status="mcp_tool_called",
268
+ content=f"🔧 [MCP Tool] Calling {function_name}...",
269
+ source=f"mcp_{function_name}",
270
+ )
61
271
 
62
- for message in messages:
63
- if message.get("role") == "tool":
64
- # Convert Chat Completions tool message to Response API format
65
- converted_messages.append(
66
- {
272
+ try:
273
+ # Execute MCP function with retry and exponential backoff
274
+ result, result_obj = await super()._execute_mcp_function_with_retry(
275
+ function_name,
276
+ call["arguments"],
277
+ )
278
+
279
+ # Check if function failed after all retries
280
+ if isinstance(result, str) and result.startswith("Error:"):
281
+ # Log failure but still create tool response
282
+ logger.warning(f"MCP function {function_name} failed after retries: {result}")
283
+
284
+ # Add error result to messages
285
+ function_call_msg = {
286
+ "type": "function_call",
287
+ "call_id": call["call_id"],
288
+ "name": function_name,
289
+ "arguments": call["arguments"],
290
+ }
291
+ updated_messages.append(function_call_msg)
292
+
293
+ error_output_msg = {
294
+ "type": "function_call_output",
295
+ "call_id": call["call_id"],
296
+ "output": result,
297
+ }
298
+ updated_messages.append(error_output_msg)
299
+
300
+ processed_call_ids.add(call["call_id"])
301
+ mcp_functions_executed = True
302
+ continue
303
+
304
+ except Exception as e:
305
+ # Only catch unexpected non-MCP system errors
306
+ logger.error(f"Unexpected error in MCP function execution: {e}")
307
+ error_msg = f"Error executing {function_name}: {str(e)}"
308
+
309
+ # Add error result to messages
310
+ function_call_msg = {
311
+ "type": "function_call",
312
+ "call_id": call["call_id"],
313
+ "name": function_name,
314
+ "arguments": call["arguments"],
315
+ }
316
+ updated_messages.append(function_call_msg)
317
+
318
+ error_output_msg = {
319
+ "type": "function_call_output",
320
+ "call_id": call["call_id"],
321
+ "output": error_msg,
322
+ }
323
+ updated_messages.append(error_output_msg)
324
+
325
+ processed_call_ids.add(call["call_id"])
326
+ mcp_functions_executed = True
327
+ continue
328
+
329
+ # Add function call to messages and yield status chunk
330
+ function_call_msg = {
331
+ "type": "function_call",
332
+ "call_id": call["call_id"],
333
+ "name": function_name,
334
+ "arguments": call["arguments"],
335
+ }
336
+ updated_messages.append(function_call_msg)
337
+ yield TextStreamChunk(
338
+ type=ChunkType.MCP_STATUS,
339
+ status="function_call",
340
+ content=f"Arguments for Calling {function_name}: {call['arguments']}",
341
+ source=f"mcp_{function_name}",
342
+ )
343
+
344
+ # Add function output to messages and yield status chunk
345
+ function_output_msg = {
67
346
  "type": "function_call_output",
68
- "call_id": message.get("tool_call_id"),
69
- "output": message.get("content", ""),
347
+ "call_id": call["call_id"],
348
+ "output": str(result),
70
349
  }
71
- )
72
- elif message.get("type") == "function_call_output":
73
- # Already in Response API format - keep as-is
74
- converted_messages.append(message)
75
- elif message.get("role") == "assistant" and "tool_calls" in message:
76
- # Assistant message with tool_calls in native Responses API format
77
- # Remove tool_calls when sending as input - only results should be sent back
78
- cleaned_message = {
79
- k: v for k, v in message.items() if k != "tool_calls"
80
- }
81
- converted_messages.append(cleaned_message)
350
+ updated_messages.append(function_output_msg)
351
+ yield TextStreamChunk(
352
+ type=ChunkType.MCP_STATUS,
353
+ status="function_call_output",
354
+ content=f"Results for Calling {function_name}: {str(result_obj.content[0].text)}",
355
+ source=f"mcp_{function_name}",
356
+ )
357
+
358
+ logger.info(f"Executed MCP function {function_name} (stdio/streamable-http)")
359
+ processed_call_ids.add(call["call_id"])
360
+
361
+ # Yield MCP tool response status
362
+ yield TextStreamChunk(
363
+ type=ChunkType.MCP_STATUS,
364
+ status="mcp_tool_response",
365
+ content=f"✅ [MCP Tool] {function_name} completed",
366
+ source=f"mcp_{function_name}",
367
+ )
368
+
369
+ mcp_functions_executed = True
370
+
371
+ # Ensure all captured function calls have results to prevent hanging
372
+ for call in captured_function_calls:
373
+ if call["call_id"] not in processed_call_ids:
374
+ logger.warning(f"Tool call {call['call_id']} for function {call['name']} was not processed - adding error result")
375
+
376
+ # Add missing function call and error result to messages
377
+ function_call_msg = {
378
+ "type": "function_call",
379
+ "call_id": call["call_id"],
380
+ "name": call["name"],
381
+ "arguments": call["arguments"],
382
+ }
383
+ updated_messages.append(function_call_msg)
384
+
385
+ error_output_msg = {
386
+ "type": "function_call_output",
387
+ "call_id": call["call_id"],
388
+ "output": f"Error: Tool call {call['call_id']} for function {call['name']} was not processed. This may indicate a validation or execution error.",
389
+ }
390
+ updated_messages.append(error_output_msg)
391
+ mcp_functions_executed = True
392
+
393
+ # Trim history after function executions to bound memory usage
394
+ if mcp_functions_executed:
395
+ updated_messages = super()._trim_message_history(updated_messages)
396
+
397
+ # Recursive call with updated messages
398
+ async for chunk in self._stream_with_mcp_tools(updated_messages, tools, client, **kwargs):
399
+ yield chunk
82
400
  else:
83
- # Keep other message types as-is
84
- converted_messages.append(message)
401
+ # No MCP functions were executed, we're done
85
402
 
86
- return converted_messages
403
+ yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
404
+ return
405
+
406
+ elif response_completed:
407
+ # Response completed with no function calls - we're done (base case)
408
+
409
+ yield TextStreamChunk(
410
+ type=ChunkType.MCP_STATUS,
411
+ status="mcp_session_complete",
412
+ content="✅ [MCP] Session completed",
413
+ source="mcp_session",
414
+ )
415
+ yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
416
+ return
417
+
418
+ async def _upload_files_and_create_vector_store(
419
+ self,
420
+ messages: List[Dict[str, Any]],
421
+ client: AsyncOpenAI,
422
+ agent_id: Optional[str] = None,
423
+ ) -> Tuple[List[Dict[str, Any]], Optional[str]]:
424
+ """Upload file_pending_upload items and create a vector store."""
87
425
 
88
- async def stream_with_tools(
89
- self, messages: List[Dict[str, Any]], tools: List[Dict[str, Any]], **kwargs
90
- ) -> AsyncGenerator[StreamChunk, None]:
91
- """Stream response using OpenAI Response API."""
92
426
  try:
93
- import openai
427
+ pending_files: List[Dict[str, Any]] = []
428
+ file_locations: List[Tuple[int, int]] = []
429
+
430
+ for message_index, message in enumerate(messages):
431
+ content = message.get("content")
432
+ if not isinstance(content, list):
433
+ continue
434
+
435
+ for item_index, item in enumerate(content):
436
+ if isinstance(item, dict) and item.get("type") == "file_pending_upload":
437
+ pending_files.append(item)
438
+ file_locations.append((message_index, item_index))
439
+
440
+ if not pending_files:
441
+ return messages, None
442
+
443
+ uploaded_file_ids: List[str] = []
444
+
445
+ http_client: Optional[httpx.AsyncClient] = None
446
+
447
+ try:
448
+ for pending in pending_files:
449
+ source = pending.get("source")
450
+
451
+ if source == "local":
452
+ path_str = pending.get("path")
453
+ if not path_str:
454
+ logger.warning("Missing local path for file_pending_upload entry")
455
+ continue
456
+
457
+ file_path = Path(path_str)
458
+ if not file_path.exists():
459
+ raise UploadFileError(f"File not found for upload: {file_path}")
460
+
461
+ try:
462
+ with file_path.open("rb") as file_handle:
463
+ uploaded_file = await client.files.create(
464
+ purpose="assistants",
465
+ file=file_handle,
466
+ )
467
+ except Exception as exc:
468
+ raise UploadFileError(f"Failed to upload file {file_path}: {exc}") from exc
469
+
470
+ elif source == "url":
471
+ file_url = pending.get("url")
472
+ if not file_url:
473
+ logger.warning("Missing URL for file_pending_upload entry")
474
+ continue
94
475
 
95
- client = openai.AsyncOpenAI(api_key=self.api_key)
476
+ parsed = urlparse(file_url)
477
+ if parsed.scheme not in {"http", "https"}:
478
+ raise UploadFileError(f"Unsupported URL scheme for file upload: {file_url}")
96
479
 
97
- # Extract model and provider tool settings
98
- model = kwargs.get("model", "gpt-4o-mini")
99
- enable_web_search = kwargs.get("enable_web_search", False)
100
- enable_code_interpreter = kwargs.get("enable_code_interpreter", False)
480
+ if http_client is None:
481
+ http_client = httpx.AsyncClient()
101
482
 
102
- # Convert messages to Response API format (handles tool messages)
103
- converted_messages = self.convert_messages_to_response_api_format(messages)
483
+ try:
484
+ response = await http_client.get(file_url, timeout=30.0)
485
+ response.raise_for_status()
486
+ except httpx.HTTPError as exc:
487
+ raise UploadFileError(f"Failed to download file from URL {file_url}: {exc}") from exc
104
488
 
105
- # Response API parameters (uses 'input', not 'messages')
106
- api_params = {"model": model, "input": converted_messages, "stream": True}
489
+ filename = Path(parsed.path).name or "remote_file"
490
+ file_bytes = BytesIO(response.content)
107
491
 
108
- # Add max_output_tokens if specified (o-series models don't support this)
109
- max_tokens = kwargs.get("max_tokens")
110
- if max_tokens and not model.startswith("o"):
111
- api_params["max_output_tokens"] = max_tokens
492
+ try:
493
+ uploaded_file = await client.files.create(
494
+ purpose="assistants",
495
+ file=(filename, file_bytes),
496
+ )
497
+ except Exception as exc:
498
+ raise UploadFileError(f"Failed to upload file from URL {file_url}: {exc}") from exc
499
+
500
+ else:
501
+ raise UploadFileError(f"Unknown file_pending_upload source: {source}")
502
+
503
+ file_id = getattr(uploaded_file, "id", None)
504
+ if not file_id:
505
+ raise UploadFileError("Uploaded file response missing ID")
112
506
 
113
- # Add framework tools (convert to Response API format)
114
- if tools:
115
- converted_tools = self.convert_tools_to_response_api_format(tools)
116
- api_params["tools"] = converted_tools
507
+ uploaded_file_ids.append(file_id)
508
+ self._uploaded_file_ids.append(file_id)
509
+ logger.info(f"Uploaded file for File Search (file_id={file_id})")
117
510
 
118
- # Add provider tools (web search, code interpreter) if enabled
119
- provider_tools = []
120
- if enable_web_search:
121
- provider_tools.append({"type": "web_search"})
511
+ finally:
512
+ if http_client is not None:
513
+ await http_client.aclose()
514
+
515
+ timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
516
+ vector_store_name = f"massgen_file_search_{agent_id or 'default'}_{timestamp}"
517
+
518
+ try:
519
+ vector_store = await client.vector_stores.create(name=vector_store_name)
520
+ except Exception as exc:
521
+ raise UploadFileError(f"Failed to create vector store: {exc}") from exc
122
522
 
123
- if enable_code_interpreter:
124
- provider_tools.append(
125
- {"type": "code_interpreter", "container": {"type": "auto"}}
523
+ vector_store_id = getattr(vector_store, "id", None)
524
+ if not vector_store_id:
525
+ raise UploadFileError("Vector store response missing ID")
526
+
527
+ self._vector_store_ids.append(vector_store_id)
528
+ logger.info(
529
+ "Created vector store for File Search",
530
+ extra={
531
+ "vector_store_id": vector_store_id,
532
+ "file_count": len(uploaded_file_ids),
533
+ },
534
+ )
535
+
536
+ for file_id in uploaded_file_ids:
537
+ try:
538
+ vs_file = await client.vector_stores.files.create_and_poll(
539
+ vector_store_id=vector_store_id,
540
+ file_id=file_id,
541
+ )
542
+ logger.info(
543
+ "File indexed and attached to vector store",
544
+ extra={
545
+ "vector_store_id": vector_store_id,
546
+ "file_id": file_id,
547
+ "status": getattr(vs_file, "status", None),
548
+ },
549
+ )
550
+ except Exception as exc:
551
+ raise UploadFileError(
552
+ f"Failed to attach and index file {file_id} to vector store {vector_store_id}: {exc}",
553
+ ) from exc
554
+
555
+ if uploaded_file_ids:
556
+ logger.info(
557
+ "All files indexed for File Search; waiting 2s for vector store to stabilize",
558
+ extra={
559
+ "vector_store_id": vector_store_id,
560
+ "file_count": len(uploaded_file_ids),
561
+ },
126
562
  )
563
+ await asyncio.sleep(2)
127
564
 
128
- if provider_tools:
129
- if "tools" not in api_params:
130
- api_params["tools"] = []
131
- api_params["tools"].extend(provider_tools)
132
-
133
- stream = await client.responses.create(**api_params)
134
-
135
- content = ""
136
-
137
- async for chunk in stream:
138
- # Handle Responses API streaming format
139
- if hasattr(chunk, "type"):
140
- if chunk.type == "response.output_text.delta" and hasattr(
141
- chunk, "delta"
142
- ):
143
- content += chunk.delta
144
- yield StreamChunk(type="content", content=chunk.delta)
145
- elif chunk.type == "response.web_search_call.in_progress":
146
- yield StreamChunk(
147
- type="content",
148
- content=f"\n🔍 [Provider Tool: Web Search] Starting search...",
149
- )
150
- elif chunk.type == "response.web_search_call.searching":
151
- yield StreamChunk(
152
- type="content",
153
- content=f"🔍 [Provider Tool: Web Search] Searching...",
154
- )
155
- elif chunk.type == "response.web_search_call.completed":
156
- yield StreamChunk(
157
- type="content",
158
- content=f"✅ [Provider Tool: Web Search] Search completed",
565
+ updated_messages = []
566
+ for message in messages:
567
+ cloned = dict(message)
568
+ if isinstance(message.get("content"), list):
569
+ cloned["content"] = [dict(item) if isinstance(item, dict) else item for item in message["content"]]
570
+ updated_messages.append(cloned)
571
+ for message_index, item_index in reversed(file_locations):
572
+ content_list = updated_messages[message_index].get("content")
573
+ if isinstance(content_list, list):
574
+ content_list.pop(item_index)
575
+ if not content_list:
576
+ content_list.append(
577
+ {
578
+ "type": "text",
579
+ "text": "[Files uploaded for search integration]",
580
+ },
159
581
  )
160
- elif chunk.type == "response.code_interpreter_call.in_progress":
161
- yield StreamChunk(
162
- type="content",
163
- content=f"\n💻 [Provider Tool: Code Interpreter] Starting execution...",
164
- )
165
- elif chunk.type == "response.code_interpreter_call.executing":
166
- yield StreamChunk(
167
- type="content",
168
- content=f"💻 [Provider Tool: Code Interpreter] Executing...",
169
- )
170
- elif chunk.type == "response.code_interpreter_call.completed":
171
- yield StreamChunk(
172
- type="content",
173
- content=f"✅ [Provider Tool: Code Interpreter] Execution completed",
174
- )
175
- elif chunk.type == "response.output_item.done":
176
- # Get search query or executed code details - show them right after completion
177
- if hasattr(chunk, "item") and chunk.item:
178
- if (
179
- hasattr(chunk.item, "type")
180
- and chunk.item.type == "web_search_call"
181
- ):
182
- if hasattr(chunk.item, "action") and hasattr(
183
- chunk.item.action, "query"
184
- ):
185
- search_query = chunk.item.action.query
186
- if search_query:
187
- yield StreamChunk(
188
- type="content",
189
- content=f"🔍 [Search Query] '{search_query}'",
190
- )
191
- elif (
192
- hasattr(chunk.item, "type")
193
- and chunk.item.type == "code_interpreter_call"
194
- ):
195
- if hasattr(chunk.item, "code") and chunk.item.code:
196
- # Format code as a proper code block - don't assume language
197
- yield StreamChunk(
198
- type="content",
199
- content=f"💻 [Code Executed]\n```\n{chunk.item.code}\n```\n",
200
- )
201
-
202
- # Also show the execution output if available
203
- if (
204
- hasattr(chunk.item, "outputs")
205
- and chunk.item.outputs
206
- ):
207
- for output in chunk.item.outputs:
208
- output_text = None
209
- if hasattr(output, "text") and output.text:
210
- output_text = output.text
211
- elif (
212
- hasattr(output, "content")
213
- and output.content
214
- ):
215
- output_text = output.content
216
- elif hasattr(output, "data") and output.data:
217
- output_text = str(output.data)
218
- elif isinstance(output, str):
219
- output_text = output
220
- elif isinstance(output, dict):
221
- # Handle dict format outputs
222
- if "text" in output:
223
- output_text = output["text"]
224
- elif "content" in output:
225
- output_text = output["content"]
226
- elif "data" in output:
227
- output_text = str(output["data"])
228
-
229
- if output_text and output_text.strip():
230
- yield StreamChunk(
231
- type="content",
232
- content=f"📊 [Result] {output_text.strip()}\n",
233
- )
234
- elif chunk.type == "response.completed":
235
- # Extract and yield tool calls from the complete response
236
- if hasattr(chunk, "response"):
237
- response_dict = self._convert_to_dict(chunk.response)
238
-
239
- # Extract builtin tool results from output array
240
- builtin_tool_results = []
241
- if (
242
- isinstance(response_dict, dict)
243
- and "output" in response_dict
244
- ):
245
- for item in response_dict["output"]:
246
- if item.get("type") == "code_interpreter_call":
247
- # Code execution result
248
- builtin_tool_results.append(
249
- {
250
- "id": item.get("id", ""),
251
- "tool_type": "code_interpreter",
252
- "status": item.get("status"),
253
- "code": item.get("code", ""),
254
- "outputs": item.get("outputs"),
255
- "container_id": item.get(
256
- "container_id"
257
- ),
258
- }
259
- )
260
- elif item.get("type") == "web_search_call":
261
- # Web search result
262
- builtin_tool_results.append(
263
- {
264
- "id": item.get("id", ""),
265
- "tool_type": "web_search",
266
- "status": item.get("status"),
267
- "query": item.get("query", ""),
268
- "results": item.get("results"),
269
- }
270
- )
271
-
272
- # Yield builtin tool results if any were found
273
- if builtin_tool_results:
274
- yield StreamChunk(
275
- type="builtin_tool_results",
276
- builtin_tool_results=builtin_tool_results,
277
- )
278
582
 
279
- # Yield the complete response for internal use
280
- yield StreamChunk(
281
- type="complete_response", response=response_dict
583
+ return updated_messages, vector_store_id
584
+
585
+ except Exception as error:
586
+ logger.warning(f"File Search upload failed: {error}. Continuing without file search.")
587
+ return messages, None
588
+
589
+ async def _cleanup_file_search_resources(
590
+ self,
591
+ client: AsyncOpenAI,
592
+ agent_id: Optional[str] = None,
593
+ ) -> None:
594
+ """Clean up File Search vector stores and uploaded files."""
595
+
596
+ for vector_store_id in list(self._vector_store_ids):
597
+ try:
598
+ await client.vector_stores.delete(vector_store_id)
599
+ logger.info(
600
+ "Deleted File Search vector store",
601
+ extra={
602
+ "vector_store_id": vector_store_id,
603
+ "agent_id": agent_id,
604
+ },
605
+ )
606
+ except Exception as exc:
607
+ logger.warning(
608
+ f"Failed to delete vector store {vector_store_id}: {exc}",
609
+ extra={"agent_id": agent_id},
610
+ )
611
+
612
+ for file_id in list(self._uploaded_file_ids):
613
+ try:
614
+ await client.files.delete(file_id)
615
+ logger.debug(
616
+ "Deleted File Search uploaded file",
617
+ extra={
618
+ "file_id": file_id,
619
+ "agent_id": agent_id,
620
+ },
621
+ )
622
+ except Exception as exc:
623
+ logger.warning(
624
+ f"Failed to delete file {file_id}: {exc}",
625
+ extra={"agent_id": agent_id},
626
+ )
627
+
628
+ self._vector_store_ids.clear()
629
+ self._uploaded_file_ids.clear()
630
+
631
+ # def _save_image_sync(
632
+ # self,
633
+ # image_data: str,
634
+ # prompt: str = None,
635
+ # image_format: str = "png",
636
+ # ) -> Optional[str]:
637
+ # """
638
+ # Save generated image directly to filesystem (synchronous version).
639
+
640
+ # Args:
641
+ # image_data: Base64 encoded image data
642
+ # prompt: Generation prompt (used for naming)
643
+ # image_format: Image format (default png)
644
+
645
+ # Returns:
646
+ # Saved file path, or None if failed
647
+ # """
648
+ # try:
649
+ # # Use agent's filesystem workspace if available, otherwise use current working directory
650
+ # if self.filesystem_manager:
651
+ # workspace_path = self.filesystem_manager.get_current_workspace()
652
+ # else:
653
+ # workspace_path = Path.cwd()
654
+
655
+ # # Create generated_images subdirectory path
656
+ # images_dir = workspace_path
657
+
658
+ # # Create directory if it doesn't exist
659
+ # images_dir.mkdir(parents=True, exist_ok=True)
660
+
661
+ # # Generate filename
662
+ # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
663
+ # if prompt:
664
+ # # Clean prompt for filename
665
+ # clean_prompt = "".join(c for c in prompt[:30] if c.isalnum() or c in (" ", "-", "_")).strip()
666
+ # clean_prompt = clean_prompt.replace(" ", "_")
667
+ # filename = f"{timestamp}_{clean_prompt}.{image_format}"
668
+ # else:
669
+ # filename = f"{timestamp}_generated.{image_format}"
670
+
671
+ # file_path = images_dir / filename
672
+
673
+ # # Decode base64 and write to file
674
+ # image_bytes = base64.b64decode(image_data)
675
+ # file_path.write_bytes(image_bytes)
676
+
677
+ # logger.info(f"Image saved to: {file_path}")
678
+ # return str(file_path)
679
+
680
+ # except Exception as e:
681
+ # logger.error(f"Error saving image: {e}")
682
+ # return None
683
+
684
+ def _convert_mcp_tools_to_openai_format(self) -> List[Dict[str, Any]]:
685
+ """Convert MCP tools (stdio + streamable-http) to OpenAI function declarations."""
686
+ if not self._mcp_functions:
687
+ return []
688
+
689
+ converted_tools = []
690
+ for function in self._mcp_functions.values():
691
+ converted_tools.append(function.to_openai_format())
692
+
693
+ logger.debug(
694
+ f"Converted {len(converted_tools)} MCP tools (stdio + streamable-http) to OpenAI format",
695
+ )
696
+ return converted_tools
697
+
698
+ async def _process_stream(self, stream, all_params, agent_id=None):
699
+ async for chunk in stream:
700
+ processed = self._process_stream_chunk(chunk, agent_id)
701
+ if processed.type == "complete_response":
702
+ # Yield the complete response first
703
+ yield processed
704
+ # Then signal completion with done chunk
705
+ log_stream_chunk("backend.response", "done", None, agent_id)
706
+ yield TextStreamChunk(type=ChunkType.DONE, source="response_api")
707
+ else:
708
+ yield processed
709
+
710
+ def _process_stream_chunk(self, chunk, agent_id) -> Union[TextStreamChunk, StreamChunk]:
711
+ """
712
+ Process individual stream chunks and convert to appropriate chunk format.
713
+
714
+ Returns TextStreamChunk for text/reasoning/tool content,
715
+ or legacy StreamChunk for backward compatibility.
716
+ """
717
+
718
+ if not hasattr(chunk, "type"):
719
+ # Return legacy StreamChunk for backward compatibility
720
+ return StreamChunk(type="content", content="")
721
+ chunk_type = chunk.type
722
+
723
+ # Handle different chunk types
724
+ if chunk_type == "response.output_text.delta" and hasattr(chunk, "delta"):
725
+ log_backend_agent_message(
726
+ agent_id or "default",
727
+ "RECV",
728
+ {"content": chunk.delta},
729
+ backend_name=self.get_provider_name(),
730
+ )
731
+ log_stream_chunk("backend.response", "content", chunk.delta, agent_id)
732
+ return TextStreamChunk(
733
+ type=ChunkType.CONTENT,
734
+ content=chunk.delta,
735
+ source="response_api",
736
+ )
737
+
738
+ elif chunk_type == "response.reasoning_text.delta" and hasattr(chunk, "delta"):
739
+ log_stream_chunk("backend.response", "reasoning", chunk.delta, agent_id)
740
+ return TextStreamChunk(
741
+ type=ChunkType.REASONING,
742
+ content=f"🧠 [Reasoning] {chunk.delta}",
743
+ reasoning_delta=chunk.delta,
744
+ item_id=getattr(chunk, "item_id", None),
745
+ content_index=getattr(chunk, "content_index", None),
746
+ source="response_api",
747
+ )
748
+
749
+ elif chunk_type == "response.reasoning_text.done":
750
+ reasoning_text = getattr(chunk, "text", "")
751
+ log_stream_chunk("backend.response", "reasoning_done", reasoning_text, agent_id)
752
+ return TextStreamChunk(
753
+ type=ChunkType.REASONING_DONE,
754
+ content="\n🧠 [Reasoning Complete]\n",
755
+ reasoning_text=reasoning_text,
756
+ item_id=getattr(chunk, "item_id", None),
757
+ content_index=getattr(chunk, "content_index", None),
758
+ source="response_api",
759
+ )
760
+
761
+ elif chunk_type == "response.reasoning_summary_text.delta" and hasattr(chunk, "delta"):
762
+ log_stream_chunk("backend.response", "reasoning_summary", chunk.delta, agent_id)
763
+ return TextStreamChunk(
764
+ type=ChunkType.REASONING_SUMMARY,
765
+ content=chunk.delta,
766
+ reasoning_summary_delta=chunk.delta,
767
+ item_id=getattr(chunk, "item_id", None),
768
+ summary_index=getattr(chunk, "summary_index", None),
769
+ source="response_api",
770
+ )
771
+
772
+ elif chunk_type == "response.reasoning_summary_text.done":
773
+ summary_text = getattr(chunk, "text", "")
774
+ log_stream_chunk("backend.response", "reasoning_summary_done", summary_text, agent_id)
775
+ return TextStreamChunk(
776
+ type=ChunkType.REASONING_SUMMARY_DONE,
777
+ content="\n📋 [Reasoning Summary Complete]\n",
778
+ reasoning_summary_text=summary_text,
779
+ item_id=getattr(chunk, "item_id", None),
780
+ summary_index=getattr(chunk, "summary_index", None),
781
+ source="response_api",
782
+ )
783
+
+        # Provider tool events
+        elif chunk_type == "response.file_search_call.in_progress":
+            item_id = getattr(chunk, "item_id", None)
+            output_index = getattr(chunk, "output_index", None)
+            log_stream_chunk("backend.response", "file_search", "Starting file search", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n📁 [File Search] Starting search...",
+                item_id=item_id,
+                content_index=output_index,
+                source="response_api",
+            )
+        elif chunk_type == "response.file_search_call.searching":
+            item_id = getattr(chunk, "item_id", None)
+            output_index = getattr(chunk, "output_index", None)
+            queries = getattr(chunk, "queries", None)
+            query_text = ""
+            if queries:
+                try:
+                    if isinstance(queries, (list, tuple)):
+                        query_text = ", ".join(str(q) for q in queries if q)
+                    else:
+                        query_text = str(queries)
+                except Exception:
+                    query_text = ""
+            message = "\n📁 [File Search] Searching..."
+            if query_text:
+                message += f" Query: {query_text}"
+            log_stream_chunk(
+                "backend.response",
+                "file_search",
+                f"Searching files{f' for {query_text}' if query_text else ''}",
+                agent_id,
+            )
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content=message,
+                item_id=item_id,
+                content_index=output_index,
+                source="response_api",
+            )
+        elif chunk_type == "response.file_search_call.completed":
+            item_id = getattr(chunk, "item_id", None)
+            output_index = getattr(chunk, "output_index", None)
+            results = getattr(chunk, "results", None)
+            if results is None:
+                results = getattr(chunk, "search_results", None)
+            queries = getattr(chunk, "queries", None)
+            query_text = ""
+            if queries:
+                try:
+                    if isinstance(queries, (list, tuple)):
+                        query_text = ", ".join(str(q) for q in queries if q)
+                    else:
+                        query_text = str(queries)
+                except Exception:
+                    query_text = ""
+            if results is not None:
+                try:
+                    result_count = len(results)
+                except Exception:
+                    result_count = None
+            else:
+                result_count = None
+            message_parts = ["\n✅ [File Search] Completed"]
+            if query_text:
+                message_parts.append(f"Query: {query_text}")
+            if result_count is not None:
+                message_parts.append(f"Results: {result_count}")
+            message = " ".join(message_parts)
+            log_stream_chunk(
+                "backend.response",
+                "file_search",
+                f"Completed file search{f' for {query_text}' if query_text else ''}{f' with {result_count} results' if result_count is not None else ''}",
+                agent_id,
+            )
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content=message,
+                item_id=item_id,
+                content_index=output_index,
+                source="response_api",
+            )
+
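The searching and completed branches above normalize `queries` with identical try/except blocks; the duplication could be folded into one helper. A sketch of that refactor, with the helper name hypothetical and the behavior copied from the branches above:

def _format_queries(queries) -> str:
    # Join list/tuple queries for display; fall back to str() for scalars.
    if not queries:
        return ""
    try:
        if isinstance(queries, (list, tuple)):
            return ", ".join(str(q) for q in queries if q)
        return str(queries)
    except Exception:
        return ""

With this in place, both branches reduce to query_text = _format_queries(getattr(chunk, "queries", None)).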
+        elif chunk_type == "response.web_search_call.in_progress":
+            log_stream_chunk("backend.response", "web_search", "Starting search", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n🔍 [Provider Tool: Web Search] Starting search...",
+                source="response_api",
+            )
+        elif chunk_type == "response.web_search_call.searching":
+            log_stream_chunk("backend.response", "web_search", "Searching", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n🔍 [Provider Tool: Web Search] Searching...",
+                source="response_api",
+            )
+        elif chunk_type == "response.web_search_call.completed":
+            log_stream_chunk("backend.response", "web_search", "Search completed", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n✅ [Provider Tool: Web Search] Search completed",
+                source="response_api",
+            )
+
+        elif chunk_type == "response.code_interpreter_call.in_progress":
+            log_stream_chunk("backend.response", "code_interpreter", "Starting execution", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n💻 [Provider Tool: Code Interpreter] Starting execution...",
+                source="response_api",
+            )
+        elif chunk_type == "response.code_interpreter_call.executing":
+            log_stream_chunk("backend.response", "code_interpreter", "Executing", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n💻 [Provider Tool: Code Interpreter] Executing...",
+                source="response_api",
+            )
+        elif chunk_type == "response.code_interpreter_call.completed":
+            log_stream_chunk("backend.response", "code_interpreter", "Execution completed", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n✅ [Provider Tool: Code Interpreter] Execution completed",
+                source="response_api",
+            )
+
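The web-search and code-interpreter branches above differ only in the display string, so a lookup table could replace six near-identical blocks. A sketch of that alternative; TextStreamChunk and ChunkType are as in the diff, the table and function names are hypothetical:

# Maps status-only event types to the text emitted for them.
_STATUS_EVENTS = {
    "response.web_search_call.in_progress": "\n🔍 [Provider Tool: Web Search] Starting search...",
    "response.web_search_call.searching": "\n🔍 [Provider Tool: Web Search] Searching...",
    "response.web_search_call.completed": "\n✅ [Provider Tool: Web Search] Search completed",
    "response.code_interpreter_call.in_progress": "\n💻 [Provider Tool: Code Interpreter] Starting execution...",
    "response.code_interpreter_call.executing": "\n💻 [Provider Tool: Code Interpreter] Executing...",
    "response.code_interpreter_call.completed": "\n✅ [Provider Tool: Code Interpreter] Execution completed",
}

def status_chunk(chunk_type: str):
    # Returns None for event types that need richer handling.
    text = _STATUS_EVENTS.get(chunk_type)
    if text is None:
        return None
    return TextStreamChunk(type=ChunkType.CONTENT, content=text, source="response_api")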
+        # Image Generation events
+        elif chunk_type == "response.image_generation_call.in_progress":
+            log_stream_chunk("backend.response", "image_generation", "Starting image generation", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n🎨 [Provider Tool: Image Generation] Starting generation...",
+                source="response_api",
+            )
+        elif chunk_type == "response.image_generation_call.generating":
+            log_stream_chunk("backend.response", "image_generation", "Generating image", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n🎨 [Provider Tool: Image Generation] Generating image...",
+                source="response_api",
+            )
+        elif chunk_type == "response.image_generation_call.completed":
+            log_stream_chunk("backend.response", "image_generation", "Image generation completed", agent_id)
+            return TextStreamChunk(
+                type=ChunkType.CONTENT,
+                content="\n✅ [Provider Tool: Image Generation] Image generated successfully",
+                source="response_api",
+            )
+        elif chunk_type == "image_generation.completed":
+            # Handle the final image generation result
+            if hasattr(chunk, "b64_json"):
+                log_stream_chunk("backend.response", "image_generation", "Image data received", agent_id)
+                # The image is complete, return a status message
+                return TextStreamChunk(
+                    type=ChunkType.CONTENT,
+                    content="\n✅ [Image Generation] Image successfully created",
+                    source="response_api",
+                )
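The `image_generation.completed` branch above detects a `b64_json` payload but deliberately emits only a status line. A consumer that wants the image bytes would decode that field itself; a minimal sketch, assuming the attribute holds base64-encoded image data as the hasattr check suggests:

import base64

def save_generated_image(chunk, path: str = "generated.png") -> None:
    # Decode the base64 payload the handler above acknowledges but skips.
    data = getattr(chunk, "b64_json", None)
    if data:
        with open(path, "wb") as fh:
            fh.write(base64.b64decode(data))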
+        elif chunk.type == "response.output_item.done":
+            # Get search query or executed code details - show them right after completion
+            if hasattr(chunk, "item") and chunk.item:
+                if hasattr(chunk.item, "type") and chunk.item.type == "web_search_call":
+                    if hasattr(chunk.item, "action") and ("query" in chunk.item.action):
+                        search_query = chunk.item.action["query"]
+                        if search_query:
+                            log_stream_chunk("backend.response", "search_query", search_query, agent_id)
+                            return TextStreamChunk(
+                                type=ChunkType.CONTENT,
+                                content=f"\n🔍 [Search Query] '{search_query}'\n",
+                                source="response_api",
                             )
-                else:
-                    # Fallback if no response object
-                    complete_message = {
-                        "role": "assistant",
-                        "content": content.strip(),
-                    }
-                    yield StreamChunk(
-                        type="complete_message",
-                        complete_message=complete_message,
+                elif hasattr(chunk.item, "type") and chunk.item.type == "code_interpreter_call":
+                    if hasattr(chunk.item, "code") and chunk.item.code:
+                        # Format code as a proper code block - don't assume language
+                        log_stream_chunk("backend.response", "code_executed", chunk.item.code, agent_id)
+                        return TextStreamChunk(
+                            type=ChunkType.CONTENT,
+                            content=f"💻 [Code Executed]\n```\n{chunk.item.code}\n```\n",
+                            source="response_api",
+                        )
+
+                    # Also show the execution output if available
+                    if hasattr(chunk.item, "outputs") and chunk.item.outputs:
+                        for output in chunk.item.outputs:
+                            output_text = None
+                            if hasattr(output, "text") and output.text:
+                                output_text = output.text
+                            elif hasattr(output, "content") and output.content:
+                                output_text = output.content
+                            elif hasattr(output, "data") and output.data:
+                                output_text = str(output.data)
+                            elif isinstance(output, str):
+                                output_text = output
+                            elif isinstance(output, dict):
+                                # Handle dict format outputs
+                                if "text" in output:
+                                    output_text = output["text"]
+                                elif "content" in output:
+                                    output_text = output["content"]
+                                elif "data" in output:
+                                    output_text = str(output["data"])
+
+                            if output_text and output_text.strip():
+                                log_stream_chunk("backend.response", "code_result", output_text.strip(), agent_id)
+                                return TextStreamChunk(
+                                    type=ChunkType.CONTENT,
+                                    content=f"📊 [Result] {output_text.strip()}\n",
+                                    source="response_api",
+                                )
+                elif hasattr(chunk.item, "type") and chunk.item.type == "image_generation_call":
+                    # Image generation completed - show details
+                    if hasattr(chunk.item, "action") and chunk.item.action:
+                        prompt = chunk.item.action.get("prompt", "")
+                        size = chunk.item.action.get("size", "1024x1024")
+                        if prompt:
+                            log_stream_chunk("backend.response", "image_prompt", prompt, agent_id)
+                            return TextStreamChunk(
+                                type=ChunkType.CONTENT,
+                                content=f"\n🎨 [Image Generated] Prompt: '{prompt}' (Size: {size})\n",
+                                source="response_api",
                             )
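The outputs loop above probes text, content, and data twice, once as attributes and once as dict keys, because the SDK may surface outputs in either shape. A single accessor covering both would shorten it; a sketch with hypothetical helper names:

from typing import Optional

def _get_field(output, name: str):
    # Dict-style outputs use keys; object-style outputs use attributes.
    if isinstance(output, dict):
        return output.get(name)
    return getattr(output, name, None)

def _output_text(output) -> Optional[str]:
    if isinstance(output, str):
        return output
    for name in ("text", "content", "data"):
        value = _get_field(output, name)
        if value:
            return str(value)
    return None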
+        # MCP events
+        elif chunk_type == "response.mcp_list_tools.started":
+            return TextStreamChunk(
+                type=ChunkType.MCP_STATUS,
+                content="\n🔧 [MCP] Listing available tools...",
+                source="response_api",
+            )
+        elif chunk_type == "response.mcp_list_tools.completed":
+            return TextStreamChunk(
+                type=ChunkType.MCP_STATUS,
+                content="\n✅ [MCP] Tool listing completed",
+                source="response_api",
+            )
+        elif chunk_type == "response.mcp_list_tools.failed":
+            return TextStreamChunk(
+                type=ChunkType.MCP_STATUS,
+                content="\n❌ [MCP] Tool listing failed",
+                source="response_api",
+            )
 
-            # Signal completion
-            yield StreamChunk(type="done")
+        elif chunk_type == "response.mcp_call.started":
+            tool_name = getattr(chunk, "tool_name", "unknown")
+            return TextStreamChunk(
+                type=ChunkType.MCP_STATUS,
+                content=f"\n🔧 [MCP] Calling tool '{tool_name}'...",
+                source="response_api",
+            )
+        elif chunk_type == "response.mcp_call.in_progress":
+            return TextStreamChunk(
+                type=ChunkType.MCP_STATUS,
+                content="\n⏳ [MCP] Tool execution in progress...",
+                source="response_api",
+            )
+        elif chunk_type == "response.mcp_call.completed":
+            tool_name = getattr(chunk, "tool_name", "unknown")
+            return TextStreamChunk(
+                type=ChunkType.MCP_STATUS,
+                content=f"\n✅ [MCP] Tool '{tool_name}' completed",
+                source="response_api",
+            )
+        elif chunk_type == "response.mcp_call.failed":
+            tool_name = getattr(chunk, "tool_name", "unknown")
+            error_msg = getattr(chunk, "error", "unknown error")
+            return TextStreamChunk(
+                type=ChunkType.MCP_STATUS,
+                content=f"\n❌ [MCP] Tool '{tool_name}' failed: {error_msg}",
+                source="response_api",
+            )
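Because MCP lifecycle updates are tagged ChunkType.MCP_STATUS rather than CONTENT, a frontend can route them away from the transcript. A minimal rendering sketch; names other than the ChunkType values are hypothetical:

async def render(stream) -> None:
    async for chunk in stream:
        if chunk.type == ChunkType.MCP_STATUS:
            # Tool lifecycle updates go to a side channel, not the transcript.
            print(f"[status] {chunk.content.strip()}")
        elif chunk.type == ChunkType.CONTENT:
            print(chunk.content, end="")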
 
-        except Exception as e:
-            yield StreamChunk(type="error", error=str(e))
+        elif chunk.type == "response.completed":
+            # Extract and yield tool calls from the complete response
+            if hasattr(chunk, "response"):
+                response_dict = self._convert_to_dict(chunk.response)
 
-    def get_provider_name(self) -> str:
-        """Get the provider name."""
-        return "OpenAI"
+                # Handle builtin tool results from output array with simple content format
+                if isinstance(response_dict, dict) and "output" in response_dict:
+                    for item in response_dict["output"]:
+                        if item.get("type") == "code_interpreter_call":
+                            # Code execution result
+                            status = item.get("status", "unknown")
+                            code = item.get("code", "")
+                            outputs = item.get("outputs")
+                            content = f"\n🔧 Code Interpreter [{status.title()}]"
+                            if code:
+                                content += f": {code}"
+                            if outputs:
+                                content += f" → {outputs}"
 
-    def get_supported_builtin_tools(self) -> List[str]:
-        """Get list of builtin tools supported by OpenAI."""
-        return ["web_search", "code_interpreter"]
+                            log_stream_chunk("backend.response", "code_interpreter_result", content, agent_id)
+                            return TextStreamChunk(
+                                type=ChunkType.CONTENT,
+                                content=content,
+                                source="response_api",
+                            )
+                        elif item.get("type") == "web_search_call":
+                            # Web search result
+                            status = item.get("status", "unknown")
+                            # Query is in action.query, not directly in item
+                            query = item.get("action", {}).get("query", "")
+                            results = item.get("results")
 
-    def extract_tool_name(self, tool_call: Dict[str, Any]) -> str:
-        """Extract tool name from OpenAI format (handles both Chat Completions and Responses API)."""
-        # Check if it's Chat Completions format
-        if "function" in tool_call:
-            return tool_call.get("function", {}).get("name", "unknown")
-        # Otherwise assume Responses API format
-        return tool_call.get("name", "unknown")
-
-    def extract_tool_arguments(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
-        """Extract tool arguments from OpenAI format (handles both Chat Completions and Responses API)."""
-        # Check if it's Chat Completions format
-        if "function" in tool_call:
-            return tool_call.get("function", {}).get("arguments", {})
-        # Otherwise assume Responses API format
-        arguments = tool_call.get("arguments", {})
-        if isinstance(arguments, str):
-            try:
-                import json
+                            # Only show web search completion if query is present
+                            if query:
+                                content = f"\n🔧 Web Search [{status.title()}]: {query}"
+                                if results:
+                                    content += f" → Found {len(results)} results"
+                                log_stream_chunk("backend.response", "web_search_result", content, agent_id)
+                                return TextStreamChunk(
+                                    type=ChunkType.CONTENT,
+                                    content=content,
+                                    source="response_api",
+                                )
+                        elif item.get("type") == "image_generation_call":
+                            # Image generation result in completed response
+                            status = item.get("status", "unknown")
+                            action = item.get("action", {})
+                            prompt = action.get("prompt", "")
+                            size = action.get("size", "1024x1024")
 
-                return json.loads(arguments)
-            except:
-                return {}
-        return arguments
+                            if prompt:
+                                content = f"\n🔧 Image Generation [{status.title()}]: {prompt} (Size: {size})"
+                                log_stream_chunk("backend.response", "image_generation_result", content, agent_id)
+                                return TextStreamChunk(
+                                    type=ChunkType.CONTENT,
+                                    content=content,
+                                    source="response_api",
+                                )
+                # Yield the complete response for internal use
+                log_stream_chunk("backend.response", "complete_response", "Response completed", agent_id)
+                return TextStreamChunk(
+                    type=ChunkType.COMPLETE_RESPONSE,
+                    response=response_dict,
+                    source="response_api",
+                )
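The `response.completed` branch converts the full response to a dict and scans its output array for builtin tool items before yielding a COMPLETE_RESPONSE chunk. Function tool calls can be pulled from the same array; a sketch, assuming Responses API output items of type "function_call" carrying call_id, name, and arguments, which is consistent with the extractors elsewhere in this file:

def extract_function_calls(response_dict: dict) -> list:
    # Collect function tool calls from the completed response's output array.
    return [
        item
        for item in response_dict.get("output", [])
        if item.get("type") == "function_call"
    ]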
 
-    def extract_tool_call_id(self, tool_call: Dict[str, Any]) -> str:
-        """Extract tool call ID from OpenAI format (handles both Chat Completions and Responses API)."""
-        # For Responses API, use call_id (for tool results), for Chat Completions use id
-        return tool_call.get("call_id") or tool_call.get("id") or ""
+        # Default chunk - this should not happen for valid responses
+        # Return legacy StreamChunk for backward compatibility
+        return StreamChunk(type="content", content="")
 
     def create_tool_result_message(
-        self, tool_call: Dict[str, Any], result_content: str
+        self,
+        tool_call: Dict[str, Any],
+        result_content: str,
     ) -> Dict[str, Any]:
         """Create tool result message for OpenAI Responses API format."""
         tool_call_id = self.extract_tool_call_id(tool_call)
@@ -350,6 +1141,9 @@ class ResponseBackend(LLMBackend):
         """Extract content from OpenAI Responses API tool result message."""
         return tool_result_message.get("output", "")
 
+    def _create_client(self, **kwargs) -> AsyncOpenAI:
+        return openai.AsyncOpenAI(api_key=self.api_key)
+
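Read together, extract_tool_call_id (which prefers call_id) and the "output" field consumed above imply a result-message shape like the following. The "type" value is an assumption here, following the OpenAI Responses API convention for tool results:

def make_tool_result(tool_call_id: str, result_content: str) -> dict:
    # "function_call_output" is the assumed Responses API item type.
    return {
        "type": "function_call_output",
        "call_id": tool_call_id,
        "output": result_content,
    }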
     def _convert_to_dict(self, obj) -> Dict[str, Any]:
         """Convert any object to dictionary with multiple fallback methods."""
         try:
@@ -359,39 +1153,18 @@ class ResponseBackend(LLMBackend):
                 return obj.dict()
             else:
                 return dict(obj)
-        except:
+        except Exception:
             # Final fallback: extract key attributes manually
-            return {
-                key: getattr(obj, key, None)
-                for key in dir(obj)
-                if not key.startswith("_") and not callable(getattr(obj, key, None))
-            }
-
-    def estimate_tokens(self, text: str) -> int:
-        """Estimate token count for text (rough approximation)."""
-        return len(text) // 4
-
-    def calculate_cost(
-        self, input_tokens: int, output_tokens: int, model: str
-    ) -> float:
-        """Calculate cost for OpenAI token usage (2024-2025 pricing)."""
-        model_lower = model.lower()
-
-        if "gpt-4" in model_lower:
-            if "4o-mini" in model_lower:
-                input_cost = input_tokens * 0.00015 / 1000
-                output_cost = output_tokens * 0.0006 / 1000
-            elif "4o" in model_lower:
-                input_cost = input_tokens * 0.005 / 1000
-                output_cost = output_tokens * 0.020 / 1000
-            else:
-                input_cost = input_tokens * 0.03 / 1000
-                output_cost = output_tokens * 0.06 / 1000
-        elif "gpt-3.5" in model_lower:
-            input_cost = input_tokens * 0.0005 / 1000
-            output_cost = output_tokens * 0.0015 / 1000
-        else:
-            input_cost = input_tokens * 0.0005 / 1000
-            output_cost = output_tokens * 0.0015 / 1000
-
-        return input_cost + output_cost
+            return {key: getattr(obj, key, None) for key in dir(obj) if not key.startswith("_") and not callable(getattr(obj, key, None))}
+
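The visible fallbacks in _convert_to_dict are obj.dict(), dict(obj), and finally the dir()-based attribute scrape; an earlier branch is elided by the hunk boundary above. For a plain object with no dict protocol, only the scrape applies:

class Event:
    # No dict()/mapping support, so only the attribute scrape handles it.
    item_id = "abc"
    status = "completed"

# The scrape yields {"item_id": "abc", "status": "completed"}.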
+    def get_provider_name(self) -> str:
+        """Get the provider name."""
+        return "OpenAI"
+
+    def get_filesystem_support(self) -> FilesystemSupport:
+        """OpenAI supports filesystem through MCP servers."""
+        return FilesystemSupport.MCP
+
+    def get_supported_builtin_tools(self) -> List[str]:
+        """Get list of builtin tools supported by OpenAI."""
+        return ["web_search", "code_interpreter"]
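get_supported_builtin_tools advertises web_search and code_interpreter; a request builder would map those names onto Responses API tool specs. A hypothetical sketch, since the exact spec payloads vary by OpenAI API version and the dicts below are illustrative only:

def builtin_tool_specs(names) -> list:
    # Translate advertised tool names into request-level tool specs.
    specs = []
    for name in names:
        if name == "web_search":
            specs.append({"type": "web_search"})
        elif name == "code_interpreter":
            specs.append({"type": "code_interpreter", "container": {"type": "auto"}})
    return specs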