massgen 0.0.3__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of massgen has been flagged as a potentially problematic release.

Files changed (268):
  1. massgen/__init__.py +142 -8
  2. massgen/adapters/__init__.py +29 -0
  3. massgen/adapters/ag2_adapter.py +483 -0
  4. massgen/adapters/base.py +183 -0
  5. massgen/adapters/tests/__init__.py +0 -0
  6. massgen/adapters/tests/test_ag2_adapter.py +439 -0
  7. massgen/adapters/tests/test_agent_adapter.py +128 -0
  8. massgen/adapters/utils/__init__.py +2 -0
  9. massgen/adapters/utils/ag2_utils.py +236 -0
  10. massgen/adapters/utils/tests/__init__.py +0 -0
  11. massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
  12. massgen/agent_config.py +329 -55
  13. massgen/api_params_handler/__init__.py +10 -0
  14. massgen/api_params_handler/_api_params_handler_base.py +99 -0
  15. massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
  16. massgen/api_params_handler/_claude_api_params_handler.py +113 -0
  17. massgen/api_params_handler/_response_api_params_handler.py +130 -0
  18. massgen/backend/__init__.py +39 -4
  19. massgen/backend/azure_openai.py +385 -0
  20. massgen/backend/base.py +341 -69
  21. massgen/backend/base_with_mcp.py +1102 -0
  22. massgen/backend/capabilities.py +386 -0
  23. massgen/backend/chat_completions.py +577 -130
  24. massgen/backend/claude.py +1033 -537
  25. massgen/backend/claude_code.py +1203 -0
  26. massgen/backend/cli_base.py +209 -0
  27. massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
  28. massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
  29. massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
  30. massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
  31. massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
  32. massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
  33. massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
  34. massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
  35. massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
  36. massgen/backend/docs/inference_backend.md +257 -0
  37. massgen/backend/docs/permissions_and_context_files.md +1085 -0
  38. massgen/backend/external.py +126 -0
  39. massgen/backend/gemini.py +1850 -241
  40. massgen/backend/grok.py +40 -156
  41. massgen/backend/inference.py +156 -0
  42. massgen/backend/lmstudio.py +171 -0
  43. massgen/backend/response.py +1095 -322
  44. massgen/chat_agent.py +131 -113
  45. massgen/cli.py +1560 -275
  46. massgen/config_builder.py +2396 -0
  47. massgen/configs/BACKEND_CONFIGURATION.md +458 -0
  48. massgen/configs/README.md +559 -216
  49. massgen/configs/ag2/ag2_case_study.yaml +27 -0
  50. massgen/configs/ag2/ag2_coder.yaml +34 -0
  51. massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
  52. massgen/configs/ag2/ag2_gemini.yaml +27 -0
  53. massgen/configs/ag2/ag2_groupchat.yaml +108 -0
  54. massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
  55. massgen/configs/ag2/ag2_single_agent.yaml +21 -0
  56. massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
  57. massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
  58. massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
  59. massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
  60. massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
  61. massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
  62. massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
  63. massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
  64. massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
  65. massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
  66. massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
  67. massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
  68. massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
  69. massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
  70. massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
  71. massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
  72. massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
  73. massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
  74. massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
  75. massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
  76. massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
  77. massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
  78. massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
  79. massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
  80. massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
  81. massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
  82. massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
  83. massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
  84. massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
  85. massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
  86. massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
  87. massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
  88. massgen/configs/debug/skip_coordination_test.yaml +27 -0
  89. massgen/configs/debug/test_sdk_migration.yaml +17 -0
  90. massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
  91. massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
  92. massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
  93. massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
  94. massgen/configs/providers/claude/claude.yaml +14 -0
  95. massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
  96. massgen/configs/providers/local/lmstudio.yaml +11 -0
  97. massgen/configs/providers/openai/gpt5.yaml +46 -0
  98. massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
  99. massgen/configs/providers/others/grok_single_agent.yaml +19 -0
  100. massgen/configs/providers/others/zai_coding_team.yaml +108 -0
  101. massgen/configs/providers/others/zai_glm45.yaml +12 -0
  102. massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
  103. massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
  104. massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
  105. massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
  106. massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
  107. massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
  108. massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
  109. massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
  110. massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
  111. massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
  112. massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
  113. massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
  114. massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
  115. massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
  116. massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
  117. massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
  118. massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
  119. massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
  120. massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
  121. massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
  122. massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
  123. massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
  124. massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
  125. massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
  126. massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
  127. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
  128. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
  129. massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
  130. massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
  131. massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
  132. massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
  133. massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
  134. massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
  135. massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
  136. massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
  137. massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
  138. massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
  139. massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
  140. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
  141. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
  142. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
  143. massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
  144. massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
  145. massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
  146. massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
  147. massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
  148. massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
  149. massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
  150. massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
  151. massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
  152. massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
  153. massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
  154. massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
  155. massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
  156. massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
  157. massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
  158. massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
  159. massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
  160. massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
  161. massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
  162. massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
  163. massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
  164. massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
  165. massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
  166. massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
  167. massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
  168. massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
  169. massgen/coordination_tracker.py +708 -0
  170. massgen/docker/README.md +462 -0
  171. massgen/filesystem_manager/__init__.py +21 -0
  172. massgen/filesystem_manager/_base.py +9 -0
  173. massgen/filesystem_manager/_code_execution_server.py +545 -0
  174. massgen/filesystem_manager/_docker_manager.py +477 -0
  175. massgen/filesystem_manager/_file_operation_tracker.py +248 -0
  176. massgen/filesystem_manager/_filesystem_manager.py +813 -0
  177. massgen/filesystem_manager/_path_permission_manager.py +1261 -0
  178. massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
  179. massgen/formatter/__init__.py +10 -0
  180. massgen/formatter/_chat_completions_formatter.py +284 -0
  181. massgen/formatter/_claude_formatter.py +235 -0
  182. massgen/formatter/_formatter_base.py +156 -0
  183. massgen/formatter/_response_formatter.py +263 -0
  184. massgen/frontend/__init__.py +1 -2
  185. massgen/frontend/coordination_ui.py +471 -286
  186. massgen/frontend/displays/base_display.py +56 -11
  187. massgen/frontend/displays/create_coordination_table.py +1956 -0
  188. massgen/frontend/displays/rich_terminal_display.py +1259 -619
  189. massgen/frontend/displays/simple_display.py +9 -4
  190. massgen/frontend/displays/terminal_display.py +27 -68
  191. massgen/logger_config.py +681 -0
  192. massgen/mcp_tools/README.md +232 -0
  193. massgen/mcp_tools/__init__.py +105 -0
  194. massgen/mcp_tools/backend_utils.py +1035 -0
  195. massgen/mcp_tools/circuit_breaker.py +195 -0
  196. massgen/mcp_tools/client.py +894 -0
  197. massgen/mcp_tools/config_validator.py +138 -0
  198. massgen/mcp_tools/docs/circuit_breaker.md +646 -0
  199. massgen/mcp_tools/docs/client.md +950 -0
  200. massgen/mcp_tools/docs/config_validator.md +478 -0
  201. massgen/mcp_tools/docs/exceptions.md +1165 -0
  202. massgen/mcp_tools/docs/security.md +854 -0
  203. massgen/mcp_tools/exceptions.py +338 -0
  204. massgen/mcp_tools/hooks.py +212 -0
  205. massgen/mcp_tools/security.py +780 -0
  206. massgen/message_templates.py +342 -64
  207. massgen/orchestrator.py +1515 -241
  208. massgen/stream_chunk/__init__.py +35 -0
  209. massgen/stream_chunk/base.py +92 -0
  210. massgen/stream_chunk/multimodal.py +237 -0
  211. massgen/stream_chunk/text.py +162 -0
  212. massgen/tests/mcp_test_server.py +150 -0
  213. massgen/tests/multi_turn_conversation_design.md +0 -8
  214. massgen/tests/test_azure_openai_backend.py +156 -0
  215. massgen/tests/test_backend_capabilities.py +262 -0
  216. massgen/tests/test_backend_event_loop_all.py +179 -0
  217. massgen/tests/test_chat_completions_refactor.py +142 -0
  218. massgen/tests/test_claude_backend.py +15 -28
  219. massgen/tests/test_claude_code.py +268 -0
  220. massgen/tests/test_claude_code_context_sharing.py +233 -0
  221. massgen/tests/test_claude_code_orchestrator.py +175 -0
  222. massgen/tests/test_cli_backends.py +180 -0
  223. massgen/tests/test_code_execution.py +679 -0
  224. massgen/tests/test_external_agent_backend.py +134 -0
  225. massgen/tests/test_final_presentation_fallback.py +237 -0
  226. massgen/tests/test_gemini_planning_mode.py +351 -0
  227. massgen/tests/test_grok_backend.py +7 -10
  228. massgen/tests/test_http_mcp_server.py +42 -0
  229. massgen/tests/test_integration_simple.py +198 -0
  230. massgen/tests/test_mcp_blocking.py +125 -0
  231. massgen/tests/test_message_context_building.py +29 -47
  232. massgen/tests/test_orchestrator_final_presentation.py +48 -0
  233. massgen/tests/test_path_permission_manager.py +2087 -0
  234. massgen/tests/test_rich_terminal_display.py +14 -13
  235. massgen/tests/test_timeout.py +133 -0
  236. massgen/tests/test_v3_3agents.py +11 -12
  237. massgen/tests/test_v3_simple.py +8 -13
  238. massgen/tests/test_v3_three_agents.py +11 -18
  239. massgen/tests/test_v3_two_agents.py +8 -13
  240. massgen/token_manager/__init__.py +7 -0
  241. massgen/token_manager/token_manager.py +400 -0
  242. massgen/utils.py +52 -16
  243. massgen/v1/agent.py +45 -91
  244. massgen/v1/agents.py +18 -53
  245. massgen/v1/backends/gemini.py +50 -153
  246. massgen/v1/backends/grok.py +21 -54
  247. massgen/v1/backends/oai.py +39 -111
  248. massgen/v1/cli.py +36 -93
  249. massgen/v1/config.py +8 -12
  250. massgen/v1/logging.py +43 -127
  251. massgen/v1/main.py +18 -32
  252. massgen/v1/orchestrator.py +68 -209
  253. massgen/v1/streaming_display.py +62 -163
  254. massgen/v1/tools.py +8 -12
  255. massgen/v1/types.py +9 -23
  256. massgen/v1/utils.py +5 -23
  257. massgen-0.1.0.dist-info/METADATA +1245 -0
  258. massgen-0.1.0.dist-info/RECORD +273 -0
  259. massgen-0.1.0.dist-info/entry_points.txt +2 -0
  260. massgen/frontend/logging/__init__.py +0 -9
  261. massgen/frontend/logging/realtime_logger.py +0 -197
  262. massgen-0.0.3.dist-info/METADATA +0 -568
  263. massgen-0.0.3.dist-info/RECORD +0 -76
  264. massgen-0.0.3.dist-info/entry_points.txt +0 -2
  265. /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
  266. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
  267. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
  268. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
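
The diff excerpted below is massgen/backend/chat_completions.py (entry 23 above), which was rebuilt on top of the new MCPBackend base class.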
@@ -1,67 +1,411 @@
-from __future__ import annotations
-
+# -*- coding: utf-8 -*-
 """
 Base class for backends using OpenAI Chat Completions API format.
 Handles common message processing, tool conversion, and streaming patterns.
+
+Supported Providers and Environment Variables:
+- OpenAI: OPENAI_API_KEY
+- Cerebras AI: CEREBRAS_API_KEY
+- Together AI: TOGETHER_API_KEY
+- Fireworks AI: FIREWORKS_API_KEY
+- Groq: GROQ_API_KEY
+- Kimi/Moonshot: MOONSHOT_API_KEY or KIMI_API_KEY
+- Nebius AI Studio: NEBIUS_API_KEY
+- OpenRouter: OPENROUTER_API_KEY
+- ZAI: ZAI_API_KEY
+- POE: POE_API_KEY
+- Qwen: QWEN_API_KEY
 """
 
-import os
-from typing import Dict, List, Any, AsyncGenerator, Optional
-from .base import LLMBackend, StreamChunk
+from __future__ import annotations
+
+# Standard library imports
+from typing import Any, AsyncGenerator, Dict, List, Optional
+
+# Third-party imports
+from openai import AsyncOpenAI
+
+from ..api_params_handler import ChatCompletionsAPIParamsHandler
+from ..formatter import ChatCompletionsFormatter
+from ..logger_config import log_backend_agent_message, log_stream_chunk, logger
+
+# Local imports
+from .base import FilesystemSupport, StreamChunk
+from .base_with_mcp import MCPBackend
+
 
+class ChatCompletionsBackend(MCPBackend):
+    """Complete OpenAI-compatible Chat Completions API backend.
 
-class ChatCompletionsBackend(LLMBackend):
-    """Base class for backends using Chat Completions API with shared streaming logic."""
+    Can be used directly with any OpenAI-compatible provider by setting provider name.
+    Supports Cerebras AI, Together AI, Fireworks AI, DeepInfra, and other compatible providers.
+
+    Environment Variables:
+        Provider-specific API keys are automatically detected based on provider name.
+        See ProviderRegistry.PROVIDERS for the complete list.
+
+    """
 
     def __init__(self, api_key: Optional[str] = None, **kwargs):
         super().__init__(api_key, **kwargs)
+        # Backend name is already set in MCPBackend, but we may need to override it
+        self.backend_name = self.get_provider_name()
+        self.formatter = ChatCompletionsFormatter()
+        self.api_params_handler = ChatCompletionsAPIParamsHandler(self)
+
+    def supports_upload_files(self) -> bool:
+        """Chat Completions backend supports upload_files preprocessing."""
+        return True
+
+    async def stream_with_tools(
+        self,
+        messages: List[Dict[str, Any]],
+        tools: List[Dict[str, Any]],
+        **kwargs,
+    ) -> AsyncGenerator[StreamChunk, None]:
+        """Stream response using OpenAI Response API with unified MCP/non-MCP processing."""
+        async for chunk in super().stream_with_tools(messages, tools, **kwargs):
+            yield chunk
+
+    async def _stream_with_mcp_tools(
+        self,
+        current_messages: List[Dict[str, Any]],
+        tools: List[Dict[str, Any]],
+        client,
+        **kwargs,
+    ) -> AsyncGenerator[StreamChunk, None]:
+        """Recursively stream MCP responses, executing function calls as needed."""
+
+        # Build API params for this iteration
+        all_params = {**self.config, **kwargs}
+        api_params = await self.api_params_handler.build_api_params(current_messages, tools, all_params)
+
+        # Add provider tools (web search, code interpreter) if enabled
+        provider_tools = self.api_params_handler.get_provider_tools(all_params)
+
+        if provider_tools:
+            if "tools" not in api_params:
+                api_params["tools"] = []
+            api_params["tools"].extend(provider_tools)
+
+        # Start streaming
+        stream = await client.chat.completions.create(**api_params)
+
+        # Track function calls in this iteration
+        captured_function_calls = []
+        current_tool_calls = {}
+        response_completed = False
+        content = ""
+
+        async for chunk in stream:
+            try:
+                if hasattr(chunk, "choices") and chunk.choices:
+                    choice = chunk.choices[0]
+
+                    # Handle content delta
+                    if hasattr(choice, "delta") and choice.delta:
+                        delta = choice.delta
+
+                        # Plain text content
+                        if getattr(delta, "content", None):
+                            content_chunk = delta.content
+                            content += content_chunk
+                            yield StreamChunk(type="content", content=content_chunk)
+
+                        # Tool calls streaming (OpenAI-style)
+                        if getattr(delta, "tool_calls", None):
+                            for tool_call_delta in delta.tool_calls:
+                                index = getattr(tool_call_delta, "index", 0)
+
+                                if index not in current_tool_calls:
+                                    current_tool_calls[index] = {
+                                        "id": "",
+                                        "function": {
+                                            "name": "",
+                                            "arguments": "",
+                                        },
+                                    }
+
+                                # Accumulate id
+                                if getattr(tool_call_delta, "id", None):
+                                    current_tool_calls[index]["id"] = tool_call_delta.id
+
+                                # Function name
+                                if hasattr(tool_call_delta, "function") and tool_call_delta.function:
+                                    if getattr(tool_call_delta.function, "name", None):
+                                        current_tool_calls[index]["function"]["name"] = tool_call_delta.function.name
+
+                                    # Accumulate arguments (as string chunks)
+                                    if getattr(tool_call_delta.function, "arguments", None):
+                                        current_tool_calls[index]["function"]["arguments"] += tool_call_delta.function.arguments
+
+                    # Handle finish reason
+                    if getattr(choice, "finish_reason", None):
+                        if choice.finish_reason == "tool_calls" and current_tool_calls:
+                            final_tool_calls = []
+
+                            for index in sorted(current_tool_calls.keys()):
+                                call = current_tool_calls[index]
+                                function_name = call["function"]["name"]
+                                arguments_str = call["function"]["arguments"]
+
+                                # Providers expect arguments to be a JSON string
+                                arguments_str_sanitized = arguments_str if arguments_str.strip() else "{}"
+
+                                final_tool_calls.append(
+                                    {
+                                        "id": call["id"],
+                                        "type": "function",
+                                        "function": {
+                                            "name": function_name,
+                                            "arguments": arguments_str_sanitized,
+                                        },
+                                    },
+                                )
+
+                            # Convert to captured format for processing (ensure arguments is a JSON string)
+                            for tool_call in final_tool_calls:
+                                args_value = tool_call["function"]["arguments"]
+                                if not isinstance(args_value, str):
+                                    args_value = self.formatter._serialize_tool_arguments(args_value)
+                                captured_function_calls.append(
+                                    {
+                                        "call_id": tool_call["id"],
+                                        "name": tool_call["function"]["name"],
+                                        "arguments": args_value,
+                                    },
+                                )
+
+                            yield StreamChunk(type="tool_calls", tool_calls=final_tool_calls)
+
+                            response_completed = True
+                            break  # Exit chunk loop to execute functions
+
+                        elif choice.finish_reason in ["stop", "length"]:
+                            response_completed = True
+                            # No function calls, we're done (base case)
+                            yield StreamChunk(type="done")
+                            return
+
+            except Exception as chunk_error:
+                yield StreamChunk(type="error", error=f"Chunk processing error: {chunk_error}")
+                continue
 
-    def convert_tools_to_chat_completions_format(
-        self, tools: List[Dict[str, Any]]
-    ) -> List[Dict[str, Any]]:
-        """Convert tools from Response API format to Chat Completions format if needed.
-
-        Response API format: {"type": "function", "name": ..., "description": ..., "parameters": ...}
-        Chat Completions format: {"type": "function", "function": {"name": ..., "description": ..., "parameters": ...}}
-        """
-        if not tools:
-            return tools
-
-        converted_tools = []
-        for tool in tools:
-            if tool.get("type") == "function":
-                if "function" in tool:
-                    # Already in Chat Completions format
-                    converted_tools.append(tool)
-                elif "name" in tool and "description" in tool:
-                    # Response API format - convert to Chat Completions format
-                    converted_tools.append(
+        # Execute any captured function calls
+        if captured_function_calls and response_completed:
+            # Check if any of the function calls are NOT MCP functions
+            non_mcp_functions = [call for call in captured_function_calls if call["name"] not in self._mcp_functions]
+
+            if non_mcp_functions:
+                logger.info(f"Non-MCP function calls detected (will be ignored in MCP execution): {[call['name'] for call in non_mcp_functions]}")
+
+            # Check circuit breaker status before executing MCP functions
+            if not await self._check_circuit_breaker_before_execution():
+                yield StreamChunk(
+                    type="mcp_status",
+                    status="mcp_blocked",
+                    content="⚠️ [MCP] All servers blocked by circuit breaker",
+                    source="circuit_breaker",
+                )
+                yield StreamChunk(type="done")
+                return
+
+            # Execute only MCP function calls
+            mcp_functions_executed = False
+            updated_messages = current_messages.copy()
+
+            # Check if planning mode is enabled - block MCP tool execution during planning
+            if self.is_planning_mode_enabled():
+                logger.info("[MCP] Planning mode enabled - blocking all MCP tool execution")
+                yield StreamChunk(
+                    type="mcp_status",
+                    status="planning_mode_blocked",
+                    content="🚫 [MCP] Planning mode active - MCP tools blocked during coordination",
+                    source="planning_mode",
+                )
+                # Skip all MCP tool execution but still continue with workflow
+                yield StreamChunk(type="done")
+                return
+
+            # Create single assistant message with all tool calls
+            if captured_function_calls:
+                # First add the assistant message with ALL tool_calls (both MCP and non-MCP)
+                all_tool_calls = []
+                for call in captured_function_calls:
+                    all_tool_calls.append(
                         {
+                            "id": call["call_id"],
                             "type": "function",
                             "function": {
-                                "name": tool["name"],
-                                "description": tool["description"],
-                                "parameters": tool.get("parameters", {}),
+                                "name": call["name"],
+                                "arguments": self.formatter._serialize_tool_arguments(call["arguments"]),
+                            },
+                        },
+                    )
+
+                # Add assistant message with all tool calls
+                if all_tool_calls:
+                    assistant_message = {
+                        "role": "assistant",
+                        "content": content.strip() if content.strip() else None,
+                        "tool_calls": all_tool_calls,
+                    }
+                    updated_messages.append(assistant_message)
+
+            # Execute functions and collect results
+            tool_results = []
+            for call in captured_function_calls:
+                function_name = call["name"]
+                if self.is_mcp_tool_call(function_name):
+                    yield StreamChunk(
+                        type="mcp_status",
+                        status="mcp_tool_called",
+                        content=f"🔧 [MCP Tool] Calling {function_name}...",
+                        source=f"mcp_{function_name}",
+                    )
+
+                    # Yield detailed MCP status as StreamChunk (similar to gemini.py)
+                    tools_info = f" ({len(self._mcp_functions)} tools available)" if self._mcp_functions else ""
+                    yield StreamChunk(
+                        type="mcp_status",
+                        status="mcp_tools_initiated",
+                        content=f"MCP tool call initiated (call #{self._mcp_tool_calls_count}){tools_info}: {function_name}",
+                        source=f"mcp_{function_name}",
+                    )
+
+                    try:
+                        # Execute MCP function with retry and exponential backoff
+                        (
+                            result_str,
+                            result_obj,
+                        ) = await self._execute_mcp_function_with_retry(function_name, call["arguments"])
+
+                        # Check if function failed after all retries
+                        if isinstance(result_str, str) and result_str.startswith("Error:"):
+                            # Log failure but still create tool response
+                            logger.warning(f"MCP function {function_name} failed after retries: {result_str}")
+                            tool_results.append(
+                                {
+                                    "tool_call_id": call["call_id"],
+                                    "content": result_str,
+                                    "success": False,
+                                },
+                            )
+                        else:
+                            # Yield MCP success status as StreamChunk (similar to gemini.py)
+                            yield StreamChunk(
+                                type="mcp_status",
+                                status="mcp_tools_success",
+                                content=f"MCP tool call succeeded (call #{self._mcp_tool_calls_count})",
+                                source=f"mcp_{function_name}",
+                            )
+
+                            tool_results.append(
+                                {
+                                    "tool_call_id": call["call_id"],
+                                    "content": result_str,
+                                    "success": True,
+                                    "result_obj": result_obj,
+                                },
+                            )
+
+                    except Exception as e:
+                        # Only catch unexpected non-MCP system errors
+                        logger.error(f"Unexpected error in MCP function execution: {e}")
+                        error_msg = f"Error executing {function_name}: {str(e)}"
+                        tool_results.append(
+                            {
+                                "tool_call_id": call["call_id"],
+                                "content": error_msg,
+                                "success": False,
                             },
-                        }
+                        )
+                        continue
+
+                    # Yield function_call status
+                    yield StreamChunk(
+                        type="mcp_status",
+                        status="function_call",
+                        content=f"Arguments for Calling {function_name}: {call['arguments']}",
+                        source=f"mcp_{function_name}",
                     )
+
+                    logger.info(f"Executed MCP function {function_name} (stdio/streamable-http)")
+                    mcp_functions_executed = True
                 else:
-                    # Unknown format - keep as-is
-                    converted_tools.append(tool)
+                    # For non-MCP functions, add a dummy tool result to maintain message consistency
+                    logger.info(f"Non-MCP function {function_name} detected, creating placeholder response")
+                    tool_results.append(
+                        {
+                            "tool_call_id": call["call_id"],
+                            "content": f"Function {function_name} is not available in this MCP session.",
+                            "success": False,
+                        },
+                    )
+
+            # Add all tool response messages after the assistant message
+            for result in tool_results:
+                # Yield function_call_output status with preview
+                result_text = str(result["content"])
+                if result.get("success") and hasattr(result.get("result_obj"), "content") and result["result_obj"].content:
+                    obj = result["result_obj"]
+                    if isinstance(obj.content, list) and len(obj.content) > 0:
+                        first_item = obj.content[0]
+                        if hasattr(first_item, "text"):
+                            result_text = first_item.text
+
+                yield StreamChunk(
+                    type="mcp_status",
+                    status="function_call_output",
+                    content=f"Results for Calling {function_name}: {result_text}",
+                    source=f"mcp_{function_name}",
+                )
+
+                function_output_msg = {
+                    "role": "tool",
+                    "tool_call_id": result["tool_call_id"],
+                    "content": result["content"],
+                }
+                updated_messages.append(function_output_msg)
+
+                yield StreamChunk(
+                    type="mcp_status",
+                    status="mcp_tool_response",
+                    content=f"✅ [MCP Tool] {function_name} completed",
+                    source=f"mcp_{function_name}",
+                )
+
+            # Trim history after function executions to bound memory usage
+            if mcp_functions_executed:
+                updated_messages = self._trim_message_history(updated_messages)
+
+                # Recursive call with updated messages
+                async for chunk in self._stream_with_mcp_tools(updated_messages, tools, client, **kwargs):
+                    yield chunk
             else:
-                # Non-function tool - keep as-is
-                converted_tools.append(tool)
+                # No MCP functions were executed, we're done
+                yield StreamChunk(type="done")
+                return
 
-        return converted_tools
+        elif response_completed:
+            # Response completed with no function calls - we're done (base case)
+            yield StreamChunk(
+                type="mcp_status",
+                status="mcp_session_complete",
+                content="✅ [MCP] Session completed",
+                source="mcp_session",
+            )
+            return
+
+    async def _process_stream(self, stream, all_params, agent_id) -> AsyncGenerator[StreamChunk, None]:
+        """Handle standard Chat Completions API streaming format with logging."""
 
-    async def handle_chat_completions_stream(
-        self, stream, enable_web_search: bool = False
-    ) -> AsyncGenerator[StreamChunk, None]:
-        """Handle standard Chat Completions API streaming format."""
        content = ""
        current_tool_calls = {}
        search_sources_used = 0
-        citations = []
+        provider_name = self.get_provider_name()
+        enable_web_search = all_params.get("enable_web_search", False)
+        log_prefix = f"backend.{provider_name.lower().replace(' ', '_')}"
 
        async for chunk in stream:
            try:
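
Both accumulation loops in this hunk follow the same pattern: the Chat Completions API streams each tool call as fragments keyed by index, and the JSON arguments arrive as string chunks that are concatenated until a finish_reason closes the call. A minimal, self-contained sketch of that pattern, using dict stand-ins for the real chunk.choices[0].delta.tool_calls objects (the deltas and tool name below are illustrative, not taken from the package):

# Sketch only: simulated streamed tool-call deltas.
deltas = [
    {"index": 0, "id": "call_abc", "name": "get_weather", "arguments": ""},
    {"index": 0, "id": None, "name": None, "arguments": '{"city": '},
    {"index": 0, "id": None, "name": None, "arguments": '"Paris"}'},
]

current_tool_calls = {}
for d in deltas:
    # One slot per streamed index; arguments grow chunk by chunk.
    slot = current_tool_calls.setdefault(
        d["index"], {"id": "", "function": {"name": "", "arguments": ""}}
    )
    if d["id"]:
        slot["id"] = d["id"]
    if d["name"]:
        slot["function"]["name"] = d["name"]
    if d["arguments"]:
        slot["function"]["arguments"] += d["arguments"]

# On finish_reason == "tool_calls": empty argument strings fall back to "{}".
final_tool_calls = [
    {
        "id": c["id"],
        "type": "function",
        "function": {
            "name": c["function"]["name"],
            "arguments": c["function"]["arguments"] if c["function"]["arguments"].strip() else "{}",
        },
    }
    for c in current_tool_calls.values()
]
print(final_tool_calls)  # one complete get_weather call with arguments '{"city": "Paris"}'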
@@ -70,122 +414,135 @@ class ChatCompletionsBackend(LLMBackend):
 
                    # Handle content delta
                    if hasattr(choice, "delta") and choice.delta:
-                        if hasattr(choice.delta, "content") and choice.delta.content:
-                            content_chunk = choice.delta.content
+                        delta = choice.delta
+
+                        # Plain text content
+                        if getattr(delta, "content", None):
+                            # handle reasoning first
+                            reasoning_chunk = self._handle_reasoning_transition(log_prefix, agent_id)
+                            if reasoning_chunk:
+                                yield reasoning_chunk
+                            content_chunk = delta.content
                            content += content_chunk
+                            log_backend_agent_message(
+                                agent_id or "default",
+                                "RECV",
+                                {"content": content_chunk},
+                                backend_name=provider_name,
+                            )
+                            log_stream_chunk(log_prefix, "content", content_chunk, agent_id)
                            yield StreamChunk(type="content", content=content_chunk)
 
-                        # Handle tool calls streaming
-                        if (
-                            hasattr(choice.delta, "tool_calls")
-                            and choice.delta.tool_calls
-                        ):
-                            for tool_call_delta in choice.delta.tool_calls:
+                        # Provider-specific reasoning/thinking streams (non-standard OpenAI fields)
+                        if getattr(delta, "reasoning_content", None):
+                            reasoning_active_key = "_reasoning_active"
+                            setattr(self, reasoning_active_key, True)
+                            thinking_delta = getattr(delta, "reasoning_content")
+                            if thinking_delta:
+                                log_stream_chunk(log_prefix, "reasoning", thinking_delta, agent_id)
+                                yield StreamChunk(
+                                    type="reasoning",
+                                    content=thinking_delta,
+                                    reasoning_delta=thinking_delta,
+                                )
+
+                        # Tool calls streaming (OpenAI-style)
+                        if getattr(delta, "tool_calls", None):
+                            # handle reasoning first
+                            reasoning_chunk = self._handle_reasoning_transition(log_prefix, agent_id)
+                            if reasoning_chunk:
+                                yield reasoning_chunk
+
+                            for tool_call_delta in delta.tool_calls:
                                index = getattr(tool_call_delta, "index", 0)
 
                                if index not in current_tool_calls:
                                    current_tool_calls[index] = {
                                        "id": "",
-                                        "name": "",
-                                        "arguments": "",
+                                        "function": {
+                                            "name": "",
+                                            "arguments": "",
+                                        },
                                    }
 
-                                if (
-                                    hasattr(tool_call_delta, "id")
-                                    and tool_call_delta.id
-                                ):
+                                # Accumulate id
+                                if getattr(tool_call_delta, "id", None):
                                    current_tool_calls[index]["id"] = tool_call_delta.id
 
-                                if (
-                                    hasattr(tool_call_delta, "function")
-                                    and tool_call_delta.function
-                                ):
-                                    if (
-                                        hasattr(tool_call_delta.function, "name")
-                                        and tool_call_delta.function.name
-                                    ):
-                                        current_tool_calls[index][
-                                            "name"
-                                        ] = tool_call_delta.function.name
-
-                                    if (
-                                        hasattr(tool_call_delta.function, "arguments")
-                                        and tool_call_delta.function.arguments
-                                    ):
-                                        current_tool_calls[index][
-                                            "arguments"
-                                        ] += tool_call_delta.function.arguments
+                                # Function name
+                                if hasattr(tool_call_delta, "function") and tool_call_delta.function:
+                                    if getattr(tool_call_delta.function, "name", None):
+                                        current_tool_calls[index]["function"]["name"] = tool_call_delta.function.name
+
+                                    # Accumulate arguments (as string chunks)
+                                    if getattr(tool_call_delta.function, "arguments", None):
+                                        current_tool_calls[index]["function"]["arguments"] += tool_call_delta.function.arguments
 
                    # Handle finish reason
-                    if hasattr(choice, "finish_reason") and choice.finish_reason:
+                    if getattr(choice, "finish_reason", None):
+                        # handle reasoning first
+                        reasoning_chunk = self._handle_reasoning_transition(log_prefix, agent_id)
+                        if reasoning_chunk:
+                            yield reasoning_chunk
+
                        if choice.finish_reason == "tool_calls" and current_tool_calls:
-                            # Convert accumulated tool calls to final format
                            final_tool_calls = []
+
                            for index in sorted(current_tool_calls.keys()):
-                                tool_call = current_tool_calls[index]
-
-                                # Parse arguments as JSON
-                                arguments = tool_call["arguments"]
-                                if isinstance(arguments, str):
-                                    try:
-                                        import json
-
-                                        arguments = (
-                                            json.loads(arguments)
-                                            if arguments.strip()
-                                            else {}
-                                        )
-                                    except json.JSONDecodeError:
-                                        arguments = {}
+                                call = current_tool_calls[index]
+                                function_name = call["function"]["name"]
+                                arguments_str = call["function"]["arguments"]
+
+                                # Providers expect arguments to be a JSON string
+                                arguments_str_sanitized = arguments_str if arguments_str.strip() else "{}"
 
                                final_tool_calls.append(
                                    {
-                                        "id": tool_call["id"] or f"call_{index}",
+                                        "id": call["id"],
                                        "type": "function",
                                        "function": {
-                                            "name": tool_call["name"],
-                                            "arguments": arguments,
+                                            "name": function_name,
+                                            "arguments": arguments_str_sanitized,
                                        },
-                                    }
+                                    },
                                )
 
-                            yield StreamChunk(
-                                type="tool_calls", tool_calls=final_tool_calls
-                            )
+                            log_stream_chunk(log_prefix, "tool_calls", final_tool_calls, agent_id)
+                            yield StreamChunk(type="tool_calls", tool_calls=final_tool_calls)
 
-                            # Build and yield complete message
                            complete_message = {
                                "role": "assistant",
                                "content": content.strip(),
+                                "tool_calls": final_tool_calls,
                            }
-                            if final_tool_calls:
-                                complete_message["tool_calls"] = final_tool_calls
+
                            yield StreamChunk(
                                type="complete_message",
                                complete_message=complete_message,
                            )
+                            log_stream_chunk(log_prefix, "done", None, agent_id)
+                            yield StreamChunk(type="done")
+                            return
+
                        elif choice.finish_reason in ["stop", "length"]:
                            if search_sources_used > 0:
+                                search_complete_msg = f"\n✅ [Live Search Complete] Used {search_sources_used} sources\n"
+                                log_stream_chunk(log_prefix, "content", search_complete_msg, agent_id)
                                yield StreamChunk(
                                    type="content",
-                                    content=f"\n✅ [Live Search Complete] Used {search_sources_used} sources\n",
+                                    content=search_complete_msg,
                                )
 
-                            # Check for citations before building complete message
-                            if (
-                                hasattr(chunk, "citations")
-                                and chunk.citations
-                                and len(chunk.citations) > 0
-                            ):
+                            # Handle citations if present
+                            if hasattr(chunk, "citations") and chunk.citations:
                                if enable_web_search:
                                    citation_text = "\n📚 **Citations:**\n"
                                    for i, citation in enumerate(chunk.citations, 1):
                                        citation_text += f"{i}. {citation}\n"
-                                    yield StreamChunk(
-                                        type="content", content=citation_text
-                                    )
+                                    log_stream_chunk(log_prefix, "content", citation_text, agent_id)
+                                    yield StreamChunk(type="content", content=citation_text)
 
-                            # Build and yield complete message (no tool calls)
+                            # Return final message
                            complete_message = {
                                "role": "assistant",
                                "content": content.strip(),
@@ -194,35 +551,125 @@ class ChatCompletionsBackend(LLMBackend):
                                type="complete_message",
                                complete_message=complete_message,
                            )
-
+                            log_stream_chunk(log_prefix, "done", None, agent_id)
                            yield StreamChunk(type="done")
-                            return
+                            return
 
-                # Check for usage information (search sources) and citations
+                # Optionally handle usage metadata
                if hasattr(chunk, "usage") and chunk.usage:
-                    if (
-                        hasattr(chunk.usage, "num_sources_used")
-                        and chunk.usage.num_sources_used
-                    ):
+                    if getattr(chunk.usage, "num_sources_used", 0) > 0:
                        search_sources_used = chunk.usage.num_sources_used
-                        if enable_web_search and search_sources_used > 0:
+                        if enable_web_search:
+                            search_msg = f"\n📊 [Live Search] Using {search_sources_used} sources for real-time data\n"
+                            log_stream_chunk(log_prefix, "content", search_msg, agent_id)
                            yield StreamChunk(
                                type="content",
-                                content=f"\n📊 [Live Search] Using {search_sources_used} sources for real-time data\n",
+                                content=search_msg,
                            )
 
            except Exception as chunk_error:
-                yield StreamChunk(
-                    type="error", error=f"Chunk processing error: {chunk_error}"
-                )
+                error_msg = f"Chunk processing error: {chunk_error}"
+                log_stream_chunk(log_prefix, "error", error_msg, agent_id)
+                yield StreamChunk(type="error", error=error_msg)
                continue
 
+        # Fallback in case stream ends without finish_reason
+        log_stream_chunk(log_prefix, "done", None, agent_id)
        yield StreamChunk(type="done")
 
-    def extract_tool_name(self, tool_call: Dict[str, Any]) -> str:
-        """Extract tool name from Chat Completions format."""
-        return tool_call.get("function", {}).get("name", "unknown")
+    def create_tool_result_message(self, tool_call: Dict[str, Any], result_content: str) -> Dict[str, Any]:
+        """Create tool result message for Chat Completions format."""
+        tool_call_id = self.extract_tool_call_id(tool_call)
+        return {
+            "role": "tool",
+            "tool_call_id": tool_call_id,
+            "content": result_content,
+        }
+
+    def extract_tool_result_content(self, tool_result_message: Dict[str, Any]) -> str:
+        """Extract content from Chat Completions tool result message."""
+        return tool_result_message.get("content", "")
+
+    def _convert_messages_for_mcp_chat_completions(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Convert messages for MCP Chat Completions format if needed."""
+        # For Chat Completions, messages are already in the correct format
+        # Just ensure tool result messages use the correct format
+        converted_messages = []
+
+        for message in messages:
+            if message.get("type") == "function_call_output":
+                # Convert Response API format to Chat Completions format
+                converted_message = {
+                    "role": "tool",
+                    "tool_call_id": message.get("call_id"),
+                    "content": message.get("output", ""),
+                }
+                converted_messages.append(converted_message)
+            else:
+                # Pass through other messages as-is
+                converted_messages.append(message.copy())
+
+        return converted_messages
+
+    def get_provider_name(self) -> str:
+        """Get the name of this provider."""
+        # Check if provider name was explicitly set in config
+        if "provider" in self.config:
+            return self.config["provider"]
+        elif "provider_name" in self.config:
+            return self.config["provider_name"]
+
+        # Try to infer from base_url
+        base_url = self.config.get("base_url", "")
+        if "openai.com" in base_url:
+            return "OpenAI"
+        elif "cerebras.ai" in base_url:
+            return "Cerebras AI"
+        elif "together.xyz" in base_url:
+            return "Together AI"
+        elif "fireworks.ai" in base_url:
+            return "Fireworks AI"
+        elif "groq.com" in base_url:
+            return "Groq"
+        elif "openrouter.ai" in base_url:
+            return "OpenRouter"
+        elif "z.ai" in base_url or "bigmodel.cn" in base_url:
+            return "ZAI"
+        elif "nebius.com" in base_url:
+            return "Nebius AI Studio"
+        elif "moonshot.ai" in base_url or "moonshot.cn" in base_url:
+            return "Kimi"
+        elif "poe.com" in base_url:
+            return "POE"
+        elif "aliyuncs.com" in base_url:
+            return "Qwen"
+        else:
+            return "ChatCompletion"
+
+    def get_filesystem_support(self) -> FilesystemSupport:
+        """Chat Completions supports filesystem through MCP servers."""
+        return FilesystemSupport.MCP
+
+    def get_supported_builtin_tools(self) -> List[str]:
+        """Get list of builtin tools supported by this provider."""
+        # Chat Completions API doesn't typically support builtin tools like web_search
+        # But some providers might - this can be overridden in subclasses
+        return []
+
+    def _create_client(self, **kwargs) -> AsyncOpenAI:
+        """Create OpenAI client with consistent configuration."""
+        import openai
+
+        all_params = {**self.config, **kwargs}
+        base_url = all_params.get("base_url", "https://api.openai.com/v1")
+        return openai.AsyncOpenAI(api_key=self.api_key, base_url=base_url)
 
-    def extract_tool_arguments(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
-        """Extract tool arguments from Chat Completions format."""
-        return tool_call.get("function", {}).get("arguments", {})
+    def _handle_reasoning_transition(self, log_prefix: str, agent_id: Optional[str]) -> Optional[StreamChunk]:
+        """Handle reasoning state transition and return StreamChunk if transition occurred."""
+        reasoning_active_key = "_reasoning_active"
+        if hasattr(self, reasoning_active_key):
+            if getattr(self, reasoning_active_key) is True:
+                setattr(self, reasoning_active_key, False)
+                log_stream_chunk(log_prefix, "reasoning_done", "", agent_id)
+                return StreamChunk(type="reasoning_done", content="")
+        return None
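
Since provider routing in get_provider_name keys entirely off base_url, pointing the stock OpenAI client at a compatible endpoint is all the backend ultimately does (see _create_client above). A hedged usage sketch of that setup; the Groq endpoint and model name here are illustrative assumptions, not values taken from this package:

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    # Any OpenAI-compatible provider works; only the key and base_url change.
    client = AsyncOpenAI(api_key="YOUR_KEY", base_url="https://api.groq.com/openai/v1")
    stream = await client.chat.completions.create(
        model="llama-3.1-8b-instant",  # illustrative model name
        messages=[{"role": "user", "content": "Say hello."}],
        stream=True,
    )
    async for chunk in stream:
        delta = chunk.choices[0].delta if chunk.choices else None
        if delta and delta.content:
            print(delta.content, end="", flush=True)


asyncio.run(main())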