massgen 0.0.3-py3-none-any.whl → 0.1.0-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.

Files changed (268)
  1. massgen/__init__.py +142 -8
  2. massgen/adapters/__init__.py +29 -0
  3. massgen/adapters/ag2_adapter.py +483 -0
  4. massgen/adapters/base.py +183 -0
  5. massgen/adapters/tests/__init__.py +0 -0
  6. massgen/adapters/tests/test_ag2_adapter.py +439 -0
  7. massgen/adapters/tests/test_agent_adapter.py +128 -0
  8. massgen/adapters/utils/__init__.py +2 -0
  9. massgen/adapters/utils/ag2_utils.py +236 -0
  10. massgen/adapters/utils/tests/__init__.py +0 -0
  11. massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
  12. massgen/agent_config.py +329 -55
  13. massgen/api_params_handler/__init__.py +10 -0
  14. massgen/api_params_handler/_api_params_handler_base.py +99 -0
  15. massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
  16. massgen/api_params_handler/_claude_api_params_handler.py +113 -0
  17. massgen/api_params_handler/_response_api_params_handler.py +130 -0
  18. massgen/backend/__init__.py +39 -4
  19. massgen/backend/azure_openai.py +385 -0
  20. massgen/backend/base.py +341 -69
  21. massgen/backend/base_with_mcp.py +1102 -0
  22. massgen/backend/capabilities.py +386 -0
  23. massgen/backend/chat_completions.py +577 -130
  24. massgen/backend/claude.py +1033 -537
  25. massgen/backend/claude_code.py +1203 -0
  26. massgen/backend/cli_base.py +209 -0
  27. massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
  28. massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
  29. massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
  30. massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
  31. massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
  32. massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
  33. massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
  34. massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
  35. massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
  36. massgen/backend/docs/inference_backend.md +257 -0
  37. massgen/backend/docs/permissions_and_context_files.md +1085 -0
  38. massgen/backend/external.py +126 -0
  39. massgen/backend/gemini.py +1850 -241
  40. massgen/backend/grok.py +40 -156
  41. massgen/backend/inference.py +156 -0
  42. massgen/backend/lmstudio.py +171 -0
  43. massgen/backend/response.py +1095 -322
  44. massgen/chat_agent.py +131 -113
  45. massgen/cli.py +1560 -275
  46. massgen/config_builder.py +2396 -0
  47. massgen/configs/BACKEND_CONFIGURATION.md +458 -0
  48. massgen/configs/README.md +559 -216
  49. massgen/configs/ag2/ag2_case_study.yaml +27 -0
  50. massgen/configs/ag2/ag2_coder.yaml +34 -0
  51. massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
  52. massgen/configs/ag2/ag2_gemini.yaml +27 -0
  53. massgen/configs/ag2/ag2_groupchat.yaml +108 -0
  54. massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
  55. massgen/configs/ag2/ag2_single_agent.yaml +21 -0
  56. massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
  57. massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
  58. massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
  59. massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
  60. massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
  61. massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
  62. massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
  63. massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
  64. massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
  65. massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
  66. massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
  67. massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
  68. massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
  69. massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
  70. massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
  71. massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
  72. massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
  73. massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
  74. massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
  75. massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
  76. massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
  77. massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
  78. massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
  79. massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
  80. massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
  81. massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
  82. massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
  83. massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
  84. massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
  85. massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
  86. massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
  87. massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
  88. massgen/configs/debug/skip_coordination_test.yaml +27 -0
  89. massgen/configs/debug/test_sdk_migration.yaml +17 -0
  90. massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
  91. massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
  92. massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
  93. massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
  94. massgen/configs/providers/claude/claude.yaml +14 -0
  95. massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
  96. massgen/configs/providers/local/lmstudio.yaml +11 -0
  97. massgen/configs/providers/openai/gpt5.yaml +46 -0
  98. massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
  99. massgen/configs/providers/others/grok_single_agent.yaml +19 -0
  100. massgen/configs/providers/others/zai_coding_team.yaml +108 -0
  101. massgen/configs/providers/others/zai_glm45.yaml +12 -0
  102. massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
  103. massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
  104. massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
  105. massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
  106. massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
  107. massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
  108. massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
  109. massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
  110. massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
  111. massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
  112. massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
  113. massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
  114. massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
  115. massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
  116. massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
  117. massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
  118. massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
  119. massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
  120. massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
  121. massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
  122. massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
  123. massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
  124. massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
  125. massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
  126. massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
  127. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
  128. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
  129. massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
  130. massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
  131. massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
  132. massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
  133. massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
  134. massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
  135. massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
  136. massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
  137. massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
  138. massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
  139. massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
  140. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
  141. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
  142. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
  143. massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
  144. massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
  145. massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
  146. massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
  147. massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
  148. massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
  149. massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
  150. massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
  151. massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
  152. massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
  153. massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
  154. massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
  155. massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
  156. massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
  157. massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
  158. massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
  159. massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
  160. massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
  161. massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
  162. massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
  163. massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
  164. massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
  165. massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
  166. massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
  167. massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
  168. massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
  169. massgen/coordination_tracker.py +708 -0
  170. massgen/docker/README.md +462 -0
  171. massgen/filesystem_manager/__init__.py +21 -0
  172. massgen/filesystem_manager/_base.py +9 -0
  173. massgen/filesystem_manager/_code_execution_server.py +545 -0
  174. massgen/filesystem_manager/_docker_manager.py +477 -0
  175. massgen/filesystem_manager/_file_operation_tracker.py +248 -0
  176. massgen/filesystem_manager/_filesystem_manager.py +813 -0
  177. massgen/filesystem_manager/_path_permission_manager.py +1261 -0
  178. massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
  179. massgen/formatter/__init__.py +10 -0
  180. massgen/formatter/_chat_completions_formatter.py +284 -0
  181. massgen/formatter/_claude_formatter.py +235 -0
  182. massgen/formatter/_formatter_base.py +156 -0
  183. massgen/formatter/_response_formatter.py +263 -0
  184. massgen/frontend/__init__.py +1 -2
  185. massgen/frontend/coordination_ui.py +471 -286
  186. massgen/frontend/displays/base_display.py +56 -11
  187. massgen/frontend/displays/create_coordination_table.py +1956 -0
  188. massgen/frontend/displays/rich_terminal_display.py +1259 -619
  189. massgen/frontend/displays/simple_display.py +9 -4
  190. massgen/frontend/displays/terminal_display.py +27 -68
  191. massgen/logger_config.py +681 -0
  192. massgen/mcp_tools/README.md +232 -0
  193. massgen/mcp_tools/__init__.py +105 -0
  194. massgen/mcp_tools/backend_utils.py +1035 -0
  195. massgen/mcp_tools/circuit_breaker.py +195 -0
  196. massgen/mcp_tools/client.py +894 -0
  197. massgen/mcp_tools/config_validator.py +138 -0
  198. massgen/mcp_tools/docs/circuit_breaker.md +646 -0
  199. massgen/mcp_tools/docs/client.md +950 -0
  200. massgen/mcp_tools/docs/config_validator.md +478 -0
  201. massgen/mcp_tools/docs/exceptions.md +1165 -0
  202. massgen/mcp_tools/docs/security.md +854 -0
  203. massgen/mcp_tools/exceptions.py +338 -0
  204. massgen/mcp_tools/hooks.py +212 -0
  205. massgen/mcp_tools/security.py +780 -0
  206. massgen/message_templates.py +342 -64
  207. massgen/orchestrator.py +1515 -241
  208. massgen/stream_chunk/__init__.py +35 -0
  209. massgen/stream_chunk/base.py +92 -0
  210. massgen/stream_chunk/multimodal.py +237 -0
  211. massgen/stream_chunk/text.py +162 -0
  212. massgen/tests/mcp_test_server.py +150 -0
  213. massgen/tests/multi_turn_conversation_design.md +0 -8
  214. massgen/tests/test_azure_openai_backend.py +156 -0
  215. massgen/tests/test_backend_capabilities.py +262 -0
  216. massgen/tests/test_backend_event_loop_all.py +179 -0
  217. massgen/tests/test_chat_completions_refactor.py +142 -0
  218. massgen/tests/test_claude_backend.py +15 -28
  219. massgen/tests/test_claude_code.py +268 -0
  220. massgen/tests/test_claude_code_context_sharing.py +233 -0
  221. massgen/tests/test_claude_code_orchestrator.py +175 -0
  222. massgen/tests/test_cli_backends.py +180 -0
  223. massgen/tests/test_code_execution.py +679 -0
  224. massgen/tests/test_external_agent_backend.py +134 -0
  225. massgen/tests/test_final_presentation_fallback.py +237 -0
  226. massgen/tests/test_gemini_planning_mode.py +351 -0
  227. massgen/tests/test_grok_backend.py +7 -10
  228. massgen/tests/test_http_mcp_server.py +42 -0
  229. massgen/tests/test_integration_simple.py +198 -0
  230. massgen/tests/test_mcp_blocking.py +125 -0
  231. massgen/tests/test_message_context_building.py +29 -47
  232. massgen/tests/test_orchestrator_final_presentation.py +48 -0
  233. massgen/tests/test_path_permission_manager.py +2087 -0
  234. massgen/tests/test_rich_terminal_display.py +14 -13
  235. massgen/tests/test_timeout.py +133 -0
  236. massgen/tests/test_v3_3agents.py +11 -12
  237. massgen/tests/test_v3_simple.py +8 -13
  238. massgen/tests/test_v3_three_agents.py +11 -18
  239. massgen/tests/test_v3_two_agents.py +8 -13
  240. massgen/token_manager/__init__.py +7 -0
  241. massgen/token_manager/token_manager.py +400 -0
  242. massgen/utils.py +52 -16
  243. massgen/v1/agent.py +45 -91
  244. massgen/v1/agents.py +18 -53
  245. massgen/v1/backends/gemini.py +50 -153
  246. massgen/v1/backends/grok.py +21 -54
  247. massgen/v1/backends/oai.py +39 -111
  248. massgen/v1/cli.py +36 -93
  249. massgen/v1/config.py +8 -12
  250. massgen/v1/logging.py +43 -127
  251. massgen/v1/main.py +18 -32
  252. massgen/v1/orchestrator.py +68 -209
  253. massgen/v1/streaming_display.py +62 -163
  254. massgen/v1/tools.py +8 -12
  255. massgen/v1/types.py +9 -23
  256. massgen/v1/utils.py +5 -23
  257. massgen-0.1.0.dist-info/METADATA +1245 -0
  258. massgen-0.1.0.dist-info/RECORD +273 -0
  259. massgen-0.1.0.dist-info/entry_points.txt +2 -0
  260. massgen/frontend/logging/__init__.py +0 -9
  261. massgen/frontend/logging/realtime_logger.py +0 -197
  262. massgen-0.0.3.dist-info/METADATA +0 -568
  263. massgen-0.0.3.dist-info/RECORD +0 -76
  264. massgen-0.0.3.dist-info/entry_points.txt +0 -2
  265. /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
  266. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
  267. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
  268. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
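
The largest single change in this release is the rewrite of massgen/frontend/coordination_ui.py (item 185 above), whose diff follows. CoordinationUI no longer constructs a RealtimeLogger itself (the massgen/frontend/logging package and the logging_enabled flag are removed); callers that still want logging now inject their own logger object. A minimal usage sketch under those assumptions; the orchestrator object and the question string are placeholders, since orchestrator setup is not part of this diff:

# Hypothetical usage sketch based on the coordination_ui.py diff below.
from massgen.frontend.coordination_ui import CoordinationUI


async def run(orchestrator) -> str:
    # 0.1.0 signature: display, logger, display_type, enable_final_presentation, **kwargs.
    # logging_enabled is gone; pass logger=None (the default) or any object exposing the
    # initialize_session/finalize_session/log_* methods the UI calls.
    ui = CoordinationUI(
        display_type="rich_terminal",      # "terminal", "simple", or "rich_terminal"
        enable_final_presentation=True,    # ask the winning agent to present the final answer
    )
    return await ui.coordinate(orchestrator, "Summarize the plan")

# asyncio.run(run(orchestrator))  # requires a configured MassGen orchestrator instance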
massgen/frontend/coordination_ui.py
@@ -1,28 +1,28 @@
+ # -*- coding: utf-8 -*-
  """
  MassGen Coordination UI

- Main interface for coordinating agents with visual display and logging.
+ Main interface for coordinating agents with visual display.
  """

- import time
  import asyncio
- from typing import Optional, List, Dict, Any, AsyncGenerator
+ import time
+ from typing import Any, Dict, List, Optional
+
  from .displays.base_display import BaseDisplay
- from .displays.terminal_display import TerminalDisplay
- from .displays.simple_display import SimpleDisplay
  from .displays.rich_terminal_display import RichTerminalDisplay, is_rich_available
- from .logging.realtime_logger import RealtimeLogger
+ from .displays.simple_display import SimpleDisplay
+ from .displays.terminal_display import TerminalDisplay


  class CoordinationUI:
- """Main coordination interface with display and logging capabilities."""
+ """Main coordination interface with display capabilities."""

  def __init__(
  self,
  display: Optional[BaseDisplay] = None,
- logger: Optional[RealtimeLogger] = None,
+ logger: Optional[Any] = None,
  display_type: str = "terminal",
- logging_enabled: bool = True,
  enable_final_presentation: bool = False,
  **kwargs,
  ):
@@ -32,21 +32,12 @@ class CoordinationUI:
  display: Custom display instance (overrides display_type)
  logger: Custom logger instance
  display_type: Type of display ("terminal", "simple", "rich_terminal", "textual_terminal")
- logging_enabled: Whether to enable real-time logging
  enable_final_presentation: Whether to ask winning agent to present final answer
  **kwargs: Additional configuration passed to display/logger
  """
  self.enable_final_presentation = enable_final_presentation
  self.display = display
- # Filter kwargs for logger (only pass logger-specific params)
- logger_kwargs = {
- k: v for k, v in kwargs.items() if k in ["filename", "update_frequency"]
- }
- self.logger = (
- logger
- if logger is not None
- else (RealtimeLogger(**logger_kwargs) if logging_enabled else None)
- )
+ self.logger = logger
  self.display_type = display_type
  self.config = kwargs

@@ -56,6 +47,50 @@ class CoordinationUI:

  # Flush output configuration (matches rich_terminal_display)
  self._flush_char_delay = 0.03 # 30ms between characters
+
+ # Initialize answer buffer state
+ self._answer_buffer = ""
+ self._answer_timeout_task = None
+ self._final_answer_shown = False
+
+ def _process_reasoning_summary(self, chunk_type: str, summary_delta: str, source: str) -> str:
+ """Process reasoning summary content using display's shared logic."""
+ if self.display and hasattr(self.display, "process_reasoning_content"):
+ return self.display.process_reasoning_content(chunk_type, summary_delta, source)
+ else:
+ # Fallback logic if no display available
+ if chunk_type == "reasoning_summary":
+ summary_active_key = f"_summary_active_{source}"
+ if not getattr(self, summary_active_key, False):
+ setattr(self, summary_active_key, True)
+ return f"📋 [Reasoning Summary]\n{summary_delta}\n"
+ return summary_delta
+ elif chunk_type == "reasoning_summary_done":
+ summary_active_key = f"_summary_active_{source}"
+ if hasattr(self, summary_active_key):
+ setattr(self, summary_active_key, False)
+ return summary_delta
+
+ def _process_reasoning_content(self, chunk_type: str, reasoning_delta: str, source: str) -> str:
+ """Process reasoning summary content using display's shared logic."""
+ if self.display and hasattr(self.display, "process_reasoning_content"):
+ return self.display.process_reasoning_content(chunk_type, reasoning_delta, source)
+ else:
+ # Fallback logic if no display available
+ if chunk_type == "reasoning":
+ reasoning_active_key = f"_reasoning_active_{source}"
+ if not getattr(self, reasoning_active_key, False):
+ setattr(self, reasoning_active_key, True)
+ return f"🧠 [Reasoning Started]\n{reasoning_delta}\n"
+ return reasoning_delta
+ elif chunk_type == "reasoning_done":
+ reasoning_active_key = f"_reasoning_active_{source}"
+ if hasattr(self, reasoning_active_key):
+ setattr(self, reasoning_active_key, False)
+ return reasoning_delta
+
+ def __post_init__(self):
+ """Post-initialization setup."""
  self._flush_word_delay = 0.08 # 80ms after punctuation

  # Initialize answer buffer state
@@ -86,10 +121,8 @@ class CoordinationUI:
  if hasattr(self, "_final_answer_shown"):
  self._final_answer_shown = False

- async def coordinate(
- self, orchestrator, question: str, agent_ids: Optional[List[str]] = None
- ) -> str:
- """Coordinate agents with visual display and logging.
+ async def coordinate(self, orchestrator, question: str, agent_ids: Optional[List[str]] = None) -> str:
+ """Coordinate agents with visual display.

  Args:
  orchestrator: MassGen orchestrator instance
@@ -99,6 +132,12 @@ class CoordinationUI:
  Returns:
  Final coordinated response
  """
+ # Initialize variables that may be referenced in finally block
+ selected_agent = ""
+ vote_results = {}
+ final_result = ""
+ final_answer = ""
+
  # Reset display to ensure clean state for each coordination
  if self.display is not None:
  self.display.cleanup()
@@ -120,9 +159,7 @@
  self.display = SimpleDisplay(self.agent_ids, **self.config)
  elif self.display_type == "rich_terminal":
  if not is_rich_available():
- print(
- "⚠️ Rich library not available. Falling back to terminal display."
- )
+ print("⚠️ Rich library not available. Falling back to terminal display.")
  print(" Install with: pip install rich")
  self.display = TerminalDisplay(self.agent_ids, **self.config)
  else:
@@ -149,6 +186,10 @@

  self.display.initialize(question, log_filename)

+ # Initialize variables to avoid reference before assignment error in finally block
+ selected_agent = None
+ vote_results = {}
+
  try:
  # Process coordination stream
  full_response = ""
@@ -166,37 +207,88 @@
  self.display.update_agent_status(source, status)
  continue

- # Handle builtin tool results
- elif chunk_type == "builtin_tool_results":
- builtin_results = getattr(chunk, "builtin_tool_results", [])
- if builtin_results and source:
- for result in builtin_results:
- tool_type = result.get("tool_type", "unknown")
- status_result = result.get("status", "unknown")
- tool_msg = (
- f"🔧 [{tool_type.title()}] {status_result.title()}"
- )
-
- if tool_type in ["code_interpreter", "code_execution"]:
- code = result.get("code", "") or result.get(
- "input", {}
- ).get("code", "")
- outputs = result.get("outputs")
- if code:
- tool_msg += f" - Code: {code[:50]}{'...' if len(code) > 50 else ''}"
- if outputs:
- tool_msg += f" - Result: {outputs}"
- elif tool_type == "web_search":
- query = result.get("query", "") or result.get(
- "input", {}
- ).get("query", "")
- if query:
- tool_msg += f" - Query: '{query}'"
-
- # Display as tool content for the specific agent
- await self._process_agent_content(source, tool_msg)
+ # Filter out debug chunks from display
+ elif chunk_type == "debug":
+ # Log debug info but don't display it
+ if self.logger:
+ self.logger.log_chunk(source, content, chunk_type)
+ continue
+
+ # Filter out mcp_status chunks - display via agent panel instead of console
+ elif chunk_type == "mcp_status":
+ # Let the display handle MCP status via agent panel
+ if source and source in self.agent_ids:
+ self.display.update_agent_content(source, content, "tool")
+ if self.logger:
+ self.logger.log_chunk(source, content, chunk_type)
+ continue
+
+ # builtin_tool_results handling removed - now handled as simple content
+
+ # Handle reasoning streams
+ elif chunk_type in [
+ "reasoning",
+ "reasoning_done",
+ "reasoning_summary",
+ "reasoning_summary_done",
+ ]:
+ if source:
+ reasoning_content = ""
+ if chunk_type == "reasoning":
+ # Stream reasoning delta as thinking content
+ reasoning_delta = getattr(chunk, "reasoning_delta", "")
+ if reasoning_delta:
+ # reasoning_content = reasoning_delta
+ reasoning_content = self._process_reasoning_content(chunk_type, reasoning_delta, source)
+ elif chunk_type == "reasoning_done":
+ # Complete reasoning text
+ reasoning_text = getattr(chunk, "reasoning_text", "")
+ if reasoning_text:
+ reasoning_content = f"\n🧠 [Reasoning Complete]\n{reasoning_text}\n"
+ else:
+ reasoning_content = "\n🧠 [Reasoning Complete]\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_content(chunk_type, reasoning_content, source)
+
+ # Mark summary as complete - next summary can get a prefix
+ reasoning_active_key = "_reasoning_active"
+ if hasattr(self, reasoning_active_key):
+ delattr(self, reasoning_active_key)
+
+ elif chunk_type == "reasoning_summary":
+ # Stream reasoning summary delta
+ summary_delta = getattr(chunk, "reasoning_summary_delta", "")
+ if summary_delta:
+ reasoning_content = self._process_reasoning_summary(chunk_type, summary_delta, source)
+ elif chunk_type == "reasoning_summary_done":
+ # Complete reasoning summary
+ summary_text = getattr(chunk, "reasoning_summary_text", "")
+ if summary_text:
+ reasoning_content = f"\n📋 [Reasoning Summary Complete]\n{summary_text}\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_summary(chunk_type, "", source)
+
+ # Mark summary as complete - next summary can get a prefix
+ summary_active_key = f"_summary_active_{source}"
+ if hasattr(self, summary_active_key):
+ delattr(self, summary_active_key)
+
+ if reasoning_content:
+ # Display reasoning as thinking content
+ self.display.update_agent_content(source, reasoning_content, "thinking")
+ if self.logger:
+ self.logger.log_agent_content(source, reasoning_content, "reasoning")
  continue

+ # Reset reasoning prefix state when final presentation starts
+ if chunk_type == "status" and "presenting final answer" in content:
+ # Clear all summary active flags for final presentation
+ for attr_name in list(vars(self).keys()):
+ if attr_name.startswith("_summary_active_"):
+ delattr(self, attr_name)
+
  if content:
  full_response += content

@@ -212,6 +304,10 @@
  vote_results = status.get("vote_results", {})
  selected_agent = status.get("selected_agent")

+ # Ensure selected_agent is not None to prevent UnboundLocalError
+ if selected_agent is None:
+ selected_agent = ""
+
  # if vote_results.get('vote_counts'):
  # self._display_vote_results(vote_results)
  # # Allow time for voting results to be visible
@@ -219,20 +315,72 @@
  # time.sleep(1.0)

  # Get final presentation from winning agent
- if (
- self.enable_final_presentation
- and selected_agent
- and vote_results.get("vote_counts")
- ):
- print(f"\n🎤 Final Presentation from {selected_agent}:")
- print("=" * 60)
+ if self.enable_final_presentation and selected_agent and vote_results.get("vote_counts"):
+ # Don't print - let the display handle it
+ # print(f"\n🎤 Final Presentation from {selected_agent}:")
+ # print("=" * 60)

  presentation_content = ""
  try:
- async for chunk in orchestrator.get_final_presentation(
- selected_agent, vote_results
- ):
+ async for chunk in orchestrator.get_final_presentation(selected_agent, vote_results):
  content = getattr(chunk, "content", "") or ""
+ chunk_type = getattr(chunk, "type", "")
+
+ # Use the same reasoning processing as main coordination
+ if chunk_type in [
+ "reasoning",
+ "reasoning_done",
+ "reasoning_summary",
+ "reasoning_summary_done",
+ ]:
+ source = getattr(chunk, "source", selected_agent)
+
+ reasoning_content = ""
+ if chunk_type == "reasoning":
+ # Stream reasoning delta as thinking content
+ reasoning_delta = getattr(chunk, "reasoning_delta", "")
+ if reasoning_delta:
+ # reasoning_content = reasoning_delta
+ reasoning_content = self._process_reasoning_content(chunk_type, reasoning_delta, source)
+ elif chunk_type == "reasoning_done":
+ # Complete reasoning text
+ reasoning_text = getattr(chunk, "reasoning_text", "")
+ if reasoning_text:
+ reasoning_content = f"\n🧠 [Reasoning Complete]\n{reasoning_text}\n"
+ else:
+ reasoning_content = "\n🧠 [Reasoning Complete]\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_content(chunk_type, reasoning_content, source)
+
+ # Mark summary as complete - next summary can get a prefix
+ reasoning_active_key = "_reasoning_active"
+ if hasattr(self, reasoning_active_key):
+ delattr(self, reasoning_active_key)
+
+ elif chunk_type == "reasoning_summary":
+ # Stream reasoning summary delta
+ summary_delta = getattr(chunk, "reasoning_summary_delta", "")
+ if summary_delta:
+ reasoning_content = self._process_reasoning_summary(chunk_type, summary_delta, source)
+ elif chunk_type == "reasoning_summary_done":
+ # Complete reasoning summary
+ summary_text = getattr(chunk, "reasoning_summary_text", "")
+ if summary_text:
+ reasoning_content = f"\n📋 [Reasoning Summary Complete]\n{summary_text}\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_summary(chunk_type, "", source)
+
+ # Reset the prefix flag so next summary can get a prefix
+ summary_active_key = f"_summary_active_{source}"
+ if hasattr(self, summary_active_key):
+ delattr(self, summary_active_key)
+
+ if reasoning_content:
+ # Add to presentation content and display
+ content = reasoning_content
+
  if content:
  # Ensure content is a string
  if isinstance(content, list):
@@ -255,76 +403,53 @@
  if self.display:
  try:
  await self._process_content(selected_agent, content)
- except Exception as e:
+ except Exception:
  # Error processing presentation content - continue gracefully
  pass
- # Also print to console with flush using consistent timing with rich display
- self._print_with_flush(content)
+ # Don't print - let the display handle it
+ # self._print_with_flush(content)
  else:
- # Simple print for non-display mode
+ # Simple print for non-display mode (only if no display)
  print(content, end="", flush=True)
  except AttributeError:
  # get_final_presentation method doesn't exist or failed
- print(
- "Final presentation not available - using coordination result"
- )
+ # print("Final presentation not available - using coordination result")
  presentation_content = ""

  final_answer = presentation_content
- print("\n" + "=" * 60)
+ # Don't print - let the display handle it
+ # print("\n" + "=" * 60)
  # Allow time for final presentation to be fully visible
  time.sleep(1.5)

- # Get the clean final answer from orchestrator's stored state (avoids token spacing issues)
+ # Get the final presentation content (synthesis) or fall back to stored answer
  orchestrator_final_answer = None
- if (
- selected_agent
- and hasattr(orchestrator, "agent_states")
- and selected_agent in orchestrator.agent_states
- ):
+
+ # First try to get the synthesized final presentation content
+ if hasattr(orchestrator, "_final_presentation_content") and orchestrator._final_presentation_content:
+ orchestrator_final_answer = orchestrator._final_presentation_content.strip()
+ elif selected_agent and hasattr(orchestrator, "agent_states") and selected_agent in orchestrator.agent_states:
+ # Fall back to stored answer if no final presentation content
  stored_answer = orchestrator.agent_states[selected_agent].answer
  if stored_answer:
  # Clean up the stored answer
- orchestrator_final_answer = (
- stored_answer.replace("\\", "\n").replace("**", "").strip()
- )
+ orchestrator_final_answer = stored_answer.replace("\\", "\n").replace("**", "").strip()

  # Use orchestrator's clean answer if available, otherwise fall back to presentation
- final_result = (
- orchestrator_final_answer
- if orchestrator_final_answer
- else (final_answer if final_answer else full_response)
- )
- if final_result:
- # print(f"\n🎯 FINAL COORDINATED ANSWER")
- # print("=" * 80)
- # print(f"{final_result.strip()}")
- # print("=" * 80)
-
- # Show which agent was selected
- if selected_agent:
- print(f"✅ Selected by: {selected_agent}")
- if vote_results.get("vote_counts"):
- vote_summary = ", ".join(
- [
- f"{agent}: {count}"
- for agent, count in vote_results["vote_counts"].items()
- ]
- )
- print(f"🗳️ Vote results: {vote_summary}")
- print()
+ final_result = orchestrator_final_answer if orchestrator_final_answer else (final_answer if final_answer else full_response)

  # Finalize session
  if self.logger:
- session_info = self.logger.finalize_session(final_answer, success=True)
- print(f"💾 Session log: {session_info['filename']}")
- print(
- f"⏱️ Duration: {session_info['duration']:.1f}s | Chunks: {session_info['total_chunks']} | Events: {session_info['orchestrator_events']}"
+ session_info = self.logger.finalize_session(
+ final_result if "final_result" in locals() else (final_answer if "final_answer" in locals() else ""),
+ success=True,
  )
+ print(f"💾 Session log: {session_info['filename']}")
+ print(f"⏱️ Duration: {session_info['duration']:.1f}s | Chunks: {session_info['total_chunks']} | Events: {session_info['orchestrator_events']}")

  return final_result

- except Exception as e:
+ except Exception:
  if self.logger:
  self.logger.finalize_session("", success=False)
  raise
@@ -336,20 +461,12 @@
  await asyncio.wait_for(self._answer_timeout_task, timeout=1.0)
  except (asyncio.TimeoutError, asyncio.CancelledError):
  # If it takes too long or was cancelled, force flush
- if (
- hasattr(self, "_answer_buffer")
- and self._answer_buffer
- and not self._final_answer_shown
- ):
+ if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
  await self._flush_final_answer()
  self._answer_timeout_task.cancel()

  # Final check to flush any remaining buffered answer
- if (
- hasattr(self, "_answer_buffer")
- and self._answer_buffer
- and not self._final_answer_shown
- ):
+ if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
  await self._flush_final_answer()

  # Small delay to ensure display updates are processed
@@ -358,24 +475,21 @@
  if self.display:
  self.display.cleanup()

- if selected_agent:
- print(f"✅ Selected by: {selected_agent}")
- if vote_results.get("vote_counts"):
- vote_summary = ", ".join(
- [
- f"{agent}: {count}"
- for agent, count in vote_results["vote_counts"].items()
- ]
- )
- print(f"🗳️ Vote results: {vote_summary}")
- print()
+ # Don't print - display already showed this info
+ # if selected_agent:
+ # print(f"✅ Selected by: {selected_agent}")
+ # if vote_results.get("vote_counts"):
+ # vote_summary = ", ".join([f"{agent}: {count}" for agent, count in vote_results["vote_counts"].items()])
+ # print(f"🗳️ Vote results: {vote_summary}")
+ # print()

  if self.logger:
- session_info = self.logger.finalize_session(final_answer, success=True)
- print(f"💾 Session log: {session_info['filename']}")
- print(
- f"⏱️ Duration: {session_info['duration']:.1f}s | Chunks: {session_info['total_chunks']} | Events: {session_info['orchestrator_events']}"
+ session_info = self.logger.finalize_session(
+ final_result if "final_result" in locals() else (final_answer if "final_answer" in locals() else ""),
+ success=True,
  )
+ print(f"💾 Session log: {session_info['filename']}")
+ print(f"⏱️ Duration: {session_info['duration']:.1f}s | Chunks: {session_info['total_chunks']} | Events: {session_info['orchestrator_events']}")

  async def coordinate_with_context(
  self,
@@ -395,6 +509,12 @@
  Returns:
  Final coordinated response
  """
+ # Initialize variables that may be referenced in finally block
+ selected_agent = ""
+ vote_results = {}
+ final_result = ""
+ final_answer = ""
+
  # Reset display to ensure clean state for each coordination
  if self.display is not None:
  self.display.cleanup()
@@ -416,9 +536,7 @@
  self.display = SimpleDisplay(self.agent_ids, **self.config)
  elif self.display_type == "rich_terminal":
  if not is_rich_available():
- print(
- "⚠️ Rich library not available. Falling back to terminal display."
- )
+ print("⚠️ Rich library not available. Falling back to terminal display.")
  print(" Install with: pip install rich")
  self.display = TerminalDisplay(self.agent_ids, **self.config)
  else:
@@ -433,15 +551,9 @@
  log_filename = None
  if self.logger:
  # Add context info to session initialization
- context_info = (
- f"(with {len(messages)//2} previous exchanges)"
- if len(messages) > 1
- else ""
- )
+ context_info = f"(with {len(messages)//2} previous exchanges)" if len(messages) > 1 else ""
  session_question = f"{question} {context_info}"
- log_filename = self.logger.initialize_session(
- session_question, self.agent_ids
- )
+ log_filename = self.logger.initialize_session(session_question, self.agent_ids)
  monitoring = self.logger.get_monitoring_commands()
  print(f"📁 Real-time log: {log_filename}")
  print(f"💡 Monitor with: {monitoring['tail']}")
@@ -449,6 +561,11 @@

  self.display.initialize(question, log_filename)

+ # Initialize variables to avoid reference before assignment error in finally block
+ selected_agent = None
+ vote_results = {}
+ orchestrator_final_answer = None
+
  try:
  # Process coordination stream with conversation context
  full_response = ""
@@ -467,37 +584,87 @@
  self.display.update_agent_status(source, status)
  continue

- # Handle builtin tool results
- elif chunk_type == "builtin_tool_results":
- builtin_results = getattr(chunk, "builtin_tool_results", [])
- if builtin_results and source:
- for result in builtin_results:
- tool_type = result.get("tool_type", "unknown")
- status_result = result.get("status", "unknown")
- tool_msg = (
- f"🔧 [{tool_type.title()}] {status_result.title()}"
- )
-
- if tool_type in ["code_interpreter", "code_execution"]:
- code = result.get("code", "") or result.get(
- "input", {}
- ).get("code", "")
- outputs = result.get("outputs")
- if code:
- tool_msg += f" - Code: {code[:50]}{'...' if len(code) > 50 else ''}"
- if outputs:
- tool_msg += f" - Result: {outputs}"
- elif tool_type == "web_search":
- query = result.get("query", "") or result.get(
- "input", {}
- ).get("query", "")
- if query:
- tool_msg += f" - Query: '{query}'"
-
- # Display as tool content for the specific agent
- await self._process_agent_content(source, tool_msg)
+ # Filter out debug chunks from display
+ elif chunk_type == "debug":
+ # Log debug info but don't display it
+ if self.logger:
+ self.logger.log_chunk(source, content, chunk_type)
+ continue
+
+ # Filter out mcp_status chunks - display via agent panel instead of console
+ elif chunk_type == "mcp_status":
+ # Let the display handle MCP status via agent panel
+ if source and source in self.agent_ids:
+ self.display.update_agent_content(source, content, "tool")
+ if self.logger:
+ self.logger.log_chunk(source, content, chunk_type)
+ continue
+
+ # builtin_tool_results handling removed - now handled as simple content
+
+ # Handle reasoning streams
+ elif chunk_type in [
+ "reasoning",
+ "reasoning_done",
+ "reasoning_summary",
+ "reasoning_summary_done",
+ ]:
+ if source:
+ reasoning_content = ""
+ if chunk_type == "reasoning":
+ # Stream reasoning delta as thinking content
+ reasoning_delta = getattr(chunk, "reasoning_delta", "")
+ if reasoning_delta:
+ # reasoning_content = reasoning_delta
+ reasoning_content = self._process_reasoning_content(chunk_type, reasoning_delta, source)
+ elif chunk_type == "reasoning_done":
+ # Complete reasoning text
+ reasoning_text = getattr(chunk, "reasoning_text", "")
+ if reasoning_text:
+ reasoning_content = f"\n🧠 [Reasoning Complete]\n{reasoning_text}\n"
+ else:
+ reasoning_content = "\n🧠 [Reasoning Complete]\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_content(chunk_type, reasoning_content, source)
+
+ # Mark summary as complete - next summary can get a prefix
+ reasoning_active_key = "_reasoning_active"
+ if hasattr(self, reasoning_active_key):
+ delattr(self, reasoning_active_key)
+ elif chunk_type == "reasoning_summary":
+ # Stream reasoning summary delta
+ summary_delta = getattr(chunk, "reasoning_summary_delta", "")
+ if summary_delta:
+ reasoning_content = self._process_reasoning_summary(chunk_type, summary_delta, source)
+ elif chunk_type == "reasoning_summary_done":
+ # Complete reasoning summary
+ summary_text = getattr(chunk, "reasoning_summary_text", "")
+ if summary_text:
+ reasoning_content = f"\n📋 [Reasoning Summary Complete]\n{summary_text}\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_summary(chunk_type, "", source)
+
+ # Mark summary as complete - next summary can get a prefix
+ summary_active_key = f"_summary_active_{source}"
+ if hasattr(self, summary_active_key):
+ delattr(self, summary_active_key)
+
+ if reasoning_content:
+ # Display reasoning as thinking content
+ self.display.update_agent_content(source, reasoning_content, "thinking")
+ if self.logger:
+ self.logger.log_agent_content(source, reasoning_content, "reasoning")
  continue

+ # Reset reasoning prefix state when final presentation starts
+ if chunk_type == "status" and "presenting final answer" in content:
+ # Clear all summary active flags for final presentation
+ for attr_name in list(vars(self).keys()):
+ if attr_name.startswith("_summary_active_"):
+ delattr(self, attr_name)
+
  if content:
  full_response += content

@@ -513,6 +680,10 @@
  vote_results = status.get("vote_results", {})
  selected_agent = status.get("selected_agent")

+ # Ensure selected_agent is not None to prevent UnboundLocalError
+ if selected_agent is None:
+ selected_agent = ""
+
  # if vote_results.get('vote_counts'):
  # self._display_vote_results(vote_results)
  # # Allow time for voting results to be visible
@@ -520,20 +691,72 @@
  # time.sleep(1.0)

  # Get final presentation from winning agent
- if (
- self.enable_final_presentation
- and selected_agent
- and vote_results.get("vote_counts")
- ):
- print(f"\n🎤 Final Presentation from {selected_agent}:")
- print("=" * 60)
+ if self.enable_final_presentation and selected_agent and vote_results.get("vote_counts"):
+ # Don't print - let the display handle it
+ # print(f"\n🎤 Final Presentation from {selected_agent}:")
+ # print("=" * 60)

  presentation_content = ""
  try:
- async for chunk in orchestrator.get_final_presentation(
- selected_agent, vote_results
- ):
+ async for chunk in orchestrator.get_final_presentation(selected_agent, vote_results):
  content = getattr(chunk, "content", "") or ""
+ chunk_type = getattr(chunk, "type", "")
+
+ # Use the same reasoning processing as main coordination
+ if chunk_type in [
+ "reasoning",
+ "reasoning_done",
+ "reasoning_summary",
+ "reasoning_summary_done",
+ ]:
+ source = getattr(chunk, "source", selected_agent)
+
+ reasoning_content = ""
+ if chunk_type == "reasoning":
+ # Stream reasoning delta as thinking content
+ reasoning_delta = getattr(chunk, "reasoning_delta", "")
+ if reasoning_delta:
+ # reasoning_content = reasoning_delta
+ reasoning_content = self._process_reasoning_content(chunk_type, reasoning_delta, source)
+ elif chunk_type == "reasoning_done":
+ # Complete reasoning text
+ reasoning_text = getattr(chunk, "reasoning_text", "")
+ if reasoning_text:
+ reasoning_content = f"\n🧠 [Reasoning Complete]\n{reasoning_text}\n"
+ else:
+ reasoning_content = "\n🧠 [Reasoning Complete]\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_content(chunk_type, reasoning_content, source)
+
+ # Mark summary as complete - next summary can get a prefix
+ reasoning_active_key = "_reasoning_active"
+ if hasattr(self, reasoning_active_key):
+ delattr(self, reasoning_active_key)
+
+ elif chunk_type == "reasoning_summary":
+ # Stream reasoning summary delta
+ summary_delta = getattr(chunk, "reasoning_summary_delta", "")
+ if summary_delta:
+ reasoning_content = self._process_reasoning_summary(chunk_type, summary_delta, source)
+ elif chunk_type == "reasoning_summary_done":
+ # Complete reasoning summary
+ summary_text = getattr(chunk, "reasoning_summary_text", "")
+ if summary_text:
+ reasoning_content = f"\n📋 [Reasoning Summary Complete]\n{summary_text}\n"
+
+ # Reset flag using helper method
+ self._process_reasoning_summary(chunk_type, "", source)
+
+ # Reset the prefix flag so next summary can get a prefix
+ summary_active_key = f"_summary_active_{source}"
+ if hasattr(self, summary_active_key):
+ delattr(self, summary_active_key)
+
+ if reasoning_content:
+ # Add to presentation content and display
+ content = reasoning_content
+
  if content:
  # Ensure content is a string
  if isinstance(content, list):
@@ -552,8 +775,8 @@
  getattr(chunk, "type", "presentation"),
  )

- # Stream presentation to console with consistent flush timing
- self._print_with_flush(content)
+ # Don't print - let the display handle it
+ # self._print_with_flush(content)

  # Update display
  await self._process_content(selected_agent, content)
@@ -561,65 +784,40 @@
  if getattr(chunk, "type", "") == "done":
  break

- except Exception as e:
- print(f"\n❌ Error during final presentation: {e}")
+ except Exception:
+ # Don't print - let the display handle errors
+ # print(f"\n❌ Error during final presentation: {e}")
  presentation_content = full_response # Fallback

  final_answer = presentation_content
- print("\n" + "=" * 60)
+ # Don't print - let the display handle it
+ # print("\n" + "=" * 60)
  # Allow time for final presentation to be fully visible
  time.sleep(1.5)

  # Get the clean final answer from orchestrator's stored state
  orchestrator_final_answer = None
- if (
- selected_agent
- and hasattr(orchestrator, "agent_states")
- and selected_agent in orchestrator.agent_states
- ):
+ if selected_agent and hasattr(orchestrator, "agent_states") and selected_agent in orchestrator.agent_states:
  stored_answer = orchestrator.agent_states[selected_agent].answer
  if stored_answer:
  # Clean up the stored answer
- orchestrator_final_answer = (
- stored_answer.replace("\\", "\n").replace("**", "").strip()
- )
+ orchestrator_final_answer = stored_answer.replace("\\", "\n").replace("**", "").strip()

  # Use orchestrator's clean answer if available, otherwise fall back to presentation
- final_result = (
- orchestrator_final_answer
- if orchestrator_final_answer
- else (final_answer if final_answer else full_response)
- )
- if final_result:
- # print(f"\n🎯 FINAL COORDINATED ANSWER")
- # print("=" * 80)
- # print(f"{final_result.strip()}")
- # print("=" * 80)
-
- # Show which agent was selected
- if selected_agent:
- print(f"✅ Selected by: {selected_agent}")
- if vote_results.get("vote_counts"):
- vote_summary = ", ".join(
- [
- f"{agent}: {count}"
- for agent, count in vote_results["vote_counts"].items()
- ]
- )
- print(f"🗳️ Vote results: {vote_summary}")
- print()
+ final_result = orchestrator_final_answer if orchestrator_final_answer else (final_answer if final_answer else full_response)

  # Finalize session
  if self.logger:
- session_info = self.logger.finalize_session(final_answer, success=True)
- print(f"💾 Session log: {session_info['filename']}")
- print(
- f"⏱️ Duration: {session_info['duration']:.1f}s | Chunks: {session_info['total_chunks']} | Events: {session_info['orchestrator_events']}"
+ session_info = self.logger.finalize_session(
+ final_result if "final_result" in locals() else (final_answer if "final_answer" in locals() else ""),
+ success=True,
  )
+ print(f"💾 Session log: {session_info['filename']}")
+ print(f"⏱️ Duration: {session_info['duration']:.1f}s | Chunks: {session_info['total_chunks']} | Events: {session_info['orchestrator_events']}")

  return final_result

- except Exception as e:
+ except Exception:
  if self.logger:
  self.logger.finalize_session("", success=False)
  raise
@@ -631,20 +829,12 @@
  await asyncio.wait_for(self._answer_timeout_task, timeout=1.0)
  except (asyncio.TimeoutError, asyncio.CancelledError):
  # If it takes too long or was cancelled, force flush
- if (
- hasattr(self, "_answer_buffer")
- and self._answer_buffer
- and not self._final_answer_shown
- ):
+ if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
  await self._flush_final_answer()
  self._answer_timeout_task.cancel()

  # Final check to flush any remaining buffered answer
- if (
- hasattr(self, "_answer_buffer")
- and self._answer_buffer
- and not self._final_answer_shown
- ):
+ if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
  await self._flush_final_answer()

  # Small delay to ensure display updates are processed
@@ -655,7 +845,7 @@

  def _display_vote_results(self, vote_results: Dict[str, Any]):
  """Display voting results in a formatted table."""
- print(f"\n🗳️ VOTING RESULTS")
+ print("\n🗳️ VOTING RESULTS")
  print("=" * 50)

  vote_counts = vote_results.get("vote_counts", {})
@@ -665,19 +855,15 @@

  # Display vote counts
  if vote_counts:
- print(f"\n📊 Vote Count:")
- for agent_id, count in sorted(
- vote_counts.items(), key=lambda x: x[1], reverse=True
- ):
+ print("\n📊 Vote Count:")
+ for agent_id, count in sorted(vote_counts.items(), key=lambda x: x[1], reverse=True):
  winner_mark = "🏆" if agent_id == winner else " "
  tie_mark = " (tie-broken)" if is_tie and agent_id == winner else ""
- print(
- f" {winner_mark} {agent_id}: {count} vote{'s' if count != 1 else ''}{tie_mark}"
- )
+ print(f" {winner_mark} {agent_id}: {count} vote{'s' if count != 1 else ''}{tie_mark}")

  # Display voter details
  if voter_details:
- print(f"\n🔍 Vote Details:")
+ print("\n🔍 Vote Details:")
  for voted_for, voters in voter_details.items():
  print(f" → {voted_for}:")
  for voter_info in voters:
@@ -687,9 +873,7 @@

  # Display tie-breaking info
  if is_tie:
- print(
- f"\n⚖️ Tie broken by agent registration order (orchestrator setup order)"
- )
+ print("\n⚖️ Tie broken by agent registration order (orchestrator setup order)")

  # Display summary stats
  total_votes = vote_results.get("total_votes", 0)
@@ -719,11 +903,7 @@
  "Coordinating agents, please wait",
  ]
  ):
- event = (
- f"🔄 {source}: {clean_line}"
- if source and source not in ["coordination_hub", "orchestrator"]
- else f"🔄 {clean_line}"
- )
+ event = f"🔄 {source}: {clean_line}" if source and source not in ["coordination_hub", "orchestrator"] else f"🔄 {clean_line}"
  self.display.add_orchestrator_event(event)
  if self.logger:
  self.logger.log_orchestrator_event(event)
@@ -784,7 +964,7 @@
  pass

  # Handle coordination events (provided answer, votes)
- elif any(marker in content for marker in ["✅", "🗳️", "🔄", "❌"]):
+ elif any(marker in content for marker in ["✅", "🗳️", "🔄", "❌", "⚠️"]):
  clean_line = content.replace("**", "").replace("##", "").strip()
  if clean_line and not any(
  skip in clean_line
@@ -817,11 +997,7 @@
  ):
  # Extract clean final answer content
  clean_content = content.strip()
- if (
- clean_content
- and not clean_content.startswith("---")
- and not clean_content.startswith("*Coordinated by")
- ):
+ if clean_content and not clean_content.startswith("---") and not clean_content.startswith("*Coordinated by"):
  # Add to buffer
  if self._answer_buffer:
  self._answer_buffer += " " + clean_content
@@ -833,9 +1009,7 @@
  self._answer_timeout_task.cancel()

  # Set a timeout to flush the answer (in case streaming stops)
- self._answer_timeout_task = asyncio.create_task(
- self._schedule_final_answer_flush()
- )
+ self._answer_timeout_task = asyncio.create_task(self._schedule_final_answer_flush())

  # Create event for this chunk but don't call show_final_answer yet
  status = self.orchestrator.get_status()
@@ -846,19 +1020,36 @@

  # Only create final event for first chunk to avoid spam
  if self._answer_buffer == clean_content: # First chunk
- if vote_counts:
- vote_summary = ", ".join(
- [
- f"{agent}: {count} vote{'s' if count != 1 else ''}"
- for agent, count in vote_counts.items()
- ]
- )
- tie_info = (
- " (tie-broken by registration order)" if is_tie else ""
- )
- event = f"🎯 FINAL: {selected_agent} selected ({vote_summary}{tie_info}) [buffering...]"
+ # Check if orchestrator timed out
+ orchestrator_timeout = getattr(self.orchestrator, "is_orchestrator_timeout", False)
+
+ if selected_agent == "Unknown" or selected_agent is None:
+ if orchestrator_timeout:
+ # Even with timeout, try to select agent from available votes
+ if vote_counts:
+ # Find agent with most votes
+ max_votes = max(vote_counts.values())
+ tied_agents = [agent for agent, count in vote_counts.items() if count == max_votes]
+ # Use first tied agent (following orchestrator's tie-breaking logic)
+ timeout_selected_agent = tied_agents[0] if tied_agents else None
+ if timeout_selected_agent:
+ vote_summary = ", ".join([f"{agent}: {count}" for agent, count in vote_counts.items()])
+ tie_info = " (tie-broken by registration order)" if len(tied_agents) > 1 else ""
+ event = f"🎯 FINAL: {timeout_selected_agent} selected from partial votes ({vote_summary}{tie_info}) → orchestrator timeout → [buffering...]"
+ else:
+ event = "🎯 FINAL: None selected → orchestrator timeout (no agents completed voting in time) → [buffering...]"
+ else:
+ event = "🎯 FINAL: None selected → orchestrator timeout (no agents completed voting in time) → [buffering...]"
+ else:
+ event = "🎯 FINAL: None selected → [buffering...]"
+ elif vote_counts:
+ vote_summary = ", ".join([f"{agent}: {count} vote{'s' if count != 1 else ''}" for agent, count in vote_counts.items()])
+ tie_info = " (tie-broken by registration order)" if is_tie else ""
+ timeout_info = " (despite timeout)" if orchestrator_timeout else ""
+ event = f"🎯 FINAL: {selected_agent} selected ({vote_summary}{tie_info}){timeout_info} → [buffering...]"
  else:
- event = f"🎯 FINAL: {selected_agent} selected [buffering...]"
+ timeout_info = " (despite timeout)" if orchestrator_timeout else ""
+ event = f"🎯 FINAL: {selected_agent} selected{timeout_info} → [buffering...]"

  self.display.add_orchestrator_event(event)
  if self.logger:
@@ -880,10 +1071,8 @@


  # Convenience functions for common use cases
- async def coordinate_with_terminal_ui(
- orchestrator, question: str, enable_final_presentation: bool = False, **kwargs
- ) -> str:
- """Quick coordination with terminal UI and logging.
+ async def coordinate_with_terminal_ui(orchestrator, question: str, enable_final_presentation: bool = False, **kwargs) -> str:
+ """Quick coordination with terminal UI.

  Args:
  orchestrator: MassGen orchestrator instance
@@ -902,10 +1091,8 @@ async def coordinate_with_terminal_ui(
  return await ui.coordinate(orchestrator, question)


- async def coordinate_with_simple_ui(
- orchestrator, question: str, enable_final_presentation: bool = False, **kwargs
- ) -> str:
- """Quick coordination with simple UI and logging.
+ async def coordinate_with_simple_ui(orchestrator, question: str, enable_final_presentation: bool = False, **kwargs) -> str:
+ """Quick coordination with simple UI.

  Args:
  orchestrator: MassGen orchestrator instance
@@ -923,10 +1110,8 @@ async def coordinate_with_simple_ui(
  return await ui.coordinate(orchestrator, question)


- async def coordinate_with_rich_ui(
- orchestrator, question: str, enable_final_presentation: bool = False, **kwargs
- ) -> str:
- """Quick coordination with rich terminal UI and logging.
+ async def coordinate_with_rich_ui(orchestrator, question: str, enable_final_presentation: bool = False, **kwargs) -> str:
+ """Quick coordination with rich terminal UI.

  Args:
  orchestrator: MassGen orchestrator instance