massgen 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of massgen might be problematic. Click here for more details.

Files changed (82):
  1. massgen/__init__.py +1 -1
  2. massgen/agent_config.py +33 -7
  3. massgen/api_params_handler/_api_params_handler_base.py +3 -0
  4. massgen/api_params_handler/_chat_completions_api_params_handler.py +4 -0
  5. massgen/api_params_handler/_claude_api_params_handler.py +4 -0
  6. massgen/api_params_handler/_gemini_api_params_handler.py +4 -0
  7. massgen/api_params_handler/_response_api_params_handler.py +4 -0
  8. massgen/backend/azure_openai.py +9 -1
  9. massgen/backend/base.py +4 -0
  10. massgen/backend/base_with_custom_tool_and_mcp.py +25 -5
  11. massgen/backend/claude_code.py +9 -1
  12. massgen/backend/docs/permissions_and_context_files.md +2 -2
  13. massgen/backend/gemini.py +35 -6
  14. massgen/backend/gemini_utils.py +30 -0
  15. massgen/backend/response.py +2 -0
  16. massgen/chat_agent.py +9 -3
  17. massgen/cli.py +291 -43
  18. massgen/config_builder.py +163 -18
  19. massgen/configs/README.md +69 -14
  20. massgen/configs/debug/restart_test_controlled.yaml +60 -0
  21. massgen/configs/debug/restart_test_controlled_filesystem.yaml +73 -0
  22. massgen/configs/tools/code-execution/docker_with_sudo.yaml +35 -0
  23. massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +56 -0
  24. massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +65 -0
  25. massgen/configs/tools/custom_tools/computer_use_example.yaml +50 -0
  26. massgen/configs/tools/custom_tools/crawl4ai_example.yaml +55 -0
  27. massgen/configs/tools/custom_tools/multimodal_tools/text_to_file_generation_multi.yaml +61 -0
  28. massgen/configs/tools/custom_tools/multimodal_tools/text_to_file_generation_single.yaml +29 -0
  29. massgen/configs/tools/custom_tools/multimodal_tools/text_to_image_generation_multi.yaml +51 -0
  30. massgen/configs/tools/custom_tools/multimodal_tools/text_to_image_generation_single.yaml +33 -0
  31. massgen/configs/tools/custom_tools/multimodal_tools/text_to_speech_generation_multi.yaml +55 -0
  32. massgen/configs/tools/custom_tools/multimodal_tools/text_to_speech_generation_single.yaml +33 -0
  33. massgen/configs/tools/custom_tools/multimodal_tools/text_to_video_generation_multi.yaml +47 -0
  34. massgen/configs/tools/custom_tools/multimodal_tools/text_to_video_generation_single.yaml +29 -0
  35. massgen/configs/tools/custom_tools/multimodal_tools/understand_audio.yaml +33 -0
  36. massgen/configs/tools/custom_tools/multimodal_tools/understand_file.yaml +34 -0
  37. massgen/configs/tools/custom_tools/multimodal_tools/understand_image.yaml +33 -0
  38. massgen/configs/tools/custom_tools/multimodal_tools/understand_video.yaml +34 -0
  39. massgen/configs/tools/custom_tools/multimodal_tools/youtube_video_analysis.yaml +59 -0
  40. massgen/docker/README.md +83 -0
  41. massgen/filesystem_manager/_code_execution_server.py +22 -7
  42. massgen/filesystem_manager/_docker_manager.py +21 -1
  43. massgen/filesystem_manager/_filesystem_manager.py +9 -0
  44. massgen/filesystem_manager/_path_permission_manager.py +148 -0
  45. massgen/filesystem_manager/_workspace_tools_server.py +0 -997
  46. massgen/formatter/_gemini_formatter.py +73 -0
  47. massgen/frontend/coordination_ui.py +175 -257
  48. massgen/frontend/displays/base_display.py +29 -0
  49. massgen/frontend/displays/rich_terminal_display.py +155 -9
  50. massgen/frontend/displays/simple_display.py +21 -0
  51. massgen/frontend/displays/terminal_display.py +22 -2
  52. massgen/logger_config.py +50 -6
  53. massgen/message_templates.py +283 -15
  54. massgen/orchestrator.py +335 -38
  55. massgen/tests/test_binary_file_blocking.py +274 -0
  56. massgen/tests/test_case_studies.md +12 -12
  57. massgen/tests/test_code_execution.py +178 -0
  58. massgen/tests/test_multimodal_size_limits.py +407 -0
  59. massgen/tests/test_orchestration_restart.py +204 -0
  60. massgen/tool/__init__.py +4 -0
  61. massgen/tool/_manager.py +7 -2
  62. massgen/tool/_multimodal_tools/image_to_image_generation.py +293 -0
  63. massgen/tool/_multimodal_tools/text_to_file_generation.py +455 -0
  64. massgen/tool/_multimodal_tools/text_to_image_generation.py +222 -0
  65. massgen/tool/_multimodal_tools/text_to_speech_continue_generation.py +226 -0
  66. massgen/tool/_multimodal_tools/text_to_speech_transcription_generation.py +217 -0
  67. massgen/tool/_multimodal_tools/text_to_video_generation.py +223 -0
  68. massgen/tool/_multimodal_tools/understand_audio.py +211 -0
  69. massgen/tool/_multimodal_tools/understand_file.py +555 -0
  70. massgen/tool/_multimodal_tools/understand_image.py +316 -0
  71. massgen/tool/_multimodal_tools/understand_video.py +340 -0
  72. massgen/tool/_web_tools/crawl4ai_tool.py +718 -0
  73. massgen/tool/docs/multimodal_tools.md +1368 -0
  74. massgen/tool/workflow_toolkits/__init__.py +26 -0
  75. massgen/tool/workflow_toolkits/post_evaluation.py +216 -0
  76. massgen/utils.py +1 -0
  77. {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/METADATA +101 -69
  78. {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/RECORD +82 -46
  79. {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/WHEEL +0 -0
  80. {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/entry_points.txt +0 -0
  81. {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/licenses/LICENSE +0 -0
  82. {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/top_level.txt +0 -0
massgen/orchestrator.py CHANGED
@@ -44,7 +44,7 @@ from .logger_config import (
44
44
  )
45
45
  from .message_templates import MessageTemplates
46
46
  from .stream_chunk import ChunkType
47
- from .tool import get_workflow_tools
47
+ from .tool import get_post_evaluation_tools, get_workflow_tools
48
48
  from .utils import ActionType, AgentStatus, CoordinationStage
49
49
 
50
50
 
@@ -164,6 +164,14 @@ class Orchestrator(ChatAgent):
164
164
  self.is_orchestrator_timeout: bool = False
165
165
  self.timeout_reason: Optional[str] = None
166
166
 
167
+ # Restart feature state tracking
168
+ self.current_attempt: int = 0
169
+ max_restarts = self.config.coordination_config.max_orchestration_restarts
170
+ self.max_attempts: int = 1 + max_restarts
171
+ self.restart_pending: bool = False
172
+ self.restart_reason: Optional[str] = None
173
+ self.restart_instructions: Optional[str] = None
174
+
167
175
  # Coordination state tracking for cleanup
168
176
  self._active_streams: Dict = {}
169
177
  self._active_tasks: Dict = {}
@@ -264,6 +272,9 @@ class Orchestrator(ChatAgent):
264
272
  self.coordination_tracker.initialize_session(list(self.agents.keys()), self.current_task)
265
273
  self.workflow_phase = "coordinating"
266
274
 
275
+ # Reset restart_pending flag at start of coordination (will be set again if restart needed)
276
+ self.restart_pending = False
277
+
267
278
  # Clear agent workspaces for new turn (if this is a multi-turn conversation with history)
268
279
  if conversation_context and conversation_context.get("conversation_history"):
269
280
  self._clear_agent_workspaces()
@@ -651,7 +662,12 @@ Your answer:"""
651
662
  return {"has_irreversible": True, "blocked_tools": set()}
652
663
 
653
664
  async def _coordinate_agents_with_timeout(self, conversation_context: Optional[Dict[str, Any]] = None) -> AsyncGenerator[StreamChunk, None]:
654
- """Execute coordination with orchestrator-level timeout protection."""
665
+ """Execute coordination with orchestrator-level timeout protection.
666
+
667
+ When restart is needed, this method completes and returns control to CLI,
668
+ which will call coordinate() again (similar to multiturn pattern).
669
+ """
670
+ # Reset timing and state for this attempt
655
671
  self.coordination_start_time = time.time()
656
672
  self.total_tokens = 0
657
673
  self.is_orchestrator_timeout = False
@@ -659,13 +675,19 @@ Your answer:"""
659
675
 
660
676
  log_orchestrator_activity(
661
677
  self.orchestrator_id,
662
- "Starting coordination with timeout",
678
+ f"Starting coordination attempt {self.current_attempt + 1}/{self.max_attempts}",
663
679
  {
664
680
  "timeout_seconds": self.config.timeout_config.orchestrator_timeout_seconds,
665
681
  "agents": list(self.agents.keys()),
682
+ "has_restart_context": bool(self.restart_reason),
666
683
  },
667
684
  )
668
685
 
686
+ # Set log attempt for directory organization
687
+ from massgen.logger_config import set_log_attempt
688
+
689
+ set_log_attempt(self.current_attempt + 1)
690
+
669
691
  # Track active coordination state for cleanup
670
692
  self._active_streams = {}
671
693
  self._active_tasks = {}
@@ -699,6 +721,8 @@ Your answer:"""
699
721
  async for chunk in self._handle_orchestrator_timeout():
700
722
  yield chunk
701
723
 
724
+ # Exit here - if restart is needed, CLI will call coordinate() again
725
+
702
726
  async def _coordinate_agents(self, conversation_context: Optional[Dict[str, Any]] = None) -> AsyncGenerator[StreamChunk, None]:
703
727
  """Execute unified MassGen coordination workflow with real-time streaming."""
704
728
  log_coordination_step(
@@ -1666,10 +1690,16 @@ Your answer:"""
1666
1690
 
1667
1691
  # Extract command execution parameters
1668
1692
  enable_command_execution = False
1693
+ docker_mode = False
1694
+ enable_sudo = False
1669
1695
  if hasattr(agent, "config") and agent.config:
1670
1696
  enable_command_execution = agent.config.backend_params.get("enable_mcp_command_line", False)
1697
+ docker_mode = agent.config.backend_params.get("command_line_execution_mode", "local") == "docker"
1698
+ enable_sudo = agent.config.backend_params.get("command_line_docker_enable_sudo", False)
1671
1699
  elif hasattr(agent, "backend") and hasattr(agent.backend, "backend_params"):
1672
1700
  enable_command_execution = agent.backend.backend_params.get("enable_mcp_command_line", False)
1701
+ docker_mode = agent.backend.backend_params.get("command_line_execution_mode", "local") == "docker"
1702
+ enable_sudo = agent.backend.backend_params.get("command_line_docker_enable_sudo", False)
1673
1703
 
1674
1704
  filesystem_system_message = self.message_templates.filesystem_system_message(
1675
1705
  main_workspace=main_workspace,
@@ -1680,6 +1710,8 @@ Your answer:"""
1680
1710
  enable_image_generation=enable_image_generation,
1681
1711
  agent_answers=answers,
1682
1712
  enable_command_execution=enable_command_execution,
1713
+ docker_mode=docker_mode,
1714
+ enable_sudo=enable_sudo,
1683
1715
  )
1684
1716
  agent_system_message = f"{agent_system_message}\n\n{filesystem_system_message}" if agent_system_message else filesystem_system_message
1685
1717
 
@@ -1724,6 +1756,15 @@ Your answer:"""
1724
1756
  base_system_message=agent_system_message,
1725
1757
  )
1726
1758
 
1759
+ # Inject restart context if this is a restart attempt (like multi-turn context)
1760
+ if self.restart_reason and self.restart_instructions:
1761
+ restart_context = self.message_templates.format_restart_context(
1762
+ self.restart_reason,
1763
+ self.restart_instructions,
1764
+ )
1765
+ # Prepend restart context to user message
1766
+ conversation["user_message"] = restart_context + "\n\n" + conversation["user_message"]
1767
+
1727
1768
  # Track all the context used for this agent execution
1728
1769
  self.coordination_tracker.track_agent_context(
1729
1770
  agent_id,
@@ -2205,48 +2246,81 @@ Your answer:"""
2205
2246
  return ("error", str(e))
2206
2247
 
2207
2248
  async def _present_final_answer(self) -> AsyncGenerator[StreamChunk, None]:
2208
- """Present the final coordinated answer."""
2209
- log_stream_chunk("orchestrator", "content", "## 🎯 Final Coordinated Answer\n")
2210
- yield StreamChunk(type="content", content="## 🎯 Final Coordinated Answer\n")
2249
+ """Present the final coordinated answer with optional post-evaluation and restart loop."""
2211
2250
 
2212
2251
  # Select the best agent based on current state
2213
2252
  if not self._selected_agent:
2214
2253
  self._selected_agent = self._determine_final_agent_from_states()
2215
- if self._selected_agent:
2216
- log_stream_chunk(
2217
- "orchestrator",
2218
- "content",
2219
- f"🏆 Selected Agent: {self._selected_agent}\n",
2220
- )
2221
- yield StreamChunk(
2222
- type="content",
2223
- content=f"🏆 Selected Agent: {self._selected_agent}\n",
2224
- )
2225
-
2226
- if self._selected_agent and self._selected_agent in self.agent_states and self.agent_states[self._selected_agent].answer:
2227
- final_answer = self.agent_states[self._selected_agent].answer # NOTE: This is the raw answer from the winning agent, not the actual final answer.
2228
-
2229
- # Add to conversation history
2230
- self.add_to_history("assistant", final_answer)
2231
2254
 
2232
- log_stream_chunk("orchestrator", "content", f"🏆 Selected Agent: {self._selected_agent}\n")
2233
- yield StreamChunk(type="content", content=f"🏆 Selected Agent: {self._selected_agent}\n")
2234
- log_stream_chunk("orchestrator", "content", final_answer)
2235
- yield StreamChunk(type="content", content=final_answer)
2236
- log_stream_chunk(
2237
- "orchestrator",
2238
- "content",
2239
- f"\n\n---\n*Coordinated by {len(self.agents)} agents via MassGen framework*",
2240
- )
2241
- yield StreamChunk(
2242
- type="content",
2243
- content=f"\n\n---\n*Coordinated by {len(self.agents)} agents via MassGen framework*",
2244
- )
2245
- else:
2255
+ if not self._selected_agent:
2246
2256
  error_msg = "❌ Unable to provide coordinated answer - no successful agents"
2247
2257
  self.add_to_history("assistant", error_msg)
2248
2258
  log_stream_chunk("orchestrator", "error", error_msg)
2249
2259
  yield StreamChunk(type="content", content=error_msg)
2260
+ self.workflow_phase = "presenting"
2261
+ log_stream_chunk("orchestrator", "done", None)
2262
+ yield StreamChunk(type="done")
2263
+ return
2264
+
2265
+ # Get vote results for presentation
2266
+ vote_results = self._get_vote_results()
2267
+
2268
+ log_stream_chunk("orchestrator", "content", "## 🎯 Final Coordinated Answer\n")
2269
+ yield StreamChunk(type="content", content="## 🎯 Final Coordinated Answer\n")
2270
+
2271
+ # Stream final presentation from winning agent
2272
+ log_stream_chunk("orchestrator", "content", f"🏆 Selected Agent: {self._selected_agent}\n")
2273
+ yield StreamChunk(type="content", content=f"🏆 Selected Agent: {self._selected_agent}\n")
2274
+
2275
+ # Stream the final presentation (with full tool support)
2276
+ presentation_content = ""
2277
+ async for chunk in self.get_final_presentation(self._selected_agent, vote_results):
2278
+ if chunk.type == "content" and chunk.content:
2279
+ presentation_content += chunk.content
2280
+ yield chunk
2281
+
2282
+ # Check if post-evaluation should run
2283
+ # Skip post-evaluation on final attempt (user clarification #4)
2284
+ is_final_attempt = self.current_attempt >= (self.max_attempts - 1)
2285
+ should_evaluate = self.max_attempts > 1 and not is_final_attempt
2286
+
2287
+ if should_evaluate:
2288
+ # Run post-evaluation
2289
+ final_answer_to_evaluate = self._final_presentation_content or presentation_content
2290
+ async for chunk in self.post_evaluate_answer(self._selected_agent, final_answer_to_evaluate):
2291
+ yield chunk
2292
+
2293
+ # Check if restart was requested
2294
+ if self.restart_pending and self.current_attempt < (self.max_attempts - 1):
2295
+ # Show restart banner
2296
+ restart_banner = f"""
2297
+
2298
+ 🔄 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
2299
+ ORCHESTRATION RESTART (Attempt {self.current_attempt + 2}/{self.max_attempts})
2300
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
2301
+
2302
+ REASON:
2303
+ {self.restart_reason}
2304
+
2305
+ INSTRUCTIONS FOR NEXT ATTEMPT:
2306
+ {self.restart_instructions}
2307
+
2308
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
2309
+
2310
+ """
2311
+ log_stream_chunk("orchestrator", "status", restart_banner)
2312
+ yield StreamChunk(type="restart_banner", content=restart_banner, source="orchestrator")
2313
+
2314
+ # Reset state for restart (prepare for next coordinate() call)
2315
+ self.handle_restart()
2316
+
2317
+ # Don't add to history or set workflow phase - restart is pending
2318
+ # Exit here - CLI will detect restart_pending and call coordinate() again
2319
+ return
2320
+
2321
+ # No restart - add final answer to conversation history
2322
+ if self._final_presentation_content:
2323
+ self.add_to_history("assistant", self._final_presentation_content)
2250
2324
 
2251
2325
  # Update workflow phase
2252
2326
  self.workflow_phase = "presenting"
@@ -2422,10 +2496,16 @@ Your answer:"""
2422
2496
 
2423
2497
  # Extract command execution parameters
2424
2498
  enable_command_execution = False
2499
+ docker_mode = False
2500
+ enable_sudo = False
2425
2501
  if hasattr(agent, "config") and agent.config:
2426
2502
  enable_command_execution = agent.config.backend_params.get("enable_mcp_command_line", False)
2503
+ docker_mode = agent.config.backend_params.get("command_line_execution_mode", "local") == "docker"
2504
+ enable_sudo = agent.config.backend_params.get("command_line_docker_enable_sudo", False)
2427
2505
  elif hasattr(agent, "backend") and hasattr(agent.backend, "backend_params"):
2428
2506
  enable_command_execution = agent.backend.backend_params.get("enable_mcp_command_line", False)
2507
+ docker_mode = agent.backend.backend_params.get("command_line_execution_mode", "local") == "docker"
2508
+ enable_sudo = agent.backend.backend_params.get("command_line_docker_enable_sudo", False)
2429
2509
  # Check if audio generation is enabled for this agent
2430
2510
  enable_audio_generation = False
2431
2511
  if hasattr(agent, "config") and agent.config:
@@ -2433,6 +2513,20 @@ Your answer:"""
2433
2513
  elif hasattr(agent, "backend") and hasattr(agent.backend, "backend_params"):
2434
2514
  enable_audio_generation = agent.backend.backend_params.get("enable_audio_generation", False)
2435
2515
 
2516
+ # Check if file generation is enabled for this agent
2517
+ enable_file_generation = False
2518
+ if hasattr(agent, "config") and agent.config:
2519
+ enable_file_generation = agent.config.backend_params.get("enable_file_generation", False)
2520
+ elif hasattr(agent, "backend") and hasattr(agent.backend, "backend_params"):
2521
+ enable_file_generation = agent.backend.backend_params.get("enable_file_generation", False)
2522
+
2523
+ # Check if video generation is enabled for this agent
2524
+ enable_video_generation = False
2525
+ if hasattr(agent, "config") and agent.config:
2526
+ enable_video_generation = agent.config.backend_params.get("enable_video_generation", False)
2527
+ elif hasattr(agent, "backend") and hasattr(agent.backend, "backend_params"):
2528
+ enable_video_generation = agent.backend.backend_params.get("enable_video_generation", False)
2529
+
2436
2530
  # Check if agent has write access to context paths (requires file delivery)
2437
2531
  has_irreversible_actions = False
2438
2532
  if agent.backend.filesystem_manager:
@@ -2445,6 +2539,8 @@ Your answer:"""
2445
2539
  agent_system_message,
2446
2540
  enable_image_generation,
2447
2541
  enable_audio_generation,
2542
+ enable_file_generation,
2543
+ enable_video_generation,
2448
2544
  has_irreversible_actions,
2449
2545
  enable_command_execution,
2450
2546
  )
@@ -2483,6 +2579,8 @@ Your answer:"""
2483
2579
  enable_image_generation=enable_image_generation,
2484
2580
  agent_answers=all_answers,
2485
2581
  enable_command_execution=enable_command_execution,
2582
+ docker_mode=docker_mode,
2583
+ enable_sudo=enable_sudo,
2486
2584
  )
2487
2585
  + "\n\n## Instructions\n"
2488
2586
  + base_system_message
@@ -2674,6 +2772,204 @@ Your answer:"""
2674
2772
  # Save logs
2675
2773
  self.save_coordination_logs()
2676
2774
 
2775
+ # Don't yield done here - let _present_final_answer handle final done after post-evaluation
2776
+
2777
+ async def post_evaluate_answer(self, selected_agent_id: str, final_answer: str) -> AsyncGenerator[StreamChunk, None]:
2778
+ """Post-evaluation phase where winning agent evaluates its own answer.
2779
+
2780
+ The agent reviews the final answer and decides whether to submit or restart
2781
+ with specific improvement instructions.
2782
+
2783
+ Args:
2784
+ selected_agent_id: The agent that won the vote and presented the answer
2785
+ final_answer: The final answer that was presented
2786
+
2787
+ Yields:
2788
+ StreamChunk: Stream chunks from the evaluation process
2789
+ """
2790
+ if selected_agent_id not in self.agents:
2791
+ log_stream_chunk("orchestrator", "error", f"Selected agent {selected_agent_id} not found for post-evaluation")
2792
+ yield StreamChunk(type="error", error=f"Selected agent {selected_agent_id} not found")
2793
+ return
2794
+
2795
+ agent = self.agents[selected_agent_id]
2796
+
2797
+ # Use debug override on first attempt if configured
2798
+ eval_answer = final_answer
2799
+ if self.config.debug_final_answer and self.current_attempt == 0:
2800
+ eval_answer = self.config.debug_final_answer
2801
+ log_stream_chunk("orchestrator", "debug", f"Using debug override for post-evaluation: {self.config.debug_final_answer}")
2802
+ yield StreamChunk(
2803
+ type="debug",
2804
+ content=f"[DEBUG MODE] Overriding answer for evaluation: {self.config.debug_final_answer}",
2805
+ source="orchestrator",
2806
+ )
2807
+
2808
+ # Build evaluation message
2809
+ evaluation_content = f"""{self.message_templates.format_original_message(self.current_task or "Task")}
2810
+
2811
+ FINAL ANSWER TO EVALUATE:
2812
+ {eval_answer}
2813
+
2814
+ Review this answer carefully and determine if it fully addresses the original task. Use your available tools to verify claims and check files as needed.
2815
+ Then call either submit(confirmed=True) if the answer is satisfactory, or restart_orchestration(reason, instructions) if improvements are needed."""
2816
+
2817
+ # Get agent's configurable system message
2818
+ agent_system_message = agent.get_configurable_system_message()
2819
+
2820
+ # Build post-evaluation system message
2821
+ base_system_message = self.message_templates.post_evaluation_system_message(agent_system_message)
2822
+
2823
+ # Add filesystem context if available (same as final presentation)
2824
+ if agent.backend.filesystem_manager:
2825
+ main_workspace = str(agent.backend.filesystem_manager.get_current_workspace())
2826
+ temp_workspace = str(agent.backend.filesystem_manager.agent_temporary_workspace) if agent.backend.filesystem_manager.agent_temporary_workspace else None
2827
+ context_paths = agent.backend.filesystem_manager.path_permission_manager.get_context_paths() if agent.backend.filesystem_manager.path_permission_manager else []
2828
+ previous_turns_context = self._get_previous_turns_context_paths()
2829
+ current_turn_num = len(previous_turns_context) + 1 if previous_turns_context else 1
2830
+ turns_to_show = [t for t in previous_turns_context if t["turn"] < current_turn_num - 1]
2831
+ workspace_prepopulated = len(previous_turns_context) > 0
2832
+
2833
+ # Get all answers for context
2834
+ all_answers = {aid: s.answer for aid, s in self.agent_states.items() if s.answer}
2835
+
2836
+ base_system_message = (
2837
+ self.message_templates.filesystem_system_message(
2838
+ main_workspace=main_workspace,
2839
+ temp_workspace=temp_workspace,
2840
+ context_paths=context_paths,
2841
+ previous_turns=turns_to_show,
2842
+ workspace_prepopulated=workspace_prepopulated,
2843
+ enable_image_generation=False,
2844
+ agent_answers=all_answers,
2845
+ enable_command_execution=False,
2846
+ docker_mode=False,
2847
+ enable_sudo=False,
2848
+ )
2849
+ + "\n\n## Post-Evaluation Task\n"
2850
+ + base_system_message
2851
+ )
2852
+
2853
+ # Create evaluation messages
2854
+ evaluation_messages = [
2855
+ {"role": "system", "content": base_system_message},
2856
+ {"role": "user", "content": evaluation_content},
2857
+ ]
2858
+
2859
+ # Get post-evaluation tools
2860
+ api_format = "chat_completions" # Default format
2861
+ if hasattr(agent.backend, "api_format"):
2862
+ api_format = agent.backend.api_format
2863
+ post_eval_tools = get_post_evaluation_tools(api_format=api_format)
2864
+
2865
+ log_stream_chunk("orchestrator", "status", "🔍 Post-evaluation: Reviewing final answer\n")
2866
+ yield StreamChunk(type="status", content="🔍 Post-evaluation: Reviewing final answer\n", source="orchestrator")
2867
+
2868
+ # Stream evaluation with tools (with timeout protection)
2869
+ evaluation_complete = False
2870
+ tool_call_detected = False
2871
+
2872
+ try:
2873
+ timeout_seconds = self.config.timeout_config.orchestrator_timeout_seconds
2874
+ async with asyncio.timeout(timeout_seconds):
2875
+ async for chunk in agent.chat(messages=evaluation_messages, tools=post_eval_tools, reset_chat=True, current_stage=CoordinationStage.POST_EVALUATION):
2876
+ chunk_type = self._get_chunk_type_value(chunk)
2877
+
2878
+ if chunk_type == "content" and chunk.content:
2879
+ log_stream_chunk("orchestrator", "content", chunk.content, selected_agent_id)
2880
+ yield StreamChunk(type="content", content=chunk.content, source=selected_agent_id)
2881
+ elif chunk_type in ["reasoning", "reasoning_done", "reasoning_summary", "reasoning_summary_done"]:
2882
+ reasoning_chunk = StreamChunk(
2883
+ type=chunk_type,
2884
+ content=chunk.content,
2885
+ source=selected_agent_id,
2886
+ reasoning_delta=getattr(chunk, "reasoning_delta", None),
2887
+ reasoning_text=getattr(chunk, "reasoning_text", None),
2888
+ reasoning_summary_delta=getattr(chunk, "reasoning_summary_delta", None),
2889
+ reasoning_summary_text=getattr(chunk, "reasoning_summary_text", None),
2890
+ item_id=getattr(chunk, "item_id", None),
2891
+ content_index=getattr(chunk, "content_index", None),
2892
+ summary_index=getattr(chunk, "summary_index", None),
2893
+ )
2894
+ log_stream_chunk("orchestrator", chunk.type, chunk.content, selected_agent_id)
2895
+ yield reasoning_chunk
2896
+ elif chunk_type == "tool_calls":
2897
+ # Post-evaluation tool call detected
2898
+ tool_call_detected = True
2899
+ if hasattr(chunk, "tool_calls") and chunk.tool_calls:
2900
+ for tool_call in chunk.tool_calls:
2901
+ # Use backend's tool extraction (same as regular coordination)
2902
+ tool_name = agent.backend.extract_tool_name(tool_call)
2903
+ tool_args = agent.backend.extract_tool_arguments(tool_call)
2904
+
2905
+ if tool_name == "submit":
2906
+ log_stream_chunk("orchestrator", "status", "✅ Evaluation complete - answer approved\n")
2907
+ yield StreamChunk(type="status", content="✅ Evaluation complete - answer approved\n", source="orchestrator")
2908
+ evaluation_complete = True
2909
+ elif tool_name == "restart_orchestration":
2910
+ # Parse restart parameters from extracted args
2911
+ self.restart_reason = tool_args.get("reason", "No reason provided")
2912
+ self.restart_instructions = tool_args.get("instructions", "No instructions provided")
2913
+ self.restart_pending = True
2914
+
2915
+ log_stream_chunk("orchestrator", "status", "🔄 Restart requested\n")
2916
+ yield StreamChunk(type="status", content="🔄 Restart requested\n", source="orchestrator")
2917
+ evaluation_complete = True
2918
+ elif chunk_type == "done":
2919
+ log_stream_chunk("orchestrator", "done", None, selected_agent_id)
2920
+ yield StreamChunk(type="done", source=selected_agent_id)
2921
+ elif chunk_type == "error":
2922
+ log_stream_chunk("orchestrator", "error", chunk.error, selected_agent_id)
2923
+ yield StreamChunk(type="error", error=chunk.error, source=selected_agent_id)
2924
+ else:
2925
+ # Pass through other chunk types
2926
+ log_stream_chunk("orchestrator", chunk_type, getattr(chunk, "content", ""), selected_agent_id)
2927
+ yield StreamChunk(
2928
+ type=chunk_type,
2929
+ content=getattr(chunk, "content", ""),
2930
+ source=selected_agent_id,
2931
+ **{k: v for k, v in chunk.__dict__.items() if k not in ["type", "content", "source", "timestamp", "sequence_number"]},
2932
+ )
2933
+ except asyncio.TimeoutError:
2934
+ log_stream_chunk("orchestrator", "status", "⏱️ Post-evaluation timed out - auto-submitting answer\n")
2935
+ yield StreamChunk(type="status", content="⏱️ Post-evaluation timed out - auto-submitting answer\n", source="orchestrator")
2936
+ evaluation_complete = True
2937
+ # Don't set restart_pending - let it default to False (auto-submit)
2938
+ finally:
2939
+ # If no tool was called and evaluation didn't complete, auto-submit
2940
+ if not evaluation_complete and not tool_call_detected:
2941
+ log_stream_chunk("orchestrator", "status", "✅ Auto-submitting answer (no tool call detected)\n")
2942
+ yield StreamChunk(type="status", content="✅ Auto-submitting answer (no tool call detected)\n", source="orchestrator")
2943
+
2944
+ def handle_restart(self):
2945
+ """Reset orchestration state for restart attempt.
2946
+
2947
+ Clears agent states and coordination messages while preserving
2948
+ restart reason and instructions for the next attempt.
2949
+ """
2950
+ log_orchestrator_activity("handle_restart", f"Resetting state for restart attempt {self.current_attempt + 1}")
2951
+
2952
+ # Reset agent states
2953
+ for agent_id in self.agent_states:
2954
+ self.agent_states[agent_id] = AgentState()
2955
+
2956
+ # Clear coordination messages
2957
+ self._coordination_messages = []
2958
+ self._selected_agent = None
2959
+ self._final_presentation_content = None
2960
+
2961
+ # Reset coordination tracker for new attempt
2962
+ self.coordination_tracker = CoordinationTracker()
2963
+ self.coordination_tracker.initialize_session(list(self.agents.keys()))
2964
+
2965
+ # Reset workflow phase to idle so next coordinate() call starts fresh
2966
+ self.workflow_phase = "idle"
2967
+
2968
+ # Increment attempt counter
2969
+ self.current_attempt += 1
2970
+
2971
+ log_orchestrator_activity("handle_restart", f"State reset complete - starting attempt {self.current_attempt + 1}")
2972
+
2677
2973
  def _get_vote_results(self) -> Dict[str, Any]:
2678
2974
  """Get current vote results and statistics."""
2679
2975
  agent_answers = {aid: state.answer for aid, state in self.agent_states.items() if state.answer}
@@ -2867,8 +3163,9 @@ Your answer:"""
2867
3163
  """
2868
3164
  if self.config and hasattr(self.config, "get_configurable_system_message"):
2869
3165
  return self.config.get_configurable_system_message()
2870
- elif self.config and hasattr(self.config, "custom_system_instruction"):
2871
- return self.config.custom_system_instruction
3166
+ elif self.config and hasattr(self.config, "_custom_system_instruction"):
3167
+ # Access private attribute to avoid deprecation warning
3168
+ return self.config._custom_system_instruction
2872
3169
  elif self.config and self.config.backend_params:
2873
3170
  # Check for backend-specific system prompts
2874
3171
  backend_params = self.config.backend_params