massgen-0.1.2-py3-none-any.whl → massgen-0.1.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen might be problematic.
- massgen/__init__.py +1 -1
- massgen/agent_config.py +33 -7
- massgen/api_params_handler/_api_params_handler_base.py +3 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +4 -0
- massgen/api_params_handler/_claude_api_params_handler.py +4 -0
- massgen/api_params_handler/_gemini_api_params_handler.py +4 -0
- massgen/api_params_handler/_response_api_params_handler.py +4 -0
- massgen/backend/azure_openai.py +9 -1
- massgen/backend/base.py +4 -0
- massgen/backend/base_with_custom_tool_and_mcp.py +25 -5
- massgen/backend/claude_code.py +9 -1
- massgen/backend/docs/permissions_and_context_files.md +2 -2
- massgen/backend/gemini.py +35 -6
- massgen/backend/gemini_utils.py +30 -0
- massgen/backend/response.py +2 -0
- massgen/chat_agent.py +9 -3
- massgen/cli.py +291 -43
- massgen/config_builder.py +163 -18
- massgen/configs/README.md +69 -14
- massgen/configs/debug/restart_test_controlled.yaml +60 -0
- massgen/configs/debug/restart_test_controlled_filesystem.yaml +73 -0
- massgen/configs/tools/code-execution/docker_with_sudo.yaml +35 -0
- massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +56 -0
- massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +65 -0
- massgen/configs/tools/custom_tools/computer_use_example.yaml +50 -0
- massgen/configs/tools/custom_tools/crawl4ai_example.yaml +55 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_file_generation_multi.yaml +61 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_file_generation_single.yaml +29 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_image_generation_multi.yaml +51 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_image_generation_single.yaml +33 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_speech_generation_multi.yaml +55 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_speech_generation_single.yaml +33 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_video_generation_multi.yaml +47 -0
- massgen/configs/tools/custom_tools/multimodal_tools/text_to_video_generation_single.yaml +29 -0
- massgen/configs/tools/custom_tools/multimodal_tools/understand_audio.yaml +33 -0
- massgen/configs/tools/custom_tools/multimodal_tools/understand_file.yaml +34 -0
- massgen/configs/tools/custom_tools/multimodal_tools/understand_image.yaml +33 -0
- massgen/configs/tools/custom_tools/multimodal_tools/understand_video.yaml +34 -0
- massgen/configs/tools/custom_tools/multimodal_tools/youtube_video_analysis.yaml +59 -0
- massgen/docker/README.md +83 -0
- massgen/filesystem_manager/_code_execution_server.py +22 -7
- massgen/filesystem_manager/_docker_manager.py +21 -1
- massgen/filesystem_manager/_filesystem_manager.py +9 -0
- massgen/filesystem_manager/_path_permission_manager.py +148 -0
- massgen/filesystem_manager/_workspace_tools_server.py +0 -997
- massgen/formatter/_gemini_formatter.py +73 -0
- massgen/frontend/coordination_ui.py +175 -257
- massgen/frontend/displays/base_display.py +29 -0
- massgen/frontend/displays/rich_terminal_display.py +155 -9
- massgen/frontend/displays/simple_display.py +21 -0
- massgen/frontend/displays/terminal_display.py +22 -2
- massgen/logger_config.py +50 -6
- massgen/message_templates.py +283 -15
- massgen/orchestrator.py +335 -38
- massgen/tests/test_binary_file_blocking.py +274 -0
- massgen/tests/test_case_studies.md +12 -12
- massgen/tests/test_code_execution.py +178 -0
- massgen/tests/test_multimodal_size_limits.py +407 -0
- massgen/tests/test_orchestration_restart.py +204 -0
- massgen/tool/__init__.py +4 -0
- massgen/tool/_manager.py +7 -2
- massgen/tool/_multimodal_tools/image_to_image_generation.py +293 -0
- massgen/tool/_multimodal_tools/text_to_file_generation.py +455 -0
- massgen/tool/_multimodal_tools/text_to_image_generation.py +222 -0
- massgen/tool/_multimodal_tools/text_to_speech_continue_generation.py +226 -0
- massgen/tool/_multimodal_tools/text_to_speech_transcription_generation.py +217 -0
- massgen/tool/_multimodal_tools/text_to_video_generation.py +223 -0
- massgen/tool/_multimodal_tools/understand_audio.py +211 -0
- massgen/tool/_multimodal_tools/understand_file.py +555 -0
- massgen/tool/_multimodal_tools/understand_image.py +316 -0
- massgen/tool/_multimodal_tools/understand_video.py +340 -0
- massgen/tool/_web_tools/crawl4ai_tool.py +718 -0
- massgen/tool/docs/multimodal_tools.md +1368 -0
- massgen/tool/workflow_toolkits/__init__.py +26 -0
- massgen/tool/workflow_toolkits/post_evaluation.py +216 -0
- massgen/utils.py +1 -0
- {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/METADATA +101 -69
- {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/RECORD +82 -46
- {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/WHEEL +0 -0
- {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.2.dist-info → massgen-0.1.4.dist-info}/top_level.txt +0 -0
massgen/frontend/coordination_ui.py
@@ -6,7 +6,6 @@ Main interface for coordinating agents with visual display.
 """
 
 import asyncio
-import time
 from typing import Any, Dict, List, Optional
 
 from .displays.base_display import BaseDisplay
@@ -189,6 +188,7 @@ class CoordinationUI:
         # Initialize variables to avoid reference before assignment error in finally block
         selected_agent = None
         vote_results = {}
+        user_quit = False  # Track if user quit
 
         try:
             # Process coordination stream
@@ -196,6 +196,12 @@ class CoordinationUI:
             final_answer = ""
 
             async for chunk in orchestrator.chat_simple(question):
+                # Check if user requested quit
+                if self.display and hasattr(self.display, "_user_quit_requested") and self.display._user_quit_requested:
+                    # User pressed 'q' - exit gracefully
+                    user_quit = True
+                    raise SystemExit(0)
+
                 content = getattr(chunk, "content", "") or ""
                 source = getattr(chunk, "source", None)
                 chunk_type = getattr(chunk, "type", "")
@@ -282,6 +288,23 @@ class CoordinationUI:
                         self.logger.log_agent_content(source, reasoning_content, "reasoning")
                     continue
 
+                # Handle restart banner
+                elif chunk_type == "restart_banner":
+                    # Extract restart info from orchestrator state
+                    reason = getattr(orchestrator, "restart_reason", "Answer needs improvement")
+                    instructions = getattr(orchestrator, "restart_instructions", "Please address the issues identified")
+                    # Next attempt number (current is 0-indexed, so current_attempt=0 means attempt 1 just finished, attempt 2 is next)
+                    attempt = getattr(orchestrator, "current_attempt", 0) + 2
+                    max_attempts = getattr(orchestrator, "max_attempts", 3)
+
+                    self.display.show_restart_banner(reason, instructions, attempt, max_attempts)
+                    continue
+
+                # Handle restart required signal (internal - don't display)
+                elif chunk_type == "restart_required":
+                    # Signal that orchestration will restart - UI will be reinitialized
+                    continue
+
                 # Reset reasoning prefix state when final presentation starts
                 if chunk_type == "status" and "presenting final answer" in content:
                     # Clear all summary active flags for final presentation
@@ -289,6 +312,17 @@ class CoordinationUI:
                         if attr_name.startswith("_summary_active_"):
                             delattr(self, attr_name)
 
+                # Handle post-evaluation content streaming
+                if source and content and chunk_type == "content":
+                    # Check if we're in post-evaluation
+                    if hasattr(self, "_in_post_evaluation") and self._in_post_evaluation:
+                        if self.display and hasattr(self.display, "show_post_evaluation_content"):
+                            self.display.show_post_evaluation_content(content, source)
+
+                # Detect post-evaluation start
+                if chunk_type == "status" and "Post-evaluation" in content:
+                    self._in_post_evaluation = True
+
                 if content:
                     full_response += content
 
@@ -299,145 +333,26 @@ class CoordinationUI:
                 # Process content by source
                 await self._process_content(source, content)
 
-            #
+            # Get final presentation content from orchestrator state
+            # Note: With restart feature, get_final_presentation is called INSIDE the orchestrator
+            # during _present_final_answer, so chunks already came through the main stream above.
+            # We just need to retrieve the final result for return value.
             status = orchestrator.get_status()
             vote_results = status.get("vote_results", {})
-            selected_agent = status.get("selected_agent")
-
-            # Ensure selected_agent is not None to prevent UnboundLocalError
-            if selected_agent is None:
-                selected_agent = ""
+            selected_agent = status.get("selected_agent", "")
 
-            #
-            # self._display_vote_results(vote_results)
-            # # Allow time for voting results to be visible
-            # import time
-            # time.sleep(1.0)
-
-            # Get final presentation from winning agent
-            # Run final presentation if enabled and there's a selected agent (regardless of votes)
-            if self.enable_final_presentation and selected_agent:
-                # Don't print - let the display handle it
-                # print(f"\n🎤 Final Presentation from {selected_agent}:")
-                # print("=" * 60)
-
-                presentation_content = ""
-                try:
-                    async for chunk in orchestrator.get_final_presentation(selected_agent, vote_results):
-                        content = getattr(chunk, "content", "") or ""
-                        chunk_type = getattr(chunk, "type", "")
-
-                        # Use the same reasoning processing as main coordination
-                        if chunk_type in [
-                            "reasoning",
-                            "reasoning_done",
-                            "reasoning_summary",
-                            "reasoning_summary_done",
-                        ]:
-                            source = getattr(chunk, "source", selected_agent)
-
-                            reasoning_content = ""
-                            if chunk_type == "reasoning":
-                                # Stream reasoning delta as thinking content
-                                reasoning_delta = getattr(chunk, "reasoning_delta", "")
-                                if reasoning_delta:
-                                    # reasoning_content = reasoning_delta
-                                    reasoning_content = self._process_reasoning_content(chunk_type, reasoning_delta, source)
-                            elif chunk_type == "reasoning_done":
-                                # Complete reasoning text
-                                reasoning_text = getattr(chunk, "reasoning_text", "")
-                                if reasoning_text:
-                                    reasoning_content = f"\n🧠 [Reasoning Complete]\n{reasoning_text}\n"
-                                else:
-                                    reasoning_content = "\n🧠 [Reasoning Complete]\n"
-
-                                # Reset flag using helper method
-                                self._process_reasoning_content(chunk_type, reasoning_content, source)
-
-                                # Mark summary as complete - next summary can get a prefix
-                                reasoning_active_key = "_reasoning_active"
-                                if hasattr(self, reasoning_active_key):
-                                    delattr(self, reasoning_active_key)
-
-                            elif chunk_type == "reasoning_summary":
-                                # Stream reasoning summary delta
-                                summary_delta = getattr(chunk, "reasoning_summary_delta", "")
-                                if summary_delta:
-                                    reasoning_content = self._process_reasoning_summary(chunk_type, summary_delta, source)
-                            elif chunk_type == "reasoning_summary_done":
-                                # Complete reasoning summary
-                                summary_text = getattr(chunk, "reasoning_summary_text", "")
-                                if summary_text:
-                                    reasoning_content = f"\n📋 [Reasoning Summary Complete]\n{summary_text}\n"
-
-                                # Reset flag using helper method
-                                self._process_reasoning_summary(chunk_type, "", source)
-
-                                # Reset the prefix flag so next summary can get a prefix
-                                summary_active_key = f"_summary_active_{source}"
-                                if hasattr(self, summary_active_key):
-                                    delattr(self, summary_active_key)
-
-                            if reasoning_content:
-                                # Add to presentation content and display
-                                content = reasoning_content
-
-                        if content:
-                            # Ensure content is a string
-                            if isinstance(content, list):
-                                content = " ".join(str(item) for item in content)
-                            elif not isinstance(content, str):
-                                content = str(content)
-
-                            # Simple content accumulation - let the display handle formatting
-                            presentation_content += content
-
-                            # Log presentation chunk
-                            if self.logger:
-                                self.logger.log_chunk(
-                                    selected_agent,
-                                    content,
-                                    getattr(chunk, "type", "presentation"),
-                                )
-
-                            # Display the presentation in real-time
-                            if self.display:
-                                try:
-                                    await self._process_content(selected_agent, content)
-                                except Exception:
-                                    # Error processing presentation content - continue gracefully
-                                    pass
-                                # Don't print - let the display handle it
-                                # self._print_with_flush(content)
-                            else:
-                                # Simple print for non-display mode (only if no display)
-                                print(content, end="", flush=True)
-                except AttributeError:
-                    # get_final_presentation method doesn't exist or failed
-                    # print("Final presentation not available - using coordination result")
-                    presentation_content = ""
-
-                final_answer = presentation_content
-                # Don't print - let the display handle it
-                # print("\n" + "=" * 60)
-                # Allow time for final presentation to be fully visible
-                time.sleep(1.5)
-
-            # Get the final presentation content (synthesis) or fall back to stored answer
+            # Get the final presentation content from orchestrator state
            orchestrator_final_answer = None
-
-            # First try to get the synthesized final presentation content
             if hasattr(orchestrator, "_final_presentation_content") and orchestrator._final_presentation_content:
                 orchestrator_final_answer = orchestrator._final_presentation_content.strip()
             elif selected_agent and hasattr(orchestrator, "agent_states") and selected_agent in orchestrator.agent_states:
                 # Fall back to stored answer if no final presentation content
                 stored_answer = orchestrator.agent_states[selected_agent].answer
                 if stored_answer:
-
-                    orchestrator_final_answer = stored_answer.replace("\\", "\n").replace("**", "").strip()
+                    orchestrator_final_answer = stored_answer.strip()
 
-            # Use orchestrator's clean answer
-            final_result = orchestrator_final_answer if orchestrator_final_answer else
+            # Use orchestrator's clean answer or fall back to full response
+            final_result = orchestrator_final_answer if orchestrator_final_answer else full_response
 
             # Finalize session
             if self.logger:
@@ -450,30 +365,59 @@ class CoordinationUI:
 
             return final_result
 
+        except SystemExit:
+            # User pressed 'q' - cleanup and exit gracefully
+            if self.logger:
+                self.logger.finalize_session("User quit", success=True)
+            # Cleanup agent backends
+            if hasattr(orchestrator, "agents"):
+                for agent_id, agent in orchestrator.agents.items():
+                    if hasattr(agent.backend, "reset_state"):
+                        try:
+                            await agent.backend.reset_state()
+                        except Exception:
+                            pass
+            raise
         except Exception:
             if self.logger:
                 self.logger.finalize_session("", success=False)
             raise
         finally:
             # Wait for any pending timeout task to complete before cleanup
+            # Wrap in try-except to handle cancellation gracefully (e.g., when user presses 'q')
             if hasattr(self, "_answer_timeout_task") and self._answer_timeout_task:
                 try:
                     # Give the task a chance to complete
                     await asyncio.wait_for(self._answer_timeout_task, timeout=1.0)
                 except (asyncio.TimeoutError, asyncio.CancelledError):
                     # If it takes too long or was cancelled, force flush
-
-
-
+                    try:
+                        if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
+                            await self._flush_final_answer()
+                    except asyncio.CancelledError:
+                        pass  # Silently handle cancellation
+                    try:
+                        self._answer_timeout_task.cancel()
+                    except Exception:
+                        pass
 
             # Final check to flush any remaining buffered answer
-
-
+            try:
+                if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
+                    await self._flush_final_answer()
+            except asyncio.CancelledError:
+                pass  # Silently handle cancellation
 
             # Small delay to ensure display updates are processed
-
-
-
+            try:
+                await asyncio.sleep(0.1)
+            except asyncio.CancelledError:
+                pass  # Silently handle cancellation
+
+            # Only cleanup (which shows inspection menu) if coordination is truly finished
+            # Check workflow_phase to see if we're in "presenting" state (finished) vs still coordinating (restarting)
+            is_finished = hasattr(orchestrator, "workflow_phase") and orchestrator.workflow_phase == "presenting"
+            if self.display and is_finished:
                 self.display.cleanup()
 
             # Don't print - display already showed this info
@@ -484,7 +428,7 @@ class CoordinationUI:
             # print(f"🗳️ Vote results: {vote_summary}")
             # print()
 
-            if self.logger:
+            if self.logger and is_finished:
                 session_info = self.logger.finalize_session(
                     final_result if "final_result" in locals() else (final_answer if "final_answer" in locals() else ""),
                     success=True,
@@ -566,6 +510,7 @@ class CoordinationUI:
         selected_agent = None
         vote_results = {}
         orchestrator_final_answer = None
+        user_quit = False  # Track if user quit
 
         try:
             # Process coordination stream with conversation context
@@ -574,6 +519,12 @@ class CoordinationUI:
 
             # Use the orchestrator's chat method with full message context
             async for chunk in orchestrator.chat(messages):
+                # Check if user requested quit
+                if self.display and hasattr(self.display, "_user_quit_requested") and self.display._user_quit_requested:
+                    # User pressed 'q' - exit gracefully
+                    user_quit = True
+                    raise SystemExit(0)
+
                 content = getattr(chunk, "content", "") or ""
                 source = getattr(chunk, "source", None)
                 chunk_type = getattr(chunk, "type", "")
@@ -659,6 +610,23 @@ class CoordinationUI:
                         self.logger.log_agent_content(source, reasoning_content, "reasoning")
                     continue
 
+                # Handle restart banner
+                elif chunk_type == "restart_banner":
+                    # Extract restart info from orchestrator state
+                    reason = getattr(orchestrator, "restart_reason", "Answer needs improvement")
+                    instructions = getattr(orchestrator, "restart_instructions", "Please address the issues identified")
+                    # Next attempt number (current is 0-indexed, so current_attempt=0 means attempt 1 just finished, attempt 2 is next)
+                    attempt = getattr(orchestrator, "current_attempt", 0) + 2
+                    max_attempts = getattr(orchestrator, "max_attempts", 3)
+
+                    self.display.show_restart_banner(reason, instructions, attempt, max_attempts)
+                    continue
+
+                # Handle restart required signal (internal - don't display)
+                elif chunk_type == "restart_required":
+                    # Signal that orchestration will restart - UI will be reinitialized
+                    continue
+
                 # Reset reasoning prefix state when final presentation starts
                 if chunk_type == "status" and "presenting final answer" in content:
                     # Clear all summary active flags for final presentation
@@ -666,6 +634,17 @@ class CoordinationUI:
                         if attr_name.startswith("_summary_active_"):
                             delattr(self, attr_name)
 
+                # Handle post-evaluation content streaming
+                if source and content and chunk_type == "content":
+                    # Check if we're in post-evaluation by looking for the status message
+                    if hasattr(self, "_in_post_evaluation") and self._in_post_evaluation:
+                        if self.display and hasattr(self.display, "show_post_evaluation_content"):
+                            self.display.show_post_evaluation_content(content, source)
+
+                # Detect post-evaluation start
+                if chunk_type == "status" and "Post-evaluation" in content:
+                    self._in_post_evaluation = True
+
                 if content:
                     full_response += content
 
@@ -691,122 +670,23 @@ class CoordinationUI:
             # import time
             # time.sleep(1.0)
 
-            # Get final presentation from
-            #
-
-
-                # print(f"\n🎤 Final Presentation from {selected_agent}:")
-                # print("=" * 60)
-
-                presentation_content = ""
-                try:
-                    async for chunk in orchestrator.get_final_presentation(selected_agent, vote_results):
-                        content = getattr(chunk, "content", "") or ""
-                        chunk_type = getattr(chunk, "type", "")
-
-                        # Use the same reasoning processing as main coordination
-                        if chunk_type in [
-                            "reasoning",
-                            "reasoning_done",
-                            "reasoning_summary",
-                            "reasoning_summary_done",
-                        ]:
-                            source = getattr(chunk, "source", selected_agent)
-
-                            reasoning_content = ""
-                            if chunk_type == "reasoning":
-                                # Stream reasoning delta as thinking content
-                                reasoning_delta = getattr(chunk, "reasoning_delta", "")
-                                if reasoning_delta:
-                                    # reasoning_content = reasoning_delta
-                                    reasoning_content = self._process_reasoning_content(chunk_type, reasoning_delta, source)
-                            elif chunk_type == "reasoning_done":
-                                # Complete reasoning text
-                                reasoning_text = getattr(chunk, "reasoning_text", "")
-                                if reasoning_text:
-                                    reasoning_content = f"\n🧠 [Reasoning Complete]\n{reasoning_text}\n"
-                                else:
-                                    reasoning_content = "\n🧠 [Reasoning Complete]\n"
-
-                                # Reset flag using helper method
-                                self._process_reasoning_content(chunk_type, reasoning_content, source)
-
-                                # Mark summary as complete - next summary can get a prefix
-                                reasoning_active_key = "_reasoning_active"
-                                if hasattr(self, reasoning_active_key):
-                                    delattr(self, reasoning_active_key)
-
-                            elif chunk_type == "reasoning_summary":
-                                # Stream reasoning summary delta
-                                summary_delta = getattr(chunk, "reasoning_summary_delta", "")
-                                if summary_delta:
-                                    reasoning_content = self._process_reasoning_summary(chunk_type, summary_delta, source)
-                            elif chunk_type == "reasoning_summary_done":
-                                # Complete reasoning summary
-                                summary_text = getattr(chunk, "reasoning_summary_text", "")
-                                if summary_text:
-                                    reasoning_content = f"\n📋 [Reasoning Summary Complete]\n{summary_text}\n"
-
-                                # Reset flag using helper method
-                                self._process_reasoning_summary(chunk_type, "", source)
-
-                                # Reset the prefix flag so next summary can get a prefix
-                                summary_active_key = f"_summary_active_{source}"
-                                if hasattr(self, summary_active_key):
-                                    delattr(self, summary_active_key)
-
-                            if reasoning_content:
-                                # Add to presentation content and display
-                                content = reasoning_content
-
-                        if content:
-                            # Ensure content is a string
-                            if isinstance(content, list):
-                                content = " ".join(str(item) for item in content)
-                            elif not isinstance(content, str):
-                                content = str(content)
-
-                            # Simple content accumulation - let the display handle formatting
-                            presentation_content += content
-
-                            # Log presentation chunk
-                            if self.logger:
-                                self.logger.log_chunk(
-                                    selected_agent,
-                                    content,
-                                    getattr(chunk, "type", "presentation"),
-                                )
-
-                            # Don't print - let the display handle it
-                            # self._print_with_flush(content)
+            # Get final presentation content from orchestrator state
+            # Note: With restart feature, get_final_presentation is called INSIDE the orchestrator
+            # during _present_final_answer, so chunks already came through the main stream above.
+            # We just need to retrieve the final result for return value.
 
-
-                        await self._process_content(selected_agent, content)
-
-                        if getattr(chunk, "type", "") == "done":
-                            break
-
-                except Exception:
-                    # Don't print - let the display handle errors
-                    # print(f"\n❌ Error during final presentation: {e}")
-                    presentation_content = full_response  # Fallback
-
-                final_answer = presentation_content
-                # Don't print - let the display handle it
-                # print("\n" + "=" * 60)
-                # Allow time for final presentation to be fully visible
-                time.sleep(1.5)
-
-            # Get the clean final answer from orchestrator's stored state
+            # Get the final answer from orchestrator's stored state
             orchestrator_final_answer = None
-            if
+            if hasattr(orchestrator, "_final_presentation_content") and orchestrator._final_presentation_content:
+                orchestrator_final_answer = orchestrator._final_presentation_content.strip()
+            elif selected_agent and hasattr(orchestrator, "agent_states") and selected_agent in orchestrator.agent_states:
+                # Fall back to stored answer if no final presentation content
                 stored_answer = orchestrator.agent_states[selected_agent].answer
                 if stored_answer:
-
-                    orchestrator_final_answer = stored_answer.replace("\\", "\n").replace("**", "").strip()
+                    orchestrator_final_answer = stored_answer.strip()
 
-            # Use orchestrator's clean answer
-            final_result = orchestrator_final_answer if orchestrator_final_answer else
+            # Use orchestrator's clean answer or fall back to full response
+            final_result = orchestrator_final_answer if orchestrator_final_answer else full_response
 
             # Finalize session
             if self.logger:
@@ -819,30 +699,58 @@ class CoordinationUI:
 
             return final_result
 
+        except SystemExit:
+            # User pressed 'q' - cleanup and exit gracefully
+            if self.logger:
+                self.logger.finalize_session("User quit", success=True)
+            # Cleanup agent backends
+            if hasattr(orchestrator, "agents"):
+                for agent_id, agent in orchestrator.agents.items():
+                    if hasattr(agent.backend, "reset_state"):
+                        try:
+                            await agent.backend.reset_state()
+                        except Exception:
+                            pass
+            raise
         except Exception:
             if self.logger:
                 self.logger.finalize_session("", success=False)
             raise
         finally:
             # Wait for any pending timeout task to complete before cleanup
+            # Wrap in try-except to handle cancellation gracefully (e.g., when user presses 'q')
             if hasattr(self, "_answer_timeout_task") and self._answer_timeout_task:
                 try:
                     # Give the task a chance to complete
                     await asyncio.wait_for(self._answer_timeout_task, timeout=1.0)
                 except (asyncio.TimeoutError, asyncio.CancelledError):
                     # If it takes too long or was cancelled, force flush
-
-
-
+                    try:
+                        if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
+                            await self._flush_final_answer()
+                    except asyncio.CancelledError:
+                        pass  # Silently handle cancellation
+                    try:
+                        self._answer_timeout_task.cancel()
+                    except Exception:
+                        pass
 
             # Final check to flush any remaining buffered answer
-
-
+            try:
+                if hasattr(self, "_answer_buffer") and self._answer_buffer and not self._final_answer_shown:
+                    await self._flush_final_answer()
+            except asyncio.CancelledError:
+                pass  # Silently handle cancellation
 
             # Small delay to ensure display updates are processed
-
+            try:
+                await asyncio.sleep(0.1)
+            except asyncio.CancelledError:
+                pass  # Silently handle cancellation
 
-            if
+            # Only cleanup (which shows inspection menu) if coordination is truly finished
+            is_finished = hasattr(orchestrator, "workflow_phase") and orchestrator.workflow_phase == "presenting"
+            if self.display and is_finished:
                 self.display.cleanup()
 
     def _display_vote_results(self, vote_results: Dict[str, Any]):
@@ -943,6 +851,16 @@ class CoordinationUI:
         if self._final_answer_shown or not self._answer_buffer.strip():
             return
 
+        # Don't show final answer (and inspection menu) if post-evaluation might still run
+        # Only show when orchestration is TRULY finished
+        if hasattr(self.orchestrator, "max_attempts"):
+            post_eval_enabled = self.orchestrator.max_attempts > 1
+            is_finished = hasattr(self.orchestrator, "workflow_phase") and self.orchestrator.workflow_phase == "presenting"
+
+            # If post-eval is enabled, only show after workflow is finished
+            if post_eval_enabled and not is_finished:
+                return
+
         # Get orchestrator status for voting results and winner
         status = self.orchestrator.get_status()
         selected_agent = status.get("selected_agent", "Unknown")
@@ -951,7 +869,7 @@ class CoordinationUI:
         # Mark as shown to prevent duplicate calls
         self._final_answer_shown = True
 
-        # Show the final answer
+        # Show the final answer (which includes inspection menu)
         self.display.show_final_answer(
             self._answer_buffer.strip(),
             vote_results=vote_results,
massgen/frontend/displays/base_display.py
@@ -61,6 +61,35 @@ class BaseDisplay(ABC):
             selected_agent: The selected agent (optional)
         """
 
+    @abstractmethod
+    def show_post_evaluation_content(self, content: str, agent_id: str):
+        """Display post-evaluation streaming content.
+
+        Args:
+            content: Post-evaluation content from the agent
+            agent_id: The agent performing the evaluation
+        """
+
+    @abstractmethod
+    def show_restart_banner(self, reason: str, instructions: str, attempt: int, max_attempts: int):
+        """Display restart decision banner.
+
+        Args:
+            reason: Why the restart was triggered
+            instructions: Instructions for the next attempt
+            attempt: Next attempt number
+            max_attempts: Maximum attempts allowed
+        """
+
+    @abstractmethod
+    def show_restart_context_panel(self, reason: str, instructions: str):
+        """Display restart context panel at top of UI (for attempt 2+).
+
+        Args:
+            reason: Why the previous attempt restarted
+            instructions: Instructions for this attempt
+        """
+
     @abstractmethod
     def cleanup(self):
         """Clean up display resources."""