massgen 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen might be problematic. Click here for more details.
- massgen/__init__.py +94 -0
- massgen/agent_config.py +507 -0
- massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
- massgen/backend/Function calling openai responses.md +1161 -0
- massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
- massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
- massgen/backend/__init__.py +25 -0
- massgen/backend/base.py +180 -0
- massgen/backend/chat_completions.py +228 -0
- massgen/backend/claude.py +661 -0
- massgen/backend/gemini.py +652 -0
- massgen/backend/grok.py +187 -0
- massgen/backend/response.py +397 -0
- massgen/chat_agent.py +440 -0
- massgen/cli.py +686 -0
- massgen/configs/README.md +293 -0
- massgen/configs/creative_team.yaml +53 -0
- massgen/configs/gemini_4o_claude.yaml +31 -0
- massgen/configs/news_analysis.yaml +51 -0
- massgen/configs/research_team.yaml +51 -0
- massgen/configs/single_agent.yaml +18 -0
- massgen/configs/single_flash2.5.yaml +44 -0
- massgen/configs/technical_analysis.yaml +51 -0
- massgen/configs/three_agents_default.yaml +31 -0
- massgen/configs/travel_planning.yaml +51 -0
- massgen/configs/two_agents.yaml +39 -0
- massgen/frontend/__init__.py +20 -0
- massgen/frontend/coordination_ui.py +945 -0
- massgen/frontend/displays/__init__.py +24 -0
- massgen/frontend/displays/base_display.py +83 -0
- massgen/frontend/displays/rich_terminal_display.py +3497 -0
- massgen/frontend/displays/simple_display.py +93 -0
- massgen/frontend/displays/terminal_display.py +381 -0
- massgen/frontend/logging/__init__.py +9 -0
- massgen/frontend/logging/realtime_logger.py +197 -0
- massgen/message_templates.py +431 -0
- massgen/orchestrator.py +1222 -0
- massgen/tests/__init__.py +10 -0
- massgen/tests/multi_turn_conversation_design.md +214 -0
- massgen/tests/multiturn_llm_input_analysis.md +189 -0
- massgen/tests/test_case_studies.md +113 -0
- massgen/tests/test_claude_backend.py +310 -0
- massgen/tests/test_grok_backend.py +160 -0
- massgen/tests/test_message_context_building.py +293 -0
- massgen/tests/test_rich_terminal_display.py +378 -0
- massgen/tests/test_v3_3agents.py +117 -0
- massgen/tests/test_v3_simple.py +216 -0
- massgen/tests/test_v3_three_agents.py +272 -0
- massgen/tests/test_v3_two_agents.py +176 -0
- massgen/utils.py +79 -0
- massgen/v1/README.md +330 -0
- massgen/v1/__init__.py +91 -0
- massgen/v1/agent.py +605 -0
- massgen/v1/agents.py +330 -0
- massgen/v1/backends/gemini.py +584 -0
- massgen/v1/backends/grok.py +410 -0
- massgen/v1/backends/oai.py +571 -0
- massgen/v1/cli.py +351 -0
- massgen/v1/config.py +169 -0
- massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
- massgen/v1/examples/fast_config.yaml +44 -0
- massgen/v1/examples/production.yaml +70 -0
- massgen/v1/examples/single_agent.yaml +39 -0
- massgen/v1/logging.py +974 -0
- massgen/v1/main.py +368 -0
- massgen/v1/orchestrator.py +1138 -0
- massgen/v1/streaming_display.py +1190 -0
- massgen/v1/tools.py +160 -0
- massgen/v1/types.py +245 -0
- massgen/v1/utils.py +199 -0
- massgen-0.0.3.dist-info/METADATA +568 -0
- massgen-0.0.3.dist-info/RECORD +76 -0
- massgen-0.0.3.dist-info/WHEEL +5 -0
- massgen-0.0.3.dist-info/entry_points.txt +2 -0
- massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
- massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/orchestrator.py
ADDED
|
@@ -0,0 +1,1222 @@
|
|
|
1
|
+
"""
|
|
2
|
+
MassGen Orchestrator Agent - Chat interface that manages sub-agents internally.
|
|
3
|
+
|
|
4
|
+
The orchestrator presents a unified chat interface to users while coordinating
|
|
5
|
+
multiple sub-agents using the proven binary decision framework behind the scenes.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
from typing import Dict, List, Optional, Any, AsyncGenerator
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from .message_templates import MessageTemplates
|
|
12
|
+
from .agent_config import AgentConfig
|
|
13
|
+
from .backend.base import StreamChunk
|
|
14
|
+
from .chat_agent import ChatAgent
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class AgentState:
    """Runtime state for an agent during coordination.

    One instance is kept per sub-agent by the orchestrator for the duration
    of a coordination round.

    Attributes:
        answer: The agent's current answer/summary, if any
        has_voted: Whether the agent has voted in the current round
        votes: Dictionary storing vote data for this agent
        restart_pending: Whether the agent should gracefully restart due to new answers
    """

    # None until the agent produces an answer via the new_answer workflow tool.
    answer: Optional[str] = None
    # Reset to False by the orchestrator whenever any agent submits a new
    # answer, since new answers invalidate all existing votes.
    has_voted: bool = False
    # default_factory avoids the shared-mutable-default pitfall for the dict.
    votes: Dict[str, Any] = field(default_factory=dict)
    # Set True by the orchestrator to request a graceful restart of this agent.
    restart_pending: bool = False
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class Orchestrator(ChatAgent):
|
|
35
|
+
"""
|
|
36
|
+
Orchestrator Agent - Unified chat interface with sub-agent coordination.
|
|
37
|
+
|
|
38
|
+
The orchestrator acts as a single agent from the user's perspective, but internally
|
|
39
|
+
coordinates multiple sub-agents using the proven binary decision framework.
|
|
40
|
+
|
|
41
|
+
Key Features:
|
|
42
|
+
- Unified chat interface (same as any individual agent)
|
|
43
|
+
- Automatic sub-agent coordination and conflict resolution
|
|
44
|
+
- Transparent MassGen workflow execution
|
|
45
|
+
- Real-time streaming with proper source attribution
|
|
46
|
+
- Graceful restart mechanism for dynamic case transitions
|
|
47
|
+
- Session management
|
|
48
|
+
|
|
49
|
+
TODO - Missing Configuration Options:
|
|
50
|
+
- Option to include/exclude voting details in user messages
|
|
51
|
+
- Configurable timeout settings for agent responses
|
|
52
|
+
- Configurable retry limits and backoff strategies
|
|
53
|
+
- Custom voting strategies beyond simple majority
|
|
54
|
+
- Configurable presentation formats for final answers
|
|
55
|
+
- Advanced coordination workflows (hierarchical, weighted voting, etc.)
|
|
56
|
+
|
|
57
|
+
Restart Behavior:
|
|
58
|
+
When an agent provides new_answer, all agents gracefully restart to ensure
|
|
59
|
+
consistent coordination state. This allows all agents to transition to Case 2
|
|
60
|
+
evaluation with the new answers available.
|
|
61
|
+
"""
|
|
62
|
+
|
|
63
|
+
def __init__(
    self,
    agents: Dict[str, ChatAgent],
    orchestrator_id: str = "orchestrator",
    session_id: Optional[str] = None,
    config: Optional[AgentConfig] = None,
):
    """
    Initialize MassGen orchestrator.

    Args:
        agents: Dictionary of {agent_id: ChatAgent} - can be individual agents or other orchestrators
        orchestrator_id: Unique identifier for this orchestrator (default: "orchestrator")
        session_id: Optional session identifier
        config: Optional AgentConfig for customizing orchestrator behavior
    """
    super().__init__(session_id)

    # Identity and managed sub-agents.
    self.orchestrator_id = orchestrator_id
    self.agents = agents
    # One runtime state record per sub-agent.
    self.agent_states = {aid: AgentState() for aid in agents}

    # Fall back to a default OpenAI configuration when none is supplied.
    self.config = config or AgentConfig.create_openai_config()
    # Message templates may come from the config; otherwise use defaults.
    self.message_templates = self.config.message_templates or MessageTemplates()
    # Workflow tools (vote / new_answer) are parameterized by the agent ids.
    self.workflow_tools = self.message_templates.get_standard_tools(list(agents))

    # MassGen-specific state.
    self.current_task: Optional[str] = None
    self.workflow_phase: str = "idle"  # idle, coordinating, presenting

    # Internal coordination state.
    self._coordination_messages: List[Dict[str, str]] = []
    self._selected_agent: Optional[str] = None
|
|
99
|
+
|
|
100
|
+
async def chat(
    self,
    messages: List[Dict[str, Any]],
    tools: Optional[List[Dict[str, Any]]] = None,
    reset_chat: bool = False,
    clear_history: bool = False,
) -> AsyncGenerator[StreamChunk, None]:
    """
    Main chat interface - handles user messages and coordinates sub-agents.

    Args:
        messages: List of conversation messages
        tools: Ignored by orchestrator (uses internal workflow tools).
            Annotated Optional explicitly; the previous `List[...] = None`
            was an implicit-Optional annotation, disallowed by PEP 484.
        reset_chat: If True, reset conversation and start fresh
        clear_history: If True, clear history before processing

    Yields:
        StreamChunk: Streaming response chunks
    """
    _ = tools  # Unused parameter - orchestrator injects its own workflow tools

    # Handle conversation management flags before processing input.
    if clear_history:
        self.conversation_history.clear()
    if reset_chat:
        self.reset()

    # Split the raw message list into the latest user turn + prior history.
    conversation_context = self._build_conversation_context(messages)
    user_message = conversation_context.get("current_message")

    if not user_message:
        yield StreamChunk(
            type="error", error="No user message found in conversation"
        )
        return

    # Add user message to history
    self.add_to_history("user", user_message)

    # Dispatch based on the current workflow phase.
    if self.workflow_phase == "idle":
        # New task - start MassGen coordination with full context
        self.current_task = user_message
        self.workflow_phase = "coordinating"

        async for chunk in self._coordinate_agents(conversation_context):
            yield chunk

    elif self.workflow_phase == "presenting":
        # Handle follow-up question with full conversation context
        async for chunk in self._handle_followup(
            user_message, conversation_context
        ):
            yield chunk
    else:
        # Already coordinating - provide status update
        yield StreamChunk(
            type="content", content="🔄 Coordinating agents, please wait..."
        )
        # Note: In production, you might want to queue follow-up questions
|
|
161
|
+
|
|
162
|
+
async def chat_simple(self, user_message: str) -> AsyncGenerator[StreamChunk, None]:
    """
    Backwards compatible simple chat interface.

    Wraps the plain string in a single-message conversation and delegates
    to the full `chat` pipeline.

    Args:
        user_message: Simple string message from user

    Yields:
        StreamChunk: Streaming response chunks
    """
    wrapped = [{"role": "user", "content": user_message}]
    async for chunk in self.chat(wrapped):
        yield chunk
|
|
175
|
+
|
|
176
|
+
def _build_conversation_context(
    self, messages: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """Build conversation context from message list.

    Splits the raw messages into the most recent user message (the current
    task) and the preceding conversation history. System messages are
    excluded from history.
    """
    history: List[Dict[str, Any]] = []
    latest_user_content = None

    # Invariant for the whole loop: are we in a multi-message conversation?
    is_multi_message = len(messages) > 1

    for msg in messages:
        role = msg.get("role")
        if role == "user":
            latest_user_content = msg.get("content", "")
            # User turns belong to history only when part of a longer
            # conversation; the final user turn is popped off below.
            if history or is_multi_message:
                history.append(msg.copy())
        elif role == "assistant":
            history.append(msg.copy())
        # System messages are intentionally not part of conversation history.

    # The trailing user message is the current task, not history.
    if history and history[-1].get("role") == "user":
        history.pop()

    return {
        "current_message": latest_user_content,
        "conversation_history": history,
        "full_messages": messages,
    }
|
|
208
|
+
|
|
209
|
+
async def _coordinate_agents(
    self, conversation_context: Optional[Dict[str, Any]] = None
) -> AsyncGenerator[StreamChunk, None]:
    """Execute unified MassGen coordination workflow with real-time streaming.

    Runs the full round: resets per-agent voting state, streams the
    coordination loop, selects the winning agent from the collected votes,
    and finally streams the presentation of the selected answer.

    Args:
        conversation_context: Optional context dict produced by
            _build_conversation_context (current message + history).

    Yields:
        StreamChunk: Status banners plus everything forwarded from the
        coordination and presentation sub-streams.
    """
    yield StreamChunk(
        type="content",
        content="🚀 Starting multi-agent coordination...\n\n",
        source=self.orchestrator_id,
    )

    votes = {}  # Track votes: voter_id -> {"agent_id": voted_for, "reason": reason}

    # Initialize all agents with has_voted = False and set restart flags
    # so every agent (re)starts fresh for this coordination round.
    for agent_id in self.agents.keys():
        self.agent_states[agent_id].has_voted = False
        self.agent_states[agent_id].restart_pending = True

    yield StreamChunk(
        type="content",
        content="## 📋 Agents Coordinating\n",
        source=self.orchestrator_id,
    )

    # Start streaming coordination with real-time agent output; `votes` is
    # mutated in place by the coordination loop.
    async for chunk in self._stream_coordination_with_agents(
        votes, conversation_context
    ):
        yield chunk

    # Determine final agent based on votes; only agents that actually
    # produced an answer are eligible.
    current_answers = {
        aid: state.answer
        for aid, state in self.agent_states.items()
        if state.answer
    }
    self._selected_agent = self._determine_final_agent_from_votes(
        votes, current_answers
    )

    # Present final answer
    async for chunk in self._present_final_answer():
        yield chunk
|
|
251
|
+
|
|
252
|
+
async def _stream_coordination_with_agents(
    self,
    votes: Dict[str, Dict],
    conversation_context: Optional[Dict[str, Any]] = None,
) -> AsyncGenerator[StreamChunk, None]:
    """
    Coordinate agents with real-time streaming of their outputs.

    Mutates `votes` in place (caller inspects it after the loop ends).

    Processes agent stream signals:
    - "content": Streams real-time agent output to user
    - "result": Records votes/answers, triggers restart_pending for other agents
    - "error": Displays error and closes agent stream (self-terminating)
    - "done": Closes agent stream gracefully

    Restart Mechanism:
    When any agent provides new_answer, all other agents get restart_pending=True
    and gracefully terminate their current work before restarting.
    """
    active_streams = {}  # agent_id -> async generator producing (type, data) tuples
    active_tasks = {}  # Track active tasks to prevent duplicate task creation

    # Stream agent outputs in real-time until all have voted
    while not all(state.has_voted for state in self.agent_states.values()):
        # Start any agents that aren't running and haven't voted yet.
        # Snapshot of answers available so far, shared with each new stream.
        current_answers = {
            aid: state.answer
            for aid, state in self.agent_states.items()
            if state.answer
        }
        for agent_id in self.agents.keys():
            if (
                agent_id not in active_streams
                and not self.agent_states[agent_id].has_voted
            ):
                active_streams[agent_id] = self._stream_agent_execution(
                    agent_id,
                    self.current_task,
                    current_answers,
                    conversation_context,
                )

        if not active_streams:
            break

        # Create tasks only for streams that don't already have active tasks
        # (a stream keeps its pending task across loop iterations).
        for agent_id, stream in active_streams.items():
            if agent_id not in active_tasks:
                active_tasks[agent_id] = asyncio.create_task(
                    self._get_next_chunk(stream)
                )

        if not active_tasks:
            break

        # Wake up as soon as ANY agent produces its next chunk.
        done, _ = await asyncio.wait(
            active_tasks.values(), return_when=asyncio.FIRST_COMPLETED
        )

        # Collect results from completed agents. State changes are staged in
        # these locals and applied together after the loop, so one batch of
        # chunks is processed against a consistent view of agent state.
        reset_signal = False
        voted_agents = {}
        answered_agents = {}

        # Process completed stream chunks
        for task in done:
            # Map the finished task back to its agent (identity comparison).
            agent_id = next(aid for aid, t in active_tasks.items() if t is task)
            # Remove completed task from active_tasks
            del active_tasks[agent_id]

            try:
                chunk_type, chunk_data = await task

                if chunk_type == "content":
                    # Stream agent content in real-time with source info
                    yield StreamChunk(
                        type="content", content=chunk_data, source=agent_id
                    )

                elif chunk_type == "result":
                    # Agent completed with result
                    result_type, result_data = chunk_data

                    # Emit agent completion status immediately upon result
                    yield StreamChunk(
                        type="agent_status",
                        source=agent_id,
                        status="completed",
                        content="",
                    )
                    await self._close_agent_stream(agent_id, active_streams)

                    if result_type == "answer":
                        # Agent provided an answer (initial or improved)
                        # Always record answers, even from restarting agents (orchestrator accepts them)
                        answered_agents[agent_id] = result_data
                        reset_signal = True
                        yield StreamChunk(
                            type="content",
                            content=f"[{agent_id}] ✅ Answer provided",
                            source=agent_id,
                        )

                    elif result_type == "vote":
                        # Agent voted for existing answer
                        # Ignore votes from agents with restart pending (votes are about current state)
                        if self.agent_states[agent_id].restart_pending:
                            voted_for = result_data.get("agent_id", "<unknown>")
                            reason = result_data.get("reason", "No reason provided")
                            yield StreamChunk(
                                type="content",
                                content=f"🔄 Vote by [{agent_id}] for [{voted_for}] ignored (reason: {reason}) - restarting due to new answers",
                                source=agent_id,
                            )
                        else:
                            voted_agents[agent_id] = result_data
                            yield StreamChunk(
                                type="content",
                                content=f"[{agent_id}] ✅ Vote recorded for {result_data['agent_id']}",
                                source=agent_id,
                            )

                elif chunk_type == "error":
                    # Agent error
                    yield StreamChunk(
                        type="content", content=f"❌ {chunk_data}", source=agent_id
                    )
                    # Emit agent completion status for errors too
                    yield StreamChunk(
                        type="agent_status",
                        source=agent_id,
                        status="completed",
                        content="",
                    )
                    await self._close_agent_stream(agent_id, active_streams)

                elif chunk_type == "done":
                    # Stream completed - emit completion status for frontend
                    yield StreamChunk(
                        type="agent_status",
                        source=agent_id,
                        status="completed",
                        content="",
                    )
                    await self._close_agent_stream(agent_id, active_streams)

            except Exception as e:
                yield StreamChunk(
                    type="content",
                    content=f"❌ Stream error - {e}",
                    source=agent_id,
                )
                await self._close_agent_stream(agent_id, active_streams)

        # Apply all state changes atomically after processing all results
        if reset_signal:
            # Reset all agents' has_voted to False (any new answer invalidates all votes)
            for state in self.agent_states.values():
                state.has_voted = False
            votes.clear()
            # Signal ALL agents to gracefully restart
            for agent_id in self.agent_states.keys():
                self.agent_states[agent_id].restart_pending = True
        # Set has_voted = True for agents that voted (only if no reset signal)
        else:
            for agent_id, vote_data in voted_agents.items():
                self.agent_states[agent_id].has_voted = True
                votes[agent_id] = vote_data

        # Update answers for agents that provided them
        for agent_id, answer in answered_agents.items():
            self.agent_states[agent_id].answer = answer

    # Cancel any remaining tasks and close streams
    for task in active_tasks.values():
        task.cancel()
    for agent_id in list(active_streams.keys()):
        await self._close_agent_stream(agent_id, active_streams)
|
|
430
|
+
|
|
431
|
+
async def _close_agent_stream(
    self, agent_id: str, active_streams: Dict[str, AsyncGenerator]
) -> None:
    """Close and remove an agent stream safely.

    Args:
        agent_id: Identifier of the agent whose stream should be closed.
        active_streams: Mutable mapping of agent_id -> async generator;
            the entry is removed even if closing raises.

    No-op if the agent has no entry in active_streams.
    """
    # Remove the entry first so the stream is never closed twice.
    stream = active_streams.pop(agent_id, None)
    if stream is not None:
        try:
            await stream.aclose()
        except Exception:
            # Ignore cleanup errors. Fix: the original bare `except:` also
            # swallowed BaseException (asyncio.CancelledError,
            # KeyboardInterrupt), which can break task cancellation.
            pass
|
|
441
|
+
|
|
442
|
+
def _check_restart_pending(self, agent_id: str) -> bool:
    """Return True if the given agent has been flagged for a graceful restart.

    Pure read of agent state; it does not yield or emit anything itself —
    callers are responsible for announcing the restart.
    """
    return self.agent_states[agent_id].restart_pending
|
|
445
|
+
|
|
446
|
+
def _create_tool_error_messages(
    self,
    agent: "ChatAgent",
    tool_calls: List[Dict[str, Any]],
    primary_error_msg: str,
    secondary_error_msg: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    Create tool error messages for all tool calls in a response.

    The API requires a response to EVERY tool call, so the first call gets
    the primary error message and any additional calls get the secondary
    message (defaulting to the primary).

    Args:
        agent: The ChatAgent instance for backend access
        tool_calls: List of tool calls that need error responses
        primary_error_msg: Error message for the first tool call
        secondary_error_msg: Error message for additional tool calls
            (defaults to primary_error_msg). Annotated Optional explicitly;
            the previous `str = None` was an implicit-Optional annotation,
            disallowed by PEP 484.

    Returns:
        List of tool result messages that can be sent back to the agent
    """
    if not tool_calls:
        return []

    if secondary_error_msg is None:
        secondary_error_msg = primary_error_msg

    # First call gets the primary error; the rest get the secondary one.
    enforcement_msgs = []
    for index, tool_call in enumerate(tool_calls):
        message_text = primary_error_msg if index == 0 else secondary_error_msg
        enforcement_msgs.append(
            agent.backend.create_tool_result_message(tool_call, message_text)
        )
    return enforcement_msgs
|
|
488
|
+
|
|
489
|
+
async def _stream_agent_execution(
|
|
490
|
+
self,
|
|
491
|
+
agent_id: str,
|
|
492
|
+
task: str,
|
|
493
|
+
answers: Dict[str, str],
|
|
494
|
+
conversation_context: Optional[Dict[str, Any]] = None,
|
|
495
|
+
) -> AsyncGenerator[tuple, None]:
|
|
496
|
+
"""
|
|
497
|
+
Stream agent execution with real-time content and final result.
|
|
498
|
+
|
|
499
|
+
Yields:
|
|
500
|
+
("content", str): Real-time agent output (source attribution added by caller)
|
|
501
|
+
("result", (type, data)): Final result - ("vote", vote_data) or ("answer", content)
|
|
502
|
+
("error", str): Error message (self-terminating)
|
|
503
|
+
("done", None): Graceful completion signal
|
|
504
|
+
|
|
505
|
+
Restart Behavior:
|
|
506
|
+
If restart_pending is True, agent gracefully terminates with "done" signal.
|
|
507
|
+
restart_pending is cleared at the beginning of execution.
|
|
508
|
+
"""
|
|
509
|
+
agent = self.agents[agent_id]
|
|
510
|
+
|
|
511
|
+
# Clear restart pending flag at the beginning of agent execution
|
|
512
|
+
self.agent_states[agent_id].restart_pending = False
|
|
513
|
+
|
|
514
|
+
try:
|
|
515
|
+
# Build conversation with context support
|
|
516
|
+
if conversation_context and conversation_context.get(
|
|
517
|
+
"conversation_history"
|
|
518
|
+
):
|
|
519
|
+
# Use conversation context-aware building
|
|
520
|
+
conversation = self.message_templates.build_conversation_with_context(
|
|
521
|
+
current_task=task,
|
|
522
|
+
conversation_history=conversation_context.get(
|
|
523
|
+
"conversation_history", []
|
|
524
|
+
),
|
|
525
|
+
agent_summaries=answers,
|
|
526
|
+
valid_agent_ids=list(answers.keys()) if answers else None,
|
|
527
|
+
)
|
|
528
|
+
else:
|
|
529
|
+
# Fallback to standard conversation building
|
|
530
|
+
conversation = self.message_templates.build_initial_conversation(
|
|
531
|
+
task=task,
|
|
532
|
+
agent_summaries=answers,
|
|
533
|
+
valid_agent_ids=list(answers.keys()) if answers else None,
|
|
534
|
+
)
|
|
535
|
+
|
|
536
|
+
# Clean startup without redundant messages
|
|
537
|
+
|
|
538
|
+
# Build proper conversation messages with system + user messages
|
|
539
|
+
max_attempts = 3
|
|
540
|
+
conversation_messages = [
|
|
541
|
+
{"role": "system", "content": conversation["system_message"]},
|
|
542
|
+
{"role": "user", "content": conversation["user_message"]},
|
|
543
|
+
]
|
|
544
|
+
enforcement_msg = self.message_templates.enforcement_message()
|
|
545
|
+
|
|
546
|
+
for attempt in range(max_attempts):
|
|
547
|
+
if self._check_restart_pending(agent_id):
|
|
548
|
+
# yield ("content", "🔄 Gracefully restarting due to new answers from other agents")
|
|
549
|
+
yield (
|
|
550
|
+
"content",
|
|
551
|
+
f"🔁 Agent [{agent_id}] gracefully restarting due to new answer detected",
|
|
552
|
+
)
|
|
553
|
+
yield ("done", None)
|
|
554
|
+
return
|
|
555
|
+
|
|
556
|
+
# Stream agent response with workflow tools
|
|
557
|
+
if attempt == 0:
|
|
558
|
+
# First attempt: orchestrator provides initial conversation
|
|
559
|
+
# But we need the agent to have this in its history for subsequent calls
|
|
560
|
+
# First attempt: provide complete conversation and reset agent's history
|
|
561
|
+
chat_stream = agent.chat(
|
|
562
|
+
conversation_messages, self.workflow_tools, reset_chat=True
|
|
563
|
+
)
|
|
564
|
+
else:
|
|
565
|
+
# Subsequent attempts: send enforcement message (set by error handling)
|
|
566
|
+
|
|
567
|
+
if isinstance(enforcement_msg, list):
|
|
568
|
+
# Tool message array
|
|
569
|
+
chat_stream = agent.chat(
|
|
570
|
+
enforcement_msg, self.workflow_tools, reset_chat=False
|
|
571
|
+
)
|
|
572
|
+
else:
|
|
573
|
+
# Single user message
|
|
574
|
+
enforcement_message = {
|
|
575
|
+
"role": "user",
|
|
576
|
+
"content": enforcement_msg,
|
|
577
|
+
}
|
|
578
|
+
chat_stream = agent.chat(
|
|
579
|
+
[enforcement_message], self.workflow_tools, reset_chat=False
|
|
580
|
+
)
|
|
581
|
+
response_text = ""
|
|
582
|
+
tool_calls = []
|
|
583
|
+
workflow_tool_found = False
|
|
584
|
+
async for chunk in chat_stream:
|
|
585
|
+
if chunk.type == "content":
|
|
586
|
+
response_text += chunk.content
|
|
587
|
+
# Stream agent content directly - source field handles attribution
|
|
588
|
+
yield ("content", chunk.content)
|
|
589
|
+
elif chunk.type == "tool_calls":
|
|
590
|
+
# Use the correct tool_calls field
|
|
591
|
+
chunk_tool_calls = getattr(chunk, "tool_calls", []) or []
|
|
592
|
+
tool_calls.extend(chunk_tool_calls)
|
|
593
|
+
# Stream tool calls to show agent actions
|
|
594
|
+
for tool_call in chunk_tool_calls:
|
|
595
|
+
tool_name = agent.backend.extract_tool_name(tool_call)
|
|
596
|
+
tool_args = agent.backend.extract_tool_arguments(tool_call)
|
|
597
|
+
|
|
598
|
+
if tool_name == "new_answer":
|
|
599
|
+
content = tool_args.get("content", "")
|
|
600
|
+
yield ("content", f'💡 Providing answer: "{content}"')
|
|
601
|
+
elif tool_name == "vote":
|
|
602
|
+
agent_voted_for = tool_args.get("agent_id", "")
|
|
603
|
+
reason = tool_args.get("reason", "")
|
|
604
|
+
|
|
605
|
+
# Convert anonymous agent ID to real agent ID for display
|
|
606
|
+
real_agent_id = agent_voted_for
|
|
607
|
+
if answers: # Only do mapping if answers exist
|
|
608
|
+
agent_mapping = {}
|
|
609
|
+
for i, real_id in enumerate(
|
|
610
|
+
sorted(answers.keys()), 1
|
|
611
|
+
):
|
|
612
|
+
agent_mapping[f"agent{i}"] = real_id
|
|
613
|
+
real_agent_id = agent_mapping.get(
|
|
614
|
+
agent_voted_for, agent_voted_for
|
|
615
|
+
)
|
|
616
|
+
|
|
617
|
+
yield (
|
|
618
|
+
"content",
|
|
619
|
+
f"🗳️ Voting for {real_agent_id}: {reason}",
|
|
620
|
+
)
|
|
621
|
+
else:
|
|
622
|
+
yield ("content", f"🔧 Using {tool_name}")
|
|
623
|
+
elif chunk.type == "error":
|
|
624
|
+
# Stream error information to user interface
|
|
625
|
+
error_msg = (
|
|
626
|
+
getattr(chunk, "error", str(chunk.content))
|
|
627
|
+
if hasattr(chunk, "error")
|
|
628
|
+
else str(chunk.content)
|
|
629
|
+
)
|
|
630
|
+
yield ("content", f"❌ Error: {error_msg}")
|
|
631
|
+
|
|
632
|
+
# Check for multiple vote calls before processing
|
|
633
|
+
vote_calls = [
|
|
634
|
+
tc
|
|
635
|
+
for tc in tool_calls
|
|
636
|
+
if agent.backend.extract_tool_name(tc) == "vote"
|
|
637
|
+
]
|
|
638
|
+
if len(vote_calls) > 1:
|
|
639
|
+
if attempt < max_attempts - 1:
|
|
640
|
+
if self._check_restart_pending(agent_id):
|
|
641
|
+
yield (
|
|
642
|
+
"content",
|
|
643
|
+
f"🔁 Agent [{agent_id}] gracefully restarting due to new answer detected",
|
|
644
|
+
)
|
|
645
|
+
yield ("done", None)
|
|
646
|
+
return
|
|
647
|
+
error_msg = f"Multiple vote calls not allowed. Made {len(vote_calls)} calls but must make exactly 1. Call vote tool once with chosen agent."
|
|
648
|
+
yield ("content", f"❌ {error_msg}")
|
|
649
|
+
|
|
650
|
+
# Send tool error response for all tool calls
|
|
651
|
+
enforcement_msg = self._create_tool_error_messages(
|
|
652
|
+
agent,
|
|
653
|
+
tool_calls,
|
|
654
|
+
error_msg,
|
|
655
|
+
"Vote rejected due to multiple votes.",
|
|
656
|
+
)
|
|
657
|
+
continue # Retry this attempt
|
|
658
|
+
else:
|
|
659
|
+
yield (
|
|
660
|
+
"error",
|
|
661
|
+
f"Agent made {len(vote_calls)} vote calls in single response after max attempts",
|
|
662
|
+
)
|
|
663
|
+
yield ("done", None)
|
|
664
|
+
return
|
|
665
|
+
|
|
666
|
+
# Check for mixed new_answer and vote calls - violates binary decision framework
|
|
667
|
+
new_answer_calls = [
|
|
668
|
+
tc
|
|
669
|
+
for tc in tool_calls
|
|
670
|
+
if agent.backend.extract_tool_name(tc) == "new_answer"
|
|
671
|
+
]
|
|
672
|
+
if len(vote_calls) > 0 and len(new_answer_calls) > 0:
|
|
673
|
+
if attempt < max_attempts - 1:
|
|
674
|
+
if self._check_restart_pending(agent_id):
|
|
675
|
+
yield (
|
|
676
|
+
"content",
|
|
677
|
+
f"🔁 Agent [{agent_id}] gracefully restarting due to new answer detected",
|
|
678
|
+
)
|
|
679
|
+
yield ("done", None)
|
|
680
|
+
return
|
|
681
|
+
error_msg = "Cannot use both 'vote' and 'new_answer' in same response. Choose one: vote for existing answer OR provide new answer."
|
|
682
|
+
yield ("content", f"❌ {error_msg}")
|
|
683
|
+
|
|
684
|
+
# Send tool error response for all tool calls that caused the violation
|
|
685
|
+
enforcement_msg = self._create_tool_error_messages(
|
|
686
|
+
agent, tool_calls, error_msg
|
|
687
|
+
)
|
|
688
|
+
continue # Retry this attempt
|
|
689
|
+
else:
|
|
690
|
+
yield (
|
|
691
|
+
"error",
|
|
692
|
+
f"Agent used both vote and new_answer tools in single response after max attempts",
|
|
693
|
+
)
|
|
694
|
+
yield ("done", None)
|
|
695
|
+
return
|
|
696
|
+
|
|
697
|
+
# Process all tool calls
|
|
698
|
+
if tool_calls:
|
|
699
|
+
for tool_call in tool_calls:
|
|
700
|
+
tool_name = agent.backend.extract_tool_name(tool_call)
|
|
701
|
+
tool_args = agent.backend.extract_tool_arguments(tool_call)
|
|
702
|
+
|
|
703
|
+
if tool_name == "vote":
|
|
704
|
+
# Check if agent should restart - votes invalid during restart
|
|
705
|
+
if self.agent_states[agent_id].restart_pending:
|
|
706
|
+
yield (
|
|
707
|
+
"content",
|
|
708
|
+
f"🔄 Agent [{agent_id}] Vote invalid - restarting due to new answers",
|
|
709
|
+
)
|
|
710
|
+
yield ("done", None)
|
|
711
|
+
return
|
|
712
|
+
|
|
713
|
+
workflow_tool_found = True
|
|
714
|
+
# Vote for existing answer (requires existing answers)
|
|
715
|
+
if not answers:
|
|
716
|
+
# Invalid - can't vote when no answers exist
|
|
717
|
+
if attempt < max_attempts - 1:
|
|
718
|
+
if self._check_restart_pending(agent_id):
|
|
719
|
+
yield (
|
|
720
|
+
"content",
|
|
721
|
+
f"🔁 Agent [{agent_id}] gracefully restarting due to new answer detected",
|
|
722
|
+
)
|
|
723
|
+
yield ("done", None)
|
|
724
|
+
return
|
|
725
|
+
error_msg = "Cannot vote when no answers exist. Use new_answer tool."
|
|
726
|
+
yield ("content", f"❌ {error_msg}")
|
|
727
|
+
# Create proper tool error message for retry
|
|
728
|
+
enforcement_msg = self._create_tool_error_messages(
|
|
729
|
+
agent, [tool_call], error_msg
|
|
730
|
+
)
|
|
731
|
+
continue
|
|
732
|
+
else:
|
|
733
|
+
yield (
|
|
734
|
+
"error",
|
|
735
|
+
"Cannot vote when no answers exist after max attempts",
|
|
736
|
+
)
|
|
737
|
+
yield ("done", None)
|
|
738
|
+
return
|
|
739
|
+
|
|
740
|
+
voted_agent_anon = tool_args.get("agent_id")
|
|
741
|
+
reason = tool_args.get("reason", "")
|
|
742
|
+
|
|
743
|
+
# Convert anonymous agent ID back to real agent ID
|
|
744
|
+
agent_mapping = {}
|
|
745
|
+
for i, real_agent_id in enumerate(
|
|
746
|
+
sorted(answers.keys()), 1
|
|
747
|
+
):
|
|
748
|
+
agent_mapping[f"agent{i}"] = real_agent_id
|
|
749
|
+
|
|
750
|
+
voted_agent = agent_mapping.get(
|
|
751
|
+
voted_agent_anon, voted_agent_anon
|
|
752
|
+
)
|
|
753
|
+
|
|
754
|
+
# Handle invalid agent_id
|
|
755
|
+
if voted_agent not in answers:
|
|
756
|
+
if attempt < max_attempts - 1:
|
|
757
|
+
if self._check_restart_pending(agent_id):
|
|
758
|
+
yield (
|
|
759
|
+
"content",
|
|
760
|
+
f"🔁 Agent [{agent_id}] gracefully restarting due to new answer detected",
|
|
761
|
+
)
|
|
762
|
+
yield ("done", None)
|
|
763
|
+
return
|
|
764
|
+
# Create reverse mapping for error message
|
|
765
|
+
reverse_mapping = {
|
|
766
|
+
real_id: f"agent{i}"
|
|
767
|
+
for i, real_id in enumerate(
|
|
768
|
+
sorted(answers.keys()), 1
|
|
769
|
+
)
|
|
770
|
+
}
|
|
771
|
+
valid_anon_agents = [
|
|
772
|
+
reverse_mapping[real_id]
|
|
773
|
+
for real_id in answers.keys()
|
|
774
|
+
]
|
|
775
|
+
error_msg = f"Invalid agent_id '{voted_agent_anon}'. Valid agents: {', '.join(valid_anon_agents)}"
|
|
776
|
+
# Send tool error result back to agent
|
|
777
|
+
yield ("content", f"❌ {error_msg}")
|
|
778
|
+
# Create proper tool error message for retry
|
|
779
|
+
enforcement_msg = self._create_tool_error_messages(
|
|
780
|
+
agent, [tool_call], error_msg
|
|
781
|
+
)
|
|
782
|
+
continue # Retry with updated conversation
|
|
783
|
+
else:
|
|
784
|
+
yield (
|
|
785
|
+
"error",
|
|
786
|
+
f"Invalid agent_id after {max_attempts} attempts",
|
|
787
|
+
)
|
|
788
|
+
yield ("done", None)
|
|
789
|
+
return
|
|
790
|
+
# Record the vote locally (but orchestrator may still ignore it)
|
|
791
|
+
self.agent_states[agent_id].votes = {
|
|
792
|
+
"agent_id": voted_agent,
|
|
793
|
+
"reason": reason,
|
|
794
|
+
}
|
|
795
|
+
|
|
796
|
+
# Send tool result - orchestrator will decide if vote is accepted
|
|
797
|
+
# Vote submitted (result will be shown by orchestrator)
|
|
798
|
+
yield (
|
|
799
|
+
"result",
|
|
800
|
+
("vote", {"agent_id": voted_agent, "reason": reason}),
|
|
801
|
+
)
|
|
802
|
+
yield ("done", None)
|
|
803
|
+
return
|
|
804
|
+
|
|
805
|
+
elif tool_name == "new_answer":
|
|
806
|
+
workflow_tool_found = True
|
|
807
|
+
# Agent provided new answer
|
|
808
|
+
content = tool_args.get("content", response_text.strip())
|
|
809
|
+
|
|
810
|
+
# Check for duplicate answer
|
|
811
|
+
for existing_agent_id, existing_content in answers.items():
|
|
812
|
+
if content.strip() == existing_content.strip():
|
|
813
|
+
if attempt < max_attempts - 1:
|
|
814
|
+
if self._check_restart_pending(agent_id):
|
|
815
|
+
yield (
|
|
816
|
+
"content",
|
|
817
|
+
f"🔁 Agent [{agent_id}] gracefully restarting due to new answer detected",
|
|
818
|
+
)
|
|
819
|
+
yield ("done", None)
|
|
820
|
+
return
|
|
821
|
+
error_msg = f"Answer already provided by {existing_agent_id}. Provide different answer or vote for existing one."
|
|
822
|
+
yield ("content", f"❌ {error_msg}")
|
|
823
|
+
# Create proper tool error message for retry
|
|
824
|
+
enforcement_msg = (
|
|
825
|
+
self._create_tool_error_messages(
|
|
826
|
+
agent, [tool_call], error_msg
|
|
827
|
+
)
|
|
828
|
+
)
|
|
829
|
+
continue
|
|
830
|
+
else:
|
|
831
|
+
yield (
|
|
832
|
+
"error",
|
|
833
|
+
f"Duplicate answer provided after {max_attempts} attempts",
|
|
834
|
+
)
|
|
835
|
+
yield ("done", None)
|
|
836
|
+
return
|
|
837
|
+
# Send successful tool result back to agent
|
|
838
|
+
# Answer recorded (result will be shown by orchestrator)
|
|
839
|
+
yield ("result", ("answer", content))
|
|
840
|
+
yield ("done", None)
|
|
841
|
+
return
|
|
842
|
+
|
|
843
|
+
else:
|
|
844
|
+
# Non-workflow tools not yet implemented
|
|
845
|
+
yield (
|
|
846
|
+
"content",
|
|
847
|
+
f"🔧 used {tool_name} tool (not implemented)",
|
|
848
|
+
)
|
|
849
|
+
|
|
850
|
+
# Case 3: Non-workflow response, need enforcement (only if no workflow tool was found)
|
|
851
|
+
if not workflow_tool_found:
|
|
852
|
+
if self._check_restart_pending(agent_id):
|
|
853
|
+
yield (
|
|
854
|
+
"content",
|
|
855
|
+
f"🔁 Agent [{agent_id}] gracefully restarting due to new answer detected",
|
|
856
|
+
)
|
|
857
|
+
yield ("done", None)
|
|
858
|
+
return
|
|
859
|
+
if attempt < max_attempts - 1:
|
|
860
|
+
yield ("content", f"🔄 needs to use workflow tools...")
|
|
861
|
+
# Reset to default enforcement message for this case
|
|
862
|
+
enforcement_msg = self.message_templates.enforcement_message()
|
|
863
|
+
continue # Retry with updated conversation
|
|
864
|
+
else:
|
|
865
|
+
# Last attempt failed, agent did not provide proper workflow response
|
|
866
|
+
yield (
|
|
867
|
+
"error",
|
|
868
|
+
f"Agent failed to use workflow tools after {max_attempts} attempts",
|
|
869
|
+
)
|
|
870
|
+
yield ("done", None)
|
|
871
|
+
return
|
|
872
|
+
|
|
873
|
+
except Exception as e:
|
|
874
|
+
yield ("error", f"Agent execution failed: {str(e)}")
|
|
875
|
+
yield ("done", None)
|
|
876
|
+
|
|
877
|
+
async def _get_next_chunk(self, stream: AsyncGenerator[tuple, None]) -> tuple:
    """Advance *stream* by one item, never raising.

    Returns the next tuple from the stream, a ``("done", None)`` sentinel
    when the stream is exhausted, or an ``("error", <message>)`` tuple if
    advancing the stream raised anything else.
    """
    try:
        chunk = await stream.__anext__()
    except StopAsyncIteration:
        return ("done", None)
    except Exception as exc:
        # Surface any stream failure as an error chunk instead of raising.
        return ("error", str(exc))
    return chunk
|
|
885
|
+
|
|
886
|
+
async def _present_final_answer(self) -> AsyncGenerator[StreamChunk, None]:
    """Present the final coordinated answer.

    Selects a winning agent (if one has not already been chosen), streams
    that agent's stored answer, records it in the conversation history, and
    transitions the workflow phase to "presenting".

    Yields:
        StreamChunk: content chunks for the final answer, then a "done" chunk.
    """
    yield StreamChunk(type="content", content="## 🎯 Final Coordinated Answer\n")

    # Select the best agent based on current state if none was chosen yet.
    if not self._selected_agent:
        self._selected_agent = self._determine_final_agent_from_states()

    if (
        self._selected_agent
        and self._selected_agent in self.agent_states
        and self.agent_states[self._selected_agent].answer
    ):
        final_answer = self.agent_states[self._selected_agent].answer

        # Record the final answer in conversation history.
        self.add_to_history("assistant", final_answer)

        # Announce the winner exactly once. (Previously this banner was
        # yielded twice when the agent was selected inside this method:
        # once right after selection and again here.)
        yield StreamChunk(
            type="content", content=f"🏆 Selected Agent: {self._selected_agent}\n"
        )
        yield StreamChunk(type="content", content=final_answer)
        yield StreamChunk(
            type="content",
            content=f"\n\n---\n*Coordinated by {len(self.agents)} agents via MassGen framework*",
        )
    else:
        error_msg = "❌ Unable to provide coordinated answer - no successful agents"
        self.add_to_history("assistant", error_msg)
        yield StreamChunk(type="content", content=error_msg)

    # Update workflow phase
    self.workflow_phase = "presenting"
    yield StreamChunk(type="done")
|
|
925
|
+
|
|
926
|
+
def _determine_final_agent_from_votes(
    self, votes: Dict[str, Dict], agent_answers: Dict[str, str]
) -> str:
    """Pick the agent that should present the final answer.

    The agent with the most votes wins; ties are broken by the order in
    which agents appear in *agent_answers* (registration order). With no
    valid votes, the first agent that produced an answer wins; returns
    None when no agent has an answer at all.
    """
    fallback = next(iter(agent_answers)) if agent_answers else None
    if not votes:
        # No votes yet: earliest answering agent (or None).
        return fallback

    # Tally votes per target agent, skipping malformed ballots.
    tally: Dict[str, int] = {}
    for ballot in votes.values():
        target = ballot.get("agent_id")
        if target:
            tally[target] = tally.get(target, 0) + 1

    if not tally:
        return fallback

    top = max(tally.values())
    leaders = [aid for aid, count in tally.items() if count == top]

    # Tie-break by registration order (iteration order of agent_answers).
    winner = next((aid for aid in agent_answers if aid in leaders), None)
    if winner is not None:
        return winner

    # A leader that never produced an answer, else first answering agent.
    return leaders[0] if leaders else fallback
|
|
961
|
+
|
|
962
|
+
async def get_final_presentation(
    self, selected_agent_id: str, vote_results: Dict[str, Any]
) -> AsyncGenerator[StreamChunk, None]:
    """Ask the winning agent to present their final answer with voting context.

    Builds a presentation prompt containing the original task, a summary of
    the votes the winner received (including voter feedback), and every
    agent's answer, then streams the winning agent's fresh response through
    with ``source`` set to the winner's id.

    Args:
        selected_agent_id: Id of the winning agent; must exist in ``self.agents``.
        vote_results: Aggregated voting data (expects "vote_counts",
            "voter_details", and "is_tie" keys, as produced elsewhere in
            this orchestrator).

    Yields:
        StreamChunk: a status chunk, then the winner's streamed presentation.
    """
    if selected_agent_id not in self.agents:
        yield StreamChunk(
            type="error", error=f"Selected agent {selected_agent_id} not found"
        )
        return

    agent = self.agents[selected_agent_id]

    # Prepare context about the voting
    vote_counts = vote_results.get("vote_counts", {})
    voter_details = vote_results.get("voter_details", {})
    is_tie = vote_results.get("is_tie", False)

    # Build voting summary (vote count, then voters' reasons if any).
    voting_summary = f"You received {vote_counts.get(selected_agent_id, 0)} vote(s)"
    if voter_details.get(selected_agent_id):
        reasons = [v["reason"] for v in voter_details[selected_agent_id]]
        voting_summary += f" with feedback: {'; '.join(reasons)}"

    if is_tie:
        voting_summary += " (tie-broken by registration order)"

    # Get all answers for context (only agents that actually answered).
    all_answers = {
        aid: s.answer for aid, s in self.agent_states.items() if s.answer
    }

    # Use MessageTemplates to build the presentation message
    presentation_content = self.message_templates.build_final_presentation_message(
        original_task=self.current_task or "Task coordination",
        vote_summary=voting_summary,
        all_answers=all_answers,
        selected_agent_id=selected_agent_id,
    )

    # Get agent's original system message if available
    agent_system_message = getattr(agent, "system_message", None)
    # Create conversation with system and user messages
    presentation_messages = [
        {
            "role": "system",
            "content": self.message_templates.final_presentation_system_message(
                agent_system_message
            ),
        },
        {"role": "user", "content": presentation_content},
    ]
    yield StreamChunk(
        type="status",
        content=f"🎤 [{selected_agent_id}] presenting final answer\n",
    )

    # Use agent's chat method with proper system message (reset chat for clean presentation)
    async for chunk in agent.chat(presentation_messages, reset_chat=True):
        # Re-wrap content/done/error chunks so the UI can attribute them
        # to the presenting agent via the `source` field.
        if chunk.type == "content" and chunk.content:
            yield StreamChunk(
                type="content", content=chunk.content, source=selected_agent_id
            )
        elif chunk.type == "done":
            yield StreamChunk(type="done", source=selected_agent_id)
        elif chunk.type == "error":
            yield StreamChunk(
                type="error", error=chunk.error, source=selected_agent_id
            )
        # Pass through other chunk types as-is but with source
        else:
            if hasattr(chunk, "source"):
                chunk.source = selected_agent_id
            yield chunk
|
|
1036
|
+
|
|
1037
|
+
def _get_vote_results(self) -> Dict[str, Any]:
    """Summarize the current voting state.

    Returns a dict with per-agent vote counts, voter feedback details,
    the current winner (ties broken by registration order), whether the
    lead is tied, and aggregate counters.
    """
    agent_answers = {
        aid: st.answer for aid, st in self.agent_states.items() if st.answer
    }
    votes = {aid: st.votes for aid, st in self.agent_states.items() if st.votes}

    # Tally votes and collect each voter's stated reason per target agent.
    vote_counts: Dict[str, int] = {}
    voter_details: Dict[str, list] = {}
    for voter_id, ballot in votes.items():
        target = ballot.get("agent_id")
        if not target:
            continue
        vote_counts[target] = vote_counts.get(target, 0) + 1
        voter_details.setdefault(target, []).append(
            {
                "voter": voter_id,
                "reason": ballot.get("reason", "No reason provided"),
            }
        )

    # Determine the winner; tie-break by agent registration order.
    winner = None
    is_tie = False
    if vote_counts:
        top = max(vote_counts.values())
        leaders = [aid for aid, count in vote_counts.items() if count == top]
        is_tie = len(leaders) > 1
        winner = next((aid for aid in agent_answers if aid in leaders), None)
        if winner is None:
            winner = leaders[0] if leaders else None

    return {
        "vote_counts": vote_counts,
        "voter_details": voter_details,
        "winner": winner,
        "is_tie": is_tie,
        "total_votes": len(votes),
        "agents_with_answers": len(agent_answers),
        "agents_voted": len([v for v in votes.values() if v.get("agent_id")]),
    }
|
|
1095
|
+
|
|
1096
|
+
def _determine_final_agent_from_states(self) -> Optional[str]:
    """Return the first agent (by registration order) with a recorded answer.

    Returns None when no agent has produced an answer yet.
    """
    for aid, state in self.agent_states.items():
        if state.answer:
            return aid
    return None
|
|
1110
|
+
|
|
1111
|
+
async def _handle_followup(
    self, user_message: str, conversation_context: Optional[Dict[str, Any]] = None
) -> AsyncGenerator[StreamChunk, None]:
    """Acknowledge a follow-up question asked after the final answer.

    Currently only emits a context-aware acknowledgement; full
    re-coordination using the follow-up context is future work.
    """
    history = (
        conversation_context.get("conversation_history", [])
        if conversation_context
        else []
    )
    if history:
        reply = f"🤔 Thank you for your follow-up question in our ongoing conversation. I understand you're asking: '{user_message}'. Currently, the coordination is complete, but I can help clarify the answer or coordinate a new task that takes our conversation history into account."
    else:
        reply = f"🤔 Thank you for your follow-up: '{user_message}'. The coordination is complete, but I can help clarify the answer or coordinate a new task if needed."
    yield StreamChunk(type="content", content=reply)
    yield StreamChunk(type="done")
|
|
1133
|
+
|
|
1134
|
+
# =============================================================================
|
|
1135
|
+
# PUBLIC API METHODS
|
|
1136
|
+
# =============================================================================
|
|
1137
|
+
|
|
1138
|
+
def add_agent(self, agent_id: str, agent: ChatAgent) -> None:
    """Register *agent* under *agent_id* with a fresh coordination state."""
    self.agent_states[agent_id] = AgentState()
    self.agents[agent_id] = agent
|
|
1142
|
+
|
|
1143
|
+
def remove_agent(self, agent_id: str) -> None:
    """Drop *agent_id* and its coordination state; a no-op for unknown ids."""
    self.agents.pop(agent_id, None)
    self.agent_states.pop(agent_id, None)
|
|
1149
|
+
|
|
1150
|
+
def get_status(self) -> Dict[str, Any]:
    """Get current orchestrator status.

    Returns a snapshot dict with session/workflow metadata, aggregated
    vote results, per-agent status paired with its coordination state,
    and the conversation-history length.
    """
    # Calculate vote results
    vote_results = self._get_vote_results()

    return {
        "session_id": self.session_id,
        "workflow_phase": self.workflow_phase,
        "current_task": self.current_task,
        "selected_agent": self._selected_agent,
        "vote_results": vote_results,
        "agents": {
            aid: {
                "agent_status": agent.get_status(),
                "coordination_state": {
                    "answer": self.agent_states[aid].answer,
                    "has_voted": self.agent_states[aid].has_voted,
                },
            }
            # Pair each agent with its own state by key. The previous
            # positional zip over agents.values() and agent_states.values()
            # silently mispaired agents with states whenever the two dicts
            # differed in order or membership.
            for aid, agent in self.agents.items()
            if aid in self.agent_states
        },
        "conversation_length": len(self.conversation_history),
    }
|
|
1176
|
+
|
|
1177
|
+
def reset(self) -> None:
    """Clear all per-task state so the orchestrator can start a new task."""
    self.conversation_history.clear()
    self._coordination_messages.clear()
    self.current_task = None
    self._selected_agent = None
    self.workflow_phase = "idle"

    # Clear each agent's coordination state as well.
    for state in self.agent_states.values():
        state.answer = None
        state.has_voted = False
        state.restart_pending = False
|
|
1190
|
+
|
|
1191
|
+
|
|
1192
|
+
# =============================================================================
|
|
1193
|
+
# CONVENIENCE FUNCTIONS
|
|
1194
|
+
# =============================================================================
|
|
1195
|
+
|
|
1196
|
+
|
|
1197
|
+
def create_orchestrator(
    agents: List[tuple],
    orchestrator_id: str = "orchestrator",
    session_id: Optional[str] = None,
    config: Optional[AgentConfig] = None,
) -> Orchestrator:
    """
    Create a MassGen orchestrator with sub-agents.

    Args:
        agents: List of (agent_id, ChatAgent) tuples
        orchestrator_id: Unique identifier for this orchestrator (default: "orchestrator")
        session_id: Optional session ID
        config: Optional AgentConfig for orchestrator customization

    Returns:
        Configured Orchestrator
    """
    return Orchestrator(
        agents=dict(agents),
        orchestrator_id=orchestrator_id,
        session_id=session_id,
        config=config,
    )
|