massgen 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen has been flagged as potentially problematic; consult the package registry's advisory page for details.
- massgen/__init__.py +94 -0
- massgen/agent_config.py +507 -0
- massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
- massgen/backend/Function calling openai responses.md +1161 -0
- massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
- massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
- massgen/backend/__init__.py +25 -0
- massgen/backend/base.py +180 -0
- massgen/backend/chat_completions.py +228 -0
- massgen/backend/claude.py +661 -0
- massgen/backend/gemini.py +652 -0
- massgen/backend/grok.py +187 -0
- massgen/backend/response.py +397 -0
- massgen/chat_agent.py +440 -0
- massgen/cli.py +686 -0
- massgen/configs/README.md +293 -0
- massgen/configs/creative_team.yaml +53 -0
- massgen/configs/gemini_4o_claude.yaml +31 -0
- massgen/configs/news_analysis.yaml +51 -0
- massgen/configs/research_team.yaml +51 -0
- massgen/configs/single_agent.yaml +18 -0
- massgen/configs/single_flash2.5.yaml +44 -0
- massgen/configs/technical_analysis.yaml +51 -0
- massgen/configs/three_agents_default.yaml +31 -0
- massgen/configs/travel_planning.yaml +51 -0
- massgen/configs/two_agents.yaml +39 -0
- massgen/frontend/__init__.py +20 -0
- massgen/frontend/coordination_ui.py +945 -0
- massgen/frontend/displays/__init__.py +24 -0
- massgen/frontend/displays/base_display.py +83 -0
- massgen/frontend/displays/rich_terminal_display.py +3497 -0
- massgen/frontend/displays/simple_display.py +93 -0
- massgen/frontend/displays/terminal_display.py +381 -0
- massgen/frontend/logging/__init__.py +9 -0
- massgen/frontend/logging/realtime_logger.py +197 -0
- massgen/message_templates.py +431 -0
- massgen/orchestrator.py +1222 -0
- massgen/tests/__init__.py +10 -0
- massgen/tests/multi_turn_conversation_design.md +214 -0
- massgen/tests/multiturn_llm_input_analysis.md +189 -0
- massgen/tests/test_case_studies.md +113 -0
- massgen/tests/test_claude_backend.py +310 -0
- massgen/tests/test_grok_backend.py +160 -0
- massgen/tests/test_message_context_building.py +293 -0
- massgen/tests/test_rich_terminal_display.py +378 -0
- massgen/tests/test_v3_3agents.py +117 -0
- massgen/tests/test_v3_simple.py +216 -0
- massgen/tests/test_v3_three_agents.py +272 -0
- massgen/tests/test_v3_two_agents.py +176 -0
- massgen/utils.py +79 -0
- massgen/v1/README.md +330 -0
- massgen/v1/__init__.py +91 -0
- massgen/v1/agent.py +605 -0
- massgen/v1/agents.py +330 -0
- massgen/v1/backends/gemini.py +584 -0
- massgen/v1/backends/grok.py +410 -0
- massgen/v1/backends/oai.py +571 -0
- massgen/v1/cli.py +351 -0
- massgen/v1/config.py +169 -0
- massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
- massgen/v1/examples/fast_config.yaml +44 -0
- massgen/v1/examples/production.yaml +70 -0
- massgen/v1/examples/single_agent.yaml +39 -0
- massgen/v1/logging.py +974 -0
- massgen/v1/main.py +368 -0
- massgen/v1/orchestrator.py +1138 -0
- massgen/v1/streaming_display.py +1190 -0
- massgen/v1/tools.py +160 -0
- massgen/v1/types.py +245 -0
- massgen/v1/utils.py +199 -0
- massgen-0.0.3.dist-info/METADATA +568 -0
- massgen-0.0.3.dist-info/RECORD +76 -0
- massgen-0.0.3.dist-info/WHEEL +5 -0
- massgen-0.0.3.dist-info/entry_points.txt +2 -0
- massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
- massgen-0.0.3.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1138 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import threading
|
|
3
|
+
import time
|
|
4
|
+
import json
|
|
5
|
+
from collections import Counter
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from typing import Any, Optional, Dict, List
|
|
8
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
9
|
+
|
|
10
|
+
from .types import SystemState, AgentState, TaskInput, VoteRecord
|
|
11
|
+
from .logging import get_log_manager
|
|
12
|
+
|
|
13
|
+
# Set up logging
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MassOrchestrator:
|
|
18
|
+
"""
|
|
19
|
+
Central orchestrator for managing multiple agents in the MassGen framework, and logging for all events.
|
|
20
|
+
|
|
21
|
+
Simplified workflow:
|
|
22
|
+
1. Agents work on task (status: "working")
|
|
23
|
+
2. When agents vote, they become "voted"
|
|
24
|
+
3. When all votable agents have voted:
|
|
25
|
+
- Check consensus
|
|
26
|
+
- If consensus reached: select representative to present final answer
|
|
27
|
+
- If no consensus: restart all agents for debate
|
|
28
|
+
4. Representative presents final answer and system completes
|
|
29
|
+
"""
|
|
30
|
+
|
|
31
|
+
def __init__(
    self,
    max_duration: int = 600,
    consensus_threshold: float = 0.0,
    max_debate_rounds: int = 1,
    status_check_interval: float = 2.0,
    thread_pool_timeout: int = 5,
    streaming_orchestrator=None,
):
    """
    Initialize the orchestrator.

    Args:
        max_duration: Maximum duration for the entire task in seconds
        consensus_threshold: Fraction of agents that must agree for consensus (1.0 = unanimous)
        max_debate_rounds: Maximum number of debate rounds before fallback
        status_check_interval: Interval for checking agent status (seconds)
        thread_pool_timeout: Timeout for shutting down thread pool executor (seconds)
        streaming_orchestrator: Optional streaming orchestrator for real-time display
    """
    self.agents: Dict[int, Any] = {}  # agent_id -> MassAgent instance
    self.agent_states: Dict[int, AgentState] = {}  # agent_id -> AgentState instance
    # Permanent, append-only history of every VoteRecord ever cast
    # (cast_vote appends; restarts never prune it).
    self.votes: List[VoteRecord] = []
    self.system_state = SystemState()
    self.max_duration = max_duration
    # NOTE: with threshold 0.0 the effective requirement is still >= 1 vote,
    # because consumers compute max(1, int(votable * threshold)).
    self.consensus_threshold = consensus_threshold
    self.max_debate_rounds = max_debate_rounds
    self.status_check_interval = status_check_interval
    self.thread_pool_timeout = thread_pool_timeout
    self.streaming_orchestrator = streaming_orchestrator

    # Simplified coordination
    # RLock (not Lock): public methods call each other while already
    # holding the lock (e.g. notify_answer_update -> update_agent_answer).
    self._lock = threading.RLock()
    self._stop_event = threading.Event()

    # Communication and logging
    self.communication_log: List[Dict[str, Any]] = []
    self.final_response: Optional[str] = None

    # Initialize log manager
    self.log_manager = get_log_manager()
|
|
72
|
+
|
|
73
|
+
def register_agent(self, agent):
    """Attach *agent* to this orchestrator, indexed by its ``agent_id``.

    Args:
        agent: MassAgent instance to register
    """
    with self._lock:
        aid = agent.agent_id
        self.agents[aid] = agent
        self.agent_states[aid] = agent.state
        # Back-reference so the agent can report answers/votes to us.
        agent.orchestrator = self
|
|
84
|
+
|
|
85
|
+
def _log_event(self, event_type: str, data: Dict[str, Any]):
|
|
86
|
+
"""Log an orchestrator event."""
|
|
87
|
+
self.communication_log.append(
|
|
88
|
+
{"timestamp": time.time(), "event_type": event_type, "data": data}
|
|
89
|
+
)
|
|
90
|
+
|
|
91
|
+
def update_agent_answer(self, agent_id: int, answer: str):
    """Record a new running answer for an agent and log the update.

    Args:
        agent_id: ID of the agent updating their answer
        answer: New answer content

    Raises:
        ValueError: if *agent_id* was never registered.
    """
    with self._lock:
        if agent_id not in self.agent_states:
            raise ValueError(f"Agent {agent_id} not registered")

        state = self.agent_states[agent_id]
        old_answer_length = len(state.curr_answer)
        state.add_update(answer)

        # Console feedback: sizes plus a clipped preview of the new answer.
        if len(answer) > 100:
            preview = answer[:100] + "..."
        else:
            preview = answer
        print(
            f"๐ Agent {agent_id} answer updated ({old_answer_length} โ {len(answer)} chars)"
        )
        print(f" ๐ {preview}")

        # Forward to the comprehensive logging system when one is configured.
        if self.log_manager:
            self.log_manager.log_agent_answer_update(
                agent_id=agent_id,
                answer=answer,
                phase=self.system_state.phase,
                orchestrator=self,
            )

        self._log_event(
            "answer_updated",
            {"agent_id": agent_id, "answer": answer, "timestamp": time.time()},
        )
|
|
125
|
+
|
|
126
|
+
def _get_current_vote_counts(self) -> Counter:
|
|
127
|
+
"""
|
|
128
|
+
Get current vote counts based on agent states' vote_target.
|
|
129
|
+
Returns Counter of agent_id -> vote_count for ALL agents (0 if no votes).
|
|
130
|
+
"""
|
|
131
|
+
current_votes = []
|
|
132
|
+
for agent_id, state in self.agent_states.items():
|
|
133
|
+
if state.status == "voted" and state.curr_vote is not None:
|
|
134
|
+
current_votes.append(state.curr_vote.target_id)
|
|
135
|
+
|
|
136
|
+
# Create counter from actual votes
|
|
137
|
+
vote_counts = Counter(current_votes)
|
|
138
|
+
|
|
139
|
+
# Ensure all agents are represented (0 if no votes)
|
|
140
|
+
for agent_id in self.agent_states.keys():
|
|
141
|
+
if agent_id not in vote_counts:
|
|
142
|
+
vote_counts[agent_id] = 0
|
|
143
|
+
|
|
144
|
+
return vote_counts
|
|
145
|
+
|
|
146
|
+
def _get_current_voted_agents_count(self) -> int:
|
|
147
|
+
"""
|
|
148
|
+
Get count of agents who currently have status "voted".
|
|
149
|
+
"""
|
|
150
|
+
return len([s for s in self.agent_states.values() if s.status == "voted"])
|
|
151
|
+
|
|
152
|
+
def _get_voting_status(self) -> Dict[str, Any]:
|
|
153
|
+
"""Get current voting status and distribution."""
|
|
154
|
+
vote_counts = self._get_current_vote_counts()
|
|
155
|
+
total_agents = len(self.agents)
|
|
156
|
+
failed_agents = len(
|
|
157
|
+
[s for s in self.agent_states.values() if s.status == "failed"]
|
|
158
|
+
)
|
|
159
|
+
votable_agents = total_agents - failed_agents
|
|
160
|
+
voted_agents = self._get_current_voted_agents_count()
|
|
161
|
+
|
|
162
|
+
return {
|
|
163
|
+
"vote_distribution": dict(vote_counts),
|
|
164
|
+
"total_agents": total_agents,
|
|
165
|
+
"failed_agents": failed_agents,
|
|
166
|
+
"votable_agents": votable_agents,
|
|
167
|
+
"voted_agents": voted_agents,
|
|
168
|
+
"votes_needed_for_consensus": max(
|
|
169
|
+
1, int(votable_agents * self.consensus_threshold)
|
|
170
|
+
),
|
|
171
|
+
"leading_agent": vote_counts.most_common(1)[0] if vote_counts else None,
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
def get_system_status(self) -> Dict[str, Any]:
    """Snapshot the whole system: phase, per-agent state, voting, runtime."""
    started = self.system_state.start_time
    runtime = (time.time() - started) if started else 0

    def _agent_summary(state):
        # Compact per-agent view consumed by displays and logs.
        return {
            "status": state.status,
            "update_times": len(state.updated_answers),
            "chat_round": state.chat_round,
            "vote_target": state.curr_vote.target_id if state.curr_vote else None,
            "execution_time": state.execution_time,
        }

    return {
        "phase": self.system_state.phase,
        "consensus_reached": self.system_state.consensus_reached,
        "agents": {
            aid: _agent_summary(state) for aid, state in self.agent_states.items()
        },
        "voting_status": self._get_voting_status(),
        "runtime": runtime,
    }
|
|
198
|
+
|
|
199
|
+
def cast_vote(self, voter_id: int, target_id: int, reason: str = ""):
    """
    Record a vote from one agent for another agent's solution.

    Side effects (in order): appends a VoteRecord to the permanent
    ``self.votes`` history, flips the voter's status to "voted", mirrors
    the change into the streaming display and the log manager when
    present, and appends a "vote_cast" event to the communication log.

    Args:
        voter_id: ID of the agent casting the vote
        target_id: ID of the agent being voted for
        reason: The reason for the vote (optional)

    Raises:
        ValueError: if either *voter_id* or *target_id* is not registered.
    """
    with self._lock:
        logger.info(f"๐ณ๏ธ VOTING: Agent {voter_id} casting vote")

        print(
            f"๐ณ๏ธ VOTE: Agent {voter_id} โ Agent {target_id} ({self.system_state.phase})"
        )
        if reason:
            print(f" ๐ Voting reason: {len(reason)} chars")

        if voter_id not in self.agent_states:
            logger.error(f" โ Invalid voter: Agent {voter_id} not registered")
            raise ValueError(f"Voter agent {voter_id} not registered")
        if target_id not in self.agent_states:
            logger.error(f" โ Invalid target: Agent {target_id} not registered")
            raise ValueError(f"Target agent {target_id} not registered")

        # Check current vote status
        previous_vote = self.agent_states[voter_id].curr_vote
        # Log vote change type
        if previous_vote:
            logger.info(
                f" ๐ Agent {voter_id} changed vote from Agent {previous_vote.target_id} to Agent {target_id}"
            )
        else:
            logger.info(f" โจ Agent {voter_id} new vote for Agent {target_id}")

        # Add vote record to permanent history (only for actual changes)
        vote = VoteRecord(
            voter_id=voter_id,
            target_id=target_id,
            reason=reason,
            timestamp=time.time(),
        )

        # record the vote in the system's vote history
        self.votes.append(vote)

        # Update agent state
        old_status = self.agent_states[voter_id].status
        self.agent_states[voter_id].status = "voted"
        self.agent_states[voter_id].curr_vote = vote
        self.agent_states[voter_id].cast_votes.append(vote)
        self.agent_states[voter_id].execution_end_time = time.time()

        # Update streaming display
        if self.streaming_orchestrator:
            self.streaming_orchestrator.update_agent_status(voter_id, "voted")
            self.streaming_orchestrator.update_agent_vote_target(
                voter_id, target_id
            )
            # Update agent update count
            update_count = len(self.agent_states[voter_id].updated_answers)
            self.streaming_orchestrator.update_agent_update_count(
                voter_id, update_count
            )
            # Update vote cast counts for all agents
            for agent_id, agent_state in self.agent_states.items():
                vote_cast_count = len(agent_state.cast_votes)
                self.streaming_orchestrator.update_agent_votes_cast(
                    agent_id, vote_cast_count
                )
            vote_counts = self._get_current_vote_counts()
            self.streaming_orchestrator.update_vote_distribution(dict(vote_counts))
            vote_msg = f"๐ Agent {voter_id} voted for Agent {target_id}"
            self.streaming_orchestrator.add_system_message(vote_msg)

        # Log to the comprehensive logging system
        if self.log_manager:
            self.log_manager.log_voting_event(
                voter_id=voter_id,
                target_id=target_id,
                phase=self.system_state.phase,
                reason=reason,
                orchestrator=self,
            )
            self.log_manager.log_agent_status_change(
                agent_id=voter_id,
                old_status=old_status,
                new_status="voted",
                phase=self.system_state.phase,
            )

        # Show current vote distribution
        vote_counts = self._get_current_vote_counts()
        voted_agents_count = self._get_current_voted_agents_count()
        logger.info(f" ๐ Vote distribution: {dict(vote_counts)}")
        logger.info(
            f" ๐ Voting progress: {voted_agents_count}/{len(self.agent_states)} agents voted"
        )

        # Calculate consensus requirements
        # NOTE(review): this uses total agent count, while _check_consensus
        # uses the *votable* (non-failed) count -- confirm the discrepancy
        # is intentional; it only affects this log line.
        total_agents = len(self.agent_states)
        votes_needed = max(1, int(total_agents * self.consensus_threshold))
        if vote_counts:
            leading_agent, leading_votes = vote_counts.most_common(1)[0]
            logger.info(
                f" ๐ Leading: Agent {leading_agent} with {leading_votes} votes (need {votes_needed} for consensus)"
            )

        # Log event for internal tracking
        self._log_event(
            "vote_cast",
            {
                "voter_id": voter_id,
                "target_id": target_id,
                "timestamp": vote.timestamp,
                "vote_distribution": dict(vote_counts),
                "total_votes": voted_agents_count,
            },
        )
|
|
318
|
+
|
|
319
|
+
def notify_answer_update(self, agent_id: int, answer: str):
    """
    Called when an agent updates their answer.

    Records the new answer, then restarts every *other* agent whose
    status is "voted", since their vote was based on stale information.

    Args:
        agent_id: ID of the agent that produced the update
        answer: The new answer content

    Returns:
        List of agent IDs that were restarted because of this update.
    """
    logger.info(f"๐ข Agent {agent_id} updated answer")

    # Update the answer in agent state
    self.update_agent_answer(agent_id, answer)

    # Update streaming display
    if self.streaming_orchestrator:
        answer_msg = f"๐ Agent {agent_id} updated answer ({len(answer)} chars)"
        self.streaming_orchestrator.add_system_message(answer_msg)
        # Update agent update count
        update_count = len(self.agent_states[agent_id].updated_answers)
        self.streaming_orchestrator.update_agent_update_count(
            agent_id, update_count
        )

    # CRITICAL FIX: Restart voted agents when any agent shares new updates
    with self._lock:
        restarted_agents = []

        for other_agent_id, state in self.agent_states.items():
            if other_agent_id != agent_id and state.status == "voted":

                # Restart the voted agent
                state.status = "working"
                # This vote should be cleared as answers have been updated
                state.curr_vote = None
                state.execution_start_time = time.time()
                restarted_agents.append(other_agent_id)

                logger.info(
                    f"๐ Agent {other_agent_id} restarted due to update from Agent {agent_id}"
                )

                # Update streaming display
                if self.streaming_orchestrator:
                    self.streaming_orchestrator.update_agent_status(
                        other_agent_id, "working"
                    )
                    self.streaming_orchestrator.update_agent_vote_target(
                        other_agent_id, None
                    )  # Clear vote target in display
                    # Update agent update count for restarted agent
                    update_count = len(
                        self.agent_states[other_agent_id].updated_answers
                    )
                    self.streaming_orchestrator.update_agent_update_count(
                        other_agent_id, update_count
                    )
                    restart_msg = (
                        f"๐ Agent {other_agent_id} restarted due to new update"
                    )
                    self.streaming_orchestrator.add_system_message(restart_msg)

                # Log agent restart
                if self.log_manager:
                    self.log_manager.log_agent_restart(
                        agent_id=other_agent_id,
                        reason=f"new_update_from_agent_{agent_id}",
                        phase=self.system_state.phase,
                    )

        if restarted_agents:
            # Note: We don't remove historical votes as self.votes is a permanent record
            # The current vote distribution will automatically reflect the change via curr_vote = None
            logger.info(f"๐ Restarted agents: {restarted_agents}")

            # Update vote distribution in streaming display
            if self.streaming_orchestrator:
                vote_counts = self._get_current_vote_counts()
                self.streaming_orchestrator.update_vote_distribution(
                    dict(vote_counts)
                )
                # Update vote cast counts for all agents to ensure accuracy.
                # FIX: the loop variable previously shadowed the `agent_id`
                # parameter; it now uses a distinct name.
                for aid, agent_state in self.agent_states.items():
                    vote_cast_count = len(agent_state.cast_votes)
                    self.streaming_orchestrator.update_agent_votes_cast(
                        aid, vote_cast_count
                    )

        return restarted_agents
|
|
405
|
+
|
|
406
|
+
def _check_consensus(self) -> bool:
    """
    Check if consensus has been reached based on current votes.
    Improved to handle edge cases and ensure proper consensus calculation.

    Returns:
        True if consensus was reached (and ``_reach_consensus`` was
        invoked with the winner), False otherwise.
    """
    with self._lock:
        total_agents = len(self.agents)
        failed_agents_count = len(
            [s for s in self.agent_states.values() if s.status == "failed"]
        )
        # Only non-failed agents count toward the consensus denominator.
        votable_agents_count = total_agents - failed_agents_count

        # Edge case: no votable agents
        if votable_agents_count == 0:
            logger.warning("โ ๏ธ No votable agents available for consensus")
            return False

        # Edge case: only one votable agent
        if votable_agents_count == 1:
            working_agents = [
                aid
                for aid, state in self.agent_states.items()
                if state.status == "working"
            ]
            if not working_agents:  # The single agent has voted
                # Find the single votable agent
                votable_agent = [
                    aid
                    for aid, state in self.agent_states.items()
                    if state.status != "failed"
                ][0]
                logger.info(f"๐ฏ Single agent consensus: Agent {votable_agent}")
                self._reach_consensus(votable_agent)
                return True
            return False

        vote_counts = self._get_current_vote_counts()
        # max(1, ...) guarantees at least one vote is needed even when the
        # configured threshold is 0.0.
        votes_needed = max(1, int(votable_agents_count * self.consensus_threshold))

        if vote_counts and vote_counts.most_common(1)[0][1] >= votes_needed:
            winning_agent_id = vote_counts.most_common(1)[0][0]
            winning_votes = vote_counts.most_common(1)[0][1]

            # Ensure the winning agent is still votable (not failed)
            if self.agent_states[winning_agent_id].status == "failed":
                logger.warning(
                    f"โ ๏ธ Winning agent {winning_agent_id} has failed - recalculating"
                )
                return False

            logger.info(
                f"โ Consensus reached: Agent {winning_agent_id} with {winning_votes}/{votable_agents_count} votes"
            )
            self._reach_consensus(winning_agent_id)
            return True

        return False
|
|
463
|
+
|
|
464
|
+
def mark_agent_failed(self, agent_id: int, reason: str = ""):
    """
    Mark an agent as failed and record the transition everywhere.

    Args:
        agent_id: ID of the agent to mark as failed
        reason: Optional reason for the failure

    Raises:
        ValueError: if *agent_id* was never registered.
    """
    with self._lock:
        logger.info(f"๐ฅ AGENT FAILURE: Agent {agent_id} marked as failed")

        print(f" ๐ฅ MARK_FAILED: Agent {agent_id}")
        print(f" ๐ Current phase: {self.system_state.phase}")

        if agent_id not in self.agent_states:
            logger.error(f" โ Invalid agent: Agent {agent_id} not registered")
            raise ValueError(f"Agent {agent_id} not registered")

        # Flip the agent's state and close out its execution window.
        state = self.agent_states[agent_id]
        old_status = state.status
        state.status = "failed"
        state.execution_end_time = time.time()

        # Mirror the failure into the live display, if any.
        if self.streaming_orchestrator:
            self.streaming_orchestrator.update_agent_status(agent_id, "failed")
            if reason:
                failure_msg = f"๐ฅ Agent {agent_id} failed: {reason}"
            else:
                failure_msg = f"๐ฅ Agent {agent_id} failed"
            self.streaming_orchestrator.add_system_message(failure_msg)

        # Comprehensive logging system, when configured.
        if self.log_manager:
            self.log_manager.log_agent_status_change(
                agent_id=agent_id,
                old_status=old_status,
                new_status="failed",
                phase=self.system_state.phase,
            )

        # Internal event log.
        self._log_event(
            "agent_failed",
            {
                "agent_id": agent_id,
                "reason": reason,
                "timestamp": time.time(),
                "old_status": old_status,
            },
        )

        # Summarize where every agent stands after this failure.
        status_counts = Counter(s.status for s in self.agent_states.values())
        logger.info(f" ๐ Status distribution: {dict(status_counts)}")
        logger.info(
            f" ๐ Failed agents: {status_counts.get('failed', 0)}/{len(self.agent_states)} total"
        )
|
|
525
|
+
|
|
526
|
+
def _reach_consensus(self, winning_agent_id: int):
|
|
527
|
+
"""Mark consensus as reached and finalize the system."""
|
|
528
|
+
old_phase = self.system_state.phase
|
|
529
|
+
self.system_state.consensus_reached = True
|
|
530
|
+
self.system_state.representative_agent_id = winning_agent_id
|
|
531
|
+
self.system_state.phase = "consensus"
|
|
532
|
+
|
|
533
|
+
# Update streaming orchestrator if available
|
|
534
|
+
if self.streaming_orchestrator:
|
|
535
|
+
vote_distribution = dict(self._get_current_vote_counts())
|
|
536
|
+
self.streaming_orchestrator.update_consensus_status(
|
|
537
|
+
winning_agent_id, vote_distribution
|
|
538
|
+
)
|
|
539
|
+
self.streaming_orchestrator.update_phase(old_phase, "consensus")
|
|
540
|
+
|
|
541
|
+
# Log to the comprehensive logging system
|
|
542
|
+
if self.log_manager:
|
|
543
|
+
vote_distribution = dict(self._get_current_vote_counts())
|
|
544
|
+
self.log_manager.log_consensus_reached(
|
|
545
|
+
winning_agent_id=winning_agent_id,
|
|
546
|
+
vote_distribution=vote_distribution,
|
|
547
|
+
is_fallback=False,
|
|
548
|
+
phase=self.system_state.phase,
|
|
549
|
+
)
|
|
550
|
+
self.log_manager.log_phase_transition(
|
|
551
|
+
old_phase=old_phase,
|
|
552
|
+
new_phase="consensus",
|
|
553
|
+
additional_data={
|
|
554
|
+
"consensus_reached": True,
|
|
555
|
+
"winning_agent_id": winning_agent_id,
|
|
556
|
+
"is_fallback": False,
|
|
557
|
+
},
|
|
558
|
+
)
|
|
559
|
+
|
|
560
|
+
self._log_event(
|
|
561
|
+
"consensus_reached",
|
|
562
|
+
{
|
|
563
|
+
"winning_agent_id": winning_agent_id,
|
|
564
|
+
"fallback_to_majority": False,
|
|
565
|
+
"final_vote_distribution": dict(self._get_current_vote_counts()),
|
|
566
|
+
},
|
|
567
|
+
)
|
|
568
|
+
|
|
569
|
+
def export_detailed_session_log(self) -> Dict[str, Any]:
    """
    Export complete detailed session information for comprehensive analysis.
    Includes all outputs, metrics, and evaluation results.

    Returns:
        A JSON-serializable dict with session metadata, task info, system
        configuration, per-agent details, voting analysis, the raw
        communication log, and a summarized event list.
    """
    session_log = {
        # When and how long the session ran; end_time may be None if
        # the session is still in progress.
        "session_metadata": {
            "session_id": (
                f"mass_session_{int(self.system_state.start_time)}"
                if self.system_state.start_time
                else None
            ),
            "start_time": self.system_state.start_time,
            "end_time": self.system_state.end_time,
            "total_duration": (
                (self.system_state.end_time - self.system_state.start_time)
                if self.system_state.start_time and self.system_state.end_time
                else None
            ),
            "timestamp": datetime.now().isoformat(),
            "system_version": "MassGen v1.0",
        },
        # Task fields are None until start_task() has been called.
        "task_information": {
            "question": (
                self.system_state.task.question if self.system_state.task else None
            ),
            "task_id": (
                self.system_state.task.task_id if self.system_state.task else None
            ),
            "context": (
                self.system_state.task.context if self.system_state.task else None
            ),
        },
        "system_configuration": {
            "max_duration": self.max_duration,
            "consensus_threshold": self.consensus_threshold,
            "max_debate_rounds": self.max_debate_rounds,
            "agents": [agent.model for agent in self.agents.values()],
        },
        # Per-agent lifecycle summary; answer bodies are reduced to lengths.
        "agent_details": {
            agent_id: {
                "status": state.status,
                "updates_count": len(state.updated_answers),
                "chat_length": len(state.chat_history),
                "chat_round": state.chat_round,
                "vote_target": (
                    state.curr_vote.target_id if state.curr_vote else None
                ),
                "execution_time": state.execution_time,
                "execution_start_time": state.execution_start_time,
                "execution_end_time": state.execution_end_time,
                "updated_answers": [
                    {
                        "timestamp": update.timestamp,
                        "status": update.status,
                        "answer_length": len(update.answer),
                    }
                    for update in state.updated_answers
                ],
            }
            for agent_id, state in self.agent_states.items()
        },
        # Full permanent vote history (reasons reduced to lengths).
        "voting_analysis": {
            "vote_records": [
                {
                    "voter_id": vote.voter_id,
                    "target_id": vote.target_id,
                    "timestamp": vote.timestamp,
                    "reason_length": len(vote.reason) if vote.reason else 0,
                }
                for vote in self.votes
            ],
            "vote_timeline": [
                {
                    "timestamp": vote.timestamp,
                    "event": f"Agent {vote.voter_id} โ Agent {vote.target_id}",
                }
                for vote in self.votes
            ],
        },
        "communication_log": self.communication_log,
        # Same events as communication_log, but with bulky values
        # replaced by their sizes for quick scanning.
        "system_events": [
            {
                "timestamp": entry["timestamp"],
                "event_type": entry["event_type"],
                "data_summary": {
                    k: (len(v) if isinstance(v, (str, list, dict)) else v)
                    for k, v in entry["data"].items()
                },
            }
            for entry in self.communication_log
        ],
    }

    return session_log
|
|
664
|
+
|
|
665
|
+
def start_task(self, task: TaskInput):
    """
    Initialize the system for a new task and run the main workflow.

    Resets every registered agent's state, clears the previous session's
    votes and communication log, then hands off to the workflow loop.

    Args:
        task: TaskInput containing the problem to solve

    Returns:
        response: Dict[str, Any] containing the final answer to the task's question, and relevant information
    """
    with self._lock:
        logger.info("๐ฏ ORCHESTRATOR: Starting new task")
        logger.info(f" Task ID: {task.task_id}")
        logger.info(f" Question preview: {task.question}")
        logger.info(f" Registered agents: {list(self.agents.keys())}")
        logger.info(f" Max duration: {self.max_duration}")
        logger.info(f" Consensus threshold: {self.consensus_threshold}")

        self.system_state.task = task
        self.system_state.start_time = time.time()
        self.system_state.phase = "collaboration"
        self.final_response = None

        # Reset all agent states
        for agent_id, agent in self.agents.items():
            agent.state = AgentState(agent_id=agent_id)
            self.agent_states[agent_id] = agent.state
            # Initialize the saved chat
            agent.state.chat_history = []

            # Initialize streaming display for each agent
            if self.streaming_orchestrator:
                self.streaming_orchestrator.set_agent_model(agent_id, agent.model)
                self.streaming_orchestrator.update_agent_status(agent_id, "working")
                # Initialize agent update count
                self.streaming_orchestrator.update_agent_update_count(agent_id, 0)

        # Clear previous session data
        self.votes.clear()
        self.communication_log.clear()

        # Initialize streaming display system message
        if self.streaming_orchestrator:
            self.streaming_orchestrator.update_phase("unknown", "collaboration")
            # Initialize debate rounds to 0
            self.streaming_orchestrator.update_debate_rounds(0)
            init_msg = f"๐ Starting MassGen task with {len(self.agents)} agents"
            self.streaming_orchestrator.add_system_message(init_msg)

        self._log_event(
            "task_started", {"task_id": task.task_id, "question": task.question}
        )
        logger.info("โ Task initialization completed successfully")

        # Run the workflow
        return self._run_mass_workflow(task)
|
|
721
|
+
|
|
722
|
+
def _run_mass_workflow(self, task: TaskInput) -> Dict[str, Any]:
    """
    Run the MassGen workflow with dynamic agent restart support:
    1. All agents work in parallel
    2. Agents restart when others share updates (if they had voted)
    3. When all have voted, check consensus
    4. If no consensus, restart all for debate
    5. If consensus, representative presents final answer

    Args:
        task: The task all agents collaborate on.

    Returns:
        The session-summary dict produced by ``_finalize_session`` (answer,
        consensus flags, vote distribution, logs).
    """
    logger.info("๐ Starting MassGen workflow")

    debate_rounds = 0
    start_time = time.time()

    # Main coordination loop; exits via timeout, consensus, max debate
    # rounds, or an external stop signal on self._stop_event.
    while not self._stop_event.is_set():
        # Check timeout - wall-clock budget for the whole collaboration.
        if time.time() - start_time > self.max_duration:
            logger.warning("โฐ Maximum duration reached - forcing consensus")
            self._force_consensus_by_timeout()
            # Representative will present final answer
            self._present_final_answer(task)
            break

        # Run all agents with dynamic restart support
        # Restart all agents if they have been updated
        logger.info(f"๐ข Starting collaboration round {debate_rounds + 1}")
        self._run_all_agents_with_dynamic_restart(task)

        # Check if all votable agents have voted
        if self._all_agents_voted():
            logger.info("๐ณ๏ธ All agents have voted - checking consensus")

            if self._check_consensus():
                logger.info("๐ Consensus reached!")
                # Representative will present final answer
                self._present_final_answer(task)
                break
            else:
                # No consensus - start debate round
                debate_rounds += 1
                # Update streaming display with new debate round count
                if self.streaming_orchestrator:
                    self.streaming_orchestrator.update_debate_rounds(debate_rounds)

                # Incremented count checked first, so at most
                # max_debate_rounds full debate rounds actually run.
                if debate_rounds > self.max_debate_rounds:
                    logger.warning(
                        f"โ ๏ธ Maximum debate rounds ({self.max_debate_rounds}) reached"
                    )
                    self._force_consensus_by_timeout()
                    # Representative will present final answer
                    self._present_final_answer(task)
                    break

                logger.info(
                    f"๐ฃ๏ธ No consensus - starting debate round {debate_rounds}"
                )
                # Add debate instruction to the chat history and will be restarted in the next round
                self._restart_all_agents_for_debate()
        else:
            # Still waiting for some agents to vote
            time.sleep(self.status_check_interval)

    return self._finalize_session()
|
|
785
|
+
|
|
786
|
+
def _run_all_agents_with_dynamic_restart(self, task: TaskInput):
    """
    Run all agents in parallel with support for dynamic restarts.
    This approach handles agents restarting mid-execution.

    Args:
        task: The task each agent works on via ``_run_single_agent``.

    Returns when either all votable agents have voted or every submitted
    future has completed; agent results/errors are recorded as side effects
    on ``self.agent_states``.
    """
    # Maps agent_id -> in-flight Future; an agent appears here only while
    # its work is actually running.
    active_futures = {}
    # One worker per agent so no agent ever waits for a pool slot.
    executor = ThreadPoolExecutor(max_workers=len(self.agents))

    try:
        # Start all working agents (failed agents are never resubmitted).
        for agent_id in self.agents.keys():
            if self.agent_states[agent_id].status not in ["failed"]:
                self._start_agent_if_working(
                    agent_id, task, executor, active_futures
                )

        # Monitor agents and handle restarts
        while active_futures and not self._all_agents_voted():
            completed_futures = []

            # Check for completed agents.
            # list(...) copies the items so the dict can be mutated below.
            for agent_id, future in list(active_futures.items()):
                if future.done():
                    completed_futures.append(agent_id)
                    try:
                        future.result()  # Get result and handle exceptions
                    except Exception as e:
                        logger.error(f"โ Agent {agent_id} failed: {e}")
                        self.mark_agent_failed(agent_id, str(e))

            # Remove completed futures
            for agent_id in completed_futures:
                del active_futures[agent_id]

            # Check for agents that need to restart (status changed back to "working")
            # e.g. a voted agent re-activated because a peer shared an update.
            for agent_id in self.agents.keys():
                if (
                    agent_id not in active_futures
                    and self.agent_states[agent_id].status == "working"
                ):
                    self._start_agent_if_working(
                        agent_id, task, executor, active_futures
                    )

            time.sleep(0.1)  # Small delay to prevent busy waiting

    finally:
        # Cancel any remaining futures.
        # NOTE(review): cancel() only stops futures that have not started;
        # shutdown(wait=True) still blocks until running agents finish.
        for future in active_futures.values():
            future.cancel()
        executor.shutdown(wait=True)
|
|
837
|
+
|
|
838
|
+
def _start_agent_if_working(
    self,
    agent_id: int,
    task: TaskInput,
    executor: ThreadPoolExecutor,
    active_futures: Dict,
):
    """Submit an agent to the executor when it is "working" and not running.

    A no-op when the agent is not in "working" status or already has an
    in-flight future registered in ``active_futures``.
    """
    state = self.agent_states[agent_id]
    if state.status != "working" or agent_id in active_futures:
        return

    # Stamp the start time, then track the submitted future by agent id.
    state.execution_start_time = time.time()
    active_futures[agent_id] = executor.submit(
        self._run_single_agent, agent_id, task
    )
    logger.info(f"๐ค Agent {agent_id} started/restarted")
|
|
855
|
+
|
|
856
|
+
def _run_single_agent(self, agent_id: int, task: TaskInput):
    """Execute one agent's ``work_on_task`` call and record the outcome.

    Any exception raised while the agent works is caught, logged, and
    converted into a failed state via ``mark_agent_failed``.
    """
    agent = self.agents[agent_id]
    try:
        logger.info(f"๐ค Agent {agent_id} starting work")

        state = self.agent_states[agent_id]

        # Let the agent work from its current conversation state.
        new_messages = agent.work_on_task(task)

        # Persist the turn into the shared per-agent state.
        state.chat_history.append(new_messages)
        state.chat_round = agent.state.chat_round

        # Mirror progress into the live display, if one is attached.
        display = self.streaming_orchestrator
        if display:
            display.update_agent_chat_round(agent_id, agent.state.chat_round)
            display.update_agent_update_count(
                agent_id, len(state.updated_answers)
            )

        logger.info(
            f"โ Agent {agent_id} completed work with status: {state.status}"
        )

    except Exception as e:
        logger.error(f"โ Agent {agent_id} failed: {e}")
        self.mark_agent_failed(agent_id, str(e))
|
|
887
|
+
|
|
888
|
+
def _all_agents_voted(self) -> bool:
|
|
889
|
+
"""Check if all votable agents have voted."""
|
|
890
|
+
votable_agents = [
|
|
891
|
+
aid
|
|
892
|
+
for aid, state in self.agent_states.items()
|
|
893
|
+
if state.status not in ["failed"]
|
|
894
|
+
]
|
|
895
|
+
voted_agents = [
|
|
896
|
+
aid for aid, state in self.agent_states.items() if state.status == "voted"
|
|
897
|
+
]
|
|
898
|
+
|
|
899
|
+
return len(voted_agents) == len(votable_agents) and len(votable_agents) > 0
|
|
900
|
+
|
|
901
|
+
def _restart_all_agents_for_debate(self):
    """
    Restart all agents for debate by resetting their status
    We don't clear vote target when restarting for debate as answers are not updated

    Side effects: flips every non-failed agent's status back to "working",
    resets the display's consensus indicator, records the phase transition
    in the log manager, and sets the system phase to "collaboration".
    """
    logger.info("๐ Restarting all agents for debate")

    with self._lock:

        # Update streaming display
        if self.streaming_orchestrator:
            self.streaming_orchestrator.reset_consensus()
            self.streaming_orchestrator.update_phase(
                self.system_state.phase, "collaboration"
            )
            self.streaming_orchestrator.add_system_message(
                "๐ฃ๏ธ Starting debate phase - no consensus reached"
            )

        # Log debate start
        if self.log_manager:
            self.log_manager.log_debate_started(phase="collaboration")
            self.log_manager.log_phase_transition(
                old_phase=self.system_state.phase,
                new_phase="collaboration",
                additional_data={
                    "reason": "no_consensus_reached",
                    "debate_round": True,
                },
            )

        # Reset agent statuses and add debate instruction to conversation
        # Note: We don't clear self.votes as it's a historical record
        for agent_id, state in self.agent_states.items():
            if state.status not in ["failed"]:
                # NOTE(review): old_status is captured but never used below.
                old_status = state.status
                state.status = "working"
                # We don't clear vote target when restarting for debate
                # state.curr_vote = None

                # Update streaming display for each agent
                if self.streaming_orchestrator:
                    self.streaming_orchestrator.update_agent_status(
                        agent_id, "working"
                    )

                # Log agent restart
                if self.log_manager:
                    self.log_manager.log_agent_restart(
                        agent_id=agent_id,
                        reason="debate_phase_restart",
                        phase="collaboration",
                    )

        # Update system phase
        self.system_state.phase = "collaboration"
|
|
957
|
+
|
|
958
|
+
def _present_final_answer(self, task: TaskInput):
    """
    Run the final presentation by the representative agent.

    Runs one extra inference on the representative agent, combining the
    task input with all agents' answers/votes, and stores the result in
    ``self.final_response``. On failure the error text itself becomes the
    final response rather than raising.

    Args:
        task: The original task being answered.
    """
    representative_id = self.system_state.representative_agent_id
    # NOTE(review): falsiness check - a representative id of 0 would also be
    # treated as "not selected"; verify agent ids can never be 0/empty.
    if not representative_id:
        logger.error("No representative agent selected")
        return

    logger.info(f"๐ฏ Agent {representative_id} presenting final answer")

    try:
        representative_agent = self.agents[representative_id]
        # if self.final_response:
        #     logger.info(f"โ Final response already exists")
        #     return

        # if representative_agent.state.curr_answer:
        #     self.final_response = representative_agent.state.curr_answer
        # else:

        # Run one more inference to generate the final answer
        _, user_input = representative_agent._get_task_input(task)

        messages = [
            {
                "role": "system",
                "content": """
You are given a task and multiple agents' answers and their votes.
Please incorporate these information and provide a final BEST answer to the original message.
""",
            },
            {
                "role": "user",
                "content": user_input
                + """
Please provide the final BEST answer to the original message by incorporating these information.
The final answer must be self-contained, complete, well-sourced, compelling, and ready to serve as the definitive final response.
""",
            },
        ]
        result = representative_agent.process_message(messages)
        self.final_response = result.text

        # Mark the session as completed and stamp the end time.
        self.system_state.phase = "completed"
        self.system_state.end_time = time.time()

        logger.info(f"โ Final presentation completed by Agent {representative_id}")

    except Exception as e:
        logger.error(f"โ Final presentation failed: {e}")
        self.final_response = f"Error in final presentation: {str(e)}"
|
|
1011
|
+
|
|
1012
|
+
def _force_consensus_by_timeout(self):
    """
    Force consensus selection when maximum duration is reached.

    Picks the agent with the most votes; when nobody has voted yet, falls
    back to the first agent still in "working" status, and finally to the
    first registered agent. Delegates the state transition to
    ``_reach_consensus``.
    """
    logger.warning("โฐ Forcing consensus due to timeout")

    with self._lock:
        # Find agent with most votes, or earliest voter in case of tie
        vote_counts = self._get_current_vote_counts()

        if vote_counts:
            # Select agent with most votes (Counter.most_common returns
            # ties in insertion order, i.e. earliest to reach that count).
            winning_agent_id = vote_counts.most_common(1)[0][0]
            logger.info(
                f" Selected Agent {winning_agent_id} with {vote_counts[winning_agent_id]} votes"
            )
        else:
            # No votes - pick the first still-working agent, else fall back
            # to the first registered agent. next() on generators avoids
            # materializing throwaway lists just to take element 0.
            winning_agent_id = next(
                (
                    aid
                    for aid, state in self.agent_states.items()
                    if state.status == "working"
                ),
                next(iter(self.agents)),
            )
            logger.info(
                f" No votes - selected Agent {winning_agent_id} as fallback"
            )

        self._reach_consensus(winning_agent_id)
|
|
1043
|
+
|
|
1044
|
+
def _finalize_session(self) -> Dict[str, Any]:
    """
    Finalize the session and return comprehensive results.

    Returns:
        A user-facing dict with the final answer, consensus flags,
        representative agent, duration, a summary (agent/vote counts),
        and the detailed system logs. Also persists agent states and,
        when the log manager is blocking, a ``result.json`` snapshot.
    """
    logger.info("๐ Finalizing session")

    with self._lock:
        # Stamp the end time only if a consensus path hasn't already done so.
        if not self.system_state.end_time:
            self.system_state.end_time = time.time()

        session_duration = (
            self.system_state.end_time - self.system_state.start_time
            if self.system_state.start_time
            else 0
        )

        # Save final agent states to files
        if self.log_manager:
            self.log_manager.save_agent_states(self)
            self.log_manager.log_task_completion(
                {
                    "final_answer": self.final_response,
                    "consensus_reached": self.system_state.consensus_reached,
                    "representative_agent_id": self.system_state.representative_agent_id,
                    "session_duration": session_duration,
                }
            )

        # Prepare clean, user-facing result
        result = {
            "answer": self.final_response or "No final answer generated",
            "consensus_reached": self.system_state.consensus_reached,
            "representative_agent_id": self.system_state.representative_agent_id,
            "session_duration": session_duration,
            "summary": {
                "total_agents": len(self.agents),
                "failed_agents": len(
                    [s for s in self.agent_states.values() if s.status == "failed"]
                ),
                "total_votes": len(self.votes),
                "final_vote_distribution": dict(self._get_current_vote_counts()),
            },
            "system_logs": self.export_detailed_session_log(),
        }

        # Save result to result.json in the session directory
        # (skipped in non-blocking mode; failures are logged, not raised).
        if self.log_manager and not self.log_manager.non_blocking:
            try:
                result_file = self.log_manager.session_dir / "result.json"
                with open(result_file, "w", encoding="utf-8") as f:
                    # default=str stringifies non-JSON types (e.g. datetimes)
                    json.dump(result, f, indent=2, ensure_ascii=False, default=str)
                logger.info(f"๐พ Result saved to {result_file}")
            except Exception as e:
                logger.warning(f"โ ๏ธ Failed to save result.json: {e}")

        logger.info(f"โ Session completed in {session_duration:.2f} seconds")
        logger.info(f" Consensus: {result['consensus_reached']}")
        logger.info(f" Representative: Agent {result['representative_agent_id']}")

        return result
|
|
1104
|
+
|
|
1105
|
+
def cleanup(self):
    """
    Clean up resources and stop all agents.

    Signals the stop event, then best-effort runs each teardown step in
    order; a failure in one step is logged and does not block the rest.
    """
    logger.info("๐งน Cleaning up orchestrator resources")
    self._stop_event.set()

    # Teardown steps as (enabled, action, success message, failure label).
    # Order matters: agent states must be saved before the log manager
    # itself is torn down.
    teardown_steps = [
        (
            bool(self.log_manager and self.agent_states),
            lambda: self.log_manager.save_agent_states(self),
            "โ Final agent states saved",
            "saving final agent states",
        ),
        (
            bool(self.log_manager),
            lambda: self.log_manager.cleanup(),
            "โ Log manager cleaned up",
            "cleaning up log manager",
        ),
        (
            bool(self.streaming_orchestrator),
            lambda: self.streaming_orchestrator.cleanup(),
            "โ Streaming orchestrator cleaned up",
            "cleaning up streaming orchestrator",
        ),
    ]

    for enabled, action, ok_message, failure_label in teardown_steps:
        if not enabled:
            continue
        try:
            action()
            logger.info(ok_message)
        except Exception as e:
            logger.warning(f"โ ๏ธ Error {failure_label}: {e}")

    # No longer using _agent_threads since we use ThreadPoolExecutor in workflow methods
    # The executor is properly shut down in _run_all_agents_with_dynamic_restart
    logger.info("โ Orchestrator cleanup completed")