massgen-0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of massgen might be problematic.

Files changed (76)
  1. massgen/__init__.py +94 -0
  2. massgen/agent_config.py +507 -0
  3. massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
  4. massgen/backend/Function calling openai responses.md +1161 -0
  5. massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
  6. massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
  7. massgen/backend/__init__.py +25 -0
  8. massgen/backend/base.py +180 -0
  9. massgen/backend/chat_completions.py +228 -0
  10. massgen/backend/claude.py +661 -0
  11. massgen/backend/gemini.py +652 -0
  12. massgen/backend/grok.py +187 -0
  13. massgen/backend/response.py +397 -0
  14. massgen/chat_agent.py +440 -0
  15. massgen/cli.py +686 -0
  16. massgen/configs/README.md +293 -0
  17. massgen/configs/creative_team.yaml +53 -0
  18. massgen/configs/gemini_4o_claude.yaml +31 -0
  19. massgen/configs/news_analysis.yaml +51 -0
  20. massgen/configs/research_team.yaml +51 -0
  21. massgen/configs/single_agent.yaml +18 -0
  22. massgen/configs/single_flash2.5.yaml +44 -0
  23. massgen/configs/technical_analysis.yaml +51 -0
  24. massgen/configs/three_agents_default.yaml +31 -0
  25. massgen/configs/travel_planning.yaml +51 -0
  26. massgen/configs/two_agents.yaml +39 -0
  27. massgen/frontend/__init__.py +20 -0
  28. massgen/frontend/coordination_ui.py +945 -0
  29. massgen/frontend/displays/__init__.py +24 -0
  30. massgen/frontend/displays/base_display.py +83 -0
  31. massgen/frontend/displays/rich_terminal_display.py +3497 -0
  32. massgen/frontend/displays/simple_display.py +93 -0
  33. massgen/frontend/displays/terminal_display.py +381 -0
  34. massgen/frontend/logging/__init__.py +9 -0
  35. massgen/frontend/logging/realtime_logger.py +197 -0
  36. massgen/message_templates.py +431 -0
  37. massgen/orchestrator.py +1222 -0
  38. massgen/tests/__init__.py +10 -0
  39. massgen/tests/multi_turn_conversation_design.md +214 -0
  40. massgen/tests/multiturn_llm_input_analysis.md +189 -0
  41. massgen/tests/test_case_studies.md +113 -0
  42. massgen/tests/test_claude_backend.py +310 -0
  43. massgen/tests/test_grok_backend.py +160 -0
  44. massgen/tests/test_message_context_building.py +293 -0
  45. massgen/tests/test_rich_terminal_display.py +378 -0
  46. massgen/tests/test_v3_3agents.py +117 -0
  47. massgen/tests/test_v3_simple.py +216 -0
  48. massgen/tests/test_v3_three_agents.py +272 -0
  49. massgen/tests/test_v3_two_agents.py +176 -0
  50. massgen/utils.py +79 -0
  51. massgen/v1/README.md +330 -0
  52. massgen/v1/__init__.py +91 -0
  53. massgen/v1/agent.py +605 -0
  54. massgen/v1/agents.py +330 -0
  55. massgen/v1/backends/gemini.py +584 -0
  56. massgen/v1/backends/grok.py +410 -0
  57. massgen/v1/backends/oai.py +571 -0
  58. massgen/v1/cli.py +351 -0
  59. massgen/v1/config.py +169 -0
  60. massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
  61. massgen/v1/examples/fast_config.yaml +44 -0
  62. massgen/v1/examples/production.yaml +70 -0
  63. massgen/v1/examples/single_agent.yaml +39 -0
  64. massgen/v1/logging.py +974 -0
  65. massgen/v1/main.py +368 -0
  66. massgen/v1/orchestrator.py +1138 -0
  67. massgen/v1/streaming_display.py +1190 -0
  68. massgen/v1/tools.py +160 -0
  69. massgen/v1/types.py +245 -0
  70. massgen/v1/utils.py +199 -0
  71. massgen-0.0.3.dist-info/METADATA +568 -0
  72. massgen-0.0.3.dist-info/RECORD +76 -0
  73. massgen-0.0.3.dist-info/WHEEL +5 -0
  74. massgen-0.0.3.dist-info/entry_points.txt +2 -0
  75. massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
  76. massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/v1/agent.py ADDED
@@ -0,0 +1,605 @@
import os
import sys
import time
import json
from typing import Callable, Union, Optional, List, Dict
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError

from dotenv import load_dotenv

load_dotenv()

from .types import TaskInput, AgentState, AgentResponse, ModelConfig
from .utils import get_agent_type_from_model, function_to_json
from abc import ABC, abstractmethod
from typing import Any, Callable, Union, Optional, List, Dict, Tuple
from .backends import oai, gemini, grok

# TASK_INSTRUCTION = """
# Please use your expertise and tools (if available) to fully verify if the best CURRENT ANSWER addresses the ORIGINAL MESSAGE.
# - If YES, use the `vote` tool to record your vote and skip the `add_answer` tool.
# - If NO, do additional work first, then use the `add_answer` tool to record a better answer to the ORIGINAL MESSAGE. Make sure you actually call the tool.

# Any answer must be self-contained, complete, well-sourced, compelling, and ready to serve as the definitive final response.
# """

SYSTEM_INSTRUCTION = f"""
You are evaluating answers from multiple agents for the final response to a message.

For every aspect, claim, and reasoning step in the CURRENT ANSWERS, verify correctness, factual accuracy, and completeness using your expertise, reasoning, and available tools.

If the CURRENT ANSWERS fully address the ORIGINAL MESSAGE, use the `vote` tool to record your vote and skip the `add_answer` tool.

If the CURRENT ANSWERS are incomplete, incorrect, or do not fully address the ORIGINAL MESSAGE, conduct any necessary reasoning or research. Then, use the `add_answer` tool to submit a new response.

Your new answer must be self-contained, process-complete, well-sourced, and compelling—ready to serve as the final reply.

**Important**: Be sure to actually call the `add_answer` tool to submit your new answer.

*Note*: The CURRENT TIME is **{time.strftime("%Y-%m-%d %H:%M:%S")}**.
For any time-sensitive requests, use the search tool (if available) rather than relying on prior knowledge.
"""

AGENT_ANSWER_MESSAGE = """
<ORIGINAL MESSAGE>
{task}
<END OF ORIGINAL MESSAGE>

<CURRENT ANSWERS>
{agent_answers}
<END OF CURRENT ANSWERS>
"""

AGENT_ANSWER_AND_VOTE_MESSAGE = """
<ORIGINAL MESSAGE>
{task}
<END OF ORIGINAL MESSAGE>

<CURRENT ANSWERS>
{agent_answers}
<END OF CURRENT ANSWERS>

<CURRENT VOTES>
{agent_votes}
<END OF CURRENT VOTES>
"""

class MassAgent(ABC):
    """
    Abstract base class for all agents in the MassGen system.

    All agent implementations must inherit from this class and implement
    the required methods while following the standardized workflow.
    """

    def __init__(
        self,
        agent_id: int,
        orchestrator=None,
        model_config: Optional[ModelConfig] = None,
        stream_callback: Optional[Callable] = None,
        **kwargs,
    ):
        """
        Initialize the agent with configuration parameters.

        Args:
            agent_id: Unique identifier for this agent
            orchestrator: Reference to the MassOrchestrator
            model_config: Configuration object containing model parameters (model, tools,
                temperature, top_p, max_tokens, inference_timeout, max_retries, stream)
            stream_callback: Optional callback function for streaming chunks
            **kwargs: Additional parameters specific to the agent implementation

        Note:
            The agent type ("openai", "gemini", or "grok") is derived from the model
            name and determines which backend handles inference.
        """
        self.agent_id = agent_id
        self.orchestrator = orchestrator
        self.state = AgentState(agent_id=agent_id)

        # Initialize model configuration with defaults if not provided
        if model_config is None:
            model_config = ModelConfig()

        # Store configuration parameters
        self.model = model_config.model
        self.agent_type = get_agent_type_from_model(self.model)
        # Map agent types to their backend modules
        process_message_impl_map = {
            "openai": oai.process_message,
            "gemini": gemini.process_message,
            "grok": grok.process_message,
        }
        if self.agent_type not in process_message_impl_map:
            raise ValueError(
                f"Unknown agent type: {self.agent_type}. Available types: {list(process_message_impl_map.keys())}"
            )

        # Get the appropriate process_message implementation based on the agent type
        self.process_message_impl = process_message_impl_map[self.agent_type]

        # Other model configuration parameters
        self.tools = model_config.tools
        self.max_retries = model_config.max_retries
        self.max_rounds = model_config.max_rounds
        self.max_tokens = model_config.max_tokens
        self.temperature = model_config.temperature
        self.top_p = model_config.top_p
        self.inference_timeout = model_config.inference_timeout
        self.stream = model_config.stream
        self.stream_callback = stream_callback
        self.kwargs = kwargs

    def process_message(
        self, messages: List[Dict[str, str]], tools: List[str] = None
    ) -> AgentResponse:
        """
        Core LLM inference function for task processing.

        This method handles the actual LLM interaction using the agent's
        specific backend (OpenAI, Gemini, Grok, etc.) and returns a standardized response.
        All configuration parameters are stored as instance variables and accessed
        via self.model, self.tools, self.temperature, etc.

        Args:
            messages: List of messages in OpenAI format
            tools: List of tools to use

        Returns:
            AgentResponse containing the agent's response text, code, citations, etc.
        """

        # Create configuration dictionary using model configuration parameters
        config = {
            "model": self.model,
            "max_retries": self.max_retries,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "api_key": None,  # Let backend use environment variable
            "stream": self.stream,
            "stream_callback": self.stream_callback,
        }

        try:
            # Use ThreadPoolExecutor to implement timeout
            with ThreadPoolExecutor(max_workers=1) as executor:
                future = executor.submit(
                    self.process_message_impl, messages=messages, tools=tools, **config
                )

                try:
                    # Wait for result with timeout
                    result = future.result(timeout=self.inference_timeout)
                    # Backend implementations now return AgentResponse objects directly
                    return result
                except FutureTimeoutError:
                    # Mark agent as failed due to timeout
                    timeout_msg = f"Agent {self.agent_id} timed out after {self.inference_timeout} seconds"
                    self.mark_failed(timeout_msg)
                    return AgentResponse(
                        text=f"Agent processing timed out after {self.inference_timeout} seconds",
                        code=[],
                        citations=[],
                        function_calls=[],
                    )

        except Exception as e:
            # Return error response
            return AgentResponse(
                text=f"Error in {self.model} agent processing: {str(e)}",
                code=[],
                citations=[],
                function_calls=[],
            )

    def add_answer(self, new_answer: str):
        """
        Record your work on the task: your analysis, approach, solution, and reasoning. Update when you solve the problem, find better solutions, or incorporate valuable insights from other agents.

        Args:
            new_answer: The new answer, which should be self-contained, complete, and ready to serve as the definitive final response.
        """
        # Use the orchestrator to update the answer and notify other agents to restart
        self.orchestrator.notify_answer_update(self.agent_id, new_answer)
        return "The new answer has been added."

    def vote(
        self, agent_id: int, reason: str = "", invalid_vote_options: List[int] = []
    ):
        """
        Vote for the representative agent, i.e. the agent you believe has found the correct solution.

        Args:
            agent_id: ID of the voted agent
            reason: Your full explanation of why you voted for this agent
            invalid_vote_options: The list of agent IDs that are invalid to vote for (they have new updates)
        """
        if agent_id in invalid_vote_options:
            return f"Error: Voting for agent {agent_id} is not allowed as its answer has been updated!"
        self.orchestrator.cast_vote(self.agent_id, agent_id, reason)
        return f"Your vote for Agent {agent_id} has been cast."

    def check_update(self) -> List[int]:
        """
        Check whether there are any updates from other agents since this agent last saw them.
        """
        agents_with_update = set()
        # Get updates from other agents since this agent last saw them
        for other_id, other_state in self.orchestrator.agent_states.items():
            if other_id != self.agent_id and other_state.updated_answers:
                for update in other_state.updated_answers:
                    last_seen = self.state.seen_updates_timestamps.get(other_id, 0)
                    # Check if the update is newer than the last seen timestamp
                    if update.timestamp > last_seen:
                        # Update the last seen timestamp
                        self.state.seen_updates_timestamps[other_id] = update.timestamp
                        agents_with_update.add(other_id)
        return list(agents_with_update)

    def mark_failed(self, reason: str = ""):
        """
        Mark this agent as failed.

        Args:
            reason: Optional reason for the failure
        """
        # Report the failure to the orchestrator for logging
        self.orchestrator.mark_agent_failed(self.agent_id, reason)

    def deduplicate_function_calls(self, function_calls: List[Dict]):
        """Deduplicate function calls by their name and arguments."""
        deduplicated_function_calls = []
        for func_call in function_calls:
            if func_call not in deduplicated_function_calls:
                deduplicated_function_calls.append(func_call)
        return deduplicated_function_calls

    def _execute_function_calls(
        self, function_calls: List[Dict], invalid_vote_options: List[int] = []
    ):
        """Execute function calls and return function outputs."""
        from .tools import register_tool

        function_outputs = []
        successful_called = []

        for func_call in function_calls:
            func_call_id = func_call.get("call_id")
            func_name = func_call.get("name")
            func_args = func_call.get("arguments", {})
            if isinstance(func_args, str):
                func_args = json.loads(func_args)

            try:
                if func_name == "add_answer":
                    result = self.add_answer(func_args.get("new_answer", ""))
                elif func_name == "vote":
                    result = self.vote(
                        func_args.get("agent_id"),
                        func_args.get("reason", ""),
                        invalid_vote_options,
                    )
                elif func_name in register_tool:
                    result = register_tool[func_name](**func_args)
                else:
                    result = {
                        "type": "function_call_output",
                        "call_id": func_call_id,
                        "output": f"Error: Function '{func_name}' not found in tool mapping",
                    }

                # Add function call and result to messages
                function_output = {
                    "type": "function_call_output",
                    "call_id": func_call_id,
                    "output": str(result),
                }
                function_outputs.append(function_output)
                successful_called.append(True)

            except Exception as e:
                # Handle execution errors
                error_output = {
                    "type": "function_call_output",
                    "call_id": func_call_id,
                    "output": f"Error executing function: {str(e)}",
                }
                function_outputs.append(error_output)
                successful_called.append(False)
                print(f"Error executing function {func_name}: {e}")

                # DEBUGGING
                with open("function_calls.txt", "a") as f:
                    f.write(
                        f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] Agent {self.agent_id} ({self.model}):\n"
                    )
                    f.write(f"{json.dumps(error_output, indent=2)}\n")
                    f.write(f"Successful called: {False}\n")

        return function_outputs, successful_called

    def _get_system_tools(self) -> List[Dict[str, Any]]:
        """
        The system tools available to this agent for orchestration:
        - add_answer: Submit a new answer, which should be self-contained, complete, and ready to serve as the definitive final response.
        - vote: Vote for the representative agent, i.e. the agent you believe has found the correct solution.
        """
        add_answer_schema = {
            "type": "function",
            "name": "add_answer",
            "description": "Add your new answer if you believe it is better than the current answers.",
            "parameters": {
                "type": "object",
                "properties": {
                    "new_answer": {
                        "type": "string",
                        "description": "Your new answer, which should be self-contained, complete, and ready to serve as the definitive final response.",
                    }
                },
                "required": ["new_answer"],
            },
        }
        vote_schema = {
            "type": "function",
            "name": "vote",
            "description": "Vote for the best agent to present the final answer. Submit its agent_id (integer) and the reason for your vote.",
            "parameters": {
                "type": "object",
                "properties": {
                    "agent_id": {
                        "type": "integer",
                        "description": "The ID of the agent you believe has found the best answer that addresses the original message.",
                    },
                    "reason": {
                        "type": "string",
                        "description": "Your full explanation of why you voted for this agent.",
                    },
                },
                "required": ["agent_id", "reason"],
            },
        }
        # Check if there are any available options to vote for. If not, only return the add_answer schema.
        available_options = [
            agent_id
            for agent_id, agent_state in self.orchestrator.agent_states.items()
            if agent_state.curr_answer
        ]
        return (
            [add_answer_schema, vote_schema]
            if available_options
            else [add_answer_schema]
        )

    def _get_registered_tools(self) -> List[Dict[str, Any]]:
        """Return the tool schemas for the tools that are available to this agent."""
        # Build JSON schemas for the tools registered in the global registry
        custom_tools = []
        from .tools import register_tool

        for tool_name, tool_func in register_tool.items():
            if tool_name in self.tools:
                tool_schema = function_to_json(tool_func)
                custom_tools.append(tool_schema)
        return custom_tools

    def _get_builtin_tools(self) -> List[Dict[str, Any]]:
        """
        Override the parent method due to Gemini's limitations.
        Return the built-in tools that are available to Gemini models.
        live_search and code_execution are supported right now.
        However, built-in tools and function calling are not supported at the same time.
        """
        builtin_tools = []
        for tool in self.tools:
            if tool in ["live_search", "code_execution"]:
                builtin_tools.append(tool)
        return builtin_tools

    def _get_all_answers(self) -> List[str]:
        """Get all answers from all agents.
        Format:
            **Agent 1**: Answer 1
            **Agent 2**: Answer 2
            ...
        """
        # Collect the current answer from every agent that has one
        agent_answers = []
        for agent_id, agent_state in self.orchestrator.agent_states.items():
            if agent_state.curr_answer:
                agent_answers.append(f"**Agent {agent_id}**: {agent_state.curr_answer}")
        return agent_answers

    def _get_all_votes(self) -> List[str]:
        """Get all votes from all agents.
        Format:
            **Vote for Agent 1**: Reason 1
            **Vote for Agent 2**: Reason 2
            ...
        """
        agent_votes = []
        for agent_id, agent_state in self.orchestrator.agent_states.items():
            if agent_state.curr_vote:
                agent_votes.append(
                    f"**Vote for Agent {agent_state.curr_vote.target_id}**: {agent_state.curr_vote.reason}"
                )
        return agent_votes

    def _get_task_input(self, task: TaskInput) -> Tuple[str, str]:
        """Get the initial task input as the user message. Return both the current status and the task input."""
        # Case 1: Initial round without a running answer
        if not self.state.curr_answer:
            status = "initial"
            task_input = (
                AGENT_ANSWER_MESSAGE.format(task=task.question, agent_answers="None")
                + "There are no current answers right now. Please use your expertise and tools (if available) to provide a new answer and submit it using the `add_answer` tool first."
            )
            return status, task_input

        # Not the initial round
        all_agent_answers = self._get_all_answers()
        all_agent_answers_str = "\n\n".join(all_agent_answers)
        # Check if in debate mode or not
        voted_agents = [
            agent_id
            for agent_id, agent_state in self.orchestrator.agent_states.items()
            if agent_state.curr_vote is not None
        ]
        if len(voted_agents) == len(self.orchestrator.agent_states):
            # Case 2: All agents have voted and are debating. The agent status cannot be used for
            # this check because it has already been updated to 'working/debate'.
            all_agent_votes = self._get_all_votes()
            all_agent_votes_str = "\n\n".join(all_agent_votes)
            status = "debate"
            task_input = AGENT_ANSWER_AND_VOTE_MESSAGE.format(
                task=task.question,
                agent_answers=all_agent_answers_str,
                agent_votes=all_agent_votes_str,
            )
        else:
            # Case 3: All agents are still working and not debating
            status = "working"
            task_input = AGENT_ANSWER_MESSAGE.format(
                task=task.question, agent_answers=all_agent_answers_str
            )

        return status, task_input

    def _get_task_input_messages(self, user_input: str) -> List[Dict[str, str]]:
        """Get the task input messages for the agent."""
        return [
            {"role": "system", "content": SYSTEM_INSTRUCTION},
            {"role": "user", "content": user_input},
        ]

    def _get_curr_messages_and_tools(self, task: TaskInput):
        """Get the current messages and tools for the agent."""
        working_status, user_input = self._get_task_input(task)
        working_messages = self._get_task_input_messages(user_input)
        # Get available tools (system tools + built-in tools + custom tools)
        all_tools = []
        all_tools.extend(self._get_builtin_tools())
        all_tools.extend(self._get_registered_tools())
        all_tools.extend(self._get_system_tools())
        return working_status, working_messages, all_tools

    def work_on_task(self, task: TaskInput) -> List[Dict[str, str]]:
        """
        Work on the task with conversation continuation.

        Args:
            task: The task to work on

        Returns:
            Updated conversation history including the agent's work

        Concrete agent classes may override this method.
        The agent continues the conversation until it votes or reaches max rounds.
        """

        # Initialize working messages
        curr_round = 0
        working_status, working_messages, all_tools = self._get_curr_messages_and_tools(
            task
        )

        # Start the task solving loop
        while curr_round < self.max_rounds and self.state.status == "working":
            try:
                # Call LLM with current conversation
                result = self.process_message(
                    messages=working_messages, tools=all_tools
                )

                # Before putting the new result into effect, check whether other agents
                # have posted updates that this agent has not yet seen
                agents_with_update = self.check_update()
                has_update = len(agents_with_update) > 0
                # Case 1: if vote() is called and there are new updates: make it invalid and renew the conversation
                # Case 2: if add_answer() is called and there are new updates: keep it valid and renew the conversation
                # Case 3: if no function call is made and there are new updates: renew the conversation

                # Add assistant response
                if result.text:
                    working_messages.append(
                        {"role": "assistant", "content": result.text}
                    )

                # Execute function calls if any
                if result.function_calls:
                    # Deduplicate function calls by their name and arguments
                    result.function_calls = self.deduplicate_function_calls(
                        result.function_calls
                    )
                    # Disallow voting for agents that have unseen updates
                    function_outputs, successful_called = self._execute_function_calls(
                        result.function_calls, invalid_vote_options=agents_with_update
                    )

                    renew_conversation = False
                    for function_call, function_output, call_succeeded in zip(
                        result.function_calls, function_outputs, successful_called
                    ):
                        # If `add_answer` was called, rebuild the conversation history with the new answers
                        if (
                            function_call.get("name") == "add_answer"
                            and call_succeeded
                        ):
                            renew_conversation = True
                            break

                        # If `vote` was called, break out of the loop
                        if function_call.get("name") == "vote" and call_succeeded:
                            renew_conversation = True
                            break

                    if (
                        not renew_conversation
                    ):  # Add all function call results to the current conversation and continue the loop
                        for function_call, function_output in zip(
                            result.function_calls, function_outputs
                        ):
                            working_messages.extend([function_call, function_output])
                    else:  # Renew the conversation
                        working_status, working_messages, all_tools = (
                            self._get_curr_messages_and_tools(task)
                        )
                else:
                    # No function calls - check if we should continue or stop
                    if self.state.status == "voted":
                        # Agent has voted, exit the work loop
                        break
                    else:
                        # Check whether there are updates from other agents that this agent has not seen
                        if has_update and working_status != "initial":
                            # The vote options have changed, so renew the conversation within the loop
                            working_status, working_messages, all_tools = (
                                self._get_curr_messages_and_tools(task)
                            )
                        else:  # Continue the current conversation and prompt the agent to finish with a tool call
                            working_messages.append(
                                {
                                    "role": "user",
                                    "content": "Finish your work above by making a tool call of `vote` or `add_answer`. Make sure you actually call the tool.",
                                }
                            )

                curr_round += 1
                self.state.chat_round += 1

                # Check if agent voted or failed
                if self.state.status in ["voted", "failed"]:
                    break

            except Exception as e:
                print(
                    f"❌ Agent {self.agent_id} error in round {self.state.chat_round}: {e}"
                )
                if self.orchestrator:
                    self.orchestrator.mark_agent_failed(self.agent_id, str(e))

                self.state.chat_round += 1
                curr_round += 1
                break

        return working_messages
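
The `add_answer` and `vote` hooks above delegate to the orchestrator (`notify_answer_update`, `cast_vote`, `mark_agent_failed`) and read shared state from `orchestrator.agent_states`. As a rough, self-contained illustration of that contract (not code from this package), the following minimal sketch stubs out those members and walks one answer/vote cycle; all class and variable names below are hypothetical stand-ins.

# Illustrative sketch only: a stub orchestrator mirroring the calls that
# massgen/v1/agent.py makes (notify_answer_update, cast_vote, agent_states).
# These classes are hypothetical stand-ins, not the package's real types.
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class Vote:
    target_id: int
    reason: str


@dataclass
class StubAgentState:
    agent_id: int
    curr_answer: str = ""
    curr_vote: Optional[Vote] = None


class StubOrchestrator:
    """Collects answers and votes the way MassAgent expects."""

    def __init__(self, num_agents: int):
        self.agent_states: Dict[int, StubAgentState] = {
            i: StubAgentState(agent_id=i) for i in range(1, num_agents + 1)
        }

    def notify_answer_update(self, agent_id: int, new_answer: str) -> None:
        # Corresponds to the `add_answer` tool call in agent.py
        self.agent_states[agent_id].curr_answer = new_answer

    def cast_vote(self, voter_id: int, target_id: int, reason: str) -> None:
        # Corresponds to the `vote` tool call in agent.py
        self.agent_states[voter_id].curr_vote = Vote(target_id, reason)

    def mark_agent_failed(self, agent_id: int, reason: str) -> None:
        print(f"Agent {agent_id} failed: {reason}")


if __name__ == "__main__":
    orch = StubOrchestrator(num_agents=2)
    orch.notify_answer_update(1, "Answer drafted by agent 1.")
    orch.cast_vote(2, 1, "Agent 1's answer fully addresses the question.")
    # Pick the agent with the most votes as the representative
    winner = max(
        orch.agent_states,
        key=lambda aid: sum(
            s.curr_vote is not None and s.curr_vote.target_id == aid
            for s in orch.agent_states.values()
        ),
    )
    print(f"Representative agent: {winner}")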