massgen 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen might be problematic. Click here for more details.
- massgen/__init__.py +1 -1
- massgen/chat_agent.py +340 -20
- massgen/cli.py +326 -19
- massgen/configs/README.md +52 -10
- massgen/configs/memory/gpt5mini_gemini_baseline_research_to_implementation.yaml +94 -0
- massgen/configs/memory/gpt5mini_gemini_context_window_management.yaml +187 -0
- massgen/configs/memory/gpt5mini_gemini_research_to_implementation.yaml +127 -0
- massgen/configs/memory/gpt5mini_high_reasoning_gemini.yaml +107 -0
- massgen/configs/memory/single_agent_compression_test.yaml +64 -0
- massgen/configs/tools/custom_tools/multimodal_tools/playwright_with_img_understanding.yaml +98 -0
- massgen/configs/tools/custom_tools/multimodal_tools/understand_video_example.yaml +54 -0
- massgen/memory/README.md +277 -0
- massgen/memory/__init__.py +26 -0
- massgen/memory/_base.py +193 -0
- massgen/memory/_compression.py +237 -0
- massgen/memory/_context_monitor.py +211 -0
- massgen/memory/_conversation.py +255 -0
- massgen/memory/_fact_extraction_prompts.py +333 -0
- massgen/memory/_mem0_adapters.py +257 -0
- massgen/memory/_persistent.py +687 -0
- massgen/memory/docker-compose.qdrant.yml +36 -0
- massgen/memory/docs/DESIGN.md +388 -0
- massgen/memory/docs/QUICKSTART.md +409 -0
- massgen/memory/docs/SUMMARY.md +319 -0
- massgen/memory/docs/agent_use_memory.md +408 -0
- massgen/memory/docs/orchestrator_use_memory.md +586 -0
- massgen/memory/examples.py +237 -0
- massgen/orchestrator.py +207 -7
- massgen/tests/memory/test_agent_compression.py +174 -0
- massgen/tests/memory/test_context_window_management.py +286 -0
- massgen/tests/memory/test_force_compression.py +154 -0
- massgen/tests/memory/test_simple_compression.py +147 -0
- massgen/tests/test_agent_memory.py +534 -0
- massgen/tests/test_conversation_memory.py +382 -0
- massgen/tests/test_orchestrator_memory.py +620 -0
- massgen/tests/test_persistent_memory.py +435 -0
- massgen/token_manager/token_manager.py +6 -0
- massgen/tools/__init__.py +8 -0
- massgen/tools/_planning_mcp_server.py +520 -0
- massgen/tools/planning_dataclasses.py +434 -0
- {massgen-0.1.4.dist-info → massgen-0.1.5.dist-info}/METADATA +109 -76
- {massgen-0.1.4.dist-info → massgen-0.1.5.dist-info}/RECORD +46 -12
- {massgen-0.1.4.dist-info → massgen-0.1.5.dist-info}/WHEEL +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.5.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.5.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.5.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""
|
|
3
|
+
Usage examples for MassGen memory system.
|
|
4
|
+
|
|
5
|
+
These examples demonstrate how to use conversation and persistent memory
|
|
6
|
+
in your MassGen agents.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
async def example_conversation_memory():
    """Example: Using ConversationMemory for short-term dialogue.

    Demonstrates the core ConversationMemory API: adding messages,
    reading them back, filtering by role, truncating, and saving /
    restoring state via ``state_dict`` / ``load_state_dict``.
    """
    from massgen.memory import ConversationMemory

    print("=" * 60)
    print("Example 1: Conversation Memory (Short-term)")
    print("=" * 60)

    memory = ConversationMemory()

    # Simulate a conversation
    conversation = [
        {"role": "user", "content": "Hello! My name is Alice."},
        {"role": "assistant", "content": "Hi Alice! How can I help you today?"},
        {"role": "user", "content": "I'm interested in learning about Python."},
        {
            "role": "assistant",
            "content": "Great! Python is a versatile programming language...",
        },
    ]

    # Add messages one by one
    for msg in conversation:
        await memory.add(msg)
        print(f"Added: {msg['role']} - {msg['content'][:50]}...")

    # Get all messages.
    # Fix: the original called get_messages() and discarded the result;
    # capture it so the example actually demonstrates the return value
    # (len(all_messages) should match memory.size()).
    all_messages = await memory.get_messages()
    print(f"\nTotal messages: {len(all_messages)}")

    # Get last message
    last = await memory.get_last_message()
    print(f"Last message role: {last['role']}")

    # Filter by role
    user_messages = await memory.get_messages_by_role("user")
    print(f"User messages: {len(user_messages)}")

    # Truncate to keep only recent messages
    await memory.truncate_to_size(2)
    print(f"After truncation: {await memory.size()} messages")

    # Save and restore state
    state = memory.state_dict()
    print(f"\nState saved: {len(state['messages'])} messages")

    # Create new memory from state
    restored_memory = ConversationMemory()
    restored_memory.load_state_dict(state)
    print(f"State restored: {await restored_memory.size()} messages")

    print("\n✅ Conversation memory example completed!\n")
async def example_persistent_memory():
    """
    Example: Using PersistentMemory for long-term storage.

    Note: This requires mem0 to be installed and proper backends configured.
    This is a conceptual example - adjust backends as needed.
    """
    divider = "=" * 60
    print(divider)
    print("Example 2: Persistent Memory (Long-term)")
    print(divider)

    # NOTE: This is a conceptual example
    # In practice, you need to provide actual MassGen backends
    print("\n⚠️ This example requires actual LLM and embedding backends.")
    print(" Uncomment and configure backends to run this example.\n")

    # Conceptual usage (intentionally a bare string literal so it never runs):
    """
    from massgen.memory import PersistentMemory
    from massgen.backend import OpenAIBackend  # Or your backend

    # Initialize backends
    llm_backend = OpenAIBackend(model="gpt-4")
    embedding_backend = OpenAIBackend(model="text-embedding-3-small")

    # Create persistent memory
    memory = PersistentMemory(
        agent_name="learning_assistant",
        user_name="alice",
        llm_backend=llm_backend,
        embedding_backend=embedding_backend,
        on_disk=True
    )

    # Record a conversation
    await memory.record([
        {"role": "user", "content": "I love Python programming"},
        {"role": "assistant", "content": "That's great! Python is very versatile."}
    ])
    print("✓ Recorded conversation to long-term memory")

    # Retrieve relevant memories
    query = "What programming languages does the user like?"
    relevant = await memory.retrieve(query)
    print(f"Retrieved: {relevant}")

    # Agent-controlled memory saving
    result = await memory.save_to_memory(
        thinking="User expressed interest in a topic",
        content=["User likes Python", "User is a beginner"]
    )
    print(f"Save result: {result}")

    # Agent-controlled memory recall
    result = await memory.recall_from_memory(
        keywords=["programming", "Python"],
        limit=3
    )
    print(f"Recalled {result['count']} memories")
    for mem in result['memories']:
        print(f" - {mem}")
    """

    print("✅ Persistent memory example completed!\n")
async def example_combined_usage():
    """Example: Using both memory types together."""
    divider = "=" * 60
    print(divider)
    print("Example 3: Combined Memory Usage")
    print(divider)

    from massgen.memory import ConversationMemory

    # Short-term memory for active conversation
    active_memory = ConversationMemory()

    # Simulate ongoing conversation
    dialogue = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What's the weather like?"},
        {"role": "assistant", "content": "I can help you check the weather!"},
    ]

    for turn in dialogue:
        await active_memory.add(turn)

    print(f"Short-term memory: {await active_memory.size()} messages")

    # In a real agent, you would:
    # 1. Retrieve relevant long-term memories based on current message
    # 2. Inject them into the conversation context
    # 3. Generate response
    # 4. Add response to short-term memory
    # 5. Optionally save important parts to long-term memory

    print("\n💡 In production, this would be integrated with:")
    print(" - LLM backend for generating responses")
    print(" - Persistent memory for cross-session knowledge")
    print(" - Tool system for agent-controlled memory")

    print("\n✅ Combined usage example completed!\n")
async def example_memory_management():
    """Example: Memory management best practices."""
    divider = "=" * 60
    print(divider)
    print("Example 4: Memory Management")
    print(divider)

    from massgen.memory import ConversationMemory

    memory = ConversationMemory()

    # Add many messages to simulate long conversation
    roles = ("user", "assistant")
    for idx in range(100):
        await memory.add(
            {
                "role": roles[idx % 2],
                "content": f"Message {idx}",
            },
        )

    print(f"Added {await memory.size()} messages")

    # Best practice 1: Regular truncation
    await memory.truncate_to_size(50)
    print(f"After truncation: {await memory.size()} messages")

    # Best practice 2: Get only recent messages
    recent = await memory.get_messages(limit=10)
    print(f"Retrieved last {len(recent)} messages")

    # Best practice 3: Periodic cleanup
    user_msgs = await memory.get_messages_by_role("user")
    print(f"User sent {len(user_msgs)} messages")

    # Best practice 4: Clear when starting new topic
    await memory.clear()
    print(f"After clearing: {await memory.size()} messages")

    # Best practice 5: State persistence for crash recovery
    await memory.add({"role": "user", "content": "Important message"})
    state = memory.state_dict()
    print(f"State saved with {len(state['messages'])} messages")

    print("\n💾 Save this state to disk for persistence across restarts!")
    print("\n✅ Memory management example completed!\n")
async def main():
    """Run all examples."""
    print("\n🚀 MassGen Memory System Examples\n")

    # Execute each demo in order; any failure propagates to asyncio.run.
    demos = (
        example_conversation_memory,
        example_persistent_memory,
        example_combined_usage,
        example_memory_management,
    )
    for demo in demos:
        await demo()

    divider = "=" * 60
    print(divider)
    print("All examples completed! 🎉")
    print(divider)
    print("\nNext steps:")
    print("1. Install mem0: pip install mem0ai")
    print("2. Configure your LLM and embedding backends")
    print("3. Try persistent memory with real backends")
    print("4. Integrate into your MassGen agents")
    print("\n")
# Script entry point: run every example sequentially on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
massgen/orchestrator.py
CHANGED
|
@@ -42,6 +42,7 @@ from .logger_config import (
|
|
|
42
42
|
log_stream_chunk,
|
|
43
43
|
log_tool_call,
|
|
44
44
|
)
|
|
45
|
+
from .memory import ConversationMemory, PersistentMemoryBase
|
|
45
46
|
from .message_templates import MessageTemplates
|
|
46
47
|
from .stream_chunk import ChunkType
|
|
47
48
|
from .tool import get_post_evaluation_tools, get_workflow_tools
|
|
@@ -118,6 +119,9 @@ class Orchestrator(ChatAgent):
|
|
|
118
119
|
snapshot_storage: Optional[str] = None,
|
|
119
120
|
agent_temporary_workspace: Optional[str] = None,
|
|
120
121
|
previous_turns: Optional[List[Dict[str, Any]]] = None,
|
|
122
|
+
winning_agents_history: Optional[List[Dict[str, Any]]] = None,
|
|
123
|
+
shared_conversation_memory: Optional[ConversationMemory] = None,
|
|
124
|
+
shared_persistent_memory: Optional[PersistentMemoryBase] = None,
|
|
121
125
|
):
|
|
122
126
|
"""
|
|
123
127
|
Initialize MassGen orchestrator.
|
|
@@ -130,13 +134,22 @@ class Orchestrator(ChatAgent):
|
|
|
130
134
|
snapshot_storage: Optional path to store agent workspace snapshots
|
|
131
135
|
agent_temporary_workspace: Optional path for agent temporary workspaces
|
|
132
136
|
previous_turns: List of previous turn metadata for multi-turn conversations (loaded by CLI)
|
|
137
|
+
winning_agents_history: List of previous winning agents for memory sharing
|
|
138
|
+
Format: [{"agent_id": "agent_b", "turn": 1}, ...]
|
|
139
|
+
Loaded from session storage to persist across orchestrator recreations
|
|
140
|
+
shared_conversation_memory: Optional shared conversation memory for all agents
|
|
141
|
+
shared_persistent_memory: Optional shared persistent memory for all agents
|
|
133
142
|
"""
|
|
134
|
-
super().__init__(session_id)
|
|
143
|
+
super().__init__(session_id, shared_conversation_memory, shared_persistent_memory)
|
|
135
144
|
self.orchestrator_id = orchestrator_id
|
|
136
145
|
self.agents = agents
|
|
137
146
|
self.agent_states = {aid: AgentState() for aid in agents.keys()}
|
|
138
147
|
self.config = config or AgentConfig.create_openai_config()
|
|
139
148
|
|
|
149
|
+
# Shared memory for all agents
|
|
150
|
+
self.shared_conversation_memory = shared_conversation_memory
|
|
151
|
+
self.shared_persistent_memory = shared_persistent_memory
|
|
152
|
+
|
|
140
153
|
# Get message templates from config
|
|
141
154
|
self.message_templates = self.config.message_templates or MessageTemplates(
|
|
142
155
|
voting_sensitivity=self.config.voting_sensitivity,
|
|
@@ -158,6 +171,14 @@ class Orchestrator(ChatAgent):
|
|
|
158
171
|
self._selected_agent: Optional[str] = None
|
|
159
172
|
self._final_presentation_content: Optional[str] = None
|
|
160
173
|
|
|
174
|
+
# Track winning agents by turn for memory sharing
|
|
175
|
+
# Format: [{"agent_id": "agent_b", "turn": 1}, {"agent_id": "agent_a", "turn": 2}]
|
|
176
|
+
# Restore from session storage if provided (for multi-turn persistence)
|
|
177
|
+
self._winning_agents_history: List[Dict[str, Any]] = winning_agents_history or []
|
|
178
|
+
if self._winning_agents_history:
|
|
179
|
+
logger.info(f"📚 Restored {len(self._winning_agents_history)} winning agent(s) from session: {self._winning_agents_history}")
|
|
180
|
+
self._current_turn: int = 0
|
|
181
|
+
|
|
161
182
|
# Timeout and resource tracking
|
|
162
183
|
self.total_tokens: int = 0
|
|
163
184
|
self.coordination_start_time: float = 0
|
|
@@ -365,6 +386,113 @@ class Orchestrator(ChatAgent):
|
|
|
365
386
|
"full_messages": messages,
|
|
366
387
|
}
|
|
367
388
|
|
|
389
|
+
async def _inject_shared_memory_context(
    self,
    messages: List[Dict[str, Any]],
    agent_id: str,
) -> List[Dict[str, Any]]:
    """
    Inject shared memory context into agent messages.

    This allows all agents to see shared memories including what other agents
    have stored in the shared memory.

    Args:
        messages: Original messages to send to agent
        agent_id: ID of the agent receiving the messages

    Returns:
        Messages with shared memory context injected
    """
    # Nothing to inject when neither shared memory is configured.
    if not self.shared_conversation_memory and not self.shared_persistent_memory:
        return messages

    context_lines: List[str] = []

    # Pull recent entries from the shared conversation memory (best-effort).
    if self.shared_conversation_memory:
        try:
            history = await self.shared_conversation_memory.get_messages()
            if history:
                context_lines.append("=== SHARED CONVERSATION MEMORY ===")
                # Only the last 10 entries, to bound prompt growth.
                for entry in history[-10:]:
                    speaker = entry.get("agent_id", "unknown")
                    entry_role = entry.get("role", "unknown")
                    entry_text = entry.get("content", "")
                    context_lines.append(f"[{speaker}] {entry_role}: {entry_text}")
        except Exception as e:
            logger.warning(f"Failed to retrieve shared conversation memory: {e}")

    # Query the shared persistent memory with the user turns (best-effort).
    if self.shared_persistent_memory:
        try:
            user_turns = [m for m in messages if m.get("role") == "user"]
            if user_turns:
                recalled = await self.shared_persistent_memory.retrieve(user_turns)
                if recalled:
                    context_lines.append("\n=== SHARED PERSISTENT MEMORY ===")
                    context_lines.append(recalled)
        except NotImplementedError:
            # Memory backend doesn't support retrieve
            pass
        except Exception as e:
            logger.warning(f"Failed to retrieve shared persistent memory: {e}")

    # No memory content retrieved -> hand back the original list untouched.
    if not context_lines:
        return messages

    memory_message = {
        "role": "system",
        "content": ("You have access to shared memory that all agents can see and contribute to.\n" + "\n".join(context_lines)),
    }

    # Insert after existing system messages but before user messages.
    # NOTE(review): this counts system messages anywhere in the list —
    # assumes they are all at the front; confirm against callers.
    leading_system = sum(1 for m in messages if m.get("role") == "system")
    augmented = list(messages)
    augmented.insert(leading_system, memory_message)
    return augmented
async def _record_to_shared_memory(
    self,
    agent_id: str,
    content: str,
    role: str = "assistant",
) -> None:
    """
    Record agent's contribution to shared memory.

    Both sinks are best-effort: failures are logged (or silently skipped for
    backends that don't implement record) and never raised to the caller.

    Args:
        agent_id: ID of the agent contributing
        content: Content to record
        role: Role of the message (default: "assistant")
    """
    entry: Dict[str, Any] = {
        "role": role,
        "content": content,
        "agent_id": agent_id,
        "timestamp": time.time(),
    }

    # Add to conversation memory
    conversation_store = self.shared_conversation_memory
    if conversation_store:
        try:
            await conversation_store.add(entry)
        except Exception as e:
            logger.warning(f"Failed to add to shared conversation memory: {e}")

    # Record to persistent memory
    persistent_store = self.shared_persistent_memory
    if persistent_store:
        try:
            await persistent_store.record([entry])
        except NotImplementedError:
            # Memory backend doesn't support record
            pass
        except Exception as e:
            logger.warning(f"Failed to record to shared persistent memory: {e}")
|
|
368
496
|
def save_coordination_logs(self):
|
|
369
497
|
"""Public method to save coordination logs after final presentation is complete."""
|
|
370
498
|
# End the coordination session
|
|
@@ -798,6 +926,18 @@ Your answer:"""
|
|
|
798
926
|
current_answers = {aid: state.answer for aid, state in self.agent_states.items() if state.answer}
|
|
799
927
|
self._selected_agent = self._determine_final_agent_from_votes(votes, current_answers)
|
|
800
928
|
|
|
929
|
+
# Track winning agent for memory sharing in future turns
|
|
930
|
+
self._current_turn += 1
|
|
931
|
+
if self._selected_agent:
|
|
932
|
+
winner_entry = {
|
|
933
|
+
"agent_id": self._selected_agent,
|
|
934
|
+
"turn": self._current_turn,
|
|
935
|
+
}
|
|
936
|
+
self._winning_agents_history.append(winner_entry)
|
|
937
|
+
logger.info(
|
|
938
|
+
f"🏆 Turn {self._current_turn} winner: {self._selected_agent} " f"(tracked for memory sharing)",
|
|
939
|
+
)
|
|
940
|
+
|
|
801
941
|
log_coordination_step(
|
|
802
942
|
"Final agent selected",
|
|
803
943
|
{"selected_agent": self._selected_agent, "votes": votes},
|
|
@@ -1806,6 +1946,13 @@ Your answer:"""
|
|
|
1806
1946
|
{"role": "system", "content": conversation["system_message"]},
|
|
1807
1947
|
{"role": "user", "content": conversation["user_message"]},
|
|
1808
1948
|
]
|
|
1949
|
+
|
|
1950
|
+
# Inject shared memory context
|
|
1951
|
+
conversation_messages = await self._inject_shared_memory_context(
|
|
1952
|
+
conversation_messages,
|
|
1953
|
+
agent_id,
|
|
1954
|
+
)
|
|
1955
|
+
|
|
1809
1956
|
enforcement_msg = self.message_templates.enforcement_message()
|
|
1810
1957
|
|
|
1811
1958
|
# Update agent status to STREAMING
|
|
@@ -1832,20 +1979,42 @@ Your answer:"""
|
|
|
1832
1979
|
# First attempt: orchestrator provides initial conversation
|
|
1833
1980
|
# But we need the agent to have this in its history for subsequent calls
|
|
1834
1981
|
# First attempt: provide complete conversation and reset agent's history
|
|
1835
|
-
|
|
1982
|
+
# Pass current turn and previous winners for memory sharing
|
|
1983
|
+
chat_stream = agent.chat(
|
|
1984
|
+
conversation_messages,
|
|
1985
|
+
self.workflow_tools,
|
|
1986
|
+
reset_chat=True,
|
|
1987
|
+
current_stage=CoordinationStage.INITIAL_ANSWER,
|
|
1988
|
+
orchestrator_turn=self._current_turn + 1, # Next turn number
|
|
1989
|
+
previous_winners=self._winning_agents_history.copy(),
|
|
1990
|
+
)
|
|
1836
1991
|
else:
|
|
1837
1992
|
# Subsequent attempts: send enforcement message (set by error handling)
|
|
1838
1993
|
|
|
1839
1994
|
if isinstance(enforcement_msg, list):
|
|
1840
1995
|
# Tool message array
|
|
1841
|
-
chat_stream = agent.chat(
|
|
1996
|
+
chat_stream = agent.chat(
|
|
1997
|
+
enforcement_msg,
|
|
1998
|
+
self.workflow_tools,
|
|
1999
|
+
reset_chat=False,
|
|
2000
|
+
current_stage=CoordinationStage.ENFORCEMENT,
|
|
2001
|
+
orchestrator_turn=self._current_turn + 1,
|
|
2002
|
+
previous_winners=self._winning_agents_history.copy(),
|
|
2003
|
+
)
|
|
1842
2004
|
else:
|
|
1843
2005
|
# Single user message
|
|
1844
2006
|
enforcement_message = {
|
|
1845
2007
|
"role": "user",
|
|
1846
2008
|
"content": enforcement_msg,
|
|
1847
2009
|
}
|
|
1848
|
-
chat_stream = agent.chat(
|
|
2010
|
+
chat_stream = agent.chat(
|
|
2011
|
+
[enforcement_message],
|
|
2012
|
+
self.workflow_tools,
|
|
2013
|
+
reset_chat=False,
|
|
2014
|
+
current_stage=CoordinationStage.ENFORCEMENT,
|
|
2015
|
+
orchestrator_turn=self._current_turn + 1,
|
|
2016
|
+
previous_winners=self._winning_agents_history.copy(),
|
|
2017
|
+
)
|
|
1849
2018
|
response_text = ""
|
|
1850
2019
|
tool_calls = []
|
|
1851
2020
|
workflow_tool_found = False
|
|
@@ -2101,6 +2270,14 @@ Your answer:"""
|
|
|
2101
2270
|
"reason": reason,
|
|
2102
2271
|
}
|
|
2103
2272
|
|
|
2273
|
+
# Record vote to shared memory
|
|
2274
|
+
vote_message = f"Voted for {voted_agent}. Reason: {reason}"
|
|
2275
|
+
await self._record_to_shared_memory(
|
|
2276
|
+
agent_id=agent_id,
|
|
2277
|
+
content=vote_message,
|
|
2278
|
+
role="assistant",
|
|
2279
|
+
)
|
|
2280
|
+
|
|
2104
2281
|
# Send tool result - orchestrator will decide if vote is accepted
|
|
2105
2282
|
# Vote submitted (result will be shown by orchestrator)
|
|
2106
2283
|
yield (
|
|
@@ -2193,6 +2370,14 @@ Your answer:"""
|
|
|
2193
2370
|
return
|
|
2194
2371
|
# Send successful tool result back to agent
|
|
2195
2372
|
# Answer recorded (result will be shown by orchestrator)
|
|
2373
|
+
|
|
2374
|
+
# Record to shared memory
|
|
2375
|
+
await self._record_to_shared_memory(
|
|
2376
|
+
agent_id=agent_id,
|
|
2377
|
+
content=content,
|
|
2378
|
+
role="assistant",
|
|
2379
|
+
)
|
|
2380
|
+
|
|
2196
2381
|
yield ("result", ("answer", content))
|
|
2197
2382
|
yield ("done", None)
|
|
2198
2383
|
return
|
|
@@ -2623,7 +2808,13 @@ INSTRUCTIONS FOR NEXT ATTEMPT:
|
|
|
2623
2808
|
|
|
2624
2809
|
try:
|
|
2625
2810
|
# Track final round iterations (each chunk is like an iteration)
|
|
2626
|
-
async for chunk in agent.chat(
|
|
2811
|
+
async for chunk in agent.chat(
|
|
2812
|
+
presentation_messages,
|
|
2813
|
+
reset_chat=True,
|
|
2814
|
+
current_stage=CoordinationStage.PRESENTATION,
|
|
2815
|
+
orchestrator_turn=self._current_turn,
|
|
2816
|
+
previous_winners=self._winning_agents_history.copy(),
|
|
2817
|
+
):
|
|
2627
2818
|
chunk_type = self._get_chunk_type_value(chunk)
|
|
2628
2819
|
# Start new iteration for this chunk
|
|
2629
2820
|
self.coordination_tracker.start_new_iteration()
|
|
@@ -2872,7 +3063,14 @@ Then call either submit(confirmed=True) if the answer is satisfactory, or restar
|
|
|
2872
3063
|
try:
|
|
2873
3064
|
timeout_seconds = self.config.timeout_config.orchestrator_timeout_seconds
|
|
2874
3065
|
async with asyncio.timeout(timeout_seconds):
|
|
2875
|
-
async for chunk in agent.chat(
|
|
3066
|
+
async for chunk in agent.chat(
|
|
3067
|
+
messages=evaluation_messages,
|
|
3068
|
+
tools=post_eval_tools,
|
|
3069
|
+
reset_chat=True,
|
|
3070
|
+
current_stage=CoordinationStage.POST_EVALUATION,
|
|
3071
|
+
orchestrator_turn=self._current_turn,
|
|
3072
|
+
previous_winners=self._winning_agents_history.copy(),
|
|
3073
|
+
):
|
|
2876
3074
|
chunk_type = self._get_chunk_type_value(chunk)
|
|
2877
3075
|
|
|
2878
3076
|
if chunk_type == "content" and chunk.content:
|
|
@@ -3103,7 +3301,8 @@ Then call either submit(confirmed=True) if the answer is satisfactory, or restar
|
|
|
3103
3301
|
Get final result for session persistence.
|
|
3104
3302
|
|
|
3105
3303
|
Returns:
|
|
3106
|
-
Dict with final_answer, winning_agent_id,
|
|
3304
|
+
Dict with final_answer, winning_agent_id, workspace_path, and winning_agents_history,
|
|
3305
|
+
or None if not available
|
|
3107
3306
|
"""
|
|
3108
3307
|
if not self._selected_agent or not self._final_presentation_content:
|
|
3109
3308
|
return None
|
|
@@ -3117,6 +3316,7 @@ Then call either submit(confirmed=True) if the answer is satisfactory, or restar
|
|
|
3117
3316
|
"final_answer": self._final_presentation_content,
|
|
3118
3317
|
"winning_agent_id": self._selected_agent,
|
|
3119
3318
|
"workspace_path": workspace_path,
|
|
3319
|
+
"winning_agents_history": self._winning_agents_history.copy(), # For cross-turn memory sharing
|
|
3120
3320
|
}
|
|
3121
3321
|
|
|
3122
3322
|
def get_status(self) -> Dict[str, Any]:
|