agent-mcp 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_mcp/__init__.py +66 -12
- agent_mcp/a2a_protocol.py +316 -0
- agent_mcp/agent_lightning_library.py +214 -0
- agent_mcp/camel_mcp_adapter.py +521 -0
- agent_mcp/claude_mcp_adapter.py +195 -0
- agent_mcp/cli.py +47 -0
- agent_mcp/google_ai_mcp_adapter.py +183 -0
- agent_mcp/heterogeneous_group_chat.py +412 -38
- agent_mcp/langchain_mcp_adapter.py +176 -43
- agent_mcp/llamaindex_mcp_adapter.py +410 -0
- agent_mcp/mcp_agent.py +26 -0
- agent_mcp/mcp_transport.py +11 -5
- agent_mcp/microsoft_agent_framework.py +591 -0
- agent_mcp/missing_frameworks.py +435 -0
- agent_mcp/openapi_protocol.py +616 -0
- agent_mcp/payments.py +804 -0
- agent_mcp/pydantic_ai_mcp_adapter.py +628 -0
- agent_mcp/registry.py +768 -0
- agent_mcp/security.py +864 -0
- {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/METADATA +173 -49
- agent_mcp-0.1.5.dist-info/RECORD +62 -0
- {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/WHEEL +1 -1
- agent_mcp-0.1.5.dist-info/entry_points.txt +4 -0
- agent_mcp-0.1.5.dist-info/top_level.txt +3 -0
- demos/__init__.py +1 -0
- demos/basic/__init__.py +1 -0
- demos/basic/framework_examples.py +108 -0
- demos/basic/langchain_camel_demo.py +272 -0
- demos/basic/simple_chat.py +355 -0
- demos/basic/simple_integration_example.py +51 -0
- demos/collaboration/collaborative_task_example.py +437 -0
- demos/collaboration/group_chat_example.py +130 -0
- demos/collaboration/simplified_crewai_example.py +39 -0
- demos/comprehensive_framework_demo.py +202 -0
- demos/langgraph/autonomous_langgraph_network.py +808 -0
- demos/langgraph/langgraph_agent_network.py +415 -0
- demos/langgraph/langgraph_collaborative_task.py +619 -0
- demos/langgraph/langgraph_example.py +227 -0
- demos/langgraph/run_langgraph_examples.py +213 -0
- demos/network/agent_network_example.py +381 -0
- demos/network/email_agent.py +130 -0
- demos/network/email_agent_demo.py +46 -0
- demos/network/heterogeneous_network_example.py +216 -0
- demos/network/multi_framework_example.py +199 -0
- demos/utils/check_imports.py +49 -0
- demos/workflows/autonomous_agent_workflow.py +248 -0
- demos/workflows/mcp_features_demo.py +353 -0
- demos/workflows/run_agent_collaboration_demo.py +63 -0
- demos/workflows/run_agent_collaboration_with_logs.py +396 -0
- demos/workflows/show_agent_interactions.py +107 -0
- demos/workflows/simplified_autonomous_demo.py +74 -0
- functions/main.py +144 -0
- functions/mcp_network_server.py +513 -0
- functions/utils.py +47 -0
- agent_mcp-0.1.3.dist-info/RECORD +0 -18
- agent_mcp-0.1.3.dist-info/entry_points.txt +0 -2
- agent_mcp-0.1.3.dist-info/top_level.txt +0 -1
|
@@ -11,6 +11,122 @@ from typing import List, Dict, Any, Optional, Union, Sequence
|
|
|
11
11
|
from .mcp_transport import HTTPTransport
|
|
12
12
|
from .enhanced_mcp_agent import EnhancedMCPAgent
|
|
13
13
|
from .mcp_agent import MCPAgent
|
|
14
|
+
import re
|
|
15
|
+
import string
|
|
16
|
+
import logging
|
|
17
|
+
from typing import Dict, Any, Optional, List
|
|
18
|
+
import os
|
|
19
|
+
import time
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
class CoordinatorAgent(EnhancedMCPAgent):
    """Coordinator agent that forwards every inbound message to its group chat.

    Holds a back-reference to the owning HeterogeneousGroupChat so that
    custom group-level message handling can run after the default agent
    processing.
    """

    def __init__(self, group_chat, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reference to the HeterogeneousGroupChat instance that owns us.
        self.group_chat = group_chat

    async def handle_incoming_message(self, message: Dict):
        """Run the default processing, then delegate to the group chat's handler."""
        # Default processing first (base-class behavior).
        await super().handle_incoming_message(message)

        # Then hand the message to the group chat's custom handler,
        # passing along the message id (may be None if absent).
        message_id = message.get('message_id')
        await self.group_chat._handle_coordinator_message(message, message_id)
|
|
35
|
+
|
|
36
|
+
class ContextAgent(EnhancedMCPAgent):
    """Agent that maintains and provides access to task context and results.

    Runs purely in-process (no transport); stores task descriptions and
    results, and answers natural-language queries about them via the
    underlying LLM.
    """

    def __init__(self, name: str, **kwargs):
        super().__init__(
            name=name,
            transport=None,  # No transport needed for internal use
            server_mode=False,
            client_mode=False,
            **kwargs
        )
        self.task_results = {}  # task_id -> raw result
        self.task_descriptions = {}  # task_id -> description string
        self._pending_tasks = {}  # task_id -> completion event/future
        print(f"[DEBUG] __init__: self ID: {id(self)}, _pending_tasks ID: {id(self._pending_tasks)}")

        logger.info(f"[{self.name}] Initialized as context agent")

    async def query_context(self, query: str) -> Dict[str, Any]:
        """
        Query the context agent for information.

        Args:
            query: Natural language query about tasks, results, or context.
                Can also be a request to generate email content.

        Returns:
            Dict with 'answer' and 'supporting_data' keys
        """
        # Regular context query — delegates straight to the LLM-backed generator.
        return await self.generate_response(query)

    async def generate_response(self, query: str) -> Dict[str, Any]:
        """Answer general questions about tasks and results.

        Builds a prompt from the stored task descriptions and (truncated)
        results, asks the LLM via ``a_generate_reply``, and normalizes the
        reply into a dict.

        Returns:
            Dict with 'answer' (the generated text) and 'supporting_data'
            (the context given to the LLM). On failure, 'answer' holds an
            error string and 'supporting_data' is empty.
        """
        context = {
            "task_descriptions": self.task_descriptions,
            # Truncate each stored result so the prompt stays bounded.
            "task_results": {
                tid: str(r)[:500] for tid, r in self.task_results.items()
            }
        }

        messages = [{
            "role": "system",
            "content": f"""You are a context assistant that generates responses, results or content based on task as query.
You will be given a query, task description and expected to generate a content, response, as result or output

Available context, tasks and their results: {context}

"""
        }, {
            "role": "user",
            "content": f"""Generate a response, result or content based on these instructions:
{query}

"""
        }]

        try:
            response = await self.a_generate_reply(messages)

            # Ensure we have a valid response
            if not response:
                raise ValueError("Empty response from LLM")

            # Handle both string and dictionary responses
            if isinstance(response, str):
                content = response
            elif isinstance(response, dict):
                content = response.get("content", "")
                if not content and "message" in response:
                    content = response.get("message", {}).get("content", "")
            else:
                content = str(response)

            # BUG FIX: the original printed the *full* content despite the
            # comment promising a 200-char preview; truncate to keep logs sane.
            print(f"Generated response: {str(content)[:200]}")  # Log first 200 chars
            return {
                "answer": content,
                "supporting_data": context
            }
        except Exception as e:
            logger.error(f"Error querying context: {e}")
            return {
                "answer": f"Error: {str(e)}",
                "supporting_data": {}
            }

    async def update_task(self, task_id: str, task_data: Dict, result: Optional[Any] = None):
        """Update task information and results.

        Args:
            task_id: Identifier of the task to record.
            task_data: Step definition; its 'description' is stored.
            result: Optional task result; stored only when not None.
        """
        self.task_descriptions[task_id] = task_data.get("description", "No description")
        if result is not None:
            self.task_results[task_id] = result
        logger.debug(f"Updated context for task {task_id}")
|
|
129
|
+
|
|
14
130
|
|
|
15
131
|
class HeterogeneousGroupChat:
|
|
16
132
|
"""
|
|
@@ -44,12 +160,38 @@ class HeterogeneousGroupChat:
|
|
|
44
160
|
# Initialize directly on the group chat instance first
|
|
45
161
|
self.task_results: Dict[str, Any] = {}
|
|
46
162
|
self.task_dependencies: Dict[str, Dict] = {}
|
|
163
|
+
self.dependency_results: Dict[str, Any] = {} # Initialize dependency results
|
|
164
|
+
|
|
165
|
+
# Store coordinator config for later use
|
|
166
|
+
self.coordinator_config = coordinator_config or {}
|
|
167
|
+
|
|
168
|
+
# Initialize context agent with default LLM config
|
|
169
|
+
default_llm_config = {
|
|
170
|
+
"config_list": [{
|
|
171
|
+
"model": "gpt-3.5-turbo",
|
|
172
|
+
"api_key": os.getenv("OPENAI_API_KEY"), # Will be set when coordinator is created
|
|
173
|
+
"api_type": "openai"
|
|
174
|
+
}]
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
# Use provided config or default
|
|
178
|
+
llm_config = self.coordinator_config.get("llm_config", default_llm_config).copy()
|
|
179
|
+
|
|
180
|
+
# Ensure config_list exists and has at least one config
|
|
181
|
+
if not llm_config.get("config_list"):
|
|
182
|
+
llm_config["config_list"] = default_llm_config["config_list"]
|
|
183
|
+
|
|
184
|
+
# Create context agent
|
|
185
|
+
self.context_agent = ContextAgent(
|
|
186
|
+
name=f"{self.name}_context",
|
|
187
|
+
llm_config=llm_config
|
|
188
|
+
)
|
|
47
189
|
|
|
48
190
|
def _get_agent_url(self, agent_name: str) -> str:
|
|
49
191
|
"""Get the URL for an agent on the deployed server"""
|
|
50
192
|
return f"{self.server_url}/agents/{agent_name}"
|
|
51
193
|
|
|
52
|
-
def create_coordinator(self, api_key: str) ->
|
|
194
|
+
def create_coordinator(self, api_key: str) -> CoordinatorAgent:
|
|
53
195
|
"""Create the coordinator agent for the group chat"""
|
|
54
196
|
# Avoid creating coordinator if it already exists
|
|
55
197
|
if self.coordinator:
|
|
@@ -79,6 +221,7 @@ class HeterogeneousGroupChat:
|
|
|
79
221
|
},
|
|
80
222
|
}
|
|
81
223
|
|
|
224
|
+
|
|
82
225
|
# --- Merge Default and User Config ---
|
|
83
226
|
# User config takes precedence
|
|
84
227
|
final_config = default_config.copy() # Start with defaults
|
|
@@ -102,12 +245,24 @@ class HeterogeneousGroupChat:
|
|
|
102
245
|
final_config["llm_config"]["config_list"][0]["api_key"] = api_key
|
|
103
246
|
|
|
104
247
|
|
|
248
|
+
# Update context agent's LLM config to match coordinator's
|
|
249
|
+
if hasattr(self, 'context_agent') and self.context_agent:
|
|
250
|
+
# Get the final config that will be used by the coordinator
|
|
251
|
+
context_llm_config = final_config.get('llm_config', {})
|
|
252
|
+
# Update the context agent's config
|
|
253
|
+
if hasattr(self.context_agent, 'llm_config'):
|
|
254
|
+
self.context_agent.llm_config = context_llm_config
|
|
255
|
+
logger.info(f"Updated context agent's LLM config to match coordinator")
|
|
256
|
+
|
|
105
257
|
# --- Create Coordinator Agent ---
|
|
106
258
|
print(f"Creating coordinator with config: {final_config}") # Debug: Log final config
|
|
107
|
-
self.coordinator =
|
|
259
|
+
self.coordinator = CoordinatorAgent(self, **final_config)
|
|
108
260
|
|
|
109
261
|
# --- Set Message Handler ---
|
|
110
|
-
self.coordinator.transport.set_message_handler(self._handle_coordinator_message)
|
|
262
|
+
#self.coordinator.transport.set_message_handler(self._handle_coordinator_message)
|
|
263
|
+
# Use a lambda to explicitly capture the correct 'self' (the HeterogeneousGroupChat instance)
|
|
264
|
+
#self.coordinator.transport.set_message_handler(lambda msg, msg_id: self._handle_coordinator_message(msg, msg_id))
|
|
265
|
+
|
|
111
266
|
return self.coordinator
|
|
112
267
|
|
|
113
268
|
def add_agents(self, agents: Union[MCPAgent, Sequence[MCPAgent]]) -> List[MCPAgent]:
|
|
@@ -218,27 +373,186 @@ class HeterogeneousGroupChat:
|
|
|
218
373
|
# Don't run the agent if registration fails - it won't be able to communicate
|
|
219
374
|
return None # Indicate failure
|
|
220
375
|
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
376
|
+
|
|
377
|
+
async def query_context(self, query: str) -> Dict[str, Any]:
|
|
378
|
+
"""
|
|
379
|
+
Query the context agent for information about tasks and results.
|
|
380
|
+
|
|
381
|
+
Args:
|
|
382
|
+
query: Natural language query about tasks, results, or context
|
|
226
383
|
|
|
227
|
-
|
|
384
|
+
Returns:
|
|
385
|
+
Dict with 'answer' and 'supporting_data' keys
|
|
386
|
+
"""
|
|
387
|
+
return await self.context_agent.query_context(query)
|
|
388
|
+
|
|
389
|
+
def _inject_dependency_results(self, step: Dict, dependency_results: Dict) -> Dict:
|
|
390
|
+
"""Injects dependency results into a step's content.
|
|
391
|
+
|
|
392
|
+
If the step contains string.Template style placeholders (e.g., ${task_id}),
|
|
393
|
+
it substitutes them with the corresponding results.
|
|
394
|
+
|
|
395
|
+
If no placeholders are found, it assumes the agent needs the raw results
|
|
396
|
+
and adds them to the step's content under the key 'dependency_data'.
|
|
397
|
+
"""
|
|
398
|
+
if not step:
|
|
399
|
+
return step
|
|
400
|
+
|
|
401
|
+
# Check if any part of the step contains a placeholder
|
|
402
|
+
logger.info(f"No placeholders detected in step {step.get('task_id', 'N/A')}. Adding raw dependency data.")
|
|
403
|
+
dependency_data = {}
|
|
404
|
+
for dep_task_id in step.get("depends_on", []):
|
|
405
|
+
result_value = dependency_results.get(dep_task_id)
|
|
406
|
+
if result_value is None:
|
|
407
|
+
logger.warning(f"No result found for dependency '{dep_task_id}' when preparing raw data for step '{step.get('task_id', 'N/A')}'")
|
|
408
|
+
extracted_value = None # Or some placeholder?
|
|
409
|
+
elif isinstance(result_value, dict):
|
|
410
|
+
# Prioritize 'output', then 'result', then string representation
|
|
411
|
+
if 'output' in result_value: # Check presence first
|
|
412
|
+
extracted_value = result_value['output']
|
|
413
|
+
elif 'result' in result_value: # Check presence first
|
|
414
|
+
extracted_value = result_value['result']
|
|
415
|
+
else:
|
|
416
|
+
logger.warning(f"Raw dependency '{dep_task_id}': Neither 'output' nor 'result' key found in dict result. Using full dict.")
|
|
417
|
+
extracted_value = result_value # Pass the whole dict
|
|
418
|
+
else:
|
|
419
|
+
extracted_value = result_value # Pass strings, numbers, lists as-is
|
|
420
|
+
|
|
421
|
+
dependency_data[dep_task_id] = extracted_value
|
|
422
|
+
|
|
423
|
+
# Ensure 'content' exists and add the data
|
|
424
|
+
if "content" not in step:
|
|
425
|
+
step["content"] = {}
|
|
426
|
+
if not isinstance(step["content"], dict):
|
|
427
|
+
logger.warning(f"Step {step.get('task_id', 'N/A')} content is not a dict, cannot add dependency_data. Content: {step['content']}")
|
|
428
|
+
else:
|
|
429
|
+
step["content"]["dependency_data"] = dependency_data
|
|
430
|
+
|
|
431
|
+
return step
|
|
432
|
+
|
|
433
|
+
async def submit_task(self, task: Dict[str, Any], inject_at_submit_time: bool = False) -> None:
|
|
434
|
+
"""
|
|
435
|
+
Submit a group task. If inject_at_submit_time is True, inject dependency results into each step now.
|
|
436
|
+
If False, inject at the last possible moment (just before sending to agents).
|
|
437
|
+
"""
|
|
438
|
+
# Reset state for new task submission
|
|
439
|
+
self.task_results = {}
|
|
440
|
+
self.context_agent.task_results = {}
|
|
441
|
+
self.context_agent.task_descriptions = {}
|
|
442
|
+
self._pending_tasks = {} # Track pending tasks with their completion events to ensure its always new for each submission
|
|
443
|
+
print(f"[DEBUG] __init__: self ID: {id(self)}, _pending_tasks ID: {id(self._pending_tasks)}")
|
|
444
|
+
steps = task.get("steps", [])
|
|
445
|
+
self.task_dependencies = {step["task_id"]: step for step in steps} # Store task dependencies
|
|
446
|
+
|
|
447
|
+
self._inject_at_submit_time = inject_at_submit_time
|
|
448
|
+
if inject_at_submit_time:
|
|
449
|
+
steps = [self._inject_dependency_results(step, self.task_results) for step in steps]
|
|
450
|
+
self._pending_steps = steps # Store for later use
|
|
451
|
+
await self._submit_steps(steps)
|
|
452
|
+
|
|
453
|
+
    async def _submit_steps(self, steps):
        """Dispatch each step to its agent sequentially, waiting for completion.

        For every step: optionally inject dependency results (when not already
        injected at submit time), register an ``asyncio.Future`` in
        ``self._pending_tasks`` keyed by task_id, send the step, then block up
        to 60 seconds for the coordinator's message handler to resolve the
        future with the step's result.

        Raises:
            TimeoutError: if a step does not complete within 60 seconds.
            Exception: re-raises any error from injection or dispatch.
        """
        for step in steps:
            try:
                # Only inject here if not already injected at submit time.
                # NOTE(review): the getattr default is True, so if submit_task
                # never ran (attribute unset) injection is skipped here —
                # confirm that is the intended fallback.
                if not getattr(self, '_inject_at_submit_time', True):
                    step = self._inject_dependency_results(step, self.task_results)

                # Create and store a completion future for this task; the
                # coordinator's message handler resolves it on a "result" message.
                task_id = step['task_id']
                future = asyncio.Future()
                self._pending_tasks[task_id] = future
                print(f"[DEBUG] _submit_steps: Added task {task_id}. Current _pending_tasks: {list(self._pending_tasks.keys())} (ID: {id(self._pending_tasks)})")
                print(f"[DEBUG] Added task {task_id} to pending_tasks")
                # Submit the task
                await self._send_step_to_agent(step)

                # Wait for task completion with timeout
                try:
                    await asyncio.wait_for(future, timeout=60)
                    print(f"[DEBUG] Task {task_id} completed with result: {future.result()}")
                except asyncio.TimeoutError:
                    logger.error(f"Task {task_id} timed out")
                    raise TimeoutError(f"Task {task_id} timed out")
                finally:
                    # Clean up regardless of outcome so stale futures don't linger.
                    self._pending_tasks.pop(task_id, None)

            except Exception as e:
                logger.error(f"Error in step {step.get('task_id')}: {e}")
                raise
|
483
|
+
|
|
484
|
+
async def _generate_content(self, description: str, content: Dict) -> Dict:
|
|
485
|
+
"""Generate content using the ContextAgent based on the task description.
|
|
486
|
+
|
|
487
|
+
Args:
|
|
488
|
+
description: The task description
|
|
489
|
+
content: Existing content to be augmented with generated content
|
|
490
|
+
|
|
491
|
+
Returns:
|
|
492
|
+
Dict: Content with generated fields merged in
|
|
493
|
+
"""
|
|
494
|
+
# Ensure we have a dictionary to work with
|
|
495
|
+
if not isinstance(content, dict):
|
|
496
|
+
content = {}
|
|
497
|
+
|
|
498
|
+
try:
|
|
499
|
+
# Generate content using the context agent
|
|
500
|
+
generated = await self.context_agent.generate_response(description)
|
|
501
|
+
|
|
502
|
+
print(f"Generated content: {generated}")
|
|
503
|
+
if generated and isinstance(generated, dict) and "answer" in generated:
|
|
504
|
+
try:
|
|
505
|
+
# Try to parse as JSON first
|
|
506
|
+
generated_content = json.loads(generated["answer"])
|
|
507
|
+
# If it's a dictionary, merge it intelligently
|
|
508
|
+
if isinstance(generated_content, dict):
|
|
509
|
+
# Merge with existing content, with generated content taking precedence
|
|
510
|
+
content = {**content, **generated_content}
|
|
511
|
+
# If it's not a dictionary, store it as generated_content
|
|
512
|
+
else:
|
|
513
|
+
content["content"] = generated_content # this is the generated content that will be sent to the agent
|
|
514
|
+
except (json.JSONDecodeError, TypeError):
|
|
515
|
+
# If not JSON, store the raw answer
|
|
516
|
+
content["content"] = generated["answer"] # this is the generated content that will be sent to the agent
|
|
517
|
+
|
|
518
|
+
except Exception as e:
|
|
519
|
+
logger.error(f"Error generating content: {e}")
|
|
520
|
+
print(f"Error generating content: {e}")
|
|
521
|
+
content["error"] = str(e)
|
|
522
|
+
|
|
523
|
+
return content
|
|
524
|
+
|
|
525
|
+
async def _send_step_to_agent(self, step):
|
|
526
|
+
# 1-line dependency enforcement: skip if any dependency missing or empty
|
|
527
|
+
#if any(not self.dependency_results.get(dep) for dep in step.get("depends_on", [])):
|
|
528
|
+
# print(f"Skipping {step['task_id']} because dependencies not satisfied: {step.get('depends_on', [])}")
|
|
529
|
+
# return
|
|
530
|
+
step = self._inject_dependency_results(step, self.task_results)
|
|
531
|
+
|
|
532
|
+
# Update context agent with task info
|
|
533
|
+
task_id = step.get("task_id", str(id(step)))
|
|
534
|
+
await self.context_agent.update_task(task_id, step)
|
|
535
|
+
|
|
228
536
|
print("\n=== Submitting task to group ===")
|
|
229
537
|
|
|
230
538
|
# Ensure task is in the correct format
|
|
231
|
-
if not isinstance(
|
|
232
|
-
task
|
|
539
|
+
if not isinstance(step, dict):
|
|
540
|
+
raise ValueError("Invalid task format. Expected a dictionary.")
|
|
541
|
+
|
|
542
|
+
# Handle content generation if requested
|
|
543
|
+
if step.get("content", {}).get("generate_content", False):
|
|
544
|
+
description = step.get("description", "")
|
|
545
|
+
if description:
|
|
546
|
+
# Make a copy to avoid modifying the original
|
|
547
|
+
content = step.get("content", {}).copy()
|
|
548
|
+
# Remove the flag before generation
|
|
549
|
+
content.pop("generate_content", None)
|
|
550
|
+
# Generate and merge content
|
|
551
|
+
step["content"] = await self._generate_content(description, content)
|
|
233
552
|
|
|
234
553
|
# Store task dependencies from the input task definition
|
|
235
554
|
# We need a dictionary where keys are the step task_ids
|
|
236
|
-
|
|
237
|
-
# If task is already a dict mapping task_ids to task info
|
|
238
|
-
self.task_dependencies = task['content']
|
|
239
|
-
else:
|
|
240
|
-
# If task has a steps list, convert it to a dict
|
|
241
|
-
self.task_dependencies = {step["task_id"]: step for step in task['content'].get("steps", [])}
|
|
555
|
+
self.task_dependencies[step["task_id"]] = step
|
|
242
556
|
print(f"Parsed Step Dependencies: {self.task_dependencies}")
|
|
243
557
|
|
|
244
558
|
# Also store in coordinator instance if it exists
|
|
@@ -259,22 +573,21 @@ class HeterogeneousGroupChat:
|
|
|
259
573
|
|
|
260
574
|
# Assign tasks to agents based on the structure
|
|
261
575
|
# Submit tasks to their respective agents
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
await coordinator_transport.send_message(agent_name, message)
|
|
576
|
+
agent_name = step["agent"]
|
|
577
|
+
# Create message with all necessary fields including content
|
|
578
|
+
message = {
|
|
579
|
+
"type": "task",
|
|
580
|
+
"task_id": step["task_id"],
|
|
581
|
+
"description": step["description"],
|
|
582
|
+
"sender": self.coordinator.name,
|
|
583
|
+
"content": step.get("content", {}), # Include task content
|
|
584
|
+
"depends_on": step.get("depends_on", []), # Include dependencies
|
|
585
|
+
"reply_to": f"{self.server_url}/message/{self.coordinator.name}" # Full URL for reply
|
|
586
|
+
}
|
|
587
|
+
print(f"Sending task to {agent_name}")
|
|
588
|
+
print(f"Task message: {message}")
|
|
589
|
+
# Use coordinator's transport to send task to agent
|
|
590
|
+
await coordinator_transport.send_message(agent_name, message)
|
|
278
591
|
|
|
279
592
|
print("Task submitted. Waiting for completion...")
|
|
280
593
|
|
|
@@ -317,11 +630,12 @@ class HeterogeneousGroupChat:
|
|
|
317
630
|
|
|
318
631
|
async def _handle_coordinator_message(self, message: Dict, message_id: str):
|
|
319
632
|
"""Handles messages received by the coordinator's transport."""
|
|
633
|
+
|
|
320
634
|
if not self.coordinator: # Ensure coordinator exists
|
|
321
|
-
print("[Coordinator Handler] Error: Coordinator not initialized.")
|
|
635
|
+
print(f"[Coordinator Handler] Error: Coordinator not initialized. Self ID: {id(self)}")
|
|
322
636
|
return
|
|
323
637
|
|
|
324
|
-
print(f"\n[Coordinator {self.coordinator.name}] Received message: {message}")
|
|
638
|
+
print(f"\n[Coordinator {self.coordinator.name}] Received message: {message}. Self ID: {id(self)}")
|
|
325
639
|
|
|
326
640
|
# Handle messages wrapped in 'body' field
|
|
327
641
|
if isinstance(message, dict) and 'body' in message:
|
|
@@ -339,19 +653,66 @@ class HeterogeneousGroupChat:
|
|
|
339
653
|
msg_type = message.get("type")
|
|
340
654
|
task_id = message.get("task_id")
|
|
341
655
|
|
|
342
|
-
print(f"[Coordinator {self.coordinator.name}] Processing message type '{msg_type}' for task {task_id}")
|
|
656
|
+
print(f"[Coordinator {self.coordinator.name}] Processing message type '{msg_type}' for task {task_id}. Current _pending_tasks in handler: {list(self._pending_tasks.keys())} (ID: {id(self._pending_tasks)})")
|
|
343
657
|
|
|
344
658
|
if msg_type in ["result", "task_result"]: # Handle both result types
|
|
345
|
-
|
|
659
|
+
# First try direct fields, then try parsing content.text if it exists
|
|
660
|
+
result_content = None
|
|
661
|
+
|
|
662
|
+
# Try direct fields first
|
|
663
|
+
result_content = message.get("result") or message.get("description")
|
|
664
|
+
|
|
665
|
+
# If not found, try to parse from content.text
|
|
666
|
+
if result_content is None and "content" in message and isinstance(message["content"], dict):
|
|
667
|
+
content_text = message["content"].get("text")
|
|
668
|
+
if content_text:
|
|
669
|
+
try:
|
|
670
|
+
content_data = json.loads(content_text)
|
|
671
|
+
result_content = content_data.get("result") or content_data.get("description")
|
|
672
|
+
# Update task_id from content if not set
|
|
673
|
+
if not task_id and "task_id" in content_data:
|
|
674
|
+
task_id = content_data["task_id"]
|
|
675
|
+
except (json.JSONDecodeError, AttributeError, TypeError) as e:
|
|
676
|
+
print(f"[Coordinator {self.coordinator.name}] Error parsing content.text: {e}")
|
|
677
|
+
|
|
346
678
|
if task_id and result_content is not None:
|
|
347
679
|
print(f"[Coordinator {self.coordinator.name}] Storing result for task {task_id}")
|
|
348
680
|
# Store result in both the group chat and coordinator
|
|
349
681
|
self.task_results[task_id] = result_content
|
|
682
|
+
self.dependency_results[task_id] = result_content # Required for template resolution
|
|
683
|
+
if "dependency_results" not in self.coordinator.task_results:
|
|
684
|
+
self.coordinator.task_results["dependency_results"] = {}
|
|
685
|
+
self.coordinator.task_results["dependency_results"][task_id] = result_content
|
|
350
686
|
self.coordinator.task_results[task_id] = result_content
|
|
351
|
-
print(f"[Coordinator {self.coordinator.name}] Stored result
|
|
687
|
+
print(f"[Coordinator {self.coordinator.name}] Stored result for task {task_id}")
|
|
688
|
+
print(f"[Coordinator {self.coordinator.name}] Stored result: {result_content}...")
|
|
352
689
|
print(f"[Coordinator {self.coordinator.name}] Current task results: {list(self.task_results.keys())}")
|
|
353
690
|
print(f"[Coordinator {self.coordinator.name}] Current dependencies: {self.task_dependencies}")
|
|
354
691
|
|
|
692
|
+
# Signal task completion if anyone is waiting
|
|
693
|
+
if not hasattr(self, '_pending_tasks'):
|
|
694
|
+
self._pending_tasks = {}
|
|
695
|
+
print(f"[Coordinator {self.coordinator.name}] Current pending tasks: {list(self._pending_tasks.keys())}")
|
|
696
|
+
print(f"[DEBUG] Checking if task {task_id} is in pending_tasks: {task_id in self._pending_tasks}")
|
|
697
|
+
|
|
698
|
+
if task_id in self._pending_tasks:
|
|
699
|
+
print(f"[Coordinator {self.coordinator.name}] Signaling completion for task {task_id}")
|
|
700
|
+
future = self._pending_tasks[task_id]
|
|
701
|
+
if not future.done():
|
|
702
|
+
#future.set_result(result_content)
|
|
703
|
+
asyncio.get_event_loop().call_soon_threadsafe(lambda: future.set_result(result_content) if not future.done() else None)
|
|
704
|
+
print(f"[DEBUG] Set result for task {task_id}")
|
|
705
|
+
await asyncio.sleep(0)
|
|
706
|
+
print(f"[Coordinator {self.coordinator.name}] Completed task {task_id}")
|
|
707
|
+
#asyncio.get_running_loop().call_soon(future.set_result, result_content)
|
|
708
|
+
#asyncio.get_event_loop().call_soon_threadsafe(lambda: future.set_result(result_content) if not future.done() else None)
|
|
709
|
+
# Clean up the task after signaling
|
|
710
|
+
#if task_id in self._pending_tasks:
|
|
711
|
+
#del self._pending_tasks[task_id]
|
|
712
|
+
print(f"[Coordinator {self.coordinator.name}] Completed task {task_id}")
|
|
713
|
+
else:
|
|
714
|
+
print(f"[Coordinator {self.coordinator.name}] Task {task_id} not found in pending tasks")
|
|
715
|
+
|
|
355
716
|
# Acknowledge the message
|
|
356
717
|
try:
|
|
357
718
|
if message_id: # Only acknowledge if we have a message ID
|
|
@@ -359,6 +720,7 @@ class HeterogeneousGroupChat:
|
|
|
359
720
|
print(f"[Coordinator {self.coordinator.name}] Acknowledged message {message_id}")
|
|
360
721
|
except Exception as e:
|
|
361
722
|
print(f"[Coordinator {self.coordinator.name}] Error acknowledging message {message_id}: {e}")
|
|
723
|
+
return
|
|
362
724
|
else:
|
|
363
725
|
print(f"[Coordinator {self.coordinator.name}] Received invalid result message (missing task_id or result): {message}")
|
|
364
726
|
elif msg_type == "get_result": # Handle get result request
|
|
@@ -393,6 +755,14 @@ class HeterogeneousGroupChat:
|
|
|
393
755
|
except Exception as e:
|
|
394
756
|
print(f"[Coordinator {self.coordinator.name}] Error acknowledging message {message_id}: {e}")
|
|
395
757
|
|
|
758
|
+
@property
|
|
759
|
+
def group_state(self) -> dict:
|
|
760
|
+
"""
|
|
761
|
+
Returns a merged dictionary of all task results (group and coordinator).
|
|
762
|
+
Agents can use this to access the shared group chat history/results.
|
|
763
|
+
"""
|
|
764
|
+
return {**self.task_results, **(self.coordinator.task_results if self.coordinator else {})}
|
|
765
|
+
|
|
396
766
|
async def shutdown(self):
|
|
397
767
|
"""Gracefully disconnect all agents and cancel their tasks."""
|
|
398
768
|
print(f"Initiating shutdown for {len(self._agent_tasks)} agent tasks...")
|
|
@@ -422,3 +792,7 @@ class HeterogeneousGroupChat:
|
|
|
422
792
|
print("All agent transports disconnected.")
|
|
423
793
|
|
|
424
794
|
print("Shutdown complete.")
|
|
795
|
+
|
|
796
|
+
# === Minimal free-flow chat: send a message to any agent ===
|
|
797
|
+
async def send_chat_message(self, agent_name, message):
|
|
798
|
+
await self.coordinator.transport.send_message(agent_name, {"type": "message", "content": message})
|