agent-mcp 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. agent_mcp/__init__.py +16 -0
  2. agent_mcp/camel_mcp_adapter.py +521 -0
  3. agent_mcp/cli.py +47 -0
  4. agent_mcp/crewai_mcp_adapter.py +281 -0
  5. agent_mcp/enhanced_mcp_agent.py +601 -0
  6. agent_mcp/heterogeneous_group_chat.py +798 -0
  7. agent_mcp/langchain_mcp_adapter.py +458 -0
  8. agent_mcp/langgraph_mcp_adapter.py +325 -0
  9. agent_mcp/mcp_agent.py +658 -0
  10. agent_mcp/mcp_decorator.py +257 -0
  11. agent_mcp/mcp_langgraph.py +733 -0
  12. agent_mcp/mcp_transaction.py +97 -0
  13. agent_mcp/mcp_transport.py +706 -0
  14. agent_mcp/mcp_transport_enhanced.py +46 -0
  15. agent_mcp/proxy_agent.py +24 -0
  16. agent_mcp-0.1.4.dist-info/METADATA +333 -0
  17. agent_mcp-0.1.4.dist-info/RECORD +49 -0
  18. {agent_mcp-0.1.2.dist-info → agent_mcp-0.1.4.dist-info}/WHEEL +1 -1
  19. agent_mcp-0.1.4.dist-info/entry_points.txt +2 -0
  20. agent_mcp-0.1.4.dist-info/top_level.txt +3 -0
  21. demos/__init__.py +1 -0
  22. demos/basic/__init__.py +1 -0
  23. demos/basic/framework_examples.py +108 -0
  24. demos/basic/langchain_camel_demo.py +272 -0
  25. demos/basic/simple_chat.py +355 -0
  26. demos/basic/simple_integration_example.py +51 -0
  27. demos/collaboration/collaborative_task_example.py +437 -0
  28. demos/collaboration/group_chat_example.py +130 -0
  29. demos/collaboration/simplified_crewai_example.py +39 -0
  30. demos/langgraph/autonomous_langgraph_network.py +808 -0
  31. demos/langgraph/langgraph_agent_network.py +415 -0
  32. demos/langgraph/langgraph_collaborative_task.py +619 -0
  33. demos/langgraph/langgraph_example.py +227 -0
  34. demos/langgraph/run_langgraph_examples.py +213 -0
  35. demos/network/agent_network_example.py +381 -0
  36. demos/network/email_agent.py +130 -0
  37. demos/network/email_agent_demo.py +46 -0
  38. demos/network/heterogeneous_network_example.py +216 -0
  39. demos/network/multi_framework_example.py +199 -0
  40. demos/utils/check_imports.py +49 -0
  41. demos/workflows/autonomous_agent_workflow.py +248 -0
  42. demos/workflows/mcp_features_demo.py +353 -0
  43. demos/workflows/run_agent_collaboration_demo.py +63 -0
  44. demos/workflows/run_agent_collaboration_with_logs.py +396 -0
  45. demos/workflows/show_agent_interactions.py +107 -0
  46. demos/workflows/simplified_autonomous_demo.py +74 -0
  47. functions/main.py +144 -0
  48. functions/mcp_network_server.py +513 -0
  49. functions/utils.py +47 -0
  50. agent_mcp-0.1.2.dist-info/METADATA +0 -475
  51. agent_mcp-0.1.2.dist-info/RECORD +0 -5
  52. agent_mcp-0.1.2.dist-info/entry_points.txt +0 -2
  53. agent_mcp-0.1.2.dist-info/top_level.txt +0 -1
agent_mcp/heterogeneous_group_chat.py
@@ -0,0 +1,798 @@
+ """
+ HeterogeneousGroupChat - A group chat implementation for heterogeneous agents.
+ 
+ This module provides a high-level abstraction for creating group chats with agents
+ from different frameworks (Autogen, Langchain, etc.) that can collaborate on tasks.
+ """
+ 
+ import asyncio
+ import json
+ from typing import List, Dict, Any, Optional, Union, Sequence
+ from .mcp_transport import HTTPTransport
+ from .enhanced_mcp_agent import EnhancedMCPAgent
+ from .mcp_agent import MCPAgent
+ import re
+ import string
+ import logging
+ import os
+ import time
+ 
+ logger = logging.getLogger(__name__)
+ 
+ class CoordinatorAgent(EnhancedMCPAgent):
+     def __init__(self, group_chat, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.group_chat = group_chat  # Reference to the HeterogeneousGroupChat instance
+ 
+     async def handle_incoming_message(self, message: Dict):
+         # First, call the super method to handle default processing
+         await super().handle_incoming_message(message)
+ 
+         # Then delegate to the group chat's custom handler
+         await self.group_chat._handle_coordinator_message(message, message.get('message_id'))
+ 
+ class ContextAgent(EnhancedMCPAgent):
+     """Agent that maintains and provides access to task context and results."""
+ 
+     def __init__(self, name: str, **kwargs):
+         super().__init__(
+             name=name,
+             transport=None,  # No transport needed for internal use
+             server_mode=False,
+             client_mode=False,
+             **kwargs
+         )
+         self.task_results = {}  # Store task results
+         self.task_descriptions = {}  # Store task descriptions
+         self._pending_tasks = {}  # Track pending tasks with their completion futures
+         logger.debug(f"__init__: self ID: {id(self)}, _pending_tasks ID: {id(self._pending_tasks)}")
+ 
+         logger.info(f"[{self.name}] Initialized as context agent")
+ 
+     async def query_context(self, query: str) -> Dict[str, Any]:
+         """
+         Query the context agent for information.
+ 
+         Args:
+             query: Natural language query about tasks, results, or context.
+                 Can also be a request to generate email content.
+ 
+         Returns:
+             Dict with 'answer' and 'supporting_data' keys
+         """
+         # Regular context query
+         return await self.generate_response(query)
+ 
+     async def generate_response(self, query: str) -> Dict[str, Any]:
+         """Answer general questions about tasks and results."""
+         context = {
+             "task_descriptions": self.task_descriptions,
+             "task_results": {
+                 tid: str(r)[:500] for tid, r in self.task_results.items()
+             }
+         }
+ 
+         messages = [{
+             "role": "system",
+             "content": f"""You are a context assistant. Given a query and the descriptions and results of previously completed tasks, you generate the requested content, response or result.
+ 
+ Available context (tasks and their results): {context}
+ """
+         }, {
+             "role": "user",
+             "content": f"""Generate a response, result or content based on these instructions:
+ {query}
+ """
+         }]
+ 
+         try:
+             response = await self.a_generate_reply(messages)
+ 
+             # Ensure we have a valid response
+             if not response:
+                 raise ValueError("Empty response from LLM")
+ 
+             # Handle both string and dictionary responses
+             if isinstance(response, str):
+                 content = response
+             elif isinstance(response, dict):
+                 content = response.get("content", "")
+                 if not content and "message" in response:
+                     content = response.get("message", {}).get("content", "")
+             else:
+                 content = str(response)
+ 
+             print(f"Generated response: {content[:200]}")  # Log first 200 chars
+             return {
+                 "answer": content,
+                 "supporting_data": context
+             }
+         except Exception as e:
+             logger.error(f"Error querying context: {e}")
+             return {
+                 "answer": f"Error: {str(e)}",
+                 "supporting_data": {}
+             }
+ 
+     async def update_task(self, task_id: str, task_data: Dict, result: Optional[Any] = None):
+         """Update task information and results."""
+         self.task_descriptions[task_id] = task_data.get("description", "No description")
+         if result is not None:
+             self.task_results[task_id] = result
+         logger.debug(f"Updated context for task {task_id}")
+ 
+ 
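For reference, a minimal sketch of how the internal ContextAgent defined above can be exercised on its own (the names, task id and llm_config contents are illustrative assumptions; query_context is a coroutine, so it is driven with asyncio here):

import asyncio, os
from agent_mcp.heterogeneous_group_chat import ContextAgent

async def demo_context_agent():
    agent = ContextAgent(
        name="demo_context",  # hypothetical name
        llm_config={"config_list": [{"model": "gpt-3.5-turbo", "api_key": os.getenv("OPENAI_API_KEY")}]},
    )
    await agent.update_task("research", {"description": "Collect three facts about MCP"},
                            result="fact one; fact two; fact three")
    reply = await agent.query_context("Summarize the research results in one sentence.")
    print(reply["answer"])           # LLM-generated text
    print(reply["supporting_data"])  # the task descriptions/results shown to the model

asyncio.run(demo_context_agent())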
+ class HeterogeneousGroupChat:
+     """
+     A group chat for heterogeneous agents that abstracts away the complexity
+     of setting up connections and coordinating tasks between different frameworks.
+     """
+ 
+     def __init__(
+         self,
+         name: str,
+         server_url: str = "https://mcp-server-ixlfhxquwq-ew.a.run.app",
+         coordinator_config: Optional[Dict[str, Any]] = None
+     ):
+         """
+         Initialize a heterogeneous group chat.
+ 
+         Args:
+             name: Name of the group chat
+             server_url: URL of the deployed MCP server
+             coordinator_config: Optional configuration for the coordinator agent
+         """
+         self.name = name
+         self.server_url = server_url
+         self.agents: List[MCPAgent] = []
+         self.coordinator: Optional[EnhancedMCPAgent] = None
+         self.coordinator_config = coordinator_config or {}  # Stored for later use by create_coordinator()
+         self.coordinator_url = server_url
+         self.agent_tokens: Dict[str, str] = {}  # Store agent tokens
+         self._register_event = asyncio.Event()
+         self._agent_tasks = []  # Background asyncio tasks, one per running agent
+         # Initialize shared state directly on the group chat instance
+         self.task_results: Dict[str, Any] = {}
+         self.task_dependencies: Dict[str, Dict] = {}
+         self.dependency_results: Dict[str, Any] = {}  # Results keyed by task_id, used for dependency injection
+         self._pending_tasks: Dict[str, asyncio.Future] = {}  # Futures signalled when a task's result arrives
+ 
+         # Initialize the context agent with a default LLM config
+         default_llm_config = {
+             "config_list": [{
+                 "model": "gpt-3.5-turbo",
+                 "api_key": os.getenv("OPENAI_API_KEY"),  # May be overridden when the coordinator is created
+                 "api_type": "openai"
+             }]
+         }
+ 
+         # Use the provided config or the default
+         llm_config = self.coordinator_config.get("llm_config", default_llm_config).copy()
+ 
+         # Ensure config_list exists and has at least one config
+         if not llm_config.get("config_list"):
+             llm_config["config_list"] = default_llm_config["config_list"]
+ 
+         # Create context agent
+         self.context_agent = ContextAgent(
+             name=f"{self.name}_context",
+             llm_config=llm_config
+         )
+ 
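For orientation, a minimal end-to-end sketch of the public API defined below (the adapter class name, its constructor arguments, agent names and the task payload are assumptions for illustration; the adapter modules themselves ship in this wheel):

import asyncio, os
from agent_mcp.heterogeneous_group_chat import HeterogeneousGroupChat
from agent_mcp.langchain_mcp_adapter import LangchainMCPAdapter  # class name assumed for the example

async def main():
    group = HeterogeneousGroupChat(name="DemoGroup", server_url="https://my-mcp-server.example.com")
    group.create_coordinator(api_key=os.environ["OPENAI_API_KEY"])
    group.add_agents([LangchainMCPAdapter(name="Researcher")])  # constructor args are illustrative
    await group.connect()  # registers every agent and starts its run loop
    await group.submit_task({"steps": [{"task_id": "t1", "agent": "Researcher",
                                        "description": "Collect three facts about MCP",
                                        "content": {}, "depends_on": []}]})
    await group.wait_for_completion()
    await group.shutdown()

asyncio.run(main())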
+     def _get_agent_url(self, agent_name: str) -> str:
+         """Get the URL for an agent on the deployed server"""
+         return f"{self.server_url}/agents/{agent_name}"
+ 
+     def create_coordinator(self, api_key: str) -> CoordinatorAgent:
+         """Create the coordinator agent for the group chat"""
+         # Avoid creating the coordinator if it already exists
+         if self.coordinator:
+             return self.coordinator
+ 
+         # Define coordinator name (use config if provided, else default)
+         coordinator_name = self.coordinator_config.get("name", f"{self.name}Coordinator")
+ 
+         # Create transport for the coordinator, passing its name
+         coordinator_transport = HTTPTransport.from_url(
+             self.server_url,
+             agent_name=coordinator_name
+         )
+ 
+         # --- Default Coordinator Configuration ---
+         default_config = {
+             "name": coordinator_name,
+             "transport": coordinator_transport,
+             "system_message": "You are a helpful AI assistant coordinating tasks between other specialized agents. You receive task results and ensure the overall goal is achieved.",
+             "llm_config": {
+                 # Default model, can be overridden by coordinator_config
+                 "config_list": [{
+                     "model": "gpt-3.5-turbo",
+                     "api_key": api_key
+                 }],
+                 "cache_seed": 42  # Or None for no caching
+             },
+         }
+ 
+         # --- Merge Default and User Config ---
+         # User config takes precedence
+         final_config = default_config.copy()  # Start with defaults
+         final_config.update(self.coordinator_config)  # Update with user overrides
+ 
+         # Ensure llm_config is properly structured if overridden
+         if "llm_config" in self.coordinator_config and "config_list" not in final_config["llm_config"]:
+             print("Warning: coordinator_config provided llm_config without config_list. Re-structuring.")
+             # Assume the user provided a flat dict like {"api_key": ..., "model": ...}
+             # and wrap it in a config_list for AutoGen
+             user_llm_config = final_config["llm_config"]
+             final_config["llm_config"] = {
+                 "config_list": [user_llm_config],
+                 "cache_seed": user_llm_config.get("cache_seed", 42)
+             }
+         elif "llm_config" in final_config and "api_key" not in final_config["llm_config"].get("config_list", [{}])[0]:
+             # llm_config exists but api_key is missing in the primary config
+             print("Warning: api_key missing in llm_config config_list. Injecting from create_coordinator argument.")
+             if "config_list" not in final_config["llm_config"]:
+                 final_config["llm_config"]["config_list"] = [{}]
+             final_config["llm_config"]["config_list"][0]["api_key"] = api_key
+ 
+         # Update the context agent's LLM config to match the coordinator's
+         if hasattr(self, 'context_agent') and self.context_agent:
+             context_llm_config = final_config.get('llm_config', {})
+             if hasattr(self.context_agent, 'llm_config'):
+                 self.context_agent.llm_config = context_llm_config
+                 logger.info("Updated context agent's LLM config to match coordinator")
+ 
+         # --- Create Coordinator Agent ---
+         print(f"Creating coordinator with config: {final_config}")  # Debug: log final config
+         self.coordinator = CoordinatorAgent(self, **final_config)
+ 
+         # Incoming messages are routed through CoordinatorAgent.handle_incoming_message,
+         # so no separate transport-level message handler needs to be registered here.
+ 
+         return self.coordinator
+ 
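As a hedged illustration of the merge logic above, a coordinator_config override might look like the following (the model name and key source are placeholders; anything not supplied falls back to the defaults shown in default_config):

import os
from agent_mcp.heterogeneous_group_chat import HeterogeneousGroupChat

group = HeterogeneousGroupChat(
    name="DemoGroup",
    coordinator_config={
        "name": "DemoCoordinator",                        # overrides the "<name>Coordinator" default
        "system_message": "Coordinate the demo agents.",  # overrides the default system message
        "llm_config": {                                   # already in AutoGen config_list form
            "config_list": [{"model": "gpt-4o-mini", "api_key": os.getenv("OPENAI_API_KEY")}],
            "cache_seed": None,                           # disable response caching
        },
    },
)
coordinator = group.create_coordinator(api_key=os.getenv("OPENAI_API_KEY"))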
+     def add_agents(self, agents: Union[MCPAgent, Sequence[MCPAgent]]) -> List[MCPAgent]:
+         """
+         Add one or more agents to the group chat.
+ 
+         Args:
+             agents: A single MCPAgent or a sequence of MCPAgents
+ 
+         Returns:
+             List of added agents
+ 
+         Example:
+             # Add a single agent
+             group.add_agents(agent1)
+ 
+             # Add multiple agents as a list
+             group.add_agents([agent1, agent2, agent3])
+         """
+         if not isinstance(agents, (list, tuple)):
+             agents = [agents]
+ 
+         added_agents = []
+         for agent in agents:
+             # Retrieve the token if this agent was already registered
+             token = self.agent_tokens.get(agent.name)
+             if not self.server_url:
+                 raise ValueError("Cannot add agents without a server URL. Pass server_url when creating the group chat.")
+ 
+             # Create a transport for the agent, passing its name and token
+             agent.transport = HTTPTransport.from_url(self.server_url, agent_name=agent.name, token=token)
+ 
+             # Set client mode if needed
+             if hasattr(agent, 'client_mode'):
+                 agent.client_mode = True
+ 
+             self.agents.append(agent)
+             added_agents.append(agent)
+ 
+         return added_agents
+ 
+     # Alias for backward compatibility
+     add_agent = add_agents
+ 
+     async def connect(self):
+         """Register all agents and start their processing loops."""
+         print("Registering coordinator...")
+         coord_task = await self._register_and_start_agent(self.coordinator)
+         if not coord_task:
+             print("Coordinator registration failed. Aborting connect.")
+             return
+ 
+         print("Registering agents...")
+         tasks = [coord_task]  # Start with the coordinator's task
+         for agent in self.agents:
+             agent_task = await self._register_and_start_agent(agent)
+             if agent_task:  # Only add the task if registration was successful
+                 tasks.append(agent_task)
+             else:
+                 print(f"Skipping agent {agent.name} due to registration failure.")
+                 # Optionally, handle failed agents (e.g., remove them from the group)
+ 
+         if not tasks:
+             print("No agents were successfully registered and started.")
+             return
+ 
+         print(f"All {len(tasks)} agents registered and started.")
+         # Store the tasks but don't wait for them - they'll run in the background
+         self._agent_tasks = tasks
+         print("Group chat ready for task submission.")
+ 
+     async def _register_and_start_agent(self, agent: MCPAgent):
+         """Register an agent, start its event stream, and start its processors."""
+         if not agent.transport or not isinstance(agent.transport, HTTPTransport):
+             raise ValueError(f"Agent {agent.name} has no valid HTTPTransport defined.")
+ 
+         response = await agent.transport.register_agent(agent)
+ 
+         # Parse the response, which may arrive in {'body': '{...}'} format
+         if isinstance(response, dict):
+             if 'body' in response:
+                 # Response is wrapped, parse the body string
+                 try:
+                     response = json.loads(response['body'])
+                 except json.JSONDecodeError:
+                     print(f"Error parsing agent registration response body: {response}")
+ 
+         if response and isinstance(response, dict) and "token" in response:
+             token = response["token"]
+             self.agent_tokens[agent.name] = token
+             agent.transport.token = token
+             agent.transport.auth_token = token
+             print(f"Agent {agent.name} registered successfully with token.")
+ 
+             # Start polling *before* starting the agent's run loop
+             await agent.transport.start_polling()
+ 
+             # Start the agent's main run loop (message processing, etc.).
+             # The task is created but not awaited here; connect() stores it and it runs in the background.
+             task = asyncio.create_task(agent.run())
+             self._agent_tasks.append(task)  # Store the task
+             return task  # Return the task so connect() can track it
+         else:
+             print(f"Warning: Agent {agent.name} registration failed or did not return a token. Response: {response}")
+             # Don't run the agent if registration fails - it won't be able to communicate
+             return None  # Indicate failure
+ 
+ 
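For context, the registration parsing above accepts either an already-parsed JSON object or the same object wrapped in a 'body' string. A small standalone mirror of that unwrapping logic, for illustration only (token values are made up):

import json

def parse_registration_token(response):
    """Illustrative mirror of the unwrapping done in _register_and_start_agent."""
    if isinstance(response, dict) and "body" in response:
        try:
            response = json.loads(response["body"])
        except json.JSONDecodeError:
            pass  # fall through with the wrapped form
    return response.get("token") if isinstance(response, dict) else None

print(parse_registration_token({"token": "abc123"}))              # abc123
print(parse_registration_token({"body": '{"token": "abc123"}'}))  # abc123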
+     async def query_context(self, query: str) -> Dict[str, Any]:
+         """
+         Query the context agent for information about tasks and results.
+ 
+         Args:
+             query: Natural language query about tasks, results, or context
+ 
+         Returns:
+             Dict with 'answer' and 'supporting_data' keys
+         """
+         return await self.context_agent.query_context(query)
+ 
+     def _inject_dependency_results(self, step: Dict, dependency_results: Dict) -> Dict:
+         """Injects dependency results into a step's content.
+ 
+         If the step contains string.Template style placeholders (e.g., ${task_id}),
+         it substitutes them with the corresponding results.
+ 
+         If no placeholders are found, it assumes the agent needs the raw results
+         and adds them to the step's content under the key 'dependency_data'.
+         """
+         if not step:
+             return step
+ 
+         deps = step.get("depends_on", [])
+ 
+         # Check if any part of the step contains a ${...} placeholder for one of its dependencies
+         def _has_placeholder(value) -> bool:
+             if isinstance(value, str):
+                 return any(f"${{{dep}}}" in value for dep in deps)
+             if isinstance(value, dict):
+                 return any(_has_placeholder(v) for v in value.values())
+             if isinstance(value, list):
+                 return any(_has_placeholder(v) for v in value)
+             return False
+ 
+         def _substitute(value):
+             if isinstance(value, str):
+                 return string.Template(value).safe_substitute(
+                     {dep: str(dependency_results.get(dep, "")) for dep in deps})
+             if isinstance(value, dict):
+                 return {k: _substitute(v) for k, v in value.items()}
+             if isinstance(value, list):
+                 return [_substitute(v) for v in value]
+             return value
+ 
+         if deps and _has_placeholder(step):
+             logger.info(f"Placeholders detected in step {step.get('task_id', 'N/A')}. Substituting dependency results.")
+             return _substitute(step)
+ 
+         logger.info(f"No placeholders detected in step {step.get('task_id', 'N/A')}. Adding raw dependency data.")
+         dependency_data = {}
+         for dep_task_id in deps:
+             result_value = dependency_results.get(dep_task_id)
+             if result_value is None:
+                 logger.warning(f"No result found for dependency '{dep_task_id}' when preparing raw data for step '{step.get('task_id', 'N/A')}'")
+                 extracted_value = None
+             elif isinstance(result_value, dict):
+                 # Prioritize 'output', then 'result', then fall back to the whole dict
+                 if 'output' in result_value:
+                     extracted_value = result_value['output']
+                 elif 'result' in result_value:
+                     extracted_value = result_value['result']
+                 else:
+                     logger.warning(f"Raw dependency '{dep_task_id}': neither 'output' nor 'result' key found in dict result. Using full dict.")
+                     extracted_value = result_value  # Pass the whole dict
+             else:
+                 extracted_value = result_value  # Pass strings, numbers, lists as-is
+ 
+             dependency_data[dep_task_id] = extracted_value
+ 
+         # Ensure 'content' exists and add the data
+         if "content" not in step:
+             step["content"] = {}
+         if not isinstance(step["content"], dict):
+             logger.warning(f"Step {step.get('task_id', 'N/A')} content is not a dict, cannot add dependency_data. Content: {step['content']}")
+         else:
+             step["content"]["dependency_data"] = dependency_data
+ 
+         return step
+ 
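To illustrate the two injection modes handled above (the task ids, results and wording are hypothetical; the expected outcomes are noted in comments rather than executed):

dependency_results = {"research": "MCP lets agents share a transport."}

# Mode 1: the step references its dependency with a ${...} placeholder, so it is substituted in place.
templated_step = {"task_id": "write", "depends_on": ["research"],
                  "content": {"instructions": "Write a tweet about: ${research}"}}
# After injection: content["instructions"] == "Write a tweet about: MCP lets agents share a transport."

# Mode 2: no placeholders, so the raw result is attached under content["dependency_data"].
plain_step = {"task_id": "write", "depends_on": ["research"], "content": {}}
# After injection: content["dependency_data"] == {"research": "MCP lets agents share a transport."}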
+     async def submit_task(self, task: Dict[str, Any], inject_at_submit_time: bool = False) -> None:
+         """
+         Submit a group task. If inject_at_submit_time is True, inject dependency results into each step now.
+         If False, inject at the last possible moment (just before sending each step to its agent).
+         """
+         # Reset state for the new task submission
+         self.task_results = {}
+         self.context_agent.task_results = {}
+         self.context_agent.task_descriptions = {}
+         self._pending_tasks = {}  # Track pending tasks with their completion futures; always fresh for each submission
+         logger.debug(f"submit_task: self ID: {id(self)}, _pending_tasks ID: {id(self._pending_tasks)}")
+         steps = task.get("steps", [])
+         self.task_dependencies = {step["task_id"]: step for step in steps}  # Store task dependencies
+ 
+         self._inject_at_submit_time = inject_at_submit_time
+         if inject_at_submit_time:
+             steps = [self._inject_dependency_results(step, self.task_results) for step in steps]
+         self._pending_steps = steps  # Store for later use
+         await self._submit_steps(steps)
+ 
+     async def _submit_steps(self, steps):
+         for step in steps:
+             try:
+                 # Only inject here if not already injected at submit time
+                 if not getattr(self, '_inject_at_submit_time', True):
+                     step = self._inject_dependency_results(step, self.task_results)
+ 
+                 # Create and store a completion future for this task
+                 task_id = step['task_id']
+                 future = asyncio.Future()
+                 self._pending_tasks[task_id] = future
+                 print(f"[DEBUG] _submit_steps: Added task {task_id}. Current _pending_tasks: {list(self._pending_tasks.keys())} (ID: {id(self._pending_tasks)})")
+ 
+                 # Submit the task
+                 await self._send_step_to_agent(step)
+ 
+                 # Wait for task completion with a timeout
+                 try:
+                     await asyncio.wait_for(future, timeout=60)
+                     print(f"[DEBUG] Task {task_id} completed with result: {future.result()}")
+                 except asyncio.TimeoutError:
+                     logger.error(f"Task {task_id} timed out")
+                     raise TimeoutError(f"Task {task_id} timed out")
+                 finally:
+                     # Clean up
+                     self._pending_tasks.pop(task_id, None)
+ 
+             except Exception as e:
+                 logger.error(f"Error in step {step.get('task_id')}: {e}")
+                 raise
+ 
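A hedged example of the task shape submit_task expects, based on the fields read above and in _send_step_to_agent (agent names, ids and descriptions are made up):

task = {
    "steps": [
        {
            "task_id": "research",
            "agent": "Researcher",                  # must match a registered agent's name
            "description": "Collect three facts about MCP",
            "content": {},                          # free-form payload passed through to the agent
            "depends_on": [],
        },
        {
            "task_id": "summary",
            "agent": "Writer",
            "description": "Summarize the research results",
            "content": {"generate_content": True},  # ask the ContextAgent to draft content from the description
            "depends_on": ["research"],             # results are injected before this step is sent
        },
    ]
}
# await group.submit_task(task)   # inside an async context, after group.connect()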
+     async def _generate_content(self, description: str, content: Dict) -> Dict:
+         """Generate content using the ContextAgent based on the task description.
+ 
+         Args:
+             description: The task description
+             content: Existing content to be augmented with generated content
+ 
+         Returns:
+             Dict: Content with generated fields merged in
+         """
+         # Ensure we have a dictionary to work with
+         if not isinstance(content, dict):
+             content = {}
+ 
+         try:
+             # Generate content using the context agent
+             generated = await self.context_agent.generate_response(description)
+ 
+             print(f"Generated content: {generated}")
+             if generated and isinstance(generated, dict) and "answer" in generated:
+                 try:
+                     # Try to parse as JSON first
+                     generated_content = json.loads(generated["answer"])
+                     # If it's a dictionary, merge it intelligently
+                     if isinstance(generated_content, dict):
+                         # Merge with existing content, with generated content taking precedence
+                         content = {**content, **generated_content}
+                     # If it's not a dictionary, store it as generated_content
+                     else:
+                         content["content"] = generated_content  # this is the generated content that will be sent to the agent
+                 except (json.JSONDecodeError, TypeError):
+                     # If not JSON, store the raw answer
+                     content["content"] = generated["answer"]  # this is the generated content that will be sent to the agent
+ 
+         except Exception as e:
+             logger.error(f"Error generating content: {e}")
+             print(f"Error generating content: {e}")
+             content["error"] = str(e)
+ 
+         return content
+ 
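To make the merge behaviour above concrete, two hedged examples of what could come back from the ContextAgent and how it would be folded into the step content (the answers shown are invented, and the outcomes are noted in comments rather than executed):

# Case 1: the LLM answer parses as a JSON object, so its keys are merged over the existing content.
existing = {"recipient": "team@example.com"}
answer = '{"subject": "Weekly update", "body": "Here is what the agents found..."}'
# -> {"recipient": "team@example.com", "subject": "Weekly update", "body": "Here is what the agents found..."}

# Case 2: the answer is plain text, so it is stored under the "content" key.
answer = "Here is what the agents found..."
# -> {"recipient": "team@example.com", "content": "Here is what the agents found..."}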
+     async def _send_step_to_agent(self, step):
+         # Optional one-line dependency enforcement: skip if any dependency is missing or empty
+         # if any(not self.dependency_results.get(dep) for dep in step.get("depends_on", [])):
+         #     print(f"Skipping {step['task_id']} because dependencies not satisfied: {step.get('depends_on', [])}")
+         #     return
+         step = self._inject_dependency_results(step, self.task_results)
+ 
+         # Update the context agent with the task info
+         task_id = step.get("task_id", str(id(step)))
+         await self.context_agent.update_task(task_id, step)
+ 
+         print("\n=== Submitting task to group ===")
+ 
+         # Ensure the task is in the correct format
+         if not isinstance(step, dict):
+             raise ValueError("Invalid task format. Expected a dictionary.")
+ 
+         # Handle content generation if requested
+         if step.get("content", {}).get("generate_content", False):
+             description = step.get("description", "")
+             if description:
+                 # Make a copy to avoid modifying the original
+                 content = step.get("content", {}).copy()
+                 # Remove the flag before generation
+                 content.pop("generate_content", None)
+                 # Generate and merge content
+                 step["content"] = await self._generate_content(description, content)
+ 
+         # Store this step's definition from the input task, keyed by its task_id
+         self.task_dependencies[step["task_id"]] = step
+         print(f"Parsed step dependencies: {self.task_dependencies}")
+ 
+         # Also store the dependencies on the coordinator instance if it exists
+         if self.coordinator:
+             # Ensure the coordinator has the dict initialized
+             if not hasattr(self.coordinator, 'task_dependencies') or not isinstance(getattr(self.coordinator, 'task_dependencies', None), dict):
+                 self.coordinator.task_dependencies = {}
+             self.coordinator.task_dependencies.update(self.task_dependencies)
+ 
+         if not self.coordinator or not self.coordinator.transport:
+             print("CRITICAL ERROR: Coordinator is not initialized or has no transport. Cannot submit task.")
+             return
+ 
+         coordinator_transport = self.coordinator.transport
+ 
+         print(f"[DEBUG - {self.name}] Sending step {step['task_id']} ({len(self.task_dependencies)} known dependencies).", flush=True)
+         print(f"***** [{self.name}] Dependencies content: {self.task_dependencies} *****", flush=True)
+ 
+         # Send the step to its assigned agent
+         agent_name = step["agent"]
+         # Create the message with all necessary fields, including content
+         message = {
+             "type": "task",
+             "task_id": step["task_id"],
+             "description": step["description"],
+             "sender": self.coordinator.name,
+             "content": step.get("content", {}),  # Include task content
+             "depends_on": step.get("depends_on", []),  # Include dependencies
+             "reply_to": f"{self.server_url}/message/{self.coordinator.name}"  # Full URL for reply
+         }
+         print(f"Sending task to {agent_name}")
+         print(f"Task message: {message}")
+         # Use the coordinator's transport to send the task to the agent
+         await coordinator_transport.send_message(agent_name, message)
+ 
+         print("Task submitted. Waiting for completion...")
+ 
+     async def wait_for_completion(self, check_interval: float = 1.0):
+         """
+         Wait for all tasks to complete.
+ 
+         Args:
+             check_interval: How often to check for completion, in seconds
+         """
+         if not self.coordinator:
+             raise ValueError("Group chat not connected. Call connect() first.")
+ 
+         try:
+             while True:
+                 # Check if all tasks have results
+                 all_completed = True
+                 # Iterate over the dependencies recorded at submission time
+                 for task_id in self.task_dependencies:
+                     # Check both group chat and coordinator results
+                     if task_id not in self.task_results and task_id not in self.coordinator.task_results:
+                         all_completed = False
+                         print(f"Waiting for task {task_id}...")
+                         break
+ 
+                 if all_completed:
+                     print("\n=== All tasks completed! ===")
+                     print("\nResults:")
+                     # Merge results from both sources
+                     all_results = {**self.task_results, **self.coordinator.task_results}
+                     for task_id, result in all_results.items():
+                         print(f"\n{task_id}:")
+                         print(result)
+                     break
+ 
+                 await asyncio.sleep(check_interval)
+ 
+         except KeyboardInterrupt:
+             print("\nStopping group chat...")
+ 
+     async def _handle_coordinator_message(self, message: Dict, message_id: str):
+         """Handles messages received by the coordinator's transport."""
+ 
+         if not self.coordinator:  # Ensure the coordinator exists
+             print(f"[Coordinator Handler] Error: Coordinator not initialized. Self ID: {id(self)}")
+             return
+ 
+         print(f"\n[Coordinator {self.coordinator.name}] Received message: {message}. Self ID: {id(self)}")
+ 
+         # Handle messages wrapped in a 'body' field
+         if isinstance(message, dict) and 'body' in message:
+             try:
+                 if isinstance(message['body'], str):
+                     message = json.loads(message['body'])
+                 else:
+                     message = message['body']
+                 print(f"[Coordinator {self.coordinator.name}] Unwrapped message body: {message}")
+             except json.JSONDecodeError:
+                 print(f"[Coordinator {self.coordinator.name}] Error decoding message body: {message}")
+                 return
+ 
+         # Look for type and task_id at the top level
+         msg_type = message.get("type")
+         task_id = message.get("task_id")
+ 
+         print(f"[Coordinator {self.coordinator.name}] Processing message type '{msg_type}' for task {task_id}. Current _pending_tasks in handler: {list(self._pending_tasks.keys())} (ID: {id(self._pending_tasks)})")
+ 
+         if msg_type in ["result", "task_result"]:  # Handle both result types
+             # Try the direct fields first
+             result_content = message.get("result") or message.get("description")
+ 
+             # If not found, try to parse from content.text
+             if result_content is None and "content" in message and isinstance(message["content"], dict):
+                 content_text = message["content"].get("text")
+                 if content_text:
+                     try:
+                         content_data = json.loads(content_text)
+                         result_content = content_data.get("result") or content_data.get("description")
+                         # Update task_id from content if not set
+                         if not task_id and "task_id" in content_data:
+                             task_id = content_data["task_id"]
+                     except (json.JSONDecodeError, AttributeError, TypeError) as e:
+                         print(f"[Coordinator {self.coordinator.name}] Error parsing content.text: {e}")
+ 
+             if task_id and result_content is not None:
+                 print(f"[Coordinator {self.coordinator.name}] Storing result for task {task_id}")
+                 # Store the result in both the group chat and the coordinator
+                 self.task_results[task_id] = result_content
+                 self.dependency_results[task_id] = result_content  # Required for template resolution
+                 if "dependency_results" not in self.coordinator.task_results:
+                     self.coordinator.task_results["dependency_results"] = {}
+                 self.coordinator.task_results["dependency_results"][task_id] = result_content
+                 self.coordinator.task_results[task_id] = result_content
+                 print(f"[Coordinator {self.coordinator.name}] Stored result: {str(result_content)[:200]}...")
+                 print(f"[Coordinator {self.coordinator.name}] Current task results: {list(self.task_results.keys())}")
+                 print(f"[Coordinator {self.coordinator.name}] Current dependencies: {self.task_dependencies}")
+ 
+                 # Signal task completion if anyone is waiting
+                 print(f"[Coordinator {self.coordinator.name}] Current pending tasks: {list(self._pending_tasks.keys())}")
+                 if task_id in self._pending_tasks:
+                     print(f"[Coordinator {self.coordinator.name}] Signaling completion for task {task_id}")
+                     future = self._pending_tasks[task_id]
+                     if not future.done():
+                         asyncio.get_running_loop().call_soon_threadsafe(
+                             lambda: future.set_result(result_content) if not future.done() else None)
+                         await asyncio.sleep(0)
+                     print(f"[Coordinator {self.coordinator.name}] Completed task {task_id}")
+                 else:
+                     print(f"[Coordinator {self.coordinator.name}] Task {task_id} not found in pending tasks")
+ 
+                 # Acknowledge the message
+                 try:
+                     if message_id:  # Only acknowledge if we have a message ID
+                         await self.coordinator.transport.acknowledge_message(self.coordinator.name, message_id)
+                         print(f"[Coordinator {self.coordinator.name}] Acknowledged message {message_id}")
+                 except Exception as e:
+                     print(f"[Coordinator {self.coordinator.name}] Error acknowledging message {message_id}: {e}")
+                 return
+             else:
+                 print(f"[Coordinator {self.coordinator.name}] Received invalid result message (missing task_id or result): {message}")
+         elif msg_type == "get_result":  # Handle a request for a stored result
+             result = None
+             if task_id in self.task_results:
+                 result = self.task_results[task_id]
+             elif task_id in self.coordinator.task_results:
+                 result = self.coordinator.task_results[task_id]
+ 
+             if result:
+                 print(f"[Coordinator {self.coordinator.name}] Found result for task {task_id}")
+                 # Send the result back
+                 try:
+                     await self.coordinator.transport.send_message(
+                         f"{self.server_url}/message/{message.get('sender', 'unknown')}",
+                         {
+                             "type": "task_result",
+                             "task_id": task_id,
+                             "result": result
+                         }
+                     )
+                     print(f"[Coordinator {self.coordinator.name}] Sent result for task {task_id}")
+                 except Exception as e:
+                     print(f"[Coordinator {self.coordinator.name}] Error sending result: {e}")
+             else:
+                 print(f"[Coordinator {self.coordinator.name}] No result found for task {task_id}")
+         else:
+             print(f"[Coordinator {self.coordinator.name}] Received unhandled message type '{msg_type}': {message}")
+             # Acknowledge unhandled messages as well
+             try:
+                 if message_id:
+                     await self.coordinator.transport.acknowledge_message(self.coordinator.name, message_id)
+             except Exception as e:
+                 print(f"[Coordinator {self.coordinator.name}] Error acknowledging message {message_id}: {e}")
+ 
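Based on the handler above, a worker agent (or a manual test) can report a result to the coordinator in roughly either of these shapes; the flat form and the content.text form are both accepted (the values are illustrative):

import json

# Flat form: "result" (or "description") sits at the top level.
flat_result = {
    "type": "task_result",
    "task_id": "research",
    "result": "1) ... 2) ... 3) ...",
}

# Wrapped form: the payload arrives as JSON text under content["text"].
wrapped_result = {
    "type": "result",
    "content": {"text": json.dumps({"task_id": "research", "result": "1) ... 2) ... 3) ..."})},
}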
+     @property
+     def group_state(self) -> dict:
+         """
+         Returns a merged dictionary of all task results (group and coordinator).
+         Agents can use this to access the shared group chat history/results.
+         """
+         return {**self.task_results, **(self.coordinator.task_results if self.coordinator else {})}
+ 
+     async def shutdown(self):
+         """Gracefully disconnect all agents and cancel their tasks."""
+         print(f"Initiating shutdown for {len(self._agent_tasks)} agent tasks...")
+ 
+         # 1. Cancel all running agent tasks
+         for task in self._agent_tasks:
+             if task and not task.done():
+                 print(f"Cancelling task {task.get_name()}...")
+                 task.cancel()
+ 
+         # Wait for all tasks to be cancelled
+         if self._agent_tasks:
+             await asyncio.gather(*[t for t in self._agent_tasks if t], return_exceptions=True)
+             print("All agent tasks cancelled or finished.")
+         self._agent_tasks.clear()  # Clear the list of tasks
+ 
+         # 2. Disconnect transports for all agents (coordinator + regular agents)
+         all_agents = [self.coordinator] + self.agents
+         disconnect_tasks = []
+         for agent in all_agents:
+             if hasattr(agent, 'transport') and hasattr(agent.transport, 'disconnect'):
+                 print(f"Disconnecting transport for {agent.name}...")
+                 disconnect_tasks.append(agent.transport.disconnect())
+ 
+         if disconnect_tasks:
+             await asyncio.gather(*disconnect_tasks, return_exceptions=True)
+             print("All agent transports disconnected.")
+ 
+         print("Shutdown complete.")
+ 
+     # === Minimal free-flow chat: send a message to any agent ===
+     async def send_chat_message(self, agent_name, message):
+         await self.coordinator.transport.send_message(agent_name, {"type": "message", "content": message})
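Finally, a hedged sketch of the free-flow chat helper and the shared state property (the agent name is hypothetical, and this must run inside an async context after connect() has registered the agents):

async def chat_and_inspect(group):
    # Push an ad-hoc chat message to a named agent through the coordinator's transport.
    await group.send_chat_message("Researcher", "Please double-check your last answer.")

    # group_state merges the group chat's and the coordinator's task results.
    for task_id, result in group.group_state.items():
        print(task_id, "->", str(result)[:80])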