agent-mcp 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. agent_mcp/__init__.py +16 -0
  2. agent_mcp/camel_mcp_adapter.py +521 -0
  3. agent_mcp/cli.py +47 -0
  4. agent_mcp/crewai_mcp_adapter.py +281 -0
  5. agent_mcp/enhanced_mcp_agent.py +601 -0
  6. agent_mcp/heterogeneous_group_chat.py +798 -0
  7. agent_mcp/langchain_mcp_adapter.py +458 -0
  8. agent_mcp/langgraph_mcp_adapter.py +325 -0
  9. agent_mcp/mcp_agent.py +658 -0
  10. agent_mcp/mcp_decorator.py +257 -0
  11. agent_mcp/mcp_langgraph.py +733 -0
  12. agent_mcp/mcp_transaction.py +97 -0
  13. agent_mcp/mcp_transport.py +706 -0
  14. agent_mcp/mcp_transport_enhanced.py +46 -0
  15. agent_mcp/proxy_agent.py +24 -0
  16. agent_mcp-0.1.4.dist-info/METADATA +333 -0
  17. agent_mcp-0.1.4.dist-info/RECORD +49 -0
  18. {agent_mcp-0.1.2.dist-info → agent_mcp-0.1.4.dist-info}/WHEEL +1 -1
  19. agent_mcp-0.1.4.dist-info/entry_points.txt +2 -0
  20. agent_mcp-0.1.4.dist-info/top_level.txt +3 -0
  21. demos/__init__.py +1 -0
  22. demos/basic/__init__.py +1 -0
  23. demos/basic/framework_examples.py +108 -0
  24. demos/basic/langchain_camel_demo.py +272 -0
  25. demos/basic/simple_chat.py +355 -0
  26. demos/basic/simple_integration_example.py +51 -0
  27. demos/collaboration/collaborative_task_example.py +437 -0
  28. demos/collaboration/group_chat_example.py +130 -0
  29. demos/collaboration/simplified_crewai_example.py +39 -0
  30. demos/langgraph/autonomous_langgraph_network.py +808 -0
  31. demos/langgraph/langgraph_agent_network.py +415 -0
  32. demos/langgraph/langgraph_collaborative_task.py +619 -0
  33. demos/langgraph/langgraph_example.py +227 -0
  34. demos/langgraph/run_langgraph_examples.py +213 -0
  35. demos/network/agent_network_example.py +381 -0
  36. demos/network/email_agent.py +130 -0
  37. demos/network/email_agent_demo.py +46 -0
  38. demos/network/heterogeneous_network_example.py +216 -0
  39. demos/network/multi_framework_example.py +199 -0
  40. demos/utils/check_imports.py +49 -0
  41. demos/workflows/autonomous_agent_workflow.py +248 -0
  42. demos/workflows/mcp_features_demo.py +353 -0
  43. demos/workflows/run_agent_collaboration_demo.py +63 -0
  44. demos/workflows/run_agent_collaboration_with_logs.py +396 -0
  45. demos/workflows/show_agent_interactions.py +107 -0
  46. demos/workflows/simplified_autonomous_demo.py +74 -0
  47. functions/main.py +144 -0
  48. functions/mcp_network_server.py +513 -0
  49. functions/utils.py +47 -0
  50. agent_mcp-0.1.2.dist-info/METADATA +0 -475
  51. agent_mcp-0.1.2.dist-info/RECORD +0 -5
  52. agent_mcp-0.1.2.dist-info/entry_points.txt +0 -2
  53. agent_mcp-0.1.2.dist-info/top_level.txt +0 -1
agent_mcp/langchain_mcp_adapter.py
@@ -0,0 +1,458 @@
+"""
+Adapter for Langchain agents to work with MCP.
+"""
+
+import asyncio
+from typing import Dict, Any, Optional
+from .mcp_agent import MCPAgent
+from .mcp_transport import MCPTransport
+from langchain.agents import AgentExecutor
+from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
+import traceback
+import json
+import uuid
+
+# --- Setup Logger ---
+import logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+logger = logging.getLogger(__name__)
+# --- End Logger Setup ---
+
+class LangchainMCPAdapter(MCPAgent):
+    """Adapter for Langchain agents to work with MCP"""
+
+    def __init__(self,
+                 name: str,
+                 transport: Optional[MCPTransport] = None,
+                 client_mode: bool = False,
+                 langchain_agent: OpenAIFunctionsAgent = None,
+                 agent_executor: AgentExecutor = None,
+                 system_message: str = "",
+                 **kwargs):
+        # Set default system message if none provided
+        if not system_message:
+            system_message = "I am a Langchain agent that can help with various tasks."
+
+        # Initialize parent with system message
+        super().__init__(name=name, system_message=system_message, **kwargs)
+
+        # Set instance attributes
+        self.transport = transport
+        self.client_mode = client_mode
+        self.langchain_agent = langchain_agent
+        self.agent_executor = agent_executor
+        self.task_queue = asyncio.Queue()
+        self._task_processor = None
+        self._message_processor = None
+        self._processed_tasks = set()  # For idempotency check
+
+    async def connect_to_server(self, server_url: str):
+        """Connect to another agent's server"""
+        if not self.client_mode or not self.transport:
+            raise ValueError("Agent not configured for client mode")
+
+        # Register with the server
+        registration = {
+            "type": "registration",
+            "agent_id": self.mcp_id,
+            "name": self.name,
+            "capabilities": []
+        }
+
+        response = await self.transport.send_message(server_url, registration)
+        if response.get("status") == "ok":
+            print(f"Successfully connected to server at {server_url}")
+
+    async def handle_incoming_message(self, message: Dict[str, Any], message_id: Optional[str] = None):
+        """Handle incoming messages from other agents"""
+        # First check if type is directly in the message
+        msg_type = message.get("type")
+        logger.info(f"[{self.name}] Raw message: {message}")
+
+        # If not, check if it's inside the content field
+        if not msg_type and "content" in message and isinstance(message["content"], dict):
+            msg_type = message["content"].get("type")
+
+        sender = self._extract_sender(message)
+        task_id = message.get("task_id") or (message.get("content", {}).get("task_id") if isinstance(message.get("content"), dict) else None)
+        logger.info(f"[{self.name}] Received message (ID: {message_id}) of type '{msg_type}' from {sender} (Task ID: {task_id})")
+
+        # --- Idempotency Check ---
+        if not super()._should_process_message(message):
+            # If skipped, acknowledge and stop
+            if message_id and self.transport:
+                asyncio.create_task(self.transport.acknowledge_message(self.name, message_id))
+                logger.info(f"[{self.name}] Acknowledged duplicate task {task_id} (msg_id: {message_id})")
+            return
+        # --- End Idempotency Check ---
+
+        if msg_type == "task":
+            logger.info(f"[{self.name}] Queueing task {task_id} (message_id: {message_id}) from {sender}")
+            content = message.get("content", {})
+            current_task_id = content.get("task_id") or message.get("task_id")  # Handle potential nesting
+            description = content.get("description") or message.get("description")
+            reply_to = content.get("reply_to") or message.get("reply_to")
+
+            if not current_task_id or not description:
+                logger.error(f"[{self.name}] Task message missing required fields: {message}")
+                # Acknowledge if possible to prevent reprocessing bad message
+                if message_id and self.transport:
+                    asyncio.create_task(self.transport.acknowledge_message(self.name, message_id))
+                return
+
+            # Add message_id to task context for processing
+            message['message_id'] = message_id
+
+            #task_context = {
+            #    "type": "task",  # Ensure type is explicitly set for process_tasks
+            #    "task_id": current_task_id,
+            #    "description": description,
+            #    "reply_to": reply_to,
+            #    "sender": sender,
+            #    "message_id": message_id
+            #}
+            #logger.debug(f"[{self.name}] Queueing task context: {task_context}")
+            logger.debug(f"[DEBUG] {self.name}: Queueing task {task_id} with message_id {message_id} for processing")
+
+            await self.task_queue.put(message)
+            logger.debug(f"[{self.name}] Successfully queued task {current_task_id}")
+
+        elif msg_type == "task_result":
+            # Received a result, treat it as the next step in the conversation
+            result_content = message.get("result")
+
+            # --- Robust extraction for various formats ---
+            content = message.get("content")
+            if result_content is None and content is not None:
+                # 1. Try content["result"]
+                if isinstance(content, dict) and "result" in content:
+                    result_content = content["result"]
+                # 2. Try content["text"] as JSON
+                elif isinstance(content, dict) and "text" in content:
+                    text_val = content["text"]
+                    if isinstance(text_val, str):
+                        try:
+                            parsed = json.loads(text_val)
+                            if isinstance(parsed, dict) and "result" in parsed:
+                                result_content = parsed["result"]
+                        except Exception:
+                            pass
+                # 3. Try content itself as JSON string
+                elif isinstance(content, str):
+                    try:
+                        parsed = json.loads(content)
+                        if isinstance(parsed, dict) and "result" in parsed:
+                            result_content = parsed["result"]
+                    except Exception:
+                        pass
+                # 4. Fallback: use content["text"] as plain string
+                if result_content is None and isinstance(content, dict) and "text" in content:
+                    result_content = content["text"]
+
+            # Handle JSON string content
+            if isinstance(result_content, str):
+                try:
+                    result_content = json.loads(result_content)
+                except json.JSONDecodeError:
+                    pass
+
+            # Direct parsing of content["text"] structure
+            if isinstance(result_content, str):
+                try:
+                    text_content = json.loads(result_content)
+                    if isinstance(text_content, dict):
+                        result_content = text_content
+                except json.JSONDecodeError:
+                    pass
+
+            # --- End Robust extraction ---
+            original_task_id = (
+                (result_content.get("task_id") if isinstance(result_content, dict) else None)
+                or message.get("task_id")
+            )
+            logger.info(f"[{self.name}] Received task_result from {sender} for task {original_task_id}. Content: '{str(result_content)[:100]}...'")
+
+            if not result_content:
+                logger.warning(f"[{self.name}] Received task_result from {sender} with empty content.")
+
+            # Acknowledge the result message even if content is empty
+            if message_id and self.transport:
+                asyncio.create_task(self.transport.acknowledge_message(self.name, message_id))
+            return
+
+            # Create a *new* task for this agent based on the received result
+            #new_task_id = f"conv_{uuid.uuid4()}"  # Generate a new ID for this conversational turn
+            #new_task_context = {
+            #    "type": "task",  # Still a task for this agent to process
+            #    "task_id": new_task_id,
+            #    "description": str(result_content),  # The result becomes the new input/description
+            #    "reply_to": message.get("reply_to") or result_content.get("reply_to"),
+            #    "sender": sender,  # This agent is the conceptual sender of this internal task
+            #    "message_id": message_id  # Carry over original message ID for acknowledgement
+            #}
+
+            #logger.info(f"[{self.name}] Queueing new conversational task {new_task_id} based on result from {sender}")
+            #await self.task_queue.put(new_task_context)
+            #logger.debug(f"[{self.name}] Successfully queued new task {new_task_id}")
+
+        else:
+            logger.warning(f"[{self.name}] Received unknown message type: {msg_type}. Message: {message}")
+            # Acknowledge other message types immediately if they have an ID
+            #if message_id and self.transport:
+            #    asyncio.create_task(self.transport.acknowledge_message(self.name, message_id))
+
+    async def _handle_task(self, message: Dict[str, Any]):
+        """Handle incoming task"""
+        print(f"{self.name}: Received task: {message}")
+        await self.task_queue.put(message)
+        return {"status": "ok"}
+
+    async def process_messages(self):
+        logger.info(f"[{self.name}] Message processor loop started.")
+        while True:
+            try:
+                logger.debug(f"[{self.name}] Waiting for message from transport...")
+                # Pass agent name to receive_message
+                message, message_id = await self.transport.receive_message()
+                logger.debug(f"[{self.name}] Received raw message from transport: {message} (ID: {message_id})")
+
+                if message is None:
+                    print(f"[{self.name}] Received None message, skipping...")
+                    continue
+
+                await self.handle_incoming_message(message, message_id)
+            except asyncio.CancelledError:
+                print(f"[{self.name}] Message processor cancelled.")
+                break
+            except Exception as e:
+                print(f"[{self.name}] Error in message processor: {e}")
+                traceback.print_exc()
+                break
+        print(f"[{self.name}] Message processor loop finished.")
+
+    async def process_tasks(self):
+        print(f"[{self.name}] Task processor loop started.")
+        while True:
+            try:
+                print(f"[{self.name}] Waiting for task from queue...")
+                task = await self.task_queue.get()
+                print(f"\n[{self.name}] Got item from queue: {task}")
+
+                if not isinstance(task, dict):
+                    print(f"[ERROR] {self.name}: Task item is not a dictionary: {task}")
+                    self.task_queue.task_done()
+                    continue
+
+                # Extract task details (handle both original message format and task_context format)
+                task_desc = task.get("description")
+                task_id = task.get("task_id")
+                task_type = task.get("type")  # Should always be 'task' if queued correctly
+                reply_to = task.get("reply_to")
+                message_id = task.get("message_id")  # For acknowledgement
+                sender = self._extract_sender(task)
+                # Fallback for nested content (less likely now but safe)
+                if not task_desc and isinstance(task.get("content"), dict):
+                    content = task.get("content", {})
+                    task_desc = content.get("description")
+                    if not task_id: task_id = content.get("task_id")
+                    if not task_type: task_type = content.get("type")
+                    if not reply_to: reply_to = content.get("reply_to")
+                    if not sender: sender = content.get("sender", "from")
+
+                print(f"[DEBUG] {self.name}: Processing task details:")
+                print(f" - Task ID: {task_id}")
+                print(f" - Type: {task_type}")
+                print(f" - Sender: {sender}")
+                print(f" - Reply To: {reply_to}")
+                print(f" - Description: {str(task_desc)[:100]}...")
+                print(f" - Original Message ID: {message_id}")
+
+                if not task_desc or not task_id:
+                    print(f"[ERROR] {self.name}: Task is missing description or task_id: {task}")
+                    self.task_queue.task_done()
+                    # Acknowledge if possible
+                    #if message_id and self.transport:
+                    #    asyncio.create_task(self.transport.acknowledge_message(self.name, message_id))
+                    continue
+
+                # We only queue tasks now, so this check might be redundant but safe
+                if task_type != "task":
+                    print(f"[ERROR] {self.name}: Invalid item type received in task queue: {task_type}. Item: {task}")
+                    self.task_queue.task_done()
+                    #if message_id and self.transport:
+                    #    asyncio.create_task(self.transport.acknowledge_message(self.name, message_id))
+                    continue
+
+                print(f"[DEBUG] {self.name}: Starting execution of task {task_id}")
+                # Execute task using Langchain agent
+                try:
+                    print(f"[DEBUG] {self.name}: Calling agent_executor.ainvoke with task description")
+                    # Execute the task using the Langchain agent executor's ainvoke method
+                    # Pass input AND agent_name in a dictionary matching the prompt's input variables
+                    input_data = {
+                        "input": task_desc,
+                        "agent_name": self.name  # Identify this agent as the one currently executing the task
+                    }
+                    result_dict = await self.agent_executor.ainvoke(input_data)
+                    print(f"[DEBUG] {self.name}: Agent execution completed. Full result: {result_dict}")
+                    # Extract the final output string, typically under the 'output' key
+                    if isinstance(result_dict, dict) and 'output' in result_dict:
+                        result = result_dict['output']
+                        print(f"[DEBUG] {self.name}: Extracted output: {result}")
+                    else:
+                        logger.warning(f"[{self.name}] Could not find 'output' key in agent result: {result_dict}. Using full dict as string.")
+                        result = str(result_dict)
+                except Exception as e:
+                    print(f"[ERROR] {self.name}: Agent execution failed: {e}")
+                    print(f"[ERROR] {self.name}: Error type: {type(e)}")
+                    traceback.print_exc()  # Print the full traceback for detailed debugging
+                    # Assign error message to result variable for graceful failure
+                    result = f"Agent execution failed due to an error: {str(e)}"
+
+                # Ensure result is always a string before sending
+                if not isinstance(result, str):
+                    try:
+                        result_str = json.dumps(result)  # Try serializing if complex type
+                    except (TypeError, OverflowError):
+                        result_str = str(result)  # Fallback to string conversion
+                else:
+                    result_str = result
+
+                print(f"[DEBUG] {self.name}: Sending task result for task_id: {task_id}")
+                # Send the result back
+                if reply_to and self.transport:
+                    try:
+                        # --- FIX: Extract agent name from reply_to URL ---
+                        try:
+                            # Handle both URL paths and direct agent names
+                            if '/' in reply_to:
+                                target_agent_name = reply_to.split('/')[-1]
+                            else:
+                                target_agent_name = reply_to
+                        except IndexError:
+                            print(f"[ERROR] {self.name}: Could not extract agent name from reply_to: {reply_to}")
+                            target_agent_name = reply_to  # Fallback, though likely wrong
+
+                        print(f"[DEBUG] Conversation Routing - Original sender: {reply_to}, Current agent: {self.name}, Final reply_to: {reply_to}")
+                        print(f"[DEBUG] Derived target agent: {target_agent_name} from reply_to: {reply_to}")
+                        print(f"[DEBUG] TASK_MESSAGE: {task}")
+                        print(f"[DEBUG] Message Chain - From: {sender} -> To: {self.name} -> ReplyTo: {reply_to}")
+
+                        print(f"[DEBUG] {self.name}: Sending result to target agent: {target_agent_name} (extracted from {reply_to})")
+                        # --- END FIX ---
+
+                        await self.transport.send_message(
+                            target_agent_name,  # <<< Use extracted name, not full URL
+                            {
+                                "type": "task_result",
+                                "task_id": task_id,
+                                "result": result_str,
+                                "sender": self.name,
+                                "original_message_id": message_id  # Include original message ID
+                            }
+                        )
+                        print(f"[DEBUG] {self.name}: Result sent successfully")
+
+                        # Acknowledge task completion using message_id
+                        if message_id:
+                            await self.transport.acknowledge_message(self.name, message_id)
+                            print(f"[DEBUG] {self.name}: Task {task_id} acknowledged with message_id {message_id}")
+                        else:
+                            print(f"[WARN] {self.name}: No message_id for task {task_id}, cannot acknowledge")
+                    except Exception as send_error:
+                        print(f"[ERROR] {self.name}: Failed to send result: {str(send_error)}")
+                        traceback.print_exc()
+                else:
+                    print(f"[WARN] {self.name}: No reply_to URL in task {task_id}, cannot send result")
+
+                super()._mark_task_completed(task_id)  # Call base class method
+
+                self.task_queue.task_done()
+                print(f"[DEBUG] {self.name}: Task {task_id} fully processed")
+
+            except Exception as e:
+                print(f"[ERROR] {self.name}: Error processing task: {e}")
+                traceback.print_exc()
+                await asyncio.sleep(1)
+        print(f"[{self.name}] Task processor loop finished.")
+
+    def _should_process_message(self, message: Dict[str, Any]) -> bool:
+        """Check if a message should be processed based on idempotency"""
+        task_id = message.get("content", {}).get("task_id") if isinstance(message.get("content"), dict) else message.get("task_id")
+        if task_id in self._processed_tasks:
+            logger.info(f"[{self.name}] Skipping duplicate task {task_id}")
+            return False
+        return True
+
+    def _mark_task_completed(self, task_id: str) -> None:
+        """Mark a task as completed for idempotency"""
+        self._processed_tasks.add(task_id)
+        logger.info(f"[{self.name}] Marked task {task_id} as completed")
+
+    async def run(self):
+        """Run the agent's main loop asynchronously."""
+        print(f"[{self.name}] Starting agent run loop...")
+
+        # Ensure transport is ready (polling should be started by HeterogeneousGroupChat)
+        if not self.transport:
+            print(f"[ERROR] {self.name}: Transport is not configured. Cannot run agent.")
+            return
+
+        # We no longer call connect_to_server here, as registration and polling start
+        # are handled by HeterogeneousGroupChat._register_and_start_agent
+        # if self.client_mode and hasattr(self.transport, 'connect'):
+        #     print(f"[{self.name}] Client mode: connecting transport...")
+        #     # Assuming connect handles polling start now
+        #     await self.transport.connect(agent_name=self.name, token=self.transport.token)
+        # else:
+        #     print(f"[{self.name}] Not in client mode or transport does not support connect. Assuming ready.")
+
+        # Start message and task processors as background tasks
+        try:
+            print(f"[{self.name}] Creating message and task processor tasks...")
+            self._message_processor = asyncio.create_task(self.process_messages())
+            self._task_processor = asyncio.create_task(self.process_tasks())
+            print(f"[{self.name}] Processor tasks created.")
+
+            # Wait for either task to complete (or be cancelled)
+            # This keeps the agent alive while processors are running
+            done, pending = await asyncio.wait(
+                [self._message_processor, self._task_processor],
+                return_when=asyncio.FIRST_COMPLETED,
+            )
+
+            print(f"[{self.name}] One of the processor tasks completed or was cancelled.")
+            # Handle completion or cancellation if needed
+            for task in done:
+                try:
+                    # Check if task raised an exception
+                    exc = task.exception()
+                    if exc:
+                        print(f"[{self.name}] Processor task ended with error: {exc}")
+                        # Optionally re-raise or handle
+                except asyncio.CancelledError:
+                    print(f"[{self.name}] Processor task was cancelled.")
+
+            # Cancel any pending tasks to ensure clean shutdown
+            for task in pending:
+                print(f"[{self.name}] Cancelling pending processor task...")
+                task.cancel()
+                try:
+                    await task  # Await cancellation
+                except asyncio.CancelledError:
+                    pass  # Expected
+
+        except Exception as e:
+            print(f"[ERROR] {self.name}: Unhandled exception in run loop: {e}")
+            traceback.print_exc()
+        finally:
+            print(f"[{self.name}] Agent run loop finished.")
+            # Ensure processors are stopped if they weren't already cancelled
+            if self._message_processor and not self._message_processor.done():
+                self._message_processor.cancel()
+            if self._task_processor and not self._task_processor.done():
+                self._task_processor.cancel()
+            # Note: Transport disconnect should be handled by HeterogeneousGroupChat.shutdown()
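
For orientation, a minimal sketch of how the new LangchainMCPAdapter might be wired up. This is not part of the package diff: the transport and Langchain executor construction are placeholders (their real construction APIs live in agent_mcp/mcp_transport.py and Langchain, not shown here); only the constructor arguments and the run() coroutine come from the adapter above.

import asyncio

from agent_mcp.langchain_mcp_adapter import LangchainMCPAdapter
from agent_mcp.mcp_transport import MCPTransport

async def main():
    # Placeholders: how to build a concrete transport and a Langchain
    # AgentExecutor is not shown in this diff, so these helpers are hypothetical.
    transport: MCPTransport = build_transport()
    executor = build_agent_executor()

    adapter = LangchainMCPAdapter(
        name="LangchainWorker",   # example name
        transport=transport,
        agent_executor=executor,
        client_mode=True,         # connect out to another agent's server
    )
    await adapter.run()           # runs the message and task processor loops until cancelled

asyncio.run(main())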