agent-mcp 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. agent_mcp/__init__.py +16 -0
  2. agent_mcp/camel_mcp_adapter.py +521 -0
  3. agent_mcp/cli.py +47 -0
  4. agent_mcp/crewai_mcp_adapter.py +281 -0
  5. agent_mcp/enhanced_mcp_agent.py +601 -0
  6. agent_mcp/heterogeneous_group_chat.py +798 -0
  7. agent_mcp/langchain_mcp_adapter.py +458 -0
  8. agent_mcp/langgraph_mcp_adapter.py +325 -0
  9. agent_mcp/mcp_agent.py +658 -0
  10. agent_mcp/mcp_decorator.py +257 -0
  11. agent_mcp/mcp_langgraph.py +733 -0
  12. agent_mcp/mcp_transaction.py +97 -0
  13. agent_mcp/mcp_transport.py +706 -0
  14. agent_mcp/mcp_transport_enhanced.py +46 -0
  15. agent_mcp/proxy_agent.py +24 -0
  16. agent_mcp-0.1.4.dist-info/METADATA +333 -0
  17. agent_mcp-0.1.4.dist-info/RECORD +49 -0
  18. {agent_mcp-0.1.2.dist-info → agent_mcp-0.1.4.dist-info}/WHEEL +1 -1
  19. agent_mcp-0.1.4.dist-info/entry_points.txt +2 -0
  20. agent_mcp-0.1.4.dist-info/top_level.txt +3 -0
  21. demos/__init__.py +1 -0
  22. demos/basic/__init__.py +1 -0
  23. demos/basic/framework_examples.py +108 -0
  24. demos/basic/langchain_camel_demo.py +272 -0
  25. demos/basic/simple_chat.py +355 -0
  26. demos/basic/simple_integration_example.py +51 -0
  27. demos/collaboration/collaborative_task_example.py +437 -0
  28. demos/collaboration/group_chat_example.py +130 -0
  29. demos/collaboration/simplified_crewai_example.py +39 -0
  30. demos/langgraph/autonomous_langgraph_network.py +808 -0
  31. demos/langgraph/langgraph_agent_network.py +415 -0
  32. demos/langgraph/langgraph_collaborative_task.py +619 -0
  33. demos/langgraph/langgraph_example.py +227 -0
  34. demos/langgraph/run_langgraph_examples.py +213 -0
  35. demos/network/agent_network_example.py +381 -0
  36. demos/network/email_agent.py +130 -0
  37. demos/network/email_agent_demo.py +46 -0
  38. demos/network/heterogeneous_network_example.py +216 -0
  39. demos/network/multi_framework_example.py +199 -0
  40. demos/utils/check_imports.py +49 -0
  41. demos/workflows/autonomous_agent_workflow.py +248 -0
  42. demos/workflows/mcp_features_demo.py +353 -0
  43. demos/workflows/run_agent_collaboration_demo.py +63 -0
  44. demos/workflows/run_agent_collaboration_with_logs.py +396 -0
  45. demos/workflows/show_agent_interactions.py +107 -0
  46. demos/workflows/simplified_autonomous_demo.py +74 -0
  47. functions/main.py +144 -0
  48. functions/mcp_network_server.py +513 -0
  49. functions/utils.py +47 -0
  50. agent_mcp-0.1.2.dist-info/METADATA +0 -475
  51. agent_mcp-0.1.2.dist-info/RECORD +0 -5
  52. agent_mcp-0.1.2.dist-info/entry_points.txt +0 -2
  53. agent_mcp-0.1.2.dist-info/top_level.txt +0 -1
@@ -0,0 +1,325 @@
1
+ """
2
+ LangGraph MCP Adapter - Adapt LangGraph agents to work with MCP.
3
+
4
+ This module provides an adapter that allows LangGraph agents to work within
5
+ the Model Context Protocol (MCP) framework, enabling them to collaborate
6
+ with agents from other frameworks like Autogen and CrewAI.
7
+
8
+ Supports both workflow-based and tool-based LangGraph agents:
9
+ 1. Workflow-based: Uses StateGraph for defining agent behavior
10
+ 2. Tool-based: Uses LangChain tools and agent executors
11
+ """
12
+
13
+ import asyncio
14
+ import traceback
15
+ from typing import Dict, Any, Optional, Callable, List, Union
16
+ from langchain.tools import BaseTool
17
+ from langchain.agents import AgentExecutor
18
+ from langchain_openai import ChatOpenAI
19
+ from langchain.agents import create_openai_tools_agent
20
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
21
+ from langgraph.graph import StateGraph
22
+ from .mcp_agent import MCPAgent
23
+ from .mcp_transport import HTTPTransport
24
+ from fastapi import FastAPI, Request
25
+ import uvicorn
26
+ from threading import Thread
27
+ import time
28
+
29
class LangGraphMCPAdapter(MCPAgent):
    """
    Adapter that lets LangGraph agents participate in an MCP network.

    Supports two mutually exclusive construction patterns:
    1. Workflow-based: pass a ``StateGraph`` (optionally with ``state_type``).
    2. Tool-based: pass LangChain ``tools``; an OpenAI-tools agent executor
       is built automatically.
    """

    def __init__(
        self,
        name: str,
        workflow: Optional[StateGraph] = None,
        tools: Optional[List[BaseTool]] = None,
        process_message: Optional[Callable] = None,
        transport: Optional[HTTPTransport] = None,
        client_mode: bool = True,
        state_type: Optional[type] = None,
        **kwargs
    ):
        """
        Initialize the LangGraph MCP adapter.

        Args:
            name: Name of the agent.
            workflow: Optional StateGraph workflow for workflow-based agents.
            tools: Optional list of tools for tool-based agents.
            process_message: Optional custom message processing function,
                called for non-task messages in process_messages().
            transport: Optional transport layer.
            client_mode: Whether to run in client mode (no local HTTP server).
            state_type: Optional state type for workflow-based agents (stored
                for reference; LangGraph itself owns state handling).
            **kwargs: Additional arguments passed through to MCPAgent.

        Raises:
            ValueError: If both ``workflow`` and ``tools`` are given, or if
                neither is given.
        """
        # Initialize MCPAgent with transport
        super().__init__(name=name, transport=transport, **kwargs)

        # Exactly one of workflow / tools must be provided.
        if workflow and tools:
            raise ValueError("Cannot specify both workflow and tools. Choose one pattern.")

        if workflow:
            # Workflow-based agent: the graph is compiled lazily in execute_task().
            self.workflow = workflow
            self.state_type = state_type
            self.executor = None
        elif tools:
            # Tool-based agent: build a standard OpenAI-tools agent executor.
            llm = ChatOpenAI(temperature=0)
            prompt = ChatPromptTemplate.from_messages([
                ("system", "You are a helpful AI assistant that can use tools to accomplish tasks."),
                ("human", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad")
            ])
            agent = create_openai_tools_agent(llm, tools, prompt)
            self.executor = AgentExecutor.from_agent_and_tools(
                agent=agent,
                tools=tools,
                handle_parsing_errors=True
            )
            self.workflow = None
            self.state_type = None
        else:
            raise ValueError("Must specify either workflow or tools")

        self.custom_process_message = process_message
        self.transport = transport
        self.client_mode = client_mode
        self.task_queue = asyncio.Queue()
        self.state: Dict[str, Any] = {}
        self.server_ready = asyncio.Event()

        # FastAPI app used only in server mode (client_mode=False); see run().
        self.app = FastAPI()

        @self.app.post("/message")
        async def handle_message(request: Request):
            return await self._handle_message(request)

        @self.app.on_event("startup")
        async def startup_event():
            # Signals connect_to_server() that the HTTP server is accepting requests.
            self.server_ready.set()

        self.server_thread = None
111
+ async def _handle_message(self, request: Request):
112
+ """Handle incoming HTTP messages"""
113
+ try:
114
+ message = await request.json()
115
+ await self.task_queue.put(message)
116
+ return {"status": "ok"}
117
+ except Exception as e:
118
+ return {"status": "error", "message": str(e)}
119
+
120
+ async def process_messages(self):
121
+ """Process incoming messages from the transport layer"""
122
+ while True:
123
+ try:
124
+ message, message_id = await self.transport.receive_message()
125
+ print(f"{self.name}: Received message {message_id}: {message}")
126
+
127
+ if message and isinstance(message, dict):
128
+ # Add message_id to message for tracking
129
+ message['message_id'] = message_id
130
+
131
+ # Standardize message structure
132
+ if 'content' not in message and message.get('type') == 'task':
133
+ message = {
134
+ 'type': 'task',
135
+ 'content': {
136
+ 'task_id': message.get('task_id'),
137
+ 'description': message.get('description'),
138
+ 'type': 'task'
139
+ },
140
+ 'message_id': message_id,
141
+ 'from': message.get('from', 'unknown')
142
+ }
143
+
144
+ # --- Idempotency Check ---
145
+ if not super()._should_process_message(message):
146
+ if message_id and self.transport:
147
+ asyncio.create_task(self.transport.acknowledge_message(self.name, message_id))
148
+ print(f"[{self.name}] Acknowledged duplicate task {message.get('task_id')} (msg_id: {message_id})")
149
+ continue
150
+
151
+ if message.get('type') == 'task':
152
+ print(f"{self.name}: Queueing task with message_id {message_id}")
153
+ await self.task_queue.put(message)
154
+ elif self.custom_process_message:
155
+ await self.custom_process_message(self, message)
156
+ else:
157
+ print(f"{self.name}: Unknown message type: {message.get('type')}")
158
+ if message_id and self.transport:
159
+ await self.transport.acknowledge_message(self.name, message_id)
160
+ print(f"{self.name}: Acknowledged unknown message {message_id}")
161
+ except asyncio.CancelledError:
162
+ print(f"{self.name}: Message processor cancelled")
163
+ break
164
+ except Exception as e:
165
+ print(f"{self.name}: Error processing message: {e}")
166
+ traceback.print_exc()
167
+ await asyncio.sleep(1)
168
+
169
+ async def execute_task(self, task: Dict[str, Any]):
170
+ """Execute a task using either workflow or executor"""
171
+ try:
172
+ if self.workflow:
173
+ # Always initialize state as a dictionary for LangGraph workflows here.
174
+ # LangGraph itself handles the state type defined in StateGraph().
175
+ state_dict = {"message": task, "result": None}
176
+
177
+ # Run workflow
178
+ try:
179
+ print(f"{self.name}: Running workflow with initial state_dict: {state_dict}")
180
+ # Compile the workflow if not already compiled
181
+ if not hasattr(self, '_compiled_workflow'):
182
+ self._compiled_workflow = self.workflow.compile()
183
+ # Pass the initial state dict directly
184
+ final_state = await self._compiled_workflow.ainvoke(state_dict)
185
+ # Use the workflow's final state
186
+ result = final_state
187
+ print(f"{self.name}: Workflow finished with final_state: {result}")
188
+ return {"result": result, "error": None}
189
+ except Exception as e:
190
+ print(f"Error running workflow: {str(e)}")
191
+ import traceback
192
+ traceback.print_exc()
193
+ return {"result": None, "error": str(e)}
194
+ elif self.executor:
195
+ # Run with executor
196
+ result = await self.executor.arun(task)
197
+ return {"result": result, "error": None}
198
+ else:
199
+ return {"result": None, "error": "No workflow or executor configured"}
200
+ except Exception as e:
201
+ return {
202
+ "result": f"[FROM EXECUTE_TASK] Error executing task: {str(e)}",
203
+ "error": True
204
+ }
205
+
206
+ async def process_tasks(self):
207
+ """Process tasks from the queue"""
208
+ while True:
209
+ try:
210
+ task = await self.task_queue.get()
211
+
212
+ # Extract task details
213
+ task_content = task.get('content', task.get('task', {}))
214
+ task_id = task.get('task_id') or task_content.get('task_id')
215
+ task_description = task.get('description') or task_content.get('description')
216
+ message_id = task.get('message_id')
217
+ reply_to = task.get('reply_to')
218
+
219
+ if not task_id or not task_description:
220
+ print(f"[ERROR] {self.name}: Task missing required fields: {task}")
221
+ self.task_queue.task_done()
222
+ continue
223
+
224
+ print(f"\n{self.name}: Processing task {task_id} with message_id {message_id}")
225
+
226
+ try:
227
+ # Execute the task
228
+ result = await self.execute_task(task_content)
229
+
230
+ # Mark task completed
231
+ super()._mark_task_completed(task_id)
232
+
233
+ # Send result back if reply_to is specified
234
+ if reply_to:
235
+ print(f"{self.name}: Sending result back to {reply_to}")
236
+ await self.transport.send_message(
237
+ reply_to,
238
+ {
239
+ "type": "task_result",
240
+ "task_id": task_id,
241
+ "result": result['result'],
242
+ "sender": self.name,
243
+ "original_message_id": message_id,
244
+ "error": result['error']
245
+ }
246
+ )
247
+ print(f"{self.name}: Result sent successfully")
248
+
249
+ # Acknowledge task completion
250
+ if message_id:
251
+ await self.transport.acknowledge_message(self.name, message_id)
252
+ print(f"{self.name}: Task {task_id} acknowledged with message_id {message_id}")
253
+ else:
254
+ print(f"{self.name}: No message_id for task {task_id}, cannot acknowledge")
255
+ except Exception as e:
256
+ print(f"{self.name}: Error processing task: {e}")
257
+ traceback.print_exc()
258
+
259
+ if reply_to:
260
+ await self.transport.send_message(
261
+ reply_to,
262
+ {
263
+ "type": "task_result",
264
+ "task_id": task_id,
265
+ "result": f"Error: {str(e)}",
266
+ "sender": self.name,
267
+ "original_message_id": message_id,
268
+ "error": True
269
+ }
270
+ )
271
+
272
+ self.task_queue.task_done()
273
+
274
+ except Exception as e:
275
+ print(f"{self.name}: Error processing task: {e}")
276
+ traceback.print_exc()
277
+ await asyncio.sleep(1)
278
+
279
+ def run(self):
280
+ """Start the message and task processors"""
281
+ if not self.transport:
282
+ raise ValueError(f"{self.name}: No transport configured")
283
+
284
+ # Start the transport server if not in client mode
285
+ if not self.client_mode:
286
+ def run_server():
287
+ config = uvicorn.Config(
288
+ app=self.app,
289
+ host=self.transport.host,
290
+ port=self.transport.port,
291
+ log_level="info"
292
+ )
293
+ server = uvicorn.Server(config)
294
+ server.run()
295
+
296
+ self.server_thread = Thread(target=run_server, daemon=True)
297
+ self.server_thread.start()
298
+ else:
299
+ # In client mode, we're ready immediately
300
+ self.server_ready.set()
301
+
302
+ print(f"{self.name}: Starting message processor...")
303
+ asyncio.create_task(self.process_messages())
304
+
305
+ print(f"{self.name}: Starting task processor...")
306
+ asyncio.create_task(self.process_tasks())
307
+
308
+ async def connect_to_server(self, server_url: str):
309
+ """Connect to a coordinator server"""
310
+ if not self.client_mode:
311
+ raise ValueError("Agent not configured for client mode")
312
+
313
+ # Wait for server to be ready before connecting
314
+ if not self.server_ready.is_set():
315
+ await asyncio.wait_for(self.server_ready.wait(), timeout=10)
316
+
317
+ # Register with the coordinator
318
+ await self.transport.send_message(
319
+ server_url,
320
+ {
321
+ "type": "register",
322
+ "agent_name": self.name,
323
+ "agent_url": self.transport.get_url()
324
+ }
325
+ )