agent-mcp 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. agent_mcp/__init__.py +2 -2
  2. agent_mcp/camel_mcp_adapter.py +521 -0
  3. agent_mcp/cli.py +47 -0
  4. agent_mcp/heterogeneous_group_chat.py +412 -38
  5. agent_mcp/langchain_mcp_adapter.py +176 -43
  6. agent_mcp/mcp_agent.py +26 -0
  7. agent_mcp/mcp_transport.py +11 -5
  8. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.4.dist-info}/METADATA +6 -4
  9. agent_mcp-0.1.4.dist-info/RECORD +49 -0
  10. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.4.dist-info}/WHEEL +1 -1
  11. agent_mcp-0.1.4.dist-info/entry_points.txt +2 -0
  12. agent_mcp-0.1.4.dist-info/top_level.txt +3 -0
  13. demos/__init__.py +1 -0
  14. demos/basic/__init__.py +1 -0
  15. demos/basic/framework_examples.py +108 -0
  16. demos/basic/langchain_camel_demo.py +272 -0
  17. demos/basic/simple_chat.py +355 -0
  18. demos/basic/simple_integration_example.py +51 -0
  19. demos/collaboration/collaborative_task_example.py +437 -0
  20. demos/collaboration/group_chat_example.py +130 -0
  21. demos/collaboration/simplified_crewai_example.py +39 -0
  22. demos/langgraph/autonomous_langgraph_network.py +808 -0
  23. demos/langgraph/langgraph_agent_network.py +415 -0
  24. demos/langgraph/langgraph_collaborative_task.py +619 -0
  25. demos/langgraph/langgraph_example.py +227 -0
  26. demos/langgraph/run_langgraph_examples.py +213 -0
  27. demos/network/agent_network_example.py +381 -0
  28. demos/network/email_agent.py +130 -0
  29. demos/network/email_agent_demo.py +46 -0
  30. demos/network/heterogeneous_network_example.py +216 -0
  31. demos/network/multi_framework_example.py +199 -0
  32. demos/utils/check_imports.py +49 -0
  33. demos/workflows/autonomous_agent_workflow.py +248 -0
  34. demos/workflows/mcp_features_demo.py +353 -0
  35. demos/workflows/run_agent_collaboration_demo.py +63 -0
  36. demos/workflows/run_agent_collaboration_with_logs.py +396 -0
  37. demos/workflows/show_agent_interactions.py +107 -0
  38. demos/workflows/simplified_autonomous_demo.py +74 -0
  39. functions/main.py +144 -0
  40. functions/mcp_network_server.py +513 -0
  41. functions/utils.py +47 -0
  42. agent_mcp-0.1.3.dist-info/RECORD +0 -18
  43. agent_mcp-0.1.3.dist-info/entry_points.txt +0 -2
  44. agent_mcp-0.1.3.dist-info/top_level.txt +0 -1
@@ -0,0 +1,216 @@
1
+ """
2
+ Example of heterogeneous agent network with Autogen and Langchain agents.
3
+ """
4
+
5
+ import os
6
+ import asyncio
7
+ from typing import Dict, Any
8
+ import openai
9
+ from agent_mcp.enhanced_mcp_agent import EnhancedMCPAgent
10
+ from agent_mcp.langchain_mcp_adapter import LangchainMCPAdapter
11
+ from agent_mcp.mcp_transport import HTTPTransport
12
+
13
+ # Langchain imports
14
+ from langchain_openai import ChatOpenAI
15
+ from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
16
+ from langchain_community.tools import Tool
17
+ from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
18
+ from langchain.schema.messages import SystemMessage
19
+
20
+ # Check for OpenAI API key
21
+ api_key = os.getenv("OPENAI_API_KEY")
22
+ if not api_key:
23
+ raise ValueError("Please set the OPENAI_API_KEY environment variable")
24
+
25
+ openai.api_key = api_key
26
+
27
async def setup_langchain_agent():
    """Build a Langchain research agent together with its executor.

    Returns:
        A tuple ``(agent, agent_executor)``: the OpenAI-functions agent and
        the ``AgentExecutor`` that runs it with the DuckDuckGo search tool.
    """
    # Web search exposed to the agent as a single Langchain tool.
    web_search = DuckDuckGoSearchAPIWrapper()
    tools = [
        Tool(
            name="duckduckgo_search",
            description="Search the web using DuckDuckGo",
            func=web_search.run,
        )
    ]

    # Deterministic chat model (temperature 0) driving the agent.
    chat_model = ChatOpenAI(temperature=0)
    research_agent = OpenAIFunctionsAgent.from_llm_and_tools(
        llm=chat_model,
        tools=tools,
        system_message=SystemMessage(content=(
            "You are a research assistant that helps find and analyze information."
        )),
    )

    # Executor runs the think/act loop; parse errors are recovered, not raised.
    executor = AgentExecutor.from_agent_and_tools(
        agent=research_agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
    )

    return research_agent, executor
57
+
58
async def check_task_completion(coordinator, task):
    """Report whether every step of *task* has a result; trigger the summary.

    Scans ``task["steps"]`` against ``coordinator.task_results``.  When every
    step has a non-None result, prints the collected results, assigns a
    ``final_summary`` task to the worker at port 8001, and returns True.
    Otherwise returns False.

    NOTE(review): despite the name this is not a pure check — it assigns the
    summary task as a side effect each time it returns True, so callers
    should stop polling after the first True.
    """
    collected = {}
    for step in task["steps"]:
        step_id = step["task_id"]
        # Missing key and an explicit None both mean "still pending".
        if coordinator.task_results.get(step_id) is None:
            return False
        collected[step_id] = coordinator.task_results[step_id]

    print("\n=== All tasks completed! ===")
    print("\nTask Results:")
    for step_id, result in collected.items():
        print(f"\n{step_id}:")
        print(result)

    # Generate final summary using the coordinator
    summary_task = {
        "task_id": "final_summary",
        "description": "Create a comprehensive summary of the quantum computing research. First summarize the initial research findings, then analyze the key insights and implications for the future of quantum computing.",
        "previous_results": collected,
        "reply_to": "http://localhost:8000"
    }

    await coordinator.assign_task("http://localhost:8001", summary_task)
    return True
89
+
90
async def main():
    """Run a coordinator plus Autogen and Langchain workers on a shared task.

    Starts three local HTTP agents (coordinator on 8000, Autogen worker on
    8001, Langchain worker on 8002), assigns a two-step research task, and
    polls until the final summary is produced.
    """
    # Create transport layers — one local HTTP endpoint per agent.
    coordinator_transport = HTTPTransport(host="localhost", port=8000)
    autogen_transport = HTTPTransport(host="localhost", port=8001)
    langchain_transport = HTTPTransport(host="localhost", port=8002)

    # Create coordinator agent (Autogen-based), running in server mode.
    coordinator = EnhancedMCPAgent(
        name="Coordinator",
        transport=coordinator_transport,
        server_mode=True,
        system_message="You coordinate tasks between different agents",
        llm_config={
            "config_list": [{
                "model": "gpt-3.5-turbo",
                "api_key": api_key
            }]
        }
    )

    # Create Autogen worker agent (client mode — connects to the coordinator).
    autogen_worker = EnhancedMCPAgent(
        name="AutogenWorker",
        transport=autogen_transport,
        client_mode=True,
        system_message="I help with text analysis and summarization",
        llm_config={
            "config_list": [{
                "model": "gpt-3.5-turbo",
                "api_key": api_key
            }]
        }
    )

    # Create and setup Langchain agent
    langchain_agent, agent_executor = await setup_langchain_agent()

    # Create Langchain worker agent with adapter
    langchain_worker = LangchainMCPAdapter(
        name="LangchainWorker",
        transport=langchain_transport,
        client_mode=True,
        langchain_agent=langchain_agent,
        agent_executor=agent_executor
    )

    # Start the coordinator server; give it a moment to come up before
    # anything tries to connect.
    print("Starting coordinator server...")
    coordinator.run()
    await asyncio.sleep(2)

    # Start workers first, then give them a moment to start.
    print("Starting workers...")
    autogen_worker.run()
    langchain_worker.run()
    await asyncio.sleep(2)

    # Connect workers to coordinator
    print("Connecting workers to coordinator...")
    await autogen_worker.connect_to_server("http://localhost:8000")
    await langchain_worker.connect_to_server("http://localhost:8000")

    # Example collaborative task: research step feeds the analysis step.
    task = {
        "task_id": "research_task_1",
        "type": "collaborative_task",
        "description": "Research the latest developments in quantum computing and prepare a summary",
        "steps": [
            {
                "agent": "LangchainWorker",
                "task_id": "research_task_1_LangchainWorker",
                "description": "Search for recent quantum computing breakthroughs in 2024",
                "url": "http://localhost:8002"
            },
            {
                "agent": "AutogenWorker",
                "task_id": "research_task_1_AutogenWorker",
                "description": "Analyze and summarize the findings",
                "url": "http://localhost:8001",
                "depends_on": ["research_task_1_LangchainWorker"]
            }
        ]
    }

    print("Assigning tasks to agents...")

    # Store task dependencies so the coordinator can sequence the steps.
    coordinator.task_dependencies = {}
    for step in task["steps"]:
        coordinator.task_dependencies[step["task_id"]] = {
            "url": step["url"],
            "depends_on": list(step.get("depends_on", []))
        }

    print(f"Task dependencies: {coordinator.task_dependencies}")

    # Assign tasks to agents
    for step in task["steps"]:
        await coordinator.assign_task(step["url"], {
            "task_id": step["task_id"],
            "description": step["description"],
            "reply_to": "http://localhost:8000"
        })

    print("Tasks assigned. Waiting for results...")
    try:
        # BUG FIX: the original loop kept calling check_task_completion()
        # after it first returned True, and that call re-assigns the
        # final-summary task as a side effect — so the summary was
        # re-triggered on every poll until it arrived.  Poll only until the
        # first True, then wait separately for the summary result.
        while not await check_task_completion(coordinator, task):
            await asyncio.sleep(1)

        print("\nWaiting for final summary...")
        while "final_summary" not in coordinator.task_results:
            await asyncio.sleep(1)

        print("\n=== Final Summary ===")
        print(coordinator.task_results["final_summary"])
        print("\nAll tasks completed successfully. Shutting down...")
    except KeyboardInterrupt:
        print("Shutting down...")

if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,199 @@
1
+ """
2
+ Example of using multiple agent frameworks together in a collaborative task.
3
+
4
+ This example demonstrates how agents from Autogen, Langchain, CrewAI, and LangGraph
5
+ can work together seamlessly through the MCP framework.
6
+ """
7
+
8
+ import os
9
+ import asyncio
10
+ import openai
11
+ from crewai import Agent as CrewAgent
12
+ from langchain.tools import Tool
13
+ from agent_mcp.enhanced_mcp_agent import EnhancedMCPAgent
14
+ from agent_mcp.langchain_mcp_adapter import LangchainMCPAdapter
15
+ from agent_mcp.crewai_mcp_adapter import CrewAIMCPAdapter
16
+ from agent_mcp.langgraph_mcp_adapter import LangGraphMCPAdapter
17
+ from agent_mcp.heterogeneous_group_chat import HeterogeneousGroupChat
18
+ from agent_mcp.mcp_transport import HTTPTransport
19
+
20
+ # Standard imports for Langchain
21
+ from langchain_openai import ChatOpenAI
22
+ from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
23
+ from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
24
+ from langchain.schema.messages import SystemMessage
25
+
26
+ # Check for OpenAI API key
27
+ api_key = os.getenv("OPENAI_API_KEY")
28
+ if not api_key:
29
+ raise ValueError("Please set the OPENAI_API_KEY environment variable")
30
+
31
+ openai.api_key = api_key
32
+
33
async def setup_langchain_agent():
    """Create the Langchain research agent and its executor.

    Returns:
        ``(agent, agent_executor)`` — the OpenAI-functions agent plus the
        executor wrapping it with a DuckDuckGo search tool.
    """
    # Single search tool backed by DuckDuckGo.
    searcher = DuckDuckGoSearchAPIWrapper()
    tools = [
        Tool(
            name="duckduckgo_search",
            description="Search the web using DuckDuckGo",
            func=searcher.run,
        )
    ]

    # Temperature 0 for reproducible research output.
    chat_model = ChatOpenAI(temperature=0)
    research_agent = OpenAIFunctionsAgent.from_llm_and_tools(
        llm=chat_model,
        tools=tools,
        system_message=SystemMessage(content=(
            "You are a research assistant that helps find information."
        )),
    )

    executor = AgentExecutor.from_agent_and_tools(
        agent=research_agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
    )

    return research_agent, executor
60
+
61
def setup_crewai_agent():
    """Create the CrewAI analyst used for the analysis stage."""
    analyst = CrewAgent(
        role="Data Analyst",
        goal="Analyze and extract insights from research data",
        backstory="You are an expert data analyst with experience in scientific research",
        allow_delegation=False,
    )
    return analyst
69
+
70
def setup_summary_tools():
    """Build the summarization tools handed to the LangGraph worker.

    Returns:
        A single-element list containing a Langchain ``Tool`` that condenses
        text with the chat model.
    """
    llm = ChatOpenAI(temperature=0)

    prompt_prefix = "Summarize the following in a clear and concise way: "

    def summarize(text: str) -> str:
        """Summarize text in a clear and concise way (sync path)."""
        return llm.invoke(prompt_prefix + text).content

    async def asummarize(text: str) -> str:
        """Summarize text in a clear and concise way (async path)."""
        response = await llm.ainvoke(prompt_prefix + text)
        return response.content

    # BUG FIX: the original passed an *async* function as Tool(func=...).
    # Langchain's synchronous execution path would then return an un-awaited
    # coroutine object instead of the summary text.  Supply a sync ``func``
    # and expose the async variant through ``coroutine`` so both call paths
    # behave correctly.
    summarize_tool = Tool(
        name="summarize",
        description="Summarize text in a clear and concise way",
        func=summarize,
        coroutine=asummarize,
    )

    return [summarize_tool]
89
+
90
async def create_worker_agents():
    """Instantiate one client-mode worker per framework, each on its own port.

    Returns:
        A list of four workers: Autogen (8001), Langchain (8002),
        CrewAI (8003), and LangGraph (8004).
    """
    # Shared model configuration for the Autogen worker.
    llm_settings = {
        "config_list": [{
            "model": "gpt-3.5-turbo",
            "api_key": api_key
        }]
    }

    # Autogen worker for task coordination
    integrator = EnhancedMCPAgent(
        name="AutogenWorker",
        transport=HTTPTransport(host="localhost", port=8001),
        client_mode=True,
        system_message="I help coordinate tasks and integrate results",
        llm_config=llm_settings,
    )

    # Langchain worker for research
    lc_agent, lc_executor = await setup_langchain_agent()
    researcher = LangchainMCPAdapter(
        name="LangchainWorker",
        transport=HTTPTransport(host="localhost", port=8002),
        client_mode=True,
        langchain_agent=lc_agent,
        agent_executor=lc_executor,
    )

    # CrewAI worker for analysis
    analyst = CrewAIMCPAdapter(
        name="CrewAIWorker",
        transport=HTTPTransport(host="localhost", port=8003),
        client_mode=True,
        crewai_agent=setup_crewai_agent(),
    )

    # LangGraph worker for summarization
    summarizer = LangGraphMCPAdapter(
        name="LangGraphWorker",
        transport=HTTPTransport(host="localhost", port=8004),
        client_mode=True,
        tools=setup_summary_tools(),
    )

    return [integrator, researcher, analyst, summarizer]
140
+
141
async def main():
    """Assemble the heterogeneous research team and run the pipeline end to end."""
    # All steps report to the same remote MCP server.
    server_url = "https://mcp-server-ixlfhxquwq-ew.a.run.app"

    # Group chat hosting agents from all four frameworks.
    group = HeterogeneousGroupChat(
        name="ResearchTeam",
        server_url=server_url,
    )

    # Coordinator first, then the framework-specific workers.
    coordinator = group.create_coordinator(api_key)
    workers = await create_worker_agents()
    group.add_agents(workers)

    # Connect all agents
    await group.connect()

    # Four-stage pipeline: research -> analysis -> integration -> summary,
    # each stage depending on the previous one's output.
    pipeline_steps = [
        {
            "agent": "LangchainWorker",
            "task_id": "research",
            "description": "Search for recent quantum computing breakthroughs in 2024",
            "url": server_url,
        },
        {
            "agent": "CrewAIWorker",
            "task_id": "analysis",
            "description": "Analyze the research findings and identify key trends and implications",
            "url": server_url,
            "depends_on": ["research"],
        },
        {
            "agent": "AutogenWorker",
            "task_id": "integration",
            "description": "Integrate the research and analysis into a cohesive narrative",
            "url": server_url,
            "depends_on": ["analysis"],
        },
        {
            "agent": "LangGraphWorker",
            "task_id": "summary",
            "description": "Create a final executive summary of all findings",
            "url": server_url,
            "depends_on": ["integration"],
        },
    ]
    task = {
        "task_id": "quantum_research",
        "type": "collaborative_task",
        "description": "Research and analyze quantum computing developments",
        "steps": pipeline_steps,
    }

    # Submit task and wait for completion
    await group.submit_task(task)
    await group.wait_for_completion()

if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,49 @@
1
+ """
2
+ Check Imports for MCPAgent Project.
3
+
4
+ This script checks if all required dependencies for the MCPAgent project are available.
5
+ """
6
+
7
+ import importlib
8
+ import sys
9
+
10
def check_import(module_name, display_name=None):
    """Attempt to import *module_name* and print an availability report.

    Args:
        module_name: Dotted module path to import.
        display_name: Friendly label for the report; defaults to module_name.

    Returns:
        True when the import succeeds, False otherwise.
    """
    label = module_name if display_name is None else display_name

    try:
        mod = importlib.import_module(module_name)
    except ImportError as e:
        print(f"✗ {label} is NOT available: {e}")
        return False

    version = getattr(mod, "__version__", "unknown version")
    print(f"✓ {label} is available (version: {version})")
    return True
23
+
24
def main():
    """Check all required imports and report each one's availability."""
    print("=== Checking Required Dependencies ===\n")

    # Basic Python version check
    python_version = ".".join(map(str, sys.version_info[:3]))
    print(f"Python version: {python_version}")

    # Core dependencies
    check_import("autogen", "AutoGen")
    check_import("openai", "OpenAI API")

    # LangGraph dependencies
    check_import("langchain_core", "LangChain Core")
    check_import("langchain_openai", "LangChain OpenAI")
    check_import("langgraph", "LangGraph")

    # Check our own modules.
    # BUG FIX: the project ships as the ``agent_mcp`` package (which contains
    # ``mcp_agent.py``), so the original bare ``import mcp_agent`` could never
    # succeed from an installed wheel.  Import the module from the package.
    try:
        from agent_mcp import mcp_agent  # noqa: F401
        print("✓ MCPAgent module is available")
    except ImportError as e:
        print(f"✗ MCPAgent module is NOT available: {e}")

if __name__ == "__main__":
    main()