agent-mcp 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. agent_mcp/__init__.py +2 -2
  2. agent_mcp/camel_mcp_adapter.py +521 -0
  3. agent_mcp/cli.py +47 -0
  4. agent_mcp/heterogeneous_group_chat.py +412 -38
  5. agent_mcp/langchain_mcp_adapter.py +176 -43
  6. agent_mcp/mcp_agent.py +26 -0
  7. agent_mcp/mcp_transport.py +11 -5
  8. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.4.dist-info}/METADATA +6 -4
  9. agent_mcp-0.1.4.dist-info/RECORD +49 -0
  10. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.4.dist-info}/WHEEL +1 -1
  11. agent_mcp-0.1.4.dist-info/entry_points.txt +2 -0
  12. agent_mcp-0.1.4.dist-info/top_level.txt +3 -0
  13. demos/__init__.py +1 -0
  14. demos/basic/__init__.py +1 -0
  15. demos/basic/framework_examples.py +108 -0
  16. demos/basic/langchain_camel_demo.py +272 -0
  17. demos/basic/simple_chat.py +355 -0
  18. demos/basic/simple_integration_example.py +51 -0
  19. demos/collaboration/collaborative_task_example.py +437 -0
  20. demos/collaboration/group_chat_example.py +130 -0
  21. demos/collaboration/simplified_crewai_example.py +39 -0
  22. demos/langgraph/autonomous_langgraph_network.py +808 -0
  23. demos/langgraph/langgraph_agent_network.py +415 -0
  24. demos/langgraph/langgraph_collaborative_task.py +619 -0
  25. demos/langgraph/langgraph_example.py +227 -0
  26. demos/langgraph/run_langgraph_examples.py +213 -0
  27. demos/network/agent_network_example.py +381 -0
  28. demos/network/email_agent.py +130 -0
  29. demos/network/email_agent_demo.py +46 -0
  30. demos/network/heterogeneous_network_example.py +216 -0
  31. demos/network/multi_framework_example.py +199 -0
  32. demos/utils/check_imports.py +49 -0
  33. demos/workflows/autonomous_agent_workflow.py +248 -0
  34. demos/workflows/mcp_features_demo.py +353 -0
  35. demos/workflows/run_agent_collaboration_demo.py +63 -0
  36. demos/workflows/run_agent_collaboration_with_logs.py +396 -0
  37. demos/workflows/show_agent_interactions.py +107 -0
  38. demos/workflows/simplified_autonomous_demo.py +74 -0
  39. functions/main.py +144 -0
  40. functions/mcp_network_server.py +513 -0
  41. functions/utils.py +47 -0
  42. agent_mcp-0.1.3.dist-info/RECORD +0 -18
  43. agent_mcp-0.1.3.dist-info/entry_points.txt +0 -2
  44. agent_mcp-0.1.3.dist-info/top_level.txt +0 -1
@@ -0,0 +1,248 @@
1
+ """
2
+ Autonomous Agent Collaboration Example
3
+
4
+ This script demonstrates how agents can autonomously collaborate without
5
+ hardcoded interaction patterns. The flow is determined by the agents themselves
6
+ based on their analysis of the conversation.
7
+ """
8
+
9
+ import os
10
+ import sys
11
+ import time
12
+ from typing import Dict, Any, Optional
13
+
14
+ # Import the required langgraph components
15
+ try:
16
+ from langgraph.graph import StateGraph
17
+ from langchain_core.messages import HumanMessage, AIMessage
18
+ from langchain_openai import ChatOpenAI
19
+ except ImportError:
20
+ print("Error: Required packages not found. Make sure langgraph, langchain-core, and langchain-openai are installed.")
21
+ sys.exit(1)
22
+
23
class Agent:
    """A simple agent implementation that can make decisions about collaboration."""

    def __init__(self, name: str, specialty: str, system_message: Optional[str] = None):
        """Initialize the agent with a name, specialty, and optional system message.

        Args:
            name: Human-readable agent name (e.g. "Researcher").
            specialty: Short description of the agent's area of expertise.
            system_message: Optional system prompt; defaults to one derived
                from ``name`` and ``specialty``.

        Raises:
            ValueError: If the OPENAI_API_KEY environment variable is not set.
        """
        self.name = name
        self.specialty = specialty
        self.system_message = system_message or f"You are {name}, an expert in {specialty}."

        # Create the OpenAI chat model
        api_key = os.environ.get("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY environment variable not set")

        # BUG FIX: the original passed the system prompt via
        # model_kwargs={"messages": [...]}. LangChain builds the "messages"
        # request field from the invoke() input itself, so injecting another
        # "messages" through model_kwargs sends a duplicate parameter to the
        # OpenAI API. The system prompt is now prepended on each invoke()
        # call in respond() instead.
        self.llm = ChatOpenAI(
            model="gpt-4",
            temperature=0.2,
            api_key=api_key,
        )

    def respond(self, messages: list) -> Dict[str, Any]:
        """Generate a response and determine who to collaborate with next.

        Args:
            messages: Conversation history as dicts with "role", "content"
                and (for assistant turns) "name" keys.

        Returns:
            Dict with "response" (the agent's reply text) and "next" (the
            lowercase id of the agent suggested to speak next).
        """
        # Convert to the format expected by LangChain, with the system
        # prompt first so the model stays in character.
        lc_messages: list = [{"role": "system", "content": self.system_message}]
        for msg in messages:
            if msg["role"] == "user":
                lc_messages.append(HumanMessage(content=msg["content"]))
            elif msg["role"] == "assistant":
                lc_messages.append(AIMessage(content=msg["content"], name=msg.get("name")))

        # Generate a response to the conversation
        response = self.llm.invoke(lc_messages)
        content = response.content

        # Determine who should respond next.
        # We use a special format in the response: [NEXT: agent_name]
        collaboration_prompt = """
        Based on your response, determine which team member should contribute next:
        - Researcher (information gathering)
        - Analyst (data interpretation)
        - Creative (innovative ideas)
        - Planner (implementation strategy)
        - Coordinator (synthesizing and directing)

        Choose the most appropriate agent based on what's needed next.
        Format your choice as [NEXT: agent_name]
        """

        next_agent_decision = self.llm.invoke([
            HumanMessage(content=collaboration_prompt + "\n\nYour response was: " + content)
        ])

        # Extract the next agent from the decision
        next_agent = "coordinator"  # Default
        decision_text = next_agent_decision.content.lower()

        if "[next:" in decision_text:
            # Extract the agent name from the format [NEXT: agent_name]
            start = decision_text.find("[next:") + 6
            end = decision_text.find("]", start)
            if end > start:
                next_agent = decision_text[start:end].strip().lower()

        # Return both the response and the next agent decision
        return {
            "response": content,
            "next": next_agent
        }
93
+
94
class AutonomousCollaboration:
    """Manages autonomous collaboration between agents."""

    def __init__(self):
        """Initialize the collaboration environment."""
        # Mapping of agent_id -> Agent; populated by create_agents().
        self.agents = {}
        # Shared transcript; note it persists across run_collaboration() calls.
        self.message_history = []

    def create_agents(self):
        """Create the agent team.

        This method initializes a team of specialized agents with predefined roles
        and system messages. Each agent is created with specific expertise and
        personality traits to contribute effectively to the collaboration.
        """
        self.agents = {
            "coordinator": Agent(
                name="Coordinator",
                specialty="coordinating team efforts",
                system_message="""You are the Coordinator. Your role is to guide the research process,
                ensure all perspectives are considered, and synthesize information from all team members.
                When appropriate, suggest which agent would be best to handle the next step."""
            ),
            "researcher": Agent(
                name="Researcher",
                specialty="information gathering and fact-finding",
                system_message="""You are the Researcher. Your role is to gather relevant information,
                identify key facts, and provide evidence-based context for the topic.
                When appropriate, suggest which agent would be best to handle the next step."""
            ),
            "analyst": Agent(
                name="Analyst",
                specialty="analyzing data and identifying patterns",
                system_message="""You are the Analyst. Your role is to interpret information,
                identify patterns, evaluate implications, and provide critical insights.
                When appropriate, suggest which agent would be best to handle the next step."""
            ),
            "creative": Agent(
                name="Creative",
                specialty="generating innovative ideas and approaches",
                system_message="""You are the Creative. Your role is to think outside the box,
                generate innovative ideas, make unexpected connections, and envision new possibilities.
                When appropriate, suggest which agent would be best to handle the next step."""
            ),
            "planner": Agent(
                name="Planner",
                specialty="creating implementation strategies",
                system_message="""You are the Planner. Your role is to develop practical strategies,
                create roadmaps, identify necessary resources, and outline implementation steps.
                When appropriate, suggest which agent would be best to handle the next step."""
            )
        }

        print(f"Created {len(self.agents)} agents:")
        for agent_id, agent in self.agents.items():
            print(f"- {agent.name} ({agent_id}): {agent.specialty}")

    def run_collaboration(self, topic: str, max_steps: int = 5):
        """Run an autonomous collaboration on the given topic.

        Args:
            topic: The subject matter for agents to collaborate on
            max_steps: Maximum number of interaction steps (default: 5)

        This method orchestrates the autonomous collaboration process where agents
        interact and build upon each other's contributions. Each agent decides
        which team member should contribute next based on the conversation flow.
        The process continues until max_steps is reached.

        Returns:
            The accumulated message history (list of role/content dicts).
        """
        print(f"\n{'='*80}")
        print(f"AUTONOMOUS COLLABORATION ON: {topic}")
        print(f"{'='*80}\n")

        print("Starting collaboration process...\n")

        # Add the initial message from the user
        self.message_history.append({
            "role": "user",
            "content": f"I need your help researching this topic: {topic}. Please collaborate as a team to explore it thoroughly."
        })

        # Start with the coordinator
        current_agent = "coordinator"

        # Track the interaction flow
        interaction_flow = []

        # Run for max_steps or until we detect a loop
        for step in range(1, max_steps + 1):
            agent = self.agents[current_agent]
            print(f"\n[Step {step}] {agent.name} is responding...")

            # Get the agent's response
            result = agent.respond(self.message_history)

            # Record the response in the message history
            self.message_history.append({
                "role": "assistant",
                "name": agent.name,
                "content": result["response"]
            })

            # Display a summary of the response
            response_summary = result["response"]
            if len(response_summary) > 150:
                response_summary = response_summary[:150] + "..."
            print(f"Response: {response_summary}")

            # Track the flow
            interaction_flow.append({
                "step": step,
                "agent": agent.name,
                "next": result["next"]
            })

            # BUG FIX: validate the LLM-chosen successor. The model can emit
            # a name that is not a registered agent id (misspelling, "user",
            # etc.), which previously raised KeyError on the next iteration.
            chosen = result["next"]
            if chosen not in self.agents:
                print(f"Unknown agent '{chosen}' suggested; defaulting to coordinator.")
                chosen = "coordinator"

            # Update the current agent based on the agent's decision
            print(f"{agent.name} suggests that {result['next']} should respond next.")
            current_agent = chosen

            # Add a short delay to make the output more readable
            time.sleep(1)

        print(f"\n{'='*80}")
        print(f"AUTONOMOUS COLLABORATION COMPLETE")
        print(f"{'='*80}\n")

        # Display the interaction flow
        print("Agent Interaction Flow:")
        print("-" * 40)
        for step in interaction_flow:
            print(f"Step {step['step']}: {step['agent']} → {step['next']}")

        return self.message_history
227
+
228
def main():
    """Entry point: run the autonomous collaboration example.

    Any command-line arguments are joined into the collaboration topic;
    'Sustainable urban development' is used when none are given.
    """
    # Topic comes from the CLI when provided, otherwise use the default.
    cli_args = sys.argv[1:]
    topic = " ".join(cli_args) if cli_args else "Sustainable urban development"

    # Build the team and run the collaboration for up to five steps.
    session = AutonomousCollaboration()
    session.create_agents()
    session.run_collaboration(topic, max_steps=5)
246
+
247
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@@ -0,0 +1,353 @@
1
+ """
2
+ MCP Features Demo
3
+
4
+ This script demonstrates the key features of the Model Context Protocol
5
+ implementation in MCPAgent with detailed explanations of what's happening
6
+ at each step.
7
+ """
8
+
9
+ import os
10
+ import json
11
+ from typing import Dict, List, Any, Optional
12
+
13
+ # Import MCPAgent
14
+ from agent_mcp.mcp_agent import MCPAgent
15
+
16
# Check for API key
# Fail fast at import time: every demo below needs OpenAI access.
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    raise ValueError("Please set the OPENAI_API_KEY environment variable")

# LLM configuration - using gpt-3.5-turbo for faster responses
# Passed as llm_config to MCPAgent in demonstrate_llm_integration().
config = {
    "config_list": [{"model": "gpt-3.5-turbo", "api_key": api_key}],
}
25
+
26
def demonstrate_context_operations():
    """Walk through MCPAgent context management end to end.

    Shows both the direct update_context()/get_context() API and the
    equivalent MCP tools (context_set, context_get, context_list,
    context_remove), plus the context summary used for LLM prompts.
    """
    print("\n=== Demonstrating Context Operations ===")

    # An LLM-free agent is enough for pure context bookkeeping.
    demo_agent = MCPAgent(
        name="ContextDemo",
        system_message="You demonstrate context operations."
    )

    # Step 1: write via the direct API.
    print("\n1. Setting context using direct update_context() method")
    demo_agent.update_context("user", {
        "name": "Alice",
        "preferences": {
            "color": "blue",
            "language": "English",
            "notifications": True
        }
    })
    print("User context added")

    # Step 2: read via the direct API.
    print("\n2. Getting context using direct get_context() method")
    stored_user = demo_agent.get_context("user")
    print(f"Retrieved user context: {json.dumps(stored_user, indent=2)}")

    # Step 3: write through the MCP tool interface instead.
    print("\n3. Setting context using context_set tool")
    tool_result = demo_agent.execute_tool(
        "context_set",
        key="weather",
        value={"location": "New York", "temperature": 72, "conditions": "Sunny"}
    )
    print(f"Tool result: {tool_result}")

    # Step 4: read it back through the tool interface.
    print("\n4. Getting context using context_get tool")
    tool_result = demo_agent.execute_tool("context_get", key="weather")
    print(f"Tool result: {json.dumps(tool_result, indent=2)}")

    # Step 5: enumerate every stored context key.
    print("\n5. Listing all context keys using context_list tool")
    tool_result = demo_agent.execute_tool("context_list")
    print(f"Tool result: {json.dumps(tool_result, indent=2)}")

    # Step 6: the summary string that gets injected into LLM prompts.
    print("\n6. Generating context summary for LLM integration")
    summary = demo_agent._generate_context_summary()
    print(f"Context summary:\n{summary}")

    # Step 7: delete a key via the tool interface.
    print("\n7. Removing context using context_remove tool")
    tool_result = demo_agent.execute_tool("context_remove", key="weather")
    print(f"Tool result: {json.dumps(tool_result, indent=2)}")

    # Step 8: confirm the deletion took effect.
    print("\n8. Verifying context was removed by listing keys again")
    tool_result = demo_agent.execute_tool("context_list")
    print(f"Tool result: {json.dumps(tool_result, indent=2)}")
86
+
87
def demonstrate_custom_tools():
    """Demonstrate registering and using custom tools with MCPAgent.

    Registers two locally defined functions (a calculator and a text
    processor) as MCP tools, inspects them via the mcp_info tool, and
    executes them through execute_tool().
    """
    print("\n=== Demonstrating Custom Tool Registration ===")

    # Create a simple MCP agent (no LLM needed for tool execution).
    agent = MCPAgent(
        name="ToolDemo",
        system_message="You demonstrate tool operations."
    )

    # 1. Define and register a simple calculator tool
    print("\n1. Defining and registering a calculator tool")

    def calculate(operation: str, a: float, b: float) -> Dict:
        """Perform a basic calculation.

        Returns a {"status": "success", "result": ...} dict, or a
        {"status": "error", "message": ...} dict for divide-by-zero or an
        unknown operation.
        """
        result = None
        if operation == "add":
            result = a + b
        elif operation == "subtract":
            result = a - b
        elif operation == "multiply":
            result = a * b
        elif operation == "divide":
            # Report the error instead of raising ZeroDivisionError.
            if b == 0:
                return {"status": "error", "message": "Cannot divide by zero"}
            result = a / b
        else:
            return {"status": "error", "message": f"Unknown operation: {operation}"}

        return {"status": "success", "result": result}

    # Register the tool with parameter descriptions; the
    # <param>_description kwargs document each parameter for tool listings.
    agent.register_mcp_tool(
        name="math_calculate",
        description="Perform basic mathematical calculations",
        func=calculate,
        operation_description="The operation to perform: add, subtract, multiply, or divide",
        a_description="First number",
        b_description="Second number"
    )
    print("Calculator tool registered")

    # 2. Define and register a text processing tool
    print("\n2. Defining and registering a text processing tool")

    def process_text(text: str, operation: str) -> Dict:
        """Process text with various operations.

        Supported operations: uppercase, lowercase, capitalize, word_count.
        Returns the same status/result dict shape as calculate().
        """
        if operation == "uppercase":
            return {"status": "success", "result": text.upper()}
        elif operation == "lowercase":
            return {"status": "success", "result": text.lower()}
        elif operation == "capitalize":
            return {"status": "success", "result": text.capitalize()}
        elif operation == "word_count":
            return {"status": "success", "result": len(text.split())}
        else:
            return {"status": "error", "message": f"Unknown operation: {operation}"}

    # Register the tool
    agent.register_mcp_tool(
        name="text_process",
        description="Process text with various operations",
        func=process_text,
        text_description="The text to process",
        operation_description="The operation to perform: uppercase, lowercase, capitalize, or word_count"
    )
    print("Text processing tool registered")

    # 3. Get information about all tools via the built-in mcp_info tool.
    print("\n3. Getting information about all available tools")
    tool_info = agent.execute_tool("mcp_info")
    print(f"Agent ID: {tool_info['id']}")
    print(f"Agent Name: {tool_info['name']}")
    print(f"MCP Version: {tool_info['version']}")
    print("Available tools:")
    for tool in tool_info['tools']:
        print(f"- {tool['name']}: {tool['description']}")
        if tool['parameters']:
            for param in tool['parameters']:
                required = "required" if param.get('required', False) else "optional"
                print(f"  • {param['name']} ({required}): {param['description']}")

    # 4. Use the calculator tool
    print("\n4. Using the calculator tool")
    result = agent.execute_tool("math_calculate", operation="add", a=5, b=7)
    print(f"5 + 7 = {json.dumps(result, indent=2)}")

    result = agent.execute_tool("math_calculate", operation="multiply", a=6, b=8)
    print(f"6 * 8 = {json.dumps(result, indent=2)}")

    # 5. Use the text processing tool
    print("\n5. Using the text processing tool")
    result = agent.execute_tool("text_process", text="Hello World", operation="uppercase")
    print(f"Uppercase: {json.dumps(result, indent=2)}")

    result = agent.execute_tool("text_process", text="Hello World", operation="word_count")
    print(f"Word count: {json.dumps(result, indent=2)}")
184
+
185
def demonstrate_agent_as_tool():
    """Show one MCPAgent exposing another agent as a callable tool.

    A definitions-focused helper agent is registered on a coordinating
    agent, which then queries it like any other tool. Also shows that the
    helper keeps (and can extend) its own context between calls.
    """
    print("\n=== Demonstrating Agent-as-Tool ===")

    # Step 1: build the two agents.
    print("\n1. Creating a helper agent and a main agent")

    helper_agent = MCPAgent(
        name="HelperAgent",
        system_message="You are a helpful assistant that specializes in providing definitions and explanations."
    )

    # Seed the helper with a small glossary it can answer from.
    helper_agent.update_context("definitions", {
        "Machine Learning": "A field of study that gives computers the ability to learn without being explicitly programmed.",
        "Neural Network": "A computing system inspired by biological neural networks that can learn to perform tasks by analyzing examples.",
        "Context Protocol": "A standardized way for AI systems to share and manage context information."
    })

    coordinator = MCPAgent(
        name="MainAgent",
        system_message="You are a coordinator that can ask other agents for help."
    )

    # Step 2: expose the helper on the coordinator.
    print("\n2. Registering the helper agent as a tool for the main agent")
    coordinator.register_agent_as_tool(helper_agent)
    print("Helper agent registered as a tool")

    # Step 3: the helper now shows up in the coordinator's tool list.
    print("\n3. Listing all tools available to the main agent")
    info = coordinator.execute_tool("mcp_info")
    print("Available tools:")
    for tool in info['tools']:
        print(f"- {tool['name']}: {tool['description']}")

    # Step 4: call the helper through its generated tool name.
    print("\n4. Using the agent tool to ask for a definition")
    reply = coordinator.execute_tool(
        "agent_HelperAgent",
        message="Can you define what Neural Network means?"
    )
    print(f"Response from helper: {json.dumps(reply, indent=2)}")

    # Step 5: grow the helper's glossary after registration.
    print("\n5. Adding more context to the helper agent")
    helper_agent.update_context("definitions", {
        "Machine Learning": "A field of study that gives computers the ability to learn without being explicitly programmed.",
        "Neural Network": "A computing system inspired by biological neural networks that can learn to perform tasks by analyzing examples.",
        "Context Protocol": "A standardized way for AI systems to share and manage context information.",
        "Deep Learning": "A subset of machine learning that uses multi-layered neural networks to model complex patterns."
    })

    print("\n6. Asking for a new definition that was just added")
    reply = coordinator.execute_tool(
        "agent_HelperAgent",
        message="What is Deep Learning?"
    )
    print(f"Response from helper: {json.dumps(reply, indent=2)}")
244
+
245
def demonstrate_llm_integration():
    """Demonstrate using MCPAgent with a real LLM.

    Creates an LLM-backed agent, seeds it with user/weather context, and
    shows that generate_reply() draws on that context. Uses the
    module-level `config` and makes live OpenAI API calls.
    """
    print("\n=== Demonstrating LLM Integration ===")

    # Skip if no API key
    # NOTE(review): unreachable in practice — the module already raises at
    # import time when OPENAI_API_KEY is unset; kept as a defensive guard.
    if not api_key:
        print("Skipping LLM demo - no API key provided")
        return

    # 1. Create an MCP agent with LLM capability
    print("\n1. Creating an MCP agent with LLM capability")
    agent = MCPAgent(
        name="LLMAgent",
        system_message="You are a helpful assistant that uses context to enhance your responses.",
        llm_config=config
    )
    print("Agent created with LLM capability")

    # 2. Add context that will be used by the LLM
    print("\n2. Adding context about the user and weather")
    agent.update_context("user", {
        "name": "Bob",
        "location": "San Francisco",
        "interests": ["hiking", "photography", "cooking"]
    })

    agent.update_context("weather", {
        "location": "San Francisco",
        "current": {
            "temperature": 68,
            "conditions": "Partly cloudy",
            "wind": "Light breeze"
        },
        "forecast": ["Sunny", "Sunny", "Rain", "Partly cloudy"]
    })
    print("Context added")

    # 3. Test with a prompt that should use the context
    print("\n3. Testing with a prompt that should use context")
    prompt = "What's the weather like today and what activities would you recommend for me?"

    print(f"User: {prompt}")
    response = agent.generate_reply(
        messages=[{"role": "user", "content": prompt}]
    )
    print(f"Agent: {response}")

    # 4. Add a tool call to update context
    print("\n4. Adding a new interest through natural language")
    prompt = "Please add 'tennis' to my list of interests."

    print(f"User: {prompt}")
    response = agent.generate_reply(
        messages=[{"role": "user", "content": prompt}]
    )
    print(f"Agent: {response}")

    # Manual detection since it might not work automatically
    # (the LLM may answer without actually invoking the context tool).
    user_context = agent.get_context("user")
    if user_context and "interests" in user_context and isinstance(user_context["interests"], list):
        if "tennis" not in user_context["interests"]:
            user_context["interests"].append("tennis")
            agent.update_context("user", user_context)
            print("Manually added 'tennis' to interests")

    # 5. Check if the context was updated
    print("\n5. Checking if context was updated")
    user_context = agent.get_context("user")
    print(f"Updated user context: {json.dumps(user_context, indent=2)}")

    # 6. Ask about the updated interests
    print("\n6. Asking about updated interests")
    prompt = "What are my interests now?"

    print(f"User: {prompt}")
    response = agent.generate_reply(
        messages=[{"role": "user", "content": prompt}]
    )
    print(f"Agent: {response}")
324
+
325
def main():
    """Run the MCP features demonstration.

    Executes the four feature demos in order, then prints a recap and
    pointers to the more interactive examples.
    """
    print("=== MCP Features Demonstration ===")
    print("This script shows the key features of the MCPAgent with detailed explanations.")

    # Walk through each feature demo in its original order.
    for demo in (
        demonstrate_context_operations,
        demonstrate_custom_tools,
        demonstrate_agent_as_tool,
        demonstrate_llm_integration,
    ):
        demo()

    print("\n=== Demonstration Complete ===")
    print("You've now seen the main features of MCPAgent:")
    print("1. Context management (get, set, list, remove)")
    print("2. Custom tool registration and usage")
    print("3. Using agents as tools")
    print("4. LLM integration with context")
    print("\nTo run the more interactive examples, try:")
    print("- agent_network_example.py: A social network of agents")
    print("- collaborative_task_example.py: A team of agents working on a project")
351
+
352
# Run the full demonstration only when executed as a script.
if __name__ == "__main__":
    main()
@@ -0,0 +1,63 @@
1
+ """
2
+ Autonomous Agent Collaboration Demo
3
+
4
+ This script demonstrates how agents can autonomously collaborate on a topic without
5
+ hardcoded interaction patterns. The user only provides the topic, and the agents
6
+ decide for themselves who to collaborate with and in what order.
7
+ """
8
+
9
+ from demos.langgraph.autonomous_langgraph_network import AutonomousAgentNetwork
10
+ import sys
11
+ import time
12
+
13
def run_autonomous_collaboration(topic):
    """
    Run an autonomous collaborative research process on a given topic.

    Builds an AutonomousAgentNetwork, lets the agents research the topic
    for up to 10 steps (each agent picks its own collaborators), dumps the
    shared workspace, and returns the research results.

    Args:
        topic: The topic for the agents to research and collaborate on
    """
    wide_rule = "=" * 80
    narrow_rule = "=" * 40

    print("\n" + wide_rule)
    print(f"AUTONOMOUS AGENT COLLABORATION: {topic}")
    print(wide_rule + "\n")

    # Build the network of autonomous agents.
    print("Initializing autonomous agent network...")
    agent_net = AutonomousAgentNetwork()
    agent_net.create_network()
    print("Network created with autonomous decision-making capabilities")

    print(f"\nStarting collaborative research on: {topic}")
    print(narrow_rule)
    print("Each agent will autonomously decide which other agents to collaborate with.")
    print("Agents will share information through the workspace without predefined patterns.")
    print(narrow_rule + "\n")

    # Cap the number of agent-to-agent interaction steps.
    max_steps = 10
    print(f"Beginning research (max {max_steps} interaction steps)...")

    # Run the autonomous research.
    outcome = agent_net.research_topic(topic, max_steps=max_steps)

    # Show the final state of the shared workspace.
    print("\nFinal Shared Workspace State:")
    agent_net.show_workspace()

    print("\n" + wide_rule)
    print("RESEARCH COMPLETE")
    print(wide_rule + "\n")
    return outcome
51
+
52
# Script entry point: resolve the topic, then launch the collaboration.
if __name__ == "__main__":
    # If a topic is provided as a command line argument, use that
    if len(sys.argv) > 1:
        topic = " ".join(sys.argv[1:])
    else:
        # Otherwise use a default topic or ask for input
        topic = input("Enter a research topic for autonomous agent collaboration: ")

    # Fall back to a default topic when the user entered nothing.
    if not topic:
        topic = "The future of autonomous AI agents in business"

    run_autonomous_collaboration(topic)