agent-mcp 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_mcp/__init__.py +2 -2
- agent_mcp/camel_mcp_adapter.py +521 -0
- agent_mcp/cli.py +47 -0
- agent_mcp/heterogeneous_group_chat.py +412 -38
- agent_mcp/langchain_mcp_adapter.py +176 -43
- agent_mcp/mcp_agent.py +26 -0
- agent_mcp/mcp_transport.py +11 -5
- {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.4.dist-info}/METADATA +6 -4
- agent_mcp-0.1.4.dist-info/RECORD +49 -0
- {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.4.dist-info}/WHEEL +1 -1
- agent_mcp-0.1.4.dist-info/entry_points.txt +2 -0
- agent_mcp-0.1.4.dist-info/top_level.txt +3 -0
- demos/__init__.py +1 -0
- demos/basic/__init__.py +1 -0
- demos/basic/framework_examples.py +108 -0
- demos/basic/langchain_camel_demo.py +272 -0
- demos/basic/simple_chat.py +355 -0
- demos/basic/simple_integration_example.py +51 -0
- demos/collaboration/collaborative_task_example.py +437 -0
- demos/collaboration/group_chat_example.py +130 -0
- demos/collaboration/simplified_crewai_example.py +39 -0
- demos/langgraph/autonomous_langgraph_network.py +808 -0
- demos/langgraph/langgraph_agent_network.py +415 -0
- demos/langgraph/langgraph_collaborative_task.py +619 -0
- demos/langgraph/langgraph_example.py +227 -0
- demos/langgraph/run_langgraph_examples.py +213 -0
- demos/network/agent_network_example.py +381 -0
- demos/network/email_agent.py +130 -0
- demos/network/email_agent_demo.py +46 -0
- demos/network/heterogeneous_network_example.py +216 -0
- demos/network/multi_framework_example.py +199 -0
- demos/utils/check_imports.py +49 -0
- demos/workflows/autonomous_agent_workflow.py +248 -0
- demos/workflows/mcp_features_demo.py +353 -0
- demos/workflows/run_agent_collaboration_demo.py +63 -0
- demos/workflows/run_agent_collaboration_with_logs.py +396 -0
- demos/workflows/show_agent_interactions.py +107 -0
- demos/workflows/simplified_autonomous_demo.py +74 -0
- functions/main.py +144 -0
- functions/mcp_network_server.py +513 -0
- functions/utils.py +47 -0
- agent_mcp-0.1.3.dist-info/RECORD +0 -18
- agent_mcp-0.1.3.dist-info/entry_points.txt +0 -2
- agent_mcp-0.1.3.dist-info/top_level.txt +0 -1
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LangGraph Example using MCPNode.
|
|
3
|
+
|
|
4
|
+
This example demonstrates the use of Model Context Protocol with LangGraph,
|
|
5
|
+
showing how to build agent graphs with shared context and dynamic behavior.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import os
|
|
9
|
+
import json
|
|
10
|
+
from typing import Dict, List, Any
|
|
11
|
+
|
|
12
|
+
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
|
|
13
|
+
from langchain_core.tools import tool
|
|
14
|
+
import langgraph.graph
|
|
15
|
+
from langgraph.graph import END
|
|
16
|
+
from langgraph.prebuilt import ToolNode
|
|
17
|
+
from openai import OpenAI
|
|
18
|
+
|
|
19
|
+
# Import our MCP implementation for LangGraph
|
|
20
|
+
from agent_mcp.mcp_langgraph import MCPNode, MCPReactAgent, create_mcp_langgraph
|
|
21
|
+
|
|
22
|
+
# Initialize OpenAI client
|
|
23
|
+
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
|
|
24
|
+
openai = OpenAI(api_key=OPENAI_API_KEY)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def get_llm():
    """Build and return a LangChain-compatible chat model (gpt-4o, temp 0.7)."""
    # Deferred, function-scope import: langchain_openai is only needed
    # when a model is actually requested.
    from langchain_openai import ChatOpenAI

    return ChatOpenAI(model="gpt-4o", temperature=0.7)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class LangGraphExample:
    """Demonstration of the MCP protocol with LangGraph.

    Provides two runnable demos:

    * ``run_simple_example`` -- a single MCP-enabled graph that answers a
      user query from shared context and updates that context via a tool
      call.
    * ``run_multi_node_example`` -- a unified MCP agent with two custom
      tools wired into a one-node StateGraph.
    """

    def __init__(self):
        """Initialize the LangGraph Example with MCP capabilities."""
        self.llm = get_llm()

    @staticmethod
    def _latest_ai_message(messages):
        """Return the most recent AIMessage in *messages*, or None.

        Scanning in reverse matters: after several graph turns the message
        list holds multiple AIMessages, and only the last one is the reply
        to the latest user input.
        """
        return next(
            (msg for msg in reversed(messages) if isinstance(msg, AIMessage)),
            None,
        )

    def run_simple_example(self):
        """Run a simple example of MCP with LangGraph."""
        print("=== Simple MCP LangGraph Example ===")

        # Create a graph with MCP capabilities
        graph = create_mcp_langgraph(
            self.llm,
            name="SimpleMCPGraph",
            system_message="You are a helpful assistant that uses context to answer questions."
        )

        # Access the MCP agent for the graph
        mcp_agent = graph.mcp_agent

        # Add context to the MCP agent
        print("1. Adding context to the MCP agent")
        mcp_agent.update_context("user_info", {
            "name": "Alice",
            "occupation": "Data Scientist",
            "interests": ["AI", "machine learning", "hiking"]
        })
        mcp_agent.update_context("current_weather", {
            "location": "San Francisco",
            "condition": "Sunny",
            "temperature": 72
        })

        # List the available context
        print("2. Listing available context")
        context_list = mcp_agent.execute_tool("context_list")
        print(f"Available context keys: {json.dumps(context_list['keys'], indent=2)}")

        # Run the graph with a user question
        print("3. Running the graph with a user query")
        question = "What outdoor activities would you recommend for me today?"

        # Create the initial state
        initial_state = {"messages": [HumanMessage(content=question)]}

        # Execute the graph
        result = graph.invoke(initial_state)

        # Print the response.
        # BUG FIX: previously this grabbed the FIRST AIMessage in the
        # history, which after a second turn is a stale reply; use the
        # latest one (and tolerate an empty result instead of raising
        # StopIteration), matching run_multi_node_example.
        ai_message = self._latest_ai_message(result["messages"])
        print(f"User: {question}")
        if ai_message:
            print(f"Agent: {ai_message.content}")

        # Update context through a tool call
        print("\n4. Updating context through a tool call")
        new_state = {
            "messages": result["messages"] + [
                HumanMessage(content="Please add 'mountain biking' to my interests.")
            ]
        }

        result = graph.invoke(new_state)

        # Print the response (again, the most recent AI message)
        ai_message = self._latest_ai_message(result["messages"])
        print("User: Please add 'mountain biking' to my interests.")
        if ai_message:
            print(f"Agent: {ai_message.content}")

        # Get the updated user info
        print("\n5. Getting the updated user info")
        user_info = mcp_agent.get_context("user_info")
        print(f"Updated user info: {json.dumps(user_info, indent=2)}")

        print("\nSimple MCP LangGraph Example completed.")

    def run_multi_node_example(self):
        """Run an example with multiple nodes sharing context."""
        print("\n=== Multi-Node MCP LangGraph Example ===")

        # Create custom tools
        @tool("search_database")
        def search_database(query: str) -> str:
            """Search a database for information."""
            # Simulate database search
            if "weather" in query.lower():
                return json.dumps({
                    "result": "Found weather data for San Francisco: Sunny, 72°F"
                })
            elif "restaurants" in query.lower():
                return json.dumps({
                    "result": "Found 5 restaurants near downtown: Sushi Place, Burger Joint, Italian Corner, Thai Spice, Taco Shop"
                })
            else:
                return json.dumps({
                    "result": f"No specific data found for: {query}"
                })

        @tool("notify_user")
        def notify_user(message: str) -> str:
            """Send a notification to the user."""
            return json.dumps({
                "status": "success",
                "message": f"Notification sent: {message}"
            })

        # SIMPLIFIED APPROACH: Use a single MCP agent with all tools
        # This avoids recursion issues in the graph
        print("1. Creating a unified MCP agent with all tools")
        agent = MCPReactAgent(
            name="UnifiedAgent",
            system_message="You are a helpful assistant that can research information and make recommendations."
        )

        # Register all tools with the agent
        agent.register_custom_tool(
            name="search_database",
            description="Search a database for information",
            func=search_database
        )

        agent.register_custom_tool(
            name="notify_user",
            description="Send a notification to the user",
            func=notify_user
        )

        # Create a simple graph with just one node.
        # NOTE(review): `Dict` (bare typing.Dict) as the state schema is
        # unusual for StateGraph -- confirm against the langgraph version
        # pinned by this project before changing it.
        builder = langgraph.graph.StateGraph(Dict)

        # Add the single node
        builder.add_node("agent", agent.create_agent(self.llm))

        # Set entry point
        builder.set_entry_point("agent")

        # Simple edge - just go to END after the agent responds
        builder.add_edge("agent", END)

        # Compile the graph
        graph = builder.compile()

        # Add context to the agent
        print("2. Setting up context")
        user_preferences = {
            "name": "Bob",
            "location": "San Francisco",
            "preferred_activities": ["dining", "outdoor activities"],
            "dietary_restrictions": ["vegetarian"]
        }

        agent.update_context("user_preferences", user_preferences)

        # Run the graph
        print("3. Running the graph with a user query")
        initial_state = {
            "messages": [
                HumanMessage(content="I'd like recommendations for activities today, including places to eat.")
            ]
        }

        # Execute the workflow
        result = graph.invoke(initial_state)

        # Print the final (most recent) AI response, if any
        messages = result.get("messages", [])
        last_ai_message = self._latest_ai_message(messages)

        if last_ai_message:
            print(f"Final response: {last_ai_message.content}")

        # Check context
        print("\n4. Verifying context")
        context = agent.execute_tool("context_list")

        print(f"Agent context keys: {context['keys']}")

        print("\nMulti-Node MCP LangGraph Example completed.")
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def main():
    """Entry point: run both LangGraph MCP demos back to back."""
    print("Starting LangGraph MCP Examples...")

    demo = LangGraphExample()
    demo.run_simple_example()
    demo.run_multi_node_example()

    print("\nAll LangGraph MCP Examples completed successfully.")
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
# Allow this demo module to be executed directly as a script.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Run LangGraph examples with pre-defined inputs to demonstrate a real use case.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import time
|
|
6
|
+
import subprocess
|
|
7
|
+
import langgraph_agent_network
|
|
8
|
+
import langgraph_collaborative_task
|
|
9
|
+
|
|
10
|
+
def _consult_agent(network, agent_id, query, section, key):
    """Query one agent and record its reply in the shared workspace.

    Prints the query and the agent's reply (same format as before),
    stores the reply under ``section``/``key`` attributed to the agent,
    then pauses briefly so the demo output is readable.
    """
    print(f"\nUser query to {agent_id}: {query}")

    response = network.agents[agent_id].generate_reply(
        messages=[{"role": "user", "content": query}]
    )
    print(f"\nResponse from {agent_id}:\n{response}")

    network.update_workspace(
        section=section,
        key=key,
        value=response,
        from_agent=agent_id
    )
    time.sleep(1)


def run_agent_network_scenario():
    """Run a real-world scenario with the agent network.

    Walks three agents (researcher, analyst, planner) through an AI
    assistant project, recording each contribution in the network's
    shared workspace. The repeated query/record/pause step is factored
    into ``_consult_agent``.
    """
    print("\n=== RUNNING AGENT NETWORK SCENARIO ===")
    print("This scenario demonstrates how different agents can collaborate on an AI assistant project.")

    # Create the network manually with our test case
    network = langgraph_agent_network.LangGraphAgentNetwork()
    network.create_network()

    # Set a specific topic related to AI assistants
    print("\n1. Setting the topic to 'AI Assistant Development with MCP'")
    network.set_topic("AI Assistant Development with MCP")
    time.sleep(1)

    # Show the workspace to see how it's initialized
    print("\n2. Examining the initial workspace")
    network.show_workspace()
    time.sleep(1)

    # First, let's ask the researcher to find information about MCP
    print("\n3. Asking the Researcher about MCP")
    _consult_agent(
        network,
        "researcher",
        "What is the Model Context Protocol and why is it important for AI assistants?",
        section="research",
        key="mcp_definition",
    )

    # Now, let's ask the analyst to analyze the implications
    print("\n4. Asking the Analyst to analyze the implications")
    _consult_agent(
        network,
        "analyst",
        "Based on the research about MCP, what are the key benefits and challenges for AI assistants?",
        section="analysis",
        key="mcp_implications",
    )

    # Finally, let's have the planner create a roadmap
    print("\n5. Asking the Planner to create an implementation roadmap")
    _consult_agent(
        network,
        "planner",
        "Create a roadmap for implementing MCP in our AI assistant platform.",
        section="plan",
        key="implementation_roadmap",
    )

    # Show the final workspace with all the contributions
    print("\n6. Examining the final workspace with all contributions")
    network.show_workspace()

    # Show the message history
    print("\n7. Viewing the message history")
    network.show_messages()

    print("\n=== AGENT NETWORK SCENARIO COMPLETED ===")
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _run_assigned_task(project, agent_id, *, task_name, description,
                       work_note, query, status_note=None):
    """Assign a task to *agent_id*, prompt the agent, and mark it completed.

    Prints the same narration the inline version produced: *work_note*
    before the interaction and *status_note* (when given) before the
    status update. The agent's reply becomes the task result.
    """
    task = project.assign_task(
        agent_id=agent_id,
        task_name=task_name,
        description=description
    )
    # assign_task returns a {task_id: ...} mapping with a single entry.
    task_id = next(iter(task))
    time.sleep(1)

    print(work_note)
    print(f"\nUser query to {agent_id}: {query}")

    # Simulate the agent's response
    response = project.agents[agent_id].generate_reply(
        messages=[{"role": "user", "content": query}]
    )
    print(f"\nResponse from {agent_id}:\n{response}")

    if status_note is not None:
        print(status_note)
    project.update_task_status(
        task_id=task_id,
        status="completed",
        result=response
    )
    time.sleep(1)


def run_collaborative_task_scenario():
    """Run a real-world scenario with the collaborative task framework.

    A two-person team (researcher, analyst) works through an AI assistant
    project; the repeated assign/work/complete cycle is factored into
    ``_run_assigned_task``.
    """
    print("\n=== RUNNING COLLABORATIVE TASK SCENARIO ===")
    print("This scenario demonstrates a team working on developing an AI assistant with MCP.")

    # Create the project manually
    project = langgraph_collaborative_task.LangGraphCollaborativeProject(
        project_name="AI Assistant Development"
    )
    project.create_team()

    # Set the project topic and description
    print("\n1. Setting up the project with topic and description")
    project.set_project_topic(
        topic="AI Assistant with Model Context Protocol",
        description="Develop an AI assistant that leverages MCP for improved context handling."
    )
    time.sleep(1)

    # Show the initial workspace
    print("\n2. Examining the initial workspace")
    project.show_workspace()
    time.sleep(1)

    # Assign research task to the researcher and work it to completion
    print("\n3. Assigning a research task")
    _run_assigned_task(
        project,
        "researcher",
        task_name="Research MCP implementations",
        description="Find examples of how MCP is being implemented in various AI systems.",
        work_note="\n4. Working with the Researcher on the task",
        query="I need you to complete your assigned task on researching MCP implementations.",
        status_note="\n5. Updating the task status to completed",
    )

    # Ask analyst to analyze the research
    print("\n6. Assigning analysis task to the Analyst")
    _run_assigned_task(
        project,
        "analyst",
        task_name="Analyze MCP implementation patterns",
        description="Identify patterns and best practices from the research findings.",
        work_note="\n7. Working with the Analyst on their task",
        query="Please analyze the research findings on MCP implementations and identify key patterns.",
    )

    # Show the final workspace with completed tasks
    print("\n8. Examining the final workspace with completed tasks")
    project.show_workspace()

    # Show communication log
    print("\n9. Viewing the project communication log")
    project.show_communication()

    print("\n=== COLLABORATIVE TASK SCENARIO COMPLETED ===")
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def main():
    """Run both demonstrations with real-world scenarios, in order."""
    print("=== LANGGRAPH MCP EXAMPLE DEMONSTRATIONS ===")

    # Agent network first, then the collaborative task framework.
    for scenario in (run_agent_network_scenario, run_collaborative_task_scenario):
        scenario()

    print("\n=== ALL DEMONSTRATIONS COMPLETED ===")
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
# Script entry point guard: only run the demos when executed directly.
if __name__ == "__main__":
    main()
|