agent-mcp 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. agent_mcp/__init__.py +66 -12
  2. agent_mcp/a2a_protocol.py +316 -0
  3. agent_mcp/agent_lightning_library.py +214 -0
  4. agent_mcp/camel_mcp_adapter.py +521 -0
  5. agent_mcp/claude_mcp_adapter.py +195 -0
  6. agent_mcp/cli.py +47 -0
  7. agent_mcp/google_ai_mcp_adapter.py +183 -0
  8. agent_mcp/heterogeneous_group_chat.py +412 -38
  9. agent_mcp/langchain_mcp_adapter.py +176 -43
  10. agent_mcp/llamaindex_mcp_adapter.py +410 -0
  11. agent_mcp/mcp_agent.py +26 -0
  12. agent_mcp/mcp_transport.py +11 -5
  13. agent_mcp/microsoft_agent_framework.py +591 -0
  14. agent_mcp/missing_frameworks.py +435 -0
  15. agent_mcp/openapi_protocol.py +616 -0
  16. agent_mcp/payments.py +804 -0
  17. agent_mcp/pydantic_ai_mcp_adapter.py +628 -0
  18. agent_mcp/registry.py +768 -0
  19. agent_mcp/security.py +864 -0
  20. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/METADATA +173 -49
  21. agent_mcp-0.1.5.dist-info/RECORD +62 -0
  22. {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/WHEEL +1 -1
  23. agent_mcp-0.1.5.dist-info/entry_points.txt +4 -0
  24. agent_mcp-0.1.5.dist-info/top_level.txt +3 -0
  25. demos/__init__.py +1 -0
  26. demos/basic/__init__.py +1 -0
  27. demos/basic/framework_examples.py +108 -0
  28. demos/basic/langchain_camel_demo.py +272 -0
  29. demos/basic/simple_chat.py +355 -0
  30. demos/basic/simple_integration_example.py +51 -0
  31. demos/collaboration/collaborative_task_example.py +437 -0
  32. demos/collaboration/group_chat_example.py +130 -0
  33. demos/collaboration/simplified_crewai_example.py +39 -0
  34. demos/comprehensive_framework_demo.py +202 -0
  35. demos/langgraph/autonomous_langgraph_network.py +808 -0
  36. demos/langgraph/langgraph_agent_network.py +415 -0
  37. demos/langgraph/langgraph_collaborative_task.py +619 -0
  38. demos/langgraph/langgraph_example.py +227 -0
  39. demos/langgraph/run_langgraph_examples.py +213 -0
  40. demos/network/agent_network_example.py +381 -0
  41. demos/network/email_agent.py +130 -0
  42. demos/network/email_agent_demo.py +46 -0
  43. demos/network/heterogeneous_network_example.py +216 -0
  44. demos/network/multi_framework_example.py +199 -0
  45. demos/utils/check_imports.py +49 -0
  46. demos/workflows/autonomous_agent_workflow.py +248 -0
  47. demos/workflows/mcp_features_demo.py +353 -0
  48. demos/workflows/run_agent_collaboration_demo.py +63 -0
  49. demos/workflows/run_agent_collaboration_with_logs.py +396 -0
  50. demos/workflows/show_agent_interactions.py +107 -0
  51. demos/workflows/simplified_autonomous_demo.py +74 -0
  52. functions/main.py +144 -0
  53. functions/mcp_network_server.py +513 -0
  54. functions/utils.py +47 -0
  55. agent_mcp-0.1.3.dist-info/RECORD +0 -18
  56. agent_mcp-0.1.3.dist-info/entry_points.txt +0 -2
  57. agent_mcp-0.1.3.dist-info/top_level.txt +0 -1
demos/basic/langchain_camel_demo.py
@@ -0,0 +1,272 @@
+ """
+ Demo showcasing LangchainMCPAdapter and CamelMCPAdapter.
+ 
+ This script initializes one agent of each type, starts their adapters,
+ and sends a simple task to each via the configured MCP transport.
+ The LangChain agent is built around ChatOpenAI, while the Camel agent uses
+ camel.agents.ChatAgent with a model instance from ModelFactory.create.
+ They are two different frameworks for building agents, and all
+ communication between them happens over MCP.
+ Check the logs to see the agents processing the tasks.
+ """
+ 
+ import asyncio
+ import json
+ import os
+ import logging
+ import uuid
+ from dotenv import load_dotenv
+ from typing import Dict, Any
+ import time
+ 
+ # MCP Components
+ from agent_mcp.mcp_agent import MCPAgent  # Base class (not used directly here)
+ from agent_mcp.langchain_mcp_adapter import LangchainMCPAdapter
+ from agent_mcp.camel_mcp_adapter import CamelMCPAdapter
+ from agent_mcp.mcp_transport import HTTPTransport
+ from agent_mcp.mcp_decorator import mcp_agent, DEFAULT_MCP_SERVER
+ 
+ # Langchain Components
+ from langchain_openai import ChatOpenAI
+ from langchain.agents import AgentExecutor, create_openai_functions_agent
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.tools import tool  # The tool decorator
+ 
+ # Camel AI Components
+ from camel.agents import ChatAgent
+ from camel.models import ModelFactory
+ from camel.types import ModelType, ModelPlatformType
+ from camel.configs import ChatGPTConfig
+ 
+ # Load environment variables (.env file)
+ load_dotenv()
+ 
+ # Configure logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+ 
+ # --- Configuration ---
+ LANGCHAIN_AGENT_NAME = "LangchainDemoAgent"
+ CAMEL_AGENT_NAME = "CamelDemoAgent"
+ MODEL_NAME = "gpt-4o-mini"  # Or "gpt-4", "gpt-3.5-turbo", etc.
+ SENDER_NAME = "DemoRunner"  # Represents the entity sending initial tasks
+ 
+ # --- Agent Setup ---
+ 
+ # 1. Langchain Agent Setup
+ def setup_langchain_agent():
+     logger.info("Setting up Langchain agent...")
+     llm = ChatOpenAI(model=MODEL_NAME, temperature=0.7, api_key=os.getenv("OPENAI_API_KEY"))
+ 
+     # Simple prompt
+     prompt = ChatPromptTemplate.from_messages([
+         ("system", "You are a helpful assistant called {agent_name}."),
+         ("user", "{input}"),
+         # Placeholder for agent scratchpad (required by create_openai_functions_agent)
+         ("placeholder", "{agent_scratchpad}"),
+     ])
+ 
+     # Define a dummy tool to satisfy the OpenAI functions agent requirement
+     @tool
+     def dummy_tool() -> str:
+         """A placeholder tool that does nothing."""
+         return "This tool does nothing."
+ 
+     # Add the dummy tool to the list
+     tools = [dummy_tool]
+ 
+     # Create the agent logic
+     agent = create_openai_functions_agent(llm, tools, prompt)
+ 
+     # Create the executor (verbose=True surfaces Langchain's own logs)
+     agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
+     logger.info("Langchain agent setup complete.")
+     return agent_executor
+ 
+ # 2. Camel AI Agent Setup
+ def setup_camel_agent():
+     logger.info("Setting up Camel AI agent...")
+     # Ensure API key is available for Camel's model factory
+     if not os.getenv("OPENAI_API_KEY"):
+         raise ValueError("OPENAI_API_KEY must be set for Camel AI agent.")
+ 
+     # Use Camel's ModelFactory
+     # Note: Camel needs specific ModelType enums, adjust if needed
+     try:
+         # Find the appropriate ModelType enum for the model name
+         camel_model_type = getattr(ModelType, MODEL_NAME.upper().replace("-", "_"), None)
+         if camel_model_type is None:
+             logger.warning(f"Camel ModelType for '{MODEL_NAME}' not found directly, using GPT_4O_MINI as fallback.")
+             camel_model_type = ModelType.GPT_4O_MINI  # Adjust as needed
+ 
+         # Specify the platform (OpenAI in this case)
+         model_platform = ModelPlatformType.OPENAI
+ 
+         # Provide platform, type, and basic config
+         model_instance = ModelFactory.create(
+             model_platform=model_platform,
+             model_type=camel_model_type,
+             model_config_dict=ChatGPTConfig().as_dict()
+         )
+     except Exception as e:
+         logger.error(f"Failed to create Camel model: {e}. Ensure API keys are set and model type is supported.")
+         raise
+ 
+     # Create Camel ChatAgent (f-string so the agent name is actually substituted)
+     system_prompt = f"You are a creative AI assistant called {CAMEL_AGENT_NAME}, skilled in writing poetry."
+     camel_agent = ChatAgent(system_message=system_prompt, model=model_instance)
+     logger.info("Camel AI agent setup complete.")
+     return camel_agent
+ 
+ # --- Main Execution ---
+ 
+ async def main():
+     logger.info("Starting Langchain & Camel Adapters Demo...")
+ 
+     # Ensure API Key is present
+     if not os.getenv("OPENAI_API_KEY"):
+         logger.error("FATAL: OPENAI_API_KEY environment variable not set.")
+         print("\nPlease set your OPENAI_API_KEY in a .env file or environment variables.\n")
+         return
+ 
+     # Initialize components
+     langchain_executor = setup_langchain_agent()
+     camel_chat_agent = setup_camel_agent()
+ 
+     # Registration and transport connection are handled explicitly below,
+     # before the adapters' run loops are started.
+ 
+     transport = HTTPTransport.from_url(DEFAULT_MCP_SERVER)
+ 
+     # Initialize Adapters
+     logger.info("Initializing Adapters...")
+     langchain_adapter = LangchainMCPAdapter(
+         name=LANGCHAIN_AGENT_NAME,
+         agent_executor=langchain_executor,
+         transport=transport,
+         system_message=f"""I am the {LANGCHAIN_AGENT_NAME}. Let's have a focused discussion about AI, multi-agent systems, and multi-agent collaboration."""
+     )
+ 
+     camel_adapter = CamelMCPAdapter(
+         name=CAMEL_AGENT_NAME,
+         transport=transport,
+         camel_agent=camel_chat_agent,
+         system_message=f"""I am the {CAMEL_AGENT_NAME}. Engage in substantive dialogue about AI agents."""
+     )
+ 
+     # Helper to register an agent and extract its auth token
+     async def register_and_get_token(agent, agent_name):
+         logger.info(f"Registering {agent_name}...")
+         try:
+             registration = await transport.register_agent(agent)
+             body = json.loads(registration.get('body', '{}'))
+             token = body.get('token')
+             if token:
+                 logger.info(f"Registration successful for {agent_name}")
+                 return token
+             logger.error(f"No token in response: {body}")
+         except Exception as e:
+             logger.error(f"Registration error for {agent_name}: {e}")
+         return None
+ 
+     # Register agents and get tokens
+     langchain_token = await register_and_get_token(langchain_adapter, LANGCHAIN_AGENT_NAME)
+     camel_token = await register_and_get_token(camel_adapter, CAMEL_AGENT_NAME)
+ 
+     if not (langchain_token and camel_token):
+         logger.error("Failed to register one or both agents")
+         return
+ 
+     # Now connect with both agent_name and token parameters
+     await transport.connect(agent_name=LANGCHAIN_AGENT_NAME, token=langchain_token)
+     await transport.connect(agent_name=CAMEL_AGENT_NAME, token=camel_token)
188
+
189
+ # Start Adapters in background tasks
190
+ lc_task = asyncio.create_task(langchain_adapter.run(), name=f"{LANGCHAIN_AGENT_NAME}_run")
191
+ camel_task = asyncio.create_task(camel_adapter.run(), name=f"{CAMEL_AGENT_NAME}_run")
192
+
193
+ # Allow time for adapters to fully start their loops
194
+ logger.info("Waiting for adapters to initialize loops (2s)...")
195
+ await asyncio.sleep(2)
196
+ logger.info("Adapters should be running.")
197
+
198
+ # --- Initiate Conversation ---
199
+ initial_task_id = f"conv_start_{uuid.uuid4()}"
200
+ initial_message_content = """Let's explore multi-agent coordination patterns through 3-5 focused exchanges. \
201
+ Please aim to: \
202
+ 1. Identify key challenges\
203
+ 2. Discuss 2-3 solutions \
204
+ 3. Propose conclusion when we've covered substantive ground"""
205
+
206
+ initial_task = {
207
+ "type": "task",
208
+ "task_id": initial_task_id,
209
+ "description": initial_message_content,
210
+ "sender": LANGCHAIN_AGENT_NAME, # Langchain starts
211
+ "reply_to": CAMEL_AGENT_NAME # Send responder as CamelAgent
212
+ }
213
+
214
+ # Replace conversation timer with hybrid limits
215
+ MAX_DURATION = 60 # seconds (1 minute max)
216
+ MAX_TURNS = 10
217
+ TERMINATION_PHRASES = [
218
+ "wrap up this discussion",
219
+ "finalize our discussion",
220
+ "conclusion reached",
221
+ "summary of key points"
222
+ ]
223
+
224
+ try:
225
+ logger.info(f"[{LANGCHAIN_AGENT_NAME}] Sending initial message to {CAMEL_AGENT_NAME}...")
226
+ await transport.send_message(target=CAMEL_AGENT_NAME, message=initial_task)
227
+
228
+ start_time = time.monotonic()
229
+ turn_count = 0
230
+
231
+ while (time.monotonic() - start_time) < MAX_DURATION and turn_count < MAX_TURNS:
232
+ try:
233
+ msg, message_id = await asyncio.wait_for(transport.receive_message(), timeout=15)
234
+ if msg:
235
+ content = msg.get('content', {}).get('text', '').lower()
236
+ if any(phrase in content for phrase in TERMINATION_PHRASES):
237
+ logger.info("Natural conversation conclusion detected")
238
+ break
239
+ turn_count += 1
240
+ except asyncio.TimeoutError:
241
+ logger.info("No message received in 15 seconds")
242
+ break
243
+
244
+ logger.info(f"Conversation ended after {turn_count} turns")
245
+ except Exception as e:
246
+ logger.error(f"An error occurred during conversation initiation or waiting: {e}", exc_info=True)
247
+
248
+ finally:
249
+ logger.info("Initiating cleanup sequence...")
250
+
251
+ # Cancel agent tasks first
252
+ lc_task.cancel()
253
+ camel_task.cancel()
254
+
255
+ # Stop transport before awaiting
256
+ await transport.stop()
257
+
258
+ # Then await tasks
259
+ await asyncio.gather(lc_task, camel_task, return_exceptions=True)
260
+
261
+ # Finally disconnect transport
262
+ await transport.disconnect()
263
+ logger.info("Cleanup completed successfully")
264
+ logger.info("Demo finished.")
265
+
266
+ if __name__ == "__main__":
267
+ try:
268
+ asyncio.run(main())
269
+ except KeyboardInterrupt:
270
+ logger.info("Demo interrupted by user.")
271
+ except Exception as e:
272
+ logger.error(f"Unhandled exception in main: {e}", exc_info=True)
demos/basic/simple_chat.py
@@ -0,0 +1,355 @@
+ """
+ Super simple example of two agents chatting through the hosted server.
+ Just run this file and watch them talk!
+ """
+ 
+ import asyncio
+ import os
+ import logging
+ from typing import TypedDict, Optional, Union
+ from dotenv import load_dotenv
+ import autogen
+ from langchain_openai import ChatOpenAI
+ from langchain_core.prompts import ChatPromptTemplate
+ from langgraph.graph import StateGraph, START, END
+ from agent_mcp import mcp_agent
+ from agent_mcp.mcp_agent import MCPAgent
+ 
+ # Load environment variables
+ load_dotenv()
+ 
+ # Configure logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+ 
+ # Server URL
+ # server_url = os.getenv('MCP_SERVER_URL', 'https://mcp-server-ixlfhxquwq-ew.a.run.app')
+ # print(f"Using MCP Server: {server_url}")
27
+
28
+ # --- Agent Definitions ---
29
+
30
+ # Autogen Agent with MCP
31
+ @mcp_agent(mcp_id="AutoGen_Alice")
32
+ class AutogenAgent(autogen.ConversableAgent):
33
+ def __init__(self, name="AutoGen_Alice", **kwargs):
34
+ llm_config = {
35
+ "config_list": [{
36
+ "model": "gpt-4",
37
+ "api_key": os.getenv("OPENAI_API_KEY")
38
+ }],
39
+ "temperature": 0.7
40
+ }
41
+ super().__init__(
42
+ name=name,
43
+ llm_config=llm_config,
44
+ system_message="You are Alice. Keep responses short. End the conversation if Bob says 'goodbye' or after 5 exchanges.",
45
+ human_input_mode="NEVER",
46
+ **kwargs
47
+ )
48
+ self.message_count = 0
49
+
50
+ async def process_received_message(self, message, sender):
51
+ """Process received message and generate reply using Autogen's capabilities"""
52
+ # Extract message content properly
53
+ message_text = self._extract_message_text(message)
54
+
55
+ # Ensure we have valid content
56
+ if not message_text:
57
+ logger.warning(f"Invalid message received: {message}")
58
+ return None
59
+
60
+ # Check for end conditions
61
+ self.message_count += 1
62
+ if self.message_count >= 5:
63
+ # Generate a farewell message
64
+ reply = await self.a_generate_reply(
65
+ messages=[{"role": "user", "content": "Generate a friendly goodbye message as we've reached the end of our conversation."}],
66
+ )
67
+ if isinstance(reply, dict):
68
+ reply = reply.get('content', '')
69
+ return {"content": {"text": str(reply)}}
70
+
71
+ # Use Autogen's built-in reply generation
72
+ reply = await self.a_generate_reply(
73
+ messages=[{
74
+ "role": "system",
75
+ "content": "You are having a friendly conversation. Respond naturally to the user's message."
76
+ }, {
77
+ "role": "user",
78
+ "content": message_text
79
+ }],
80
+ )
81
+ print("Message from Autogen: ", reply)
82
+ if isinstance(reply, dict):
83
+ reply = reply.get('content', '')
84
+ return {"content": {"text": str(reply)}}
85
+
86
+ def _extract_message_text(self, message: Union[dict, tuple]) -> str:
87
+ """Extract text content from message, handling different message formats"""
88
+ logger.info(f"Processing message: {message}")
89
+
90
+ # Handle tuple format
91
+ if isinstance(message, tuple):
92
+ message = message[0] # Extract message dict from tuple
93
+ logger.info(f"Extracted message from tuple: {message}")
94
+
95
+ # Handle dict format
96
+ if isinstance(message, dict):
97
+ # Check for nested content structure
98
+ if 'content' in message:
99
+ content = message['content']
100
+ if isinstance(content, dict):
101
+ # Handle nested content with text field
102
+ if 'text' in content:
103
+ text = content['text']
104
+ logger.info(f"Extracted text from nested content: {text}")
105
+ return text
106
+ else:
107
+ logger.warning(f"Content dict missing 'text' field: {content}")
108
+ return str(content)
109
+ elif isinstance(content, str):
110
+ # Handle direct string content
111
+ logger.info(f"Extracted direct string content: {content}")
112
+ return content
113
+ else:
114
+ logger.warning(f"Unexpected content type: {type(content)}")
115
+ return str(content)
116
+ else:
117
+ logger.warning(f"Message missing 'content' field: {message}")
118
+ return str(message)
119
+
120
+ logger.warning(f"Unexpected message format: {message}")
121
+ return str(message)
122
+
+ # LangGraph Agent with MCP
+ class ChatState(TypedDict):
+     """State definition for chat"""
+     messages: list[dict]
+     current_message: str
+     response: Optional[str]
+     message_count: int
+ 
+ @mcp_agent(mcp_id="LangGraph_Bob")
+ class LangGraphAgent:
+     """LangGraph-based agent with MCP integration"""
+     def __init__(self, name: str):
+         self.name = name
+         self.message_count = 0
+ 
+         # Initialize LLM
+         api_key = os.getenv("OPENAI_API_KEY")
+         if not api_key:
+             raise ValueError("OPENAI_API_KEY environment variable is required")
+         self.llm = ChatOpenAI(temperature=0.7)
+ 
+         # Create chat prompt
+         self.prompt = ChatPromptTemplate.from_messages([
+             ("system", "You are Bob, a friendly AI assistant having a conversation. Respond naturally and engagingly to the user's messages."),
+             ("user", "{message}")
+         ])
+ 
+         # Create LangGraph workflow
+         workflow = StateGraph(ChatState)
+ 
+         # Add processing node
+         workflow.add_node("process", self._process_message)
+ 
+         # Set entry point and connect nodes
+         workflow.add_edge(START, "process")
+         workflow.add_edge("process", END)
+ 
+         # Compile the graph
+         self.app = workflow.compile()
+ 
+     def _process_message(self, state: ChatState) -> ChatState:
+         """Process a message in the chat using LLM"""
+         # Get current message from state
+         message = state["current_message"]
+ 
+         # Generate response using LLM
+         chain = self.prompt | self.llm
+         response = chain.invoke({"message": message})
+         print("Message from LangGraph: ", response)
+         # Extract response content
+         response_text = response.content if hasattr(response, 'content') else str(response)
+ 
+         # Update state with generated response
+         return {
+             "messages": state.get("messages", []) + [{"role": "assistant", "content": response_text}],
+             "current_message": message,
+             "response": response_text,
+             "message_count": state.get("message_count", 0) + 1
+         }
+ 
+     async def process_received_message(self, message, sender):
+         """Process received message through LangGraph state machine"""
+         # Extract message content properly
+         message_text = self._extract_message_text(message)
+ 
+         if not message_text:
+             logger.warning(f"Invalid message received: {message}")
+             return None
+ 
+         # Set initial state
+         state = {
+             "messages": [],
+             "current_message": message_text,
+             "message_count": 0,
+             "response": None
+         }
+ 
+         # Process through LangGraph app
+         try:
+             # The app will run _process_message, which updates the state.
+             # Use asynchronous invoke to avoid blocking the event loop.
+             result = await self.app.ainvoke(state)
+ 
+             # Get the response from the updated state
+             response_text = result.get('response', '')
+             if not response_text:
+                 logger.warning("No response generated by workflow")
+                 return None
+ 
+             # Update message count
+             self.message_count += 1
+ 
+             # Return response in proper format
+             return {
+                 "content": {
+                     "text": response_text
+                 }
+             }
+ 
+         except Exception as e:
+             logger.error(f"Error processing message through workflow: {e}")
+             return None
+ 
+     def _extract_message_text(self, message: Union[dict, tuple]) -> str:
+         """Extract text content from message, handling different message formats"""
+         logger.info(f"Processing message: {message}")
+ 
+         # Handle tuple format
+         if isinstance(message, tuple):
+             message = message[0]  # Extract message dict from tuple
+             logger.info(f"Extracted message from tuple: {message}")
+ 
+         # Handle dict format (identical to AutogenAgent._extract_message_text)
+         if isinstance(message, dict):
+             if 'content' in message:
+                 content = message['content']
+                 if isinstance(content, dict):
+                     if 'text' in content:
+                         text = content['text']
+                         logger.info(f"Extracted text from nested content: {text}")
+                         return text
+                     logger.warning(f"Content dict missing 'text' field: {content}")
+                     return str(content)
+                 elif isinstance(content, str):
+                     logger.info(f"Extracted direct string content: {content}")
+                     return content
+                 else:
+                     logger.warning(f"Unexpected content type: {type(content)}")
+                     return str(content)
+             logger.warning(f"Message missing 'content' field: {message}")
+             return str(message)
+ 
+         logger.warning(f"Unexpected message format: {message}")
+         return str(message)
+ 
+ async def main():
+     # Initialize agents
+     alice = AutogenAgent()
+     bob = LangGraphAgent("LangGraph_Bob")
+ 
+     try:
+         # Connect agents
+         logger.info("Connecting agents...")
+         await asyncio.gather(
+             alice.connect(),
+             bob.connect()
+         )
+         logger.info("Agents connected successfully.")
+ 
+         # Clean-up phase: give the initial polling a moment to drain old messages
+         logger.info("Cleaning up old messages...")
+         await asyncio.sleep(2)
+ 
+         # Initial message
+         current_sender = alice
+         init_message = await alice.a_generate_reply(messages=[{
+             "role": "user",
+             "content": "generate a friendly greeting in a super duper casual way"
+         }])
+         if isinstance(init_message, dict):  # a_generate_reply may return a dict
+             init_message = init_message.get('content', '')
+         print("Initial message: ", init_message)
+         current_receiver = bob
+         message = {
+             "content": {
+                 "text": init_message,
+             }
+         }
+ 
+         processed_messages = set()  # Track processed message IDs
+         message_count = 0  # Track total messages processed
+         MAX_MESSAGES = 10  # Maximum number of messages before ending conversation
+ 
+         # Conversation loop
+         while True:
+             try:
+                 if message_count >= MAX_MESSAGES:
+                     logger.info("Maximum message count reached. Ending conversation.")
+                     break
+ 
+                 # Send message
+                 logger.info(f"[{current_sender.name}] Sending: {message}")
+                 await current_sender.send_message(target=current_receiver.name, message=message)
+ 
+                 # Wait for the next message from the network
+                 logger.info(f"[{current_receiver.name}] Waiting for reply...")
+                 received_msg = await current_receiver.receive_message()
+ 
+                 if not received_msg:
+                     logger.warning("No message received. Ending conversation.")
+                     break
+ 
+                 # Process received message
+                 response = await current_receiver.process_received_message(received_msg, current_sender.name)
+ 
+                 # Check for valid response
+                 if not response or not isinstance(response, dict) or 'content' not in response:
+                     logger.warning("Invalid response format. Ending conversation.")
+                     break
+ 
+                 # Check for goodbye messages
+                 content = response['content'].get('text', '').lower()
+                 if 'goodbye' in content:
+                     logger.info("Goodbye message detected. Ending conversation.")
+                     break
+ 
+                 # Swap sender and receiver; the generated response becomes the next message
+                 current_sender, current_receiver = current_receiver, current_sender
+                 message = response
+                 message_count += 1
+ 
+             except Exception as e:
+                 logger.error(f"An error occurred: {e}", exc_info=True)
+                 break
+ 
+     finally:
+         # Disconnect agents
+         logger.info("Disconnecting agents...")
+         await asyncio.gather(
+             alice.disconnect(),
+             bob.disconnect(),
+             return_exceptions=True
+         )
+         logger.info("Agents disconnected.")
+ 
+ if __name__ == "__main__":
+     if not os.getenv("OPENAI_API_KEY"):
+         print("Warning: OPENAI_API_KEY not found in .env file.")
+     asyncio.run(main())
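
Both agent classes in simple_chat.py carry an identical _extract_message_text method, and both target the same envelope: a dict whose 'content' is either a string or a {'text': ...} dict, optionally wrapped in a (message, message_id) tuple. A module-level helper would remove the duplication; the following is a hypothetical refactor with the same behavior, not code from the package:

    from typing import Union

    def extract_message_text(message: Union[dict, tuple]) -> str:
        """Hypothetical shared helper mirroring the duplicated method above."""
        if isinstance(message, tuple):        # (message, message_id) pairs
            message = message[0]
        if isinstance(message, dict) and "content" in message:
            content = message["content"]
            if isinstance(content, dict):     # nested {"text": ...} envelope
                return content.get("text", str(content))
            return str(content)               # bare string (or other) content
        return str(message)                   # unknown shape: best-effort fallback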
demos/basic/simple_integration_example.py
@@ -0,0 +1,51 @@
+ """
+ Example demonstrating the simple one-line integration with AgentMCP.
+ """
+ 
+ from agent_mcp import mcp_agent
+ from agent_mcp.mcp_decorator import register_tool
+ 
+ # Example 1: Simple class-level integration
+ @mcp_agent(name="SimpleAgent")
+ class MyAgent:
+     def generate_response(self, message: str) -> str:
+         return f"Received: {message}"
+ 
+     @register_tool("greet", "Send a greeting message")
+     def greet(self, name: str) -> str:
+         return f"Hello, {name}!"
+ 
+ # Example 2: More complex agent with custom tools
+ @mcp_agent(
+     name="CalculatorAgent",
+     # system_message="I am a calculator agent that can perform basic math operations."
+ )
+ class CalculatorAgent:
+     @register_tool("add", "Add two numbers")
+     def add(self, a: float, b: float) -> float:
+         return a + b
+ 
+     @register_tool("multiply", "Multiply two numbers")
+     def multiply(self, a: float, b: float) -> float:
+         return a * b
+ 
+ def main():
+     # Create instances of our MCP-enabled agents
+     simple_agent = MyAgent()
+     calc_agent = CalculatorAgent()
+ 
+     # Test the agents
+     print("Testing SimpleAgent:")
+     print(simple_agent.generate_response("Hello!"))
+     print(simple_agent.greet("User"))
+ 
+     print("\nTesting CalculatorAgent:")
+     print(f"2 + 3 = {calc_agent.add(2, 3)}")
+     print(f"4 * 5 = {calc_agent.multiply(4, 5)}")
+ 
+     # Show available MCP tools for each agent
+     print("\nSimpleAgent MCP tools:", simple_agent.mcp_tools.keys())
+     print("CalculatorAgent MCP tools:", calc_agent.mcp_tools.keys())
+ 
+ if __name__ == "__main__":
+     main()
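
The decorated classes here are only exercised locally, but @mcp_agent is the same decorator that gives the simple_chat.py agents their connect, send_message, receive_message, and disconnect methods. Assuming it adds the same surface here (this diff does not show that), a networked use of the calculator could look like the sketch below:

    import asyncio

    async def networked_calculator_demo():
        calc = CalculatorAgent()
        await calc.connect()                  # join the MCP network, as in simple_chat.py
        try:
            await calc.send_message(
                target="SimpleAgent",         # assumes a SimpleAgent instance is connected
                message={"content": {"text": "What is 2 + 3?"}},
            )
            reply = await calc.receive_message()
            print("Reply:", reply)
        finally:
            await calc.disconnect()

    # asyncio.run(networked_calculator_demo())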