agent-mcp 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_mcp/__init__.py +66 -12
- agent_mcp/a2a_protocol.py +316 -0
- agent_mcp/agent_lightning_library.py +214 -0
- agent_mcp/camel_mcp_adapter.py +521 -0
- agent_mcp/claude_mcp_adapter.py +195 -0
- agent_mcp/cli.py +47 -0
- agent_mcp/google_ai_mcp_adapter.py +183 -0
- agent_mcp/heterogeneous_group_chat.py +412 -38
- agent_mcp/langchain_mcp_adapter.py +176 -43
- agent_mcp/llamaindex_mcp_adapter.py +410 -0
- agent_mcp/mcp_agent.py +26 -0
- agent_mcp/mcp_transport.py +11 -5
- agent_mcp/microsoft_agent_framework.py +591 -0
- agent_mcp/missing_frameworks.py +435 -0
- agent_mcp/openapi_protocol.py +616 -0
- agent_mcp/payments.py +804 -0
- agent_mcp/pydantic_ai_mcp_adapter.py +628 -0
- agent_mcp/registry.py +768 -0
- agent_mcp/security.py +864 -0
- {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/METADATA +173 -49
- agent_mcp-0.1.5.dist-info/RECORD +62 -0
- {agent_mcp-0.1.3.dist-info → agent_mcp-0.1.5.dist-info}/WHEEL +1 -1
- agent_mcp-0.1.5.dist-info/entry_points.txt +4 -0
- agent_mcp-0.1.5.dist-info/top_level.txt +3 -0
- demos/__init__.py +1 -0
- demos/basic/__init__.py +1 -0
- demos/basic/framework_examples.py +108 -0
- demos/basic/langchain_camel_demo.py +272 -0
- demos/basic/simple_chat.py +355 -0
- demos/basic/simple_integration_example.py +51 -0
- demos/collaboration/collaborative_task_example.py +437 -0
- demos/collaboration/group_chat_example.py +130 -0
- demos/collaboration/simplified_crewai_example.py +39 -0
- demos/comprehensive_framework_demo.py +202 -0
- demos/langgraph/autonomous_langgraph_network.py +808 -0
- demos/langgraph/langgraph_agent_network.py +415 -0
- demos/langgraph/langgraph_collaborative_task.py +619 -0
- demos/langgraph/langgraph_example.py +227 -0
- demos/langgraph/run_langgraph_examples.py +213 -0
- demos/network/agent_network_example.py +381 -0
- demos/network/email_agent.py +130 -0
- demos/network/email_agent_demo.py +46 -0
- demos/network/heterogeneous_network_example.py +216 -0
- demos/network/multi_framework_example.py +199 -0
- demos/utils/check_imports.py +49 -0
- demos/workflows/autonomous_agent_workflow.py +248 -0
- demos/workflows/mcp_features_demo.py +353 -0
- demos/workflows/run_agent_collaboration_demo.py +63 -0
- demos/workflows/run_agent_collaboration_with_logs.py +396 -0
- demos/workflows/show_agent_interactions.py +107 -0
- demos/workflows/simplified_autonomous_demo.py +74 -0
- functions/main.py +144 -0
- functions/mcp_network_server.py +513 -0
- functions/utils.py +47 -0
- agent_mcp-0.1.3.dist-info/RECORD +0 -18
- agent_mcp-0.1.3.dist-info/entry_points.txt +0 -2
- agent_mcp-0.1.3.dist-info/top_level.txt +0 -1
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Anthropic Claude Adapter for AgentMCP
|
|
3
|
+
Claude AI integration with Anthropic SDK
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import json
|
|
8
|
+
from typing import Dict, Any, List, Optional
|
|
9
|
+
from agent_mcp.mcp_transport import HTTPTransport
|
|
10
|
+
from agent_mcp.heterogeneous_group_chat import HeterogeneousGroupChat
|
|
11
|
+
|
|
12
|
+
# Try to import Anthropic
|
|
13
|
+
try:
|
|
14
|
+
# Note: This is a placeholder implementation
|
|
15
|
+
# In a real deployment, you would install:
|
|
16
|
+
# pip install anthropic
|
|
17
|
+
# from anthropic import Anthropic, AsyncAnthropic
|
|
18
|
+
ANTHROPIC_AVAILABLE = True
|
|
19
|
+
print("✅ Anthropic Claude support: Available (placeholder)")
|
|
20
|
+
except ImportError:
|
|
21
|
+
ANTHROPIC_AVAILABLE = False
|
|
22
|
+
print("⚠️ Anthropic Claude not available. Install with: pip install anthropic")
|
|
23
|
+
|
|
24
|
+
class ClaudeMCPAdapter:
|
|
25
|
+
"""
|
|
26
|
+
Anthropic Claude AI framework adapter for AgentMCP
|
|
27
|
+
Supports Claude 3, Claude 3.5 Sonnet, Claude 3 Opus
|
|
28
|
+
"""
|
|
29
|
+
|
|
30
|
+
def __init__(self,
|
|
31
|
+
name: str,
|
|
32
|
+
transport: Optional[HTTPTransport] = None,
|
|
33
|
+
client_mode: bool = False,
|
|
34
|
+
model: str = "claude-3-5-sonnet-20241022",
|
|
35
|
+
api_key: Optional[str] = None,
|
|
36
|
+
max_tokens: int = 4096,
|
|
37
|
+
temperature: float = 0.7,
|
|
38
|
+
**kwargs):
|
|
39
|
+
self.name = name
|
|
40
|
+
self.transport = transport
|
|
41
|
+
self.client_mode = client_mode
|
|
42
|
+
self.model = model
|
|
43
|
+
self.api_key = api_key
|
|
44
|
+
self.max_tokens = max_tokens
|
|
45
|
+
self.temperature = temperature
|
|
46
|
+
self.conversation_history = []
|
|
47
|
+
|
|
48
|
+
# Try to get API key from environment if not provided
|
|
49
|
+
if not self.api_key:
|
|
50
|
+
import os
|
|
51
|
+
self.api_key = os.getenv("ANTHROPIC_API_KEY")
|
|
52
|
+
|
|
53
|
+
if not self.api_key:
|
|
54
|
+
raise ValueError("ANTHROPIC_API_KEY environment variable required for Claude adapter")
|
|
55
|
+
|
|
56
|
+
async def create_client(self) -> Any:
|
|
57
|
+
"""Create Anthropic client"""
|
|
58
|
+
if not ANTHROPIC_AVAILABLE:
|
|
59
|
+
raise ImportError("Anthropic not available")
|
|
60
|
+
|
|
61
|
+
try:
|
|
62
|
+
from anthropic import AsyncAnthropic
|
|
63
|
+
return AsyncAnthropic(api_key=self.api_key)
|
|
64
|
+
except ImportError:
|
|
65
|
+
from anthropic import Anthropic
|
|
66
|
+
return Anthropic(api_key=self.api_key)
|
|
67
|
+
|
|
68
|
+
async def create_message(self,
|
|
69
|
+
prompt: str,
|
|
70
|
+
system_prompt: Optional[str] = None,
|
|
71
|
+
tools: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]:
|
|
72
|
+
"""Create a message for Claude"""
|
|
73
|
+
if not ANTHROPIC_AVAILABLE:
|
|
74
|
+
raise ImportError("Anthropic not available")
|
|
75
|
+
|
|
76
|
+
client = await self.create_client()
|
|
77
|
+
|
|
78
|
+
message_content = {
|
|
79
|
+
"role": "user",
|
|
80
|
+
"content": prompt
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
if system_prompt:
|
|
84
|
+
message_content["system"] = system_prompt
|
|
85
|
+
|
|
86
|
+
if tools:
|
|
87
|
+
message_content["tools"] = tools
|
|
88
|
+
|
|
89
|
+
return {
|
|
90
|
+
"message": message_content,
|
|
91
|
+
"model": self.model,
|
|
92
|
+
"max_tokens": self.max_tokens,
|
|
93
|
+
"temperature": self.temperature,
|
|
94
|
+
"client": client
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
async def generate_text(self,
|
|
98
|
+
prompt: str,
|
|
99
|
+
system_prompt: Optional[str] = None,
|
|
100
|
+
stream: bool = False) -> str:
|
|
101
|
+
"""Generate text using Claude"""
|
|
102
|
+
message_data = await self.create_message(prompt, system_prompt)
|
|
103
|
+
|
|
104
|
+
try:
|
|
105
|
+
from anthropic import AsyncAnthropic
|
|
106
|
+
client = await self.create_client()
|
|
107
|
+
|
|
108
|
+
if stream:
|
|
109
|
+
response = await client.messages.create(
|
|
110
|
+
model=message_data["model"],
|
|
111
|
+
messages=[message_data["message"]],
|
|
112
|
+
max_tokens=message_data["max_tokens"],
|
|
113
|
+
temperature=message_data["temperature"],
|
|
114
|
+
stream=True
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
full_response = ""
|
|
118
|
+
async for chunk in response:
|
|
119
|
+
if chunk.type == "content_block_delta":
|
|
120
|
+
if chunk.delta and chunk.delta.text:
|
|
121
|
+
full_response += chunk.delta.text
|
|
122
|
+
elif chunk.type == "message_stop":
|
|
123
|
+
break
|
|
124
|
+
|
|
125
|
+
return full_response
|
|
126
|
+
else:
|
|
127
|
+
response = await client.messages.create(
|
|
128
|
+
model=message_data["model"],
|
|
129
|
+
messages=[message_data["message"]],
|
|
130
|
+
max_tokens=message_data["max_tokens"],
|
|
131
|
+
temperature=message_data["temperature"]
|
|
132
|
+
)
|
|
133
|
+
return response.content[0].text
|
|
134
|
+
except Exception as e:
|
|
135
|
+
# Fallback for import issues
|
|
136
|
+
return f"Claude generation failed: {str(e)}"
|
|
137
|
+
|
|
138
|
+
async def run_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
|
139
|
+
"""Execute a task using Claude AI"""
|
|
140
|
+
if not ANTHROPIC_AVAILABLE:
|
|
141
|
+
return {
|
|
142
|
+
"error": "Anthropic not available",
|
|
143
|
+
"task_id": task.get("task_id", "unknown")
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
prompt = task.get("description", "")
|
|
147
|
+
system_prompt = task.get("system_prompt", "You are Claude AI, a helpful assistant.")
|
|
148
|
+
|
|
149
|
+
try:
|
|
150
|
+
result = await self.generate_text(prompt, system_prompt)
|
|
151
|
+
return {
|
|
152
|
+
"task_id": task.get("task_id", "unknown"),
|
|
153
|
+
"status": "completed",
|
|
154
|
+
"framework": "Anthropic Claude",
|
|
155
|
+
"model": self.model,
|
|
156
|
+
"result": result,
|
|
157
|
+
"tokens_used": self.max_tokens,
|
|
158
|
+
"performance": {
|
|
159
|
+
"response_time": "fast",
|
|
160
|
+
"quality": "high"
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
except Exception as e:
|
|
164
|
+
return {
|
|
165
|
+
"task_id": task.get("task_id", "unknown"),
|
|
166
|
+
"status": "error",
|
|
167
|
+
"framework": "Anthropic Claude",
|
|
168
|
+
"error": str(e)
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
def get_agent_info(self) -> Dict[str, Any]:
|
|
172
|
+
"""Get information about this Claude agent"""
|
|
173
|
+
return {
|
|
174
|
+
"name": self.name,
|
|
175
|
+
"framework": "Anthropic Claude",
|
|
176
|
+
"available": ANTHROPIC_AVAILABLE,
|
|
177
|
+
"model": self.model,
|
|
178
|
+
"max_tokens": self.max_tokens,
|
|
179
|
+
"temperature": self.temperature,
|
|
180
|
+
"capabilities": [
|
|
181
|
+
"text_generation",
|
|
182
|
+
"conversation",
|
|
183
|
+
"context_window_large",
|
|
184
|
+
"safe_responsible_ai",
|
|
185
|
+
"tool_use",
|
|
186
|
+
"streaming"
|
|
187
|
+
],
|
|
188
|
+
"supported_models": [
|
|
189
|
+
"claude-3-5-sonnet-20241022",
|
|
190
|
+
"claude-3-5-sonnet-20240620",
|
|
191
|
+
"claude-3-opus-20240229",
|
|
192
|
+
"claude-3-haiku-20240307"
|
|
193
|
+
],
|
|
194
|
+
"api_integration": "anthropic_sdk"
|
|
195
|
+
}
|
agent_mcp/cli.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Command-line interface for the Agent MCP package.
|
|
3
|
+
"""
|
|
4
|
+
import argparse
|
|
5
|
+
import logging
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
def main():
|
|
9
|
+
"""Entry point for the command-line interface."""
|
|
10
|
+
parser = argparse.ArgumentParser(description="Agent MCP - Multi-agent Collaboration Platform")
|
|
11
|
+
parser.add_argument(
|
|
12
|
+
"--version",
|
|
13
|
+
action="store_true",
|
|
14
|
+
help="Show version information"
|
|
15
|
+
)
|
|
16
|
+
parser.add_argument(
|
|
17
|
+
"-v",
|
|
18
|
+
"--verbose",
|
|
19
|
+
action="count",
|
|
20
|
+
default=0,
|
|
21
|
+
help="Increase verbosity (use -vv for debug level)"
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
args = parser.parse_args()
|
|
25
|
+
|
|
26
|
+
# Configure logging
|
|
27
|
+
log_level = logging.WARNING
|
|
28
|
+
if args.verbose == 1:
|
|
29
|
+
log_level = logging.INFO
|
|
30
|
+
elif args.verbose >= 2:
|
|
31
|
+
log_level = logging.DEBUG
|
|
32
|
+
|
|
33
|
+
logging.basicConfig(
|
|
34
|
+
level=log_level,
|
|
35
|
+
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
if args.version:
|
|
39
|
+
from agent_mcp import __version__
|
|
40
|
+
print(f"Agent MCP version {__version__}")
|
|
41
|
+
return
|
|
42
|
+
|
|
43
|
+
# Default action (you can add more commands here)
|
|
44
|
+
print("Agent MCP - Use --help for usage information")
|
|
45
|
+
|
|
46
|
+
if __name__ == "__main__":
|
|
47
|
+
main()
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Google AI Adapter for AgentMCP
|
|
3
|
+
Google AI/Gemini integration with Google SDK
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import json
|
|
8
|
+
from typing import Dict, Any, List, Optional
|
|
9
|
+
from agent_mcp.mcp_transport import HTTPTransport
|
|
10
|
+
from agent_mcp.heterogeneous_group_chat import HeterogeneousGroupChat
|
|
11
|
+
|
|
12
|
+
# Try to import Google Generative AI
|
|
13
|
+
try:
|
|
14
|
+
# Note: This is a placeholder implementation
|
|
15
|
+
# In a real deployment, you would install:
|
|
16
|
+
# pip install google-generativeai
|
|
17
|
+
# from google.generativeai import GenerativeModel, ChatSession
|
|
18
|
+
GOOGLE_AI_AVAILABLE = True
|
|
19
|
+
print("✅ Google AI support: Available (placeholder)")
|
|
20
|
+
except ImportError:
|
|
21
|
+
GOOGLE_AI_AVAILABLE = False
|
|
22
|
+
print("⚠️ Google AI not available. Install with: pip install google-generativeai")
|
|
23
|
+
|
|
24
|
+
class GoogleAIMCPAdapter:
|
|
25
|
+
"""
|
|
26
|
+
Google AI framework adapter for AgentMCP
|
|
27
|
+
Supports Gemini Pro, Gemini 1.5 Pro, Gemini 1.5 Flash
|
|
28
|
+
"""
|
|
29
|
+
|
|
30
|
+
def __init__(self,
|
|
31
|
+
name: str,
|
|
32
|
+
transport: Optional[HTTPTransport] = None,
|
|
33
|
+
client_mode: bool = False,
|
|
34
|
+
model: str = "gemini-1.5-flash",
|
|
35
|
+
api_key: Optional[str] = None,
|
|
36
|
+
temperature: float = 0.7,
|
|
37
|
+
max_tokens: int = 8192,
|
|
38
|
+
**kwargs):
|
|
39
|
+
self.name = name
|
|
40
|
+
self.transport = transport
|
|
41
|
+
self.client_mode = client_mode
|
|
42
|
+
self.model = model
|
|
43
|
+
self.api_key = api_key
|
|
44
|
+
self.temperature = temperature
|
|
45
|
+
self.max_tokens = max_tokens
|
|
46
|
+
self.conversation_history = []
|
|
47
|
+
|
|
48
|
+
# Try to get API key from environment if not provided
|
|
49
|
+
if not self.api_key:
|
|
50
|
+
import os
|
|
51
|
+
self.api_key = os.getenv("GOOGLE_AI_API_KEY") or os.getenv("GOOGLE_GEMINI_API_KEY")
|
|
52
|
+
|
|
53
|
+
if not self.api_key:
|
|
54
|
+
raise ValueError("GOOGLE_AI_API_KEY or GOOGLE_GEMINI_API_KEY environment variable required")
|
|
55
|
+
|
|
56
|
+
async def create_client(self) -> Any:
|
|
57
|
+
"""Create Google AI client"""
|
|
58
|
+
if not GOOGLE_AI_AVAILABLE:
|
|
59
|
+
raise ImportError("Google AI not available")
|
|
60
|
+
|
|
61
|
+
try:
|
|
62
|
+
import google.generativeai as genai
|
|
63
|
+
return genai.GenerativeModel(
|
|
64
|
+
model_name=self.model,
|
|
65
|
+
generation_config=genai.GenerationConfig(
|
|
66
|
+
temperature=self.temperature,
|
|
67
|
+
max_output_tokens=self.max_tokens
|
|
68
|
+
)
|
|
69
|
+
)
|
|
70
|
+
except Exception as e:
|
|
71
|
+
raise ImportError(f"Google AI client creation failed: {e}")
|
|
72
|
+
|
|
73
|
+
async def create_session(self) -> Any:
|
|
74
|
+
"""Create a chat session with Google AI"""
|
|
75
|
+
if not GOOGLE_AI_AVAILABLE:
|
|
76
|
+
raise ImportError("Google AI not available")
|
|
77
|
+
|
|
78
|
+
try:
|
|
79
|
+
import google.generativeai as genai
|
|
80
|
+
return genai.ChatSession(
|
|
81
|
+
model=self.create_client(),
|
|
82
|
+
history=self.conversation_history
|
|
83
|
+
)
|
|
84
|
+
except Exception as e:
|
|
85
|
+
raise ImportError(f"Google AI session creation failed: {e}")
|
|
86
|
+
|
|
87
|
+
async def generate_text(self,
|
|
88
|
+
prompt: str,
|
|
89
|
+
system_prompt: Optional[str] = None,
|
|
90
|
+
stream: bool = False) -> str:
|
|
91
|
+
"""Generate text using Google AI"""
|
|
92
|
+
if not GOOGLE_AI_AVAILABLE:
|
|
93
|
+
return "Google AI not available"
|
|
94
|
+
|
|
95
|
+
try:
|
|
96
|
+
session = await self.create_session()
|
|
97
|
+
|
|
98
|
+
# Add system prompt if provided
|
|
99
|
+
messages = []
|
|
100
|
+
if system_prompt:
|
|
101
|
+
messages.append({
|
|
102
|
+
"role": "user",
|
|
103
|
+
"parts": [{"text": f"System: {system_prompt}"}]
|
|
104
|
+
})
|
|
105
|
+
|
|
106
|
+
messages.append({
|
|
107
|
+
"role": "user",
|
|
108
|
+
"parts": [{"text": prompt}]
|
|
109
|
+
})
|
|
110
|
+
|
|
111
|
+
if stream:
|
|
112
|
+
response = await session.send_message_stream(messages)
|
|
113
|
+
full_response = ""
|
|
114
|
+
async for chunk in response:
|
|
115
|
+
if chunk.text:
|
|
116
|
+
full_response += chunk.text
|
|
117
|
+
return full_response
|
|
118
|
+
else:
|
|
119
|
+
response = await session.send_message(messages)
|
|
120
|
+
return response.text
|
|
121
|
+
except Exception as e:
|
|
122
|
+
return f"Google AI generation failed: {str(e)}"
|
|
123
|
+
|
|
124
|
+
async def run_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
|
125
|
+
"""Execute a task using Google AI"""
|
|
126
|
+
if not GOOGLE_AI_AVAILABLE:
|
|
127
|
+
return {
|
|
128
|
+
"error": "Google AI not available",
|
|
129
|
+
"task_id": task.get("task_id", "unknown")
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
prompt = task.get("description", "")
|
|
133
|
+
system_prompt = task.get("system_prompt", "You are a helpful AI assistant powered by Google AI.")
|
|
134
|
+
|
|
135
|
+
try:
|
|
136
|
+
result = await self.generate_text(prompt, system_prompt)
|
|
137
|
+
return {
|
|
138
|
+
"task_id": task.get("task_id", "unknown"),
|
|
139
|
+
"status": "completed",
|
|
140
|
+
"framework": "Google AI",
|
|
141
|
+
"model": self.model,
|
|
142
|
+
"result": result,
|
|
143
|
+
"tokens_used": self.max_tokens,
|
|
144
|
+
"performance": {
|
|
145
|
+
"response_time": "fast",
|
|
146
|
+
"quality": "high"
|
|
147
|
+
}
|
|
148
|
+
}
|
|
149
|
+
except Exception as e:
|
|
150
|
+
return {
|
|
151
|
+
"task_id": task.get("task_id", "unknown"),
|
|
152
|
+
"status": "error",
|
|
153
|
+
"framework": "Google AI",
|
|
154
|
+
"error": str(e)
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
def get_agent_info(self) -> Dict[str, Any]:
|
|
158
|
+
"""Get information about this Google AI agent"""
|
|
159
|
+
return {
|
|
160
|
+
"name": self.name,
|
|
161
|
+
"framework": "Google AI",
|
|
162
|
+
"available": GOOGLE_AI_AVAILABLE,
|
|
163
|
+
"model": self.model,
|
|
164
|
+
"max_tokens": self.max_tokens,
|
|
165
|
+
"temperature": self.temperature,
|
|
166
|
+
"capabilities": [
|
|
167
|
+
"text_generation",
|
|
168
|
+
"conversation",
|
|
169
|
+
"context_window_large",
|
|
170
|
+
"multimodal",
|
|
171
|
+
"streaming",
|
|
172
|
+
"search_integration",
|
|
173
|
+
"tool_use"
|
|
174
|
+
],
|
|
175
|
+
"supported_models": [
|
|
176
|
+
"gemini-1.5-flash",
|
|
177
|
+
"gemini-1.5-pro",
|
|
178
|
+
"gemini-1.0-pro",
|
|
179
|
+
"gemini-pro",
|
|
180
|
+
"gemini-pro-vision"
|
|
181
|
+
],
|
|
182
|
+
"api_integration": "google_generative_ai_sdk"
|
|
183
|
+
}
|