agent-mcp 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_mcp/__init__.py +66 -12
- agent_mcp/a2a_protocol.py +316 -0
- agent_mcp/agent_lightning_library.py +214 -0
- agent_mcp/claude_mcp_adapter.py +195 -0
- agent_mcp/google_ai_mcp_adapter.py +183 -0
- agent_mcp/llamaindex_mcp_adapter.py +410 -0
- agent_mcp/microsoft_agent_framework.py +591 -0
- agent_mcp/missing_frameworks.py +435 -0
- agent_mcp/openapi_protocol.py +616 -0
- agent_mcp/payments.py +804 -0
- agent_mcp/pydantic_ai_mcp_adapter.py +628 -0
- agent_mcp/registry.py +768 -0
- agent_mcp/security.py +864 -0
- {agent_mcp-0.1.4.dist-info → agent_mcp-0.1.5.dist-info}/METADATA +174 -52
- {agent_mcp-0.1.4.dist-info → agent_mcp-0.1.5.dist-info}/RECORD +19 -6
- {agent_mcp-0.1.4.dist-info → agent_mcp-0.1.5.dist-info}/WHEEL +1 -1
- agent_mcp-0.1.5.dist-info/entry_points.txt +4 -0
- demos/comprehensive_framework_demo.py +202 -0
- agent_mcp-0.1.4.dist-info/entry_points.txt +0 -2
- {agent_mcp-0.1.4.dist-info → agent_mcp-0.1.5.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,628 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pydantic AI MCP Integration
|
|
3
|
+
FastAPI-like agent framework with native MCP support
|
|
4
|
+
|
|
5
|
+
This module provides integration between Pydantic AI agents and the Model Context Protocol,
|
|
6
|
+
offering type-safe, production-ready agent capabilities.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import json
|
|
11
|
+
import uuid
|
|
12
|
+
from typing import Dict, Any, List, Optional, Callable, Union, Type
|
|
13
|
+
from dataclasses import dataclass, asdict
|
|
14
|
+
from datetime import datetime, timezone
|
|
15
|
+
import logging
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
# Try to import Pydantic AI (optional dependency); fall back to stubs so the
# rest of the module can be imported without it.
try:
    from pydantic_ai import Agent, RunContext
    from pydantic_ai.models import OpenAIModel
    # from pydantic_ai.tools import Tool  # Not available in all versions
    from pydantic_ai.mcp import MCPServerTool, FastMCPToolset
    PYDANTIC_AI_AVAILABLE = True
except ImportError:
    PYDANTIC_AI_AVAILABLE = False
    # Define *every* optionally-imported name, not just Agent/RunContext —
    # OpenAIModel, MCPServerTool and FastMCPToolset are referenced later
    # (e.g. `if FastMCPToolset and MCPServerTool:`) and would otherwise raise
    # NameError instead of taking the intended fallback path.
    Agent = None
    RunContext = None
    OpenAIModel = None
    MCPServerTool = None
    FastMCPToolset = None
    logger.warning("Pydantic AI not available. Install with: pip install pydantic-ai")

from .mcp_transport import HTTPTransport
|
|
33
|
+
|
|
34
|
+
@dataclass
class PydanticAIAgentConfig:
    """Configuration for Pydantic AI agent integration.

    Attributes:
        agent_id: Unique identifier used as the MCP agent id.
        name: Human-readable agent name.
        description: Agent description; also used as the system prompt.
        model: Model spec string, e.g. ``"openai:gpt-4o"``.
        model_type: Provider selector ("openai", "anthropic", "gemini").
        api_key: Provider API key; ``None`` defers to environment config.
        mcp_server_url: Base URL of the MCP server to register with.
        enable_type_validation: Enable Pydantic type validation of tool args.
        enable_retry: Whether failed runs are retried.
        max_retries: Retry budget when ``enable_retry`` is set.
        timeout: Request timeout in seconds.
        debug_mode: Advertised in registration metadata.
    """
    agent_id: str
    name: str
    description: str
    model: str = "openai:gpt-4o"
    model_type: str = "openai"  # "openai", "anthropic", "gemini"
    api_key: Optional[str] = None  # was `str = None`; None is a valid value
    mcp_server_url: str = "https://mcp-server-ixlfhxquwq-ew.a.run.app"
    enable_type_validation: bool = True
    enable_retry: bool = True
    max_retries: int = 3
    timeout: int = 30
    debug_mode: bool = False
|
|
49
|
+
|
|
50
|
+
class PydanticAIMCPAgent:
    """Pydantic AI Agent with MCP capabilities.

    Wraps a ``pydantic_ai.Agent`` and exposes its tools — plus MCP
    integration and agent-management tools — through ``self.mcp_tools``, a
    dict keyed by tool name whose entries hold ``name``, ``description``,
    ``parameters`` and an async ``function``.
    """

    def __init__(
        self,
        config: PydanticAIAgentConfig,
        transport: HTTPTransport = None
    ):
        """Create and wire up the agent.

        Args:
            config: Agent configuration.
            transport: Optional pre-built transport; when omitted one is
                created from ``config.mcp_server_url``.

        Raises:
            ImportError: If pydantic-ai is not installed.
        """
        if not PYDANTIC_AI_AVAILABLE:
            raise ImportError("Pydantic AI is not installed. Install with: pip install pydantic-ai")

        self.config = config
        self.transport = transport or HTTPTransport.from_url(config.mcp_server_url)
        self.mcp_id = config.agent_id
        self.mcp_version = "0.1.0"
        self.mcp_tools = {}

        # Initialize Pydantic AI agent
        self.agent = None
        self._setup_pydantic_agent()

        # Register MCP tools
        self._register_default_mcp_tools()

    def _setup_pydantic_agent(self):
        """Build the underlying Pydantic AI agent from ``self.config``."""
        try:
            # Configure model based on type
            if self.config.model_type == "openai":
                model = OpenAIModel(
                    model_name=self.config.model.replace("openai:", ""),
                    api_key=self.config.api_key
                )
            else:
                # For other model types, you'd need to configure appropriately;
                # currently everything falls back to an OpenAI gpt-4o model.
                model = OpenAIModel(
                    model_name="gpt-4o",
                    api_key=self.config.api_key
                )

            # Create the Pydantic AI agent
            self.agent = Agent(
                name=self.config.name,
                model=model,
                system_prompt=self.config.description,
                retries=self.config.max_retries if self.config.enable_retry else 0
            )

            logger.info(f"Created Pydantic AI agent: {self.config.name}")

        except Exception as e:
            # NOTE(review): on failure self.agent stays None; downstream
            # methods guard with hasattr/if-checks but run_agent does not —
            # confirm callers handle that.
            logger.error(f"Error setting up Pydantic AI agent: {e}")

    def _register_default_mcp_tools(self):
        """Register default MCP tools for Pydantic AI agent."""

        # Pydantic AI specific tools
        self._register_pydantic_tools_as_mcp()

        # MCP integration tools
        self._register_mcp_integration_tools()

        # Agent management tools
        self._register_agent_management_tools()

    def _register_pydantic_tools_as_mcp(self):
        """Mirror the agent's own tools into the MCP tool registry."""

        # Get tools from the agent if available (hasattr also covers
        # self.agent being None after a failed setup).
        if hasattr(self.agent, 'tools') and self.agent.tools:
            for tool_name, tool in self.agent.tools.items():
                self._register_tool_as_mcp(tool_name, tool)

    def _register_tool_as_mcp(self, tool_name: str, tool: Any):
        """Register a Pydantic AI tool as MCP tool.

        Note: annotated ``Any`` because the ``Tool`` import is commented out
        above ("not available in all versions"); annotating ``tool: Tool``
        raised NameError at class-definition time.
        """

        async def pydantic_tool_wrapper(**kwargs):
            """Wrapper to call Pydantic AI tool through MCP."""
            try:
                # Pydantic AI performs type validation of arguments itself,
                # so the call is identical regardless of
                # config.enable_type_validation (the previous if/else had two
                # identical branches).
                result = await tool(**kwargs)

                return {
                    "status": "success",
                    "result": result,
                    "tool_name": tool_name,
                    "framework": "pydantic_ai"
                }
            except Exception as e:
                logger.error(f"Error in Pydantic AI tool {tool_name}: {e}")
                return {
                    "status": "error",
                    "message": str(e),
                    "tool_name": tool_name
                }

        # Extract tool metadata
        tool_info = {
            "name": tool_name,
            "description": getattr(tool, 'description', f"Pydantic AI tool {tool_name}"),
            "parameters": self._extract_pydantic_tool_parameters(tool),
            "function": pydantic_tool_wrapper
        }

        self.mcp_tools[tool_name] = tool_info

    def _extract_pydantic_tool_parameters(self, tool: Any) -> List[Dict[str, Any]]:
        """Extract an MCP parameter schema from a tool's call signature.

        Types are inferred coarsely from annotations; anything unrecognized
        defaults to "string". Returns an empty list when the signature cannot
        be inspected.
        """
        parameters = []

        try:
            # Pydantic AI tools have function signatures we can inspect
            import inspect
            sig = inspect.signature(tool)

            for param_name, param in sig.parameters.items():
                if param_name == 'ctx':  # Skip RunContext parameter
                    continue

                parameter = {
                    "name": param_name,
                    "description": f"Parameter {param_name}",
                    "type": "string",  # Default to string
                    "required": param.default == inspect.Parameter.empty
                }

                # Try to determine type from annotation
                if param.annotation != inspect.Parameter.empty:
                    type_str = str(param.annotation)
                    if "int" in type_str.lower():
                        parameter["type"] = "number"
                    elif "bool" in type_str.lower():
                        parameter["type"] = "boolean"
                    elif "list" in type_str.lower():
                        parameter["type"] = "array"
                    elif "dict" in type_str.lower():
                        parameter["type"] = "object"

                parameters.append(parameter)

        except Exception as e:
            logger.error(f"Error extracting parameters from Pydantic AI tool: {e}")

        return parameters

    def _register_mcp_integration_tools(self):
        """Register MCP-specific integration tools."""

        async def connect_to_mcp_server(server_url: str, tools: List[str] = None) -> Dict[str, Any]:
            """Connect to an external MCP server and use its tools."""
            try:
                if FastMCPToolset and MCPServerTool:
                    # Use Pydantic AI's built-in MCP support.
                    # NOTE(review): mcp_toolset is never used afterwards —
                    # kept in case construction has a connection side effect;
                    # confirm and drop otherwise.
                    mcp_toolset = FastMCPToolset(server_url)

                    # Initialize up front instead of the previous
                    # `'available_tools' in locals()` probe.
                    available_tools = []

                    # Add specific tools if requested
                    if tools:
                        for tool_name in tools:
                            try:
                                tool = MCPServerTool(server_url, tool_name)
                                available_tools.append(tool)
                            except Exception as e:
                                logger.warning(f"Could not load MCP tool {tool_name}: {e}")

                        if available_tools:
                            # Add tools to agent
                            self.agent.tools.update({
                                f"mcp_{tool.name}": tool for tool in available_tools
                            })

                    return {
                        "status": "success",
                        "message": f"Connected to MCP server at {server_url}",
                        "tools_loaded": len(available_tools)
                    }
                else:
                    # Fallback: manual MCP connection
                    import aiohttp
                    async with aiohttp.ClientSession() as session:
                        async with session.get(f"{server_url}/tools") as response:
                            if response.status == 200:
                                tools_data = await response.json()
                                return {
                                    "status": "success",
                                    "server_url": server_url,
                                    "available_tools": tools_data.get("tools", [])
                                }
                            else:
                                return {
                                    "status": "error",
                                    "message": f"Failed to connect to MCP server: HTTP {response.status}"
                                }
            except Exception as e:
                logger.error(f"Error connecting to MCP server {server_url}: {e}")
                return {
                    "status": "error",
                    "message": str(e),
                    "server_url": server_url
                }

        self.mcp_tools["connect_to_mcp_server"] = {
            "name": "connect_to_mcp_server",
            "description": "Connect to an external MCP server and import its tools",
            "parameters": [
                {
                    "name": "server_url",
                    "description": "URL of the MCP server to connect to",
                    "type": "string",
                    "required": True
                },
                {
                    "name": "tools",
                    "description": "List of specific tools to import (optional)",
                    "type": "array",
                    "required": False
                }
            ],
            "function": connect_to_mcp_server
        }

    def _register_agent_management_tools(self):
        """Register agent management and monitoring tools."""

        async def get_agent_info() -> Dict[str, Any]:
            """Get information about this Pydantic AI agent."""
            return {
                "status": "success",
                "agent_info": {
                    "id": self.mcp_id,
                    "name": self.config.name,
                    "description": self.config.description,
                    "framework": "Pydantic AI",
                    "model": self.config.model,
                    "type_validation": self.config.enable_type_validation,
                    "retry_enabled": self.config.enable_retry,
                    "max_retries": self.config.max_retries,
                    "mcp_tools": list(self.mcp_tools.keys())
                }
            }

        self.mcp_tools["get_agent_info"] = {
            "name": "get_agent_info",
            "description": "Get detailed information about this Pydantic AI agent",
            "parameters": [],
            "function": get_agent_info
        }

        async def update_agent_config(
            new_description: str = None,
            enable_type_validation: bool = None,
            max_retries: int = None
        ) -> Dict[str, Any]:
            """Update agent configuration dynamically."""
            try:
                if new_description:
                    self.config.description = new_description
                    # Update agent system prompt
                    self.agent.system_prompt = new_description

                if enable_type_validation is not None:
                    self.config.enable_type_validation = enable_type_validation

                if max_retries is not None:
                    self.config.max_retries = max_retries
                    self.agent.retries = max_retries

                return {
                    "status": "success",
                    "message": "Agent configuration updated",
                    "config": {
                        "description": self.config.description,
                        "type_validation": self.config.enable_type_validation,
                        "max_retries": self.config.max_retries
                    }
                }
            except Exception as e:
                logger.error(f"Error updating agent config: {e}")
                return {
                    "status": "error",
                    "message": str(e)
                }

        self.mcp_tools["update_agent_config"] = {
            "name": "update_agent_config",
            "description": "Update the configuration of this agent",
            "parameters": [
                {
                    "name": "new_description",
                    "description": "New system description",
                    "type": "string",
                    "required": False
                },
                {
                    "name": "enable_type_validation",
                    "description": "Enable Pydantic type validation",
                    "type": "boolean",
                    "required": False
                },
                {
                    "name": "max_retries",
                    "description": "Maximum number of retries",
                    "type": "number",
                    "required": False
                }
            ],
            "function": update_agent_config
        }

    def add_custom_tool(
        self,
        name: str,
        description: str,
        func: Callable,
        parameters: List[Dict[str, Any]] = None
    ):
        """Add a custom tool to the Pydantic AI agent.

        Args:
            name: Tool name (key in the MCP registry and agent tool map).
            description: Human-readable tool description.
            func: The tool implementation. NOTE(review): the wrapper awaits
                it, so it must be an async callable — confirm with callers.
            parameters: Optional MCP parameter schema; defaults to [].
        """

        # Register with Pydantic AI agent
        if self.agent:
            self.agent.tools[name] = func

        # Register as MCP tool
        async def custom_tool_wrapper(**kwargs):
            """Wrapper for custom tool."""
            try:
                result = await func(**kwargs)
                return {
                    "status": "success",
                    "result": result,
                    "tool_name": name,
                    "type": "custom"
                }
            except Exception as e:
                logger.error(f"Error in custom tool {name}: {e}")
                return {
                    "status": "error",
                    "message": str(e),
                    "tool_name": name
                }

        self.mcp_tools[name] = {
            "name": name,
            "description": description,
            "parameters": parameters or [],
            "function": custom_tool_wrapper
        }

    async def run_agent(
        self,
        message: str,
        context: Dict[str, Any] = None
    ) -> Dict[str, Any]:
        """Run the Pydantic AI agent with a message.

        Returns a status dict; on success ``result`` holds the agent output
        (``result.data`` when available, else its string form).
        """
        try:
            # Create run context if provided
            run_context = None
            if context:
                run_context = RunContext(
                    deps=context
                )

            # Run the agent
            result = await self.agent.run(message, ctx=run_context)

            return {
                "status": "success",
                "result": result.data if hasattr(result, 'data') else str(result),
                "message": message,
                "context_used": context is not None,
                "agent_id": self.mcp_id
            }

        except Exception as e:
            logger.error(f"Error running Pydantic AI agent: {e}")
            return {
                "status": "error",
                "message": str(e),
                "message_sent": message
            }

    async def register_with_mcp_server(self) -> Dict[str, Any]:
        """Register this Pydantic AI agent with MCP server."""
        # NOTE(review): registration_data is built but not passed anywhere —
        # the transport is handed the agent object itself. Kept for reference;
        # confirm whether the transport should receive this payload instead.
        registration_data = {
            "agent_id": self.mcp_id,
            "info": {
                "name": self.config.name,
                "description": self.config.description,
                "framework": "Pydantic AI",
                "model": self.config.model,
                "capabilities": {
                    "type_validation": self.config.enable_type_validation,
                    "retry_mechanism": self.config.enable_retry,
                    "max_retries": self.config.max_retries,
                    "tools": list(self.mcp_tools.keys())
                },
                "version": self.mcp_version,
                "features": {
                    "type_safe": True,
                    "fastapi_style": True,
                    "production_ready": True,
                    "debug_mode": self.config.debug_mode
                }
            }
        }

        return await self.transport.register_agent(self)

    async def execute_mcp_tool(self, tool_name: str, **kwargs) -> Dict[str, Any]:
        """Execute an MCP tool by name, or return an error listing the
        available tools."""
        if tool_name in self.mcp_tools:
            tool_func = self.mcp_tools[tool_name]["function"]
            return await tool_func(**kwargs)
        else:
            return {
                "status": "error",
                "message": f"Tool {tool_name} not found",
                "available_tools": list(self.mcp_tools.keys())
            }

    def get_mcp_tool_info(self) -> Dict[str, Any]:
        """Get information about all available MCP tools."""
        return {
            "agent_id": self.mcp_id,
            "framework": "Pydantic AI",
            "model": self.config.model,
            "tools": [
                {
                    "name": tool_info["name"],
                    "description": tool_info["description"],
                    "parameters": tool_info["parameters"]
                }
                for tool_info in self.mcp_tools.values()
            ],
            "total_tools": len(self.mcp_tools)
        }
|
|
490
|
+
|
|
491
|
+
class PydanticAIAgentBridge:
|
|
492
|
+
"""Bridge for Pydantic AI agent integration with MCP"""
|
|
493
|
+
|
|
494
|
+
def __init__(self, mcp_server_url: str = "https://mcp-server-ixlfhxquwq-ew.a.run.app"):
|
|
495
|
+
self.mcp_server_url = mcp_server_url
|
|
496
|
+
self.agents = {}
|
|
497
|
+
|
|
498
|
+
async def create_agent(
|
|
499
|
+
self,
|
|
500
|
+
agent_id: str,
|
|
501
|
+
name: str,
|
|
502
|
+
description: str,
|
|
503
|
+
model: str = "openai:gpt-4o",
|
|
504
|
+
api_key: str = None,
|
|
505
|
+
**kwargs
|
|
506
|
+
) -> PydanticAIMCPAgent:
|
|
507
|
+
"""Create and register a Pydantic AI agent"""
|
|
508
|
+
|
|
509
|
+
config = PydanticAIAgentConfig(
|
|
510
|
+
agent_id=agent_id,
|
|
511
|
+
name=name,
|
|
512
|
+
description=description,
|
|
513
|
+
model=model,
|
|
514
|
+
api_key=api_key,
|
|
515
|
+
mcp_server_url=self.mcp_server_url,
|
|
516
|
+
**kwargs
|
|
517
|
+
)
|
|
518
|
+
|
|
519
|
+
agent = PydanticAIMCPAgent(config)
|
|
520
|
+
|
|
521
|
+
# Register with MCP server
|
|
522
|
+
registration_result = await agent.register_with_mcp_server()
|
|
523
|
+
|
|
524
|
+
if registration_result.get("status") == "registered":
|
|
525
|
+
logger.info(f"Pydantic AI agent {agent_id} registered with MCP server")
|
|
526
|
+
self.agents[agent_id] = agent
|
|
527
|
+
else:
|
|
528
|
+
logger.error(f"Failed to register Pydantic AI agent {agent_id}: {registration_result}")
|
|
529
|
+
|
|
530
|
+
return agent
|
|
531
|
+
|
|
532
|
+
async def create_multi_agent_system(
|
|
533
|
+
self,
|
|
534
|
+
agents_config: List[Dict[str, Any]]
|
|
535
|
+
) -> Dict[str, PydanticAIMCPAgent]:
|
|
536
|
+
"""Create a multi-agent system with Pydantic AI"""
|
|
537
|
+
|
|
538
|
+
agents = {}
|
|
539
|
+
|
|
540
|
+
for config in agents_config:
|
|
541
|
+
agent = await self.create_agent(**config)
|
|
542
|
+
agents[config.get("agent_id", agent.mcp_id)] = agent
|
|
543
|
+
|
|
544
|
+
logger.info(f"Created Pydantic AI multi-agent system with {len(agents)} agents")
|
|
545
|
+
return agents
|
|
546
|
+
|
|
547
|
+
async def setup_agent_collaboration(
|
|
548
|
+
self,
|
|
549
|
+
agent_ids: List[str],
|
|
550
|
+
collaboration_pattern: str = "sequential"
|
|
551
|
+
) -> Dict[str, Any]:
|
|
552
|
+
"""Setup collaboration between Pydantic AI agents"""
|
|
553
|
+
|
|
554
|
+
agents = {aid: self.agents[aid] for aid in agent_ids if aid in self.agents}
|
|
555
|
+
|
|
556
|
+
if len(agents) < 2:
|
|
557
|
+
return {
|
|
558
|
+
"status": "error",
|
|
559
|
+
"message": "Need at least 2 agents for collaboration"
|
|
560
|
+
}
|
|
561
|
+
|
|
562
|
+
# Implement different collaboration patterns
|
|
563
|
+
if collaboration_pattern == "sequential":
|
|
564
|
+
# Agents work in sequence
|
|
565
|
+
async def sequential_collaboration(task: str) -> Dict[str, Any]:
|
|
566
|
+
results = []
|
|
567
|
+
current_input = task
|
|
568
|
+
|
|
569
|
+
for agent_id, agent in agents.items():
|
|
570
|
+
result = await agent.run_agent(current_input)
|
|
571
|
+
results.append({
|
|
572
|
+
"agent_id": agent_id,
|
|
573
|
+
"result": result
|
|
574
|
+
})
|
|
575
|
+
|
|
576
|
+
# Use result as input for next agent
|
|
577
|
+
if result.get("status") == "success":
|
|
578
|
+
current_input = result.get("result", "")
|
|
579
|
+
|
|
580
|
+
return {
|
|
581
|
+
"status": "success",
|
|
582
|
+
"collaboration_type": "sequential",
|
|
583
|
+
"results": results,
|
|
584
|
+
"final_result": current_input
|
|
585
|
+
}
|
|
586
|
+
|
|
587
|
+
elif collaboration_pattern == "parallel":
|
|
588
|
+
# Agents work in parallel
|
|
589
|
+
async def parallel_collaboration(task: str) -> Dict[str, Any]:
|
|
590
|
+
tasks = [
|
|
591
|
+
agent.run_agent(task)
|
|
592
|
+
for agent in agents.values()
|
|
593
|
+
]
|
|
594
|
+
|
|
595
|
+
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
596
|
+
|
|
597
|
+
return {
|
|
598
|
+
"status": "success",
|
|
599
|
+
"collaboration_type": "parallel",
|
|
600
|
+
"results": [
|
|
601
|
+
{
|
|
602
|
+
"agent_id": agent_id,
|
|
603
|
+
"result": result
|
|
604
|
+
}
|
|
605
|
+
for agent_id, result in zip(agent_ids, results)
|
|
606
|
+
]
|
|
607
|
+
}
|
|
608
|
+
|
|
609
|
+
else:
|
|
610
|
+
return {
|
|
611
|
+
"status": "error",
|
|
612
|
+
"message": f"Unknown collaboration pattern: {collaboration_pattern}"
|
|
613
|
+
}
|
|
614
|
+
|
|
615
|
+
# Return the collaboration function
|
|
616
|
+
return {
|
|
617
|
+
"status": "success",
|
|
618
|
+
"collaboration_function": locals().get(f"{collaboration_pattern}_collaboration"),
|
|
619
|
+
"agents": list(agents.keys()),
|
|
620
|
+
"pattern": collaboration_pattern
|
|
621
|
+
}
|
|
622
|
+
|
|
623
|
+
# Export classes for easy importing
# Public API of this module: config, agent wrapper, and bridge/factory.
__all__ = [
    'PydanticAIAgentConfig',
    'PydanticAIMCPAgent',
    'PydanticAIAgentBridge'
]
|