clap-agents 0.1.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- clap/__init__.py +57 -0
- clap/llm_services/__init__.py +0 -0
- clap/llm_services/base.py +68 -0
- clap/llm_services/google_openai_compat_service.py +122 -0
- clap/llm_services/groq_service.py +100 -0
- clap/mcp_client/__init__.py +0 -0
- clap/mcp_client/client.py +208 -0
- clap/multiagent_pattern/__init__.py +0 -0
- clap/multiagent_pattern/agent.py +128 -0
- clap/multiagent_pattern/team.py +154 -0
- clap/react_pattern/__init__.py +0 -0
- clap/react_pattern/react_agent.py +265 -0
- clap/tool_pattern/__init__.py +0 -0
- clap/tool_pattern/tool.py +229 -0
- clap/tool_pattern/tool_agent.py +241 -0
- clap/tools/__init__.py +13 -0
- clap/tools/email_tools.py +230 -0
- clap/tools/web_crawler.py +82 -0
- clap/tools/web_search.py +24 -0
- clap/utils/__init__.py +0 -0
- clap/utils/completions.py +173 -0
- clap/utils/extraction.py +42 -0
- clap/utils/logging.py +28 -0
- clap_agents-0.1.1.dist-info/METADATA +346 -0
- clap_agents-0.1.1.dist-info/RECORD +27 -0
- clap_agents-0.1.1.dist-info/WHEEL +4 -0
- clap_agents-0.1.1.dist-info/licenses/LICENSE +202 -0
clap/__init__.py
ADDED
@@ -0,0 +1,57 @@
# --- Example content for src/clap/__init__.py ---

# Import key classes/functions from submodules to make them accessible at the top level

# Multi-agent pattern
from .multiagent_pattern.agent import Agent
from .multiagent_pattern.team import Team

# ReAct pattern
from .react_pattern.react_agent import ReactAgent

# Tool pattern
from .tool_pattern.tool import tool, Tool
from .tool_pattern.tool_agent import ToolAgent

# LLM Services (Interface and implementations)
from .llm_services.base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall
from .llm_services.groq_service import GroqService
from .llm_services.google_openai_compat_service import GoogleOpenAICompatService

from .mcp_client.client import MCPClientManager, SseServerConfig


from .tools.web_search import duckduckgo_search
from .tools.web_crawler import scrape_url, extract_text_by_query
from .tools.email_tools import send_email, fetch_recent_emails

__all__ = [
    # Core classes
    "Agent",
    "Team",
    "ReactAgent",
    "ToolAgent",
    "Tool",
    "tool",  # The decorator

    # LLM Services
    "LLMServiceInterface",
    "StandardizedLLMResponse",
    "LLMToolCall",
    "GroqService",
    "GoogleOpenAICompatService",

    # MCP Client
    "MCPClientManager",
    "SseServerConfig",  # Expose config type

    # Selected Tools (example)
    "duckduckgo_search",
    # Add others from .tools if desired as part of the core offering
]

# You might also want to define a package-level version variable here
# (though often handled by build tools or version files)
# __version__ = "0.1.0"

# --- End of src/clap/__init__.py ---
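These re-exports flatten the package namespace, so everything in `__all__` resolves at the package root. A minimal sketch of checking that, assuming the wheel is installed:

import clap

# Every name the package advertises should be importable from the root:
for name in clap.__all__:
    assert hasattr(clap, name), f"missing re-export: {name}"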
clap/llm_services/__init__.py
File without changes
clap/llm_services/base.py
ADDED
@@ -0,0 +1,68 @@
import abc
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union


@dataclass
class LLMToolCall:
    """Represents a tool call requested by the LLM."""
    id: str  # The unique ID for this specific tool call instance
    function_name: str
    function_arguments_json_str: str

@dataclass
class StandardizedLLMResponse:
    """A consistent format for LLM responses passed back to the agent."""
    text_content: Optional[str] = None
    tool_calls: List[LLMToolCall] = field(default_factory=list)


class LLMServiceInterface(abc.ABC):
    """
    Abstract Base Class defining the interface for interacting with different LLM backends.
    Concrete implementations (e.g., for Groq, Google GenAI) will inherit from this.
    """

    @abc.abstractmethod
    async def get_llm_response(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: str = "auto",
        # Optional: Add other common configuration parameters if needed later
        # temperature: Optional[float] = None,
        # max_tokens: Optional[int] = None,
    ) -> StandardizedLLMResponse:
        """
        Sends messages to the configured LLM backend and returns a standardized response.

        Args:
            model: The specific model identifier for the backend.
            messages: The chat history in a list of dictionaries format
                      (e.g., [{'role': 'user', 'content': 'Hello'}]).
                      Implementations will need to translate this if their
                      native API uses a different format.
            tools: A list of tool schemas available for the LLM to use,
                   formatted according to the OpenAI/Groq standard
                   `{"type": "function", "function": {...}}`.
                   Implementations will need to translate this if their
                   native API uses a different format.
            tool_choice: How the LLM should use tools (e.g., "auto", "none").

        Returns:
            A StandardizedLLMResponse object containing the text content and/or
            tool calls requested by the LLM.

        Raises:
            Exception: Can raise exceptions if the API call fails.
        """
        pass

    # Optional: Add other common methods if needed, e.g., for embedding generation
    # @abc.abstractmethod
    # async def get_embedding(self, text: str, model: str) -> List[float]:
    #     pass
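Because LLMServiceInterface is a plain ABC with a single coroutine, a stub backend is enough to exercise agents offline. A minimal sketch using only what the file above defines (the EchoService name is invented for illustration):

from typing import Any, Dict, List, Optional

from clap.llm_services.base import LLMServiceInterface, StandardizedLLMResponse

class EchoService(LLMServiceInterface):
    """Toy backend: echoes the last user message and never requests tool calls."""

    async def get_llm_response(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: str = "auto",
    ) -> StandardizedLLMResponse:
        # Echo the most recent user content back as the "model" reply.
        last = messages[-1].get("content", "") if messages else ""
        return StandardizedLLMResponse(text_content=f"[{model}] {last}")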
clap/llm_services/google_openai_compat_service.py
ADDED
@@ -0,0 +1,122 @@
# --- START OF agentic_patterns/llm_services/google_openai_compat_service.py ---

import os
import json
import uuid
from typing import Any, Dict, List, Optional

# Import the OpenAI library
try:
    from openai import AsyncOpenAI, OpenAIError
except ImportError:
    raise ImportError("OpenAI SDK not found. Please install it using: pip install openai")

from colorama import Fore

# Import the base interface and response structures
from .base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall

# Google's OpenAI-compatible endpoint
GOOGLE_COMPAT_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"

class GoogleOpenAICompatService(LLMServiceInterface):
    """
    LLM Service implementation using the OpenAI SDK configured for Google's
    Generative Language API (Gemini models via compatibility layer).
    """

    def __init__(self, api_key: Optional[str] = None, base_url: str = GOOGLE_COMPAT_BASE_URL):
        """
        Initializes the service using the OpenAI client pointed at Google's endpoint.

        Args:
            api_key: Optional Google API key. If None, uses GOOGLE_API_KEY env var.
            base_url: The base URL for the Google compatibility endpoint.
        """
        effective_key = api_key or os.getenv("GOOGLE_API_KEY")
        if not effective_key:
            raise ValueError("Google API Key not provided or found in environment variables (GOOGLE_API_KEY).")

        try:
            self.client = AsyncOpenAI(
                api_key=effective_key,
                base_url=base_url,
            )
        except Exception as e:
            print(f"{Fore.RED}Failed to initialize OpenAI client for Google: {e}{Fore.RESET}")
            raise

    async def get_llm_response(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: str = "auto",
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> StandardizedLLMResponse:
        """
        Sends messages via the OpenAI SDK (to Google's endpoint) and returns a standardized response.

        Args:
            model: The Google model identifier (e.g., "gemini-1.5-flash").
            messages: Chat history in the OpenAI dictionary format.
            tools: Tool schemas in the OpenAI function format.
            tool_choice: Tool choice setting ("auto", "none", etc.).
            temperature: Sampling temperature.
            max_tokens: Max output tokens.

        Returns:
            A StandardizedLLMResponse object.

        Raises:
            OpenAIError: If the API call fails.
            Exception: For other unexpected errors.
        """
        try:
            api_kwargs = {
                "messages": messages,
                "model": model,
                "tool_choice": tool_choice if tools else None,
                "tools": tools if tools else None,
            }
            if temperature is not None: api_kwargs["temperature"] = temperature
            if max_tokens is not None: api_kwargs["max_tokens"] = max_tokens
            api_kwargs = {k: v for k, v in api_kwargs.items() if v is not None}

            response = await self.client.chat.completions.create(**api_kwargs)

            message = response.choices[0].message
            text_content = message.content
            tool_calls: List[LLMToolCall] = []

            if message.tool_calls:
                for tc in message.tool_calls:
                    tool_call_id = getattr(tc, 'id', None)
                    if not tool_call_id:
                        tool_call_id = f"compat_call_{uuid.uuid4().hex[:6]}"  # Use uuid here
                        print(f"{Fore.YELLOW}Warning: Tool call from Google compat layer missing ID. Generated fallback: {tool_call_id}{Fore.RESET}")

                    if tc.function:
                        tool_calls.append(
                            LLMToolCall(
                                id=tool_call_id,
                                function_name=tc.function.name,
                                function_arguments_json_str=tc.function.arguments
                            )
                        )

            return StandardizedLLMResponse(
                text_content=text_content,
                tool_calls=tool_calls
            )

        except OpenAIError as e:
            print(f"{Fore.RED}Google (via OpenAI Compat Layer) API Error: {e}{Fore.RESET}")
            raise
        except Exception as e:
            print(f"{Fore.RED}Error calling Google (via OpenAI Compat Layer) LLM API: {e}{Fore.RESET}")
            raise

# --- END OF agentic_patterns/llm_services/google_openai_compat_service.py ---
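A minimal usage sketch for this service, assuming GOOGLE_API_KEY is set in the environment and that "gemini-1.5-flash" (the docstring's example model) is available on the compatibility endpoint:

import asyncio

from clap import GoogleOpenAICompatService

async def main():
    service = GoogleOpenAICompatService()  # reads GOOGLE_API_KEY from the env
    response = await service.get_llm_response(
        model="gemini-1.5-flash",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(response.text_content)

asyncio.run(main())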
clap/llm_services/groq_service.py
ADDED
@@ -0,0 +1,100 @@
# --- START OF agentic_patterns/llm_services/groq_service.py ---

from typing import Any, Dict, List, Optional

from groq import AsyncGroq, GroqError  # Import AsyncGroq and potential errors
from colorama import Fore  # For error printing

# Import the base interface and response structures
from .base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall

class GroqService(LLMServiceInterface):
    """LLM Service implementation for the Groq API."""

    def __init__(self, client: Optional[AsyncGroq] = None):
        """
        Initializes the Groq service.

        Args:
            client: An optional pre-configured AsyncGroq client.
                    If None, a new client will be created using environment variables.
        """
        self.client = client or AsyncGroq()
        # Add any other Groq-specific initialization here if needed

    async def get_llm_response(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: str = "auto",
        # Add other relevant Groq parameters if desired, e.g., temperature, max_tokens
        # temperature: Optional[float] = None,
        # max_tokens: Optional[int] = None,
    ) -> StandardizedLLMResponse:
        """
        Sends messages to the Groq API and returns a standardized response.

        Args:
            model: The Groq model identifier (e.g., "llama-3.3-70b-versatile").
            messages: Chat history in the OpenAI/Groq dictionary format.
            tools: Tool schemas in the OpenAI/Groq function format.
            tool_choice: Tool choice setting ("auto", "none", etc.).

        Returns:
            A StandardizedLLMResponse object.

        Raises:
            GroqError: If the API call fails.
            Exception: For other unexpected errors.
        """
        try:
            api_kwargs = {
                "messages": messages,
                "model": model,
                # Pass other parameters if added to method signature
                # "temperature": temperature,
                # "max_tokens": max_tokens,
            }
            if tools:
                api_kwargs["tools"] = tools
                api_kwargs["tool_choice"] = tool_choice

            # Call the Groq API asynchronously using the correct method name
            response = await self.client.chat.completions.create(**api_kwargs)

            # Process the response
            message = response.choices[0].message
            text_content = message.content
            tool_calls: List[LLMToolCall] = []

            if message.tool_calls:
                for tc in message.tool_calls:
                    if tc.function:  # Check if function attribute exists
                        tool_calls.append(
                            LLMToolCall(
                                id=tc.id,
                                function_name=tc.function.name,
                                function_arguments_json_str=tc.function.arguments
                            )
                        )

            # Return the standardized response
            return StandardizedLLMResponse(
                text_content=text_content,
                tool_calls=tool_calls
            )

        except GroqError as e:
            # Catch specific Groq errors for potentially better handling
            print(f"{Fore.RED}Groq API Error: {e}{Fore.RESET}")
            # Re-raise or handle as needed, maybe return an error response?
            # For now, re-raise to signal failure clearly
            raise
        except Exception as e:
            print(f"{Fore.RED}Error calling Groq LLM API: {e}{Fore.RESET}")
            # Depending on desired behavior, could return a StandardizedLLMResponse
            # with error info in text_content, or re-raise. Re-raising is cleaner.
            raise

# --- END OF agentic_patterns/llm_services/groq_service.py ---
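A sketch of the tool-calling path, assuming GROQ_API_KEY is set and using the docstring's example model; the get_weather schema is a made-up illustration, not a tool shipped with the package:

import asyncio

from clap import GroqService

WEATHER_TOOL = {
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool, for illustration only
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

async def main():
    service = GroqService()  # AsyncGroq() reads GROQ_API_KEY from the env
    response = await service.get_llm_response(
        model="llama-3.3-70b-versatile",
        messages=[{"role": "user", "content": "What's the weather in Paris?"}],
        tools=[WEATHER_TOOL],
    )
    # Inspect any tool calls the model requested in the standardized format.
    for call in response.tool_calls:
        print(call.id, call.function_name, call.function_arguments_json_str)

asyncio.run(main())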
clap/mcp_client/__init__.py
File without changes
clap/mcp_client/client.py
ADDED
@@ -0,0 +1,208 @@
# --- START OF agentic_patterns/mcp_client/client.py (SSE Version) ---

import asyncio
import json
from contextlib import AsyncExitStack
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field, HttpUrl  # Import HttpUrl

# Imports from MCP SDK
from mcp import ClientSession, types
# Import sse_client instead of stdio_client
from mcp.client.sse import sse_client
# For logging/coloring output
from colorama import Fore

# Configuration model for a single SSE server
class SseServerConfig(BaseModel):
    """Configuration for connecting to an MCP server via SSE."""
    # Expecting a URL like http://host:port (base URL)
    # The sse_client will likely append the standard /sse path
    url: HttpUrl = Field(description="The base URL of the MCP SSE server.")
    # Optional headers if needed for authentication etc.
    headers: Optional[Dict[str, str]] = Field(default=None, description="Optional headers for the connection.")

# Manager class focused on SSE
class MCPClientManager:
    """
    Manages connections and interactions with multiple MCP servers via SSE.

    Handles connecting, disconnecting, listing tools, and calling tools on
    configured MCP servers accessible over HTTP/S.
    """

    def __init__(self, server_configs: Dict[str, SseServerConfig]):
        """
        Initializes the manager with SSE server configurations.

        Args:
            server_configs: A dictionary where keys are logical server names
                            and values are SseServerConfig objects.
        """
        if not isinstance(server_configs, dict):
            raise TypeError("server_configs must be a dictionary.")
        self.server_configs = server_configs
        self.sessions: Dict[str, ClientSession] = {}
        self.exit_stacks: Dict[str, AsyncExitStack] = {}
        self._connect_locks: Dict[str, asyncio.Lock] = {
            name: asyncio.Lock() for name in server_configs
        }
        self._manager_lock = asyncio.Lock()  # General lock for manager state

    async def _ensure_connected(self, server_name: str):
        """
        Ensures a connection via SSE to the specified server is active.

        Args:
            server_name: The logical name of the server to connect to.

        Raises:
            ValueError: If the server configuration is not found or URL is invalid.
            RuntimeError: If connection or initialization fails.
        """
        if server_name in self.sessions:
            return

        connect_lock = self._connect_locks.get(server_name)
        if not connect_lock:
            raise ValueError(f"Configuration or lock for server '{server_name}' not found.")

        async with connect_lock:
            if server_name in self.sessions:
                return

            config = self.server_configs.get(server_name)
            if not config:
                raise ValueError(f"Configuration for server '{server_name}' not found.")

            print(f"{Fore.YELLOW}Attempting to connect to MCP server via SSE: {server_name} at {config.url}{Fore.RESET}")

            # Construct the specific SSE endpoint URL (often /sse)
            # Assuming the base URL is provided in config.url
            sse_url = str(config.url).rstrip('/') + "/sse"  # Standard convention

            exit_stack = AsyncExitStack()
            try:
                # Establish SSE transport
                # Pass headers if provided in config
                sse_transport = await exit_stack.enter_async_context(
                    sse_client(url=sse_url, headers=config.headers)
                )
                read_stream, write_stream = sse_transport

                # Establish MCP session
                session = await exit_stack.enter_async_context(
                    ClientSession(read_stream, write_stream)
                )

                # Initialize session
                await session.initialize()

                async with self._manager_lock:
                    self.sessions[server_name] = session
                    self.exit_stacks[server_name] = exit_stack
                print(f"{Fore.GREEN}Successfully connected to MCP server via SSE: {server_name}{Fore.RESET}")

            except Exception as e:
                await exit_stack.aclose()
                print(f"{Fore.RED}Failed to connect to MCP server '{server_name}' via SSE: {e}{Fore.RESET}")
                raise RuntimeError(f"SSE connection to '{server_name}' failed.") from e

    async def disconnect(self, server_name: str):
        """
        Disconnects from a specific server and cleans up resources.

        Args:
            server_name: The logical name of the server to disconnect from.
        """
        async with self._manager_lock:
            if server_name in self.sessions:
                print(f"{Fore.YELLOW}Disconnecting from MCP server: {server_name}...{Fore.RESET}")
                exit_stack = self.exit_stacks.pop(server_name)
                del self.sessions[server_name]
                await exit_stack.aclose()
                print(f"{Fore.GREEN}Disconnected from MCP server: {server_name}{Fore.RESET}")

    async def disconnect_all(self):
        """Disconnects from all currently connected servers."""
        server_names = list(self.sessions.keys())
        print(f"{Fore.YELLOW}Disconnecting from all servers: {server_names}{Fore.RESET}")
        # Use asyncio.gather for concurrent disconnection
        tasks = [self.disconnect(name) for name in server_names]
        await asyncio.gather(*tasks, return_exceptions=True)  # Handle errors during disconnect
        print(f"{Fore.GREEN}Finished disconnecting all servers.{Fore.RESET}")

    async def list_remote_tools(self, server_name: str) -> List[types.Tool]:
        """
        Lists tools available on a specific connected SSE server.

        Args:
            server_name: The logical name of the server.

        Returns:
            A list of mcp.types.Tool objects provided by the server.
        """
        await self._ensure_connected(server_name)
        session = self.sessions.get(server_name)
        if not session:
            raise RuntimeError(f"Failed to get session for '{server_name}' after ensuring connection.")

        try:
            print(f"{Fore.CYAN}Listing tools for server: {server_name}...{Fore.RESET}")
            tool_list_result = await session.list_tools()
            print(f"{Fore.CYAN}Found {len(tool_list_result.tools)} tools on {server_name}.{Fore.RESET}")
            return tool_list_result.tools
        except Exception as e:
            print(f"{Fore.RED}Error listing tools for server '{server_name}': {e}{Fore.RESET}")
            raise RuntimeError(f"Failed to list tools for '{server_name}'.") from e

    async def call_remote_tool(
        self, server_name: str, tool_name: str, arguments: Dict[str, Any]
    ) -> str:
        """
        Calls a tool on a specific connected SSE server.

        Args:
            server_name: The logical name of the server.
            tool_name: The name of the tool to call.
            arguments: A dictionary of arguments for the tool.

        Returns:
            A string representation of the tool's result content.
        """
        await self._ensure_connected(server_name)
        session = self.sessions.get(server_name)
        if not session:
            raise RuntimeError(f"Failed to get session for '{server_name}' after ensuring connection.")

        print(f"{Fore.CYAN}Calling remote tool '{tool_name}' on server '{server_name}' with args: {arguments}{Fore.RESET}")
        try:
            result: types.CallToolResult = await session.call_tool(tool_name, arguments)

            if result.isError:
                error_content = result.content[0] if result.content else None
                error_text = getattr(error_content, 'text', 'Unknown tool error')
                print(f"{Fore.RED}MCP Tool '{tool_name}' on server '{server_name}' returned an error: {error_text}{Fore.RESET}")
                raise RuntimeError(f"Tool call error on {server_name}.{tool_name}: {error_text}")
            else:
                response_parts = []
                for content_item in result.content:
                    if isinstance(content_item, types.TextContent):
                        response_parts.append(content_item.text)
                    # Add handling for other content types if needed later
                    elif isinstance(content_item, types.ImageContent):
                        response_parts.append(f"[Image Content Received: {content_item.mimeType}]")
                    elif isinstance(content_item, types.EmbeddedResource):
                        response_parts.append(f"[Embedded Resource Received: {content_item.resource.uri}]")
                    else:
                        response_parts.append(f"[Unsupported content type: {getattr(content_item, 'type', 'unknown')}]")
                combined_response = "\n".join(response_parts)
                print(f"{Fore.GREEN}Tool '{tool_name}' result from '{server_name}': {combined_response[:100]}...{Fore.RESET}")
                return combined_response

        except Exception as e:
            print(f"{Fore.RED}Error calling tool '{tool_name}' on server '{server_name}': {e}{Fore.RESET}")
            raise RuntimeError(f"Failed to call tool '{tool_name}' on '{server_name}'.") from e

# --- END OF agentic_patterns/mcp_client/client.py (SSE Version) ---
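A usage sketch for the manager; the server name, URL, and tool name below are placeholders for whatever MCP SSE server you actually run, not values shipped with the package:

import asyncio

from clap import MCPClientManager, SseServerConfig

async def main():
    manager = MCPClientManager(
        {"local": SseServerConfig(url="http://localhost:8000")}  # placeholder server
    )
    try:
        tools = await manager.list_remote_tools("local")  # connects lazily on first use
        print([t.name for t in tools])
        # Call one of the listed tools (name and arguments are placeholders):
        result = await manager.call_remote_tool("local", "add", {"a": 1, "b": 2})
        print(result)
    finally:
        await manager.disconnect_all()

asyncio.run(main())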
clap/multiagent_pattern/__init__.py
File without changes