clap-agents 0.1.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clap/__init__.py CHANGED
@@ -1,57 +1,28 @@
- # --- Example content for src/clap/__init__.py ---
-
- # Import key classes/functions from submodules to make them accessible at the top level
-
- # Multi-agent pattern
  from .multiagent_pattern.agent import Agent
  from .multiagent_pattern.team import Team
-
- # ReAct pattern
  from .react_pattern.react_agent import ReactAgent
-
- # Tool pattern
- from .tool_pattern.tool import tool, Tool
  from .tool_pattern.tool_agent import ToolAgent
+ from .tool_pattern.tool import tool, Tool

- # LLM Services (Interface and implementations)
  from .llm_services.base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall
  from .llm_services.groq_service import GroqService
  from .llm_services.google_openai_compat_service import GoogleOpenAICompatService

- from .mcp_client.client import MCPClientManager, SseServerConfig
+ from .embedding.base_embedding import EmbeddingFunctionInterface

+ from .vector_stores.base import VectorStoreInterface, QueryResult

- from .tools.web_search import duckduckgo_search
- from .tools.web_crawler import scrape_url, extract_text_by_query
- from .tools.email_tools import send_email, fetch_recent_emails
+ from .mcp_client.client import MCPClientManager, SseServerConfig

- __all__ = [
-     # Core classes
-     "Agent",
-     "Team",
-     "ReactAgent",
-     "ToolAgent",
-     "Tool",
-     "tool", # The decorator
-
-     # LLM Services
-     "LLMServiceInterface",
-     "StandardizedLLMResponse",
-     "LLMToolCall",
-     "GroqService",
-     "GoogleOpenAICompatService",
+ from .tools.web_search import duckduckgo_search

-     # MCP Client
-     "MCPClientManager",
-     "SseServerConfig", # Expose config type

-     # Selected Tools (example)
+ __all__ = [
+     "Agent", "Team", "ReactAgent", "ToolAgent", "Tool", "tool",
+     "LLMServiceInterface", "StandardizedLLMResponse", "LLMToolCall",
+     "GroqService", "GoogleOpenAICompatService",
+     "EmbeddingFunctionInterface", "SentenceTransformerEmbeddings",
+     "VectorStoreInterface", "QueryResult",
+     "MCPClientManager", "SseServerConfig",
      "duckduckgo_search",
-     # Add others from .tools if desired as part of the core offering
- ]
-
- # You might also want to define a package-level version variable here
- # (though often handled by build tools or version files)
- # __version__ = "0.1.0"
-
- # --- End of src/clap/__init__.py ---
+ ]
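
The reworked top level flattens __all__ and re-exports the new embedding and vector-store abstractions alongside the existing agent classes. A sketch of the imports this makes available, assuming the optional extras behind each symbol are installed:

from clap import (
    Agent, Team, ReactAgent, ToolAgent, Tool, tool,
    LLMServiceInterface, StandardizedLLMResponse, LLMToolCall,
    GroqService, GoogleOpenAICompatService,
    EmbeddingFunctionInterface, VectorStoreInterface, QueryResult,
    MCPClientManager, SseServerConfig,
    duckduckgo_search,
)

Note that __all__ also lists SentenceTransformerEmbeddings, which the imports shown above never bind, so importing that name from the top level may fail unless another import path provides it.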
clap/embedding/__init__.py ADDED
@@ -0,0 +1,21 @@
+ from .base_embedding import EmbeddingFunctionInterface
+
+ __all__ = ["EmbeddingFunctionInterface"]
+
+ try:
+     from .sentence_transformer_embedding import SentenceTransformerEmbeddings
+     __all__.append("SentenceTransformerEmbeddings")
+ except ImportError:
+     pass
+
+ try:
+     from .fastembed_embedding import FastEmbedEmbeddings
+     __all__.append("FastEmbedEmbeddings")
+ except ImportError:
+     pass
+
+ try:
+     from .ollama_embedding import OllamaEmbeddings
+     __all__.append("OllamaEmbeddings")
+ except ImportError:
+     pass
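
The embedding subpackage registers whichever optional backends import cleanly, so __all__ doubles as a runtime feature list. A minimal availability check, sketching how a caller could probe for the optional backends:

import clap.embedding as embedding

print(embedding.__all__)  # e.g. ['EmbeddingFunctionInterface', 'FastEmbedEmbeddings']
if "FastEmbedEmbeddings" in embedding.__all__:
    ef = embedding.FastEmbedEmbeddings()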
clap/embedding/base_embedding.py ADDED
@@ -0,0 +1,28 @@
+ import abc
+ from typing import List, Protocol
+
+ class EmbeddingFunctionInterface(Protocol):
+     """
+     A protocol for embedding functions to ensure they can provide
+     their output dimensionality and embed documents.
+     """
+
+     @abc.abstractmethod
+     def __call__(self, input: List[str]) -> List[List[float]]:
+         """
+         Embeds a list of texts.
+
+         Args:
+             input: A list of document texts.
+
+         Returns:
+             A list of embeddings (list of floats).
+         """
+         ...
+
+     @abc.abstractmethod
+     def get_embedding_dimension(self) -> int:
+         """
+         Returns the dimensionality of the embeddings produced by this function.
+         """
+         ...
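
Because EmbeddingFunctionInterface is a typing Protocol, any object with a structurally matching __call__ and get_embedding_dimension satisfies it; no subclassing is required. A minimal sketch with a toy, hash-based stand-in for a real model:

from typing import List

class ToyEmbeddings:
    # Structurally satisfies EmbeddingFunctionInterface; the vectors are
    # deterministic placeholders, not meaningful embeddings.
    def __call__(self, input: List[str]) -> List[List[float]]:
        return [[float(b) / 255.0 for b in str(hash(t)).encode()[:4]] for t in input]

    def get_embedding_dimension(self) -> int:
        return 4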
clap/embedding/fastembed_embedding.py ADDED
@@ -0,0 +1,75 @@
+ import asyncio
+ import functools
+ from typing import List, Optional, Any, cast
+
+ import anyio
+
+ from .base_embedding import EmbeddingFunctionInterface
+
+ _FASTEMBED_LIB_AVAILABLE = False
+ _FastEmbed_TextEmbedding_Placeholder_Type = Any
+
+ try:
+     from fastembed import TextEmbedding as ActualTextEmbedding
+     _FastEmbed_TextEmbedding_Placeholder_Type = ActualTextEmbedding
+     _FASTEMBED_LIB_AVAILABLE = True
+ except ImportError:
+     pass
+
+ KNOWN_FASTEMBED_DIMENSIONS = {
+     "BAAI/bge-small-en-v1.5": 384,
+     "sentence-transformers/all-MiniLM-L6-v2": 384,
+ }
+ DEFAULT_FASTEMBED_MODEL = "BAAI/bge-small-en-v1.5"
+
+ class FastEmbedEmbeddings(EmbeddingFunctionInterface):
+     _model: _FastEmbed_TextEmbedding_Placeholder_Type
+     _dimension: int
+     DEFAULT_EMBED_BATCH_SIZE = 256
+
+     def __init__(self,
+                  model_name: str = DEFAULT_FASTEMBED_MODEL,
+                  dimension: Optional[int] = None,
+                  embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
+                  **kwargs: Any
+                  ):
+         if not _FASTEMBED_LIB_AVAILABLE:
+             raise ImportError(
+                 "The 'fastembed' library is required to use FastEmbedEmbeddings. "
+                 "Install with 'pip install fastembed' or 'pip install \"clap-agents[qdrant]\"' (if qdrant includes it as an extra)."
+             )
+
+         self.model_name = model_name
+         self.embed_batch_size = embed_batch_size
+
+         if dimension is not None:
+             self._dimension = dimension
+         elif model_name in KNOWN_FASTEMBED_DIMENSIONS:
+             self._dimension = KNOWN_FASTEMBED_DIMENSIONS[model_name]
+         else:
+             raise ValueError(
+                 f"Dimension for fastembed model '{self.model_name}' is unknown. "
+                 "Provide 'dimension' parameter or update KNOWN_FASTEMBED_DIMENSIONS."
+             )
+
+         try:
+             self._model = _FastEmbed_TextEmbedding_Placeholder_Type(model_name=self.model_name, **kwargs)
+         except Exception as e:
+             raise RuntimeError(f"Failed to initialize fastembed model '{self.model_name}': {e}")
+
+     async def __call__(self, input: List[str]) -> List[List[float]]: # Changed to 'input'
+         if not input: return []
+         if not _FASTEMBED_LIB_AVAILABLE: raise RuntimeError("FastEmbed library not available.")
+
+         all_embeddings_list: List[List[float]] = []
+         for i in range(0, len(input), self.embed_batch_size):
+             batch_texts = input[i:i + self.embed_batch_size]
+             if not batch_texts: continue
+             try:
+                 embeddings_iterable = await anyio.to_thread.run_sync(self._model.embed, list(batch_texts))
+                 for emb_np in embeddings_iterable: all_embeddings_list.append(emb_np.tolist())
+             except Exception as e: print(f"Error embedding batch with fastembed: {e}"); raise
+         return all_embeddings_list
+
+     def get_embedding_dimension(self) -> int:
+         return self._dimension
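
FastEmbedEmbeddings.__call__ is a coroutine that pushes the blocking fastembed work onto a worker thread with anyio.to_thread.run_sync, so callers await it. A usage sketch, assuming fastembed is installed and the default model can be downloaded:

import asyncio
from clap.embedding import FastEmbedEmbeddings

async def main():
    ef = FastEmbedEmbeddings()  # defaults to BAAI/bge-small-en-v1.5 (384 dims)
    vectors = await ef(["hello world", "clap agents"])
    print(len(vectors), ef.get_embedding_dimension())  # 2 384

asyncio.run(main())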
clap/embedding/ollama_embedding.py ADDED
@@ -0,0 +1,76 @@
+ import asyncio
+ import functools
+ from typing import List, Optional, Any, cast
+
+ import anyio
+
+ from .base_embedding import EmbeddingFunctionInterface
+
+ _OLLAMA_LIB_AVAILABLE = False
+ _OllamaAsyncClient_Placeholder_Type = Any
+ _OllamaResponseError_Placeholder_Type = type(Exception)
+
+ try:
+     from ollama import AsyncClient as ImportedOllamaAsyncClient
+     from ollama import ResponseError as ImportedOllamaResponseError
+     _OllamaAsyncClient_Placeholder_Type = ImportedOllamaAsyncClient
+     _OllamaResponseError_Placeholder_Type = ImportedOllamaResponseError
+     _OLLAMA_LIB_AVAILABLE = True
+ except ImportError:
+     pass
+
+ KNOWN_OLLAMA_EMBEDDING_DIMENSIONS = {
+     "nomic-embed-text": 768, "mxbai-embed-large": 1024, "all-minilm": 384,
+     "llama3": 4096, "llama3.2:latest": 4096, "nomic-embed-text:latest": 768,
+ }
+ DEFAULT_OLLAMA_EMBED_MODEL = "nomic-embed-text"
+
+ class OllamaEmbeddings(EmbeddingFunctionInterface):
+     _client: _OllamaAsyncClient_Placeholder_Type
+     _model_name: str
+     _dimension: int
+
+     def __init__(self,
+                  model_name: str = DEFAULT_OLLAMA_EMBED_MODEL,
+                  dimension: Optional[int] = None,
+                  ollama_host: str = "http://localhost:11434",
+                  **kwargs: Any):
+         if not _OLLAMA_LIB_AVAILABLE:
+             raise ImportError("The 'ollama' Python library is required. Install with: pip install 'clap-agents[ollama]'")
+
+         self.model_name = model_name
+         self._client = _OllamaAsyncClient_Placeholder_Type(host=ollama_host, **kwargs)
+
+         if dimension is not None: self._dimension = dimension
+         elif model_name in KNOWN_OLLAMA_EMBEDDING_DIMENSIONS:
+             self._dimension = KNOWN_OLLAMA_EMBEDDING_DIMENSIONS[model_name]
+         else:
+             raise ValueError(f"Dimension for Ollama model '{model_name}' unknown. Provide 'dimension' or update KNOWN_OLLAMA_EMBEDDING_DIMENSIONS.")
+         print(f"Initialized OllamaEmbeddings for model '{self.model_name}' (dim: {self._dimension}).")
+
+     async def __call__(self, input: List[str]) -> List[List[float]]:
+         if not input: return []
+         if not _OLLAMA_LIB_AVAILABLE: raise RuntimeError("Ollama library not available.")
+         try:
+             response = await self._client.embed(model=self.model_name, input=input)
+             embeddings_data = response.get("embeddings")
+             if embeddings_data is None and len(input) == 1 and response.get("embedding"):
+                 single_embedding = response.get("embedding")
+                 if isinstance(single_embedding, list) and all(isinstance(x, (int, float)) for x in single_embedding):
+                     embeddings_data = [single_embedding]
+             if not isinstance(embeddings_data, list) or not all(isinstance(e, list) for e in embeddings_data):
+                 raise TypeError(f"Ollama embed returned unexpected format. Expected List[List[float]]. Resp: {response}")
+             return cast(List[List[float]], embeddings_data)
+         except _OllamaResponseError_Placeholder_Type as e:
+             print(f"Ollama API error: {getattr(e, 'error', str(e))} (Status: {getattr(e, 'status_code', 'N/A')})")
+             raise
+         except Exception as e: print(f"Unexpected Ollama embedding error: {e}"); raise
+
+     def get_embedding_dimension(self) -> int: return self._dimension
+
+     async def close(self):
+         if _OLLAMA_LIB_AVAILABLE:
+             if hasattr(self._client, "_client") and hasattr(self._client._client, "is_closed"):
+                 if not self._client._client.is_closed: await self._client._client.aclose()
+             elif hasattr(self._client, 'aclose'): await self._client.aclose()
+         print(f"OllamaEmbeddings: Closed client for {self.model_name}.")
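
OllamaEmbeddings talks to a local Ollama server through the async client, so both embedding and cleanup are awaited. A usage sketch, assuming Ollama is running on the default port and the model has been pulled (ollama pull nomic-embed-text):

import asyncio
from clap.embedding import OllamaEmbeddings

async def main():
    ef = OllamaEmbeddings()  # nomic-embed-text, 768 dims
    vectors = await ef(["some text to embed"])
    print(len(vectors[0]))  # 768
    await ef.close()

asyncio.run(main())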
clap/embedding/sentence_transformer_embedding.py ADDED
@@ -0,0 +1,44 @@
+ from typing import List, Optional, Any
+ from .base_embedding import EmbeddingFunctionInterface
+
+ _ST_LIB_AVAILABLE = False
+ _SentenceTransformer_Placeholder_Type = Any
+
+ try:
+     from sentence_transformers import SentenceTransformer as ImportedSentenceTransformer
+     _SentenceTransformer_Placeholder_Type = ImportedSentenceTransformer
+     _ST_LIB_AVAILABLE = True
+ except ImportError:
+     pass
+
+ class SentenceTransformerEmbeddings(EmbeddingFunctionInterface):
+     model: _SentenceTransformer_Placeholder_Type
+     _dimension: int
+
+     def __init__(self, model_name: str = "all-MiniLM-L6-v2", device: Optional[str] = None):
+         if not _ST_LIB_AVAILABLE:
+             raise ImportError(
+                 "The 'sentence-transformers' library is required to use SentenceTransformerEmbeddings. "
+                 "Install with 'pip install sentence-transformers' or 'pip install \"clap-agents[sentence-transformers]\"'."
+             )
+
+         try:
+             self.model = _SentenceTransformer_Placeholder_Type(model_name, device=device)
+             dim = self.model.get_sentence_embedding_dimension()
+             if dim is None:
+                 dummy_embedding = self.model.encode("test")
+                 dim = len(dummy_embedding)
+             self._dimension = dim
+         except Exception as e:
+             raise RuntimeError(f"Failed to initialize SentenceTransformer model '{model_name}': {e}")
+
+     def __call__(self, input: List[str]) -> List[List[float]]:
+         if not _ST_LIB_AVAILABLE:
+             raise RuntimeError("SentenceTransformers library not available for embedding operation.")
+         embeddings_np = self.model.encode(input, convert_to_numpy=True)
+         return embeddings_np.tolist()
+
+     def get_embedding_dimension(self) -> int:
+         return self._dimension
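
Unlike the FastEmbed and Ollama wrappers, this backend's __call__ is synchronous, since SentenceTransformer.encode runs in-process. A usage sketch, assuming sentence-transformers is installed:

from clap.embedding import SentenceTransformerEmbeddings

ef = SentenceTransformerEmbeddings()  # all-MiniLM-L6-v2, 384 dims
vectors = ef(["a sentence", "another sentence"])
print(len(vectors), ef.get_embedding_dimension())  # 2 384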
clap/llm_services/__init__.py ADDED
@@ -0,0 +1,15 @@
+ # src/clap/llm_services/__init__.py
+ from .base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall
+ from .groq_service import GroqService
+ from .google_openai_compat_service import GoogleOpenAICompatService
+
+ __all__ = [
+     "LLMServiceInterface", "StandardizedLLMResponse", "LLMToolCall",
+     "GroqService", "GoogleOpenAICompatService",
+ ]
+
+ try:
+     from .ollama_service import OllamaOpenAICompatService as OllamaService # Assuming file is ollama_service.py
+     __all__.append("OllamaService")
+ except ImportError:
+     pass
clap/llm_services/base.py CHANGED
@@ -8,7 +8,7 @@ from typing import Any, Dict, List, Optional, Union
  @dataclass
  class LLMToolCall:
      """Represents a tool call requested by the LLM."""
-     id: str # The unique ID for this specific tool call instance
+     id: str
      function_name: str
      function_arguments_json_str: str

@@ -32,7 +32,7 @@ class LLMServiceInterface(abc.ABC):
          messages: List[Dict[str, Any]],
          tools: Optional[List[Dict[str, Any]]] = None,
          tool_choice: str = "auto",
-         # Optional: Add other common configuration parameters if needed later
+         # Optional:
          # temperature: Optional[float] = None,
          # max_tokens: Optional[int] = None,
      ) -> StandardizedLLMResponse:
@@ -61,8 +61,5 @@
          """
          pass

-     # Optional: Add other common methods if needed, e.g., for embedding generation
-     # @abc.abstractmethod
-     # async def get_embedding(self, text: str, model: str) -> List[float]:
-     #     pass
+

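For orientation, the two dataclasses above are the contract every service implements: a response carries optional text plus zero or more standardized tool calls. An illustrative instance (the id, tool name, and arguments are made-up values):

response = StandardizedLLMResponse(
    text_content=None,
    tool_calls=[
        LLMToolCall(
            id="call_abc123",
            function_name="duckduckgo_search",
            function_arguments_json_str='{"query": "clap agents"}',
        )
    ],
)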
clap/llm_services/google_openai_compat_service.py CHANGED
@@ -5,7 +5,6 @@ import json
  import uuid
  from typing import Any, Dict, List, Optional

- # Import the OpenAI library
  try:
      from openai import AsyncOpenAI, OpenAIError
  except ImportError:
@@ -13,10 +12,8 @@ except ImportError:

  from colorama import Fore

- # Import the base interface and response structures
  from .base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall

- # Google's OpenAI-compatible endpoint
  GOOGLE_COMPAT_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"

  class GoogleOpenAICompatService(LLMServiceInterface):
@@ -95,7 +92,7 @@ class GoogleOpenAICompatService(LLMServiceInterface):
              for tc in message.tool_calls:
                  tool_call_id = getattr(tc, 'id', None)
                  if not tool_call_id:
-                     tool_call_id = f"compat_call_{uuid.uuid4().hex[:6]}" # Use uuid here
+                     tool_call_id = f"compat_call_{uuid.uuid4().hex[:6]}"
                      print(f"{Fore.YELLOW}Warning: Tool call from Google compat layer missing ID. Generated fallback: {tool_call_id}{Fore.RESET}")

                  if tc.function:
@@ -119,4 +116,3 @@
          print(f"{Fore.RED}Error calling Google (via OpenAI Compat Layer) LLM API: {e}{Fore.RESET}")
          raise

- # --- END OF agentic_patterns/llm_services/google_openai_compat_service.py ---
clap/llm_services/groq_service.py CHANGED
@@ -1,11 +1,9 @@
- # --- START OF agentic_patterns/llm_services/groq_service.py ---

  from typing import Any, Dict, List, Optional

- from groq import AsyncGroq, GroqError # Import AsyncGroq and potential errors
- from colorama import Fore # For error printing
+ from groq import AsyncGroq, GroqError
+ from colorama import Fore

- # Import the base interface and response structures
  from .base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall

  class GroqService(LLMServiceInterface):
@@ -60,17 +58,15 @@
              api_kwargs["tools"] = tools
              api_kwargs["tool_choice"] = tool_choice

-         # Call the Groq API asynchronously using the correct method name
          response = await self.client.chat.completions.create(**api_kwargs)

-         # Process the response
          message = response.choices[0].message
          text_content = message.content
          tool_calls: List[LLMToolCall] = []

          if message.tool_calls:
              for tc in message.tool_calls:
-                 if tc.function: # Check if function attribute exists
+                 if tc.function:
                      tool_calls.append(
                          LLMToolCall(
                              id=tc.id,
@@ -86,15 +82,11 @@
                      )

          except GroqError as e:
-             # Catch specific Groq errors for potentially better handling
              print(f"{Fore.RED}Groq API Error: {e}{Fore.RESET}")
-             # Re-raise or handle as needed, maybe return an error response?
-             # For now, re-raise to signal failure clearly
+
              raise
          except Exception as e:
              print(f"{Fore.RED}Error calling Groq LLM API: {e}{Fore.RESET}")
-             # Depending on desired behavior, could return a StandardizedLLMResponse
-             # with error info in text_content, or re-raise. Re-raising is cleaner.
+
              raise

- # --- END OF agentic_patterns/llm_services/groq_service.py ---
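
A usage sketch for the cleaned-up service. The constructor is not shown in this diff, so this assumes the default GroqService() wires up an AsyncGroq client that reads GROQ_API_KEY from the environment; the model name is illustrative:

import asyncio
from clap import GroqService

async def main():
    service = GroqService()  # assumption: picks up GROQ_API_KEY
    response = await service.get_llm_response(
        model="llama-3.3-70b-versatile",  # illustrative model name
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(response.text_content)

asyncio.run(main())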
clap/llm_services/ollama_service.py ADDED
@@ -0,0 +1,101 @@
+ import os
+ import json
+ import uuid
+ from typing import Any, Dict, List, Optional
+
+ _OPENAI_LIB_AVAILABLE = False
+ _AsyncOpenAI_Placeholder_Type = Any
+ _OpenAIError_Placeholder_Type = type(Exception)
+
+ try:
+     from openai import AsyncOpenAI as ImportedAsyncOpenAI, OpenAIError as ImportedOpenAIError
+     _AsyncOpenAI_Placeholder_Type = ImportedAsyncOpenAI
+     _OpenAIError_Placeholder_Type = ImportedOpenAIError
+     _OPENAI_LIB_AVAILABLE = True
+ except ImportError:
+     pass
+
+ from colorama import Fore
+ from .base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall
+
+ OLLAMA_OPENAI_COMPAT_BASE_URL = "http://localhost:11434/v1"
+
+ class OllamaOpenAICompatService(LLMServiceInterface):
+     """
+     LLM Service implementation using the OpenAI SDK configured for a
+     local Ollama instance's OpenAI-compatible API.
+     """
+     _client: _AsyncOpenAI_Placeholder_Type
+
+     def __init__(
+         self,
+         base_url: str = OLLAMA_OPENAI_COMPAT_BASE_URL,
+         api_key: str = "ollama", # Dummy
+         default_model: Optional[str] = None
+     ):
+
+         """
+         Initializes the service using the OpenAI client pointed at Ollama.
+
+         Args:
+             base_url: The base URL for the Ollama OpenAI compatibility endpoint.
+             api_key: Dummy API key for the OpenAI client (Ollama ignores it).
+             default_model: Optional default Ollama model name to use if not specified in calls.
+         """
+         if not _OPENAI_LIB_AVAILABLE:
+             raise ImportError(
+                 "The 'openai' Python library is required to use OllamaOpenAICompatService. "
+                 "Install with 'pip install openai' or 'pip install \"clap-agents[ollama]\"' (if ollama extra includes openai)."
+             )
+         self.default_model = default_model
+         try:
+             self._client = _AsyncOpenAI_Placeholder_Type(base_url=base_url, api_key=api_key)
+             # print(f"OllamaService: Initialized OpenAI client for Ollama at {base_url}")
+         except Exception as e:
+             print(f"{Fore.RED}Failed to initialize OpenAI client for Ollama: {e}{Fore.RESET}"); raise
+
+     async def get_llm_response(self, model: str, messages: List[Dict[str, Any]], tools: Optional[List[Dict[str, Any]]] = None, tool_choice: str = "auto", temperature: Optional[float] = None, max_tokens: Optional[int] = None) -> StandardizedLLMResponse:
+         """
+         Sends messages via the OpenAI SDK (to Ollama's endpoint)
+         and returns a standardized response.
+         """
+         if not _OPENAI_LIB_AVAILABLE: raise RuntimeError("OpenAI library not available for Ollama service.")
+         request_model = model or self.default_model
+         if not request_model: raise ValueError("Ollama model name not specified.")
+         try:
+             api_kwargs: Dict[str, Any] = {"messages": messages, "model": request_model}
+             if tools and tool_choice != "none":
+                 api_kwargs["tools"] = tools
+                 if isinstance(tool_choice, dict) or tool_choice in ["auto", "required", "none"]: api_kwargs["tool_choice"] = tool_choice
+             else: api_kwargs["tools"] = None; api_kwargs["tool_choice"] = None
+             if temperature is not None: api_kwargs["temperature"] = temperature
+             if max_tokens is not None: api_kwargs["max_tokens"] = max_tokens
+             api_kwargs = {k: v for k, v in api_kwargs.items() if v is not None}
+             # print(f"OllamaService: Sending request to model '{request_model}'")
+             response = await self._client.chat.completions.create(**api_kwargs)
+             message = response.choices[0].message
+             text_content = message.content
+             tool_calls_std: List[LLMToolCall] = []
+             if message.tool_calls:
+                 for tc in message.tool_calls:
+                     if tc.id and tc.function and tc.function.name and tc.function.arguments is not None:
+                         tool_calls_std.append(LLMToolCall(id=tc.id, function_name=tc.function.name, function_arguments_json_str=tc.function.arguments))
+                     else: print(f"{Fore.YELLOW}Warning: Incomplete tool_call from Ollama: {tc}{Fore.RESET}")
+             return StandardizedLLMResponse(text_content=text_content, tool_calls=tool_calls_std)
+         except _OpenAIError_Placeholder_Type as e: # Use placeholder
+             err_msg = f"Ollama (OpenAI Compat) API Error: {e}"
+             if hasattr(e, 'response') and e.response and hasattr(e.response, 'text'): err_msg += f" - Details: {e.response.text}"
+             print(f"{Fore.RED}{err_msg}{Fore.RESET}")
+             return StandardizedLLMResponse(text_content=err_msg)
+         except Exception as e:
+             print(f"{Fore.RED}Unexpected error with Ollama (OpenAI Compat): {e}{Fore.RESET}")
+             return StandardizedLLMResponse(text_content=f"Ollama Unexpected Error: {e}")
+
+
+     async def close(self):
+         if _OPENAI_LIB_AVAILABLE and hasattr(self, '_client') and self._client:
+             if hasattr(self._client, "close"): await self._client.close() # For openai >1.0
+             elif hasattr(self._client, "_client") and hasattr(self._client._client, "is_closed"): # For httpx client in openai <1.0
+                 if not self._client._client.is_closed: await self._client._client.aclose() # type: ignore
+         # print("OllamaService: Client closed.")
+ # --- END OF FILE ---
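
A usage sketch for the new service, assuming a local Ollama instance on the default port with the named model already pulled (the model name is illustrative):

import asyncio
from clap.llm_services import OllamaService  # alias for OllamaOpenAICompatService

async def main():
    service = OllamaService(default_model="llama3")
    response = await service.get_llm_response(
        model="llama3",
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
    )
    print(response.text_content)
    await service.close()

asyncio.run(main())

Note that on API errors this service returns the error text in text_content rather than raising, unlike GroqService.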
clap/mcp_client/client.py CHANGED
@@ -1,29 +1,20 @@
- # --- START OF agentic_patterns/mcp_client/client.py (SSE Version) ---

  import asyncio
  import json
  from contextlib import AsyncExitStack
  from typing import Any, Dict, List, Optional

- from pydantic import BaseModel, Field, HttpUrl # Import HttpUrl
+ from pydantic import BaseModel, Field, HttpUrl

- # Imports from MCP SDK
  from mcp import ClientSession, types
- # Import sse_client instead of stdio_client
  from mcp.client.sse import sse_client
- # For logging/coloring output
  from colorama import Fore

- # Configuration model for a single SSE server
  class SseServerConfig(BaseModel):
      """Configuration for connecting to an MCP server via SSE."""
-     # Expecting a URL like http://host:port (base URL)
-     # The sse_client will likely append the standard /sse path
      url: HttpUrl = Field(description="The base URL of the MCP SSE server.")
-     # Optional headers if needed for authentication etc.
      headers: Optional[Dict[str, str]] = Field(default=None, description="Optional headers for the connection.")

- # Manager class focused on SSE
  class MCPClientManager:
      """
      Manages connections and interactions with multiple MCP servers via SSE.
@@ -79,24 +70,22 @@
          print(f"{Fore.YELLOW}Attempting to connect to MCP server via SSE: {server_name} at {config.url}{Fore.RESET}")

          # Construct the specific SSE endpoint URL (often /sse)
-         # Assuming the base URL is provided in config.url
-         sse_url = str(config.url).rstrip('/') + "/sse" # Standard convention
+         sse_url = str(config.url).rstrip('/') + "/sse"

          exit_stack = AsyncExitStack()
          try:
-             # Establish SSE transport
-             # Pass headers if provided in config
+
              sse_transport = await exit_stack.enter_async_context(
                  sse_client(url=sse_url, headers=config.headers)
              )
              read_stream, write_stream = sse_transport

-             # Establish MCP session
+
              session = await exit_stack.enter_async_context(
                  ClientSession(read_stream, write_stream)
              )

-             # Initialize session
+
              await session.initialize()

              async with self._manager_lock:
@@ -124,14 +113,16 @@
          await exit_stack.aclose()
          print(f"{Fore.GREEN}Disconnected from MCP server: {server_name}{Fore.RESET}")

+
      async def disconnect_all(self):
-         """Disconnects from all currently connected servers."""
          server_names = list(self.sessions.keys())
-         print(f"{Fore.YELLOW}Disconnecting from all servers: {server_names}{Fore.RESET}")
-         # Use asyncio.gather for concurrent disconnection
-         tasks = [self.disconnect(name) for name in server_names]
-         await asyncio.gather(*tasks, return_exceptions=True) # Handle errors during disconnect
-         print(f"{Fore.GREEN}Finished disconnecting all servers.{Fore.RESET}")
+         print(f"{Fore.YELLOW}MCPClientManager: Disconnecting from all servers ({len(server_names)})...{Fore.RESET}")
+         for name in server_names:
+             try:
+                 await self.disconnect(name)
+             except Exception as e:
+                 print(f"{Fore.RED}MCPClientManager: Error during disconnect of '{name}': {e}{Fore.RESET}")
+         print(f"{Fore.GREEN}MCPClientManager: Finished disconnecting all servers.{Fore.RESET}")

      async def list_remote_tools(self, server_name: str) -> List[types.Tool]:
          """
@@ -190,7 +181,6 @@
              for content_item in result.content:
                  if isinstance(content_item, types.TextContent):
                      response_parts.append(content_item.text)
-                 # Add handling for other content types if needed later
                  elif isinstance(content_item, types.ImageContent):
                      response_parts.append(f"[Image Content Received: {content_item.mimeType}]")
                  elif isinstance(content_item, types.EmbeddedResource):
@@ -204,5 +194,3 @@
          except Exception as e:
              print(f"{Fore.RED}Error calling tool '{tool_name}' on server '{server_name}': {e}{Fore.RESET}")
              raise RuntimeError(f"Failed to call tool '{tool_name}' on '{server_name}'.") from e
-
- # --- END OF agentic_patterns/mcp_client/client.py (SSE Version) ---
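
A usage sketch for the SSE client manager. The constructor is not shown in this diff, so the name-to-config mapping below is an assumption based on the server_name keys used by connect and list_remote_tools:

import asyncio
from clap import MCPClientManager, SseServerConfig

async def main():
    # Assumption: the manager takes a dict of server names to SseServerConfig.
    manager = MCPClientManager({"local": SseServerConfig(url="http://localhost:8000")})
    tools = await manager.list_remote_tools("local")
    print([t.name for t in tools])
    await manager.disconnect_all()

asyncio.run(main())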