clap-agents 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clap/embedding/fastembed_embedding.py +1 -1
- clap/llm_services/__init__.py +0 -1
- clap/llm_services/base.py +0 -1
- clap/llm_services/ollama_service.py +8 -0
- clap/mcp_client/client.py +1 -1
- clap/multiagent_pattern/agent.py +3 -1
- clap/tool_pattern/tool.py +2 -0
- {clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/METADATA +1 -1
- {clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/RECORD +11 -11
- {clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/WHEEL +0 -0
- {clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/licenses/LICENSE +0 -0
clap/embedding/fastembed_embedding.py
CHANGED
@@ -57,7 +57,7 @@ class FastEmbedEmbeddings(EmbeddingFunctionInterface):
         except Exception as e:
             raise RuntimeError(f"Failed to initialize fastembed model '{self.model_name}': {e}")
 
-    async def __call__(self, input: List[str]) -> List[List[float]]:
+    async def __call__(self, input: List[str]) -> List[List[float]]:
         if not input: return []
         if not _FASTEMBED_LIB_AVAILABLE: raise RuntimeError("FastEmbed library not available.")
 
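The __call__ shown above is the async embedding entry point: it takes a list of strings and returns one float vector per string. A minimal usage sketch (the constructor argument and the model name are assumptions for illustration, not taken from this diff):

import asyncio
from clap.embedding.fastembed_embedding import FastEmbedEmbeddings

async def main():
    # Assumed constructor keyword; the class exposes self.model_name per the error message above.
    embedder = FastEmbedEmbeddings(model_name="BAAI/bge-small-en-v1.5")
    vectors = await embedder(["hello world", "clap agents"])  # -> List[List[float]]
    print(len(vectors), len(vectors[0]))

asyncio.run(main())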
clap/llm_services/__init__.py
CHANGED
clap/llm_services/base.py
CHANGED
@@ -32,7 +32,6 @@ class LLMServiceInterface(abc.ABC):
         messages: List[Dict[str, Any]],
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: str = "auto",
-        # Optional:
         # temperature: Optional[float] = None,
         # max_tokens: Optional[int] = None,
     ) -> StandardizedLLMResponse:
clap/llm_services/ollama_service.py
CHANGED
@@ -64,27 +64,35 @@ class OllamaOpenAICompatService(LLMServiceInterface):
         if not request_model: raise ValueError("Ollama model name not specified.")
         try:
             api_kwargs: Dict[str, Any] = {"messages": messages, "model": request_model}
+
             if tools and tool_choice != "none":
                 api_kwargs["tools"] = tools
                 if isinstance(tool_choice, dict) or tool_choice in ["auto", "required", "none"]: api_kwargs["tool_choice"] = tool_choice
             else: api_kwargs["tools"] = None; api_kwargs["tool_choice"] = None
+
             if temperature is not None: api_kwargs["temperature"] = temperature
             if max_tokens is not None: api_kwargs["max_tokens"] = max_tokens
             api_kwargs = {k: v for k, v in api_kwargs.items() if v is not None}
             # print(f"OllamaService: Sending request to model '{request_model}'")
             response = await self._client.chat.completions.create(**api_kwargs)
+
             message = response.choices[0].message
+
             text_content = message.content
             tool_calls_std: List[LLMToolCall] = []
+
             if message.tool_calls:
                 for tc in message.tool_calls:
                     if tc.id and tc.function and tc.function.name and tc.function.arguments is not None:
                         tool_calls_std.append(LLMToolCall(id=tc.id, function_name=tc.function.name, function_arguments_json_str=tc.function.arguments))
                     else: print(f"{Fore.YELLOW}Warning: Incomplete tool_call from Ollama: {tc}{Fore.RESET}")
+
             return StandardizedLLMResponse(text_content=text_content, tool_calls=tool_calls_std)
+
         except _OpenAIError_Placeholder_Type as e: # Use placeholder
             err_msg = f"Ollama (OpenAI Compat) API Error: {e}"
             if hasattr(e, 'response') and e.response and hasattr(e.response, 'text'): err_msg += f" - Details: {e.response.text}"
+
             print(f"{Fore.RED}{err_msg}{Fore.RESET}")
             return StandardizedLLMResponse(text_content=err_msg)
         except Exception as e:
clap/mcp_client/client.py
CHANGED
@@ -39,7 +39,7 @@ class MCPClientManager:
         self._connect_locks: Dict[str, asyncio.Lock] = {
             name: asyncio.Lock() for name in server_configs
         }
-        self._manager_lock = asyncio.Lock()
+        self._manager_lock = asyncio.Lock()
 
     async def _ensure_connected(self, server_name: str):
         """
clap/multiagent_pattern/agent.py
CHANGED
@@ -74,6 +74,7 @@ class Agent:
         mcp_server_names: Optional[List[str]] = None,
         vector_store: Optional[VectorStoreInterface] = None,
         parallel_tool_calls: bool = True ,
+        **kwargs
         # embedding_function: Optional[EmbeddingFunction] = None,
 
     ):
@@ -86,6 +87,7 @@ class Agent:
         self.local_tools = tools or []
 
         self.vector_store = vector_store
+        self.react_agent_kwargs = kwargs
         # self.embedding_function = embedding_function
 
         llm_service_instance = llm_service or GroqService()
@@ -193,7 +195,7 @@ class Agent:
         self.task_description = original_task_description
 
         print(f"Agent {self.name}: Running ReactAgent...")
-        raw_output = await self.react_agent.run(user_msg=msg)
+        raw_output = await self.react_agent.run(user_msg=msg,**self.react_agent_kwargs)
         output_data = {"output": raw_output}
 
         print(f"Agent {self.name}: Passing context to {len(self.dependents)} dependents...")
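This is the functional change of the release: Agent.__init__ now accepts arbitrary keyword arguments, stores them as self.react_agent_kwargs, and forwards them to self.react_agent.run(...) on every run. A minimal sketch of the pass-through (max_rounds is a hypothetical keyword standing in for whatever ReactAgent.run actually accepts, and the remaining constructor arguments are abbreviated):

from clap.multiagent_pattern.agent import Agent

agent = Agent(
    name="researcher",
    task_description="Summarize recent work on tool-calling agents.",
    # ... other constructor arguments as required by your setup ...
    max_rounds=5,  # unrecognized keywords land in **kwargs and are forwarded to react_agent.run()
)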
clap/tool_pattern/tool.py
CHANGED
@@ -16,9 +16,11 @@ def get_fn_signature(fn: Callable) -> dict:
     sig = inspect.signature(fn)
     for name, type_hint in fn.__annotations__.items():
         if name == "return": continue
+
         param_type_name = getattr(type_hint, "__name__", str(type_hint))
         schema_type = type_mapping.get(param_type_name.lower(), "string")
         parameters["properties"][name] = {"type": schema_type}
+
         if sig.parameters[name].default is inspect.Parameter.empty:
             parameters["required"].append(name)
     if not parameters.get("required"):
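For context, get_fn_signature builds a JSON-schema-style parameters block from a tool function's type hints: each annotated parameter becomes a property whose type comes from type_mapping (falling back to "string"), and only parameters without defaults are marked required. An illustrative sketch, assuming type_mapping maps int to "integer":

def add(a: int, b: int = 0) -> int:
    """Add two integers."""
    return a + b

# get_fn_signature(add) would then contain a parameters block along these lines:
# {"properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
#  "required": ["a"]}   # b has a default, so it is not listed as required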
{clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clap-agents
-Version: 0.3.0
+Version: 0.3.1
 Summary: A Python framework for building cognitive agentic patterns including ReAct agents, Multi-Agent Teams, native tool calling, and MCP client integration.
 Project-URL: Homepage, https://github.com/MaitreyaM/CLAP-AGENTS.git
 Project-URL: Repository, https://github.com/MaitreyaM/CLAP-AGENTS.git
{clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/RECORD
CHANGED
@@ -1,23 +1,23 @@
 clap/__init__.py,sha256=rxxESl-xpSZpM4ZIh-GvHYF74CkQdbe-dSLvhMC_2dQ,1069
 clap/embedding/__init__.py,sha256=PqnqcSiA_JwEvn69g2DCQHdsffL2l4GNEKo0fAtCqbs,520
 clap/embedding/base_embedding.py,sha256=0SYicQ-A-rSDqHoFK0IOrRQe0cisOl8OBnis6V43Chs,696
-clap/embedding/fastembed_embedding.py,sha256=
+clap/embedding/fastembed_embedding.py,sha256=fUXCRyctPxwinAG2JCkdmlARU945z7dEsXScIkpqwb0,2862
 clap/embedding/ollama_embedding.py,sha256=s7IYFs4BuM114Md1cqxim5WzCwCjbEJ48wAZZOgR7KQ,3702
 clap/embedding/sentence_transformer_embedding.py,sha256=0RAqGxDpjZVwerOLmVirqqnCwC07kHdfAPiy2fgOSCk,1798
-clap/llm_services/__init__.py,sha256=
-clap/llm_services/base.py,sha256=
+clap/llm_services/__init__.py,sha256=IBvWmE99PGxHq5Dt4u0G1erZSV80QEC981UULnrD6Tk,496
+clap/llm_services/base.py,sha256=-XKWd6gLAXedIhUUqM_f7sqkVxdfifP2j-BwmF0hUkI,2183
 clap/llm_services/google_openai_compat_service.py,sha256=vN0osfCS6DIFHsCCiB03mKUp4n7SkIJd2PAypBAnC30,4552
 clap/llm_services/groq_service.py,sha256=pcTp24_NgLfp3bGaABzli_Sey7wZsXvFI74VjZ1GvkQ,3051
-clap/llm_services/ollama_service.py,sha256=
+clap/llm_services/ollama_service.py,sha256=Qh3W2fb-NDMVB8DS9o3q4jisZvK9U6s-r4ATNbAwVLE,5333
 clap/mcp_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clap/mcp_client/client.py,sha256=
+clap/mcp_client/client.py,sha256=IVwtkOAEN7LRaFqw14HQDC7n6OTZ3ciVYWj-lWQddE0,8681
 clap/multiagent_pattern/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clap/multiagent_pattern/agent.py,sha256=
+clap/multiagent_pattern/agent.py,sha256=SDebwxaUquFUN_MMCGSYKLIIA3a6tVIls-IGrnEKqJI,8617
 clap/multiagent_pattern/team.py,sha256=t8Xru3fVPblw75pyuPT1wmI3jlsrZHD_GKAW5APbpFg,7966
 clap/react_pattern/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 clap/react_pattern/react_agent.py,sha256=v9JYyIwv0vzkOl6kq8Aua7u50rJyU02NomCHTjt24vo,24596
 clap/tool_pattern/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-clap/tool_pattern/tool.py,sha256=
+clap/tool_pattern/tool.py,sha256=Y4Uvu5FsCA3S3BZfjM3OOKfhA-o5Q9SfhCzGWfGIQ6o,5974
 clap/tool_pattern/tool_agent.py,sha256=VTQv9DNU16zgINZKVcX5oDw1lPfw5Y_8bUnW6wad2vE,14439
 clap/tools/__init__.py,sha256=8UMtxaPkq-pEOD2C0Qm4WZoyJpMxEOEQSDhWNLwAAiI,822
 clap/tools/email_tools.py,sha256=18aAlbjcSaOzpf9R3H-EGeRsqL5gdzmcJJcW619xOHU,9729
@@ -33,7 +33,7 @@ clap/vector_stores/__init__.py,sha256=H3w5jLdQFbXArVgiidy4RlAalM8a6LAiMlAX0Z-2v7
 clap/vector_stores/base.py,sha256=nvk8J1oNG3OKFhJfxBGFyVeh9YxoDs9RkB_iOzPBm1w,2853
 clap/vector_stores/chroma_store.py,sha256=vwkWWGxPwuW45T1PS6D44dXhDG9U_KZWjrMZCOkEXsA,7242
 clap/vector_stores/qdrant_store.py,sha256=-SwMTb0yaGngpQ9AddDzDIt3x8GZevlFT-0FMkWD28I,9923
-clap_agents-0.3.
-clap_agents-0.3.
-clap_agents-0.3.
-clap_agents-0.3.
+clap_agents-0.3.1.dist-info/METADATA,sha256=4HKAihZchNLte3fWinmWBZAKjDtkpIMYJYLyD6jgfUs,30573
+clap_agents-0.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+clap_agents-0.3.1.dist-info/licenses/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+clap_agents-0.3.1.dist-info/RECORD,,
{clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/WHEEL
File without changes
{clap_agents-0.3.0.dist-info → clap_agents-0.3.1.dist-info}/licenses/LICENSE
File without changes