clap-agents 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,20 +1,50 @@
 
-
 import asyncio
 import json
 from textwrap import dedent
-from typing import Any, List, Optional
+from typing import Any, List, Optional, Dict
 
 from clap.llm_services.base import LLMServiceInterface
-from clap.llm_services.groq_service import GroqService
+from clap.llm_services.groq_service import GroqService
+
 from clap.mcp_client.client import MCPClientManager
-from clap.react_pattern.react_agent import ReactAgent
+
 from clap.tool_pattern.tool import Tool
 
+from clap.vector_stores.base import VectorStoreInterface
+
+
+
+
+VECTOR_QUERY_TOOL_SCHEMA = {
+    "type": "function",
+    "function": {
+        "name": "vector_query",
+        "description": "Queries the configured vector store for relevant information based on the input query text. Use this to find context from stored documents before answering complex questions or summarizing information.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {
+                    "type": "string",
+                    "description": "The natural language query text to search for relevant documents."
+                },
+                "top_k": {
+                    "type": "integer",
+                    "description": "The maximum number of relevant document chunks to retrieve. Defaults to 3.",
+                    "default": 3
+                },
+
+            },
+            "required": ["query"]
+        }
+    }
+}
+
+
 class Agent:
     """
     Represents an AI agent using a configurable LLM Service.
-    Can work in a team and use local or remote MCP tools.
+    Can work in a team, use local/remote MCP tools, and optionally a vector store.
 
     Args:
         name (str): Agent name.
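The VECTOR_QUERY_TOOL_SCHEMA added above is an OpenAI-style function-calling schema: the model is expected to emit a vector_query tool call whose JSON arguments contain a required query string and an optional top_k integer (default 3). A minimal sketch of dispatching such a call is shown below; the handler name and the vector store's query method are assumptions for illustration, since the VectorStoreInterface API itself is not part of this diff.

    import json

    # Hypothetical arguments payload as an LLM would emit it for this schema.
    arguments_json = json.dumps({"query": "What do the stored docs say about churn?", "top_k": 3})

    async def handle_vector_query(vector_store, arguments_json: str) -> str:
        """Illustrative dispatcher for a 'vector_query' tool call; not CLAP's actual implementation."""
        args = json.loads(arguments_json)
        query = args["query"]          # required by the schema
        top_k = args.get("top_k", 3)   # optional, schema default is 3
        # Assumed method name/signature; the real VectorStoreInterface query API is not shown here.
        results = await vector_store.aquery(query_texts=[query], n_results=top_k)
        return json.dumps(results, default=str)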
@@ -22,22 +52,28 @@ class Agent:
         task_description (str): Description of the agent's specific task.
         task_expected_output (str): Expected output format.
         tools (Optional[List[Tool]]): Local tools for the agent.
-        llm (str): Model identifier string (passed to llm_service).
+        model (str): Model identifier string (passed to llm_service).
         llm_service (Optional[LLMServiceInterface]): Service for LLM calls (defaults to GroqService).
         mcp_manager (Optional[MCPClientManager]): Shared MCP client manager.
         mcp_server_names (Optional[List[str]]): MCP servers this agent uses.
+        vector_store (Optional[VectorStoreInterface]): Vector store instance for RAG.
+        # embedding_function(Optional[EmbeddingFunction]): EF if needed by agent.
+
     """
     def __init__(
         self,
         name: str,
         backstory: str,
-        task_description: str,
+        task_description: str = "No specific task assigned; await runtime user message.",
         task_expected_output: str = "",
         tools: Optional[List[Tool]] = None,
         model: str = "llama-3.3-70b-versatile",
         llm_service: Optional[LLMServiceInterface] = None,
         mcp_manager: Optional[MCPClientManager] = None,
         mcp_server_names: Optional[List[str]] = None,
+        vector_store: Optional[VectorStoreInterface] = None,
+        # embedding_function: Optional[EmbeddingFunction] = None,
+
     ):
         self.name = name
         self.backstory = backstory
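With the widened constructor above, wiring a vector store into an agent might look like the sketch below. The store object is whatever concrete VectorStoreInterface implementation you have configured; no concrete store class is assumed here because none appears in this diff.

    from clap.multiagent_pattern.agent import Agent
    from clap.vector_stores.base import VectorStoreInterface

    store: VectorStoreInterface = ...  # any configured VectorStoreInterface implementation

    researcher = Agent(
        name="Researcher",
        backstory="You answer questions from the indexed document collection.",
        task_description="Summarize what the stored documents say about agent frameworks.",
        task_expected_output="A short bulleted summary.",
        vector_store=store,  # new optional parameter in 0.2.1
    )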
@@ -45,36 +81,41 @@ class Agent:
         self.task_expected_output = task_expected_output
         self.mcp_manager = mcp_manager
         self.mcp_server_names = mcp_server_names or []
+        self.local_tools = tools or []
+
+        self.vector_store = vector_store
+        # self.embedding_function = embedding_function
 
         llm_service_instance = llm_service or GroqService()
 
+
+        from clap.react_pattern.react_agent import ReactAgent
         self.react_agent = ReactAgent(
+            agent_name=self.name,
             llm_service=llm_service_instance,
             model=model,
             system_prompt=self.backstory,
-            tools=tools or [],
+            tools=self.local_tools,
             mcp_manager=self.mcp_manager,
-            mcp_server_names=self.mcp_server_names
+            mcp_server_names=self.mcp_server_names,
+            vector_store=self.vector_store
         )
 
         self.dependencies: List['Agent'] = []
         self.dependents: List['Agent'] = []
         self.received_context: dict[str, Any] = {}
 
-        from clap.multiagent_pattern.team import Team
+        from clap.multiagent_pattern.team import Team
         Team.register_agent(self)
 
-
+
     def __repr__(self): return f"{self.name}"
 
     def __rshift__(self, other: 'Agent') -> 'Agent': self.add_dependent(other); return other
-
     def __lshift__(self, other: 'Agent') -> 'Agent': self.add_dependency(other); return other
-
     def __rrshift__(self, other: List['Agent'] | 'Agent'): self.add_dependency(other); return self
-
     def __rlshift__(self, other: List['Agent'] | 'Agent'): self.add_dependent(other); return self
-
+
     def add_dependency(self, other: 'Agent' | List['Agent']):
         AgentClass = type(self)
         if isinstance(other, AgentClass):
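The shift operators defined above encode execution order inside a team: a >> b registers b as a dependent of a (so b later receives a's output as context), << is the mirror image, and the reflected variants let a plain list appear on the left-hand side. A small sketch with illustrative agent names (assumed to be Agent instances created as above):

    research >> write >> review      # __rshift__ returns the right operand, so chaining works
    review << write << research      # equivalent, read right to left
    [research, write] >> review      # __rrshift__: review depends on both research and write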
@@ -95,34 +136,66 @@ class Agent:
                 if self not in item.dependencies: item.dependencies.append(self)
                 if item not in self.dependents: self.dependents.append(item)
         else: raise TypeError("The dependent must be an instance or list of Agent.")
-    def receive_context(self, sender_name: str, input_data: Any): self.received_context[sender_name] = input_data
+
+    def receive_context(self, sender_name: str, input_data: Any):
+        self.received_context[sender_name] = input_data
+
     def create_prompt(self) -> str:
-        context_str = "\n---\n".join(f"Context from {name}:\n{json.dumps(data, indent=2) if isinstance(data, dict) else str(data)}" for name, data in self.received_context.items())
-        if not context_str: context_str = "No context received from other agents."
+        """Creates the initial prompt for the agent's task execution."""
+        context_str = "\n---\n".join(
+            f"Context from {name}:\n{json.dumps(data, indent=2, ensure_ascii=False) if isinstance(data, dict) else str(data)}"
+            for name, data in self.received_context.items()
+        )
+        if not context_str:
+            context_str = "No context received from other agents."
+
+        vector_store_info = ""
+        user_query = self.task_description
+
+        if self.vector_store:
+            vector_store_info = "\nVector Store Available: Use the 'vector_query' tool with the User Query below to find relevant context before answering factual questions."
+
+        task_info = f"""
+        User Query: {user_query}
+        Task: Answer the User Query. {vector_store_info or ''} Use context from other agents if provided.
+        Expected Output: {self.task_expected_output or 'Produce a meaningful response to complete the task.'}
+        """.strip()
+
         prompt = dedent(f"""
-        You are an AI agent named {self.name}. Your backstory: {self.backstory}
-        You are part of a team of agents working together to complete a task.
-        Your immediate task is described below. Use the provided context from other agents if relevant.
+        Agent: {self.name}
+        Persona: {self.backstory}
+        Team Context: {context_str}
+
+        {task_info}
+
+        Execute now following the ReAct pattern. If using 'vector_query', use the User Query text as the 'query' argument.
+        """).strip()
+        return prompt
 
-        <task_description>
-        {self.task_description}
-        </task_description>
+    async def run(self, user_msg: Optional[str] = None) -> dict[str, Any]:
+        """Runs the agent's task using its configured ReactAgent.
+        """
+        print(f"Agent {self.name}: Preparing to run...")
 
-        <task_expected_output>
-        {self.task_expected_output or 'Produce a meaningful response to complete the task.'}
-        </task_expected_output>
+        current_task = user_msg if user_msg is not None else self.task_description
+        if not user_msg and self.task_description == "No specific task assigned; await runtime user message.":
+            print(f"Agent {self.name}: Warning - Running without a specific user_msg or a meaningful pre-set task_description.")
 
-        <context>
-        {context_str}
-        </context>
+
+        original_task_description = self.task_description
+        self.task_description = current_task
 
-        Now, execute your task based on the description, context, and expected output. Your response:
-        """).strip(); return prompt
-    async def run(self) -> dict[str, Any]:
         msg = self.create_prompt()
-        raw_output = await self.react_agent.run(user_msg=msg)
+
+        self.task_description = original_task_description
+
+        print(f"Agent {self.name}: Running ReactAgent...")
+        raw_output = await self.react_agent.run(user_msg=msg)
         output_data = {"output": raw_output}
+
+        print(f"Agent {self.name}: Passing context to {len(self.dependents)} dependents...")
         for dependent in self.dependents:
             dependent.receive_context(self.name, output_data)
+
         return output_data
 
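Because run() now takes an optional user_msg, an agent can be driven with a fresh query at call time; the pre-set task_description is swapped in only for prompt construction and then restored. A rough usage sketch (reusing the hypothetical researcher agent from the earlier example):

    import asyncio

    async def main():
        result = await researcher.run(user_msg="What do the stored documents say about retries?")
        print(result["output"])   # run() returns {"output": <raw ReactAgent output>}

    asyncio.run(main())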
@@ -1,15 +1,22 @@
-# --- START OF team.py (Parallel Execution - Python 3.10 Compatible) ---
 
 import asyncio
 from collections import deque
-from typing import Any, Dict, List
-
+from typing import Any, Dict, List , Optional
 from colorama import Fore
-from graphviz import Digraph
-
 from clap.utils.logging import fancy_print
 from clap.multiagent_pattern.agent import Agent
 
+_GRAPHVIZ_AVAILABLE = False
+_Digraph_Placeholder_Type = Any
+
+try:
+    from graphviz import Digraph as ImportedDigraph
+    from graphviz.backend import ExecutableNotFound as GraphvizExecutableNotFound
+    _Digraph_Placeholder_Type = ImportedDigraph
+    _GRAPHVIZ_AVAILABLE = True
+except ImportError:
+    pass
+
 class Team:
     """
     A class representing a team of agents working together asynchronously.
@@ -26,7 +33,6 @@ class Team:
         self.agents: List[Agent] = []
         self.results: dict[str, Any] = {}
 
-    # __enter__, __exit__, add_agent, register_agent remain the same
     def __enter__(self): Team.current_team = self; return self
     def __exit__(self, exc_type, exc_val, exc_tb): Team.current_team = None
     def add_agent(self, agent: Agent):
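Team.current_team plus register_agent (which Agent.__init__ calls) is what makes the context-manager pattern work: any Agent constructed inside a with Team() block is added to that team automatically. A sketch of the intended usage (assumes default GroqService credentials are configured in the environment):

    import asyncio
    from clap.multiagent_pattern.agent import Agent
    from clap.multiagent_pattern.team import Team

    async def main():
        with Team() as team:
            planner = Agent(name="Planner", backstory="You break the goal into steps.",
                            task_description="Plan a short report on agent frameworks.")
            writer = Agent(name="Writer", backstory="You write the report.",
                           task_description="Write the report from the plan.")
            planner >> writer            # writer depends on planner
        await team.run()
        print(team.results)              # per-agent outputs (or error entries) collected during the run

    asyncio.run(main())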
@@ -35,7 +41,7 @@ class Team:
     def register_agent(agent: Agent):
         if Team.current_team is not None: Team.current_team.add_agent(agent)
 
-    # topological_sort and plot remain the same
+
     def topological_sort(self) -> List[Agent]:
         in_degree: Dict[Agent, int] = {agent: 0 for agent in self.agents}
         adj: Dict[Agent, List[Agent]] = {agent: [] for agent in self.agents}
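The two dictionaries above are the standard setup for Kahn's algorithm, with the deque imported at the top of the module serving as the ready queue of agents whose dependencies are satisfied. For orientation, a generic sketch of how such an ordering is completed is below; the package's actual loop and its error handling for cycles are not shown in this hunk.

    from collections import deque

    def kahn_order(in_degree, adj):
        # Generic Kahn's algorithm over the in_degree/adj structures built above.
        queue = deque(node for node, degree in in_degree.items() if degree == 0)
        ordered = []
        while queue:
            node = queue.popleft()
            ordered.append(node)
            for nxt in adj[node]:
                in_degree[nxt] -= 1
                if in_degree[nxt] == 0:
                    queue.append(nxt)
        if len(ordered) != len(in_degree):
            raise ValueError("Dependency cycle detected among agents")
        return ordered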
@@ -71,15 +77,46 @@ class Team:
             )
         return sorted_agents
 
-    def plot(self):
-        dot = Digraph(format="png")
-        for agent in self.agents: dot.node(agent.name)
-        for agent in self.agents:
-            for dependent in agent.dependents:
-                if dependent in self.agents: dot.edge(agent.name, dependent.name)
-        return dot
+    def plot(self) -> Optional[_Digraph_Placeholder_Type]:
+        """
+        Generates a visual representation of the agent team's dependency graph.
+        Requires the 'graphviz' Python library and the Graphviz system software to be installed.
+        If Graphviz is not available, prints a warning and returns None.
+
+        Returns:
+            Optional[graphviz.Digraph]: A Digraph object if successful, else None.
+        """
+        if not _GRAPHVIZ_AVAILABLE:
+            print(
+                f"{Fore.YELLOW}CLAP Warning: The 'graphviz' Python library is not installed. "
+                f"To use the plot() feature, please install it (e.g., pip install \"clap-agents[viz]\").{Fore.RESET}"
+            )
+            return None
+
+        dot: Optional[_Digraph_Placeholder_Type] = None
+        try:
+            dot = _Digraph_Placeholder_Type(format="png")
+            for agent in self.agents:
+                dot.node(agent.name)
+            for agent in self.agents:
+                for dependent in agent.dependents:
+                    if dependent in self.agents:
+                        dot.edge(agent.name, dependent.name)
+            print(f"{Fore.GREEN}Team dependency graph created. To render, call .render() or .view() on the returned object.{Fore.RESET}")
+            return dot
+        except NameError:
+            print(f"{Fore.YELLOW}CLAP Warning: Graphviz Digraph class not found (likely import issue). Plotting unavailable.{Fore.RESET}")
+            return None
+        except GraphvizExecutableNotFound:
+            print(
+                f"{Fore.RED}CLAP Error: Graphviz system software (dot executable) not found in PATH. "
+                f"Team.plot() requires it to generate images. Please install Graphviz for your OS and ensure 'dot' is in your PATH.{Fore.RESET}"
+            )
+            return None
+        except Exception as e:
+            print(f"{Fore.RED}CLAP Error: An unexpected error occurred during graph plotting: {e}{Fore.RESET}")
+            return None
 
-    # --- Modified run method for parallel execution (Python 3.10 compatible) ---
     async def run(self):
         """
         Runs all agents in the team asynchronously, executing them in parallel
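Since plot() now degrades gracefully, callers should check for None before rendering; rendering itself uses the standard graphviz API and needs the 'dot' executable on PATH:

    graph = team.plot()
    if graph is not None:
        graph.render("team_graph", cleanup=True)   # writes team_graph.png (format was set to "png")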
@@ -93,29 +130,20 @@ class Team:
 
         self.results = {}
         agent_tasks: Dict[Agent, asyncio.Task] = {}
-        # Use a standard try/except block for Python 3.10 compatibility
         try:
-            # Use asyncio.gather for task management, requiring manual cancellation on error
             tasks_to_gather = []
             for agent in sorted_agents:
-                # Create the task but store it before adding to gather list
                 task = asyncio.create_task(self._run_agent_task(agent, agent_tasks))
                 agent_tasks[agent] = task
                 tasks_to_gather.append(task)
 
-            # Wait for all tasks to complete. If one fails, gather will raise that first exception.
             await asyncio.gather(*tasks_to_gather)
             print(f"{Fore.BLUE}--- All agent tasks finished ---{Fore.RESET}")
 
         except Exception as e:
-            # If any task failed, asyncio.gather raises the exception of the first task that failed.
             print(f"{Fore.RED}One or more agents failed during execution:{Fore.RESET}")
             print(f"{Fore.RED}- Error: {e}{Fore.RESET}")
-            # Note: With asyncio.gather, cancelling sibling tasks automatically
-            # when one fails requires more complex manual handling.
-            # For simplicity here, we let other tasks potentially run to completion
-            # or fail independently, but we report the first failure.
-            # Consider using anyio's TaskGroup if Python 3.11+ is an option for better cancellation.
+
 
 
     async def _run_agent_task(self, agent: Agent, all_tasks: Dict[Agent, asyncio.Task]):
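asyncio.gather surfaces only the first exception here and the remaining agent tasks are left to finish or fail on their own, so a caller that needs per-agent status should inspect team.results after run() returns; failed agents are recorded under an "error" key by _run_agent_task below. For example, inside an async function:

    await team.run()
    for agent_name, result in team.results.items():
        if isinstance(result, dict) and "error" in result:
            print(f"{agent_name} failed: {result['error']}")
        else:
            print(f"{agent_name} succeeded")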
@@ -128,8 +156,7 @@ class Team:
         ]
         if dependency_tasks:
             print(f"{Fore.YELLOW}Agent {agent.name} waiting for dependencies: {[dep.name for dep in agent.dependencies if dep in all_tasks]}...{Fore.RESET}")
-            # Wait for all dependency tasks using asyncio.gather
-            # We want errors in dependencies to propagate, so return_exceptions=False
+
             await asyncio.gather(*dependency_tasks)
             print(f"{Fore.GREEN}Agent {agent.name} dependencies met.{Fore.RESET}")
 
@@ -148,7 +175,5 @@ class Team:
             fancy_print(f"ERROR IN AGENT: {agent.name}")
             print(f"{Fore.RED}Agent {agent.name} failed: {e}{Fore.RESET}")
             self.results[agent.name] = {"error": str(e)}
-            # Re-raise the exception so asyncio.gather catches it
             raise
 
-# --- END OF team.py (Parallel Execution - Python 3.10 Compatible) ---