agentapps 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentapps/__init__.py +29 -0
- agentapps/agent.py +452 -0
- agentapps/model.py +667 -0
- agentapps/tools.py +514 -0
- agentapps-0.1.0.dist-info/METADATA +272 -0
- agentapps-0.1.0.dist-info/RECORD +8 -0
- agentapps-0.1.0.dist-info/WHEEL +5 -0
- agentapps-0.1.0.dist-info/top_level.txt +1 -0
agentapps/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# ============================================================================
# File: agentapps/__init__.py
# ============================================================================

"""
AgentApps - Multi-Agent Orchestration Framework

A Python package for building intelligent agent systems with role-based
collaboration, tools integration, and LLM support.
"""

__version__ = "0.1.0"
__author__ = "Your Name"
__license__ = "MIT"

# Re-export the public API so callers can write `from agentapps import Agent`
# instead of importing from the submodules directly.
from .agent import Agent
from .model import OpenAIChat, Model
from .tools import Tool, WebSearchTool, WebScraperTool, SearchSummaryTool, CalculatorTool

# Names exported by `from agentapps import *`.
__all__ = [
    "Agent",
    "OpenAIChat",
    "Model",
    "Tool",
    "WebSearchTool",
    "WebScraperTool",
    "SearchSummaryTool",
    "CalculatorTool",
]
|
agentapps/agent.py
ADDED
|
@@ -0,0 +1,452 @@
|
|
|
1
|
+
# ============================================================================
|
|
2
|
+
# File: agentapps/agent.py
|
|
3
|
+
# ============================================================================
|
|
4
|
+
|
|
5
|
+
"""Core Agent implementation"""
|
|
6
|
+
|
|
7
|
+
from typing import List, Dict, Any, Optional, Union
|
|
8
|
+
import json
|
|
9
|
+
import re
|
|
10
|
+
|
|
11
|
+
from .model import Model, OpenAIChat
|
|
12
|
+
from .tools import Tool
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class Agent:
    """
    Agent class for building intelligent agents with tools and collaboration.

    An Agent wraps an LLM (``Model``), an optional list of ``Tool`` objects,
    and an optional team of sub-agents.  It builds a system prompt from its
    configuration, drives the OpenAI-style tool-calling loop, and keeps a
    per-instance conversation history.
    """

    def __init__(
        self,
        name: Optional[str] = None,
        role: Optional[str] = None,
        model: Optional[Model] = None,
        tools: Optional[List[Tool]] = None,
        instructions: Optional[List[str]] = None,
        team: Optional[List['Agent']] = None,
        show_tool_calls: bool = False,
        markdown: bool = False,
        description: Optional[str] = None,
        temperature: Optional[float] = None,
        **kwargs
    ):
        """
        Initialize an Agent.

        Args:
            name: Agent name (defaults to "Agent")
            role: Agent role/purpose (defaults to "General Assistant")
            model: LLM model to use
            tools: List of tools available to the agent
            instructions: List of instructions for the agent
            team: List of sub-agents (creates an agent team)
            show_tool_calls: Whether to display tool calls
            markdown: Whether to format output as markdown
            description: Agent description
            temperature: Temperature for model (overrides model default)
            **kwargs: Additional configuration, stored on ``self.kwargs``
        """
        self.name = name or "Agent"
        self.role = role or "General Assistant"
        self.model = model
        # NOTE: if the caller passes a list, it is kept by reference and will
        # be mutated by add_tool()/add_team_member().
        self.tools = tools or []
        self.instructions = instructions or []
        self.team = team or []
        self.show_tool_calls = show_tool_calls
        self.markdown = markdown
        self.description = description or ""
        self.temperature = temperature
        self.kwargs = kwargs

        # Execution state
        self.conversation_history: List[Dict[str, str]] = []
        self.tool_results: List[Dict[str, Any]] = []

        # If this is a team agent, inherit model/tools/role from members.
        if self.team:
            self._setup_team()

    def _setup_team(self):
        """Inherit model, tools, and role from team members where unset."""
        if not self.model:
            # Use the first team member's model as default.
            for member in self.team:
                if member.model:
                    self.model = member.model
                    break

        # Collect all tools from team members.
        all_tools = []
        for member in self.team:
            all_tools.extend(member.tools)

        # Remove duplicates (by tool name) while preserving order.  Seed the
        # set with tools the coordinator already owns, so a tool present on
        # both the coordinator and a member is not added twice (the original
        # started from an empty set and could duplicate pre-existing tools).
        seen = {tool.name for tool in self.tools}
        for tool in all_tools:
            if tool.name not in seen:
                seen.add(tool.name)
                self.tools.append(tool)

        # Derive a coordinator role if the caller kept the default.
        if self.role == "General Assistant":
            team_roles = [m.role for m in self.team if m.role]
            if team_roles:
                self.role = f"Team coordinator managing: {', '.join(team_roles)}"

    def add_tool(self, tool: Tool) -> None:
        """Add a tool to the agent."""
        self.tools.append(tool)
        if self.show_tool_calls:
            print(f"✓ Added tool '{tool.name}' to agent '{self.name}'")

    def add_team_member(self, agent: 'Agent') -> None:
        """Add a team member and inherit its tools."""
        self.team.append(agent)
        # Inherit tools
        self.tools.extend(agent.tools)
        if self.show_tool_calls:
            print(f"✓ Added team member '{agent.name}' to '{self.name}'")

    def _build_system_prompt(self) -> str:
        """Build the system prompt from name/role, description, instructions,
        team capabilities, and available tools.

        List items are prefixed with "\\n" because the parts are joined with
        "" — the original appended bare "- item" strings, which ran every
        bullet together on a single line in the final prompt.
        """
        prompt_parts = []

        # Name and role
        prompt_parts.append(f"You are {self.name}, {self.role}.")

        if self.description:
            prompt_parts.append(f"\n{self.description}")

        # Instructions
        if self.instructions:
            prompt_parts.append("\n\nInstructions:")
            for instruction in self.instructions:
                prompt_parts.append(f"\n- {instruction}")

        # Team information - guidance for sequential tool use
        if self.team:
            prompt_parts.append("\n\nYou coordinate a team with these capabilities:")
            for member in self.team:
                prompt_parts.append(f"\n- {member.name} ({member.role}): Tools available - {', '.join([t.name for t in member.tools])}")

            prompt_parts.append("\n\nYou have DIRECT ACCESS to all team tools - use them in the order needed:")
            prompt_parts.append("\n1. If the task requires finding information first, use search tools")
            prompt_parts.append("\n2. If you get URLs from search, use web_scraper to read them")
            prompt_parts.append("\n3. Use multiple tool calls in sequence as needed")
            prompt_parts.append("\n4. Complete each step before moving to the next")

        # Tools information
        if self.tools:
            prompt_parts.append("\n\nYour available tools:")
            for tool in self.tools:
                prompt_parts.append(f"\n- {tool.name}: {tool.description}")

        return "".join(prompt_parts)

    def _execute_tools(self, tool_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Execute a batch of tool calls and return structured results.

        Each result dict carries "tool", "status" ("success"/"error"), and
        either "result" or "error".  Malformed JSON arguments and unknown
        tool names are reported as error results instead of raising, so one
        bad call cannot abort the whole batch.
        """
        results = []

        for tool_call in tool_calls:
            tool_name = tool_call.get("function", {}).get("name")

            # Parse arguments defensively: the model may emit invalid JSON.
            # (The original parsed outside any try block and raised.)
            try:
                tool_args = json.loads(tool_call.get("function", {}).get("arguments", "{}"))
            except json.JSONDecodeError as e:
                results.append({
                    "tool": tool_name,
                    "error": f"Invalid tool arguments: {e}",
                    "status": "error"
                })
                continue

            # Find the tool by name.
            tool = next((t for t in self.tools if t.name == tool_name), None)

            if tool:
                if self.show_tool_calls:
                    print(f"\n🔧 Calling tool: {tool_name}")
                    print(f"   Arguments: {tool_args}")

                try:
                    result = tool.execute(**tool_args)
                    results.append({
                        "tool": tool_name,
                        "arguments": tool_args,
                        "result": result,
                        "status": "success"
                    })

                    if self.show_tool_calls:
                        print(f"   ✓ Result: {result}")

                except Exception as e:
                    results.append({
                        "tool": tool_name,
                        "arguments": tool_args,
                        "error": str(e),
                        "status": "error"
                    })

                    if self.show_tool_calls:
                        print(f"   ✗ Error: {str(e)}")
            else:
                results.append({
                    "tool": tool_name,
                    "error": f"Tool '{tool_name}' not found",
                    "status": "error"
                })

        return results

    def _delegate_to_team(self, query: str) -> Optional[str]:
        """Delegate a query to the team member whose role words appear in it.

        Falls back to the first team member when no role keyword matches;
        returns None when there is no team at all.
        """
        if not self.team:
            return None

        # Simple delegation based on role keyword matching.
        query_lower = query.lower()

        for member in self.team:
            role_keywords = member.role.lower().split()
            if any(keyword in query_lower for keyword in role_keywords):
                if self.show_tool_calls:
                    print(f"\n👥 Delegating to team member: {member.name}")
                return member.run(query)

        # If no specific match, use the first team member.
        return self.team[0].run(query)

    def run(
        self,
        message: str,
        stream: bool = False,
        **kwargs
    ) -> str:
        """
        Run the agent with a message.

        Args:
            message: User message
            stream: Whether to stream the response (returns a generator of
                text chunks instead of a string when True)
            **kwargs: Additional parameters forwarded to the model

        Returns:
            Agent response (or an error string — this method never raises)
        """
        if not self.model:
            error_msg = "Error: No model configured for this agent"
            print(error_msg)
            return error_msg

        try:
            # System prompt first, then prior turns, then the new message.
            messages = [
                {"role": "system", "content": self._build_system_prompt()},
            ]
            messages.extend(self.conversation_history)
            messages.append({"role": "user", "content": message})

            if stream:
                return self._stream_response(messages, **kwargs)
            else:
                return self._generate_response(messages, **kwargs)

        except Exception as e:
            error_msg = f"Error running agent: {str(e)}"
            print(error_msg)
            import traceback
            traceback.print_exc()
            return error_msg

    def _generate_response(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Generate a non-streaming response.

        Passes tool schemas when tools are configured, dispatches to
        _handle_tool_calls when the model requests tools, and records the
        user/assistant turns in conversation_history for the plain-text path.
        (NOTE(review): the tool-call path returns without updating history —
        preserved from the original; confirm before relying on history there.)
        """
        try:
            # Agent-level temperature overrides the model's default.
            if self.temperature is not None:
                kwargs["temperature"] = self.temperature

            # Advertise tools so the model can request calls.
            if self.tools:
                kwargs["tools"] = [tool.to_openai_tool() for tool in self.tools]
                kwargs["tool_choice"] = "auto"

            if self.show_tool_calls:
                print(f"\n🔄 Generating response...")

            response = self.model.generate(messages, **kwargs)

            # The model layer may return a raw string or an API-style dict.
            if isinstance(response, dict):
                if response.get("tool_calls"):
                    return self._handle_tool_calls(messages, response, **kwargs)
                else:
                    content = response.get("content", "")
            else:
                content = response

            # Update conversation history with this turn.
            self.conversation_history.append(messages[-1])
            self.conversation_history.append({"role": "assistant", "content": content})

            return content

        except Exception as e:
            error_msg = f"Error generating response: {str(e)}"
            print(error_msg)
            import traceback
            traceback.print_exc()
            return error_msg

    def _handle_tool_calls(self, messages: List[Dict[str, str]], response: Dict, **kwargs) -> str:
        """Handle tool calls from the model, supporting multiple rounds.

        Every requested tool_call gets a matching "tool" message — even on
        lookup or execution failure — because the follow-up API request is
        rejected if an assistant tool_call is left unanswered.  (The original
        silently dropped the reply for unknown tools and let tool exceptions
        escape the loop.)
        """
        max_iterations = 5  # Prevent infinite tool-call loops
        iteration = 0

        while iteration < max_iterations:
            iteration += 1

            tool_calls = response.get("tool_calls", [])

            if not tool_calls:
                # No more tool calls — return the content.
                content = response.get("content", "") if isinstance(response, dict) else response
                return content if content else "No response generated"

            if self.show_tool_calls:
                print(f"\n🔧 Model requested {len(tool_calls)} tool call(s) (round {iteration})")

            # Execute each requested tool.
            tool_results = []
            for tool_call in tool_calls:
                function_name = tool_call["function"]["name"]
                function_args = json.loads(tool_call["function"]["arguments"])

                tool = next((t for t in self.tools if t.name == function_name), None)

                if tool:
                    if self.show_tool_calls:
                        print(f"   Calling: {function_name}")
                        print(f"   Args: {function_args}")

                    try:
                        result = tool.execute(**function_args)
                    except Exception as e:
                        # Report the failure back to the model instead of
                        # aborting the whole loop.
                        result = f"Error executing tool '{function_name}': {e}"
                else:
                    result = f"Error: tool '{function_name}' not found"

                tool_results.append({
                    "tool_call_id": tool_call.get("id"),
                    "role": "tool",
                    "name": function_name,
                    "content": str(result)
                })

                if self.show_tool_calls:
                    # str() first: results may be non-str and unsliceable.
                    print(f"   Result: {str(result)[:100]}...")

            # Record the assistant turn that issued the tool calls.
            messages.append({
                "role": "assistant",
                "content": response.get("content"),
                "tool_calls": tool_calls
            })

            # Record the tool results.
            for tool_result in tool_results:
                messages.append(tool_result)

            if self.show_tool_calls:
                print(f"\n🤔 Processing tool results...")

            # Re-invoke the model, keeping tools available so it can chain
            # further calls if needed.
            final_params = {k: v for k, v in kwargs.items() if k not in ['tools', 'tool_choice']}
            if self.tools:
                final_params["tools"] = [tool.to_openai_tool() for tool in self.tools]
                final_params["tool_choice"] = "auto"

            response = self.model.generate(messages, **final_params)

            # If the response is just a string, we're done.
            if isinstance(response, str):
                return response

            # Dict without further tool calls — done.
            if isinstance(response, dict) and not response.get("tool_calls"):
                return response.get("content", "")

            # Otherwise, loop to handle the next round of tool calls.

        # Max iterations reached — return whatever we have.
        if self.show_tool_calls:
            print(f"\n⚠ Reached max iterations ({max_iterations})")

        return response.get("content", "") if isinstance(response, dict) else str(response)

    def _stream_response(self, messages: List[Dict[str, str]], **kwargs):
        """Yield response chunks from the model, then record the full turn."""
        full_response = ""

        for chunk in self.model.stream(messages, **kwargs):
            full_response += chunk
            yield chunk

        # Update conversation history once the stream is exhausted.
        self.conversation_history.append(messages[-1])
        self.conversation_history.append({"role": "assistant", "content": full_response})

    def print_response(self, message: str, stream: bool = False, **kwargs) -> None:
        """
        Run the agent and print the response.

        Args:
            message: User message
            stream: Whether to stream the response
            **kwargs: Additional parameters forwarded to run()
        """
        try:
            if self.show_tool_calls:
                print(f"\n{'='*60}")
                print(f"Agent: {self.name}")
                print(f"Role: {self.role}")
                print(f"{'='*60}\n")
                print(f"Query: {message}\n")

            if stream:
                if self.show_tool_calls:
                    print("Response:\n")

                for chunk in self.run(message, stream=True, **kwargs):
                    print(chunk, end="", flush=True)
                print()  # New line at the end
            else:
                response = self.run(message, stream=False, **kwargs)

                if self.show_tool_calls:
                    print("Response:\n")

                print(response)

            if self.show_tool_calls:
                print(f"\n{'='*60}\n")

        except Exception as e:
            print(f"\n❌ Error in print_response: {str(e)}")
            import traceback
            traceback.print_exc()

    def clear_history(self) -> None:
        """Clear conversation history and accumulated tool results."""
        self.conversation_history.clear()
        self.tool_results.clear()

    def get_info(self) -> Dict[str, Any]:
        """Get a summary dict of the agent's configuration and state."""
        return {
            "name": self.name,
            "role": self.role,
            "model": str(self.model) if self.model else None,
            "tools": [tool.name for tool in self.tools],
            "team": [member.name for member in self.team],
            "instructions": self.instructions,
            # Two history entries (user + assistant) per completed turn.
            "conversation_turns": len(self.conversation_history) // 2
        }

    def __repr__(self) -> str:
        return f"Agent(name='{self.name}', role='{self.role}', tools={len(self.tools)}, team={len(self.team)})"