neuroagent 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +273 -0
- package/dist/index.d.ts +55 -0
- package/dist/index.js +215 -0
- package/neuroagent/__init__.py +29 -0
- package/neuroagent/agents/__init__.py +3 -0
- package/neuroagent/agents/agent.py +253 -0
- package/neuroagent/cli/__init__.py +3 -0
- package/neuroagent/cli/neuroagent_cli.py +182 -0
- package/neuroagent/examples/__init__.py +1 -0
- package/neuroagent/examples/dev_agent.py +31 -0
- package/neuroagent/examples/multi_agent_team.py +45 -0
- package/neuroagent/examples/website_agent.py +27 -0
- package/neuroagent/frontend/__init__.py +1 -0
- package/neuroagent/frontend/widget.js +409 -0
- package/neuroagent/llm/__init__.py +5 -0
- package/neuroagent/llm/base.py +46 -0
- package/neuroagent/llm/local_model_provider.py +76 -0
- package/neuroagent/llm/openai_provider.py +58 -0
- package/neuroagent/memory/__init__.py +5 -0
- package/neuroagent/memory/long_memory.py +52 -0
- package/neuroagent/memory/short_memory.py +39 -0
- package/neuroagent/memory/vector_memory.py +57 -0
- package/neuroagent/planner/__init__.py +3 -0
- package/neuroagent/planner/planner.py +90 -0
- package/neuroagent/server/__init__.py +16 -0
- package/neuroagent/server/api_server.py +191 -0
- package/neuroagent/server/websocket_server.py +108 -0
- package/neuroagent/team/__init__.py +3 -0
- package/neuroagent/team/team.py +134 -0
- package/neuroagent/tools/__init__.py +19 -0
- package/neuroagent/tools/base.py +45 -0
- package/neuroagent/tools/code_executor.py +69 -0
- package/neuroagent/tools/file_manager.py +62 -0
- package/neuroagent/tools/http_client.py +57 -0
- package/neuroagent/tools/web_search.py +48 -0
- package/neuroagent/utils/__init__.py +3 -0
- package/neuroagent/utils/helpers.py +31 -0
- package/package.json +56 -0
- package/requirements.txt +15 -0
- package/setup.py +61 -0
- package/src/index.d.ts +55 -0
- package/src/index.js +215 -0
- package/web_example/index.html +249 -0
- package/web_example/neuroagent.js +301 -0
- package/web_example/script.js +114 -0
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import hashlib
from datetime import datetime
from typing import List, Dict, Any, Optional

import numpy as np
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class VectorMemory:
    """In-memory vector store with a toy, hash-seeded pseudo-embedding.

    The "embedding" is a pseudo-random vector seeded from a digest of the
    text, so identical texts always map to identical vectors. It carries no
    semantic meaning — `search` only reliably finds near-exact text matches.
    """

    def __init__(self, embedding_dim: int = 1536):
        # Dimensionality of the generated pseudo-embeddings.
        self.embedding_dim = embedding_dim
        # Parallel lists: _embeddings[i] is the vector for _memory[i].
        self._embeddings: List[np.ndarray] = []
        self._memory: List[Dict[str, Any]] = []

    def _create_embedding(self, text: str) -> np.ndarray:
        """Deterministically map ``text`` to a pseudo-random vector.

        Fixes two defects in the original implementation:
        - it seeded from the builtin ``hash()``, which is salted per process
          (PYTHONHASHSEED), so embeddings differed between runs;
        - it reseeded NumPy's *global* RNG via ``np.random.seed``, clobbering
          unrelated random state elsewhere in the program.
        """
        digest = hashlib.sha256(text.encode("utf-8")).digest()
        seed = int.from_bytes(digest[:8], "big")
        rng = np.random.default_rng(seed)  # local generator: no global state touched
        return rng.standard_normal(self.embedding_dim)

    def _cosine_similarity(self, a: np.ndarray, b: np.ndarray) -> float:
        """Cosine similarity in [-1, 1]; 0.0 when either vector is all zeros."""
        dot_product = np.dot(a, b)
        norm_a = np.linalg.norm(a)
        norm_b = np.linalg.norm(b)
        if norm_a == 0 or norm_b == 0:
            return 0.0
        return float(dot_product / (norm_a * norm_b))

    def save(self, content: str, metadata: Optional[Dict[str, Any]] = None):
        """Store ``content`` with an ISO timestamp and optional metadata."""
        embedding = self._create_embedding(content)
        entry = {
            "content": content,
            "timestamp": datetime.now().isoformat(),
            "metadata": metadata or {}
        }
        self._embeddings.append(embedding)
        self._memory.append(entry)

    def search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Return up to ``top_k`` entries most similar to ``query``.

        Entries with similarity <= 0.1 are filtered out, so fewer than
        ``top_k`` results (or none) may be returned.
        """
        if not self._memory:
            return []

        query_embedding = self._create_embedding(query)
        similarities = [
            self._cosine_similarity(query_embedding, emb)
            for emb in self._embeddings
        ]

        # Indices of the top_k highest similarities, best first.
        top_indices = np.argsort(similarities)[-top_k:][::-1]

        return [self._memory[i] for i in top_indices if similarities[i] > 0.1]

    def get_all(self) -> List[Dict[str, Any]]:
        """All stored entries in insertion order (the live list, not a copy)."""
        return self._memory

    def clear(self):
        """Drop every stored entry and embedding."""
        self._embeddings = []
        self._memory = []

    def to_messages(self) -> List[Dict[str, str]]:
        """The five most recent entries as chat-style system messages."""
        return [{"role": "system", "content": e["content"]} for e in self._memory[-5:]]
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
from typing import List, Dict, Any, Optional
|
|
2
|
+
from pydantic import BaseModel
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class Task(BaseModel):
    """A single unit of work inside a Plan."""

    id: str  # per-plan identifier, typically "1", "2", ...
    description: str  # human-readable statement of what to do
    status: str = "pending"  # set to "completed" by Planner.complete_task
    result: Optional[Any] = None  # output recorded when the task finishes
    # Nested child tasks. Pydantic deep-copies mutable defaults per instance,
    # so the shared-mutable-default pitfall does not apply here.
    subtasks: List["Task"] = []
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class Plan(BaseModel):
    """An ordered sequence of Tasks aimed at a single goal."""

    goal: str  # the objective this plan was created for
    tasks: List[Task]  # executed front to back
    current_task_index: int = 0  # cursor used by Planner.get_next_task/complete_task
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class Planner:
    """Builds a Plan for a goal — via the configured LLM when available,
    otherwise from a fixed four-step template."""

    def __init__(self, llm_provider=None):
        # Optional LLM used to decompose goals; None selects the template plan.
        self.llm_provider = llm_provider

    async def create_plan(self, goal: str, context: Optional[str] = None) -> Plan:
        """Return a Plan for ``goal``; ``context`` is only used on the LLM path."""
        if self.llm_provider:
            return await self._create_llm_plan(goal, context)
        else:
            return self._create_simple_plan(goal)

    async def _create_llm_plan(self, goal: str, context: Optional[str] = None) -> Plan:
        """Ask the LLM for a numbered task list and parse it into a Plan."""
        prompt = f"""Create a detailed plan to accomplish the following goal:

Goal: {goal}

{"Context: " + context if context else ""}

Provide a list of tasks in this format:
1. [Task description]
2. [Task description]
3. [Task description]
"""

        # (Removed an unused local `from neuroagent.llm.base import Message`
        # that the original performed here.)
        response = await self.llm_provider.complete(
            prompt=prompt,
            max_tokens=500
        )

        tasks = self._parse_tasks(response.content, goal)
        return Plan(goal=goal, tasks=tasks)

    def _create_simple_plan(self, goal: str) -> Plan:
        """Fallback plan used when no LLM provider is configured."""
        tasks = [
            Task(id="1", description=f"Analyze goal: {goal}"),
            Task(id="2", description="Break down into subtasks"),
            Task(id="3", description="Execute subtasks"),
            Task(id="4", description="Review and finalize results")
        ]
        return Plan(goal=goal, tasks=tasks)

    def _parse_tasks(self, response: str, goal: str) -> List[Task]:
        """Extract numbered or bulleted lines from the LLM response as Tasks.

        Falls back to a single task covering the whole goal when nothing
        parseable is found.
        """
        tasks = []

        for line in response.strip().split("\n"):
            line = line.strip()
            if line and (line[0].isdigit() or line.startswith("-")):
                task_desc = line.lstrip("0123456789.-) ").strip()
                if task_desc:
                    # Fix: number tasks by how many have been parsed, not by
                    # the raw line index — blank or preamble lines previously
                    # yielded gappy, non-sequential ids ("2", "5", ...),
                    # inconsistent with _create_simple_plan and the fallback.
                    tasks.append(Task(id=str(len(tasks) + 1), description=task_desc))

        if not tasks:
            tasks = [Task(id="1", description=goal)]

        return tasks

    def get_next_task(self, plan: Plan) -> Optional[Task]:
        """The task at the plan's cursor, or None when the plan is exhausted."""
        if plan.current_task_index < len(plan.tasks):
            return plan.tasks[plan.current_task_index]
        return None

    def complete_task(self, plan: Plan, task_id: str, result: Any) -> None:
        """Mark every task matching ``task_id`` completed and advance the cursor.

        NOTE: the cursor advances even when no task matches ``task_id``,
        mirroring the original behavior callers may rely on.
        """
        for task in plan.tasks:
            if task.id == task_id:
                task.status = "completed"
                task.result = result

        plan.current_task_index += 1

    def is_complete(self, plan: Plan) -> bool:
        """True once every task in the plan is completed."""
        return all(task.status == "completed" for task in plan.tasks)
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from neuroagent.server.api_server import app, create_app
|
|
2
|
+
from neuroagent.server.websocket_server import (
|
|
3
|
+
manager,
|
|
4
|
+
handle_websocket,
|
|
5
|
+
register_agent,
|
|
6
|
+
get_registered_agents
|
|
7
|
+
)
|
|
8
|
+
|
|
9
|
+
__all__ = [
|
|
10
|
+
"app",
|
|
11
|
+
"create_app",
|
|
12
|
+
"manager",
|
|
13
|
+
"handle_websocket",
|
|
14
|
+
"register_agent",
|
|
15
|
+
"get_registered_agents"
|
|
16
|
+
]
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
from typing import Dict, Any, Optional
|
|
2
|
+
from pydantic import BaseModel
|
|
3
|
+
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect
|
|
4
|
+
from fastapi.middleware.cors import CORSMiddleware
|
|
5
|
+
import asyncio
|
|
6
|
+
|
|
7
|
+
from neuroagent.agents.agent import Agent
|
|
8
|
+
from neuroagent.team.team import Team
|
|
9
|
+
from neuroagent.llm.openai_provider import OpenAIProvider
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# FastAPI application object; re-exported from neuroagent.server.
app = FastAPI(title="NeuroAgent API", version="0.1.0")

# Wide-open CORS, convenient for local development.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# broader than the CORS spec permits and unsafe for production — confirm
# whether credentialed cross-origin access is actually required.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Process-local registries keyed by name; state is lost on restart and is
# not shared across worker processes.
_agents: Dict[str, Agent] = {}
_teams: Dict[str, Team] = {}
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class AgentRequest(BaseModel):
    """Payload for running/chatting with an existing agent (also reused by
    /team/run, where `agent` carries the team name)."""

    agent: str  # name of the registered agent (or team) to address
    message: str  # user message / task text
    model: Optional[str] = "gpt-4"  # override of the agent's LLM model
    temperature: Optional[float] = 0.7  # sampling-temperature override
    max_tokens: Optional[int] = None  # completion length cap override


class AgentCreateRequest(BaseModel):
    """Payload for POST /agents."""

    name: str  # unique registry key for the new agent
    goal: str  # high-level objective given to the agent
    model: Optional[str] = "gpt-4"  # LLM model the agent should use
    api_key: Optional[str] = None  # provider key; presumably falls back to env config — TODO confirm
    enable_planning: Optional[bool] = True  # whether the agent plans before acting
    tools: Optional[list[str]] = None  # names of built-in tools to attach


class TeamCreateRequest(BaseModel):
    """Payload for POST /teams."""

    name: str  # unique registry key for the new team
    goal: str  # shared objective for the team
    model: Optional[str] = "gpt-4"  # coordinator LLM model
    api_key: Optional[str] = None  # provider key for the coordinator


class TeamAddAgentRequest(BaseModel):
    """Payload for POST /teams/add-agent."""

    team: str  # existing team name
    agent: str  # existing agent name to attach
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@app.get("/")
async def root():
    """Service banner."""
    banner = {"message": "NeuroAgent API", "version": "0.1.0"}
    return banner
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@app.get("/agents")
async def list_agents():
    """Names of every registered agent."""
    names = [name for name in _agents]
    return {"agents": names}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@app.post("/agents")
async def create_agent(request: AgentCreateRequest):
    """Instantiate and register a new Agent.

    Returns 400 when the name is already taken or a requested tool name is
    rejected by the agent.
    """
    if request.name in _agents:
        raise HTTPException(status_code=400, detail="Agent already exists")

    new_agent = Agent(
        name=request.name,
        goal=request.goal,
        model=request.model,
        api_key=request.api_key,
        enable_planning=request.enable_planning
    )

    # Attach requested tools, translating bad names into HTTP 400s.
    for tool_name in (request.tools or []):
        try:
            new_agent.add_tool(tool_name)
        except ValueError as exc:
            raise HTTPException(status_code=400, detail=str(exc))

    _agents[request.name] = new_agent

    return {"message": f"Agent '{request.name}' created", "agent": request.name}
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@app.post("/agent/run")
async def run_agent(request: AgentRequest):
    """Run a registered agent's full loop on a message.

    Optional per-request overrides of model, temperature and max_tokens are
    applied to the agent before running. Returns 404 for unknown agents.
    """
    if request.agent not in _agents:
        raise HTTPException(status_code=404, detail="Agent not found")

    agent = _agents[request.agent]

    if request.model:
        agent.llm_provider.model = request.model
    # Fix: the original tested truthiness (`if request.temperature:`), which
    # silently ignored temperature=0.0 — a valid, fully deterministic setting.
    if request.temperature is not None:
        agent.temperature = request.temperature
    if request.max_tokens:
        agent.max_tokens = request.max_tokens

    result = await agent.run(request.message)

    return {"response": result, "agent": request.agent}
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
@app.post("/agent/chat")
async def chat_agent(request: AgentRequest):
    """Single-turn chat with a registered agent; 404 for unknown names."""
    target = _agents.get(request.agent)
    if target is None:
        raise HTTPException(status_code=404, detail="Agent not found")

    reply = await target.chat(request.message)
    return {"response": reply, "agent": request.agent}
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
@app.delete("/agents/{agent_name}")
async def delete_agent(agent_name: str):
    """Remove a registered agent; 404 when the name is unknown."""
    try:
        del _agents[agent_name]
    except KeyError:
        raise HTTPException(status_code=404, detail="Agent not found")
    return {"message": f"Agent '{agent_name}' deleted"}
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
@app.get("/teams")
async def list_teams():
    """Names of every registered team."""
    names = [name for name in _teams]
    return {"teams": names}
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
@app.post("/teams")
async def create_team(request: TeamCreateRequest):
    """Create and register a new Team; 400 when the name is taken."""
    if request.name in _teams:
        raise HTTPException(status_code=400, detail="Team already exists")

    _teams[request.name] = Team(
        name=request.name,
        goal=request.goal,
        model=request.model,
        api_key=request.api_key
    )

    return {"message": f"Team '{request.name}' created", "team": request.name}
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
@app.post("/teams/add-agent")
async def add_agent_to_team(request: TeamAddAgentRequest):
    """Attach an already-registered agent to an existing team."""
    if request.team not in _teams:
        raise HTTPException(status_code=404, detail="Team not found")
    if request.agent not in _agents:
        raise HTTPException(status_code=404, detail="Agent not found")

    _teams[request.team].add_agent(_agents[request.agent])

    return {"message": f"Agent '{request.agent}' added to team '{request.team}'"}
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
@app.post("/team/run")
async def run_team(request: AgentRequest):
    """Run a whole team on a task.

    NOTE: this endpoint reuses AgentRequest, so the team's name travels in
    the payload's `agent` field.
    """
    squad = _teams.get(request.agent)
    if squad is None:
        raise HTTPException(status_code=404, detail="Team not found")

    outcome = await squad.run(request.message)
    return {"response": outcome, "team": request.agent}
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
@app.delete("/teams/{team_name}")
async def delete_team(team_name: str):
    """Remove a registered team; 404 when the name is unknown."""
    try:
        del _teams[team_name]
    except KeyError:
        raise HTTPException(status_code=404, detail="Team not found")
    return {"message": f"Team '{team_name}' deleted"}
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
@app.get("/health")
async def health_check():
    """Liveness probe for deployment health checks."""
    probe = {"status": "healthy"}
    return probe
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def create_app() -> FastAPI:
    """Return the module-level FastAPI app (factory-style entry point).

    Note: this does not build a fresh app per call — every caller shares
    the singleton `app` and its registries.
    """
    return app
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
from typing import Dict, Set
|
|
2
|
+
import asyncio
|
|
3
|
+
import json
|
|
4
|
+
from fastapi import WebSocket, WebSocketDisconnect
|
|
5
|
+
|
|
6
|
+
from neuroagent.agents.agent import Agent
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class ConnectionManager:
    """Tracks open WebSocket connections, grouped by agent name."""

    def __init__(self):
        # agent name -> set of live WebSocket connections for that agent
        self.active_connections: Dict[str, Set[WebSocket]] = {}

    async def connect(self, websocket: WebSocket, agent_name: str):
        """Accept the handshake and register the socket under ``agent_name``."""
        await websocket.accept()
        self.active_connections.setdefault(agent_name, set()).add(websocket)

    def disconnect(self, websocket: WebSocket, agent_name: str):
        """Unregister a socket; safe to call even if it was never registered."""
        if agent_name in self.active_connections:
            self.active_connections[agent_name].discard(websocket)

    async def send_message(self, message: str, agent_name: str):
        """Best-effort fan-out of ``message`` to every socket for ``agent_name``.

        Iterates over a snapshot so a concurrent disconnect cannot invalidate
        the loop; a failing socket is skipped rather than aborting delivery.
        """
        for connection in list(self.active_connections.get(agent_name, ())):
            try:
                await connection.send_text(message)
            except Exception:
                # Fix: was a bare `except:`, which also swallowed
                # CancelledError and KeyboardInterrupt.
                pass

    async def broadcast(self, message: str):
        """Best-effort fan-out of ``message`` to every connected socket."""
        # Fix: the original iterated .items() without using the key.
        for connections in list(self.active_connections.values()):
            for connection in list(connections):
                try:
                    await connection.send_text(message)
                except Exception:
                    pass
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# Singleton connection registry shared by all websocket handlers.
manager = ConnectionManager()

# Agents addressable over WebSocket; populated via register_agent().
_agents: Dict[str, Agent] = {}
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
async def handle_websocket(websocket: WebSocket, agent_name: str):
    """Serve one WebSocket client bound to a registered agent.

    Protocol: the client sends JSON frames of the form {"message": "..."};
    the server replies with a "thinking" frame followed by a "response"
    frame, or an "error" frame on bad input.
    """
    await manager.connect(websocket, agent_name)

    if agent_name not in _agents:
        await websocket.send_json({
            "type": "error",
            "message": f"Agent '{agent_name}' not found"
        })
        await websocket.close()
        # Fix: connect() above registered this socket, but the original
        # returned here without unregistering it, leaving a closed socket
        # in the manager's registry forever.
        manager.disconnect(websocket, agent_name)
        return

    agent = _agents[agent_name]

    try:
        await websocket.send_json({
            "type": "connected",
            "message": f"Connected to agent {agent_name}",
            "agent": agent_name,
            "goal": agent.goal
        })

        while True:
            data = await websocket.receive_text()

            try:
                message_data = json.loads(data)
                message = message_data.get("message", "")

                if not message:
                    await websocket.send_json({
                        "type": "error",
                        "message": "No message provided"
                    })
                    continue

                await websocket.send_json({
                    "type": "thinking",
                    "message": "Agent is thinking..."
                })

                response = await agent.run(message)

                await websocket.send_json({
                    "type": "response",
                    "message": response,
                    "agent": agent_name
                })

            except json.JSONDecodeError:
                await websocket.send_json({
                    "type": "error",
                    "message": "Invalid JSON"
                })

    except WebSocketDisconnect:
        pass
    finally:
        # Fix: always unregister — the original only cleaned up on
        # WebSocketDisconnect, so any other exception (e.g. from
        # agent.run) leaked the registration. discard() makes a
        # double-disconnect harmless.
        manager.disconnect(websocket, agent_name)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def register_agent(agent_name: str, agent: Agent):
    """Make ``agent`` addressable over WebSocket under ``agent_name``.

    Overwrites any existing registration with the same name.
    """
    _agents[agent_name] = agent
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def get_registered_agents() -> Dict[str, Agent]:
    """Return the live registry of WebSocket-addressable agents.

    Note: this is the module-level dict itself, not a copy — mutations by
    the caller affect routing.
    """
    return _agents
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
from typing import List, Dict, Any, Optional
|
|
2
|
+
import asyncio
|
|
3
|
+
|
|
4
|
+
from neuroagent.agents.agent import Agent
|
|
5
|
+
from neuroagent.llm.base import LLMProvider, Message
|
|
6
|
+
from neuroagent.llm.openai_provider import OpenAIProvider
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Team:
    """A named group of Agents that collaborate on a shared task.

    Agents run either sequentially (each sees the accumulated context plus
    recent teammates' output) or in parallel (each works on the raw task
    independently); a coordinating LLM then checks completion and writes
    the final summary.
    """

    def __init__(
        self,
        name: str = "Team",
        goal: str = "Work together to accomplish complex tasks",
        llm_provider: Optional[LLMProvider] = None,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        coordination_enabled: bool = True
    ):
        self.name = name
        self.goal = goal
        self.coordination_enabled = coordination_enabled
        # Coordinator LLM for completion checks and summaries; always set
        # (falls back to a fresh OpenAIProvider), so the `if not
        # self.llm_provider` guards below only fire if a caller clears it.
        self.llm_provider = llm_provider or OpenAIProvider(api_key=api_key, model=model)

        self._agents: Dict[str, Agent] = {}
        # Insertion order drives the sequential round-robin.
        self._agent_order: List[str] = []

    def add_agent(
        self,
        agent: Agent,
        role: Optional[str] = None
    ) -> None:
        """Register an agent under its own name.

        ``role`` is accepted for interface compatibility but is currently
        unused.
        """
        self._agents[agent.name] = agent
        self._agent_order.append(agent.name)

    def get_agent(self, name: str) -> Optional[Agent]:
        """Look up an agent by name, or None when unknown."""
        return self._agents.get(name)

    def remove_agent(self, name: str) -> None:
        """Remove an agent by name; no-op when unknown."""
        if name in self._agents:
            del self._agents[name]
            self._agent_order.remove(name)

    def list_agents(self) -> List[str]:
        """Agent names in the order they were added."""
        return list(self._agent_order)

    async def run(
        self,
        task: str,
        max_rounds: int = 5,
        parallel: bool = False
    ) -> str:
        """Run the team on ``task`` and return a summarized result."""
        if not self._agents:
            return "Error: No agents in team"

        if parallel:
            return await self._run_parallel(task, max_rounds)
        else:
            return await self._run_sequential(task, max_rounds)

    async def _run_sequential(self, task: str, max_rounds: int) -> str:
        """Round-robin the agents for up to ``max_rounds`` rounds."""
        context = f"Original task: {task}\n\n"

        for round_num in range(max_rounds):
            round_results = []

            for agent_name in self._agent_order:
                agent = self._agents[agent_name]
                # Show each agent up to the last three teammate results from
                # the current round. Fix: the original interpolated the list
                # object itself, injecting a Python list repr (brackets,
                # quotes, escaped newlines) into the prompt.
                recent_work = "\n".join(round_results[-3:])
                agent_context = f"{context}\n\nOther agents' work:\n{recent_work}\n\nYour task: {task}"

                result = await agent.run(agent_context, max_iterations=2)
                round_results.append(f"{agent.name}: {result}")

            context += f"\nRound {round_num + 1}:\n" + "\n".join(round_results)

            if await self._check_completion(task, context):
                break

        return await self._summarize_results(task, context)

    async def _run_parallel(self, task: str, max_rounds: int) -> str:
        """Run every agent on the raw task concurrently, then summarize.

        ``max_rounds`` is accepted for signature parity with the sequential
        path but parallel execution performs a single round.
        """
        async def run_agent(agent: Agent):
            return agent.name, await agent.run(task, max_iterations=2)

        results = await asyncio.gather(*[
            run_agent(self._agents[name])
            for name in self._agent_order
        ])

        combined_results = "\n".join([f"{name}: {result}" for name, result in results])

        return await self._summarize_results(task, combined_results)

    async def _check_completion(self, task: str, context: str) -> bool:
        """Ask the coordinator LLM whether the task looks finished."""
        if not self.llm_provider:
            return True

        prompt = f"""Based on the following work, determine if the task is complete.

Task: {task}

Work done:
{context}

Is the task complete? Respond with just "YES" or "NO"."""

        response = await self.llm_provider.complete(prompt, max_tokens=10)
        # startswith() tolerates trailing punctuation/newlines such as
        # "YES." — the original's strict equality treated those as "NO".
        return response.content.strip().upper().startswith("YES")

    async def _summarize_results(self, task: str, context: str) -> str:
        """Have the coordinator LLM compress the team's work into a summary."""
        if not self.llm_provider:
            return context

        prompt = f"""Summarize the work done by the team to accomplish this task:

Task: {task}

Work completed:
{context}

Provide a clear summary of what was accomplished:"""

        response = await self.llm_provider.complete(prompt, max_tokens=500)
        return response.content

    async def broadcast(self, message: str) -> Dict[str, str]:
        """Send ``message`` to every agent concurrently; map name -> reply."""
        async def send_message(agent: Agent):
            return agent.name, await agent.chat(message)

        results = await asyncio.gather(*[
            send_message(self._agents[name])
            for name in self._agent_order
        ])

        return dict(results)
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from neuroagent.tools.base import tool, Tool, FunctionTool
|
|
2
|
+
from neuroagent.tools.web_search import WebSearchTool, web_search
|
|
3
|
+
from neuroagent.tools.code_executor import CodeExecutorTool, code_executor
|
|
4
|
+
from neuroagent.tools.file_manager import FileManagerTool, file_manager
|
|
5
|
+
from neuroagent.tools.http_client import HTTPClientTool, http_client
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"tool",
|
|
9
|
+
"Tool",
|
|
10
|
+
"FunctionTool",
|
|
11
|
+
"WebSearchTool",
|
|
12
|
+
"web_search",
|
|
13
|
+
"CodeExecutorTool",
|
|
14
|
+
"code_executor",
|
|
15
|
+
"FileManagerTool",
|
|
16
|
+
"file_manager",
|
|
17
|
+
"HTTPClientTool",
|
|
18
|
+
"http_client",
|
|
19
|
+
]
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from typing import Any, Dict, Optional, Callable
|
|
3
|
+
from functools import wraps
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class Tool(ABC):
    """Abstract base class for agent tools.

    Subclasses implement the async ``execute`` hook; instances are callable
    and delegate straight to it.
    """

    def __init__(self, name: str, description: str, func: Optional[Callable] = None):
        # Optional underlying callable (used by FunctionTool subclasses).
        self.func = func
        # Identity shown to the agent/LLM when selecting tools.
        self.name = name
        self.description = description

    @abstractmethod
    async def execute(self, *args, **kwargs) -> Any:
        """Run the tool and return its result. Must be overridden."""
        pass

    def __call__(self, *args, **kwargs):
        # Calling the instance returns the execute() coroutine; the caller
        # is responsible for awaiting it.
        return self.execute(*args, **kwargs)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class FunctionTool(Tool):
    """Concrete Tool that wraps a plain callable (sync or async)."""

    def __init__(self, name: str, description: str, func: Callable):
        super().__init__(name, description, func)
        # Kept under a private name so execute() dispatches on it.
        self._func = func

    async def execute(self, *args, **kwargs) -> Any:
        """Invoke the wrapped callable, awaiting it when it is a coroutine
        function.

        NOTE: `asyncio` is imported at the bottom of this module; that is
        resolved by the time execute() runs, since the import executes at
        module load.
        """
        fn = self._func
        if asyncio.iscoroutinefunction(fn):
            return await fn(*args, **kwargs)
        return fn(*args, **kwargs)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def tool(name: Optional[str] = None, description: str = ""):
    """Decorator that turns a function into a FunctionTool.

    ``name`` defaults to the function's ``__name__`` and ``description``
    to its docstring.
    """
    def decorator(func: Callable) -> FunctionTool:
        tool_name = name or func.__name__
        tool_description = description or func.__doc__ or ""

        # Bug fix: the original wrapped `func` in a synchronous wrapper, so
        # FunctionTool.execute's iscoroutinefunction() check always saw a
        # sync function. Decorating an async function therefore returned an
        # un-awaited coroutine object instead of its result. Passing the
        # function through unchanged preserves its (a)sync nature.
        return FunctionTool(tool_name, tool_description, func)

    return decorator
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
import asyncio
|