neuroagent 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +273 -0
- package/dist/index.d.ts +55 -0
- package/dist/index.js +215 -0
- package/neuroagent/__init__.py +29 -0
- package/neuroagent/agents/__init__.py +3 -0
- package/neuroagent/agents/agent.py +253 -0
- package/neuroagent/cli/__init__.py +3 -0
- package/neuroagent/cli/neuroagent_cli.py +182 -0
- package/neuroagent/examples/__init__.py +1 -0
- package/neuroagent/examples/dev_agent.py +31 -0
- package/neuroagent/examples/multi_agent_team.py +45 -0
- package/neuroagent/examples/website_agent.py +27 -0
- package/neuroagent/frontend/__init__.py +1 -0
- package/neuroagent/frontend/widget.js +409 -0
- package/neuroagent/llm/__init__.py +5 -0
- package/neuroagent/llm/base.py +46 -0
- package/neuroagent/llm/local_model_provider.py +76 -0
- package/neuroagent/llm/openai_provider.py +58 -0
- package/neuroagent/memory/__init__.py +5 -0
- package/neuroagent/memory/long_memory.py +52 -0
- package/neuroagent/memory/short_memory.py +39 -0
- package/neuroagent/memory/vector_memory.py +57 -0
- package/neuroagent/planner/__init__.py +3 -0
- package/neuroagent/planner/planner.py +90 -0
- package/neuroagent/server/__init__.py +16 -0
- package/neuroagent/server/api_server.py +191 -0
- package/neuroagent/server/websocket_server.py +108 -0
- package/neuroagent/team/__init__.py +3 -0
- package/neuroagent/team/team.py +134 -0
- package/neuroagent/tools/__init__.py +19 -0
- package/neuroagent/tools/base.py +45 -0
- package/neuroagent/tools/code_executor.py +69 -0
- package/neuroagent/tools/file_manager.py +62 -0
- package/neuroagent/tools/http_client.py +57 -0
- package/neuroagent/tools/web_search.py +48 -0
- package/neuroagent/utils/__init__.py +3 -0
- package/neuroagent/utils/helpers.py +31 -0
- package/package.json +56 -0
- package/requirements.txt +15 -0
- package/setup.py +61 -0
- package/src/index.d.ts +55 -0
- package/src/index.js +215 -0
- package/web_example/index.html +249 -0
- package/web_example/neuroagent.js +301 -0
- package/web_example/script.js +114 -0
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
from typing import List, Dict, Any, Optional, Callable
|
|
2
|
+
import asyncio
|
|
3
|
+
|
|
4
|
+
from neuroagent.llm.base import LLMProvider, Message
|
|
5
|
+
from neuroagent.llm.openai_provider import OpenAIProvider
|
|
6
|
+
from neuroagent.memory.short_memory import ShortMemory
|
|
7
|
+
from neuroagent.memory.long_memory import LongMemory
|
|
8
|
+
from neuroagent.memory.vector_memory import VectorMemory
|
|
9
|
+
from neuroagent.planner.planner import Planner, Plan
|
|
10
|
+
from neuroagent.tools.base import Tool, FunctionTool
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class Agent:
    """LLM-backed autonomous agent with optional planning, memory, and tools.

    The agent answers a prompt by optionally building a task plan, deciding
    whether a registered tool is needed, executing it, and synthesizing the
    final reply through the configured LLM provider.
    """

    def __init__(
        self,
        name: str,
        goal: str,
        llm_provider: Optional[LLMProvider] = None,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        enable_planning: bool = True,
        enable_short_memory: bool = True,
        enable_long_memory: bool = False,
        enable_vector_memory: bool = False,
        tools: Optional[List[Tool]] = None
    ):
        """Create an agent.

        Args:
            name: Agent display name, injected into the system prompt.
            goal: High-level objective, injected into the system prompt.
            llm_provider: Provider to use; defaults to an OpenAIProvider
                built from ``api_key`` and ``model``.
            model: Model name for the default provider.
            api_key: API key for the default provider.
            temperature: Sampling temperature for chat calls.
            max_tokens: Optional completion-length cap for chat calls.
            enable_planning: When True, a Planner decomposes each request.
            enable_short_memory: Keep a rolling short-term memory.
            enable_long_memory: Persist conversation transcripts long-term.
            enable_vector_memory: Store conversation transcripts in a vector store.
            tools: Initial tools, registered under their ``name`` attribute.
        """
        self.name = name
        self.goal = goal
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.enable_planning = enable_planning

        self.llm_provider = llm_provider or OpenAIProvider(api_key=api_key, model=model)

        # Disabled backends are None; every access below must guard for that.
        self.short_memory = ShortMemory() if enable_short_memory else None
        self.long_memory = LongMemory() if enable_long_memory else None
        self.vector_memory = VectorMemory() if enable_vector_memory else None

        self.planner = Planner(llm_provider=self.llm_provider) if enable_planning else None
        self.current_plan: Optional[Plan] = None

        self._tools: Dict[str, Tool] = {}
        if tools:
            for tool in tools:
                self._tools[tool.name] = tool

    def add_tool(self, tool_name: str) -> None:
        """Register one of the built-in tools by its short name.

        Raises:
            ValueError: If ``tool_name`` is not a known built-in tool.
        """
        tool_map = {
            "web_search": "neuroagent.tools.web_search.WebSearchTool",
            "code_executor": "neuroagent.tools.code_executor.CodeExecutorTool",
            "file_manager": "neuroagent.tools.file_manager.FileManagerTool",
            "http_client": "neuroagent.tools.http_client.HTTPClientTool",
        }

        if tool_name not in tool_map:
            raise ValueError(f"Unknown built-in tool: {tool_name}")

        # Import lazily so each tool's optional dependencies load on demand.
        import importlib
        module_name, class_name = tool_map[tool_name].rsplit(".", 1)
        module = importlib.import_module(module_name)
        tool_class = getattr(module, class_name)
        self._tools[tool_name] = tool_class()

    def register_tool(self, tool: Tool) -> None:
        """Register a pre-built Tool instance under its own ``name``."""
        self._tools[tool.name] = tool

    def tool(self, name: Optional[str] = None, description: str = ""):
        """Decorator that registers a plain function as a FunctionTool.

        The wrapped function is returned unchanged; its ``__name__`` and
        docstring serve as fallbacks for the tool name and description.
        """
        def decorator(func: Callable) -> Callable:
            tool_obj = FunctionTool(
                name=name or func.__name__,
                description=description or func.__doc__ or "",
                func=func
            )
            self._tools[tool_obj.name] = tool_obj
            return func
        return decorator

    def _build_system_prompt(self) -> str:
        """Compose the system prompt: identity, goal, tool list, plan status."""
        prompt = f"""You are {self.name}, an AI agent.

Your goal is: {self.goal}

You have access to the following tools:
"""
        for tool_name, tool in self._tools.items():
            prompt += f"- {tool_name}: {tool.description}\n"

        if self.current_plan:
            prompt += "\nCurrent plan:\n"
            for task in self.current_plan.tasks:
                status = "✓" if task.status == "completed" else "○"
                prompt += f"{status} {task.id}. {task.description}\n"

        return prompt

    def _get_memory_context(self) -> List[Message]:
        """Collect context messages from all enabled memory backends."""
        messages: List[Message] = []

        if self.vector_memory:
            messages.extend(self.vector_memory.to_messages())

        if self.long_memory:
            messages.extend(self.long_memory.to_messages())

        if self.short_memory:
            # Replay only the 10 most recent short-memory entries.
            for entry in self.short_memory.get_recent(10):
                messages.append(Message(role="user", content=entry["content"]))

        return messages

    async def _execute_tool(self, tool_name: str, args: str) -> str:
        """Run a registered tool; failures are returned as strings, not raised."""
        if tool_name not in self._tools:
            return f"Error: Tool '{tool_name}' not found"

        tool = self._tools[tool_name]

        try:
            result = await tool.execute(query=args)
            return str(result)
        except Exception as e:
            # Deliberate best-effort: a tool failure becomes part of the
            # conversation instead of aborting the whole run.
            return f"Error executing tool: {str(e)}"

    async def _decide_tool_use(self, prompt: str) -> Optional[tuple[str, str]]:
        """Ask the LLM whether a tool should handle ``prompt``.

        Returns:
            ``(tool_name, args)`` when the model selects a tool, else None.
        """
        if not self._tools:
            return None

        tool_descriptions = "\n".join([
            f"{name}: {tool.description}"
            for name, tool in self._tools.items()
        ])

        decision_prompt = f"""Should you use a tool to answer this user request?

User request: {prompt}

Available tools:
{tool_descriptions}

Respond in this format only:
TOOL: <tool_name>
ARGS: <arguments>

If no tool is needed, respond:
TOOL: none
ARGS: none
"""

        response = await self.llm_provider.complete(
            prompt=decision_prompt,
            max_tokens=100
        )

        tool_name = ""
        tool_args = ""

        for line in response.content.strip().split("\n"):
            if line.startswith("TOOL:"):
                tool_name = line.replace("TOOL:", "").strip()
            elif line.startswith("ARGS:"):
                tool_args = line.replace("ARGS:", "").strip()

        if tool_name and tool_name.lower() != "none":
            return tool_name, tool_args

        return None

    async def run(self, prompt: str, max_iterations: int = 10) -> str:
        """Answer ``prompt``, executing the plan if one was created.

        Args:
            prompt: User request.
            max_iterations: Upper bound on plan-execution loop iterations.

        Returns:
            The final assistant response. If the iteration budget runs out
            before the final synthesis step, the latest task result is
            returned instead of an empty string.
        """
        # Guard: short_memory is None when enable_short_memory=False
        # (the original dereferenced it unconditionally and crashed).
        if self.short_memory:
            self.short_memory.save(prompt, {"role": "user"})

        if self.enable_planning and self.planner:
            self.current_plan = await self.planner.create_plan(prompt)

        final_response = ""

        for _ in range(max_iterations):
            if (
                self.current_plan
                and self.planner
                and not self.planner.is_complete(self.current_plan)
            ):
                task = self.planner.get_next_task(self.current_plan)
                if task:
                    task_prompt = f"{prompt}\n\nCurrent task: {task.description}"
                    response = await self._process_input(task_prompt)
                    self.planner.complete_task(self.current_plan, task.id, response)
                    # Keep the latest task result as a fallback answer in case
                    # the iteration budget is exhausted before synthesis.
                    final_response = response
            else:
                final_response = await self._process_input(prompt)
                break

        if self.short_memory:
            self.short_memory.save(final_response, {"role": "assistant"})

        if self.long_memory:
            self.long_memory.save(f"User: {prompt}\nAssistant: {final_response}")

        if self.vector_memory:
            self.vector_memory.save(f"User: {prompt}\nAssistant: {final_response}")

        return final_response

    async def _process_input(self, prompt: str) -> str:
        """Produce one assistant reply, optionally routing through a tool."""
        tool_use = await self._decide_tool_use(prompt)

        if tool_use:
            tool_name, tool_args = tool_use
            tool_result = await self._execute_tool(tool_name, tool_args)

            user_content = f"""User request: {prompt}

Tool '{tool_name}' result: {tool_result}

Based on the tool result, provide a helpful response to the user.
"""
        else:
            user_content = prompt

        # Both branches of the original duplicated this tail; merged here.
        messages = [Message(role="system", content=self._build_system_prompt())]
        messages.extend(self._get_memory_context())
        messages.append(Message(role="user", content=user_content))

        response = await self.llm_provider.chat(
            messages, temperature=self.temperature, max_tokens=self.max_tokens
        )
        return response.content

    def memory(self, type: str = "short") -> Any:
        """Return the requested memory backend (None when disabled).

        Raises:
            ValueError: For an unknown memory type.
        """
        # NOTE: ``type`` shadows the builtin; kept for interface compatibility.
        if type == "short":
            return self.short_memory
        elif type == "long":
            return self.long_memory
        elif type == "vector":
            return self.vector_memory
        raise ValueError(f"Unknown memory type: {type}")

    def save_memory(self, content: str, memory_type: str = "short") -> None:
        """Save ``content`` into one memory backend; silent no-op if disabled."""
        if memory_type == "short" and self.short_memory:
            self.short_memory.save(content)
        elif memory_type == "long" and self.long_memory:
            self.long_memory.save(content)
        elif memory_type == "vector" and self.vector_memory:
            self.vector_memory.save(content)

    async def chat(self, message: str) -> str:
        """Single-turn convenience wrapper around :meth:`run`."""
        return await self.run(message, max_iterations=1)
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import click
|
|
2
|
+
import os
|
|
3
|
+
import sys
|
|
4
|
+
import asyncio
|
|
5
|
+
import json
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# Root click group; subcommands attach themselves via @cli.command().
# NOTE(review): version "0.1.0" may lag the published package version — confirm.
@click.group()
@click.version_option(version="0.1.0")
def cli():
    """NeuroAgent - AI Agent Framework"""
    pass
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@cli.command()
@click.argument("project_name")
def init(project_name):
    """Initialize a new NeuroAgent project"""
    # Scaffolds a starter project: main.py, agents.py, tools.py, .env,
    # requirements.txt, and a README. Refuses to overwrite a non-empty dir.
    project_path = Path(project_name)

    if project_path.exists() and any(project_path.iterdir()):
        click.echo(f"Error: Directory '{project_name}' is not empty", err=True)
        sys.exit(1)

    project_path.mkdir(parents=True, exist_ok=True)

    files = {
        # BUG FIX: the generated main.py called asyncio.run() without
        # importing asyncio, so every scaffolded project crashed on start.
        "main.py": '''"""NeuroAgent Project"""
import asyncio

from neuroagent import Agent

agent = Agent(
    name="MyAgent",
    goal="Your agent goal here"
)

if __name__ == "__main__":
    result = asyncio.run(agent.run("Hello, world!"))
    print(result)
''',
        "agents.py": '''"""Define your agents here"""
from neuroagent import Agent

def create_support_agent():
    return Agent(
        name="SupportAgent",
        goal="Help users with their questions"
    )

def create_coder_agent():
    return Agent(
        name="CoderAgent",
        goal="Write code and help with programming"
    )
''',
        "tools.py": '''"""Custom tools for your agents"""
from neuroagent.tools import tool

@tool(name="my_custom_tool", description="Description of what this tool does")
def my_custom_tool(param: str):
    """Your custom tool implementation"""
    return f"Result: {param}"
''',
        ".env": '''# Environment variables
OPENAI_API_KEY=your-api-key-here
''',
        "requirements.txt": """neuroagent>=0.1.0
python-dotenv>=1.0.0
""",
        "README.md": f"""# {project_name}

NeuroAgent project

## Setup

1. Install dependencies:
   pip install -r requirements.txt

2. Configure environment variables in .env

3. Run:
   python main.py
"""
    }

    for filename, content in files.items():
        # pathlib handles open/close; simpler than an explicit with-block.
        (project_path / filename).write_text(content)

    click.echo(f"Project '{project_name}' created successfully!")
    click.echo("\nNext steps:")
    click.echo(f"  cd {project_name}")
    click.echo("  pip install -r requirements.txt")
    click.echo("  python main.py")
|
96
|
+
|
|
97
|
+
|
|
98
|
+
@cli.command()
@click.argument("agent_name")
@click.option("--goal", "-g", default="", help="Agent goal")
@click.option("--model", "-m", default="gpt-4", help="Model to use")
def create_agent(agent_name, goal, model):
    """Create a new agent configuration"""
    if not goal:
        # BUG FIX: click.prompt appends ": " itself; the original text
        # produced a doubled separator ("Enter agent goal: : ").
        goal = click.prompt("Enter agent goal")

    agent_config = {
        "name": agent_name,
        "goal": goal,
        "model": model
    }

    # Configurations live under ./agents/<name>.json relative to the CWD.
    config_path = Path("agents") / f"{agent_name}.json"
    config_path.parent.mkdir(exist_ok=True)

    with open(config_path, "w") as f:
        json.dump(agent_config, f, indent=2)

    click.echo(f"Agent '{agent_name}' created!")
|
120
|
+
|
|
121
|
+
|
|
122
|
+
@cli.command()
@click.argument("agent_name")
@click.option("--message", "-m", default="Hello", help="Message to send")
def run(agent_name, message):
    """Run an agent"""
    # Placeholder command: only reports what would be run; the actual
    # execution path goes through the server, not this CLI entry.
    for line in (
        f"Running agent '{agent_name}' with message: {message}",
        "(This requires the agent to be registered in the server)",
    ):
        click.echo(line)
|
129
|
+
|
|
130
|
+
|
|
131
|
+
@cli.command()
@click.option("--host", "-h", default="0.0.0.0", help="Host to bind to")
@click.option("--port", "-p", default=8000, help="Port to bind to")
@click.option("--reload", "-r", is_flag=True, help="Enable auto-reload")
def start_server(host, port, reload):
    """Start the NeuroAgent API server"""
    # Imports kept local so the CLI works without server extras installed.
    import uvicorn
    from neuroagent.server.api_server import app  # eager import surfaces config errors early

    click.echo(f"Starting NeuroAgent server on {host}:{port}")

    # uvicorn re-imports the app from its module path (required for --reload).
    server_options = {"host": host, "port": port, "reload": reload}
    uvicorn.run("neuroagent.server.api_server:app", **server_options)
|
148
|
+
|
|
149
|
+
|
|
150
|
+
@cli.command()
@click.option("--host", "-h", default="0.0.0.0", help="Host to bind to")
@click.option("--port", "-p", default=8001, help="Port to bind to")
def start_websocket(host, port):
    """Start the NeuroAgent WebSocket server"""
    import uvicorn
    # NOTE(review): imported but unused — presumably pulls in handler
    # registration side effects; confirm against the server module.
    from neuroagent.server import websocket_server  # noqa: F401

    click.echo(f"Starting WebSocket server on {host}:{port}")

    # BUG FIX: uvicorn invokes an ASGI app as app(scope, receive, send);
    # the original zero-argument coroutine raised TypeError on every
    # connection. This placeholder now has the correct ASGI signature.
    async def ws_app(scope, receive, send):
        while True:
            await asyncio.sleep(1)

    uvicorn.run(ws_app, host=host, port=port)
|
165
|
+
|
|
166
|
+
|
|
167
|
+
@cli.command()
def list_agents():
    """List all available agents"""
    # NOTE(review): despite the command name and help text, this prints the
    # built-in TOOL names, not registered agents — confirm intended behavior.
    click.echo("Available built-in tools:")
    click.echo("  - web_search")
    click.echo("  - code_executor")
    click.echo("  - file_manager")
    click.echo("  - http_client")
|
176
|
+
|
|
177
|
+
def main():
    """Console-script entry point; dispatches to the click CLI group."""
    cli()


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Examples module
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"""Developer Agent Example"""
|
|
2
|
+
import asyncio
|
|
3
|
+
from neuroagent import Agent
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
async def main():
    """Demo: a coding-focused agent running a couple of sample tasks."""
    coder = Agent(
        name="CoderAI",
        goal="Create applications, write code, and help with programming tasks",
        model="gpt-4"
    )

    # Equip the agent with the built-in developer tools.
    for tool_name in ("code_executor", "file_manager", "web_search"):
        coder.add_tool(tool_name)

    print("CoderAI - Ready to help with coding tasks!\n")

    sample_tasks = (
        "Write a Python function to calculate fibonacci numbers",
        "What is the best way to learn React?",
    )

    for task in sample_tasks:
        print(f"\nTask: {task}")
        result = await coder.run(task)
        print(f"Result:\n{result}\n")


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""Multi-Agent Team Example"""
|
|
2
|
+
import asyncio
|
|
3
|
+
from neuroagent import Agent, Team
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
async def main():
    """Demo: assemble a three-agent team and run a single task through it."""
    research_agent = Agent(
        name="ResearchAgent",
        goal="Research topics and gather information"
    )
    research_agent.add_tool("web_search")

    dev_agent = Agent(
        name="DevAgent",
        goal="Write code and create applications"
    )
    for tool_name in ("code_executor", "file_manager"):
        dev_agent.add_tool(tool_name)

    deploy_agent = Agent(
        name="DeployAgent",
        goal="Deploy applications and manage infrastructure"
    )
    deploy_agent.add_tool("http_client")

    team = Team(
        name="SaaSTeam",
        goal="Create and deploy complete SaaS applications"
    )

    # Each member joins the team under a role label.
    members = (
        (research_agent, "research"),
        (dev_agent, "development"),
        (deploy_agent, "deployment"),
    )
    for agent, role in members:
        team.add_agent(agent, role=role)

    print("SaaS Team assembled!\n")

    task = "Create a simple task management app"
    print(f"Task: {task}\n")

    result = await team.run(task, max_rounds=3)
    print(f"Final Result:\n{result}")


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Website Assistant Example"""
|
|
2
|
+
import asyncio
|
|
3
|
+
from neuroagent import Agent
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
async def main():
    """Demo: a website assistant answering a fixed list of questions."""
    assistant = Agent(
        name="WebsiteAssistant",
        goal="Help users of the website with their questions and provide information"
    )
    assistant.add_tool("web_search")

    sample_questions = (
        "What is NeuroAgent?",
        "How do I install it?",
        "Can you help me create an agent?",
    )

    for question in sample_questions:
        print(f"\nUser: {question}")
        response = await assistant.chat(question)
        print(f"Assistant: {response}")


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Frontend module
|