agentinit-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentinit/__init__.py +1 -0
- agentinit/cli.py +83 -0
- agentinit/generator.py +103 -0
- agentinit/templates/autogen/agents/base_agent.py.j2 +28 -0
- agentinit/templates/autogen/main.py.j2 +7 -0
- agentinit/templates/autogen/requirements.txt.j2 +5 -0
- agentinit/templates/autogen/tools/sample_tool.py.j2 +3 -0
- agentinit/templates/common/.env.example.j2 +2 -0
- agentinit/templates/common/Dockerfile.j2 +21 -0
- agentinit/templates/common/README.md.j2 +21 -0
- agentinit/templates/common/config.yaml.j2 +6 -0
- agentinit/templates/crewai/agents/base_agent.py.j2 +27 -0
- agentinit/templates/crewai/main.py.j2 +9 -0
- agentinit/templates/crewai/requirements.txt.j2 +5 -0
- agentinit/templates/crewai/tools/sample_tool.py.j2 +12 -0
- agentinit/templates/google_adk/agents/base_agent.py.j2 +11 -0
- agentinit/templates/google_adk/main.py.j2 +28 -0
- agentinit/templates/google_adk/requirements.txt.j2 +5 -0
- agentinit/templates/google_adk/tools/sample_tool.py.j2 +3 -0
- agentinit/templates/langgraph/agents/base_agent.py.j2 +32 -0
- agentinit/templates/langgraph/main.py.j2 +9 -0
- agentinit/templates/langgraph/requirements.txt.j2 +8 -0
- agentinit/templates/langgraph/tools/sample_tool.py.j2 +7 -0
- agentinit/templates/openai_agents/agents/base_agent.py.j2 +36 -0
- agentinit/templates/openai_agents/main.py.j2 +10 -0
- agentinit/templates/openai_agents/requirements.txt.j2 +8 -0
- agentinit/templates/openai_agents/tools/sample_tool.py.j2 +7 -0
- agentinit/templates/smolagents/agents/base_agent.py.j2 +52 -0
- agentinit/templates/smolagents/main.py.j2 +9 -0
- agentinit/templates/smolagents/requirements.txt.j2 +10 -0
- agentinit/templates/smolagents/tools/sample_tool.py.j2 +7 -0
- agentinit_cli-0.1.0.dist-info/METADATA +133 -0
- agentinit_cli-0.1.0.dist-info/RECORD +35 -0
- agentinit_cli-0.1.0.dist-info/WHEEL +4 -0
- agentinit_cli-0.1.0.dist-info/entry_points.txt +3 -0
agentinit/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Package version; keep in sync with the wheel metadata (agentinit-cli 0.1.0).
__version__ = "0.1.0"
|
agentinit/cli.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
import click
|
|
2
|
+
from rich.console import Console
|
|
3
|
+
from rich.table import Table
|
|
4
|
+
from agentinit.generator import generate_project, add_agent
|
|
5
|
+
|
|
6
|
+
# Shared Rich console used by every command for styled terminal output.
console = Console()

# Frameworks `agentinit init` can scaffold; each name corresponds to a
# template directory under agentinit/templates/<framework>/.
SUPPORTED_FRAMEWORKS = ["langgraph", "crewai", "autogen", "google_adk", "openai_agents", "smolagents"]
# LLM providers with generator support (see LLM_CONFIG in agentinit.generator).
SUPPORTED_LLMS = ["openai", "anthropic", "groq", "azure", "bedrock", "gemini"]
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# Root Click group; the init / add-agent / list-frameworks subcommands
# attach to it below. The docstring doubles as the CLI's --help text.
@click.group()
def app():
    """agentinit - scaffold LLM agent projects in seconds"""
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@app.command("init")
|
|
19
|
+
@click.argument("project_name")
|
|
20
|
+
@click.option("--framework", "-f", required=True, help="Agent framework to use")
|
|
21
|
+
@click.option("--llm", "-l", required=True, help="LLM provider to use")
|
|
22
|
+
def init(project_name, framework, llm):
|
|
23
|
+
"""Scaffold a new LLM agent project."""
|
|
24
|
+
if framework not in SUPPORTED_FRAMEWORKS:
|
|
25
|
+
console.print(f"[red]Framework '{framework}' is not supported.[/red]")
|
|
26
|
+
console.print(f"Supported: {', '.join(SUPPORTED_FRAMEWORKS)}")
|
|
27
|
+
raise click.Abort()
|
|
28
|
+
|
|
29
|
+
if llm not in SUPPORTED_LLMS:
|
|
30
|
+
console.print(f"[red]LLM provider '{llm}' is not supported.[/red]")
|
|
31
|
+
console.print(f"Supported: {', '.join(SUPPORTED_LLMS)}")
|
|
32
|
+
raise click.Abort()
|
|
33
|
+
|
|
34
|
+
console.print(f"\n[bold green]Scaffolding project:[/bold green] {project_name}")
|
|
35
|
+
console.print(f" Framework : [cyan]{framework}[/cyan]")
|
|
36
|
+
console.print(f" LLM : [cyan]{llm}[/cyan]\n")
|
|
37
|
+
|
|
38
|
+
success = generate_project(project_name, framework, llm)
|
|
39
|
+
|
|
40
|
+
if not success:
|
|
41
|
+
raise click.Abort()
|
|
42
|
+
|
|
43
|
+
console.print(f"\n[bold green]Done![/bold green] Your project is ready at [cyan]./{project_name}[/cyan]")
|
|
44
|
+
console.print("\nNext steps:")
|
|
45
|
+
console.print(f" [yellow]cd {project_name}[/yellow]")
|
|
46
|
+
console.print(" [yellow]cp .env.example .env[/yellow]")
|
|
47
|
+
console.print(" [yellow]pip install -r requirements.txt[/yellow]")
|
|
48
|
+
console.print(" [yellow]python main.py[/yellow]\n")
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
@app.command("add-agent")
|
|
52
|
+
@click.argument("agent_name")
|
|
53
|
+
@click.option("--framework", "-f", required=True, help="Framework of the existing project")
|
|
54
|
+
def add_agent_cmd(agent_name, framework):
|
|
55
|
+
"""Add a new agent to an existing scaffolded project."""
|
|
56
|
+
if framework not in SUPPORTED_FRAMEWORKS:
|
|
57
|
+
console.print(f"[red]Framework '{framework}' is not supported.[/red]")
|
|
58
|
+
console.print(f"Supported: {', '.join(SUPPORTED_FRAMEWORKS)}")
|
|
59
|
+
raise click.Abort()
|
|
60
|
+
|
|
61
|
+
console.print(f"\n[bold green]Adding agent:[/bold green] {agent_name}")
|
|
62
|
+
console.print(f" Framework : [cyan]{framework}[/cyan]\n")
|
|
63
|
+
|
|
64
|
+
success = add_agent(agent_name, framework)
|
|
65
|
+
|
|
66
|
+
if not success:
|
|
67
|
+
raise click.Abort()
|
|
68
|
+
|
|
69
|
+
console.print(f"\n[bold green]Done![/bold green] Agent [cyan]agents/{agent_name}.py[/cyan] created.\n")
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@app.command("list-frameworks")
|
|
73
|
+
def list_frameworks():
|
|
74
|
+
"""List all supported frameworks and LLM providers."""
|
|
75
|
+
table = Table(title="Supported Frameworks & LLM Providers")
|
|
76
|
+
table.add_column("Frameworks", style="cyan")
|
|
77
|
+
table.add_column("LLM Providers", style="green")
|
|
78
|
+
|
|
79
|
+
rows = list(zip(SUPPORTED_FRAMEWORKS, SUPPORTED_LLMS))
|
|
80
|
+
for fw, llm in rows:
|
|
81
|
+
table.add_row(fw, llm)
|
|
82
|
+
|
|
83
|
+
console.print(table)
|
agentinit/generator.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from jinja2 import Environment, FileSystemLoader
|
|
3
|
+
from rich.console import Console
|
|
4
|
+
|
|
5
|
+
# Shared Rich console for generator progress output.
console = Console()

# Bundled Jinja2 templates live alongside this module.
TEMPLATES_DIR = Path(__file__).parent / "templates"

# Per-provider template context: which env var holds the API key, and the
# default model id baked into the scaffolded project.
LLM_CONFIG = {
    "openai": {"api_key_env": "OPENAI_API_KEY", "model": "gpt-4o"},
    "anthropic": {"api_key_env": "ANTHROPIC_API_KEY", "model": "claude-sonnet-4-20250514"},
    "groq": {"api_key_env": "GROQ_API_KEY", "model": "llama3-70b-8192"},
    "azure": {"api_key_env": "AZURE_OPENAI_API_KEY", "model": "gpt-4o"},
    "bedrock": {"api_key_env": "AWS_ACCESS_KEY_ID", "model": "anthropic.claude-3-5-sonnet-20241022-v2:0"},
    "gemini": {"api_key_env": "GOOGLE_API_KEY", "model": "gemini-2.0-flash"},
}

# (template name, output path) pairs rendered from the chosen framework's
# template directory (templates/<framework>/).
FRAMEWORK_FILES = [
    ("main.py.j2", "main.py"),
    ("requirements.txt.j2", "requirements.txt"),
    ("agents/base_agent.py.j2", "agents/base_agent.py"),
    ("tools/sample_tool.py.j2", "tools/sample_tool.py"),
]

# Framework-agnostic files rendered from templates/common/.
COMMON_FILES = [
    ("config.yaml.j2", "config/config.yaml"),
    ("README.md.j2", "README.md"),
    ("Dockerfile.j2", "Dockerfile"),
]
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def write_file(output_path: Path, content: str) -> None:
    """Write *content* to *output_path*, creating parent directories as needed.

    Writes UTF-8 explicitly: ``Path.write_text`` otherwise uses the locale's
    preferred encoding (e.g. cp1252 on Windows), which can corrupt generated
    files containing non-ASCII characters.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(content, encoding="utf-8")
    console.print(f" [green]created[/green] {output_path}")
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def generate_project(project_name: str, framework: str, llm: str) -> bool:
    """Render a complete project skeleton into ./<project_name>.

    Renders the framework-specific files, then the shared common files, then
    writes a provider-appropriate .env.example. Returns True on success,
    False when the target directory already exists.
    """
    project_path = Path(project_name)
    if project_path.exists():
        console.print(f"[red]Directory '{project_name}' already exists. Aborting.[/red]")
        return False

    # Template context: project metadata plus the provider's key env var
    # and default model from LLM_CONFIG.
    context = {
        "project_name": project_name,
        "framework": framework,
        "llm_provider": llm,
        **LLM_CONFIG[llm],
    }

    # Framework templates first, then the framework-agnostic common ones.
    for template_root, file_pairs in ((framework, FRAMEWORK_FILES), ("common", COMMON_FILES)):
        env = Environment(loader=FileSystemLoader(str(TEMPLATES_DIR / template_root)))
        for template_file, output_file in file_pairs:
            rendered = env.get_template(template_file).render(**context)
            write_file(project_path / output_file, rendered)

    # .env.example is assembled in code (build_env_file) rather than templated.
    write_file(project_path / ".env.example", build_env_file(llm))
    return True
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def build_env_file(llm: str) -> str:
    """Return the text of .env.example for the given provider.

    Azure and Bedrock require several variables; every other provider gets a
    single API-key placeholder derived from LLM_CONFIG.
    """
    multi_var_providers = {
        "azure": [
            "AZURE_OPENAI_API_KEY=your_api_key_here",
            "AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/",
        ],
        "bedrock": [
            "AWS_ACCESS_KEY_ID=your_access_key_here",
            "AWS_SECRET_ACCESS_KEY=your_secret_key_here",
            "AWS_REGION=us-east-1",
        ],
    }
    env_lines = multi_var_providers.get(llm)
    if env_lines is None:
        env_lines = [f"{LLM_CONFIG[llm]['api_key_env']}=your_api_key_here"]
    return "\n".join(env_lines) + "\n"
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def add_agent(agent_name: str, framework: str) -> bool:
    """Render a new agent module into the current project's agents/ folder.

    Expects to run from the root of a previously scaffolded project (the
    check is relative to the current working directory). Returns True on
    success, False on any precondition failure.
    """
    agents_path = Path("agents")

    # Heuristic "am I inside a scaffolded project?" check.
    if not agents_path.exists():
        console.print("[red]No 'agents/' folder found. Are you inside a scaffolded project?[/red]")
        return False

    output_path = agents_path / f"{agent_name}.py"

    # Never overwrite an existing agent module.
    if output_path.exists():
        console.print(f"[red]Agent '{agent_name}.py' already exists. Aborting.[/red]")
        return False

    template_path = TEMPLATES_DIR / framework / "agents" / "base_agent.py.j2"
    if not template_path.exists():
        console.print(f"[red]No template found for framework '{framework}'.[/red]")
        return False

    env = Environment(loader=FileSystemLoader(str(TEMPLATES_DIR / framework / "agents")))
    template = env.get_template("base_agent.py.j2")
    # NOTE(review): the new agent is always rendered with the "openai"
    # provider config, regardless of which provider the project was
    # scaffolded with — confirm whether the project's actual provider
    # should be detected (e.g. from config/config.yaml) instead.
    content = template.render(project_name=agent_name, framework=framework, llm_provider="openai", **LLM_CONFIG["openai"])
    write_file(output_path, content)
    return True
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import os
import autogen

# OpenAI-style config list consumed by AutoGen; model/key names are injected
# at template-render time, with provider-specific extras for anthropic/groq.
config_list = [
    {
        "model": "{{ model }}",
        "api_key": os.getenv("{{ api_key_env }}"),
        {% if llm_provider == "anthropic" %}"api_type": "anthropic",{% endif %}
        {% if llm_provider == "groq" %}"base_url": "https://api.groq.com/openai/v1",{% endif %}
    }
]

llm_config = {"config_list": config_list}

# LLM-backed assistant that produces the replies.
assistant = autogen.AssistantAgent(
    name="assistant",
    llm_config=llm_config,
)

# Proxy standing in for the human: never prompts, auto-replies at most once.
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1,
)


def run_agent(message: str):
    """Start a single chat round between the user proxy and the assistant."""
    user_proxy.initiate_chat(assistant, message=message)
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# Minimal runtime image for the scaffolded agent project.
FROM python:3.11-slim

WORKDIR /app

# Install dependencies first so Docker layer caching survives code-only edits.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Empty placeholder credentials; supply real values at `docker run -e ...`
# time (or via an env file) rather than baking secrets into the image.
ENV {{ api_key_env }}=""
{% if llm_provider == "azure" %}
ENV AZURE_OPENAI_ENDPOINT=""
ENV AZURE_OPENAI_API_KEY=""
{% endif %}
{% if llm_provider == "bedrock" %}
ENV AWS_ACCESS_KEY_ID=""
ENV AWS_SECRET_ACCESS_KEY=""
ENV AWS_REGION="us-east-1"
{% endif %}

CMD ["python", "main.py"]
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# {{ project_name }}
|
|
2
|
+
|
|
3
|
+
Scaffolded with agentinit.
|
|
4
|
+
|
|
5
|
+
## Stack
|
|
6
|
+
- Framework: {{ framework }}
|
|
7
|
+
- LLM Provider: {{ llm_provider }}
|
|
8
|
+
- Model: {{ model }}
|
|
9
|
+
|
|
10
|
+
## Getting started
|
|
11
|
+
```bash
cp .env.example .env
pip install -r requirements.txt
python main.py
```
|
|
14
|
+
|
|
15
|
+
## Structure
|
|
16
|
+
├── agents/base_agent.py
|
|
17
|
+
├── tools/sample_tool.py
|
|
18
|
+
├── config/config.yaml
|
|
19
|
+
├── main.py
|
|
20
|
+
├── .env.example
|
|
21
|
+
└── requirements.txt
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import os
from crewai import Agent, Task, Crew, Process
from tools.sample_tool import sample_tool

# Single general-purpose agent; the model string is injected at render time
# in CrewAI's "<provider>/<model>" format.
base_agent = Agent(
    role="Assistant",
    goal="Help the user accomplish their task",
    backstory="You are a helpful AI assistant.",
    tools=[sample_tool],
    verbose=True,
    llm="{{ llm_provider }}/{{ model }}",
)

# Starter task wired to the agent above.
task = Task(
    description="Greet the user and introduce yourself.",
    expected_output="A friendly greeting message.",
    agent=base_agent,
)


def build_crew() -> Crew:
    """Assemble a one-agent, one-task crew that runs sequentially."""
    return Crew(
        agents=[base_agent],
        tasks=[task],
        process=Process.sequential,
        verbose=True,
    )
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
from crewai.tools import BaseTool


class SampleTool(BaseTool):
    """Minimal CrewAI tool stub; replace _run's body with real logic."""

    # BaseTool requires annotated name/description class attributes.
    name: str = "Sample Tool"
    description: str = "A sample tool stub. Replace with your real logic."

    def _run(self, query: str) -> str:
        # Echoes the input back; swap in the tool's actual behavior.
        return f"Tool received: {query}"


# Ready-to-use instance imported by the scaffolded agent.
sample_tool = SampleTool()
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import os
from google.adk.agents import Agent
from tools.sample_tool import sample_tool

# Root agent executed by the ADK Runner in main.py; the agent name and
# model id are injected at template-render time.
root_agent = Agent(
    name="{{ project_name }}_agent",
    model="{{ model }}",
    description="A helpful AI assistant.",
    instruction="You are a helpful assistant. Answer the user's questions clearly.",
    tools=[sample_tool],
)
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import asyncio
from dotenv import load_dotenv
from agents.base_agent import root_agent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types

load_dotenv()

APP_NAME = "{{ project_name }}"
USER_ID = "user_01"
SESSION_ID = "session_01"


async def main():
    """Create a session, send one message to the agent, print the reply."""
    session_service = InMemorySessionService()
    await session_service.create_session(
        app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID
    )
    runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
    content = types.Content(role="user", parts=[types.Part(text="Hello, agent!")])
    async for event in runner.run_async(user_id=USER_ID, session_id=SESSION_ID, new_message=content):
        # ADK events carry their payload in `event.content` (a types.Content);
        # there is no `event.response` attribute, so the original
        # `event.response.text` raised AttributeError on the final event.
        if event.is_final_response() and event.content and event.content.parts:
            print(event.content.parts[0].text)


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from langgraph.graph import StateGraph, END
|
|
3
|
+
from typing import TypedDict, List
|
|
4
|
+
|
|
5
|
+
{% if llm_provider == "openai" %}
|
|
6
|
+
from langchain_openai import ChatOpenAI
|
|
7
|
+
llm = ChatOpenAI(model="{{ model }}", api_key=os.getenv("{{ api_key_env }}"))
|
|
8
|
+
{% elif llm_provider == "anthropic" %}
|
|
9
|
+
from langchain_anthropic import ChatAnthropic
|
|
10
|
+
llm = ChatAnthropic(model="{{ model }}", api_key=os.getenv("{{ api_key_env }}"))
|
|
11
|
+
{% elif llm_provider == "groq" %}
|
|
12
|
+
from langchain_groq import ChatGroq
|
|
13
|
+
llm = ChatGroq(model="{{ model }}", api_key=os.getenv("{{ api_key_env }}"))
|
|
14
|
+
{% endif %}
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class AgentState(TypedDict):
|
|
18
|
+
messages: List[dict]
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def agent_node(state: AgentState) -> AgentState:
|
|
22
|
+
response = llm.invoke(state["messages"])
|
|
23
|
+
state["messages"].append({"role": "assistant", "content": response.content})
|
|
24
|
+
return state
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def build_graph() -> StateGraph:
|
|
28
|
+
graph = StateGraph(AgentState)
|
|
29
|
+
graph.add_node("agent", agent_node)
|
|
30
|
+
graph.set_entry_point("agent")
|
|
31
|
+
graph.add_edge("agent", END)
|
|
32
|
+
return graph.compile()
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from agents import Agent
|
|
3
|
+
from tools.sample_tool import sample_tool
|
|
4
|
+
|
|
5
|
+
{% if llm_provider == "openai" %}
|
|
6
|
+
from openai import AsyncOpenAI
|
|
7
|
+
client = AsyncOpenAI(api_key=os.getenv("{{ api_key_env }}"))
|
|
8
|
+
{% elif llm_provider == "azure" %}
|
|
9
|
+
from openai import AsyncAzureOpenAI
|
|
10
|
+
client = AsyncAzureOpenAI(
|
|
11
|
+
api_key=os.getenv("AZURE_OPENAI_API_KEY"),
|
|
12
|
+
azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
|
|
13
|
+
api_version="2024-02-01",
|
|
14
|
+
)
|
|
15
|
+
{% elif llm_provider == "gemini" %}
|
|
16
|
+
from openai import AsyncOpenAI
|
|
17
|
+
client = AsyncOpenAI(
|
|
18
|
+
api_key=os.getenv("{{ api_key_env }}"),
|
|
19
|
+
base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
|
|
20
|
+
)
|
|
21
|
+
{% elif llm_provider == "bedrock" %}
|
|
22
|
+
from openai import AsyncOpenAI
|
|
23
|
+
client = AsyncOpenAI(
|
|
24
|
+
api_key=os.getenv("{{ api_key_env }}"),
|
|
25
|
+
base_url="https://bedrock-runtime.{{ region }}.amazonaws.com",
|
|
26
|
+
)
|
|
27
|
+
{% endif %}
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def create_agent() -> Agent:
|
|
31
|
+
return Agent(
|
|
32
|
+
name="{{ project_name }}_agent",
|
|
33
|
+
instructions="You are a helpful assistant. Answer the user's questions clearly.",
|
|
34
|
+
model="{{ model }}",
|
|
35
|
+
tools=[sample_tool],
|
|
36
|
+
)
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
openai-agents
|
|
2
|
+
{% if llm_provider == "openai" %}openai{% endif %}
|
|
3
|
+
{% if llm_provider == "azure" %}openai
|
|
4
|
+
azure-identity{% endif %}
|
|
5
|
+
{% if llm_provider == "bedrock" %}boto3
|
|
6
|
+
anthropic[bedrock]{% endif %}
|
|
7
|
+
{% if llm_provider == "gemini" %}google-generativeai{% endif %}
|
|
8
|
+
python-dotenv
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import os
from smolagents import CodeAgent
from tools.sample_tool import sample_tool

# Select a smolagents model wrapper for the chosen provider: OpenAI-compatible
# endpoints (openai, azure) use OpenAIServerModel; everything else is routed
# through LiteLLM with a "<provider>/<model>" id.
{% if llm_provider == "openai" %}
from smolagents import OpenAIServerModel
model = OpenAIServerModel(
    model_id="{{ model }}",
    api_key=os.getenv("{{ api_key_env }}"),
)
{% elif llm_provider == "anthropic" %}
from smolagents import LiteLLMModel
model = LiteLLMModel(
    model_id="anthropic/{{ model }}",
    api_key=os.getenv("{{ api_key_env }}"),
)
{% elif llm_provider == "groq" %}
from smolagents import LiteLLMModel
model = LiteLLMModel(
    model_id="groq/{{ model }}",
    api_key=os.getenv("{{ api_key_env }}"),
)
{% elif llm_provider == "azure" %}
from smolagents import OpenAIServerModel
model = OpenAIServerModel(
    model_id="{{ model }}",
    api_base=os.getenv("AZURE_OPENAI_ENDPOINT"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
)
{% elif llm_provider == "bedrock" %}
from smolagents import LiteLLMModel
model = LiteLLMModel(
    model_id="bedrock/{{ model }}",
    aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
    aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
    aws_region_name=os.getenv("AWS_REGION", "us-east-1"),
)
{% elif llm_provider == "gemini" %}
from smolagents import LiteLLMModel
model = LiteLLMModel(
    model_id="gemini/{{ model }}",
    api_key=os.getenv("{{ api_key_env }}"),
)
{% endif %}


def create_agent() -> CodeAgent:
    """Build a CodeAgent with the sample tool, capped at 5 reasoning steps."""
    return CodeAgent(
        tools=[sample_tool],
        model=model,
        max_steps=5,
    )
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
smolagents
|
|
2
|
+
{% if llm_provider == "openai" %}openai{% endif %}
|
|
3
|
+
{% if llm_provider == "anthropic" %}anthropic{% endif %}
|
|
4
|
+
{% if llm_provider == "groq" %}groq{% endif %}
|
|
5
|
+
{% if llm_provider == "azure" %}openai
|
|
6
|
+
azure-identity{% endif %}
|
|
7
|
+
{% if llm_provider == "bedrock" %}boto3
|
|
8
|
+
anthropic[bedrock]{% endif %}
|
|
9
|
+
{% if llm_provider == "gemini" %}google-generativeai{% endif %}
|
|
10
|
+
python-dotenv
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: agentinit-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A CLI tool to scaffold production-ready LLM agent projects
|
|
5
|
+
License: MIT
|
|
6
|
+
Keywords: llm,agents,scaffolding,langgraph,crewai,autogen,google-adk,cli
|
|
7
|
+
Author: Swapnil Bhattacharya
|
|
8
|
+
Author-email: your@email.com
|
|
9
|
+
Requires-Python: >=3.10,<4.0
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Operating System :: OS Independent
|
|
12
|
+
Classifier: Programming Language :: Python :: 3
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
18
|
+
Classifier: Topic :: Software Development :: Code Generators
|
|
19
|
+
Requires-Dist: click (>=8.1.0,<9.0.0)
|
|
20
|
+
Requires-Dist: jinja2 (>=3.1.0,<4.0.0)
|
|
21
|
+
Requires-Dist: rich (>=13.0.0,<14.0.0)
|
|
22
|
+
Project-URL: Homepage, https://github.com/NorthCommits/agentinit
|
|
23
|
+
Project-URL: Repository, https://github.com/NorthCommits/agentinit
|
|
24
|
+
Description-Content-Type: text/markdown
|
|
25
|
+
|
|
26
|
+
# agentinit
|
|
27
|
+
|
|
28
|
+
A CLI tool to scaffold production-ready LLM agent projects in seconds.
|
|
29
|
+
|
|
30
|
+
Stop copy-pasting boilerplate. Run one command and get a fully structured, framework-specific agent project ready to run.
|
|
31
|
+
|
|
32
|
+
## Install
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
pip install agentinit-cli
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Usage
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
agentinit init my-project --framework langgraph --llm openai
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
## Supported Frameworks
|
|
45
|
+
|
|
46
|
+
| Framework | Description |
|
|
47
|
+
|---|---|
|
|
48
|
+
| `langgraph` | LangChain's graph-based agent framework |
|
|
49
|
+
| `crewai` | Multi-agent role-based framework |
|
|
50
|
+
| `autogen` | Microsoft's conversational agent framework |
|
|
51
|
+
| `google_adk` | Google's Agent Development Kit |
|
|
52
|
+
| `openai_agents` | OpenAI's official agents SDK |
|
|
53
|
+
| `smolagents` | HuggingFace's lightweight agent framework |
|
|
54
|
+
|
|
55
|
+
## Supported LLM Providers
|
|
56
|
+
|
|
57
|
+
| Provider | Env Variable |
|
|
58
|
+
|---|---|
|
|
59
|
+
| `openai` | `OPENAI_API_KEY` |
|
|
60
|
+
| `anthropic` | `ANTHROPIC_API_KEY` |
|
|
61
|
+
| `groq` | `GROQ_API_KEY` |
|
|
62
|
+
| `azure` | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_ENDPOINT` |
|
|
63
|
+
| `bedrock` | `AWS_ACCESS_KEY_ID` + `AWS_SECRET_ACCESS_KEY` |
|
|
64
|
+
| `gemini` | `GOOGLE_API_KEY` |
|
|
65
|
+
|
|
66
|
+
## Generated Project Structure
|
|
67
|
+
|
|
68
|
+
```
|
|
69
|
+
my-project/
|
|
70
|
+
├── agents/
|
|
71
|
+
│ └── base_agent.py # framework-specific agent logic
|
|
72
|
+
├── tools/
|
|
73
|
+
│ └── sample_tool.py # sample tool stub
|
|
74
|
+
├── config/
|
|
75
|
+
│ └── config.yaml # llm and project config
|
|
76
|
+
├── Dockerfile # ready to containerize
|
|
77
|
+
├── main.py # entry point
|
|
78
|
+
├── .env.example # environment variable template
|
|
79
|
+
└── requirements.txt # dependencies for chosen framework
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## Commands
|
|
83
|
+
|
|
84
|
+
### Scaffold a new project
|
|
85
|
+
```bash
|
|
86
|
+
agentinit init <project-name> --framework <framework> --llm <provider>
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
### Add a new agent to an existing project
|
|
90
|
+
```bash
|
|
91
|
+
cd my-project
|
|
92
|
+
agentinit add-agent researcher --framework langgraph
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
### List all supported frameworks and providers
|
|
96
|
+
```bash
|
|
97
|
+
agentinit list-frameworks
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
## Examples
|
|
101
|
+
|
|
102
|
+
```bash
|
|
103
|
+
# LangGraph with OpenAI
|
|
104
|
+
agentinit init my-agent --framework langgraph --llm openai
|
|
105
|
+
|
|
106
|
+
# CrewAI with Anthropic
|
|
107
|
+
agentinit init my-crew --framework crewai --llm anthropic
|
|
108
|
+
|
|
109
|
+
# AutoGen with Groq
|
|
110
|
+
agentinit init my-autogen --framework autogen --llm groq
|
|
111
|
+
|
|
112
|
+
# Google ADK with Gemini
|
|
113
|
+
agentinit init my-adk --framework google_adk --llm gemini
|
|
114
|
+
|
|
115
|
+
# OpenAI Agents SDK with Azure
|
|
116
|
+
agentinit init my-openai-agent --framework openai_agents --llm azure
|
|
117
|
+
|
|
118
|
+
# Smolagents with Bedrock
|
|
119
|
+
agentinit init my-smol --framework smolagents --llm bedrock
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
## Getting Started After Scaffolding
|
|
123
|
+
|
|
124
|
+
```bash
|
|
125
|
+
cd my-project
|
|
126
|
+
cp .env.example .env # add your API keys
|
|
127
|
+
pip install -r requirements.txt
|
|
128
|
+
python main.py
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
## License
|
|
132
|
+
|
|
133
|
+
MIT
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
agentinit/__init__.py,sha256=Pru0BlFBASFCFo7McHdohtKkUtgMPDwbGfyUZlE2_Vw,21
|
|
2
|
+
agentinit/cli.py,sha256=vB3wd4u1pygS1fz2NLM86YGdJucTQbWJ7VyP1n-QNiw,3113
|
|
3
|
+
agentinit/generator.py,sha256=xfm0mt_Cq7IwbCvYudbW1hwrRHu2gZf_r47W3X15XPg,3894
|
|
4
|
+
agentinit/templates/autogen/agents/base_agent.py.j2,sha256=rgKULeHHmfGN0CL2s2y2X8SoNB6fc7FGFknvmKzxlG0,660
|
|
5
|
+
agentinit/templates/autogen/main.py.j2,sha256=7xsendP5oowINh1AC189QprHZCmcHwQtHfbKcICEuOQ,144
|
|
6
|
+
agentinit/templates/autogen/requirements.txt.j2,sha256=NkmWj20iwGE877f-gqFbHl_5Zt2Qcsp8RgUK2D2uJ2w,178
|
|
7
|
+
agentinit/templates/autogen/tools/sample_tool.py.j2,sha256=D4Wn4ySmb8bxgPE3-VJervsquGUjBJv5XZ06ncBzCsU,132
|
|
8
|
+
agentinit/templates/common/.env.example.j2,sha256=N9tx0vO8gwCqXW80wV1nkv79IGPt7JWoagYVzP3D-I4,81
|
|
9
|
+
agentinit/templates/common/Dockerfile.j2,sha256=VQRt4JdABc7CfvLdrmHkwtgesllzosR1QzCJChfOex4,404
|
|
10
|
+
agentinit/templates/common/README.md.j2,sha256=SBuTBz6DwahY-F76-K6I9Zehr-6T5l0NPlLoYZO8FXI,404
|
|
11
|
+
agentinit/templates/common/config.yaml.j2,sha256=Bsnvl6YJViSwsCDdIRZ4yPxJ_YE2moEgikfBi3a9Ovg,149
|
|
12
|
+
agentinit/templates/crewai/agents/base_agent.py.j2,sha256=5IXMFMZwFQ15rD_KMe8H3C3hjxergtHLBkRVv90eoF4,632
|
|
13
|
+
agentinit/templates/crewai/main.py.j2,sha256=QANzlTNvg7PIHE1zgRfxbScoa206jqgHn4Ru6xDUirM,184
|
|
14
|
+
agentinit/templates/crewai/requirements.txt.j2,sha256=pG7zbApsxMX4_L6u4SKPZJu1ovvS1_ifpbY_tGONIKg,175
|
|
15
|
+
agentinit/templates/crewai/tools/sample_tool.py.j2,sha256=5W8qtLZrh9DzyPCoTbyylCOH1xr-8JgQf4GWqX7dGUw,278
|
|
16
|
+
agentinit/templates/google_adk/agents/base_agent.py.j2,sha256=Rh8mNYdRLq5ME2X7W88LLy6ovivB0mFhteL9766vyWc,325
|
|
17
|
+
agentinit/templates/google_adk/main.py.j2,sha256=QEsMHQfrMYxNnVQh0KVs81XJ-AjvALrte4_NAeh5OQ8,890
|
|
18
|
+
agentinit/templates/google_adk/requirements.txt.j2,sha256=Pt3VubJzjLZNQXrpU2GmHlP4rMk4T9AyMbtvdiWuvco,179
|
|
19
|
+
agentinit/templates/google_adk/tools/sample_tool.py.j2,sha256=D4Wn4ySmb8bxgPE3-VJervsquGUjBJv5XZ06ncBzCsU,132
|
|
20
|
+
agentinit/templates/langgraph/agents/base_agent.py.j2,sha256=BB6JwgTDPVzAOlL5z9-rYjVwH9h6mYoEKz5MptW8Bes,1020
|
|
21
|
+
agentinit/templates/langgraph/main.py.j2,sha256=lx6QGjrdIxINzCa3mnDVoGB_DQ4muHYn7KWdejDVEGg,247
|
|
22
|
+
agentinit/templates/langgraph/requirements.txt.j2,sha256=b8IF-CSyLZLxLuM8ImOe4ELouuXIVXKGGRiR_902124,230
|
|
23
|
+
agentinit/templates/langgraph/tools/sample_tool.py.j2,sha256=7BrDDPR_072aw3Te5Gsd3ywkrI_VgKsOv2uHR88CS84,178
|
|
24
|
+
agentinit/templates/openai_agents/agents/base_agent.py.j2,sha256=X5bCCJXcm5jXC6thGiuIwwZFIC422T1azs0wXSurqt4,1102
|
|
25
|
+
agentinit/templates/openai_agents/main.py.j2,sha256=ijcNmgeRxuWoI1fqOJWVc7RN3g--f-zBRnnfMy9w6Ek,253
|
|
26
|
+
agentinit/templates/openai_agents/requirements.txt.j2,sha256=VYge6kWDuNHpW3e8zypwnQ1GYeF7H1Ajf7XbbG3rWL8,277
|
|
27
|
+
agentinit/templates/openai_agents/tools/sample_tool.py.j2,sha256=bapd2uqjVDlCFcZQWN_YKM3SVvbwJfy84TVMnYgSqlc,182
|
|
28
|
+
agentinit/templates/smolagents/agents/base_agent.py.j2,sha256=Jhr_nUTsXBkkhtRN9iUHvAYLNtzTUit0Q_0mkO0n5Ng,1470
|
|
29
|
+
agentinit/templates/smolagents/main.py.j2,sha256=dfV_Em-xv9wVVGd_kMxoB0dJGUtYohnOkQjnV7C_jmw,201
|
|
30
|
+
agentinit/templates/smolagents/requirements.txt.j2,sha256=0yHMue7GnzSohgS1cqHXqCwTOt2QHUt160pwzSAI0Vk,378
|
|
31
|
+
agentinit/templates/smolagents/tools/sample_tool.py.j2,sha256=vvfHOgmLCtiWWl3Z0FFyMfjfwnlKzZBLKd1O-ArMUBM,168
|
|
32
|
+
agentinit_cli-0.1.0.dist-info/METADATA,sha256=ceJVoMjtcGz47yos4TXq8vC-HHo4Gzs0phSmiRL765g,3664
|
|
33
|
+
agentinit_cli-0.1.0.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
|
|
34
|
+
agentinit_cli-0.1.0.dist-info/entry_points.txt,sha256=GpMShF0iUtWUG4XXtWlABy-jRBFD5DXjtBqFpLYZKzA,47
|
|
35
|
+
agentinit_cli-0.1.0.dist-info/RECORD,,
|