app-generator-cli 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app_generator/__init__.py +10 -0
- app_generator/commands/__init__.py +1 -0
- app_generator/commands/create.py +74 -0
- app_generator/generator.py +265 -0
- app_generator/main.py +32 -0
- app_generator/templates/ai/.env.example +29 -0
- app_generator/templates/ai/.gitignore +47 -0
- app_generator/templates/ai/Dockerfile +22 -0
- app_generator/templates/ai/README.md +97 -0
- app_generator/templates/ai/app/__init__.py +1 -0
- app_generator/templates/ai/app/agents/__init__.py +1 -0
- app_generator/templates/ai/app/agents/assistant.py +100 -0
- app_generator/templates/ai/app/chains/__init__.py +1 -0
- app_generator/templates/ai/app/chains/rag.py +50 -0
- app_generator/templates/ai/app/config.py +47 -0
- app_generator/templates/ai/app/tools/__init__.py +1 -0
- app_generator/templates/ai/app/tools/registry.py +19 -0
- app_generator/templates/ai/app/tools/search.py +34 -0
- app_generator/templates/ai/docker-compose.yml +39 -0
- app_generator/templates/ai/main.py +40 -0
- app_generator/templates/ai/pyproject.toml +28 -0
- app_generator/templates/ai/tests/__init__.py +1 -0
- app_generator/templates/ai/tests/conftest.py +21 -0
- app_generator/templates/ai/tests/test_agent.py +53 -0
- app_generator/templates/fastapi/.env.example +17 -0
- app_generator/templates/fastapi/.gitignore +42 -0
- app_generator/templates/fastapi/Dockerfile +27 -0
- app_generator/templates/fastapi/README.md +68 -0
- app_generator/templates/fastapi/app/__init__.py +1 -0
- app_generator/templates/fastapi/app/api/__init__.py +1 -0
- app_generator/templates/fastapi/app/api/v1/__init__.py +1 -0
- app_generator/templates/fastapi/app/api/v1/health.py +25 -0
- app_generator/templates/fastapi/app/config.py +45 -0
- app_generator/templates/fastapi/app/db/__init__.py +1 -0
- app_generator/templates/fastapi/app/db/session.py +35 -0
- app_generator/templates/fastapi/app/dependencies.py +12 -0
- app_generator/templates/fastapi/app/main.py +58 -0
- app_generator/templates/fastapi/app/models/__init__.py +4 -0
- app_generator/templates/fastapi/app/models/base.py +23 -0
- app_generator/templates/fastapi/docker-compose.yml +39 -0
- app_generator/templates/fastapi/pyproject.toml +29 -0
- app_generator/templates/fastapi/tests/__init__.py +1 -0
- app_generator/templates/fastapi/tests/conftest.py +46 -0
- app_generator/templates/fastapi/tests/test_health.py +13 -0
- app_generator/templates/fastapi-with-frontend/.env.example +17 -0
- app_generator/templates/fastapi-with-frontend/.gitignore +42 -0
- app_generator/templates/fastapi-with-frontend/Dockerfile +27 -0
- app_generator/templates/fastapi-with-frontend/README.md +77 -0
- app_generator/templates/fastapi-with-frontend/app/__init__.py +1 -0
- app_generator/templates/fastapi-with-frontend/app/api/__init__.py +1 -0
- app_generator/templates/fastapi-with-frontend/app/api/v1/__init__.py +1 -0
- app_generator/templates/fastapi-with-frontend/app/api/v1/health.py +25 -0
- app_generator/templates/fastapi-with-frontend/app/config.py +45 -0
- app_generator/templates/fastapi-with-frontend/app/db/__init__.py +1 -0
- app_generator/templates/fastapi-with-frontend/app/db/session.py +35 -0
- app_generator/templates/fastapi-with-frontend/app/dependencies.py +12 -0
- app_generator/templates/fastapi-with-frontend/app/main.py +70 -0
- app_generator/templates/fastapi-with-frontend/app/models/__init__.py +4 -0
- app_generator/templates/fastapi-with-frontend/app/models/base.py +23 -0
- app_generator/templates/fastapi-with-frontend/app/templates/base.html +116 -0
- app_generator/templates/fastapi-with-frontend/app/templates/index.html +15 -0
- app_generator/templates/fastapi-with-frontend/app/templates/partials/footer.html +7 -0
- app_generator/templates/fastapi-with-frontend/app/templates/partials/header.html +7 -0
- app_generator/templates/fastapi-with-frontend/docker-compose.yml +39 -0
- app_generator/templates/fastapi-with-frontend/pyproject.toml +28 -0
- app_generator/templates/fastapi-with-frontend/tests/__init__.py +1 -0
- app_generator/templates/fastapi-with-frontend/tests/conftest.py +46 -0
- app_generator/templates/fastapi-with-frontend/tests/test_frontend.py +12 -0
- app_generator/templates/fastapi-with-frontend/tests/test_health.py +13 -0
- app_generator_cli-1.0.1.dist-info/METADATA +219 -0
- app_generator_cli-1.0.1.dist-info/RECORD +73 -0
- app_generator_cli-1.0.1.dist-info/WHEEL +4 -0
- app_generator_cli-1.0.1.dist-info/entry_points.txt +4 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""CLI command modules."""
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
"""
|
|
2
|
+
`appgenerator create` subcommand group.
|
|
3
|
+
Delegates to template-specific generators.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
import typer
|
|
11
|
+
from rich.console import Console
|
|
12
|
+
|
|
13
|
+
from app_generator.generator import ProjectGenerator
|
|
14
|
+
|
|
15
|
+
# Typer sub-application mounted as `appgenerator create`; shows help when run
# with no arguments and renders [rich] markup inside help strings.
create_app = typer.Typer(no_args_is_help=True, rich_markup_mode="rich")
# Shared Rich console for all command output in this module.
console = Console()
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _run_generator(
    template: str,
    project_name: str,
    output_dir: Optional[Path],
    docker: bool,
    postgres: bool,
    redis: bool,
) -> None:
    """Resolve the destination directory and delegate to ProjectGenerator.

    The destination is ``<output_dir or cwd>/<project_name>``. Refuses to
    overwrite: prints an error and exits with code 1 if it already exists.
    """
    base_dir = output_dir if output_dir is not None else Path.cwd()
    target = base_dir / project_name

    # Guard clause: never clobber an existing directory.
    if target.exists():
        console.print(f"[bold red]✗[/] Directory [bold]{target}[/] already exists.")
        raise typer.Exit(code=1)

    feature_flags = {"docker": docker, "postgres": postgres, "redis": redis}
    ProjectGenerator(
        template=template,
        project_name=project_name,
        target_dir=target,
        options=feature_flags,
    ).run()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@create_app.command("fastapi", help="Scaffold a [bold]FastAPI[/] backend project.")
def create_fastapi(
    project_name: str = typer.Argument(..., help="Name of the new project."),
    output_dir: Optional[Path] = typer.Option(None, "--output", "-o", help="Parent directory."),
    docker: bool = typer.Option(False, "--docker", help="Add Dockerfile & docker-compose.yml."),
    postgres: bool = typer.Option(False, "--postgres", help="Add PostgreSQL support."),
    redis: bool = typer.Option(False, "--redis", help="Add Redis support."),
) -> None:
    """Create a new FastAPI backend project from the `fastapi` template."""
    _run_generator(
        template="fastapi",
        project_name=project_name,
        output_dir=output_dir,
        docker=docker,
        postgres=postgres,
        redis=redis,
    )
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@create_app.command(
    "fastapi-with-frontend",
    help="Scaffold a [bold]FastAPI + Jinja frontend[/] project.",
)
def create_fastapi_with_frontend(
    project_name: str = typer.Argument(..., help="Name of the new project."),
    output_dir: Optional[Path] = typer.Option(None, "--output", "-o", help="Parent directory."),
    docker: bool = typer.Option(False, "--docker", help="Add Dockerfile & docker-compose.yml."),
    postgres: bool = typer.Option(False, "--postgres", help="Add PostgreSQL support."),
    redis: bool = typer.Option(False, "--redis", help="Add Redis support."),
) -> None:
    """Create a FastAPI project with a server-rendered Jinja frontend."""
    _run_generator(
        template="fastapi-with-frontend",
        project_name=project_name,
        output_dir=output_dir,
        docker=docker,
        postgres=postgres,
        redis=redis,
    )
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@create_app.command("ai", help="Scaffold a [bold]LangChain / LangGraph[/] AI project.")
def create_ai(
    project_name: str = typer.Argument(..., help="Name of the new project."),
    output_dir: Optional[Path] = typer.Option(None, "--output", "-o", help="Parent directory."),
    docker: bool = typer.Option(False, "--docker", help="Add Dockerfile & docker-compose.yml."),
    postgres: bool = typer.Option(False, "--postgres", help="Add PostgreSQL vector DB support."),
    redis: bool = typer.Option(False, "--redis", help="Add Redis cache support."),
) -> None:
    """Create a new LangChain/LangGraph AI project from the `ai` template."""
    _run_generator(
        template="ai",
        project_name=project_name,
        output_dir=output_dir,
        docker=docker,
        postgres=postgres,
        redis=redis,
    )
|
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Core project generation engine.
|
|
3
|
+
Handles file rendering, uv init, and dependency installation.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import shutil
|
|
8
|
+
import subprocess
|
|
9
|
+
import sys
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from jinja2 import Environment, FileSystemLoader
|
|
14
|
+
from rich.console import Console
|
|
15
|
+
from rich.panel import Panel
|
|
16
|
+
from rich.progress import Progress, SpinnerColumn, TextColumn
|
|
17
|
+
from rich.table import Table
|
|
18
|
+
|
|
19
|
+
# Shared Rich console for all generator output.
console = Console()

# Package root — templates live here
TEMPLATES_DIR = Path(__file__).parent / "templates"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class ProjectGenerator:
    """Renders a project template and initialises it with uv.

    Pipeline (see :meth:`run`): render the Jinja template tree into the
    target directory, create a virtualenv with ``uv venv``, then pin and
    install dependencies with ``uv add`` / ``uv sync``.
    """

    # Runtime dependencies for the plain FastAPI template.
    FASTAPI_DEPS = [
        "fastapi",
        "uvicorn[standard]",
        "sqlmodel",
        "aiosqlite",
        "pydantic",
        "pydantic-settings",
        "python-dotenv",
        "alembic",
        "httpx",
    ]
    FASTAPI_DEV_DEPS = ["pytest", "pytest-asyncio", "httpx", "ruff", "mypy"]

    # Frontend variant = FastAPI deps plus Jinja templating.
    FASTAPI_FRONTEND_DEPS = FASTAPI_DEPS + ["jinja2"]
    FASTAPI_FRONTEND_DEV_DEPS = FASTAPI_DEV_DEPS

    # Runtime dependencies for the LangChain/LangGraph AI template.
    AI_DEPS = [
        "langchain",
        "langgraph",
        "langchain-community",
        "langchain-openai",
        "langchain-ollama",
        "langchain-chroma",
        "chromadb",
        "openai",
        "tiktoken",
        "python-dotenv",
        "pydantic",
        "pydantic-settings",
        "httpx",
    ]
    AI_DEV_DEPS = ["pytest", "pytest-asyncio", "ruff", "mypy"]

    # Template name → runtime / dev dependency lists.
    TEMPLATE_DEPS: dict[str, list[str]] = {
        "fastapi": FASTAPI_DEPS,
        "fastapi-with-frontend": FASTAPI_FRONTEND_DEPS,
        "ai": AI_DEPS,
    }
    TEMPLATE_DEV_DEPS: dict[str, list[str]] = {
        "fastapi": FASTAPI_DEV_DEPS,
        "fastapi-with-frontend": FASTAPI_FRONTEND_DEV_DEPS,
        "ai": AI_DEV_DEPS,
    }

    # Extra packages added when the matching CLI flag is set in `options`.
    OPTIONAL_DEPS: dict[str, list[str]] = {
        "postgres": ["asyncpg", "psycopg2-binary"],
        "redis": ["redis", "hiredis"],
    }

    def __init__(
        self,
        template: str,
        project_name: str,
        target_dir: Path,
        options: dict[str, Any],
    ) -> None:
        """Store generation parameters and prepare the Jinja environment.

        Args:
            template: Template key — "fastapi" | "fastapi-with-frontend" | "ai".
            project_name: Human-readable project name used in rendered files.
            target_dir: Directory the project will be written into.
            options: Feature flags ("docker", "postgres", "redis") → bool.
        """
        self.template = template  # "fastapi" | "fastapi-with-frontend" | "ai"
        self.project_name = project_name
        self.target_dir = target_dir
        self.options = options
        self.template_dir = TEMPLATES_DIR / template
        # keep_trailing_newline so rendered files end with a newline like the sources.
        self.env = Environment(
            loader=FileSystemLoader(str(self.template_dir)),
            keep_trailing_newline=True,
        )

    # ------------------------------------------------------------------ #
    # Public API
    # ------------------------------------------------------------------ #

    def run(self) -> None:
        """Execute the full pipeline: render → uv venv → install deps → summary."""
        console.print()
        console.print(
            Panel.fit(
                f"[bold cyan]⚒ AppGenerator[/] · Creating [bold]{self.project_name}[/] "
                f"([italic]{self.template}[/] template)",
                border_style="cyan",
            )
        )
        console.print()

        # Single transient spinner task; its description is updated per step.
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=console,
            transient=True,
        ) as progress:
            t = progress.add_task("Rendering template files …", total=None)
            self._render_template()
            progress.update(t, description="[green]✓[/] Template files written")

            progress.update(t, description="Initialising uv project …", completed=None)
            self._uv_init()
            progress.update(t, description="[green]✓[/] uv project initialised")

            progress.update(t, description="Installing dependencies …", completed=None)
            self._install_deps()
            progress.update(t, description="[green]✓[/] Dependencies installed")

        self._print_success()

    # ------------------------------------------------------------------ #
    # Steps
    # ------------------------------------------------------------------ #

    def _render_template(self) -> None:
        """Walk the template directory, render Jinja files, copy static files."""
        ctx = self._build_context()

        for src in self.template_dir.rglob("*"):
            if src.is_dir():
                continue

            rel = src.relative_to(self.template_dir)
            dest = self.target_dir / rel

            # Skip optional files based on flags
            if self._should_skip(rel):
                continue

            dest.parent.mkdir(parents=True, exist_ok=True)

            # Text-like files go through Jinja; everything else is copied verbatim.
            # "" covers extensionless files (e.g. Dockerfile, dotfiles like .gitignore).
            if src.suffix in {".py", ".toml", ".env", ".yml", ".yaml", ".md", ".txt", ".cfg", ".ini", ".dockerfile", ""} or src.name == ".env.example":
                try:
                    # Jinja wants POSIX-style path separators even on Windows.
                    template = self.env.get_template(str(rel).replace("\\", "/"))
                    dest.write_text(template.render(**ctx), encoding="utf-8")
                except Exception:
                    # Binary or unparseable — just copy
                    shutil.copy2(src, dest)
            else:
                shutil.copy2(src, dest)

    def _uv_init(self) -> None:
        """Run `uv init` inside the target directory (no-op if uv not found)."""
        if not shutil.which("uv"):
            console.print(
                "[yellow]⚠[/] [bold]uv[/] not found — skipping venv initialisation. "
                "Install uv: [link=https://docs.astral.sh/uv/]https://docs.astral.sh/uv/[/link]"
            )
            return

        # uv init creates a pyproject.toml — we already created ours, so just create the venv
        self._run(["uv", "venv"], cwd=self.target_dir)

    def _install_deps(self) -> None:
        """Write dependencies into pyproject.toml and sync the venv.

        Uses --no-sync on `uv add` so uv only updates pyproject.toml + uv.lock
        without trying to build/install the generated project itself as an
        editable package.

        The actual installation is done by a follow-up
        `uv sync --no-install-project`.
        """
        # Mirrors _uv_init: without uv there is nothing to install into.
        if not shutil.which("uv"):
            return

        deps = self.TEMPLATE_DEPS.get(self.template)
        dev_deps = self.TEMPLATE_DEV_DEPS.get(self.template)
        if deps is None or dev_deps is None:
            console.print(f"[red]Unknown template:[/] {self.template}")
            sys.exit(1)

        # Append flag-gated extras (new list each time — class constants untouched).
        for flag, pkgs in self.OPTIONAL_DEPS.items():
            if self.options.get(flag):
                deps = deps + pkgs

        # --no-sync: only pin versions into pyproject.toml / uv.lock, don't build project
        self._run(["uv", "add", "--no-sync"] + deps, cwd=self.target_dir)
        self._run(["uv", "add", "--no-sync", "--dev"] + dev_deps, cwd=self.target_dir)

        # Install all pinned deps into .venv, skipping the project root package
        self._run(["uv", "sync", "--no-install-project"], cwd=self.target_dir)

    # ------------------------------------------------------------------ #
    # Helpers
    # ------------------------------------------------------------------ #

    def _build_context(self) -> dict[str, Any]:
        """Build the Jinja rendering context shared by every template file."""
        # Derive an import-safe package name from the project name.
        pkg = self.project_name.lower().replace("-", "_").replace(" ", "_")
        return {
            "project_name": self.project_name,
            "package_name": pkg,
            "template": self.template,
            "docker": self.options.get("docker", False),
            "postgres": self.options.get("postgres", False),
            "redis": self.options.get("redis", False),
            "fastapi_deps": self.FASTAPI_DEPS,
            "ai_deps": self.AI_DEPS,
        }

    def _should_skip(self, rel: Path) -> bool:
        """Return True if the template-relative path is gated off by a feature flag."""
        name = rel.name
        parts = set(rel.parts)
        if name in {"Dockerfile", "docker-compose.yml"} and not self.options.get("docker"):
            return True
        # Any path segment named "postgres"/"redis" is tied to that flag.
        if "postgres" in parts and not self.options.get("postgres"):
            return True
        if "redis" in parts and not self.options.get("redis"):
            return True
        return False

    def _run(self, cmd: list[str], cwd: Path) -> None:
        """Run *cmd* in *cwd*; on failure print stderr and exit with its code."""
        result = subprocess.run(
            cmd,
            cwd=cwd,
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            console.print(f"[red]Command failed:[/] {' '.join(cmd)}")
            console.print(result.stderr)
            sys.exit(result.returncode)

    def _print_success(self) -> None:
        """Print a summary panel with the project location and next steps."""
        table = Table.grid(padding=(0, 2))
        table.add_column(style="bold green")
        table.add_column()

        rel = self.target_dir.resolve()
        # The run hint depends on the template family (ASGI app vs. plain script).
        run_cmd = (
            "uvicorn app.main:app --reload"
            if self.template in {"fastapi", "fastapi-with-frontend"}
            else "python main.py"
        )

        table.add_row("Project:", str(rel))
        table.add_row("Template:", self.template)
        table.add_row("venv:", str(rel / ".venv"))
        table.add_row("", "")
        table.add_row("Next steps:", f"cd {self.project_name}")
        table.add_row("", "cp .env.example .env # fill in your secrets")
        table.add_row("", f"uv run {run_cmd}")

        console.print(
            Panel(table, title="[bold green]✓ Project created[/]", border_style="green")
        )
        console.print()
|
app_generator/main.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AppGenerator CLI — A scaffolding tool for FastAPI and LangChain/LangGraph projects.
|
|
3
|
+
"""
|
|
4
|
+
import typer
|
|
5
|
+
from rich.console import Console
|
|
6
|
+
|
|
7
|
+
from app_generator.commands.create import create_app
|
|
8
|
+
|
|
9
|
+
# Root Typer application; prints help when invoked with no subcommand.
app = typer.Typer(
    name="app-generator-cli",
    help="⚒️ AppGenerator — Scaffold production-ready Python projects instantly.",
    no_args_is_help=True,
    rich_markup_mode="rich",
)
# Shared Rich console for top-level output (e.g. the --version banner).
console = Console()

# Mount the `create` command group (defined in app_generator.commands.create).
app.add_typer(create_app, name="create", help="Create a new project from a template.")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    version: bool = typer.Option(False, "--version", "-v", help="Show version and exit."),
) -> None:
    """Top-level callback — handles the global `--version` flag."""
    if not version:
        return

    # Imported lazily so the flag check stays cheap for every other invocation.
    from app_generator import __version__

    console.print(f"[bold cyan]AppGenerator[/] version [bold]{__version__}[/]")
    raise typer.Exit()
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# Allow direct execution (`python -m app_generator.main`) during development;
# the installed console script uses the entry point instead.
if __name__ == "__main__":
    app()
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# ── LLM Providers ────────────────────────────────────────────
|
|
2
|
+
OPENAI_API_KEY=sk-...
|
|
3
|
+
OPENAI_MODEL=gpt-4o-mini
|
|
4
|
+
|
|
5
|
+
# Ollama (local models) — optional
|
|
6
|
+
OLLAMA_BASE_URL=http://localhost:11434
|
|
7
|
+
OLLAMA_MODEL=llama3.2
|
|
8
|
+
|
|
9
|
+
# ── Application ──────────────────────────────────────────────
|
|
10
|
+
APP_NAME="{{ project_name }}"
|
|
11
|
+
APP_ENV=development
|
|
12
|
+
DEBUG=true
|
|
13
|
+
|
|
14
|
+
# ── Vector Store ──────────────────────────────────────────────
|
|
15
|
+
CHROMA_PERSIST_DIR=./chroma_db
|
|
16
|
+
{% if postgres %}
|
|
17
|
+
# pgvector (PostgreSQL) — enable with --postgres
|
|
18
|
+
PGVECTOR_URL=postgresql://postgres:postgres@localhost:5432/{{ package_name }}_vectors
|
|
19
|
+
{% endif %}
|
|
20
|
+
|
|
21
|
+
{% if redis %}
|
|
22
|
+
# ── Redis Cache ───────────────────────────────────────────────
|
|
23
|
+
REDIS_URL=redis://localhost:6379/0
|
|
24
|
+
{% endif %}
|
|
25
|
+
|
|
26
|
+
# ── LangSmith (optional tracing) ─────────────────────────────
|
|
27
|
+
# LANGCHAIN_TRACING_V2=true
|
|
28
|
+
# LANGCHAIN_API_KEY=ls__...
|
|
29
|
+
# LANGCHAIN_PROJECT={{ project_name }}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
# Python
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*.so
|
|
5
|
+
*.egg
|
|
6
|
+
*.egg-info/
|
|
7
|
+
dist/
|
|
8
|
+
build/
|
|
9
|
+
.eggs/
|
|
10
|
+
|
|
11
|
+
# Virtual environments
|
|
12
|
+
.venv/
|
|
13
|
+
venv/
|
|
14
|
+
env/
|
|
15
|
+
|
|
16
|
+
# uv
|
|
17
|
+
.uv/
|
|
18
|
+
|
|
19
|
+
# Environment
|
|
20
|
+
.env
|
|
21
|
+
!.env.example
|
|
22
|
+
|
|
23
|
+
# IDE
|
|
24
|
+
.vscode/
|
|
25
|
+
.idea/
|
|
26
|
+
*.swp
|
|
27
|
+
|
|
28
|
+
# Testing
|
|
29
|
+
.pytest_cache/
|
|
30
|
+
.coverage
|
|
31
|
+
htmlcov/
|
|
32
|
+
|
|
33
|
+
# Type checking
|
|
34
|
+
.mypy_cache/
|
|
35
|
+
|
|
36
|
+
# Vector DB / embeddings
|
|
37
|
+
chroma_db/
|
|
38
|
+
*.faiss
|
|
39
|
+
*.pkl
|
|
40
|
+
|
|
41
|
+
# LangSmith / LangChain cache
|
|
42
|
+
.langchain.db
|
|
43
|
+
langchain_cache/
|
|
44
|
+
|
|
45
|
+
# Logs
|
|
46
|
+
*.log
|
|
47
|
+
logs/
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# ── Build stage ────────────────────────────────────────────────────────────────
|
|
2
|
+
FROM python:3.12-slim AS builder
|
|
3
|
+
|
|
4
|
+
WORKDIR /app
|
|
5
|
+
|
|
6
|
+
COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
|
|
7
|
+
|
|
8
|
+
COPY pyproject.toml uv.lock* ./
|
|
9
|
+
RUN uv sync --frozen --no-dev
|
|
10
|
+
|
|
11
|
+
# ── Runtime stage ──────────────────────────────────────────────────────────────
|
|
12
|
+
FROM python:3.12-slim AS runtime
|
|
13
|
+
|
|
14
|
+
WORKDIR /app
|
|
15
|
+
|
|
16
|
+
COPY --from=builder /app/.venv /app/.venv
|
|
17
|
+
ENV PATH="/app/.venv/bin:$PATH"
|
|
18
|
+
|
|
19
|
+
COPY app ./app
|
|
20
|
+
COPY main.py ./main.py
|
|
21
|
+
|
|
22
|
+
CMD ["python", "main.py"]
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
# {{ project_name }}
|
|
2
|
+
|
|
3
|
+
A production-ready LangChain / LangGraph AI application, scaffolded by [AppGenerator](https://github.com/yourname/appgenerator-cli).
|
|
4
|
+
|
|
5
|
+
## Tech Stack
|
|
6
|
+
|
|
7
|
+
- **[LangChain](https://python.langchain.com/)** — LLM orchestration framework
|
|
8
|
+
- **[LangGraph](https://langchain-ai.github.io/langgraph/)** — stateful agent graphs
|
|
9
|
+
- **[LangChain-OpenAI](https://python.langchain.com/docs/integrations/chat/openai)** — OpenAI chat models
|
|
10
|
+
- **[LangChain-Ollama](https://python.langchain.com/docs/integrations/chat/ollama)** — local Ollama models
|
|
11
|
+
- **[ChromaDB](https://docs.trychroma.com/)** — local vector store for RAG
|
|
12
|
+
- **[Pydantic Settings](https://docs.pydantic.dev/latest/concepts/pydantic_settings/)** — typed config from env
|
|
13
|
+
- **[uv](https://docs.astral.sh/uv/)** — blazing-fast package management
|
|
14
|
+
{% if postgres %}- **pgvector** — PostgreSQL vector extension
|
|
15
|
+
{% endif %}{% if redis %}- **Redis** — semantic caching layer
|
|
16
|
+
{% endif %}{% if docker %}- **Docker** — containerisation
|
|
17
|
+
{% endif %}
|
|
18
|
+
|
|
19
|
+
## Project Structure
|
|
20
|
+
|
|
21
|
+
```
|
|
22
|
+
{{ project_name }}/
|
|
23
|
+
├── main.py # Interactive REPL entry point
|
|
24
|
+
├── app/
|
|
25
|
+
│ ├── config.py # Typed settings (pydantic-settings)
|
|
26
|
+
│ ├── agents/
|
|
27
|
+
│ │ ├── __init__.py
|
|
28
|
+
│ │ └── assistant.py # LangGraph ReAct agent
|
|
29
|
+
│ ├── chains/
|
|
30
|
+
│ │ ├── __init__.py
|
|
31
|
+
│ │ └── rag.py # RAG chain example
|
|
32
|
+
│ ├── tools/
|
|
33
|
+
│ │ ├── __init__.py
|
|
34
|
+
│ │ ├── registry.py # Tool registry (add tools here)
|
|
35
|
+
│ │ └── search.py # Web search tool stub
|
|
36
|
+
├── tests/
|
|
37
|
+
│ ├── conftest.py
|
|
38
|
+
│ ├── test_agent.py
|
|
39
|
+
│ └── __init__.py
|
|
40
|
+
├── .env.example
|
|
41
|
+
├── .gitignore
|
|
42
|
+
└── pyproject.toml
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## Getting Started
|
|
46
|
+
|
|
47
|
+
```bash
|
|
48
|
+
# 1. Copy and configure your secrets
|
|
49
|
+
cp .env.example .env
|
|
50
|
+
# Edit .env — add your OPENAI_API_KEY
|
|
51
|
+
|
|
52
|
+
# 2. Run the interactive assistant
|
|
53
|
+
uv run python main.py
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
## Using Ollama (Local Models)
|
|
57
|
+
|
|
58
|
+
```bash
|
|
59
|
+
# Install Ollama: https://ollama.com
|
|
60
|
+
ollama pull llama3.2
|
|
61
|
+
|
|
62
|
+
# Update .env
|
|
63
|
+
OLLAMA_MODEL=llama3.2
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
Then swap the LLM in `app/agents/assistant.py`:
|
|
67
|
+
|
|
68
|
+
```python
|
|
69
|
+
from langchain_ollama import ChatOllama
|
|
70
|
+
self._llm = ChatOllama(model=settings.ollama_model, base_url=settings.ollama_base_url)
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Adding Tools
|
|
74
|
+
|
|
75
|
+
Edit `app/tools/registry.py` and add your tools to `get_tools()`:
|
|
76
|
+
|
|
77
|
+
```python
|
|
78
|
+
from app.tools.my_tool import my_custom_tool
|
|
79
|
+
|
|
80
|
+
def get_tools():
|
|
81
|
+
return [web_search_tool, my_custom_tool]
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
## Running Tests
|
|
85
|
+
|
|
86
|
+
```bash
|
|
87
|
+
uv run pytest
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
## LangSmith Tracing (optional)
|
|
91
|
+
|
|
92
|
+
```bash
|
|
93
|
+
# .env
|
|
94
|
+
LANGCHAIN_TRACING_V2=true
|
|
95
|
+
LANGCHAIN_API_KEY=ls__your_key
|
|
96
|
+
LANGCHAIN_PROJECT={{ project_name }}
|
|
97
|
+
```
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""{{ project_name }} AI application package."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Agent definitions."""
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AssistantAgent — a simple LangGraph ReAct agent wired to the configured LLM.
|
|
3
|
+
|
|
4
|
+
Extend this class to add memory, tools, and custom graph nodes.
|
|
5
|
+
"""
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from langchain_core.messages import HumanMessage, SystemMessage
|
|
11
|
+
from langchain_openai import ChatOpenAI
|
|
12
|
+
from langgraph.graph import END, START, MessagesState, StateGraph
|
|
13
|
+
|
|
14
|
+
from app.config import settings
|
|
15
|
+
from app.tools.registry import get_tools
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class AssistantAgent:
    """A LangGraph-powered conversational agent.

    Wraps a ChatOpenAI model with tool-calling in a minimal ReAct-style
    graph: the model runs, any requested tools execute, and the loop
    repeats until the model replies without tool calls.
    """

    SYSTEM_PROMPT = (
        "You are a helpful, concise, and accurate assistant. "
        "Use the available tools when appropriate. "
        "If you don't know something, say so honestly."
    )

    def __init__(self) -> None:
        """Build the LLM, bind the registered tools, and compile the graph."""
        self._llm = ChatOpenAI(
            model=settings.openai_model,
            api_key=settings.openai_api_key,
            temperature=0.2,
        )
        self._tools = get_tools()
        # Tool schemas are advertised to the model so it can emit tool calls.
        self._llm_with_tools = self._llm.bind_tools(self._tools)
        self._graph = self._build_graph()

    # ------------------------------------------------------------------ #
    # Public
    # ------------------------------------------------------------------ #

    async def invoke(self, user_message: str, history: list[dict[str, str]] | None = None) -> str:
        """Run the agent and return the assistant's reply.

        Args:
            user_message: The new user turn.
            history: Optional prior turns as {"role", "content"} dicts;
                only "user" entries are replayed into the prompt.
        """
        messages = [SystemMessage(content=self.SYSTEM_PROMPT)]

        for msg in (history or []):
            if msg["role"] == "user":
                messages.append(HumanMessage(content=msg["content"]))
            # assistant messages are added by LangGraph internally

        messages.append(HumanMessage(content=user_message))

        result = await self._graph.ainvoke({"messages": messages})
        # The last message in the final state is the model's reply.
        return result["messages"][-1].content  # type: ignore[index]

    # ------------------------------------------------------------------ #
    # Graph construction
    # ------------------------------------------------------------------ #

    def _build_graph(self) -> Any:
        """Build a minimal ReAct graph: call model → maybe call tools → end."""
        builder = StateGraph(MessagesState)

        builder.add_node("agent", self._call_model)
        builder.add_node("tools", self._call_tools)

        builder.add_edge(START, "agent")
        # _should_use_tools routes to "tools" or END after each model turn.
        builder.add_conditional_edges("agent", self._should_use_tools)
        builder.add_edge("tools", "agent")

        return builder.compile()

    async def _call_model(self, state: MessagesState) -> dict[str, Any]:
        """Graph node: run the tool-bound LLM on the accumulated messages."""
        response = await self._llm_with_tools.ainvoke(state["messages"])
        return {"messages": [response]}

    async def _call_tools(self, state: MessagesState) -> dict[str, Any]:
        """Graph node: execute every tool call from the last model message."""
        from langchain_core.messages import ToolMessage

        last_message = state["messages"][-1]
        tool_map = {t.name: t for t in self._tools}
        results = []

        for tool_call in last_message.tool_calls:  # type: ignore[attr-defined]
            tool = tool_map.get(tool_call["name"])
            if tool is None:
                # Surface the bad tool name to the model instead of crashing.
                output = f"Error: unknown tool '{tool_call['name']}'"
            else:
                output = await tool.ainvoke(tool_call["args"])
            results.append(
                ToolMessage(content=str(output), tool_call_id=tool_call["id"])
            )

        return {"messages": results}

    @staticmethod
    def _should_use_tools(state: MessagesState) -> str:
        """Route to the "tools" node when the last message requests tool calls."""
        last = state["messages"][-1]
        if hasattr(last, "tool_calls") and last.tool_calls:  # type: ignore[attr-defined]
            return "tools"
        return END
|