@totaland/create-starter-kit 2.0.4 → 2.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/index.js +61 -16
- package/package.json +5 -6
- package/templates/python-backend/.env.example +36 -0
- package/templates/python-backend/Makefile +26 -0
- package/templates/python-backend/README.md +123 -0
- package/templates/python-backend/pyproject.toml +143 -0
- package/templates/python-backend/src/__init__.py +1 -0
- package/templates/python-backend/src/config.py +50 -0
- package/templates/python-backend/src/features/__init__.py +1 -0
- package/templates/python-backend/src/features/agents/__init__.py +3 -0
- package/templates/python-backend/src/features/agents/router.py +164 -0
- package/templates/python-backend/src/features/agents/schemas.py +52 -0
- package/templates/python-backend/src/features/chat/__init__.py +3 -0
- package/templates/python-backend/src/features/chat/router.py +98 -0
- package/templates/python-backend/src/features/chat/schemas.py +36 -0
- package/templates/python-backend/src/features/health/__init__.py +3 -0
- package/templates/python-backend/src/features/health/router.py +13 -0
- package/templates/python-backend/src/features/health/schemas.py +6 -0
- package/templates/python-backend/src/features/orders/__init__.py +3 -0
- package/templates/python-backend/src/features/orders/router.py +40 -0
- package/templates/python-backend/src/features/orders/schemas.py +18 -0
- package/templates/python-backend/src/lib/__init__.py +1 -0
- package/templates/python-backend/src/lib/agents.py +167 -0
- package/templates/python-backend/src/lib/cache.py +38 -0
- package/templates/python-backend/src/lib/database.py +31 -0
- package/templates/python-backend/src/lib/llm.py +155 -0
- package/templates/python-backend/src/lib/logging.py +25 -0
- package/templates/python-backend/src/main.py +41 -0
- package/templates/python-backend/tests/__init__.py +1 -0
- package/templates/python-backend/tests/test_health.py +26 -0
- package/templates/python-backend/tests/test_orders.py +38 -0
package/bin/index.js
CHANGED
@@ -21,7 +21,7 @@ const templateArg = process.argv[3]; // Optional template argument
 if (!projectName) {
   console.error('Error: Please provide a project name');
   console.log('Usage: pnpm create @totaland/starter-kit <project-name> [template]');
-  console.log('Templates: backend, frontend, fullstack');
+  console.log('Templates: backend, frontend, fullstack, python-backend, ai-fullstack');
   process.exit(1);
 }

@@ -47,11 +47,23 @@ const TEMPLATES = {
     description: 'React + Vite with TypeScript, Tailwind CSS v4, shadcn/ui, and TanStack Query',
     dir: 'frontend',
   },
+  'python-backend': {
+    name: 'Python AI Backend',
+    description: 'FastAPI + LangGraph + OpenAI/Anthropic for AI orchestration',
+    dir: 'python-backend',
+    isPython: true,
+  },
   fullstack: {
     name: 'Fullstack',
     description: 'Both Backend and Frontend templates combined',
     dirs: ['backend', 'frontend'],
   },
+  'ai-fullstack': {
+    name: 'AI Fullstack',
+    description: 'Python AI Backend + React Frontend',
+    dirs: ['python-backend', 'frontend'],
+    isPython: true,
+  },
 };

 // Directories and files to exclude when copying
@@ -69,6 +81,14 @@ const EXCLUDE = new Set([
   '.env',
   '.env.local',
   '.DS_Store',
+  // Python exclusions
+  '__pycache__',
+  '.venv',
+  'venv',
+  '.pytest_cache',
+  '.mypy_cache',
+  '.ruff_cache',
+  '.eggs',
 ]);

 // Function to recursively copy directory
@@ -102,22 +122,32 @@ async function promptTemplate() {
   });

   console.log('\n📦 Select a template:\n');
-  console.log('1. Backend
-  console.log('2. Frontend
-  console.log('3. Fullstack
+  console.log('1. Backend - Express.js + TypeScript + Drizzle ORM');
+  console.log('2. Frontend - React + Vite + Tailwind CSS v4 + shadcn/ui');
+  console.log('3. Fullstack - Backend + Frontend');
+  console.log('4. Python Backend - FastAPI + LangGraph + OpenAI/Anthropic (AI)');
+  console.log('5. AI Fullstack - Python Backend + Frontend\n');

-  const answer = await rl.question('Enter your choice (1
+  const answer = await rl.question('Enter your choice (1-5): ');
   rl.close();

-
-
-
-
-
-
-
-
-
+  const choices = {
+    '1': 'backend',
+    'backend': 'backend',
+    '2': 'frontend',
+    'frontend': 'frontend',
+    '3': 'fullstack',
+    'fullstack': 'fullstack',
+    '4': 'python-backend',
+    'python-backend': 'python-backend',
+    'python': 'python-backend',
+    '5': 'ai-fullstack',
+    'ai-fullstack': 'ai-fullstack',
+    'ai': 'ai-fullstack',
+  };
+
+  const choice = choices[answer.toLowerCase()];
+  if (choice) return choice;

   console.error('Invalid choice. Please run the command again.');
   process.exit(1);
@@ -187,19 +217,34 @@ async function main() {
     console.log('✅ Project created successfully!\n');
     console.log('📝 Next steps:');
     console.log(`  cd ${projectName}`);
+
     if (templateKey === 'fullstack') {
       console.log('  cd backend && pnpm install && pnpm dev');
       console.log('  cd frontend && pnpm install && pnpm dev\n');
+    } else if (templateKey === 'ai-fullstack') {
+      console.log('  cd python-backend && pip install -e ".[dev]" && uvicorn src.main:app --reload');
+      console.log('  cd frontend && pnpm install && pnpm dev\n');
+    } else if (template.isPython) {
+      console.log('  python -m venv .venv && source .venv/bin/activate');
+      console.log('  pip install -e ".[dev]"');
+      console.log('  cp .env.example .env  # Add your API keys');
+      console.log('  uvicorn src.main:app --reload\n');
     } else {
       console.log('  pnpm install');
       console.log('  pnpm dev\n');
     }

-    if (templateKey === 'frontend' || templateKey === 'fullstack') {
+    if (templateKey === 'frontend' || templateKey === 'fullstack' || templateKey === 'ai-fullstack') {
       console.log('💡 Tip: Add shadcn/ui components with:');
-      const cdPath = templateKey === 'fullstack' ? 'cd frontend && ' : '';
+      const cdPath = (templateKey === 'fullstack' || templateKey === 'ai-fullstack') ? 'cd frontend && ' : '';
       console.log(`  ${cdPath}pnpm dlx shadcn@latest add button card dialog\n`);
     }
+
+    if (template.isPython) {
+      console.log('🤖 AI Setup: Add your API keys to .env:');
+      console.log('  OPENAI_API_KEY=sk-...');
+      console.log('  ANTHROPIC_API_KEY=sk-ant-...\n');
+    }
   } catch (error) {
     console.error('Error creating project:', error.message);
     process.exit(1);
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@totaland/create-starter-kit",
-  "version": "2.0.4",
+  "version": "2.0.5",
   "description": "Scaffolding tool for creating new starter-kit projects",
   "type": "module",
   "publishConfig": {
@@ -13,10 +13,6 @@
     "bin",
     "templates"
   ],
-  "scripts": {
-    "sync-templates": "node scripts/sync-templates.js",
-    "prepublishOnly": "node scripts/sync-templates.js"
-  },
   "keywords": [
     "starter-kit",
     "scaffold",
@@ -24,5 +20,8 @@
   ],
   "dependencies": {
     "fast-glob": "^3.3.2"
+  },
+  "scripts": {
+    "sync-templates": "node scripts/sync-templates.js"
   }
-}
+}
package/templates/python-backend/.env.example
ADDED
@@ -0,0 +1,36 @@
+APP_NAME="AI Starter Kit"
+DEBUG=true
+
+# Database
+DATABASE_URL=postgresql+asyncpg://postgres:postgres@localhost:5432/app
+
+# Redis
+REDIS_URL=redis://localhost:6379
+
+# Server
+HOST=0.0.0.0
+PORT=8000
+
+# ===== AI / LLM Configuration =====
+
+# OpenAI - https://platform.openai.com/api-keys
+OPENAI_API_KEY=sk-...
+OPENAI_MODEL=gpt-4o
+OPENAI_EMBEDDING_MODEL=text-embedding-3-small
+
+# Anthropic - https://console.anthropic.com/settings/keys
+ANTHROPIC_API_KEY=sk-ant-...
+ANTHROPIC_MODEL=claude-sonnet-4-20250514
+
+# Default LLM provider: "openai" or "anthropic"
+DEFAULT_LLM_PROVIDER=openai
+
+# LangSmith (optional) - https://smith.langchain.com/
+LANGSMITH_API_KEY=lsv2_...
+LANGSMITH_PROJECT=ai-starter-kit
+LANGSMITH_TRACING=false
+
+# LLM Settings
+LLM_TEMPERATURE=0.7
+LLM_MAX_TOKENS=4096
+LLM_STREAMING=true
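The OPENAI_* / ANTHROPIC_* variables together with DEFAULT_LLM_PROVIDER feed the template's provider switch, which ships as src/lib/llm.py in this release but is not shown in this excerpt. Below is a minimal sketch of the kind of switch these settings imply, built on the langchain-openai and langchain-anthropic packages pinned in pyproject.toml; the function name and field access are illustrative, not the template's actual code.

```python
# Hedged sketch of a DEFAULT_LLM_PROVIDER-driven switch; the template's real
# implementation is src/lib/llm.py, which this diff excerpt does not show.
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI

from src.config import get_settings


def build_chat_model(provider: str | None = None):
    """Return a chat model for the configured provider ("openai" or "anthropic")."""
    settings = get_settings()
    provider = provider or settings.default_llm_provider

    if provider == "anthropic":
        return ChatAnthropic(
            model=settings.anthropic_model,
            api_key=settings.anthropic_api_key,
            temperature=settings.llm_temperature,
            max_tokens=settings.llm_max_tokens,
        )
    return ChatOpenAI(
        model=settings.openai_model,
        api_key=settings.openai_api_key,
        temperature=settings.llm_temperature,
        max_tokens=settings.llm_max_tokens,
    )
```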
package/templates/python-backend/Makefile
ADDED
@@ -0,0 +1,26 @@
+.PHONY: install dev test lint format typecheck clean
+
+install:
+	pip install -e ".[dev]"
+
+dev:
+	uvicorn src.main:app --reload --host 0.0.0.0 --port 8000
+
+test:
+	pytest
+
+test-cov:
+	pytest --cov=src --cov-report=html
+
+lint:
+	ruff check src tests
+
+format:
+	ruff format src tests
+
+typecheck:
+	mypy src
+
+clean:
+	rm -rf __pycache__ .pytest_cache .mypy_cache .ruff_cache htmlcov .coverage
+	find . -type d -name "__pycache__" -exec rm -rf {} +
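The `test` and `test-cov` targets run pytest against the template's FastAPI app; the suite itself ships as tests/test_health.py and tests/test_orders.py later in this release. As a rough illustration of what such a test looks like with the dev dependencies pinned in pyproject.toml, assuming `src.main` exposes the `app` instance that the `dev` target serves; the `/health` path and the assertion are assumptions, not the template's actual test code.

```python
# Illustrative async test in the style `make test` would run; assumes src.main
# exposes `app` (implied by `uvicorn src.main:app` above) and a /health route.
import httpx
import pytest

from src.main import app


@pytest.mark.asyncio
async def test_health_endpoint_returns_ok() -> None:
    transport = httpx.ASGITransport(app=app)
    async with httpx.AsyncClient(transport=transport, base_url="http://test") as client:
        response = await client.get("/health")
    assert response.status_code == 200
```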
package/templates/python-backend/README.md
ADDED
@@ -0,0 +1,123 @@
+# AI Starter Kit
+
+Production-ready Python backend for AI application orchestration with LangGraph, OpenAI, and Anthropic.
+
+## AI / LLM Features
+
+| Feature | Description |
+|---------|-------------|
+| **LangGraph** | State machine orchestration for multi-step AI agents |
+| **Multi-provider LLMs** | OpenAI (GPT-4o) and Anthropic (Claude) with easy switching |
+| **Streaming** | Server-Sent Events for real-time token streaming |
+| **Tool calling** | Built-in tool execution with ReAct agent pattern |
+| **LangSmith** | Optional tracing and observability integration |
+| **Memory** | Conversation memory with checkpointing |
+
+## Performance Packages Included
+
+| Package | Purpose | Why It's Fast |
+|---------|---------|---------------|
+| **FastAPI** | Web framework | Built on Starlette, async-first, fastest Python framework |
+| **uvicorn** | ASGI server | Uses uvloop (libuv-based event loop) |
+| **orjson** | JSON serialization | Written in Rust, 3-10x faster than stdlib json |
+| **msgspec** | Serialization/validation | Zero-copy deserialization, faster than Pydantic |
+| **polars** | DataFrames | Rust-based, multithreaded, lazy evaluation |
+| **asyncpg** | PostgreSQL driver | Native async, fastest Python PostgreSQL driver |
+| **pendulum** | Date/time | Drop-in datetime replacement with better perf |
+| **httpx** | HTTP client | Async support, HTTP/2 support |
+| **structlog** | Logging | Structured logging with minimal overhead |
+
+## Quick Start
+
+```bash
+# Create virtual environment
+python -m venv .venv
+source .venv/bin/activate  # On Windows: .venv\Scripts\activate
+
+# Install dependencies
+pip install -e ".[dev]"
+
+# Copy environment file
+cp .env.example .env
+
+# Run development server
+uvicorn src.main:app --reload --host 0.0.0.0 --port 8000
+```
+
+## Project Structure
+
+```
+python-backend/
+├── src/
+│   ├── features/          # Feature-based organization
+│   │   ├── agents/        # LangGraph agent workflows
+│   │   ├── chat/          # Chat completion endpoints
+│   │   ├── health/        # Health check endpoints
+│   │   └── orders/        # Order management example
+│   ├── lib/               # Shared utilities
+│   │   ├── agents.py      # LangGraph agent builder & tools
+│   │   ├── cache.py       # In-memory caching
+│   │   ├── database.py    # Async SQLAlchemy setup
+│   │   ├── llm.py         # LLM provider abstraction
+│   │   └── logging.py     # Structured logging
+│   ├── config.py          # Settings via pydantic-settings
+│   └── main.py            # Application entry point
+├── tests/                 # Test files
+├── pyproject.toml         # Dependencies & tool config
+└── .env.example           # Environment template
+```
+
+## Development Commands
+
+```bash
+# Run tests
+pytest
+
+# Run tests with coverage
+pytest --cov=src --cov-report=html
+
+# Type checking
+mypy src
+
+# Linting & formatting
+ruff check src tests
+ruff format src tests
+
+# Run with auto-reload
+uvicorn src.main:app --reload
+```
+
+## API Endpoints
+
+### Chat Completions
+```bash
+# Streaming chat (SSE)
+curl -X POST http://localhost:8000/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"messages": [{"role": "user", "content": "Hello!"}], "stream": true}'
+
+# Non-streaming chat
+curl -X POST http://localhost:8000/chat/completions/sync \
+  -H "Content-Type: application/json" \
+  -d '{"messages": [{"role": "user", "content": "Hello!"}]}'
+```
+
+### Agent Workflows
+```bash
+# Invoke agent with tools
+curl -X POST http://localhost:8000/agents/invoke \
+  -H "Content-Type: application/json" \
+  -d '{"message": "What time is it and calculate 42 * 17?"}'
+
+# List available tools
+curl http://localhost:8000/agents/tools
+```
+
+## API Documentation
+
+- **Swagger UI**: http://localhost:8000/docs
+- **ReDoc**: http://localhost:8000/redoc
+
+## License
+
+MIT
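The curl examples in the README above exercise the streaming endpoint from the shell. A rough Python equivalent using httpx (already a runtime dependency) follows; the URL and payload mirror the README, while the exact SSE framing produced by sse-starlette is not shown in this diff, so the `data:` line handling is an assumption.

```python
# Hedged sketch of consuming the SSE endpoint from the README with httpx.
# The URL and payload come from the curl example; the "data: " framing is assumed.
import httpx

payload = {"messages": [{"role": "user", "content": "Hello!"}], "stream": True}

with httpx.stream(
    "POST", "http://localhost:8000/chat/completions", json=payload, timeout=None
) as response:
    response.raise_for_status()
    for line in response.iter_lines():
        if line.startswith("data: "):
            print(line.removeprefix("data: "))
```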
package/templates/python-backend/pyproject.toml
ADDED
@@ -0,0 +1,143 @@
+[project]
+name = "python-starter-kit"
+version = "1.0.0"
+description = "AI-powered Python backend with LangGraph orchestration"
+readme = "README.md"
+requires-python = ">=3.11"
+dependencies = [
+    # Web Framework - fastest Python web framework
+    "fastapi>=0.115.0",
+
+    # ASGI Server - high performance
+    "uvicorn[standard]>=0.32.0",
+
+    # Fast JSON serialization/deserialization
+    "orjson>=3.10.0",
+
+    # Ultra-fast serialization library (faster than pydantic for many cases)
+    "msgspec>=0.19.0",
+
+    # High-performance DataFrame library (faster than pandas)
+    "polars>=1.18.0",
+
+    # Async PostgreSQL driver
+    "asyncpg>=0.30.0",
+
+    # Async SQLAlchemy ORM
+    "sqlalchemy[asyncio]>=2.0.36",
+
+    # Environment variables
+    "python-dotenv>=1.0.0",
+
+    # Data validation (Pydantic v2 with Rust core)
+    "pydantic>=2.10.0",
+    "pydantic-settings>=2.6.0",
+
+    # Caching with Redis
+    "redis>=5.2.0",
+
+    # HTTP client (async)
+    "httpx>=0.28.0",
+
+    # Date/time handling (faster than datetime)
+    "pendulum>=3.0.0",
+
+    # LRU cache with TTL
+    "cachetools>=5.5.0",
+
+    # Structured logging
+    "structlog>=24.4.0",
+
+    # ===== AI / LLM Orchestration =====
+    # LangGraph - state machine orchestration for AI agents
+    "langgraph>=0.2.0",
+
+    # LangChain core abstractions
+    "langchain>=0.3.0",
+    "langchain-core>=0.3.0",
+
+    # LLM Providers
+    "langchain-openai>=0.2.0",
+    "langchain-anthropic>=0.2.0",
+    "openai>=1.50.0",
+    "anthropic>=0.39.0",
+
+    # LangSmith for tracing & observability
+    "langsmith>=0.1.140",
+
+    # Embeddings & Vector stores
+    "langchain-community>=0.3.0",
+
+    # Tiktoken for token counting
+    "tiktoken>=0.8.0",
+
+    # Server-Sent Events for streaming
+    "sse-starlette>=2.1.0",
+
+    # Tenacity for retry logic
+    "tenacity>=9.0.0",
+]
+
+[project.optional-dependencies]
+dev = [
+    # Testing
+    "pytest>=8.3.0",
+    "pytest-asyncio>=0.24.0",
+    "pytest-cov>=6.0.0",
+    "httpx>=0.28.0",
+
+    # Type checking
+    "mypy>=1.13.0",
+
+    # Linting & Formatting
+    "ruff>=0.8.0",
+
+    # Hot reload for development
+    "watchfiles>=1.0.0",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.ruff]
+target-version = "py311"
+line-length = 100
+
+[tool.ruff.lint]
+select = [
+    "E",    # pycodestyle errors
+    "W",    # pycodestyle warnings
+    "F",    # pyflakes
+    "I",    # isort
+    "B",    # flake8-bugbear
+    "C4",   # flake8-comprehensions
+    "UP",   # pyupgrade
+    "ARG",  # flake8-unused-arguments
+    "SIM",  # flake8-simplify
+]
+ignore = ["E501"]
+
+[tool.ruff.lint.isort]
+known-first-party = ["src"]
+
+[tool.mypy]
+python_version = "3.11"
+strict = true
+warn_return_any = true
+warn_unused_ignores = true
+
+[tool.pytest.ini_options]
+asyncio_mode = "auto"
+testpaths = ["tests"]
+addopts = "-v --tb=short"
+
+[tool.coverage.run]
+source = ["src"]
+branch = true
+
+[tool.coverage.report]
+exclude_lines = [
+    "pragma: no cover",
+    "if TYPE_CHECKING:",
+]
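Several of the performance packages above only pay off once they are wired into the app; for example, FastAPI serializes responses with orjson if the app opts into ORJSONResponse. A minimal sketch of that wiring follows; whether the template's src/main.py actually configures it is not visible in this diff, and the /ping route is illustrative.

```python
# Minimal sketch: orjson-backed responses via FastAPI's ORJSONResponse.
# Whether src/main.py in the template does this is not shown in this diff.
from fastapi import FastAPI
from fastapi.responses import ORJSONResponse

app = FastAPI(default_response_class=ORJSONResponse)


@app.get("/ping")
async def ping() -> dict[str, str]:
    # Serialized by orjson rather than the stdlib json encoder.
    return {"status": "ok"}
```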
package/templates/python-backend/src/__init__.py
ADDED
@@ -0,0 +1 @@
+
package/templates/python-backend/src/config.py
ADDED
@@ -0,0 +1,50 @@
+from pydantic_settings import BaseSettings
+from functools import lru_cache
+from typing import Literal
+
+
+class Settings(BaseSettings):
+    app_name: str = "AI Starter Kit"
+    debug: bool = False
+
+    # Database
+    database_url: str = "postgresql+asyncpg://postgres:postgres@localhost:5432/app"
+
+    # Redis
+    redis_url: str = "redis://localhost:6379"
+
+    # Server
+    host: str = "0.0.0.0"
+    port: int = 8000
+
+    # ===== AI / LLM Configuration =====
+    # OpenAI
+    openai_api_key: str = ""
+    openai_model: str = "gpt-4o"
+    openai_embedding_model: str = "text-embedding-3-small"
+
+    # Anthropic
+    anthropic_api_key: str = ""
+    anthropic_model: str = "claude-sonnet-4-20250514"
+
+    # Default LLM provider
+    default_llm_provider: Literal["openai", "anthropic"] = "openai"
+
+    # LangSmith (optional - for tracing)
+    langsmith_api_key: str = ""
+    langsmith_project: str = "ai-starter-kit"
+    langsmith_tracing: bool = False
+
+    # LLM Settings
+    llm_temperature: float = 0.7
+    llm_max_tokens: int = 4096
+    llm_streaming: bool = True
+
+    class Config:
+        env_file = ".env"
+        env_file_encoding = "utf-8"
+
+
+@lru_cache
+def get_settings() -> Settings:
+    return Settings()
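Because get_settings() is wrapped in @lru_cache, the Settings object is built once per process and can be injected wherever configuration is needed. A small usage sketch with FastAPI's dependency injection follows; the /info route is illustrative and not part of the template.

```python
# Illustrative use of the cached settings as a FastAPI dependency;
# the /info route is a made-up example, not part of the template.
from fastapi import Depends, FastAPI

from src.config import Settings, get_settings

app = FastAPI()


@app.get("/info")
async def info(settings: Settings = Depends(get_settings)) -> dict[str, str]:
    return {"app": settings.app_name, "llm_provider": settings.default_llm_provider}
```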
package/templates/python-backend/src/features/__init__.py
ADDED
@@ -0,0 +1 @@
+