@totaland/create-starter-kit 2.0.4 → 2.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/index.js CHANGED
@@ -20,8 +20,9 @@ const templateArg = process.argv[3]; // Optional template argument
20
20
 
21
21
  if (!projectName) {
22
22
  console.error('Error: Please provide a project name');
23
- console.log('Usage: pnpm create @totaland/starter-kit <project-name> [template]');
24
- console.log('Templates: backend, frontend, fullstack');
23
+ console.log('Usage: pnpm create @totaland/create-starter-kit <project-name> [template]');
24
+ console.log(' create-starter-kit <project-name> [template]');
25
+ console.log('Templates: backend, frontend, fullstack, python-backend, ai-fullstack');
25
26
  process.exit(1);
26
27
  }
27
28
 
@@ -47,11 +48,23 @@ const TEMPLATES = {
47
48
  description: 'React + Vite with TypeScript, Tailwind CSS v4, shadcn/ui, and TanStack Query',
48
49
  dir: 'frontend',
49
50
  },
51
+ 'python-backend': {
52
+ name: 'Python AI Backend',
53
+ description: 'FastAPI + LangGraph + OpenAI/Anthropic for AI orchestration',
54
+ dir: 'python-backend',
55
+ isPython: true,
56
+ },
50
57
  fullstack: {
51
58
  name: 'Fullstack',
52
59
  description: 'Both Backend and Frontend templates combined',
53
60
  dirs: ['backend', 'frontend'],
54
61
  },
62
+ 'ai-fullstack': {
63
+ name: 'AI Fullstack',
64
+ description: 'Python AI Backend + React Frontend',
65
+ dirs: ['python-backend', 'frontend'],
66
+ isPython: true,
67
+ },
55
68
  };
56
69
 
57
70
  // Directories and files to exclude when copying
@@ -69,6 +82,14 @@ const EXCLUDE = new Set([
69
82
  '.env',
70
83
  '.env.local',
71
84
  '.DS_Store',
85
+ // Python exclusions
86
+ '__pycache__',
87
+ '.venv',
88
+ 'venv',
89
+ '.pytest_cache',
90
+ '.mypy_cache',
91
+ '.ruff_cache',
92
+ '.eggs',
72
93
  ]);
73
94
 
74
95
  // Function to recursively copy directory
@@ -102,22 +123,32 @@ async function promptTemplate() {
102
123
  });
103
124
 
104
125
  console.log('\nšŸ“¦ Select a template:\n');
105
- console.log('1. Backend - Express.js + TypeScript + Drizzle ORM');
106
- console.log('2. Frontend - React + Vite + Tailwind CSS v4 + shadcn/ui');
107
- console.log('3. Fullstack - Both Backend and Frontend\n');
126
+ console.log('1. Backend - Express.js + TypeScript + Drizzle ORM');
127
+ console.log('2. Frontend - React + Vite + Tailwind CSS v4 + shadcn/ui');
128
+ console.log('3. Fullstack - Backend + Frontend');
129
+ console.log('4. Python Backend - FastAPI + LangGraph + OpenAI/Anthropic (AI)');
130
+ console.log('5. AI Fullstack - Python Backend + Frontend\n');
108
131
 
109
- const answer = await rl.question('Enter your choice (1, 2, or 3): ');
132
+ const answer = await rl.question('Enter your choice (1-5): ');
110
133
  rl.close();
111
134
 
112
- if (answer === '1' || answer.toLowerCase() === 'backend') {
113
- return 'backend';
114
- }
115
- if (answer === '2' || answer.toLowerCase() === 'frontend') {
116
- return 'frontend';
117
- }
118
- if (answer === '3' || answer.toLowerCase() === 'fullstack') {
119
- return 'fullstack';
120
- }
135
+ const choices = {
136
+ '1': 'backend',
137
+ 'backend': 'backend',
138
+ '2': 'frontend',
139
+ 'frontend': 'frontend',
140
+ '3': 'fullstack',
141
+ 'fullstack': 'fullstack',
142
+ '4': 'python-backend',
143
+ 'python-backend': 'python-backend',
144
+ 'python': 'python-backend',
145
+ '5': 'ai-fullstack',
146
+ 'ai-fullstack': 'ai-fullstack',
147
+ 'ai': 'ai-fullstack',
148
+ };
149
+
150
+ const choice = choices[answer.toLowerCase()];
151
+ if (choice) return choice;
121
152
 
122
153
  console.error('Invalid choice. Please run the command again.');
123
154
  process.exit(1);
@@ -133,7 +164,7 @@ async function main() {
133
164
  templateKey = templateArg.toLowerCase();
134
165
  if (!TEMPLATES[templateKey]) {
135
166
  console.error(`Error: Invalid template "${templateArg}"`);
136
- console.log('Available templates: backend, frontend, fullstack');
167
+ console.log('Available templates: backend, frontend, fullstack, python-backend, ai-fullstack');
137
168
  process.exit(1);
138
169
  }
139
170
  } else {
@@ -187,19 +218,34 @@ async function main() {
187
218
  console.log('āœ… Project created successfully!\n');
188
219
  console.log('šŸ“ Next steps:');
189
220
  console.log(` cd ${projectName}`);
221
+
190
222
  if (templateKey === 'fullstack') {
191
223
  console.log(' cd backend && pnpm install && pnpm dev');
192
224
  console.log(' cd frontend && pnpm install && pnpm dev\n');
225
+ } else if (templateKey === 'ai-fullstack') {
226
+ console.log(' cd python-backend && pip install -e ".[dev]" && uvicorn src.main:app --reload');
227
+ console.log(' cd frontend && pnpm install && pnpm dev\n');
228
+ } else if (template.isPython) {
229
+ console.log(' python -m venv .venv && source .venv/bin/activate');
230
+ console.log(' pip install -e ".[dev]"');
231
+ console.log(' cp .env.example .env # Add your API keys');
232
+ console.log(' uvicorn src.main:app --reload\n');
193
233
  } else {
194
234
  console.log(' pnpm install');
195
235
  console.log(' pnpm dev\n');
196
236
  }
197
237
 
198
- if (templateKey === 'frontend' || templateKey === 'fullstack') {
238
+ if (templateKey === 'frontend' || templateKey === 'fullstack' || templateKey === 'ai-fullstack') {
199
239
  console.log('šŸ’” Tip: Add shadcn/ui components with:');
200
- const cdPath = templateKey === 'fullstack' ? 'cd frontend && ' : '';
240
+ const cdPath = (templateKey === 'fullstack' || templateKey === 'ai-fullstack') ? 'cd frontend && ' : '';
201
241
  console.log(` ${cdPath}pnpm dlx shadcn@latest add button card dialog\n`);
202
242
  }
243
+
244
+ if (template.isPython) {
245
+ console.log('šŸ¤– AI Setup: Add your API keys to .env:');
246
+ console.log(' OPENAI_API_KEY=sk-...');
247
+ console.log(' ANTHROPIC_API_KEY=sk-ant-...\n');
248
+ }
203
249
  } catch (error) {
204
250
  console.error('Error creating project:', error.message);
205
251
  process.exit(1);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@totaland/create-starter-kit",
3
- "version": "2.0.4",
3
+ "version": "2.0.7",
4
4
  "description": "Scaffolding tool for creating new starter-kit projects",
5
5
  "type": "module",
6
6
  "publishConfig": {
@@ -0,0 +1,36 @@
1
+ APP_NAME="AI Starter Kit"
2
+ DEBUG=true
3
+
4
+ # Database
5
+ DATABASE_URL=postgresql+asyncpg://postgres:postgres@localhost:5432/app
6
+
7
+ # Redis
8
+ REDIS_URL=redis://localhost:6379
9
+
10
+ # Server
11
+ HOST=0.0.0.0
12
+ PORT=8000
13
+
14
+ # ===== AI / LLM Configuration =====
15
+
16
+ # OpenAI - https://platform.openai.com/api-keys
17
+ OPENAI_API_KEY=sk-...
18
+ OPENAI_MODEL=gpt-4o
19
+ OPENAI_EMBEDDING_MODEL=text-embedding-3-small
20
+
21
+ # Anthropic - https://console.anthropic.com/settings/keys
22
+ ANTHROPIC_API_KEY=sk-ant-...
23
+ ANTHROPIC_MODEL=claude-sonnet-4-20250514
24
+
25
+ # Default LLM provider: "openai" or "anthropic"
26
+ DEFAULT_LLM_PROVIDER=openai
27
+
28
+ # LangSmith (optional) - https://smith.langchain.com/
29
+ LANGSMITH_API_KEY=lsv2_...
30
+ LANGSMITH_PROJECT=ai-starter-kit
31
+ LANGSMITH_TRACING=false
32
+
33
+ # LLM Settings
34
+ LLM_TEMPERATURE=0.7
35
+ LLM_MAX_TOKENS=4096
36
+ LLM_STREAMING=true
@@ -0,0 +1,26 @@
1
+ .PHONY: install dev test lint format typecheck clean
2
+
3
+ install:
4
+ pip install -e ".[dev]"
5
+
6
+ dev:
7
+ uvicorn src.main:app --reload --host 0.0.0.0 --port 8000
8
+
9
+ test:
10
+ pytest
11
+
12
+ test-cov:
13
+ pytest --cov=src --cov-report=html
14
+
15
+ lint:
16
+ ruff check src tests
17
+
18
+ format:
19
+ ruff format src tests
20
+
21
+ typecheck:
22
+ mypy src
23
+
24
+ clean:
25
+ rm -rf __pycache__ .pytest_cache .mypy_cache .ruff_cache htmlcov .coverage
26
+ find . -type d -name "__pycache__" -exec rm -rf {} +
@@ -0,0 +1,123 @@
1
+ # AI Starter Kit
2
+
3
+ Production-ready Python backend for AI application orchestration with LangGraph, OpenAI, and Anthropic.
4
+
5
+ ## AI / LLM Features
6
+
7
+ | Feature | Description |
8
+ |---------|-------------|
9
+ | **LangGraph** | State machine orchestration for multi-step AI agents |
10
+ | **Multi-provider LLMs** | OpenAI (GPT-4o) and Anthropic (Claude) with easy switching |
11
+ | **Streaming** | Server-Sent Events for real-time token streaming |
12
+ | **Tool calling** | Built-in tool execution with ReAct agent pattern |
13
+ | **LangSmith** | Optional tracing and observability integration |
14
+ | **Memory** | Conversation memory with checkpointing |
15
+
16
+ ## Performance Packages Included
17
+
18
+ | Package | Purpose | Why It's Fast |
19
+ |---------|---------|---------------|
20
+ | **FastAPI** | Web framework | Built on Starlette, async-first, one of the fastest Python frameworks |
21
+ | **uvicorn** | ASGI server | Uses uvloop (libuv-based event loop) |
22
+ | **orjson** | JSON serialization | Written in Rust, 3-10x faster than stdlib json |
23
+ | **msgspec** | Serialization/validation | Zero-copy deserialization, faster than Pydantic |
24
+ | **polars** | DataFrames | Rust-based, multithreaded, lazy evaluation |
25
+ | **asyncpg** | PostgreSQL driver | Native async, fastest Python PostgreSQL driver |
26
+ | **pendulum** | Date/time | Drop-in datetime replacement with better perf |
27
+ | **httpx** | HTTP client | Async support, HTTP/2 support |
28
+ | **structlog** | Logging | Structured logging with minimal overhead |
29
+
30
+ ## Quick Start
31
+
32
+ ```bash
33
+ # Create virtual environment
34
+ python -m venv .venv
35
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
36
+
37
+ # Install dependencies
38
+ pip install -e ".[dev]"
39
+
40
+ # Copy environment file
41
+ cp .env.example .env
42
+
43
+ # Run development server
44
+ uvicorn src.main:app --reload --host 0.0.0.0 --port 8000
45
+ ```
46
+
47
+ ## Project Structure
48
+
49
+ ```
50
+ python-backend/
51
+ ā”œā”€ā”€ src/
52
+ │ ā”œā”€ā”€ features/ # Feature-based organization
53
+ │ │ ā”œā”€ā”€ agents/ # LangGraph agent workflows
54
+ │ │ ā”œā”€ā”€ chat/ # Chat completion endpoints
55
+ │ │ ā”œā”€ā”€ health/ # Health check endpoints
56
+ │ │ └── orders/ # Order management example
57
+ │ ā”œā”€ā”€ lib/ # Shared utilities
58
+ │ │ ā”œā”€ā”€ agents.py # LangGraph agent builder & tools
59
+ │ │ ā”œā”€ā”€ cache.py # In-memory caching
60
+ │ │ ā”œā”€ā”€ database.py # Async SQLAlchemy setup
61
+ │ │ ā”œā”€ā”€ llm.py # LLM provider abstraction
62
+ │ │ └── logging.py # Structured logging
63
+ │ ā”œā”€ā”€ config.py # Settings via pydantic-settings
64
+ │ └── main.py # Application entry point
65
+ ā”œā”€ā”€ tests/ # Test files
66
+ ā”œā”€ā”€ pyproject.toml # Dependencies & tool config
67
+ └── .env.example # Environment template
68
+ ```
69
+
70
+ ## Development Commands
71
+
72
+ ```bash
73
+ # Run tests
74
+ pytest
75
+
76
+ # Run tests with coverage
77
+ pytest --cov=src --cov-report=html
78
+
79
+ # Type checking
80
+ mypy src
81
+
82
+ # Linting & formatting
83
+ ruff check src tests
84
+ ruff format src tests
85
+
86
+ # Run with auto-reload
87
+ uvicorn src.main:app --reload
88
+ ```
89
+
90
+ ## API Endpoints
91
+
92
+ ### Chat Completions
93
+ ```bash
94
+ # Streaming chat (SSE)
95
+ curl -X POST http://localhost:8000/chat/completions \
96
+ -H "Content-Type: application/json" \
97
+ -d '{"messages": [{"role": "user", "content": "Hello!"}], "stream": true}'
98
+
99
+ # Non-streaming chat
100
+ curl -X POST http://localhost:8000/chat/completions/sync \
101
+ -H "Content-Type: application/json" \
102
+ -d '{"messages": [{"role": "user", "content": "Hello!"}]}'
103
+ ```
104
+
105
+ ### Agent Workflows
106
+ ```bash
107
+ # Invoke agent with tools
108
+ curl -X POST http://localhost:8000/agents/invoke \
109
+ -H "Content-Type: application/json" \
110
+ -d '{"message": "What time is it and calculate 42 * 17?"}'
111
+
112
+ # List available tools
113
+ curl http://localhost:8000/agents/tools
114
+ ```
115
+
116
+ ## API Documentation
117
+
118
+ - **Swagger UI**: http://localhost:8000/docs
119
+ - **ReDoc**: http://localhost:8000/redoc
120
+
121
+ ## License
122
+
123
+ MIT
@@ -0,0 +1,143 @@
1
+ [project]
2
+ name = "python-starter-kit"
3
+ version = "1.0.0"
4
+ description = "AI-powered Python backend with LangGraph orchestration"
5
+ readme = "README.md"
6
+ requires-python = ">=3.11"
7
+ dependencies = [
8
+ # Web Framework - fastest Python web framework
9
+ "fastapi>=0.129.0",
10
+
11
+ # ASGI Server - high performance
12
+ "uvicorn[standard]>=0.41.0",
13
+
14
+ # Fast JSON serialization/deserialization
15
+ "orjson>=3.11.7",
16
+
17
+ # Ultra-fast serialization library (faster than pydantic for many cases)
18
+ "msgspec>=0.20.0",
19
+
20
+ # High-performance DataFrame library (faster than pandas)
21
+ "polars>=1.38.1",
22
+
23
+ # Async PostgreSQL driver
24
+ "asyncpg>=0.31.0",
25
+
26
+ # Async SQLAlchemy ORM
27
+ "sqlalchemy[asyncio]>=2.0.46",
28
+
29
+ # Environment variables
30
+ "python-dotenv>=1.2.1",
31
+
32
+ # Data validation (Pydantic v2 with Rust core)
33
+ "pydantic>=2.12.5",
34
+ "pydantic-settings>=2.13.0",
35
+
36
+ # Caching with Redis
37
+ "redis>=7.2.0",
38
+
39
+ # HTTP client (async)
40
+ "httpx>=0.28.1",
41
+
42
+ # Date/time handling (faster than datetime)
43
+ "pendulum>=3.2.0",
44
+
45
+ # LRU cache with TTL
46
+ "cachetools>=7.0.1",
47
+
48
+ # Structured logging
49
+ "structlog>=25.5.0",
50
+
51
+ # ===== AI / LLM Orchestration =====
52
+ # LangGraph - state machine orchestration for AI agents
53
+ "langgraph>=1.0.8",
54
+
55
+ # LangChain core abstractions
56
+ "langchain>=1.2.10",
57
+ "langchain-core>=1.2.13",
58
+
59
+ # LLM Providers
60
+ "langchain-openai>=1.1.10",
61
+ "langchain-anthropic>=1.3.3",
62
+ "openai>=2.21.0",
63
+ "anthropic>=0.39.0",
64
+
65
+ # LangSmith for tracing & observability
66
+ "langsmith>=0.1.140",
67
+
68
+ # Embeddings & Vector stores
69
+ "langchain-community>=0.3.0",
70
+
71
+ # Tiktoken for token counting
72
+ "tiktoken>=0.8.0",
73
+
74
+ # Server-Sent Events for streaming
75
+ "sse-starlette>=3.2.0",
76
+
77
+ # Tenacity for retry logic
78
+ "tenacity>=9.0.0",
79
+ ]
80
+
81
+ [project.optional-dependencies]
82
+ dev = [
83
+ # Testing
84
+ "pytest>=9.0.2",
85
+ "pytest-asyncio>=1.3.0",
86
+ "pytest-cov>=7.0.0",
87
+ "httpx>=0.28.0",
88
+
89
+ # Type checking
90
+ "mypy>=1.13.0",
91
+
92
+ # Linting & Formatting
93
+ "ruff>=0.8.0",
94
+
95
+ # Hot reload for development
96
+ "watchfiles>=1.0.0",
97
+ ]
98
+
99
+ [build-system]
100
+ requires = ["hatchling"]
101
+ build-backend = "hatchling.build"
102
+
103
+ [tool.ruff]
104
+ target-version = "py311"
105
+ line-length = 100
106
+
107
+ [tool.ruff.lint]
108
+ select = [
109
+ "E", # pycodestyle errors
110
+ "W", # pycodestyle warnings
111
+ "F", # pyflakes
112
+ "I", # isort
113
+ "B", # flake8-bugbear
114
+ "C4", # flake8-comprehensions
115
+ "UP", # pyupgrade
116
+ "ARG", # flake8-unused-arguments
117
+ "SIM", # flake8-simplify
118
+ ]
119
+ ignore = ["E501"]
120
+
121
+ [tool.ruff.lint.isort]
122
+ known-first-party = ["src"]
123
+
124
+ [tool.mypy]
125
+ python_version = "3.11"
126
+ strict = true
127
+ warn_return_any = true
128
+ warn_unused_ignores = true
129
+
130
+ [tool.pytest.ini_options]
131
+ asyncio_mode = "auto"
132
+ testpaths = ["tests"]
133
+ addopts = "-v --tb=short"
134
+
135
+ [tool.coverage.run]
136
+ source = ["src"]
137
+ branch = true
138
+
139
+ [tool.coverage.report]
140
+ exclude_lines = [
141
+ "pragma: no cover",
142
+ "if TYPE_CHECKING:",
143
+ ]
@@ -0,0 +1,27 @@
"""Application settings, loaded from environment variables and an optional .env file."""

from functools import lru_cache

from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    """Central configuration; each field is overridable via a matching env var."""

    app_name: str = "AI Starter Kit"
    debug: bool = False

    # Database (async driver URL — consumed by src/lib/database.py)
    database_url: str = "postgresql+asyncpg://postgres:postgres@localhost:5432/app"

    # Redis
    redis_url: str = "redis://localhost:6379"

    # Server
    host: str = "0.0.0.0"
    port: int = 8000

    # pydantic-settings v2 configuration style; the nested `class Config`
    # form used previously is deprecated in Pydantic v2.
    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8")


@lru_cache
def get_settings() -> Settings:
    """Return a cached singleton Settings instance (env is read only once)."""
    return Settings()
@@ -0,0 +1,3 @@
"""Health feature package: expose the FastAPI router as its public surface."""

from .router import router

__all__ = ["router"]
@@ -0,0 +1,13 @@
"""Liveness endpoint mounted under /health."""

import pendulum
from fastapi import APIRouter

from .schemas import HealthResponse

router = APIRouter(prefix="/health", tags=["Health"])


@router.get("", response_model=HealthResponse)
async def health_check() -> HealthResponse:
    """Report that the service is up, with the current UTC time attached."""
    now_utc = pendulum.now("UTC")
    return HealthResponse(status="healthy", timestamp=now_utc.to_iso8601_string())
@@ -0,0 +1,6 @@
"""Response schema for the health endpoint."""

from pydantic import BaseModel


class HealthResponse(BaseModel):
    """Payload returned by GET /health.

    Defined with Pydantic (already a project dependency) rather than
    msgspec: FastAPI's ``response_model`` only accepts Pydantic-compatible
    types, and registering a route with a ``msgspec.Struct`` response model
    raises "Invalid args for response field" at application startup.
    """

    # e.g. "healthy"
    status: str
    # ISO-8601 UTC timestamp string
    timestamp: str
@@ -0,0 +1,38 @@
"""Simple in-memory caching utilities (per-function TTL cache)."""

import asyncio
from functools import wraps
from typing import Any, Callable, TypeVar

from cachetools import TTLCache

T = TypeVar("T")


def cached(ttl: int = 300, maxsize: int = 128) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """In-memory cache decorator with TTL support, for sync and async callables.

    Each decorated function gets its own ``TTLCache`` holding up to
    ``maxsize`` entries that expire ``ttl`` seconds after insertion.

    Note: keys are built from the repr of the call arguments, so arguments
    should have stable, value-based reprs. (The previous revision also
    created an unused module-level cache; that dead code is removed.)
    """
    cache: TTLCache[str, Any] = TTLCache(maxsize=maxsize, ttl=ttl)

    def _key(func_name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
        # Sort kwargs so f(a=1, b=2) and f(b=2, a=1) share one cache entry.
        return f"{func_name}:{args}:{tuple(sorted(kwargs.items()))}"

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        @wraps(func)
        async def async_wrapper(*args: Any, **kwargs: Any) -> T:
            key = _key(func.__name__, args, kwargs)
            if key in cache:
                return cache[key]
            result = await func(*args, **kwargs)
            cache[key] = result
            return result

        @wraps(func)
        def sync_wrapper(*args: Any, **kwargs: Any) -> T:
            key = _key(func.__name__, args, kwargs)
            if key in cache:
                return cache[key]
            result = func(*args, **kwargs)
            cache[key] = result
            return result

        # Return the wrapper matching the decorated function's kind.
        if asyncio.iscoroutinefunction(func):
            return async_wrapper  # type: ignore
        return sync_wrapper  # type: ignore

    return decorator
@@ -0,0 +1,31 @@
"""Async SQLAlchemy setup: engine, session factory, and a commit/rollback helper."""

from contextlib import asynccontextmanager
from typing import AsyncGenerator

from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy.orm import DeclarativeBase

from src.config import get_settings


class Base(DeclarativeBase):
    """Declarative base class for all ORM models."""

    pass


settings = get_settings()

# SQL statements are echoed to the log when debug mode is on.
engine = create_async_engine(settings.database_url, echo=settings.debug)
async_session_maker = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)


async def init_db() -> None:
    """Create all tables registered on Base.metadata (idempotent)."""
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)


@asynccontextmanager
async def get_session() -> AsyncGenerator[AsyncSession, None]:
    """Yield a session that commits on success and rolls back on any error."""
    async with async_session_maker() as session:
        try:
            yield session
        except Exception:
            await session.rollback()
            raise
        else:
            await session.commit()
@@ -0,0 +1,25 @@
"""Structured logging configuration built on structlog."""

import logging

import structlog

from src.config import get_settings

settings = get_settings()


def configure_logging() -> None:
    """Configure structlog once at startup.

    Debug mode renders human-friendly console output at DEBUG level;
    otherwise logs are emitted as JSON at INFO level.
    """
    structlog.configure(
        processors=[
            structlog.contextvars.merge_contextvars,
            structlog.processors.add_log_level,
            structlog.processors.StackInfoRenderer(),
            structlog.dev.set_exc_info,
            structlog.processors.TimeStamper(fmt="iso"),
            # Console renderer for local development, JSON for production.
            structlog.dev.ConsoleRenderer() if settings.debug else structlog.processors.JSONRenderer(),
        ],
        # Named stdlib levels instead of the magic numbers 10 / 20.
        wrapper_class=structlog.make_filtering_bound_logger(
            logging.DEBUG if settings.debug else logging.INFO
        ),
        context_class=dict,
        logger_factory=structlog.PrintLoggerFactory(),
        cache_logger_on_first_use=True,
    )


def get_logger(name: str = __name__) -> structlog.stdlib.BoundLogger:
    """Return a structlog logger bound to ``name``."""
    return structlog.get_logger(name)
@@ -0,0 +1,38 @@
"""FastAPI application entry point."""

from contextlib import asynccontextmanager
from typing import AsyncGenerator

from fastapi import FastAPI
from fastapi.responses import ORJSONResponse

from src.config import get_settings
from src.features import health
from src.lib.logging import configure_logging, get_logger

settings = get_settings()
configure_logging()
logger = get_logger(__name__)


@asynccontextmanager
async def lifespan(_app: FastAPI) -> AsyncGenerator[None, None]:
    """Log application startup and shutdown around the serving lifetime.

    FastAPI passes the application instance positionally; this hook does not
    use it, so the parameter is underscore-prefixed to satisfy the ruff ARG
    rule enabled in pyproject.toml.
    """
    logger.info("Starting application", app_name=settings.app_name)
    yield
    logger.info("Shutting down application")


app = FastAPI(
    title=settings.app_name,
    description="Fast Python backend starter kit with high-performance packages",
    version="1.0.0",
    # orjson-backed responses for faster JSON serialization.
    default_response_class=ORJSONResponse,
    lifespan=lifespan,
    docs_url="/docs",
    redoc_url="/redoc",
)

app.include_router(health.router)


@app.get("/")
async def root() -> dict[str, str]:
    """Simple welcome message at the API root."""
    return {"message": "Welcome to Python Starter Kit"}
@@ -0,0 +1,26 @@
"""Smoke tests for the root and health endpoints (in-process, no real network)."""

from collections.abc import AsyncIterator

import pytest
from httpx import ASGITransport, AsyncClient

from src.main import app


@pytest.fixture
async def client() -> AsyncIterator[AsyncClient]:
    """Yield an httpx client wired directly to the ASGI app.

    This is a generator fixture, so the correct annotation is
    ``AsyncIterator[AsyncClient]`` — annotating it as ``AsyncClient`` (as
    before) fails under the strict mypy config in pyproject.toml.
    """
    transport = ASGITransport(app=app)
    async with AsyncClient(transport=transport, base_url="http://test") as ac:
        yield ac


@pytest.mark.asyncio
async def test_health_check(client: AsyncClient) -> None:
    response = await client.get("/health")
    assert response.status_code == 200
    data = response.json()
    assert data["status"] == "healthy"
    assert "timestamp" in data


@pytest.mark.asyncio
async def test_root(client: AsyncClient) -> None:
    response = await client.get("/")
    assert response.status_code == 200
    assert response.json() == {"message": "Welcome to Python Starter Kit"}