@totaland/create-starter-kit 2.0.3 → 2.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/index.js +61 -16
- package/package.json +5 -6
- package/templates/python-backend/.env.example +36 -0
- package/templates/python-backend/Makefile +26 -0
- package/templates/python-backend/README.md +123 -0
- package/templates/python-backend/pyproject.toml +143 -0
- package/templates/python-backend/src/__init__.py +1 -0
- package/templates/python-backend/src/config.py +50 -0
- package/templates/python-backend/src/features/__init__.py +1 -0
- package/templates/python-backend/src/features/agents/__init__.py +3 -0
- package/templates/python-backend/src/features/agents/router.py +164 -0
- package/templates/python-backend/src/features/agents/schemas.py +52 -0
- package/templates/python-backend/src/features/chat/__init__.py +3 -0
- package/templates/python-backend/src/features/chat/router.py +98 -0
- package/templates/python-backend/src/features/chat/schemas.py +36 -0
- package/templates/python-backend/src/features/health/__init__.py +3 -0
- package/templates/python-backend/src/features/health/router.py +13 -0
- package/templates/python-backend/src/features/health/schemas.py +6 -0
- package/templates/python-backend/src/features/orders/__init__.py +3 -0
- package/templates/python-backend/src/features/orders/router.py +40 -0
- package/templates/python-backend/src/features/orders/schemas.py +18 -0
- package/templates/python-backend/src/lib/__init__.py +1 -0
- package/templates/python-backend/src/lib/agents.py +167 -0
- package/templates/python-backend/src/lib/cache.py +38 -0
- package/templates/python-backend/src/lib/database.py +31 -0
- package/templates/python-backend/src/lib/llm.py +155 -0
- package/templates/python-backend/src/lib/logging.py +25 -0
- package/templates/python-backend/src/main.py +41 -0
- package/templates/python-backend/tests/__init__.py +1 -0
- package/templates/python-backend/tests/test_health.py +26 -0
- package/templates/python-backend/tests/test_orders.py +38 -0
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLM Provider abstraction layer.
|
|
3
|
+
|
|
4
|
+
Provides a unified interface for OpenAI and Anthropic models with:
|
|
5
|
+
- Streaming support
|
|
6
|
+
- Token counting
|
|
7
|
+
- Retry logic with exponential backoff
|
|
8
|
+
- LangSmith tracing integration
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from typing import Literal, AsyncIterator
|
|
12
|
+
from functools import lru_cache
|
|
13
|
+
|
|
14
|
+
from langchain_openai import ChatOpenAI
|
|
15
|
+
from langchain_anthropic import ChatAnthropic
|
|
16
|
+
from langchain_core.language_models.chat_models import BaseChatModel
|
|
17
|
+
from langchain_core.messages import BaseMessage, AIMessageChunk
|
|
18
|
+
from langchain_core.callbacks import AsyncCallbackHandler
|
|
19
|
+
import tiktoken
|
|
20
|
+
|
|
21
|
+
from src.config import get_settings
|
|
22
|
+
from src.lib.logging import get_logger
|
|
23
|
+
|
|
24
|
+
logger = get_logger(__name__)
|
|
25
|
+
|
|
26
|
+
LLMProvider = Literal["openai", "anthropic"]
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@lru_cache
def get_openai_model(
    model: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    streaming: bool | None = None,
) -> ChatOpenAI:
    """Build (and memoize) an OpenAI chat model instance.

    Any argument left as ``None`` falls back to the corresponding value
    from application settings. Because the function is ``lru_cache``-d,
    identical argument combinations share a single client instance.
    """
    cfg = get_settings()
    resolved_temperature = cfg.llm_temperature if temperature is None else temperature
    resolved_streaming = cfg.llm_streaming if streaming is None else streaming
    return ChatOpenAI(
        model=model or cfg.openai_model,
        api_key=cfg.openai_api_key,
        temperature=resolved_temperature,
        max_tokens=max_tokens or cfg.llm_max_tokens,
        streaming=resolved_streaming,
    )
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@lru_cache
def get_anthropic_model(
    model: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    streaming: bool | None = None,
) -> ChatAnthropic:
    """Build (and memoize) an Anthropic chat model instance.

    Any argument left as ``None`` falls back to the corresponding value
    from application settings. Because the function is ``lru_cache``-d,
    identical argument combinations share a single client instance.
    """
    cfg = get_settings()
    resolved_temperature = cfg.llm_temperature if temperature is None else temperature
    resolved_streaming = cfg.llm_streaming if streaming is None else streaming
    return ChatAnthropic(
        model=model or cfg.anthropic_model,
        api_key=cfg.anthropic_api_key,
        temperature=resolved_temperature,
        max_tokens=max_tokens or cfg.llm_max_tokens,
        streaming=resolved_streaming,
    )
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def get_llm(
    provider: LLMProvider | None = None,
    model: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    streaming: bool | None = None,
) -> BaseChatModel:
    """
    Resolve a configured chat model for the requested provider.

    Args:
        provider: "openai" or "anthropic". Defaults to settings.default_llm_provider
        model: Model name override
        temperature: Temperature override
        max_tokens: Max tokens override
        streaming: Streaming override

    Returns:
        A configured chat model instance

    Raises:
        ValueError: If the provider is not a known one.
    """
    chosen = provider or get_settings().default_llm_provider

    # Dispatch table instead of an if/elif chain; both factories share
    # the same positional signature.
    factories = {
        "openai": get_openai_model,
        "anthropic": get_anthropic_model,
    }
    factory = factories.get(chosen)
    if factory is None:
        raise ValueError(f"Unknown provider: {chosen}")
    return factory(model, temperature, max_tokens, streaming)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
async def stream_llm_response(
    messages: list[BaseMessage],
    provider: LLMProvider | None = None,
    model: str | None = None,
    callbacks: list[AsyncCallbackHandler] | None = None,
) -> AsyncIterator[str]:
    """
    Stream an LLM response as plain-text chunks.

    Args:
        messages: List of chat messages
        provider: LLM provider
        model: Model name override
        callbacks: Optional async callbacks

    Yields:
        Text chunks from the LLM response
    """
    # Always force streaming mode regardless of the settings default.
    chat_model = get_llm(provider=provider, model=model, streaming=True)
    run_config = {"callbacks": callbacks if callbacks is not None else []}

    async for piece in chat_model.astream(messages, config=run_config):
        # Only surface non-empty AI message chunks as text.
        if isinstance(piece, AIMessageChunk) and piece.content:
            yield str(piece.content)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def count_tokens(text: str, model: str = "gpt-4o") -> int:
    """
    Count the tokens *text* would occupy for *model* using tiktoken.

    Falls back to the ``cl100k_base`` encoding when tiktoken does not
    recognize the model name.

    Args:
        text: Text to count tokens for
        model: Model name for encoding selection

    Returns:
        Token count
    """
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model names raise KeyError; cl100k_base is the common default.
        enc = tiktoken.get_encoding("cl100k_base")
    return len(enc.encode(text))
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def count_message_tokens(messages: list[BaseMessage], model: str = "gpt-4o") -> int:
    """
    Estimate the total tokens for a list of chat messages.

    Mirrors the common chat-token accounting scheme: each message carries
    a small fixed framing overhead on top of its content tokens, and the
    conversation adds one constant overall (reply priming). NOTE(review):
    the diff rendering destroyed indentation; the conversation overhead is
    placed outside the loop here, matching its "Conversation overhead"
    comment — confirm against the original file.

    Args:
        messages: List of messages to count
        model: Model name for encoding

    Returns:
        Total token count
    """
    total = 0
    for message in messages:
        total += count_tokens(str(message.content), model)
        total += 4  # Overhead per message (role/name framing)
    total += 2  # Conversation overhead, counted once
    return total
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
import structlog
|
|
2
|
+
from src.config import get_settings
|
|
3
|
+
|
|
4
|
+
settings = get_settings()
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def configure_logging() -> None:
    """Configure structlog for the whole process.

    Pretty console output and DEBUG-level filtering when ``settings.debug``
    is set; JSON output and INFO-level filtering otherwise.
    """
    renderer = (
        structlog.dev.ConsoleRenderer()
        if settings.debug
        else structlog.processors.JSONRenderer()
    )
    # 10 == logging.DEBUG, 20 == logging.INFO
    min_level = 10 if settings.debug else 20

    structlog.configure(
        processors=[
            structlog.contextvars.merge_contextvars,
            structlog.processors.add_log_level,
            structlog.processors.StackInfoRenderer(),
            structlog.dev.set_exc_info,
            structlog.processors.TimeStamper(fmt="iso"),
            renderer,
        ],
        wrapper_class=structlog.make_filtering_bound_logger(min_level),
        context_class=dict,
        logger_factory=structlog.PrintLoggerFactory(),
        cache_logger_on_first_use=True,
    )
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def get_logger(name: str = __name__) -> structlog.stdlib.BoundLogger:
    """Return a structlog bound logger for *name*.

    NOTE(review): the default binds ``__name__`` of *this* module at
    definition time, so callers that omit *name* get this logging
    module's name rather than their own — pass ``__name__`` explicitly
    at call sites.
    """
    return structlog.get_logger(name)
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from contextlib import asynccontextmanager
|
|
2
|
+
from typing import AsyncGenerator
|
|
3
|
+
from fastapi import FastAPI
|
|
4
|
+
from fastapi.responses import ORJSONResponse
|
|
5
|
+
|
|
6
|
+
from src.config import get_settings
|
|
7
|
+
from src.lib.logging import configure_logging, get_logger
|
|
8
|
+
from src.features import health, orders, chat, agents
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
settings = get_settings()
|
|
12
|
+
configure_logging()
|
|
13
|
+
logger = get_logger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """FastAPI lifespan hook: log once before serving starts and once on shutdown."""
    logger.info("Starting application", app_name=settings.app_name)
    # Control returns to FastAPI here for the app's whole serving lifetime.
    yield
    logger.info("Shutting down application")
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# Application instance: ORJSON responses by default, lifespan hook for
# startup/shutdown logging, interactive docs exposed at /docs and /redoc.
app = FastAPI(
    title=settings.app_name,
    description="Fast Python backend starter kit with high-performance packages",
    version="1.0.0",
    default_response_class=ORJSONResponse,
    lifespan=lifespan,
    docs_url="/docs",
    redoc_url="/redoc",
)

# Mount one router per feature slice (src/features/*).
app.include_router(health.router)
app.include_router(orders.router)
app.include_router(chat.router)
app.include_router(agents.router)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@app.get("/")
|
|
40
|
+
async def root() -> dict[str, str]:
|
|
41
|
+
return {"message": "Welcome to Python Starter Kit"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
from httpx import AsyncClient, ASGITransport
|
|
3
|
+
from src.main import app
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@pytest.fixture
async def client() -> AsyncClient:
    """HTTPX client wired straight into the ASGI app — no network involved."""
    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://test"
    ) as test_client:
        yield test_client
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@pytest.mark.asyncio
async def test_health_check(client: AsyncClient) -> None:
    """GET /health returns 200 with a healthy status and a timestamp."""
    response = await client.get("/health")
    assert response.status_code == 200
    payload = response.json()
    assert payload["status"] == "healthy"
    assert "timestamp" in payload
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@pytest.mark.asyncio
async def test_root(client: AsyncClient) -> None:
    """GET / returns the static welcome message."""
    response = await client.get("/")
    assert response.status_code == 200
    body = response.json()
    assert body == {"message": "Welcome to Python Starter Kit"}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
from httpx import AsyncClient, ASGITransport
|
|
3
|
+
from src.main import app
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@pytest.fixture
async def client() -> AsyncClient:
    """HTTPX client wired straight into the ASGI app — no network involved."""
    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://test"
    ) as test_client:
        yield test_client
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@pytest.mark.asyncio
async def test_create_order(client: AsyncClient) -> None:
    """POST /orders echoes the submitted fields and assigns an id."""
    payload = {"product": "Widget", "quantity": 5, "price": 19.99}
    response = await client.post("/orders", json=payload)
    assert response.status_code == 201
    created = response.json()
    assert created["product"] == "Widget"
    assert created["quantity"] == 5
    assert created["price"] == 19.99
    assert "id" in created
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@pytest.mark.asyncio
async def test_list_orders(client: AsyncClient) -> None:
    """GET /orders returns a payload containing an 'orders' collection."""
    response = await client.get("/orders")
    assert response.status_code == 200
    assert "orders" in response.json()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@pytest.mark.asyncio
async def test_get_order_not_found(client: AsyncClient) -> None:
    """GET /orders/<unknown id> yields a 404."""
    response = await client.get("/orders/nonexistent")
    assert response.status_code == 404
|