@totaland/create-starter-kit 2.0.3 → 2.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/bin/index.js +61 -16
  2. package/package.json +5 -6
  3. package/templates/python-backend/.env.example +36 -0
  4. package/templates/python-backend/Makefile +26 -0
  5. package/templates/python-backend/README.md +123 -0
  6. package/templates/python-backend/pyproject.toml +143 -0
  7. package/templates/python-backend/src/__init__.py +1 -0
  8. package/templates/python-backend/src/config.py +50 -0
  9. package/templates/python-backend/src/features/__init__.py +1 -0
  10. package/templates/python-backend/src/features/agents/__init__.py +3 -0
  11. package/templates/python-backend/src/features/agents/router.py +164 -0
  12. package/templates/python-backend/src/features/agents/schemas.py +52 -0
  13. package/templates/python-backend/src/features/chat/__init__.py +3 -0
  14. package/templates/python-backend/src/features/chat/router.py +98 -0
  15. package/templates/python-backend/src/features/chat/schemas.py +36 -0
  16. package/templates/python-backend/src/features/health/__init__.py +3 -0
  17. package/templates/python-backend/src/features/health/router.py +13 -0
  18. package/templates/python-backend/src/features/health/schemas.py +6 -0
  19. package/templates/python-backend/src/features/orders/__init__.py +3 -0
  20. package/templates/python-backend/src/features/orders/router.py +40 -0
  21. package/templates/python-backend/src/features/orders/schemas.py +18 -0
  22. package/templates/python-backend/src/lib/__init__.py +1 -0
  23. package/templates/python-backend/src/lib/agents.py +167 -0
  24. package/templates/python-backend/src/lib/cache.py +38 -0
  25. package/templates/python-backend/src/lib/database.py +31 -0
  26. package/templates/python-backend/src/lib/llm.py +155 -0
  27. package/templates/python-backend/src/lib/logging.py +25 -0
  28. package/templates/python-backend/src/main.py +41 -0
  29. package/templates/python-backend/tests/__init__.py +1 -0
  30. package/templates/python-backend/tests/test_health.py +26 -0
  31. package/templates/python-backend/tests/test_orders.py +38 -0
package/templates/python-backend/src/features/agents/router.py
@@ -0,0 +1,164 @@
+ """Agent workflow endpoints with LangGraph orchestration."""
+
+ import uuid
+ import json
+ from typing import AsyncIterator
+
+ from fastapi import APIRouter, HTTPException
+ from sse_starlette.sse import EventSourceResponse
+ from langchain_core.messages import HumanMessage
+
+ from src.lib.llm import get_llm
+ from src.lib.agents import AgentBuilder, get_default_tools, AgentConfig
+ from src.lib.logging import get_logger
+ from src.config import get_settings
+ from .schemas import AgentRequest, AgentResponse, AgentStep, AgentStreamEvent
+
+ router = APIRouter(prefix="/agents", tags=["Agents"])
+ logger = get_logger(__name__)
+ settings = get_settings()
+
+ DEFAULT_SYSTEM_PROMPT = """You are a helpful AI assistant with access to tools.
+
+ When you need to take an action or get information, use the available tools.
+ Think step by step and explain your reasoning.
+ Always provide a clear, helpful final answer.
+ """
+
+
+ @router.post("/invoke", response_model=AgentResponse)
+ async def invoke_agent(request: AgentRequest):
+     """
+     Invoke an AI agent to process a message.
+
+     The agent will:
+     1. Analyze the request
+     2. Use tools if needed
+     3. Reason through the problem
+     4. Return a final answer
+
+     Supports streaming for real-time step updates.
+     """
+     try:
+         provider = request.provider or settings.default_llm_provider
+         thread_id = request.thread_id or str(uuid.uuid4())
+
+         llm = get_llm(
+             provider=provider,
+             model=request.model,
+             streaming=request.stream,
+         )
+
+         tools = get_default_tools()
+
+         agent = (
+             AgentBuilder(name="assistant")
+             .with_llm(llm)
+             .with_tools(tools)
+             .with_system_prompt(request.system_prompt or DEFAULT_SYSTEM_PROMPT)
+             .with_memory()
+             .with_config(AgentConfig(max_iterations=request.max_iterations))
+             .build()
+         )
+
+         if request.stream:
+             return EventSourceResponse(
+                 stream_agent_execution(agent, request.message, thread_id, provider, request.model),
+                 media_type="text/event-stream"
+             )
+
+         result = await agent.ainvoke(
+             {"messages": [HumanMessage(content=request.message)]},
+             config={"configurable": {"thread_id": thread_id}}
+         )
+
+         messages = result.get("messages", [])
+         final_message = messages[-1] if messages else None
+
+         return AgentResponse(
+             answer=str(final_message.content) if final_message else "No response generated",
+             steps=[],
+             thread_id=thread_id,
+             model=request.model or (settings.openai_model if provider == "openai" else settings.anthropic_model),
+             provider=provider,
+         )
+
+     except Exception as e:
+         logger.error("Agent invocation failed", error=str(e))
+         raise HTTPException(status_code=500, detail=str(e))
+
+
+ async def stream_agent_execution(
+     agent,
+     message: str,
+     thread_id: str,
+     provider: str,
+     model: str | None
+ ) -> AsyncIterator[dict]:
+     """Stream agent execution events."""
+     try:
+         config = {"configurable": {"thread_id": thread_id}}
+
+         async for event in agent.astream_events(
+             {"messages": [HumanMessage(content=message)]},
+             config=config,
+             version="v2"
+         ):
+             kind = event["event"]
+
+             if kind == "on_chat_model_stream":
+                 chunk = event["data"]["chunk"]
+                 if chunk.content:
+                     yield {
+                         "event": "token",
+                         "data": json.dumps({"content": chunk.content})
+                     }
+
+             elif kind == "on_tool_start":
+                 yield {
+                     "event": "tool_start",
+                     "data": json.dumps({
+                         "tool": event["name"],
+                         "input": event["data"].get("input", {})
+                     })
+                 }
+
+             elif kind == "on_tool_end":
+                 yield {
+                     "event": "tool_end",
+                     "data": json.dumps({
+                         "tool": event["name"],
+                         "output": str(event["data"].get("output", ""))
+                     })
+                 }
+
+         yield {
+             "event": "done",
+             "data": json.dumps({
+                 "thread_id": thread_id,
+                 "model": model or settings.openai_model,
+                 "provider": provider
+             })
+         }
+
+     except Exception as e:
+         logger.error("Agent streaming error", error=str(e))
+         yield {
+             "event": "error",
+             "data": json.dumps({"error": str(e)})
+         }
+
+
+ @router.get("/tools")
+ async def list_available_tools():
+     """List all available tools for agents."""
+     tools = get_default_tools()
+     return {
+         "tools": [
+             {
+                 "name": tool.name,
+                 "description": tool.description,
+             }
+             for tool in tools
+         ]
+     }
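
For orientation, a rough client-side sketch of calling these endpoints (not part of the package): it assumes the application from src/main.py is running on http://localhost:8000 and that the routers are mounted without an additional path prefix.

    import asyncio
    import httpx

    async def main() -> None:
        async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
            # Non-streaming invocation: stream=False returns a single AgentResponse JSON body.
            resp = await client.post(
                "/agents/invoke",
                json={"message": "What time is it?", "stream": False},
                timeout=60,
            )
            resp.raise_for_status()
            print(resp.json()["answer"])

            # List the tools the default agent can use.
            tools = await client.get("/agents/tools")
            print(tools.json())

    asyncio.run(main())
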
package/templates/python-backend/src/features/agents/schemas.py
@@ -0,0 +1,52 @@
+ """Agent workflow schemas."""
+
+ from pydantic import BaseModel, Field
+ from typing import Literal, Any
+
+
+ class AgentMessage(BaseModel):
+     """A message in the agent conversation."""
+     role: Literal["user", "assistant", "tool"] = Field(description="Message role")
+     content: str = Field(description="Message content")
+     tool_calls: list[dict] | None = Field(default=None, description="Tool calls made")
+     tool_call_id: str | None = Field(default=None, description="ID of tool call this responds to")
+
+
+ class AgentRequest(BaseModel):
+     """Request to invoke an agent."""
+     message: str = Field(description="User message to process")
+     thread_id: str | None = Field(default=None, description="Thread ID for conversation memory")
+     provider: Literal["openai", "anthropic"] | None = Field(default=None)
+     model: str | None = Field(default=None)
+     system_prompt: str | None = Field(default=None, description="Override system prompt")
+     tools: list[str] | None = Field(
+         default=None,
+         description="Tool names to enable (default: all)"
+     )
+     max_iterations: int = Field(default=10, ge=1, le=50, description="Max reasoning steps")
+     stream: bool = Field(default=True, description="Stream intermediate steps")
+
+
+ class AgentStep(BaseModel):
+     """A single step in agent execution."""
+     step_type: Literal["thought", "tool_call", "tool_result", "final_answer"]
+     content: str
+     tool_name: str | None = None
+     tool_input: dict | None = None
+     tool_output: str | None = None
+
+
+ class AgentResponse(BaseModel):
+     """Complete agent response."""
+     answer: str = Field(description="Final answer from the agent")
+     steps: list[AgentStep] = Field(default_factory=list, description="Execution steps")
+     thread_id: str | None = Field(description="Thread ID for follow-up")
+     tokens_used: int | None = None
+     model: str
+     provider: str
+
+
+ class AgentStreamEvent(BaseModel):
+     """Streaming event from agent execution."""
+     event: Literal["step", "tool_start", "tool_end", "token", "done", "error"]
+     data: dict[str, Any]
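
As a quick illustration (not shipped with the package), only the message field of AgentRequest is required; everything else falls back to its default. The sketch below assumes the template uses Pydantic v2 (model_dump); on v1 the equivalent would be .dict().

    from src.features.agents.schemas import AgentRequest

    # Only "message" is required; provider/model/thread_id default to None
    # and stream defaults to True, so this would request a streaming run.
    req = AgentRequest(message="Summarize the latest order volume")
    print(req.model_dump(exclude_none=True))
    # {'message': 'Summarize the latest order volume', 'max_iterations': 10, 'stream': True}
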
package/templates/python-backend/src/features/chat/__init__.py
@@ -0,0 +1,3 @@
+ from .router import router
+
+ __all__ = ["router"]
package/templates/python-backend/src/features/chat/router.py
@@ -0,0 +1,98 @@
+ """Chat completion endpoints with streaming support."""
+
+ from fastapi import APIRouter, HTTPException
+ from fastapi.responses import StreamingResponse
+ from sse_starlette.sse import EventSourceResponse
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+ import json
+
+ from src.lib.llm import get_llm, stream_llm_response, count_message_tokens
+ from src.lib.logging import get_logger
+ from src.config import get_settings
+ from .schemas import ChatRequest, ChatResponse, ChatMessage
+
+ router = APIRouter(prefix="/chat", tags=["Chat"])
+ logger = get_logger(__name__)
+ settings = get_settings()
+
+
+ def convert_to_langchain_messages(messages: list[ChatMessage]):
+     """Convert API messages to LangChain format."""
+     result = []
+     for msg in messages:
+         if msg.role == "user":
+             result.append(HumanMessage(content=msg.content))
+         elif msg.role == "assistant":
+             result.append(AIMessage(content=msg.content))
+         elif msg.role == "system":
+             result.append(SystemMessage(content=msg.content))
+     return result
+
+
+ @router.post("/completions", response_model=ChatResponse)
+ async def create_chat_completion(request: ChatRequest) -> ChatResponse | StreamingResponse:
+     """
+     Create a chat completion.
+
+     Supports both streaming (SSE) and non-streaming responses.
+     """
+     try:
+         provider = request.provider or settings.default_llm_provider
+         messages = convert_to_langchain_messages(request.messages)
+
+         if request.stream:
+             return EventSourceResponse(
+                 stream_chat_response(messages, provider, request.model),
+                 media_type="text/event-stream"
+             )
+
+         llm = get_llm(
+             provider=provider,
+             model=request.model,
+             temperature=request.temperature,
+             max_tokens=request.max_tokens,
+             streaming=False,
+         )
+
+         response = await llm.ainvoke(messages)
+
+         return ChatResponse(
+             message=ChatMessage(role="assistant", content=str(response.content)),
+             usage={
+                 "prompt_tokens": count_message_tokens(messages),
+                 "completion_tokens": count_message_tokens([response]),
+                 "total_tokens": count_message_tokens(messages + [response]),
+             },
+             model=request.model or (settings.openai_model if provider == "openai" else settings.anthropic_model),
+             provider=provider,
+         )
+     except Exception as e:
+         logger.error("Chat completion failed", error=str(e))
+         raise HTTPException(status_code=500, detail=str(e))
+
+
+ async def stream_chat_response(messages, provider: str, model: str | None):
+     """Generate SSE events for streaming chat response."""
+     try:
+         async for chunk in stream_llm_response(messages, provider=provider, model=model):
+             yield {
+                 "event": "message",
+                 "data": json.dumps({"delta": chunk})
+             }
+         yield {
+             "event": "done",
+             "data": json.dumps({"finish_reason": "stop"})
+         }
+     except Exception as e:
+         logger.error("Streaming error", error=str(e))
+         yield {
+             "event": "error",
+             "data": json.dumps({"error": str(e)})
+         }
+
+
+ @router.post("/completions/sync", response_model=ChatResponse)
+ async def create_chat_completion_sync(request: ChatRequest) -> ChatResponse:
+     """Create a non-streaming chat completion."""
+     request.stream = False
+     return await create_chat_completion(request)
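
A hedged consumer-side sketch for the streaming endpoint (illustrative only, assuming the app is served locally): EventSourceResponse writes standard SSE frames, so plain httpx line iteration is enough to read them.

    import asyncio
    import json
    import httpx

    async def stream_chat() -> None:
        payload = {
            "messages": [{"role": "user", "content": "Tell me a short joke"}],
            "stream": True,
        }
        async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
            async with client.stream("POST", "/chat/completions", json=payload, timeout=60) as resp:
                resp.raise_for_status()
                async for line in resp.aiter_lines():
                    # EventSourceResponse emits "event: ..." / "data: ..." pairs separated by blank lines.
                    if line.startswith("data:"):
                        print(json.loads(line.removeprefix("data:").strip()))

    asyncio.run(stream_chat())
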
package/templates/python-backend/src/features/chat/schemas.py
@@ -0,0 +1,36 @@
+ """Chat feature schemas."""
+
+ from pydantic import BaseModel, Field
+ from typing import Literal
+
+
+ class ChatMessage(BaseModel):
+     """A single chat message."""
+     role: Literal["user", "assistant", "system"] = Field(description="Message role")
+     content: str = Field(description="Message content")
+
+
+ class ChatRequest(BaseModel):
+     """Request for chat completion."""
+     messages: list[ChatMessage] = Field(description="Conversation messages")
+     provider: Literal["openai", "anthropic"] | None = Field(
+         default=None, description="LLM provider to use"
+     )
+     model: str | None = Field(default=None, description="Model override")
+     temperature: float | None = Field(default=None, ge=0, le=2, description="Temperature")
+     max_tokens: int | None = Field(default=None, ge=1, description="Max tokens")
+     stream: bool = Field(default=True, description="Enable streaming response")
+
+
+ class ChatResponse(BaseModel):
+     """Non-streaming chat response."""
+     message: ChatMessage
+     usage: dict[str, int] | None = None
+     model: str
+     provider: str
+
+
+ class ChatStreamChunk(BaseModel):
+     """A streaming response chunk."""
+     delta: str
+     finish_reason: str | None = None
package/templates/python-backend/src/features/health/__init__.py
@@ -0,0 +1,3 @@
+ from .router import router
+
+ __all__ = ["router"]
package/templates/python-backend/src/features/health/router.py
@@ -0,0 +1,13 @@
+ from fastapi import APIRouter
+ from .schemas import HealthResponse
+ import pendulum
+
+ router = APIRouter(prefix="/health", tags=["Health"])
+
+
+ @router.get("", response_model=HealthResponse)
+ async def health_check() -> HealthResponse:
+     return HealthResponse(
+         status="healthy",
+         timestamp=pendulum.now("UTC").to_iso8601_string(),
+     )
package/templates/python-backend/src/features/health/schemas.py
@@ -0,0 +1,6 @@
+ import msgspec
+
+
+ class HealthResponse(msgspec.Struct):
+     status: str
+     timestamp: str
package/templates/python-backend/src/features/orders/__init__.py
@@ -0,0 +1,3 @@
+ from .router import router
+
+ __all__ = ["router"]
package/templates/python-backend/src/features/orders/router.py
@@ -0,0 +1,40 @@
+ from fastapi import APIRouter, HTTPException
+ from uuid import uuid4
+ from .schemas import Order, CreateOrderRequest, OrderListResponse
+ from src.lib.cache import cached
+
+ router = APIRouter(prefix="/orders", tags=["Orders"])
+
+ _orders: dict[str, Order] = {}
+
+
+ @router.get("", response_model=OrderListResponse)
+ @cached(ttl=60)
+ async def list_orders() -> OrderListResponse:
+     return OrderListResponse(orders=list(_orders.values()))
+
+
+ @router.get("/{order_id}", response_model=Order)
+ async def get_order(order_id: str) -> Order:
+     if order_id not in _orders:
+         raise HTTPException(status_code=404, detail="Order not found")
+     return _orders[order_id]
+
+
+ @router.post("", response_model=Order, status_code=201)
+ async def create_order(request: CreateOrderRequest) -> Order:
+     order = Order(
+         id=str(uuid4()),
+         product=request.product,
+         quantity=request.quantity,
+         price=request.price,
+     )
+     _orders[order.id] = order
+     return order
+
+
+ @router.delete("/{order_id}", status_code=204)
+ async def delete_order(order_id: str) -> None:
+     if order_id not in _orders:
+         raise HTTPException(status_code=404, detail="Order not found")
+     del _orders[order_id]
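
The shipped tests/test_orders.py presumably exercises this router already; the sketch below is only an illustration of the CRUD flow and assumes src.main exposes the FastAPI instance as app with the msgspec response models wired up there. Note that list_orders is cached for 60 seconds, so a freshly created order may not appear in GET /orders immediately, while lookups by id are uncached.

    from fastapi.testclient import TestClient
    from src.main import app

    client = TestClient(app)

    # Create, read back, then delete an order held in the in-memory _orders dict.
    created = client.post("/orders", json={"product": "widget", "quantity": 2, "price": 9.99})
    assert created.status_code == 201
    order_id = created.json()["id"]

    assert client.get(f"/orders/{order_id}").status_code == 200
    assert client.delete(f"/orders/{order_id}").status_code == 204
    assert client.get(f"/orders/{order_id}").status_code == 404
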
package/templates/python-backend/src/features/orders/schemas.py
@@ -0,0 +1,18 @@
+ import msgspec
+
+
+ class Order(msgspec.Struct):
+     id: str
+     product: str
+     quantity: int
+     price: float
+
+
+ class CreateOrderRequest(msgspec.Struct):
+     product: str
+     quantity: int
+     price: float
+
+
+ class OrderListResponse(msgspec.Struct):
+     orders: list[Order]
package/templates/python-backend/src/lib/agents.py
@@ -0,0 +1,167 @@
+ """
+ LangGraph Agent utilities and base components.
+
+ Provides:
+ - AgentState: Base state class for LangGraph workflows
+ - Tool registration and execution helpers
+ - Graph building utilities
+ - Common agent patterns (ReAct, Plan-and-Execute)
+ """
+
+ from typing import Annotated, Sequence, TypedDict, Any, Callable
+ from dataclasses import dataclass, field
+ import operator
+ import json
+
+ from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, ToolMessage
+ from langchain_core.tools import BaseTool, tool
+ from langgraph.graph import StateGraph, END
+ from langgraph.prebuilt import ToolNode
+ from langgraph.checkpoint.memory import MemorySaver
+
+ from src.lib.logging import get_logger
+
+ logger = get_logger(__name__)
+
+
+ class AgentState(TypedDict):
+     """Base state for LangGraph agents."""
+     messages: Annotated[Sequence[BaseMessage], operator.add]
+     next_action: str | None
+     intermediate_steps: list[tuple[str, str]]
+     metadata: dict[str, Any]
+
+
+ @dataclass
+ class AgentConfig:
+     """Configuration for agent behavior."""
+     max_iterations: int = 10
+     return_intermediate_steps: bool = False
+     handle_parsing_errors: bool = True
+     verbose: bool = False
+
+
+ def create_tool_node(tools: list[BaseTool]) -> ToolNode:
+     """Create a LangGraph ToolNode from a list of tools."""
+     return ToolNode(tools)
+
+
+ def should_continue(state: AgentState) -> str:
+     """Determine if the agent should continue or end."""
+     messages = state["messages"]
+     last_message = messages[-1]
+
+     if hasattr(last_message, "tool_calls") and last_message.tool_calls:
+         return "tools"
+     return END
+
+
+ def format_tool_result(tool_name: str, result: Any) -> str:
+     """Format tool execution result for the agent."""
+     if isinstance(result, dict):
+         return json.dumps(result, indent=2)
+     return str(result)
+
+
+ class AgentBuilder:
+     """Builder pattern for creating LangGraph agents."""
+
+     def __init__(self, name: str = "agent"):
+         self.name = name
+         self.tools: list[BaseTool] = []
+         self.llm = None
+         self.system_prompt: str = ""
+         self.checkpointer = None
+         self.config = AgentConfig()
+
+     def with_llm(self, llm) -> "AgentBuilder":
+         """Set the LLM for the agent."""
+         self.llm = llm
+         return self
+
+     def with_tools(self, tools: list[BaseTool]) -> "AgentBuilder":
+         """Add tools to the agent."""
+         self.tools.extend(tools)
+         return self
+
+     def with_system_prompt(self, prompt: str) -> "AgentBuilder":
+         """Set the system prompt."""
+         self.system_prompt = prompt
+         return self
+
+     def with_memory(self) -> "AgentBuilder":
+         """Enable memory/checkpointing."""
+         self.checkpointer = MemorySaver()
+         return self
+
+     def with_config(self, config: AgentConfig) -> "AgentBuilder":
+         """Set agent configuration."""
+         self.config = config
+         return self
+
+     def build(self) -> StateGraph:
+         """Build and return the agent graph."""
+         if not self.llm:
+             raise ValueError("LLM must be set before building")
+
+         llm_with_tools = self.llm.bind_tools(self.tools) if self.tools else self.llm
+
+         def agent_node(state: AgentState) -> dict:
+             """The main agent reasoning node."""
+             messages = list(state["messages"])
+
+             if self.system_prompt and not any(
+                 isinstance(m, HumanMessage) and "system" in str(m.content).lower()
+                 for m in messages[:1]
+             ):
+                 from langchain_core.messages import SystemMessage
+                 messages = [SystemMessage(content=self.system_prompt)] + messages
+
+             response = llm_with_tools.invoke(messages)
+             return {"messages": [response]}
+
+         graph = StateGraph(AgentState)
+         graph.add_node("agent", agent_node)
+
+         if self.tools:
+             tool_node = create_tool_node(self.tools)
+             graph.add_node("tools", tool_node)
+             graph.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
+             graph.add_edge("tools", "agent")
+         else:
+             graph.add_edge("agent", END)
+
+         graph.set_entry_point("agent")
+
+         return graph.compile(checkpointer=self.checkpointer)
+
+
+ @tool
+ def search_web(query: str) -> str:
+     """Search the web for information. Use this when you need current information."""
+     return f"[Mock search result for: {query}] - Replace with actual implementation"
+
+
+ @tool
+ def get_current_time() -> str:
+     """Get the current date and time."""
+     import pendulum
+     return pendulum.now("UTC").to_iso8601_string()
+
+
+ @tool
+ def calculate(expression: str) -> str:
+     """Evaluate a mathematical expression. Example: calculate('2 + 2')"""
+     try:
+         allowed_chars = set("0123456789+-*/.() ")
+         if not all(c in allowed_chars for c in expression):
+             return "Error: Invalid characters in expression"
+         result = eval(expression)
+         return str(result)
+     except Exception as e:
+         return f"Error: {e}"
+
+
+ def get_default_tools() -> list[BaseTool]:
+     """Get the default set of tools for agents."""
+     return [search_web, get_current_time, calculate]
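
A condensed sketch of driving AgentBuilder directly, mirroring what src/features/agents/router.py does. It assumes get_llm from src/lib/llm.py (part of this release, not shown in this hunk) accepts a bare provider argument and falls back to the configured default model.

    import asyncio
    from langchain_core.messages import HumanMessage
    from src.lib.agents import AgentBuilder, AgentConfig, get_default_tools
    from src.lib.llm import get_llm

    async def run_once() -> None:
        agent = (
            AgentBuilder(name="demo")
            .with_llm(get_llm(provider="openai"))
            .with_tools(get_default_tools())
            .with_system_prompt("You are a terse assistant.")
            .with_memory()                          # MemorySaver checkpointer keyed by thread_id
            .with_config(AgentConfig(max_iterations=5))
            .build()
        )
        result = await agent.ainvoke(
            {"messages": [HumanMessage(content="What is 17 * 23?")]},
            config={"configurable": {"thread_id": "demo-thread"}},
        )
        print(result["messages"][-1].content)

    asyncio.run(run_once())
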
package/templates/python-backend/src/lib/cache.py
@@ -0,0 +1,38 @@
+ from cachetools import TTLCache
+ from functools import wraps
+ from typing import TypeVar, Callable, Any
+ import asyncio
+
+ T = TypeVar("T")
+
+ _cache: TTLCache[str, Any] = TTLCache(maxsize=1000, ttl=300)
+
+
+ def cached(ttl: int = 300, maxsize: int = 128) -> Callable[[Callable[..., T]], Callable[..., T]]:
+     """Simple in-memory cache decorator with TTL support."""
+     cache: TTLCache[str, Any] = TTLCache(maxsize=maxsize, ttl=ttl)
+
+     def decorator(func: Callable[..., T]) -> Callable[..., T]:
+         @wraps(func)
+         async def async_wrapper(*args: Any, **kwargs: Any) -> T:
+             key = f"{func.__name__}:{args}:{kwargs}"
+             if key in cache:
+                 return cache[key]
+             result = await func(*args, **kwargs)
+             cache[key] = result
+             return result
+
+         @wraps(func)
+         def sync_wrapper(*args: Any, **kwargs: Any) -> T:
+             key = f"{func.__name__}:{args}:{kwargs}"
+             if key in cache:
+                 return cache[key]
+             result = func(*args, **kwargs)
+             cache[key] = result
+             return result
+
+         if asyncio.iscoroutinefunction(func):
+             return async_wrapper  # type: ignore
+         return sync_wrapper  # type: ignore
+
+     return decorator
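
A small usage sketch (illustrative only). Each decorated function gets its own TTLCache, and the key is built from the repr of the arguments, so cached callables should take simply-printable arguments.

    import asyncio
    from src.lib.cache import cached

    @cached(ttl=10)
    async def fetch_rate(currency: str) -> float:
        print(f"cache miss for {currency}")   # printed at most once per 10-second window
        await asyncio.sleep(0.1)              # stand-in for a slow upstream call
        return 1.08

    async def main() -> None:
        await fetch_rate("EUR")   # miss: runs the body
        await fetch_rate("EUR")   # hit: served from the TTLCache

    asyncio.run(main())
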
package/templates/python-backend/src/lib/database.py
@@ -0,0 +1,31 @@
+ from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
+ from sqlalchemy.orm import DeclarativeBase
+ from contextlib import asynccontextmanager
+ from typing import AsyncGenerator
+
+ from src.config import get_settings
+
+
+ class Base(DeclarativeBase):
+     pass
+
+
+ settings = get_settings()
+ engine = create_async_engine(settings.database_url, echo=settings.debug)
+ async_session_maker = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
+
+
+ async def init_db() -> None:
+     async with engine.begin() as conn:
+         await conn.run_sync(Base.metadata.create_all)
+
+
+ @asynccontextmanager
+ async def get_session() -> AsyncGenerator[AsyncSession, None]:
+     async with async_session_maker() as session:
+         try:
+             yield session
+             await session.commit()
+         except Exception:
+             await session.rollback()
+             raise
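
For completeness, a hedged sketch of how the session helper would typically be used; OrderRow is a hypothetical ORM model for illustration, not something defined in this release, and the configured database_url must point at an async driver (for example sqlite+aiosqlite).

    import asyncio
    from sqlalchemy import String, select
    from sqlalchemy.orm import Mapped, mapped_column

    from src.lib.database import Base, get_session, init_db

    class OrderRow(Base):
        """Hypothetical ORM model for illustration only."""
        __tablename__ = "orders"
        id: Mapped[str] = mapped_column(String, primary_key=True)
        product: Mapped[str] = mapped_column(String)

    async def demo() -> None:
        await init_db()  # creates tables for all Base subclasses
        async with get_session() as session:  # commits on success, rolls back on error
            session.add(OrderRow(id="1", product="widget"))
        async with get_session() as session:
            rows = (await session.execute(select(OrderRow))).scalars().all()
            print([r.product for r in rows])

    asyncio.run(demo())
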