fastapi-async-sqlalchemy 0.7.1.post3__tar.gz → 0.8.0a1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastapi_async_sqlalchemy-0.8.0a1/PKG-INFO +368 -0
- fastapi_async_sqlalchemy-0.8.0a1/README.md +325 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/fastapi_async_sqlalchemy/__init__.py +1 -1
- fastapi_async_sqlalchemy-0.8.0a1/fastapi_async_sqlalchemy/middleware.py +777 -0
- fastapi_async_sqlalchemy-0.8.0a1/fastapi_async_sqlalchemy.egg-info/PKG-INFO +368 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/fastapi_async_sqlalchemy.egg-info/SOURCES.txt +5 -0
- fastapi_async_sqlalchemy-0.8.0a1/fastapi_async_sqlalchemy.egg-info/requires.txt +2 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/pyproject.toml +1 -2
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/setup.py +5 -5
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_additional_coverage.py +22 -11
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_backward_compat_gather.py +29 -57
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_concurrent_queries.py +7 -7
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_coverage_boost.py +9 -19
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_coverage_improvements.py +18 -16
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_custom_engine_branch.py +40 -23
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_edge_cases_coverage.py +75 -69
- fastapi_async_sqlalchemy-0.8.0a1/tests/test_full_coverage.py +227 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_import_fallback_simulation.py +17 -9
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_import_fallbacks.py +0 -24
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_maximum_coverage.py +33 -41
- fastapi_async_sqlalchemy-0.8.0a1/tests/test_multi_session_fixes.py +180 -0
- fastapi_async_sqlalchemy-0.8.0a1/tests/test_pool_throttling.py +722 -0
- fastapi_async_sqlalchemy-0.8.0a1/tests/test_resource_lifecycle.py +675 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_session.py +20 -14
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_single_session_no_gather.py +18 -11
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_sqlmodel.py +18 -20
- fastapi_async_sqlalchemy-0.8.0a1/tests/test_streaming_and_waiter_shutdown.py +90 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_type_hints_compatibility.py +2 -3
- fastapi_async_sqlalchemy-0.7.1.post3/PKG-INFO +0 -212
- fastapi_async_sqlalchemy-0.7.1.post3/README.md +0 -169
- fastapi_async_sqlalchemy-0.7.1.post3/fastapi_async_sqlalchemy/middleware.py +0 -146
- fastapi_async_sqlalchemy-0.7.1.post3/fastapi_async_sqlalchemy.egg-info/PKG-INFO +0 -212
- fastapi_async_sqlalchemy-0.7.1.post3/fastapi_async_sqlalchemy.egg-info/requires.txt +0 -2
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/LICENSE +0 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/fastapi_async_sqlalchemy/exceptions.py +0 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/fastapi_async_sqlalchemy/py.typed +0 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/fastapi_async_sqlalchemy.egg-info/dependency_links.txt +0 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/fastapi_async_sqlalchemy.egg-info/not-zip-safe +0 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/fastapi_async_sqlalchemy.egg-info/top_level.txt +0 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/setup.cfg +0 -0
- {fastapi_async_sqlalchemy-0.7.1.post3 → fastapi_async_sqlalchemy-0.8.0a1}/tests/test_import_without_sqlmodel.py +0 -0
|
@@ -0,0 +1,368 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: fastapi-async-sqlalchemy
|
|
3
|
+
Version: 0.8.0a1
|
|
4
|
+
Summary: SQLAlchemy middleware for FastAPI
|
|
5
|
+
Home-page: https://github.com/h0rn3t/fastapi-async-sqlalchemy.git
|
|
6
|
+
Author: Eugene Shershen
|
|
7
|
+
Author-email: h0rn3t.null@gmail.com
|
|
8
|
+
License: MIT
|
|
9
|
+
Project-URL: Code, https://github.com/h0rn3t/fastapi-async-sqlalchemy
|
|
10
|
+
Project-URL: Issue tracker, https://github.com/h0rn3t/fastapi-async-sqlalchemy/issues
|
|
11
|
+
Classifier: Development Status :: 5 - Production/Stable
|
|
12
|
+
Classifier: Environment :: Web Environment
|
|
13
|
+
Classifier: Framework :: AsyncIO
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Operating System :: OS Independent
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.15
|
|
21
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
22
|
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
|
23
|
+
Classifier: Topic :: Internet :: WWW/HTTP :: HTTP Servers
|
|
24
|
+
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
|
|
25
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
26
|
+
Requires-Python: >=3.12
|
|
27
|
+
Description-Content-Type: text/markdown
|
|
28
|
+
License-File: LICENSE
|
|
29
|
+
Requires-Dist: starlette>=0.40
|
|
30
|
+
Requires-Dist: SQLAlchemy>=2.0
|
|
31
|
+
Dynamic: author
|
|
32
|
+
Dynamic: author-email
|
|
33
|
+
Dynamic: classifier
|
|
34
|
+
Dynamic: description
|
|
35
|
+
Dynamic: description-content-type
|
|
36
|
+
Dynamic: home-page
|
|
37
|
+
Dynamic: license
|
|
38
|
+
Dynamic: license-file
|
|
39
|
+
Dynamic: project-url
|
|
40
|
+
Dynamic: requires-dist
|
|
41
|
+
Dynamic: requires-python
|
|
42
|
+
Dynamic: summary
|
|
43
|
+
|
|
44
|
+
# SQLAlchemy FastAPI middleware
|
|
45
|
+
|
|
46
|
+
[](https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB)
|
|
47
|
+
[](https://github.com/h0rn3t/fastapi-async-sqlalchemy/workflows/ci/badge.svg)
|
|
48
|
+
[](https://codecov.io/gh/h0rn3t/fastapi-async-sqlalchemy)
|
|
49
|
+
[](https://opensource.org/licenses/MIT)
|
|
50
|
+
[](https://pypi.org/project/fastapi-async-sqlalchemy/)
|
|
51
|
+
[](https://pepy.tech/project/fastapi-async-sqlalchemy)
|
|
52
|
+
[](https://pyup.io/repos/github/h0rn3t/fastapi-async-sqlalchemy/)
|
|
53
|
+
|
|
54
|
+
### Description
|
|
55
|
+
|
|
56
|
+
Provides SQLAlchemy middleware for FastAPI using AsyncSession and async engine.
|
|
57
|
+
|
|
58
|
+
### Install
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
pip install fastapi-async-sqlalchemy
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
It also works with ```sqlmodel```
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
### Examples
|
|
69
|
+
|
|
70
|
+
Note that the session object provided by ``db.session`` is based on the Python 3.7+ ``ContextVar``. This means that
|
|
71
|
+
each session is linked to the individual request context in which it was created.
|
|
72
|
+
|
|
73
|
+
```python
|
|
74
|
+
|
|
75
|
+
from fastapi import FastAPI
|
|
76
|
+
from fastapi_async_sqlalchemy import SQLAlchemyMiddleware
|
|
77
|
+
from fastapi_async_sqlalchemy import db # provide access to a database session
|
|
78
|
+
from sqlalchemy import column
|
|
79
|
+
from sqlalchemy import table
|
|
80
|
+
|
|
81
|
+
app = FastAPI()
|
|
82
|
+
app.add_middleware(
|
|
83
|
+
SQLAlchemyMiddleware,
|
|
84
|
+
db_url="postgresql+asyncpg://user:user@192.168.88.200:5432/primary_db",
|
|
85
|
+
engine_args={ # engine arguments example
|
|
86
|
+
"echo": True, # print all SQL statements
|
|
87
|
+
"pool_pre_ping": True, # feature will normally emit SQL equivalent to “SELECT 1” each time a connection is checked out from the pool
|
|
88
|
+
"pool_size": 5, # number of connections to keep open at a time
|
|
89
|
+
"max_overflow": 10, # number of connections to allow to be opened above pool_size
|
|
90
|
+
},
|
|
91
|
+
)
|
|
92
|
+
# Engines created from ``db_url`` are owned by the middleware and are disposed
|
|
93
|
+
# during the application shutdown lifespan. Tests that need shutdown behavior
|
|
94
|
+
# should run the app lifespan, for example with ``with TestClient(app)``.
|
|
95
|
+
# once the middleware is applied, any route can then access the database session
|
|
96
|
+
# from the global ``db``
|
|
97
|
+
|
|
98
|
+
foo = table("ms_files", column("id"))
|
|
99
|
+
|
|
100
|
+
# Usage inside of a route
|
|
101
|
+
@app.get("/")
|
|
102
|
+
async def get_files():
|
|
103
|
+
result = await db.session.execute(foo.select())
|
|
104
|
+
return result.fetchall()
|
|
105
|
+
|
|
106
|
+
async def get_db_fetch():
|
|
107
|
+
    # It uses the same ``db`` object, using it as a context manager:
|
|
108
|
+
async with db():
|
|
109
|
+
result = await db.session.execute(foo.select())
|
|
110
|
+
return result.fetchall()
|
|
111
|
+
|
|
112
|
+
# Usage inside of a route using a db context
|
|
113
|
+
@app.get("/db_context")
|
|
114
|
+
async def db_context():
|
|
115
|
+
return await get_db_fetch()
|
|
116
|
+
|
|
117
|
+
# Usage outside of a route using a db context
|
|
118
|
+
@app.on_event("startup")
|
|
119
|
+
async def on_startup():
|
|
120
|
+
# We are outside of a request context, therefore we cannot rely on ``SQLAlchemyMiddleware``
|
|
121
|
+
# to create a database session for us.
|
|
122
|
+
result = await get_db_fetch()
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
if __name__ == "__main__":
|
|
126
|
+
import uvicorn
|
|
127
|
+
uvicorn.run(app, host="0.0.0.0", port=8002)
|
|
128
|
+
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
#### Engine ownership
|
|
132
|
+
|
|
133
|
+
When the middleware receives ``db_url``, it creates and owns the async engine.
|
|
134
|
+
The engine is kept for the application lifetime and disposed when the ASGI
|
|
135
|
+
lifespan shutdown completes. It is not disposed per request. Disposal also
|
|
136
|
+
runs when the lifespan ends with a failure (``lifespan.shutdown.failed`` or
|
|
137
|
+
``lifespan.startup.failed``), so a raising user shutdown handler does not leak
|
|
138
|
+
the connection pool.
|
|
139
|
+
|
|
140
|
+
Engine disposal happens before the lifespan acknowledgement is forwarded to
|
|
141
|
+
the ASGI server, so a stuck pool drain will block the server's graceful
|
|
142
|
+
shutdown ack. Configure your ASGI server's graceful shutdown timeout (for
|
|
143
|
+
example uvicorn's ``--timeout-graceful-shutdown``) so it accommodates the
|
|
144
|
+
worst-case time required to close active connections.
|
|
145
|
+
|
|
146
|
+
When the middleware receives ``custom_engine``, the caller owns that engine. The
|
|
147
|
+
middleware will use it but will not dispose it during application shutdown:
|
|
148
|
+
|
|
149
|
+
```python
|
|
150
|
+
from sqlalchemy.ext.asyncio import create_async_engine
|
|
151
|
+
|
|
152
|
+
engine = create_async_engine("postgresql+asyncpg://user:pass@host/db")
|
|
153
|
+
app.add_middleware(SQLAlchemyMiddleware, custom_engine=engine)
|
|
154
|
+
|
|
155
|
+
# Later, in caller-managed shutdown code or test cleanup:
|
|
156
|
+
await engine.dispose()
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
#### Manual disposal outside ASGI lifespan
|
|
160
|
+
|
|
161
|
+
When ``SQLAlchemyMiddleware(db_url=...)`` is constructed outside an ASGI
|
|
162
|
+
application lifespan — for example in a script, an ad-hoc test harness, or
|
|
163
|
+
when embedding the middleware in a non-ASGI runtime — there is no
|
|
164
|
+
``lifespan.shutdown`` event to trigger engine disposal. In that case call
|
|
165
|
+
``await middleware.dispose()`` explicitly so the middleware-owned engine is
|
|
166
|
+
released:
|
|
167
|
+
|
|
168
|
+
```python
|
|
169
|
+
middleware = SQLAlchemyMiddleware(app, db_url="postgresql+asyncpg://...")
|
|
170
|
+
try:
|
|
171
|
+
... # use db.session
|
|
172
|
+
finally:
|
|
173
|
+
await middleware.dispose()
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
``dispose()`` is idempotent on success and is safe to retry if it raises:
|
|
177
|
+
the proxy session bindings are cleared deterministically so a subsequent
|
|
178
|
+
call actually re-attempts the underlying ``engine.dispose()``. The same
|
|
179
|
+
guidance applies to each pair created by
|
|
180
|
+
``create_middleware_and_session_proxy()``.
|
|
181
|
+
|
|
182
|
+
#### Request transactions and streaming responses
|
|
183
|
+
|
|
184
|
+
When ``SQLAlchemyMiddleware(..., commit_on_exit=True)`` manages a normal
|
|
185
|
+
non-streaming HTTP request, the request session is committed before
|
|
186
|
+
``http.response.start`` is forwarded to the ASGI server. If commit, rollback,
|
|
187
|
+
or close fails, the failure happens before a successful response is reported to
|
|
188
|
+
the client.
|
|
189
|
+
|
|
190
|
+
Streaming response body generation has a different lifetime from a normal
|
|
191
|
+
request transaction. Do not rely on the middleware-managed request session to
|
|
192
|
+
stay open while a ``StreamingResponse``/``FileResponse`` yields chunks. Open an
|
|
193
|
+
explicit session inside the generator so the body owns the database lifetime:
|
|
194
|
+
|
|
195
|
+
```python
|
|
196
|
+
from fastapi.responses import StreamingResponse
|
|
197
|
+
|
|
198
|
+
@app.get("/export")
|
|
199
|
+
async def export():
|
|
200
|
+
async def rows():
|
|
201
|
+
async with db():
|
|
202
|
+
result = await db.session.stream(foo.select())
|
|
203
|
+
async for row in result:
|
|
204
|
+
yield f"{row.id}\n".encode()
|
|
205
|
+
return StreamingResponse(rows(), media_type="text/plain")
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
Implicit ``commit_on_exit=True`` is not a safe way to report streaming write
|
|
209
|
+
success: the response may have already started before an unbounded body is
|
|
210
|
+
finished. If a streaming route needs database writes, either complete and
|
|
211
|
+
commit the write in a separate explicit ``async with db(commit_on_exit=True)``
|
|
212
|
+
block before creating the streaming response, or make the streaming generator
|
|
213
|
+
use an explicit ``async with db(commit_on_exit=True)`` block and design the API
|
|
214
|
+
so clients do not treat early chunks as write success.
|
|
215
|
+
|
|
216
|
+
For applications that previously used ``db.session`` directly inside streaming
|
|
217
|
+
generators, move that code into an explicit generator-owned context as shown
|
|
218
|
+
above. This keeps database access available for the whole body while making it
|
|
219
|
+
clear that the session lifetime belongs to the stream, not the original request
|
|
220
|
+
transaction.
|
|
221
|
+
|
|
222
|
+
#### SQLAlchemy events (`before_insert`, `after_insert`, ...)
|
|
223
|
+
|
|
224
|
+
SQLAlchemy's event system is independent of the session/engine — register
|
|
225
|
+
listeners on your mapped classes (or on `Mapper`/`Session`) with
|
|
226
|
+
`sqlalchemy.event.listens_for` exactly as you would with a synchronous
|
|
227
|
+
SQLAlchemy setup. The middleware does not change how events fire.
|
|
228
|
+
|
|
229
|
+
```python
|
|
230
|
+
from datetime import datetime
|
|
231
|
+
from sqlalchemy import Column, DateTime, Integer, String, event
|
|
232
|
+
from sqlalchemy.orm import DeclarativeBase
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
class Base(DeclarativeBase):
|
|
236
|
+
pass
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
class User(Base):
|
|
240
|
+
__tablename__ = "users"
|
|
241
|
+
id = Column(Integer, primary_key=True)
|
|
242
|
+
username = Column(String(50), unique=True, nullable=False)
|
|
243
|
+
created_at = Column(DateTime, default=datetime.utcnow)
|
|
244
|
+
updated_at = Column(DateTime, default=datetime.utcnow)
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
@event.listens_for(User, "before_insert")
|
|
248
|
+
def normalize(mapper, connection, target):
|
|
249
|
+
target.username = target.username.lower().strip()
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
@event.listens_for(User, "before_update")
|
|
253
|
+
def touch_updated_at(mapper, connection, target):
|
|
254
|
+
target.updated_at = datetime.utcnow()
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
@event.listens_for(User, "after_insert")
|
|
258
|
+
def log_insert(mapper, connection, target):
|
|
259
|
+
print(f"user created: id={target.id}")
|
|
260
|
+
```
|
|
261
|
+
|
|
262
|
+
Mapper-level events (`before_insert`, `after_insert`, `before_update`,
|
|
263
|
+
`after_update`, `before_delete`, `after_delete`) receive a synchronous
|
|
264
|
+
`connection` argument — do **not** `await` inside them and do **not** call
|
|
265
|
+
async ORM APIs there. If you need async work after a write, do it after
|
|
266
|
+
`await db.session.commit()` returns, or use `Session`-level events such as
|
|
267
|
+
`after_flush` / `after_commit` and schedule async work from there.
|
|
268
|
+
|
|
269
|
+
A complete runnable example with validation, timestamps, logging, and
|
|
270
|
+
soft-delete hooks lives at [examples/events_example.py](examples/events_example.py).
|
|
271
|
+
|
|
272
|
+
#### Usage of multiple databases
|
|
273
|
+
|
|
274
|
+
databases.py
|
|
275
|
+
|
|
276
|
+
```python
|
|
277
|
+
from fastapi import FastAPI
|
|
278
|
+
from fastapi_async_sqlalchemy import create_middleware_and_session_proxy
|
|
279
|
+
|
|
280
|
+
FirstSQLAlchemyMiddleware, first_db = create_middleware_and_session_proxy()
|
|
281
|
+
SecondSQLAlchemyMiddleware, second_db = create_middleware_and_session_proxy()
|
|
282
|
+
```
|
|
283
|
+
|
|
284
|
+
Use a separate middleware/session proxy pair for each independent app or
|
|
285
|
+
database. Reusing the same proxy with a different live engine is rejected so
|
|
286
|
+
requests cannot silently switch to another database binding.
|
|
287
|
+
|
|
288
|
+
main.py
|
|
289
|
+
|
|
290
|
+
```python
|
|
291
|
+
from fastapi import FastAPI
|
|
292
|
+
|
|
293
|
+
from databases import FirstSQLAlchemyMiddleware, SecondSQLAlchemyMiddleware
|
|
294
|
+
from routes import router
|
|
295
|
+
|
|
296
|
+
app = FastAPI()
|
|
297
|
+
|
|
298
|
+
app.include_router(router)
|
|
299
|
+
|
|
300
|
+
app.add_middleware(
|
|
301
|
+
FirstSQLAlchemyMiddleware,
|
|
302
|
+
db_url="postgresql+asyncpg://user:user@192.168.88.200:5432/primary_db",
|
|
303
|
+
engine_args={
|
|
304
|
+
"pool_size": 5,
|
|
305
|
+
"max_overflow": 10,
|
|
306
|
+
},
|
|
307
|
+
)
|
|
308
|
+
app.add_middleware(
|
|
309
|
+
SecondSQLAlchemyMiddleware,
|
|
310
|
+
    db_url="mysql+aiomysql://user:user@192.168.88.200:3306/primary_db",
|
|
311
|
+
engine_args={
|
|
312
|
+
"pool_size": 5,
|
|
313
|
+
"max_overflow": 10,
|
|
314
|
+
},
|
|
315
|
+
)
|
|
316
|
+
```
|
|
317
|
+
|
|
318
|
+
routes.py
|
|
319
|
+
|
|
320
|
+
```python
|
|
321
|
+
import asyncio
|
|
322
|
+
|
|
323
|
+
from fastapi import APIRouter
|
|
324
|
+
from sqlalchemy import column, table, text
|
|
325
|
+
|
|
326
|
+
from databases import first_db, second_db
|
|
327
|
+
|
|
328
|
+
router = APIRouter()
|
|
329
|
+
|
|
330
|
+
foo = table("ms_files", column("id"))
|
|
331
|
+
|
|
332
|
+
@router.get("/first-db-files")
|
|
333
|
+
async def get_files_from_first_db():
|
|
334
|
+
result = await first_db.session.execute(foo.select())
|
|
335
|
+
return result.fetchall()
|
|
336
|
+
|
|
337
|
+
|
|
338
|
+
@router.get("/second-db-files")
|
|
339
|
+
async def get_files_from_second_db():
|
|
340
|
+
result = await second_db.session.execute(foo.select())
|
|
341
|
+
return result.fetchall()
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
@router.get("/concurrent-queries")
|
|
345
|
+
async def parallel_select():
|
|
346
|
+
async with first_db(multi_sessions=True, max_concurrent=10):
|
|
347
|
+
async def execute_query(query):
|
|
348
|
+
async with first_db.connection() as session:
|
|
349
|
+
return await session.execute(text(query))
|
|
350
|
+
|
|
351
|
+
tasks = [
|
|
352
|
+
asyncio.create_task(execute_query("SELECT 1")),
|
|
353
|
+
asyncio.create_task(execute_query("SELECT 2")),
|
|
354
|
+
asyncio.create_task(execute_query("SELECT 3")),
|
|
355
|
+
asyncio.create_task(execute_query("SELECT 4")),
|
|
356
|
+
asyncio.create_task(execute_query("SELECT 5")),
|
|
357
|
+
asyncio.create_task(execute_query("SELECT 6")),
|
|
358
|
+
]
|
|
359
|
+
|
|
360
|
+
await asyncio.gather(*tasks)
|
|
361
|
+
```
|
|
362
|
+
|
|
363
|
+
Child tasks that use database sessions must finish before the owning
|
|
364
|
+
``async with db(multi_sessions=True)`` block exits. When ``max_concurrent`` is
|
|
365
|
+
set, child tasks should use ``db.connection()`` or pass coroutine objects to
|
|
366
|
+
``db.gather()`` so the middleware can own both the session lifetime and the
|
|
367
|
+
semaphore slot. Already-created ``Task`` or ``Future`` objects are rejected by
|
|
368
|
+
throttled ``db.gather()`` because they may have started outside the semaphore.
|
|
@@ -0,0 +1,325 @@
|
|
|
1
|
+
# SQLAlchemy FastAPI middleware
|
|
2
|
+
|
|
3
|
+
[](https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB)
|
|
4
|
+
[](https://github.com/h0rn3t/fastapi-async-sqlalchemy/workflows/ci/badge.svg)
|
|
5
|
+
[](https://codecov.io/gh/h0rn3t/fastapi-async-sqlalchemy)
|
|
6
|
+
[](https://opensource.org/licenses/MIT)
|
|
7
|
+
[](https://pypi.org/project/fastapi-async-sqlalchemy/)
|
|
8
|
+
[](https://pepy.tech/project/fastapi-async-sqlalchemy)
|
|
9
|
+
[](https://pyup.io/repos/github/h0rn3t/fastapi-async-sqlalchemy/)
|
|
10
|
+
|
|
11
|
+
### Description
|
|
12
|
+
|
|
13
|
+
Provides SQLAlchemy middleware for FastAPI using AsyncSession and async engine.
|
|
14
|
+
|
|
15
|
+
### Install
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
pip install fastapi-async-sqlalchemy
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
It also works with ```sqlmodel```
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
### Examples
|
|
26
|
+
|
|
27
|
+
Note that the session object provided by ``db.session`` is based on the Python 3.7+ ``ContextVar``. This means that
|
|
28
|
+
each session is linked to the individual request context in which it was created.
|
|
29
|
+
|
|
30
|
+
```python
|
|
31
|
+
|
|
32
|
+
from fastapi import FastAPI
|
|
33
|
+
from fastapi_async_sqlalchemy import SQLAlchemyMiddleware
|
|
34
|
+
from fastapi_async_sqlalchemy import db # provide access to a database session
|
|
35
|
+
from sqlalchemy import column
|
|
36
|
+
from sqlalchemy import table
|
|
37
|
+
|
|
38
|
+
app = FastAPI()
|
|
39
|
+
app.add_middleware(
|
|
40
|
+
SQLAlchemyMiddleware,
|
|
41
|
+
db_url="postgresql+asyncpg://user:user@192.168.88.200:5432/primary_db",
|
|
42
|
+
engine_args={ # engine arguments example
|
|
43
|
+
"echo": True, # print all SQL statements
|
|
44
|
+
"pool_pre_ping": True, # feature will normally emit SQL equivalent to “SELECT 1” each time a connection is checked out from the pool
|
|
45
|
+
"pool_size": 5, # number of connections to keep open at a time
|
|
46
|
+
"max_overflow": 10, # number of connections to allow to be opened above pool_size
|
|
47
|
+
},
|
|
48
|
+
)
|
|
49
|
+
# Engines created from ``db_url`` are owned by the middleware and are disposed
|
|
50
|
+
# during the application shutdown lifespan. Tests that need shutdown behavior
|
|
51
|
+
# should run the app lifespan, for example with ``with TestClient(app)``.
|
|
52
|
+
# once the middleware is applied, any route can then access the database session
|
|
53
|
+
# from the global ``db``
|
|
54
|
+
|
|
55
|
+
foo = table("ms_files", column("id"))
|
|
56
|
+
|
|
57
|
+
# Usage inside of a route
|
|
58
|
+
@app.get("/")
|
|
59
|
+
async def get_files():
|
|
60
|
+
result = await db.session.execute(foo.select())
|
|
61
|
+
return result.fetchall()
|
|
62
|
+
|
|
63
|
+
async def get_db_fetch():
|
|
64
|
+
    # It uses the same ``db`` object, using it as a context manager:
|
|
65
|
+
async with db():
|
|
66
|
+
result = await db.session.execute(foo.select())
|
|
67
|
+
return result.fetchall()
|
|
68
|
+
|
|
69
|
+
# Usage inside of a route using a db context
|
|
70
|
+
@app.get("/db_context")
|
|
71
|
+
async def db_context():
|
|
72
|
+
return await get_db_fetch()
|
|
73
|
+
|
|
74
|
+
# Usage outside of a route using a db context
|
|
75
|
+
@app.on_event("startup")
|
|
76
|
+
async def on_startup():
|
|
77
|
+
# We are outside of a request context, therefore we cannot rely on ``SQLAlchemyMiddleware``
|
|
78
|
+
# to create a database session for us.
|
|
79
|
+
result = await get_db_fetch()
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
if __name__ == "__main__":
|
|
83
|
+
import uvicorn
|
|
84
|
+
uvicorn.run(app, host="0.0.0.0", port=8002)
|
|
85
|
+
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
#### Engine ownership
|
|
89
|
+
|
|
90
|
+
When the middleware receives ``db_url``, it creates and owns the async engine.
|
|
91
|
+
The engine is kept for the application lifetime and disposed when the ASGI
|
|
92
|
+
lifespan shutdown completes. It is not disposed per request. Disposal also
|
|
93
|
+
runs when the lifespan ends with a failure (``lifespan.shutdown.failed`` or
|
|
94
|
+
``lifespan.startup.failed``), so a raising user shutdown handler does not leak
|
|
95
|
+
the connection pool.
|
|
96
|
+
|
|
97
|
+
Engine disposal happens before the lifespan acknowledgement is forwarded to
|
|
98
|
+
the ASGI server, so a stuck pool drain will block the server's graceful
|
|
99
|
+
shutdown ack. Configure your ASGI server's graceful shutdown timeout (for
|
|
100
|
+
example uvicorn's ``--timeout-graceful-shutdown``) so it accommodates the
|
|
101
|
+
worst-case time required to close active connections.
|
|
102
|
+
|
|
103
|
+
When the middleware receives ``custom_engine``, the caller owns that engine. The
|
|
104
|
+
middleware will use it but will not dispose it during application shutdown:
|
|
105
|
+
|
|
106
|
+
```python
|
|
107
|
+
from sqlalchemy.ext.asyncio import create_async_engine
|
|
108
|
+
|
|
109
|
+
engine = create_async_engine("postgresql+asyncpg://user:pass@host/db")
|
|
110
|
+
app.add_middleware(SQLAlchemyMiddleware, custom_engine=engine)
|
|
111
|
+
|
|
112
|
+
# Later, in caller-managed shutdown code or test cleanup:
|
|
113
|
+
await engine.dispose()
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
#### Manual disposal outside ASGI lifespan
|
|
117
|
+
|
|
118
|
+
When ``SQLAlchemyMiddleware(db_url=...)`` is constructed outside an ASGI
|
|
119
|
+
application lifespan — for example in a script, an ad-hoc test harness, or
|
|
120
|
+
when embedding the middleware in a non-ASGI runtime — there is no
|
|
121
|
+
``lifespan.shutdown`` event to trigger engine disposal. In that case call
|
|
122
|
+
``await middleware.dispose()`` explicitly so the middleware-owned engine is
|
|
123
|
+
released:
|
|
124
|
+
|
|
125
|
+
```python
|
|
126
|
+
middleware = SQLAlchemyMiddleware(app, db_url="postgresql+asyncpg://...")
|
|
127
|
+
try:
|
|
128
|
+
... # use db.session
|
|
129
|
+
finally:
|
|
130
|
+
await middleware.dispose()
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
``dispose()`` is idempotent on success and is safe to retry if it raises:
|
|
134
|
+
the proxy session bindings are cleared deterministically so a subsequent
|
|
135
|
+
call actually re-attempts the underlying ``engine.dispose()``. The same
|
|
136
|
+
guidance applies to each pair created by
|
|
137
|
+
``create_middleware_and_session_proxy()``.
|
|
138
|
+
|
|
139
|
+
#### Request transactions and streaming responses
|
|
140
|
+
|
|
141
|
+
When ``SQLAlchemyMiddleware(..., commit_on_exit=True)`` manages a normal
|
|
142
|
+
non-streaming HTTP request, the request session is committed before
|
|
143
|
+
``http.response.start`` is forwarded to the ASGI server. If commit, rollback,
|
|
144
|
+
or close fails, the failure happens before a successful response is reported to
|
|
145
|
+
the client.
|
|
146
|
+
|
|
147
|
+
Streaming response body generation has a different lifetime from a normal
|
|
148
|
+
request transaction. Do not rely on the middleware-managed request session to
|
|
149
|
+
stay open while a ``StreamingResponse``/``FileResponse`` yields chunks. Open an
|
|
150
|
+
explicit session inside the generator so the body owns the database lifetime:
|
|
151
|
+
|
|
152
|
+
```python
|
|
153
|
+
from fastapi.responses import StreamingResponse
|
|
154
|
+
|
|
155
|
+
@app.get("/export")
|
|
156
|
+
async def export():
|
|
157
|
+
async def rows():
|
|
158
|
+
async with db():
|
|
159
|
+
result = await db.session.stream(foo.select())
|
|
160
|
+
async for row in result:
|
|
161
|
+
yield f"{row.id}\n".encode()
|
|
162
|
+
return StreamingResponse(rows(), media_type="text/plain")
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
Implicit ``commit_on_exit=True`` is not a safe way to report streaming write
|
|
166
|
+
success: the response may have already started before an unbounded body is
|
|
167
|
+
finished. If a streaming route needs database writes, either complete and
|
|
168
|
+
commit the write in a separate explicit ``async with db(commit_on_exit=True)``
|
|
169
|
+
block before creating the streaming response, or make the streaming generator
|
|
170
|
+
use an explicit ``async with db(commit_on_exit=True)`` block and design the API
|
|
171
|
+
so clients do not treat early chunks as write success.
|
|
172
|
+
|
|
173
|
+
For applications that previously used ``db.session`` directly inside streaming
|
|
174
|
+
generators, move that code into an explicit generator-owned context as shown
|
|
175
|
+
above. This keeps database access available for the whole body while making it
|
|
176
|
+
clear that the session lifetime belongs to the stream, not the original request
|
|
177
|
+
transaction.
|
|
178
|
+
|
|
179
|
+
#### SQLAlchemy events (`before_insert`, `after_insert`, ...)
|
|
180
|
+
|
|
181
|
+
SQLAlchemy's event system is independent of the session/engine — register
|
|
182
|
+
listeners on your mapped classes (or on `Mapper`/`Session`) with
|
|
183
|
+
`sqlalchemy.event.listens_for` exactly as you would with a synchronous
|
|
184
|
+
SQLAlchemy setup. The middleware does not change how events fire.
|
|
185
|
+
|
|
186
|
+
```python
|
|
187
|
+
from datetime import datetime
|
|
188
|
+
from sqlalchemy import Column, DateTime, Integer, String, event
|
|
189
|
+
from sqlalchemy.orm import DeclarativeBase
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
class Base(DeclarativeBase):
|
|
193
|
+
pass
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
class User(Base):
|
|
197
|
+
__tablename__ = "users"
|
|
198
|
+
id = Column(Integer, primary_key=True)
|
|
199
|
+
username = Column(String(50), unique=True, nullable=False)
|
|
200
|
+
created_at = Column(DateTime, default=datetime.utcnow)
|
|
201
|
+
updated_at = Column(DateTime, default=datetime.utcnow)
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
@event.listens_for(User, "before_insert")
|
|
205
|
+
def normalize(mapper, connection, target):
|
|
206
|
+
target.username = target.username.lower().strip()
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
@event.listens_for(User, "before_update")
|
|
210
|
+
def touch_updated_at(mapper, connection, target):
|
|
211
|
+
target.updated_at = datetime.utcnow()
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
@event.listens_for(User, "after_insert")
|
|
215
|
+
def log_insert(mapper, connection, target):
|
|
216
|
+
print(f"user created: id={target.id}")
|
|
217
|
+
```
|
|
218
|
+
|
|
219
|
+
Mapper-level events (`before_insert`, `after_insert`, `before_update`,
|
|
220
|
+
`after_update`, `before_delete`, `after_delete`) receive a synchronous
|
|
221
|
+
`connection` argument — do **not** `await` inside them and do **not** call
|
|
222
|
+
async ORM APIs there. If you need async work after a write, do it after
|
|
223
|
+
`await db.session.commit()` returns, or use `Session`-level events such as
|
|
224
|
+
`after_flush` / `after_commit` and schedule async work from there.
|
|
225
|
+
|
|
226
|
+
A complete runnable example with validation, timestamps, logging, and
|
|
227
|
+
soft-delete hooks lives at [examples/events_example.py](examples/events_example.py).
|
|
228
|
+
|
|
229
|
+
#### Usage of multiple databases
|
|
230
|
+
|
|
231
|
+
databases.py
|
|
232
|
+
|
|
233
|
+
```python
|
|
234
|
+
from fastapi import FastAPI
|
|
235
|
+
from fastapi_async_sqlalchemy import create_middleware_and_session_proxy
|
|
236
|
+
|
|
237
|
+
FirstSQLAlchemyMiddleware, first_db = create_middleware_and_session_proxy()
|
|
238
|
+
SecondSQLAlchemyMiddleware, second_db = create_middleware_and_session_proxy()
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
Use a separate middleware/session proxy pair for each independent app or
|
|
242
|
+
database. Reusing the same proxy with a different live engine is rejected so
|
|
243
|
+
requests cannot silently switch to another database binding.
|
|
244
|
+
|
|
245
|
+
main.py
|
|
246
|
+
|
|
247
|
+
```python
|
|
248
|
+
from fastapi import FastAPI
|
|
249
|
+
|
|
250
|
+
from databases import FirstSQLAlchemyMiddleware, SecondSQLAlchemyMiddleware
|
|
251
|
+
from routes import router
|
|
252
|
+
|
|
253
|
+
app = FastAPI()
|
|
254
|
+
|
|
255
|
+
app.include_router(router)
|
|
256
|
+
|
|
257
|
+
app.add_middleware(
|
|
258
|
+
FirstSQLAlchemyMiddleware,
|
|
259
|
+
db_url="postgresql+asyncpg://user:user@192.168.88.200:5432/primary_db",
|
|
260
|
+
engine_args={
|
|
261
|
+
"pool_size": 5,
|
|
262
|
+
"max_overflow": 10,
|
|
263
|
+
},
|
|
264
|
+
)
|
|
265
|
+
app.add_middleware(
|
|
266
|
+
SecondSQLAlchemyMiddleware,
|
|
267
|
+
db_url="mysql+aiomysql://user:user@192.168.88.200:3306/secondary_db",
|
|
268
|
+
engine_args={
|
|
269
|
+
"pool_size": 5,
|
|
270
|
+
"max_overflow": 10,
|
|
271
|
+
},
|
|
272
|
+
)
|
|
273
|
+
```
|
|
274
|
+
|
|
275
|
+
routes.py
|
|
276
|
+
|
|
277
|
+
```python
|
|
278
|
+
import asyncio
|
|
279
|
+
|
|
280
|
+
from fastapi import APIRouter
|
|
281
|
+
from sqlalchemy import column, table, text
|
|
282
|
+
|
|
283
|
+
from databases import first_db, second_db
|
|
284
|
+
|
|
285
|
+
router = APIRouter()
|
|
286
|
+
|
|
287
|
+
foo = table("ms_files", column("id"))
|
|
288
|
+
|
|
289
|
+
@router.get("/first-db-files")
|
|
290
|
+
async def get_files_from_first_db():
|
|
291
|
+
result = await first_db.session.execute(foo.select())
|
|
292
|
+
return result.fetchall()
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
@router.get("/second-db-files")
|
|
296
|
+
async def get_files_from_second_db():
|
|
297
|
+
result = await second_db.session.execute(foo.select())
|
|
298
|
+
return result.fetchall()
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
@router.get("/concurrent-queries")
|
|
302
|
+
async def parallel_select():
|
|
303
|
+
async with first_db(multi_sessions=True, max_concurrent=10):
|
|
304
|
+
async def execute_query(query):
|
|
305
|
+
async with first_db.connection() as session:
|
|
306
|
+
return await session.execute(text(query))
|
|
307
|
+
|
|
308
|
+
tasks = [
|
|
309
|
+
asyncio.create_task(execute_query("SELECT 1")),
|
|
310
|
+
asyncio.create_task(execute_query("SELECT 2")),
|
|
311
|
+
asyncio.create_task(execute_query("SELECT 3")),
|
|
312
|
+
asyncio.create_task(execute_query("SELECT 4")),
|
|
313
|
+
asyncio.create_task(execute_query("SELECT 5")),
|
|
314
|
+
asyncio.create_task(execute_query("SELECT 6")),
|
|
315
|
+
]
|
|
316
|
+
|
|
317
|
+
await asyncio.gather(*tasks)
|
|
318
|
+
```
|
|
319
|
+
|
|
320
|
+
Child tasks that use database sessions must finish before the owning
|
|
321
|
+
`async with db(multi_sessions=True)` block exits. When `max_concurrent` is
|
|
322
|
+
set, child tasks should use `db.connection()` or pass coroutine objects to
|
|
323
|
+
`db.gather()` so the middleware can own both the session lifetime and the
|
|
324
|
+
semaphore slot. Already-created `Task` or `Future` objects are rejected by
|
|
325
|
+
throttled `db.gather()` because they may have started outside the semaphore.
|