mlproxy-py 0.1.1 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlproxy_py-0.1.1/.github/workflows/ci.yml +24 -0
- mlproxy_py-0.1.1/.gitignore +218 -0
- mlproxy_py-0.1.1/LICENSE +21 -0
- mlproxy_py-0.1.1/PKG-INFO +131 -0
- mlproxy_py-0.1.1/README.md +94 -0
- mlproxy_py-0.1.1/examples/config.yml +22 -0
- mlproxy_py-0.1.1/mlproxy_py/__init__.py +1 -0
- mlproxy_py-0.1.1/mlproxy_py/app.py +115 -0
- mlproxy_py-0.1.1/mlproxy_py/backends.py +17 -0
- mlproxy_py-0.1.1/mlproxy_py/batching.py +49 -0
- mlproxy_py-0.1.1/mlproxy_py/cli.py +96 -0
- mlproxy_py-0.1.1/mlproxy_py/config.py +47 -0
- mlproxy_py-0.1.1/mlproxy_py/healthcheck.py +55 -0
- mlproxy_py-0.1.1/mlproxy_py/metrics.py +27 -0
- mlproxy_py-0.1.1/mlproxy_py/proxy.py +50 -0
- mlproxy_py-0.1.1/mlproxy_py/router.py +25 -0
- mlproxy_py-0.1.1/pyproject.toml +56 -0
- mlproxy_py-0.1.1/tests/test_backends.py +27 -0
- mlproxy_py-0.1.1/tests/test_batching.py +45 -0
- mlproxy_py-0.1.1/tests/test_config.py +57 -0
- mlproxy_py-0.1.1/tests/test_healthcheck.py +29 -0
- mlproxy_py-0.1.1/tests/test_router.py +48 -0

mlproxy_py-0.1.1/.github/workflows/ci.yml
ADDED
@@ -0,0 +1,24 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install hatchling ruff
      - run: ruff check .
      - run: pip install .
      - run: pip install pytest pytest-asyncio httpx
      - run: pytest

mlproxy_py-0.1.1/.gitignore
ADDED
@@ -0,0 +1,218 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
# Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
# poetry.lock
# poetry.toml

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
# pdm.lock
# pdm.toml
.pdm-python
.pdm-build/

# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
# pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# Redis
*.rdb
*.aof
*.pid

# RabbitMQ
mnesia/
rabbitmq/
rabbitmq-data/

# ActiveMQ
activemq-data/

# SageMath parsed files
*.sage.py

# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
# .idea/

# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/

# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/
# Temporary file for partial code execution
tempCodeRunnerFile.py

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/

# Streamlit
.streamlit/secrets.toml
mlproxy_py-0.1.1/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Felix

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

mlproxy_py-0.1.1/PKG-INFO
ADDED
@@ -0,0 +1,131 @@
Metadata-Version: 2.4
Name: mlproxy-py
Version: 0.1.1
Summary: SLA/QoS-aware reverse proxy for ML inference workloads (batching, routing, latency metrics).
Author: Kubenew
License: MIT
License-File: LICENSE
Keywords: asyncio,batching,inference,llm,ml,qos,reverse-proxy
Classifier: Development Status :: 3 - Alpha
Classifier: Framework :: AsyncIO
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Internet :: WWW/HTTP :: HTTP Servers
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.10
Requires-Dist: fastapi>=0.111.0
Requires-Dist: httpx>=0.27.0
Requires-Dist: prometheus-client>=0.20.0
Requires-Dist: pydantic>=2.7.0
Requires-Dist: pyyaml>=6.0.1
Requires-Dist: typer>=0.12.3
Requires-Dist: uvicorn>=0.30.0
Provides-Extra: dev
Requires-Dist: httpx>=0.27.0; extra == 'dev'
Requires-Dist: pytest-asyncio>=0.21; extra == 'dev'
Requires-Dist: pytest>=7.0; extra == 'dev'
Requires-Dist: ruff>=0.1; extra == 'dev'
Provides-Extra: test
Requires-Dist: httpx>=0.27.0; extra == 'test'
Requires-Dist: pytest-asyncio>=0.21; extra == 'test'
Requires-Dist: pytest>=7.0; extra == 'test'
Description-Content-Type: text/markdown

# mlproxy-py

[PyPI](https://pypi.org/project/mlproxy-py/)
[Python versions](https://pypi.org/project/mlproxy-py/)
[MIT License](https://github.com/Kubenew/mlproxy-py/blob/main/LICENSE)
[GitHub](https://github.com/Kubenew/mlproxy-py)
[Downloads](https://pepy.tech/project/mlproxy-py)

**mlproxy-py** is a minimal ML inference reverse proxy with QoS-aware routing.

Designed for LLM / ML inference workloads where routing decisions should be based on latency, SLA targets, backend health, queue depth, and batching potential.

## Features

- Reverse proxy for JSON inference requests
- Backends grouped into model pools
- SLA-aware routing (choose lowest latency backend)
- Optional micro-batching (collect requests for N ms)
- Concurrent health checks with connection pooling
- Prometheus metrics (request count, latency, backend latency)

## Quickstart

### Install

```bash
pip install mlproxy-py
```

### Run proxy

```bash
mlproxy run -c examples/config.yml
```

### Send request

```bash
curl -X POST http://localhost:7000/infer/modelA \
  -H "Content-Type: application/json" \
  -d '{"text":"hello"}'
```

## Architecture

```
Client ──POST /infer/{model}──► FastAPI
                                   │
                         ┌─────────▼──────────┐
                         │    ModelRouter     │
                         │  choose_backend()  │
                         │  (score = latency  │
                         │   + active_req*5)  │
                         └─────────┬──────────┘
                                   │ backend URL
                         ┌─────────▼──────────┐
                         │   forward_json()   │
                         │  (httpx conn pool) │
                         └─────────┬──────────┘
                                   ▼
                            Backend ML server

   ┌──────────────────┐    ┌──────────────────┐
   │    BatchQueue    │    │   Healthcheck    │
   │  (optional per   │    │  (concurrent,    │
   │   model pool)    │    │   per-backend)   │
   └──────────────────┘    └──────────────────┘
```

## Config

See `examples/config.yml`.

## Changelog

### 0.1.1

- **Lifespan pattern**: Migrated from deprecated `@app.on_event("startup")` to FastAPI `lifespan` context manager.
- **Graceful shutdown**: Batch workers and healthcheck loop are properly cancelled on shutdown.
- **Connection pooling**: Shared `httpx.AsyncClient` singletons for proxy and healthcheck (was creating a client per request/check).
- **Concurrent health checks**: Backends checked in parallel via `asyncio.gather` (was sequential).
- **Logging**: Added structured `logging` throughout; `--log-level` CLI option.
- **Bare except fixes**: All `except Exception` blocks re-raise `asyncio.CancelledError`.
- **Deprecated API fixes**: Replaced `asyncio.get_event_loop()` with `asyncio.get_running_loop()` in batching module.
- **Build system**: Migrated from `setuptools` to `hatchling`. Added classifiers, keywords, optional dev/test deps, ruff/pytest config.
- **Tests**: Expanded from 1 test to 15+ tests covering config, router, batching, proxy, healthcheck, and backends.

### 0.1.0

- Initial release: JSON inference proxy, model pools, SLA-aware routing, micro-batching, health checks, Prometheus metrics.

## License

MIT

mlproxy_py-0.1.1/README.md
ADDED
@@ -0,0 +1,94 @@
# mlproxy-py

[PyPI](https://pypi.org/project/mlproxy-py/)
[Python versions](https://pypi.org/project/mlproxy-py/)
[MIT License](https://github.com/Kubenew/mlproxy-py/blob/main/LICENSE)
[GitHub](https://github.com/Kubenew/mlproxy-py)
[Downloads](https://pepy.tech/project/mlproxy-py)

**mlproxy-py** is a minimal ML inference reverse proxy with QoS-aware routing.

Designed for LLM / ML inference workloads where routing decisions should be based on latency, SLA targets, backend health, queue depth, and batching potential.

## Features

- Reverse proxy for JSON inference requests
- Backends grouped into model pools
- SLA-aware routing (choose lowest latency backend)
- Optional micro-batching (collect requests for N ms)
- Concurrent health checks with connection pooling
- Prometheus metrics (request count, latency, backend latency)

## Quickstart

### Install

```bash
pip install mlproxy-py
```

### Run proxy

```bash
mlproxy run -c examples/config.yml
```

### Send request

```bash
curl -X POST http://localhost:7000/infer/modelA \
  -H "Content-Type: application/json" \
  -d '{"text":"hello"}'
```

## Architecture

```
Client ──POST /infer/{model}──► FastAPI
                                   │
                         ┌─────────▼──────────┐
                         │    ModelRouter     │
                         │  choose_backend()  │
                         │  (score = latency  │
                         │   + active_req*5)  │
                         └─────────┬──────────┘
                                   │ backend URL
                         ┌─────────▼──────────┐
                         │   forward_json()   │
                         │  (httpx conn pool) │
                         └─────────┬──────────┘
                                   ▼
                            Backend ML server

   ┌──────────────────┐    ┌──────────────────┐
   │    BatchQueue    │    │   Healthcheck    │
   │  (optional per   │    │  (concurrent,    │
   │   model pool)    │    │   per-backend)   │
   └──────────────────┘    └──────────────────┘
```

## Config

See `examples/config.yml`.

## Changelog

### 0.1.1

- **Lifespan pattern**: Migrated from deprecated `@app.on_event("startup")` to FastAPI `lifespan` context manager.
- **Graceful shutdown**: Batch workers and healthcheck loop are properly cancelled on shutdown.
- **Connection pooling**: Shared `httpx.AsyncClient` singletons for proxy and healthcheck (was creating a client per request/check).
- **Concurrent health checks**: Backends checked in parallel via `asyncio.gather` (was sequential).
- **Logging**: Added structured `logging` throughout; `--log-level` CLI option.
- **Bare except fixes**: All `except Exception` blocks re-raise `asyncio.CancelledError`.
- **Deprecated API fixes**: Replaced `asyncio.get_event_loop()` with `asyncio.get_running_loop()` in batching module.
- **Build system**: Migrated from `setuptools` to `hatchling`. Added classifiers, keywords, optional dev/test deps, ruff/pytest config.
- **Tests**: Expanded from 1 test to 15+ tests covering config, router, batching, proxy, healthcheck, and backends.

### 0.1.0

- Initial release: JSON inference proxy, model pools, SLA-aware routing, micro-batching, health checks, Prometheus metrics.

## License

MIT
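Note: the quickstart above assumes an upstream model server already listening on the configured backend URLs; the package does not ship one. A minimal sketch of a compatible backend, inferred from proxy.py and healthcheck.py below (the echo handler and module name are illustrative assumptions, not part of the package):

```python
# backend_stub.py -- hypothetical upstream server for mlproxy-py to route to.
# Contract inferred from the proxy code: POST /infer/{model} returns JSON,
# and GET /health answers the healthcheck loop (any 2xx/3xx counts as healthy).
from fastapi import FastAPI

backend = FastAPI()

@backend.get("/health")
async def health():
    return {"status": "ok"}

@backend.post("/infer/{model}")
async def infer(model: str, payload: dict):
    # Echo stub; a real backend would run model inference here.
    return {"reply": f"{model} received: {payload.get('text', '')}"}
```

Run one instance per backend URL in examples/config.yml, e.g. `uvicorn backend_stub:backend --port 8001`.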

mlproxy_py-0.1.1/examples/config.yml
ADDED
@@ -0,0 +1,22 @@
listen: "0.0.0.0:7000"

models:
  modelA:
    sla_ms: 300
    batching:
      enabled: true
      max_batch_size: 8
      max_wait_ms: 20
    backends:
      - url: "http://localhost:8001"
      - url: "http://localhost:8002"

metrics:
  enabled: true
  path: "/metrics"

healthcheck:
  enabled: true
  interval_seconds: 5
  timeout_seconds: 2
  path: "/health"

mlproxy_py-0.1.1/mlproxy_py/__init__.py
ADDED
@@ -0,0 +1 @@
__version__ = "0.1.1"

mlproxy_py-0.1.1/mlproxy_py/app.py
ADDED
@@ -0,0 +1,115 @@
from __future__ import annotations

import asyncio
import logging
import time
from contextlib import asynccontextmanager
from typing import Dict, List

from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse

from .batching import BatchQueue
from .metrics import BACKEND_LATENCY, REQ_COUNT, REQ_LATENCY, metrics_response
from .proxy import forward_json, close_client
from .router import ModelRouter

logger = logging.getLogger(__name__)


def create_app(router: ModelRouter, model_cfg: dict, metrics_enabled: bool, metrics_path: str) -> FastAPI:
    batch_queues: Dict[str, BatchQueue] = {}
    worker_tasks: List[asyncio.Task] = []

    for model, cfg in model_cfg.items():
        batching_cfg = cfg.batching
        if batching_cfg.enabled:
            batch_queues[model] = BatchQueue(
                max_batch_size=batching_cfg.max_batch_size,
                max_wait_ms=batching_cfg.max_wait_ms,
            )

    async def batch_worker(model: str):
        q = batch_queues[model]
        while True:
            try:
                batch = await q.collect_batch()
                if not batch:
                    continue

                backend = router.choose_backend(model)
                if not backend:
                    for item in batch:
                        item.future.set_exception(RuntimeError("No healthy backend"))
                    continue

                payloads = [item.payload for item in batch]

                try:
                    result = await forward_json(backend, model, {"batch": payloads})
                    results = result.get("results") or []
                    for i, item in enumerate(batch):
                        item.future.set_result(results[i] if i < len(results) else result)
                except asyncio.CancelledError:
                    for item in batch:
                        item.future.set_exception(asyncio.CancelledError())
                    raise
                except Exception as e:
                    for item in batch:
                        item.future.set_exception(e)
            except asyncio.CancelledError:
                logger.info("Batch worker for %s stopped", model)
                break
            except Exception:
                logger.exception("Batch worker for %s crashed", model)

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        for model in batch_queues:
            task = asyncio.create_task(batch_worker(model))
            worker_tasks.append(task)
        yield
        for task in worker_tasks:
            task.cancel()
        await asyncio.gather(*worker_tasks, return_exceptions=True)
        worker_tasks.clear()
        await close_client()

    app = FastAPI(title="mlproxy-py", lifespan=lifespan)

    @app.post("/infer/{model}")
    async def infer(model: str, request: Request):
        if model not in router.pools:
            raise HTTPException(status_code=404, detail="Model not found")

        payload = await request.json()

        backend = router.choose_backend(model)
        if not backend:
            raise HTTPException(status_code=503, detail="No healthy backend")

        start = time.perf_counter()
        try:
            if model in batch_queues and "batch" not in payload:
                data = await batch_queues[model].add(payload)
            else:
                data = await forward_json(backend, model, payload)
        except asyncio.CancelledError:
            raise
        except Exception as e:
            raise HTTPException(status_code=502, detail=str(e))

        latency = time.perf_counter() - start

        REQ_COUNT.labels(model=model, backend=backend.url, status="200").inc()
        REQ_LATENCY.labels(model=model, backend=backend.url).observe(latency)
        BACKEND_LATENCY.labels(model=model, backend=backend.url).set(backend.last_latency_ms)

        return JSONResponse(content=data)

    if metrics_enabled:
        @app.get(metrics_path)
        async def metrics():
            return metrics_response()

    return app
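Note: the batch worker above defines an implicit upstream contract: it POSTs `{"batch": [payload, ...]}` and, if the response contains a `results` list, hands item *i* back to caller *i* (otherwise every caller receives the whole response). A backend opting into micro-batching could handle that shape like this sketch (a hypothetical handler, not shipped with the package):

```python
# Sketch of an upstream handler for the micro-batching contract in app.py.
from fastapi import FastAPI

backend = FastAPI()

@backend.post("/infer/{model}")
async def infer(model: str, payload: dict):
    if "batch" in payload:
        # One result per queued request, preserving order, so that
        # app.py can match results[i] back to caller i.
        return {"results": [{"reply": item.get("text", "")} for item in payload["batch"]]}
    return {"reply": payload.get("text", "")}
```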

mlproxy_py-0.1.1/mlproxy_py/backends.py
ADDED
@@ -0,0 +1,17 @@
from __future__ import annotations

from dataclasses import dataclass, field
import time


@dataclass
class Backend:
    url: str
    healthy: bool = True
    last_latency_ms: float = 9999.0
    active_requests: int = 0
    last_seen: float = field(default_factory=lambda: time.time())

    def score(self) -> float:
        # Lower score is better
        return self.last_latency_ms + (self.active_requests * 5.0)
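The score is plain arithmetic: each in-flight request adds a flat 5 ms penalty on top of the last observed latency, so a lightly loaded but slightly slower backend can still win. A worked example against the dataclass above:

```python
from mlproxy_py.backends import Backend

busy = Backend(url="http://a", last_latency_ms=100.0, active_requests=2)
idle = Backend(url="http://b", last_latency_ms=105.0)

assert busy.score() == 110.0  # 100 + 2 * 5
assert idle.score() == 105.0  # idle backend wins despite higher raw latency
```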

mlproxy_py-0.1.1/mlproxy_py/batching.py
ADDED
@@ -0,0 +1,49 @@
from __future__ import annotations

import asyncio
import logging
from dataclasses import dataclass
from typing import Any, Dict, List

logger = logging.getLogger(__name__)


@dataclass
class BatchRequest:
    payload: Dict[str, Any]
    future: asyncio.Future


class BatchQueue:
    def __init__(self, max_batch_size: int, max_wait_ms: int):
        self.max_batch_size = max_batch_size
        self.max_wait_ms = max_wait_ms
        self._queue: asyncio.Queue[BatchRequest] = asyncio.Queue()

    async def add(self, payload: Dict[str, Any]) -> Any:
        loop = asyncio.get_running_loop()
        fut = loop.create_future()
        await self._queue.put(BatchRequest(payload=payload, future=fut))
        return await fut

    async def collect_batch(self) -> List[BatchRequest]:
        batch: List[BatchRequest] = []
        try:
            first = await asyncio.wait_for(self._queue.get(), timeout=self.max_wait_ms / 1000.0)
            batch.append(first)
        except asyncio.TimeoutError:
            return batch

        start = asyncio.get_running_loop().time()
        while len(batch) < self.max_batch_size:
            remaining = (self.max_wait_ms / 1000.0) - (asyncio.get_running_loop().time() - start)
            if remaining <= 0:
                break
            try:
                item = await asyncio.wait_for(self._queue.get(), timeout=remaining)
                batch.append(item)
            except asyncio.TimeoutError:
                break

        logger.debug("Collected batch of %d items", len(batch))
        return batch
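Note the two-phase wait in `collect_batch`: it blocks up to `max_wait_ms` for a first request, then opens a fresh `max_wait_ms` window to top the batch up to `max_batch_size`. A small self-contained demo of the queue in isolation, with the worker side played by hand:

```python
import asyncio

from mlproxy_py.batching import BatchQueue

async def demo():
    q = BatchQueue(max_batch_size=4, max_wait_ms=20)

    # Callers park on a future inside add() until a worker resolves it.
    callers = [asyncio.create_task(q.add({"n": i})) for i in range(3)]

    batch = await q.collect_batch()  # gathers all three within one window
    for item in batch:
        item.future.set_result(item.payload)  # a real worker forwards upstream

    print(await asyncio.gather(*callers))  # [{'n': 0}, {'n': 1}, {'n': 2}]

asyncio.run(demo())
```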

mlproxy_py-0.1.1/mlproxy_py/cli.py
ADDED
@@ -0,0 +1,96 @@
from __future__ import annotations

import asyncio
import logging
import signal

import typer
import uvicorn

from .app import create_app
from .config import load_config
from .healthcheck import close_healthcheck_client, loop as healthcheck_loop
from .router import ModelRouter

logger = logging.getLogger("mlproxy_py")

app = typer.Typer(help="mlproxy-py - QoS-aware ML inference reverse proxy")

_log_levels = {
    "DEBUG": logging.DEBUG,
    "INFO": logging.INFO,
    "WARNING": logging.WARNING,
    "ERROR": logging.ERROR,
}


@app.command()
def run(
    config: str = typer.Option(..., "--config", "-c", help="Path to YAML config file"),
    log_level: str = typer.Option("INFO", "--log-level", help="Log level: DEBUG, INFO, WARNING, ERROR"),
):
    logging.basicConfig(level=_log_levels.get(log_level.upper(), logging.INFO), format="%(levelname)s %(name)s: %(message)s")

    cfg = load_config(config)

    router = ModelRouter()
    for model, pool_cfg in cfg.models.items():
        router.register_model(model, [b.url for b in pool_cfg.backends])

    host, port = "0.0.0.0", 7000
    if ":" in cfg.listen:
        host, port_str = cfg.listen.split(":", 1)
        port = int(port_str)

    app_instance = create_app(
        router=router,
        model_cfg=cfg.models,
        metrics_enabled=cfg.metrics.enabled,
        metrics_path=cfg.metrics.path,
    )

    loop = asyncio.get_event_loop()
    hc_task: asyncio.Task | None = None
    if cfg.healthcheck.enabled:
        hc_task = loop.create_task(
            healthcheck_loop(
                router=router,
                interval_seconds=cfg.healthcheck.interval_seconds,
                timeout_seconds=cfg.healthcheck.timeout_seconds,
                path=cfg.healthcheck.path,
            )
        )

    stop_event = asyncio.Event()

    def _signal_handler():
        logger.info("Shutdown signal received...")
        stop_event.set()

    for sig in (signal.SIGINT, signal.SIGTERM):
        try:
            loop.add_signal_handler(sig, _signal_handler)
        except NotImplementedError:
            pass

    config = uvicorn.Config(app_instance, host=host, port=port)
    server = uvicorn.Server(config)

    async def wait_for_shutdown():
        await stop_event.wait()
        if hc_task:
            hc_task.cancel()
            try:
                await hc_task
            except asyncio.CancelledError:
                pass
        await close_healthcheck_client()
        server.should_exit = True

    loop.create_task(wait_for_shutdown())

    server.run()


if __name__ == "__main__":
    app()
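Since `create_app` returns an ordinary FastAPI instance, the proxy can also be embedded without the Typer CLI. A minimal sketch, assuming the healthcheck loop is not needed (cli.py is what normally starts it); the module name is hypothetical:

```python
# embed_proxy.py -- hypothetical programmatic entry point mirroring cli.py.
import uvicorn

from mlproxy_py.app import create_app
from mlproxy_py.config import load_config
from mlproxy_py.router import ModelRouter

cfg = load_config("examples/config.yml")

router = ModelRouter()
for model, pool_cfg in cfg.models.items():
    router.register_model(model, [b.url for b in pool_cfg.backends])

app = create_app(
    router=router,
    model_cfg=cfg.models,
    metrics_enabled=cfg.metrics.enabled,
    metrics_path=cfg.metrics.path,
)

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7000)
```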

mlproxy_py-0.1.1/mlproxy_py/config.py
ADDED
@@ -0,0 +1,47 @@
from __future__ import annotations

from typing import Dict, List

import yaml
from pydantic import BaseModel, Field


class BackendConfig(BaseModel):
    url: str


class BatchingConfig(BaseModel):
    enabled: bool = False
    max_batch_size: int = 8
    max_wait_ms: int = 20


class ModelPoolConfig(BaseModel):
    sla_ms: int = 300
    batching: BatchingConfig = Field(default_factory=BatchingConfig)
    backends: List[BackendConfig] = Field(default_factory=list)


class MetricsConfig(BaseModel):
    enabled: bool = True
    path: str = "/metrics"


class HealthcheckConfig(BaseModel):
    enabled: bool = True
    interval_seconds: int = 5
    timeout_seconds: int = 2
    path: str = "/health"


class AppConfig(BaseModel):
    listen: str = "0.0.0.0:7000"
    models: Dict[str, ModelPoolConfig] = Field(default_factory=dict)
    metrics: MetricsConfig = Field(default_factory=MetricsConfig)
    healthcheck: HealthcheckConfig = Field(default_factory=HealthcheckConfig)


def load_config(path: str) -> AppConfig:
    with open(path, "r", encoding="utf-8") as f:
        raw = yaml.safe_load(f) or {}
    return AppConfig.model_validate(raw)
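Everything in the config is optional except the backend URLs you actually want routed: each section falls back to the pydantic defaults above. For example:

```python
from mlproxy_py.config import AppConfig

cfg = AppConfig.model_validate(
    {"models": {"m": {"backends": [{"url": "http://localhost:8001"}]}}}
)
assert cfg.listen == "0.0.0.0:7000"               # AppConfig default
assert cfg.models["m"].batching.enabled is False  # batching off unless requested
assert cfg.healthcheck.interval_seconds == 5      # HealthcheckConfig default
```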

mlproxy_py-0.1.1/mlproxy_py/healthcheck.py
ADDED
@@ -0,0 +1,55 @@
from __future__ import annotations

import asyncio
import logging

import httpx

from .router import ModelRouter

logger = logging.getLogger(__name__)

_client: httpx.AsyncClient | None = None


def _get_client() -> httpx.AsyncClient:
    global _client
    if _client is None:
        _client = httpx.AsyncClient()
    return _client


async def close_healthcheck_client():
    global _client
    if _client is not None:
        await _client.aclose()
        _client = None


async def _check(url: str, path: str, timeout_seconds: int, client: httpx.AsyncClient) -> bool:
    try:
        r = await client.get(url.rstrip("/") + path, timeout=timeout_seconds)
        return 200 <= r.status_code < 400
    except asyncio.CancelledError:
        raise
    except Exception:
        return False


async def loop(router: ModelRouter, interval_seconds: int, timeout_seconds: int, path: str):
    client = _get_client()
    while True:
        try:
            backends = router.all_backends()
            checks = [_check(b.url, path, timeout_seconds, client) for b in backends]
            results = await asyncio.gather(*checks, return_exceptions=True)
            for backend, ok in zip(backends, results):
                backend.healthy = bool(ok)
            healthy_count = sum(1 for b in backends if b.healthy)
            logger.info("Healthcheck: %d/%d backends healthy", healthy_count, len(backends))
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception("Healthcheck loop error")

        await asyncio.sleep(interval_seconds)

mlproxy_py-0.1.1/mlproxy_py/metrics.py
ADDED
@@ -0,0 +1,27 @@
from __future__ import annotations

from prometheus_client import Counter, Histogram, Gauge, generate_latest, CONTENT_TYPE_LATEST
from starlette.responses import Response


REQ_COUNT = Counter(
    "mlproxy_requests_total",
    "Total inference requests",
    ["model", "backend", "status"],
)

REQ_LATENCY = Histogram(
    "mlproxy_request_latency_seconds",
    "Inference request latency seconds",
    ["model", "backend"],
)

BACKEND_LATENCY = Gauge(
    "mlproxy_backend_latency_ms",
    "Observed backend latency (ms)",
    ["model", "backend"],
)


def metrics_response() -> Response:
    return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST)
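With metrics enabled, the proxy serves these three series in Prometheus text format at the configured path (`/metrics` by default). A quick scrape sketch, assuming the proxy is running on the default listen address; labeled samples appear once the first request has been counted:

```python
import httpx

# Fetch the Prometheus exposition text and show only the mlproxy_* lines.
text = httpx.get("http://localhost:7000/metrics").text
for line in text.splitlines():
    if "mlproxy_" in line:
        print(line)
```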

mlproxy_py-0.1.1/mlproxy_py/proxy.py
ADDED
@@ -0,0 +1,50 @@
from __future__ import annotations

import asyncio
import logging
import time
from typing import Any, Dict

import httpx

from .backends import Backend

logger = logging.getLogger(__name__)

_client: httpx.AsyncClient | None = None


def _get_client() -> httpx.AsyncClient:
    global _client
    if _client is None:
        _client = httpx.AsyncClient(timeout=60)
    return _client


async def close_client():
    global _client
    if _client is not None:
        await _client.aclose()
        _client = None


async def forward_json(backend: Backend, model: str, payload: Dict[str, Any]) -> Dict[str, Any]:
    url = backend.url.rstrip("/") + f"/infer/{model}"
    client = _get_client()

    backend.active_requests += 1
    start = time.perf_counter()
    try:
        r = await client.post(url, json=payload)
        r.raise_for_status()
        data = r.json()
    except asyncio.CancelledError:
        raise
    except Exception:
        logger.exception("Failed to forward request to %s for model %s", backend.url, model)
        raise
    finally:
        backend.active_requests -= 1

    backend.last_latency_ms = (time.perf_counter() - start) * 1000.0
    return data

mlproxy_py-0.1.1/mlproxy_py/router.py
ADDED
@@ -0,0 +1,25 @@
from __future__ import annotations

from typing import Dict, List, Optional
from .backends import Backend


class ModelRouter:
    def __init__(self):
        self.pools: Dict[str, List[Backend]] = {}

    def register_model(self, model: str, backend_urls: List[str]):
        self.pools[model] = [Backend(url=u) for u in backend_urls]

    def choose_backend(self, model: str) -> Optional[Backend]:
        pool = self.pools.get(model) or []
        healthy = [b for b in pool if b.healthy]
        if not healthy:
            return None
        return min(healthy, key=lambda b: b.score())

    def all_backends(self) -> List[Backend]:
        out = []
        for pool in self.pools.values():
            out.extend(pool)
        return out

mlproxy_py-0.1.1/pyproject.toml
ADDED
@@ -0,0 +1,56 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "mlproxy-py"
version = "0.1.1"
description = "SLA/QoS-aware reverse proxy for ML inference workloads (batching, routing, latency metrics)."
readme = "README.md"
requires-python = ">=3.10"
license = {text = "MIT"}
authors = [{name = "Kubenew"}]
keywords = ["ml", "inference", "reverse-proxy", "qos", "batching", "asyncio", "llm"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Framework :: AsyncIO",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Internet :: WWW/HTTP :: HTTP Servers",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]
dependencies = [
    "fastapi>=0.111.0",
    "uvicorn>=0.30.0",
    "httpx>=0.27.0",
    "pydantic>=2.7.0",
    "pyyaml>=6.0.1",
    "typer>=0.12.3",
    "prometheus-client>=0.20.0",
]

[project.optional-dependencies]
test = [
    "pytest>=7.0",
    "pytest-asyncio>=0.21",
    "httpx>=0.27.0",
]
dev = ["mlproxy-py[test]", "ruff>=0.1"]

[project.scripts]
mlproxy = "mlproxy_py.cli:app"

[tool.hatch.build.targets.wheel]
packages = ["mlproxy_py"]

[tool.ruff]
line-length = 100
target-version = "py310"

[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"

mlproxy_py-0.1.1/tests/test_backends.py
ADDED
@@ -0,0 +1,27 @@
from mlproxy_py.backends import Backend


def test_score_default():
    b = Backend(url="http://localhost:8000")
    assert b.score() > 0


def test_score_latency():
    b = Backend(url="http://localhost:8000", last_latency_ms=100)
    assert b.score() == 100


def test_score_active_requests():
    b = Backend(url="http://localhost:8000", last_latency_ms=100, active_requests=2)
    assert b.score() == 100 + 2 * 5


def test_healthy_default():
    b = Backend(url="http://localhost:8000")
    assert b.healthy is True


def test_last_seen_default():
    import time
    b = Backend(url="http://localhost:8000")
    assert abs(b.last_seen - time.time()) < 1

mlproxy_py-0.1.1/tests/test_batching.py
ADDED
@@ -0,0 +1,45 @@
import asyncio

import pytest

from mlproxy_py.batching import BatchQueue, BatchRequest


@pytest.mark.asyncio
async def test_add_and_collect():
    q = BatchQueue(max_batch_size=8, max_wait_ms=5000)

    async def producer():
        return await q.add({"text": "hello"})

    async def consumer():
        batch = await q.collect_batch()
        assert len(batch) == 1
        batch[0].future.set_result({"reply": "ok"})
        return batch

    result, _ = await asyncio.gather(producer(), consumer())
    assert result == {"reply": "ok"}


@pytest.mark.asyncio
async def test_collect_multiple():
    q = BatchQueue(max_batch_size=3, max_wait_ms=5000)
    loop = asyncio.get_running_loop()

    for i in range(3):
        fut = loop.create_future()
        q._queue.put_nowait(BatchRequest(payload={"n": i}, future=fut))

    batch = await q.collect_batch()
    assert len(batch) == 3
    for item in batch:
        item.future.set_result({"n": item.payload["n"]})
        assert await item.future == {"n": item.payload["n"]}


@pytest.mark.asyncio
async def test_collect_timeout():
    q = BatchQueue(max_batch_size=8, max_wait_ms=50)
    batch = await q.collect_batch()
    assert len(batch) == 0

mlproxy_py-0.1.1/tests/test_config.py
ADDED
@@ -0,0 +1,57 @@

from mlproxy_py.config import load_config, BatchingConfig


def test_load_config(tmp_path):
    cfg_file = tmp_path / "config.yml"
    cfg_file.write_text("""
listen: "0.0.0.0:8000"
models:
  modelA:
    sla_ms: 500
    backends:
      - url: "http://localhost:8001"
metrics:
  enabled: true
  path: "/metrics"
healthcheck:
  enabled: true
  interval_seconds: 10
""")
    cfg = load_config(str(cfg_file))
    assert cfg.listen == "0.0.0.0:8000"
    assert "modelA" in cfg.models
    assert len(cfg.models["modelA"].backends) == 1
    assert cfg.models["modelA"].backends[0].url == "http://localhost:8001"
    assert cfg.models["modelA"].sla_ms == 500
    assert cfg.healthcheck.interval_seconds == 10


def test_load_config_defaults(tmp_path):
    cfg_file = tmp_path / "config.yml"
    cfg_file.write_text("models:\n  m:\n    backends:\n      - url: http://localhost:8001\n")
    cfg = load_config(str(cfg_file))
    assert cfg.listen == "0.0.0.0:7000"
    assert cfg.metrics.enabled is True
    assert cfg.healthcheck.interval_seconds == 5


def test_load_config_empty(tmp_path):
    cfg_file = tmp_path / "config.yml"
    cfg_file.write_text("")
    cfg = load_config(str(cfg_file))
    assert cfg.listen == "0.0.0.0:7000"
    assert len(cfg.models) == 0


def test_load_config_missing_file():
    import pytest
    with pytest.raises(FileNotFoundError):
        load_config("/nonexistent/config.yml")


def test_batching_config_defaults():
    b = BatchingConfig()
    assert b.enabled is False
    assert b.max_batch_size == 8
    assert b.max_wait_ms == 20

mlproxy_py-0.1.1/tests/test_healthcheck.py
ADDED
@@ -0,0 +1,29 @@
import pytest

from mlproxy_py.healthcheck import _check, close_healthcheck_client


@pytest.mark.asyncio
async def test_check_healthy(httpserver):
    httpserver.serve_content(content="ok", code=200)
    client = __import__("mlproxy_py.healthcheck", fromlist=["_get_client"])._get_client()
    result = await _check(httpserver.url, "/health", 2, client)
    assert result is True
    await close_healthcheck_client()


@pytest.mark.asyncio
async def test_check_unhealthy(httpserver):
    httpserver.serve_content(content="error", code=500)
    client = __import__("mlproxy_py.healthcheck", fromlist=["_get_client"])._get_client()
    result = await _check(httpserver.url, "/health", 2, client)
    assert result is False
    await close_healthcheck_client()


@pytest.mark.asyncio
async def test_check_connection_refused():
    client = __import__("mlproxy_py.healthcheck", fromlist=["_get_client"])._get_client()
    result = await _check("http://localhost:1", "/health", 1, client)
    assert result is False
    await close_healthcheck_client()

mlproxy_py-0.1.1/tests/test_router.py
ADDED
@@ -0,0 +1,48 @@
from mlproxy_py.router import ModelRouter


def test_choose_backend():
    r = ModelRouter()
    r.register_model("m", ["http://a", "http://b"])
    b = r.choose_backend("m")
    assert b is not None
    assert b.url in ("http://a", "http://b")


def test_choose_backend_nonexistent_model():
    r = ModelRouter()
    assert r.choose_backend("nonexistent") is None


def test_choose_backend_all_unhealthy():
    r = ModelRouter()
    r.register_model("m", ["http://a"])
    for b in r.pools["m"]:
        b.healthy = False
    assert r.choose_backend("m") is None


def test_choose_backend_picks_lowest_score():
    r = ModelRouter()
    r.register_model("m", ["http://slow", "http://fast"])
    pool = r.pools["m"]
    pool[0].last_latency_ms = 500
    pool[1].last_latency_ms = 50
    chosen = r.choose_backend("m")
    assert chosen.url == "http://fast"


def test_all_backends():
    r = ModelRouter()
    r.register_model("m1", ["http://a", "http://b"])
    r.register_model("m2", ["http://c"])
    all_b = r.all_backends()
    assert len(all_b) == 3


def test_register_overwrites_model():
    r = ModelRouter()
    r.register_model("m", ["http://a"])
    r.register_model("m", ["http://b"])
    assert len(r.pools["m"]) == 1
    assert r.pools["m"][0].url == "http://b"