deeploi 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deeploi-0.1.0/LICENSE +0 -0
- deeploi-0.1.0/PKG-INFO +388 -0
- deeploi-0.1.0/README.md +347 -0
- deeploi-0.1.0/pyproject.toml +74 -0
- deeploi-0.1.0/setup.cfg +4 -0
- deeploi-0.1.0/src/deeploi/__init__.py +51 -0
- deeploi-0.1.0/src/deeploi/api.py +88 -0
- deeploi-0.1.0/src/deeploi/constants.py +43 -0
- deeploi-0.1.0/src/deeploi/exceptions.py +38 -0
- deeploi-0.1.0/src/deeploi/inspector.py +111 -0
- deeploi-0.1.0/src/deeploi/loader.py +87 -0
- deeploi-0.1.0/src/deeploi/metadata.py +43 -0
- deeploi-0.1.0/src/deeploi/package.py +255 -0
- deeploi-0.1.0/src/deeploi/schema.py +93 -0
- deeploi-0.1.0/src/deeploi/serialization.py +46 -0
- deeploi-0.1.0/src/deeploi/serving.py +123 -0
- deeploi-0.1.0/src/deeploi/types.py +117 -0
- deeploi-0.1.0/src/deeploi/utils/__init__.py +25 -0
- deeploi-0.1.0/src/deeploi/utils/dataframe.py +56 -0
- deeploi-0.1.0/src/deeploi/utils/env.py +30 -0
- deeploi-0.1.0/src/deeploi/utils/hashing.py +22 -0
- deeploi-0.1.0/src/deeploi/utils/io.py +54 -0
- deeploi-0.1.0/src/deeploi.egg-info/PKG-INFO +388 -0
- deeploi-0.1.0/src/deeploi.egg-info/SOURCES.txt +30 -0
- deeploi-0.1.0/src/deeploi.egg-info/dependency_links.txt +1 -0
- deeploi-0.1.0/src/deeploi.egg-info/requires.txt +17 -0
- deeploi-0.1.0/src/deeploi.egg-info/top_level.txt +1 -0
- deeploi-0.1.0/tests/test_package.py +244 -0
- deeploi-0.1.0/tests/test_schema.py +192 -0
- deeploi-0.1.0/tests/test_serving.py +278 -0
- deeploi-0.1.0/tests/test_sklearn.py +116 -0
- deeploi-0.1.0/tests/test_xgboost.py +147 -0
deeploi-0.1.0/LICENSE
ADDED
|
File without changes
|
deeploi-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,388 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: deeploi
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: One-line deployment for trained tabular ML models
|
|
5
|
+
Author-email: Christian <christian@example.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/deeploi/deeploi
|
|
8
|
+
Project-URL: Repository, https://github.com/deeploi/deeploi.git
|
|
9
|
+
Project-URL: Documentation, https://github.com/deeploi/deeploi#readme
|
|
10
|
+
Keywords: machine-learning,model-deployment,api,sklearn,xgboost
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Requires-Python: >=3.8
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
License-File: LICENSE
|
|
24
|
+
Requires-Dist: pandas>=1.0.0
|
|
25
|
+
Requires-Dist: fastapi>=0.68.0
|
|
26
|
+
Requires-Dist: uvicorn>=0.15.0
|
|
27
|
+
Requires-Dist: pydantic>=1.8.0
|
|
28
|
+
Requires-Dist: joblib>=1.0.0
|
|
29
|
+
Requires-Dist: scikit-learn>=0.24.0
|
|
30
|
+
Requires-Dist: xgboost>=1.0.0
|
|
31
|
+
Provides-Extra: dev
|
|
32
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
33
|
+
Requires-Dist: pytest-cov>=3.0.0; extra == "dev"
|
|
34
|
+
Requires-Dist: black>=22.0.0; extra == "dev"
|
|
35
|
+
Requires-Dist: isort>=5.10.0; extra == "dev"
|
|
36
|
+
Requires-Dist: flake8>=4.0.0; extra == "dev"
|
|
37
|
+
Requires-Dist: mypy>=0.950; extra == "dev"
|
|
38
|
+
Requires-Dist: requests>=2.27.0; extra == "dev"
|
|
39
|
+
Requires-Dist: httpx>=0.23.0; extra == "dev"
|
|
40
|
+
Dynamic: license-file
|
|
41
|
+
|
|
42
|
+
# Deeploi
|
|
43
|
+
|
|
44
|
+
One-line deployment for trained tabular ML models.
|
|
45
|
+
|
|
46
|
+
**Deeploi** = instant API for sklearn and XGBoost → no config, no boilerplate, no DevOps required.
|
|
47
|
+
|
|
48
|
+
```python
|
|
49
|
+
from deeploi import deploy
|
|
50
|
+
|
|
51
|
+
deploy(model, sample=X_train)
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
That's it. Your model is now serving predictions at `http://127.0.0.1:8000`.
|
|
55
|
+
|
|
56
|
+
## Features
|
|
57
|
+
|
|
58
|
+
✅ **Instant API**
|
|
59
|
+
One command launches a FastAPI server with `/predict`, `/health`, `/meta` endpoints.
|
|
60
|
+
|
|
61
|
+
✅ **Schema Inference**
|
|
62
|
+
Learns feature names, dtypes, and column order from your training sample.
|
|
63
|
+
|
|
64
|
+
✅ **Artifact Packaging**
|
|
65
|
+
Save, version, and reload models with metadata.
|
|
66
|
+
|
|
67
|
+
✅ **Prediction Probabilities**
|
|
68
|
+
Classification models get a `/predict_proba` endpoint automatically.
|
|
69
|
+
|
|
70
|
+
✅ **sklearn & XGBoost**
|
|
71
|
+
Classifiers and regressors, both frameworks.
|
|
72
|
+
|
|
73
|
+
✅ **Local-First**
|
|
74
|
+
Run anywhere — no cloud, no containers, no registry.
|
|
75
|
+
|
|
76
|
+
## Quick Start
|
|
77
|
+
|
|
78
|
+
### Installation
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
pip install deeploi
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
### The One-Liner
|
|
85
|
+
|
|
86
|
+
```python
|
|
87
|
+
from sklearn.datasets import load_iris
|
|
88
|
+
from sklearn.ensemble import RandomForestClassifier
|
|
89
|
+
from deeploi import deploy
|
|
90
|
+
|
|
91
|
+
iris = load_iris(as_frame=True)
|
|
92
|
+
X, y = iris.data, iris.target
|
|
93
|
+
|
|
94
|
+
model = RandomForestClassifier(n_estimators=10).fit(X, y)
|
|
95
|
+
|
|
96
|
+
deploy(model, sample=X)
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
Server starts at `http://127.0.0.1:8000`.
|
|
100
|
+
|
|
101
|
+
### Test It
|
|
102
|
+
|
|
103
|
+
```bash
|
|
104
|
+
curl -X POST http://127.0.0.1:8000/predict \
|
|
105
|
+
-H "Content-Type: application/json" \
|
|
106
|
+
-d '{
|
|
107
|
+
"records": [
|
|
108
|
+
{
|
|
109
|
+
"sepal length (cm)": 5.1,
|
|
110
|
+
"sepal width (cm)": 3.5,
|
|
111
|
+
"petal length (cm)": 1.4,
|
|
112
|
+
"petal width (cm)": 0.2
|
|
113
|
+
}
|
|
114
|
+
]
|
|
115
|
+
}'
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
Response:
|
|
119
|
+
```json
|
|
120
|
+
{
|
|
121
|
+
"predictions": [0],
|
|
122
|
+
"probabilities": [
|
|
123
|
+
{"0": 0.91, "1": 0.06, "2": 0.03}
|
|
124
|
+
]
|
|
125
|
+
}
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
## Core API
|
|
129
|
+
|
|
130
|
+
### Three Functions
|
|
131
|
+
|
|
132
|
+
#### 1. `deploy()` — One-liner, immediate serving
|
|
133
|
+
|
|
134
|
+
```python
|
|
135
|
+
from deeploi import deploy
|
|
136
|
+
|
|
137
|
+
deploy(model, sample=X_train, host="127.0.0.1", port=8000)
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
Infers schema → packages model → starts server (blocking).
|
|
141
|
+
|
|
142
|
+
#### 2. `package()` — Reusable object
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
from deeploi import package
|
|
146
|
+
|
|
147
|
+
pkg = package(model, sample=X_train)
|
|
148
|
+
|
|
149
|
+
# Use it
|
|
150
|
+
preds = pkg.predict(X_test)
|
|
151
|
+
|
|
152
|
+
# or save it
|
|
153
|
+
pkg.save("artifacts/iris_rf")
|
|
154
|
+
|
|
155
|
+
# or serve it later
|
|
156
|
+
pkg.serve(port=8000)
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
#### 3. `load()` — Reload saved artifacts
|
|
160
|
+
|
|
161
|
+
```python
|
|
162
|
+
from deeploi import load
|
|
163
|
+
|
|
164
|
+
pkg = load("artifacts/iris_rf")
|
|
165
|
+
preds = pkg.predict(X_test)
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
## Endpoints
|
|
169
|
+
|
|
170
|
+
### `GET /health`
|
|
171
|
+
|
|
172
|
+
```bash
|
|
173
|
+
curl http://127.0.0.1:8000/health
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
```json
|
|
177
|
+
{
|
|
178
|
+
"status": "ok",
|
|
179
|
+
"version": "0.1.0"
|
|
180
|
+
}
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
### `GET /meta`
|
|
184
|
+
|
|
185
|
+
```bash
|
|
186
|
+
curl http://127.0.0.1:8000/meta
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
```json
|
|
190
|
+
{
|
|
191
|
+
"framework": "sklearn",
|
|
192
|
+
"estimator_class": "RandomForestClassifier",
|
|
193
|
+
"task_type": "classification",
|
|
194
|
+
"supports_predict_proba": true,
|
|
195
|
+
"python_version": "3.11.0",
|
|
196
|
+
"deeploi_version": "0.1.0",
|
|
197
|
+
"created_at": "2026-03-17T12:00:00Z"
|
|
198
|
+
}
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
### `POST /predict`
|
|
202
|
+
|
|
203
|
+
```bash
|
|
204
|
+
curl -X POST http://127.0.0.1:8000/predict \
|
|
205
|
+
-H "Content-Type: application/json" \
|
|
206
|
+
-d '{"records": [{"col_1": 1.0, "col_2": 2.0}]}'
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
**Regression response:**
|
|
210
|
+
```json
|
|
211
|
+
{
|
|
212
|
+
"predictions": [123.45, 118.91],
|
|
213
|
+
"probabilities": null
|
|
214
|
+
}
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
**Classification response:**
|
|
218
|
+
```json
|
|
219
|
+
{
|
|
220
|
+
"predictions": [0, 1],
|
|
221
|
+
"probabilities": [
|
|
222
|
+
{"0": 0.91, "1": 0.09},
|
|
223
|
+
{"0": 0.12, "1": 0.88}
|
|
224
|
+
]
|
|
225
|
+
}
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
### `POST /predict_proba`
|
|
229
|
+
|
|
230
|
+
Classification models only. Same as `/predict` with probabilities.
|
|
231
|
+
|
|
232
|
+
```bash
|
|
233
|
+
curl -X POST http://127.0.0.1:8000/predict_proba \
|
|
234
|
+
-H "Content-Type: application/json" \
|
|
235
|
+
-d '{"records": [{"col_1": 1.0, "col_2": 2.0}]}'
|
|
236
|
+
```
|
|
237
|
+
|
|
238
|
+
## Artifact Structure
|
|
239
|
+
|
|
240
|
+
When you call `pkg.save("path/to/artifact")`, you get:
|
|
241
|
+
|
|
242
|
+
```
|
|
243
|
+
path/to/artifact/
|
|
244
|
+
├── model.joblib # Serialized model
|
|
245
|
+
├── metadata.json # Versions, task type, timestamps
|
|
246
|
+
├── schema.json # Features, dtypes, column order
|
|
247
|
+
├── deeploi.json # Manifest
|
|
248
|
+
└── requirements.txt # Dependencies
|
|
249
|
+
```
|
|
250
|
+
|
|
251
|
+
**metadata.json:**
|
|
252
|
+
```json
|
|
253
|
+
{
|
|
254
|
+
"framework": "sklearn",
|
|
255
|
+
"estimator_class": "RandomForestClassifier",
|
|
256
|
+
"task_type": "classification",
|
|
257
|
+
"supports_predict_proba": true,
|
|
258
|
+
"created_at": "2026-03-17T12:00:00Z",
|
|
259
|
+
"python_version": "3.11.0",
|
|
260
|
+
"deeploi_version": "0.1.0",
|
|
261
|
+
"library_versions": {
|
|
262
|
+
"sklearn": "1.5.0",
|
|
263
|
+
"xgboost": "2.0.0",
|
|
264
|
+
"pandas": "2.0.0"
|
|
265
|
+
}
|
|
266
|
+
}
|
|
267
|
+
```
|
|
268
|
+
|
|
269
|
+
**schema.json:**
|
|
270
|
+
```json
|
|
271
|
+
{
|
|
272
|
+
"features": [
|
|
273
|
+
{"name": "sepal length (cm)", "dtype": "float64", "nullable": false},
|
|
274
|
+
{"name": "sepal width (cm)", "dtype": "float64", "nullable": false},
|
|
275
|
+
{"name": "petal length (cm)", "dtype": "float64", "nullable": false},
|
|
276
|
+
{"name": "petal width (cm)", "dtype": "float64", "nullable": false}
|
|
277
|
+
],
|
|
278
|
+
"column_order": [
|
|
279
|
+
"sepal length (cm)",
|
|
280
|
+
"sepal width (cm)",
|
|
281
|
+
"petal length (cm)",
|
|
282
|
+
"petal width (cm)"
|
|
283
|
+
]
|
|
284
|
+
}
|
|
285
|
+
```
|
|
286
|
+
|
|
287
|
+
## Examples
|
|
288
|
+
|
|
289
|
+
### Scikit-learn Regressor
|
|
290
|
+
|
|
291
|
+
```python
|
|
292
|
+
from sklearn.datasets import load_diabetes
|
|
293
|
+
from sklearn.ensemble import RandomForestRegressor
|
|
294
|
+
from deeploi import package
|
|
295
|
+
|
|
296
|
+
X, y = load_diabetes(return_X_y=True, as_frame=True)
|
|
297
|
+
model = RandomForestRegressor().fit(X, y)
|
|
298
|
+
|
|
299
|
+
pkg = package(model, X)
|
|
300
|
+
preds = pkg.predict(X[:5])
|
|
301
|
+
print(preds.to_json())
|
|
302
|
+
```
|
|
303
|
+
|
|
304
|
+
### XGBoost Classifier
|
|
305
|
+
|
|
306
|
+
```python
|
|
307
|
+
import pandas as pd
|
|
308
|
+
import xgboost as xgb
|
|
309
|
+
from deeploi import deploy
|
|
310
|
+
|
|
311
|
+
df = pd.read_csv("data.csv")
|
|
312
|
+
X = df.drop("target", axis=1)
|
|
313
|
+
y = df["target"]
|
|
314
|
+
|
|
315
|
+
model = xgb.XGBClassifier().fit(X, y)
|
|
316
|
+
|
|
317
|
+
deploy(model, sample=X, port=9000)
|
|
318
|
+
```
|
|
319
|
+
|
|
320
|
+
### Save & Load
|
|
321
|
+
|
|
322
|
+
```python
|
|
323
|
+
from deeploi import package, load
|
|
324
|
+
|
|
325
|
+
pkg = package(model, X_train)
|
|
326
|
+
pkg.save("artifacts/v1")
|
|
327
|
+
|
|
328
|
+
# Later...
|
|
329
|
+
pkg = load("artifacts/v1")
|
|
330
|
+
preds = pkg.predict(X_test)
|
|
331
|
+
```
|
|
332
|
+
|
|
333
|
+
## Error Handling
|
|
334
|
+
|
|
335
|
+
```python
|
|
336
|
+
from deeploi import package, UnsupportedModelError, InvalidSampleError
|
|
337
|
+
|
|
338
|
+
try:
|
|
339
|
+
pkg = package(my_model, my_sample)
|
|
340
|
+
except UnsupportedModelError:
|
|
341
|
+
print("Only sklearn and XGBoost are supported")
|
|
342
|
+
except InvalidSampleError:
|
|
343
|
+
print("Sample must be a non-empty DataFrame")
|
|
344
|
+
```
|
|
345
|
+
|
|
346
|
+
## What's in v0.1.0
|
|
347
|
+
|
|
348
|
+
**Supported:**
|
|
349
|
+
- sklearn classifiers and regressors
|
|
350
|
+
- XGBoost classifiers and regressors
|
|
351
|
+
- pandas DataFrame inputs
|
|
352
|
+
- Local FastAPI serving
|
|
353
|
+
- Model save/load
|
|
354
|
+
- Schema inference
|
|
355
|
+
- Prediction probabilities (classifiers)
|
|
356
|
+
|
|
357
|
+
**Not in v0.1.0:**
|
|
358
|
+
- S3 / cloud storage
|
|
359
|
+
- Docker generation
|
|
360
|
+
- Authentication
|
|
361
|
+
- Batch inference
|
|
362
|
+
- Async workers
|
|
363
|
+
- Model registry
|
|
364
|
+
- Monitoring
|
|
365
|
+
|
|
366
|
+
## Requirements
|
|
367
|
+
|
|
368
|
+
- Python 3.8+
|
|
369
|
+
- pandas >= 1.0
|
|
370
|
+
- scikit-learn >= 0.24
|
|
371
|
+
- xgboost >= 1.0
|
|
372
|
+
- fastapi >= 0.68
|
|
373
|
+
- uvicorn >= 0.15
|
|
374
|
+
|
|
375
|
+
## License
|
|
376
|
+
|
|
377
|
+
MIT. See [LICENSE](./LICENSE).
|
|
378
|
+
|
|
379
|
+
---
|
|
380
|
+
|
|
381
|
+
**Next Steps:**
|
|
382
|
+
|
|
383
|
+
1. Try the [Iris example](examples/sklearn_classifier.py)
|
|
384
|
+
2. Package your own model
|
|
385
|
+
3. Deploy locally
|
|
386
|
+
4. Hit `/predict`
|
|
387
|
+
|
|
388
|
+
Questions? Open an issue on [GitHub](https://github.com/deeploi/deeploi).
|
deeploi-0.1.0/README.md
ADDED
|
@@ -0,0 +1,347 @@
|
|
|
1
|
+
# Deeploi
|
|
2
|
+
|
|
3
|
+
One-line deployment for trained tabular ML models.
|
|
4
|
+
|
|
5
|
+
**Deeploi** = instant API for sklearn and XGBoost → no config, no boilerplate, no DevOps required.
|
|
6
|
+
|
|
7
|
+
```python
|
|
8
|
+
from deeploi import deploy
|
|
9
|
+
|
|
10
|
+
deploy(model, sample=X_train)
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
That's it. Your model is now serving predictions at `http://127.0.0.1:8000`.
|
|
14
|
+
|
|
15
|
+
## Features
|
|
16
|
+
|
|
17
|
+
✅ **Instant API**
|
|
18
|
+
One command launches a FastAPI server with `/predict`, `/health`, `/meta` endpoints.
|
|
19
|
+
|
|
20
|
+
✅ **Schema Inference**
|
|
21
|
+
Learns feature names, dtypes, and column order from your training sample.
|
|
22
|
+
|
|
23
|
+
✅ **Artifact Packaging**
|
|
24
|
+
Save, version, and reload models with metadata.
|
|
25
|
+
|
|
26
|
+
✅ **Prediction Probabilities**
|
|
27
|
+
Classification models get a `/predict_proba` endpoint automatically.
|
|
28
|
+
|
|
29
|
+
✅ **sklearn & XGBoost**
|
|
30
|
+
Classifiers and regressors, both frameworks.
|
|
31
|
+
|
|
32
|
+
✅ **Local-First**
|
|
33
|
+
Run anywhere — no cloud, no containers, no registry.
|
|
34
|
+
|
|
35
|
+
## Quick Start
|
|
36
|
+
|
|
37
|
+
### Installation
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
pip install deeploi
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
### The One-Liner
|
|
44
|
+
|
|
45
|
+
```python
|
|
46
|
+
from sklearn.datasets import load_iris
|
|
47
|
+
from sklearn.ensemble import RandomForestClassifier
|
|
48
|
+
from deeploi import deploy
|
|
49
|
+
|
|
50
|
+
iris = load_iris(as_frame=True)
|
|
51
|
+
X, y = iris.data, iris.target
|
|
52
|
+
|
|
53
|
+
model = RandomForestClassifier(n_estimators=10).fit(X, y)
|
|
54
|
+
|
|
55
|
+
deploy(model, sample=X)
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
Server starts at `http://127.0.0.1:8000`.
|
|
59
|
+
|
|
60
|
+
### Test It
|
|
61
|
+
|
|
62
|
+
```bash
|
|
63
|
+
curl -X POST http://127.0.0.1:8000/predict \
|
|
64
|
+
-H "Content-Type: application/json" \
|
|
65
|
+
-d '{
|
|
66
|
+
"records": [
|
|
67
|
+
{
|
|
68
|
+
"sepal length (cm)": 5.1,
|
|
69
|
+
"sepal width (cm)": 3.5,
|
|
70
|
+
"petal length (cm)": 1.4,
|
|
71
|
+
"petal width (cm)": 0.2
|
|
72
|
+
}
|
|
73
|
+
]
|
|
74
|
+
}'
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
Response:
|
|
78
|
+
```json
|
|
79
|
+
{
|
|
80
|
+
"predictions": [0],
|
|
81
|
+
"probabilities": [
|
|
82
|
+
{"0": 0.91, "1": 0.06, "2": 0.03}
|
|
83
|
+
]
|
|
84
|
+
}
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
## Core API
|
|
88
|
+
|
|
89
|
+
### Three Functions
|
|
90
|
+
|
|
91
|
+
#### 1. `deploy()` — One-liner, immediate serving
|
|
92
|
+
|
|
93
|
+
```python
|
|
94
|
+
from deeploi import deploy
|
|
95
|
+
|
|
96
|
+
deploy(model, sample=X_train, host="127.0.0.1", port=8000)
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
Infers schema → packages model → starts server (blocking).
|
|
100
|
+
|
|
101
|
+
#### 2. `package()` — Reusable object
|
|
102
|
+
|
|
103
|
+
```python
|
|
104
|
+
from deeploi import package
|
|
105
|
+
|
|
106
|
+
pkg = package(model, sample=X_train)
|
|
107
|
+
|
|
108
|
+
# Use it
|
|
109
|
+
preds = pkg.predict(X_test)
|
|
110
|
+
|
|
111
|
+
# or save it
|
|
112
|
+
pkg.save("artifacts/iris_rf")
|
|
113
|
+
|
|
114
|
+
# or serve it later
|
|
115
|
+
pkg.serve(port=8000)
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
#### 3. `load()` — Reload saved artifacts
|
|
119
|
+
|
|
120
|
+
```python
|
|
121
|
+
from deeploi import load
|
|
122
|
+
|
|
123
|
+
pkg = load("artifacts/iris_rf")
|
|
124
|
+
preds = pkg.predict(X_test)
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
## Endpoints
|
|
128
|
+
|
|
129
|
+
### `GET /health`
|
|
130
|
+
|
|
131
|
+
```bash
|
|
132
|
+
curl http://127.0.0.1:8000/health
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
```json
|
|
136
|
+
{
|
|
137
|
+
"status": "ok",
|
|
138
|
+
"version": "0.1.0"
|
|
139
|
+
}
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
### `GET /meta`
|
|
143
|
+
|
|
144
|
+
```bash
|
|
145
|
+
curl http://127.0.0.1:8000/meta
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
```json
|
|
149
|
+
{
|
|
150
|
+
"framework": "sklearn",
|
|
151
|
+
"estimator_class": "RandomForestClassifier",
|
|
152
|
+
"task_type": "classification",
|
|
153
|
+
"supports_predict_proba": true,
|
|
154
|
+
"python_version": "3.11.0",
|
|
155
|
+
"deeploi_version": "0.1.0",
|
|
156
|
+
"created_at": "2026-03-17T12:00:00Z"
|
|
157
|
+
}
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
### `POST /predict`
|
|
161
|
+
|
|
162
|
+
```bash
|
|
163
|
+
curl -X POST http://127.0.0.1:8000/predict \
|
|
164
|
+
-H "Content-Type: application/json" \
|
|
165
|
+
-d '{"records": [{"col_1": 1.0, "col_2": 2.0}]}'
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
**Regression response:**
|
|
169
|
+
```json
|
|
170
|
+
{
|
|
171
|
+
"predictions": [123.45, 118.91],
|
|
172
|
+
"probabilities": null
|
|
173
|
+
}
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
**Classification response:**
|
|
177
|
+
```json
|
|
178
|
+
{
|
|
179
|
+
"predictions": [0, 1],
|
|
180
|
+
"probabilities": [
|
|
181
|
+
{"0": 0.91, "1": 0.09},
|
|
182
|
+
{"0": 0.12, "1": 0.88}
|
|
183
|
+
]
|
|
184
|
+
}
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
### `POST /predict_proba`
|
|
188
|
+
|
|
189
|
+
Classification models only. Same as `/predict` with probabilities.
|
|
190
|
+
|
|
191
|
+
```bash
|
|
192
|
+
curl -X POST http://127.0.0.1:8000/predict_proba \
|
|
193
|
+
-H "Content-Type: application/json" \
|
|
194
|
+
-d '{"records": [{"col_1": 1.0, "col_2": 2.0}]}'
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
## Artifact Structure
|
|
198
|
+
|
|
199
|
+
When you call `pkg.save("path/to/artifact")`, you get:
|
|
200
|
+
|
|
201
|
+
```
|
|
202
|
+
path/to/artifact/
|
|
203
|
+
├── model.joblib # Serialized model
|
|
204
|
+
├── metadata.json # Versions, task type, timestamps
|
|
205
|
+
├── schema.json # Features, dtypes, column order
|
|
206
|
+
├── deeploi.json # Manifest
|
|
207
|
+
└── requirements.txt # Dependencies
|
|
208
|
+
```
|
|
209
|
+
|
|
210
|
+
**metadata.json:**
|
|
211
|
+
```json
|
|
212
|
+
{
|
|
213
|
+
"framework": "sklearn",
|
|
214
|
+
"estimator_class": "RandomForestClassifier",
|
|
215
|
+
"task_type": "classification",
|
|
216
|
+
"supports_predict_proba": true,
|
|
217
|
+
"created_at": "2026-03-17T12:00:00Z",
|
|
218
|
+
"python_version": "3.11.0",
|
|
219
|
+
"deeploi_version": "0.1.0",
|
|
220
|
+
"library_versions": {
|
|
221
|
+
"sklearn": "1.5.0",
|
|
222
|
+
"xgboost": "2.0.0",
|
|
223
|
+
"pandas": "2.0.0"
|
|
224
|
+
}
|
|
225
|
+
}
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
**schema.json:**
|
|
229
|
+
```json
|
|
230
|
+
{
|
|
231
|
+
"features": [
|
|
232
|
+
{"name": "sepal length (cm)", "dtype": "float64", "nullable": false},
|
|
233
|
+
{"name": "sepal width (cm)", "dtype": "float64", "nullable": false},
|
|
234
|
+
{"name": "petal length (cm)", "dtype": "float64", "nullable": false},
|
|
235
|
+
{"name": "petal width (cm)", "dtype": "float64", "nullable": false}
|
|
236
|
+
],
|
|
237
|
+
"column_order": [
|
|
238
|
+
"sepal length (cm)",
|
|
239
|
+
"sepal width (cm)",
|
|
240
|
+
"petal length (cm)",
|
|
241
|
+
"petal width (cm)"
|
|
242
|
+
]
|
|
243
|
+
}
|
|
244
|
+
```
|
|
245
|
+
|
|
246
|
+
## Examples
|
|
247
|
+
|
|
248
|
+
### Scikit-learn Regressor
|
|
249
|
+
|
|
250
|
+
```python
|
|
251
|
+
from sklearn.datasets import load_diabetes
|
|
252
|
+
from sklearn.ensemble import RandomForestRegressor
|
|
253
|
+
from deeploi import package
|
|
254
|
+
|
|
255
|
+
X, y = load_diabetes(return_X_y=True, as_frame=True)
|
|
256
|
+
model = RandomForestRegressor().fit(X, y)
|
|
257
|
+
|
|
258
|
+
pkg = package(model, X)
|
|
259
|
+
preds = pkg.predict(X[:5])
|
|
260
|
+
print(preds.to_json())
|
|
261
|
+
```
|
|
262
|
+
|
|
263
|
+
### XGBoost Classifier
|
|
264
|
+
|
|
265
|
+
```python
|
|
266
|
+
import pandas as pd
|
|
267
|
+
import xgboost as xgb
|
|
268
|
+
from deeploi import deploy
|
|
269
|
+
|
|
270
|
+
df = pd.read_csv("data.csv")
|
|
271
|
+
X = df.drop("target", axis=1)
|
|
272
|
+
y = df["target"]
|
|
273
|
+
|
|
274
|
+
model = xgb.XGBClassifier().fit(X, y)
|
|
275
|
+
|
|
276
|
+
deploy(model, sample=X, port=9000)
|
|
277
|
+
```
|
|
278
|
+
|
|
279
|
+
### Save & Load
|
|
280
|
+
|
|
281
|
+
```python
|
|
282
|
+
from deeploi import package, load
|
|
283
|
+
|
|
284
|
+
pkg = package(model, X_train)
|
|
285
|
+
pkg.save("artifacts/v1")
|
|
286
|
+
|
|
287
|
+
# Later...
|
|
288
|
+
pkg = load("artifacts/v1")
|
|
289
|
+
preds = pkg.predict(X_test)
|
|
290
|
+
```
|
|
291
|
+
|
|
292
|
+
## Error Handling
|
|
293
|
+
|
|
294
|
+
```python
|
|
295
|
+
from deeploi import package, UnsupportedModelError, InvalidSampleError
|
|
296
|
+
|
|
297
|
+
try:
|
|
298
|
+
pkg = package(my_model, my_sample)
|
|
299
|
+
except UnsupportedModelError:
|
|
300
|
+
print("Only sklearn and XGBoost are supported")
|
|
301
|
+
except InvalidSampleError:
|
|
302
|
+
print("Sample must be a non-empty DataFrame")
|
|
303
|
+
```
|
|
304
|
+
|
|
305
|
+
## What's in v0.1.0
|
|
306
|
+
|
|
307
|
+
**Supported:**
|
|
308
|
+
- sklearn classifiers and regressors
|
|
309
|
+
- XGBoost classifiers and regressors
|
|
310
|
+
- pandas DataFrame inputs
|
|
311
|
+
- Local FastAPI serving
|
|
312
|
+
- Model save/load
|
|
313
|
+
- Schema inference
|
|
314
|
+
- Prediction probabilities (classifiers)
|
|
315
|
+
|
|
316
|
+
**Not in v0.1.0:**
|
|
317
|
+
- S3 / cloud storage
|
|
318
|
+
- Docker generation
|
|
319
|
+
- Authentication
|
|
320
|
+
- Batch inference
|
|
321
|
+
- Async workers
|
|
322
|
+
- Model registry
|
|
323
|
+
- Monitoring
|
|
324
|
+
|
|
325
|
+
## Requirements
|
|
326
|
+
|
|
327
|
+
- Python 3.8+
|
|
328
|
+
- pandas >= 1.0
|
|
329
|
+
- scikit-learn >= 0.24
|
|
330
|
+
- xgboost >= 1.0
|
|
331
|
+
- fastapi >= 0.68
|
|
332
|
+
- uvicorn >= 0.15
|
|
333
|
+
|
|
334
|
+
## License
|
|
335
|
+
|
|
336
|
+
MIT. See [LICENSE](./LICENSE).
|
|
337
|
+
|
|
338
|
+
---
|
|
339
|
+
|
|
340
|
+
**Next Steps:**
|
|
341
|
+
|
|
342
|
+
1. Try the [Iris example](examples/sklearn_classifier.py)
|
|
343
|
+
2. Package your own model
|
|
344
|
+
3. Deploy locally
|
|
345
|
+
4. Hit `/predict`
|
|
346
|
+
|
|
347
|
+
Questions? Open an issue on [GitHub](https://github.com/deeploi/deeploi).
|