mlproxy_py-0.1.1-py3-none-any.whl

This diff shows the content of a publicly available package version as it appears in its public registry. It is provided for informational purposes only.
mlproxy_py/__init__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.1.1"
mlproxy_py/app.py ADDED
@@ -0,0 +1,115 @@
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ import time
+ from contextlib import asynccontextmanager
+ from typing import Dict, List
+
+ from fastapi import FastAPI, HTTPException, Request
+ from fastapi.responses import JSONResponse
+
+ from .batching import BatchQueue
+ from .metrics import BACKEND_LATENCY, REQ_COUNT, REQ_LATENCY, metrics_response
+ from .proxy import forward_json, close_client
+ from .router import ModelRouter
+
+ logger = logging.getLogger(__name__)
+
+
+ def create_app(router: ModelRouter, model_cfg: dict, metrics_enabled: bool, metrics_path: str) -> FastAPI:
+     batch_queues: Dict[str, BatchQueue] = {}
+     worker_tasks: List[asyncio.Task] = []
+
+     for model, cfg in model_cfg.items():
+         batching_cfg = cfg.batching
+         if batching_cfg.enabled:
+             batch_queues[model] = BatchQueue(
+                 max_batch_size=batching_cfg.max_batch_size,
+                 max_wait_ms=batching_cfg.max_wait_ms,
+             )
+
+     async def batch_worker(model: str):
+         q = batch_queues[model]
+         while True:
+             try:
+                 batch = await q.collect_batch()
+                 if not batch:
+                     continue
+
+                 backend = router.choose_backend(model)
+                 if not backend:
+                     for item in batch:
+                         item.future.set_exception(RuntimeError("No healthy backend"))
+                     continue
+
+                 payloads = [item.payload for item in batch]
+
+                 try:
+                     result = await forward_json(backend, model, {"batch": payloads})
+                     results = result.get("results") or []
+                     for i, item in enumerate(batch):
+                         item.future.set_result(results[i] if i < len(results) else result)
+                 except asyncio.CancelledError:
+                     for item in batch:
+                         item.future.set_exception(asyncio.CancelledError())
+                     raise
+                 except Exception as e:
+                     for item in batch:
+                         item.future.set_exception(e)
+             except asyncio.CancelledError:
+                 logger.info("Batch worker for %s stopped", model)
+                 break
+             except Exception:
+                 logger.exception("Batch worker for %s crashed", model)
+
+     @asynccontextmanager
+     async def lifespan(app: FastAPI):
+         for model in batch_queues:
+             task = asyncio.create_task(batch_worker(model))
+             worker_tasks.append(task)
+         yield
+         for task in worker_tasks:
+             task.cancel()
+         await asyncio.gather(*worker_tasks, return_exceptions=True)
+         worker_tasks.clear()
+         await close_client()
+
+     app = FastAPI(title="mlproxy-py", lifespan=lifespan)
+
+     @app.post("/infer/{model}")
+     async def infer(model: str, request: Request):
+         if model not in router.pools:
+             raise HTTPException(status_code=404, detail="Model not found")
+
+         payload = await request.json()
+
+         backend = router.choose_backend(model)
+         if not backend:
+             raise HTTPException(status_code=503, detail="No healthy backend")
+
+         start = time.perf_counter()
+         try:
+             if model in batch_queues and "batch" not in payload:
+                 data = await batch_queues[model].add(payload)
+             else:
+                 data = await forward_json(backend, model, payload)
+         except asyncio.CancelledError:
+             raise
+         except Exception as e:
+             raise HTTPException(status_code=502, detail=str(e))
+
+         latency = time.perf_counter() - start
+
+         REQ_COUNT.labels(model=model, backend=backend.url, status="200").inc()
+         REQ_LATENCY.labels(model=model, backend=backend.url).observe(latency)
+         BACKEND_LATENCY.labels(model=model, backend=backend.url).set(backend.last_latency_ms)
+
+         return JSONResponse(content=data)
+
+     if metrics_enabled:
+         @app.get(metrics_path)
+         async def metrics():
+             return metrics_response()
+
+     return app
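`create_app()` is usable without the CLI wrapper. A minimal embedding sketch mirroring what `cli.py` does (the config path and port are hypothetical):

```python
import uvicorn

from mlproxy_py.app import create_app
from mlproxy_py.config import load_config
from mlproxy_py.router import ModelRouter

cfg = load_config("config.yml")  # hypothetical path

# Build one pool per configured model, exactly as the CLI does.
router = ModelRouter()
for model, pool_cfg in cfg.models.items():
    router.register_model(model, [b.url for b in pool_cfg.backends])

app = create_app(
    router=router,
    model_cfg=cfg.models,
    metrics_enabled=cfg.metrics.enabled,
    metrics_path=cfg.metrics.path,
)

# uvicorn drives the lifespan, which starts and stops the batch workers.
uvicorn.run(app, host="0.0.0.0", port=7000)
```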
mlproxy_py/backends.py ADDED
@@ -0,0 +1,17 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ import time
+
+
+ @dataclass
+ class Backend:
+     url: str
+     healthy: bool = True
+     last_latency_ms: float = 9999.0
+     active_requests: int = 0
+     last_seen: float = field(default_factory=lambda: time.time())
+
+     def score(self) -> float:
+         # Lower score is better
+         return self.last_latency_ms + (self.active_requests * 5.0)
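To make the weighting concrete, a worked example under the formula above (URLs are placeholders): each in-flight request costs 5 score points, so a 40 ms latency advantage is wiped out by eight queued requests.

```python
from mlproxy_py.backends import Backend

fast_but_busy = Backend(url="http://10.0.0.1:9000", last_latency_ms=80.0, active_requests=10)
slow_but_idle = Backend(url="http://10.0.0.2:9000", last_latency_ms=120.0)

print(fast_but_busy.score())  # 80 + 10*5 = 130.0
print(slow_but_idle.score())  # 120 + 0*5 = 120.0
# ModelRouter.choose_backend() takes min(score), so the idle backend wins here.
```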
mlproxy_py/batching.py ADDED
@@ -0,0 +1,49 @@
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ from dataclasses import dataclass
+ from typing import Any, Dict, List
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class BatchRequest:
+     payload: Dict[str, Any]
+     future: asyncio.Future
+
+
+ class BatchQueue:
+     def __init__(self, max_batch_size: int, max_wait_ms: int):
+         self.max_batch_size = max_batch_size
+         self.max_wait_ms = max_wait_ms
+         self._queue: asyncio.Queue[BatchRequest] = asyncio.Queue()
+
+     async def add(self, payload: Dict[str, Any]) -> Any:
+         loop = asyncio.get_running_loop()
+         fut = loop.create_future()
+         await self._queue.put(BatchRequest(payload=payload, future=fut))
+         return await fut
+
+     async def collect_batch(self) -> List[BatchRequest]:
+         batch: List[BatchRequest] = []
+         try:
+             first = await asyncio.wait_for(self._queue.get(), timeout=self.max_wait_ms / 1000.0)
+             batch.append(first)
+         except asyncio.TimeoutError:
+             return batch
+
+         start = asyncio.get_running_loop().time()
+         while len(batch) < self.max_batch_size:
+             remaining = (self.max_wait_ms / 1000.0) - (asyncio.get_running_loop().time() - start)
+             if remaining <= 0:
+                 break
+             try:
+                 item = await asyncio.wait_for(self._queue.get(), timeout=remaining)
+                 batch.append(item)
+             except asyncio.TimeoutError:
+                 break
+
+         logger.debug("Collected batch of %d items", len(batch))
+         return batch
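A standalone sketch of the queue's contract (payload values are hypothetical): callers await `add()` and are parked on a future; a worker drains one batch with `collect_batch()` and resolves each caller.

```python
import asyncio

from mlproxy_py.batching import BatchQueue


async def main():
    q = BatchQueue(max_batch_size=4, max_wait_ms=20)

    async def worker():
        # One drain cycle: wait up to 20 ms, take up to 4 requests.
        batch = await q.collect_batch()
        for item in batch:
            item.future.set_result({"echo": item.payload})

    task = asyncio.create_task(worker())
    # Two concurrent callers; both are answered from the same collected batch.
    results = await asyncio.gather(q.add({"text": "a"}), q.add({"text": "b"}))
    print(results)  # [{'echo': {'text': 'a'}}, {'echo': {'text': 'b'}}]
    await task


asyncio.run(main())
```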
mlproxy_py/cli.py ADDED
@@ -0,0 +1,99 @@
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ import signal
+
+ import typer
+ import uvicorn
+
+ from .app import create_app
+ from .config import load_config
+ from .healthcheck import close_healthcheck_client, loop as healthcheck_loop
+ from .router import ModelRouter
+
+ logger = logging.getLogger("mlproxy_py")
+
+ app = typer.Typer(help="mlproxy-py - QoS-aware ML inference reverse proxy")
+
+ _log_levels = {
+     "DEBUG": logging.DEBUG,
+     "INFO": logging.INFO,
+     "WARNING": logging.WARNING,
+     "ERROR": logging.ERROR,
+ }
+
+
+ @app.command()
+ def run(
+     config: str = typer.Option(..., "--config", "-c", help="Path to YAML config file"),
+     log_level: str = typer.Option("INFO", "--log-level", help="Log level: DEBUG, INFO, WARNING, ERROR"),
+ ):
+     logging.basicConfig(level=_log_levels.get(log_level.upper(), logging.INFO), format="%(levelname)s %(name)s: %(message)s")
+
+     cfg = load_config(config)
+
+     router = ModelRouter()
+     for model, pool_cfg in cfg.models.items():
+         router.register_model(model, [b.url for b in pool_cfg.backends])
+
+     host, port = "0.0.0.0", 7000
+     if ":" in cfg.listen:
+         host, port_str = cfg.listen.split(":", 1)
+         port = int(port_str)
+
+     app_instance = create_app(
+         router=router,
+         model_cfg=cfg.models,
+         metrics_enabled=cfg.metrics.enabled,
+         metrics_path=cfg.metrics.path,
+     )
+
+     # asyncio.get_event_loop() is deprecated without a running loop; create one explicitly.
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
+     hc_task: asyncio.Task | None = None
+     if cfg.healthcheck.enabled:
+         hc_task = loop.create_task(
+             healthcheck_loop(
+                 router=router,
+                 interval_seconds=cfg.healthcheck.interval_seconds,
+                 timeout_seconds=cfg.healthcheck.timeout_seconds,
+                 path=cfg.healthcheck.path,
+             )
+         )
+
+     stop_event = asyncio.Event()
+
+     def _signal_handler():
+         logger.info("Shutdown signal received...")
+         stop_event.set()
+
+     for sig in (signal.SIGINT, signal.SIGTERM):
+         try:
+             loop.add_signal_handler(sig, _signal_handler)
+         except NotImplementedError:
+             pass
+
+     uv_config = uvicorn.Config(app_instance, host=host, port=port)  # renamed: don't shadow the --config option
+     server = uvicorn.Server(uv_config)
+
+     async def wait_for_shutdown():
+         await stop_event.wait()
+         if hc_task:
+             hc_task.cancel()
+             try:
+                 await hc_task
+             except asyncio.CancelledError:
+                 pass
+         await close_healthcheck_client()
+         server.should_exit = True
+
+     loop.create_task(wait_for_shutdown())
+
+     # server.run() would start a second event loop and strand the tasks scheduled above.
+     loop.run_until_complete(server.serve())
+
+
+ if __name__ == "__main__":
+     app()
mlproxy_py/config.py ADDED
@@ -0,0 +1,47 @@
+ from __future__ import annotations
+
+ from typing import Dict, List
+
+ import yaml
+ from pydantic import BaseModel, Field
+
+
+ class BackendConfig(BaseModel):
+     url: str
+
+
+ class BatchingConfig(BaseModel):
+     enabled: bool = False
+     max_batch_size: int = 8
+     max_wait_ms: int = 20
+
+
+ class ModelPoolConfig(BaseModel):
+     sla_ms: int = 300
+     batching: BatchingConfig = Field(default_factory=BatchingConfig)
+     backends: List[BackendConfig] = Field(default_factory=list)
+
+
+ class MetricsConfig(BaseModel):
+     enabled: bool = True
+     path: str = "/metrics"
+
+
+ class HealthcheckConfig(BaseModel):
+     enabled: bool = True
+     interval_seconds: int = 5
+     timeout_seconds: int = 2
+     path: str = "/health"
+
+
+ class AppConfig(BaseModel):
+     listen: str = "0.0.0.0:7000"
+     models: Dict[str, ModelPoolConfig] = Field(default_factory=dict)
+     metrics: MetricsConfig = Field(default_factory=MetricsConfig)
+     healthcheck: HealthcheckConfig = Field(default_factory=HealthcheckConfig)
+
+
+ def load_config(path: str) -> AppConfig:
+     with open(path, "r", encoding="utf-8") as f:
+         raw = yaml.safe_load(f) or {}
+     return AppConfig.model_validate(raw)
mlproxy_py/healthcheck.py ADDED
@@ -0,0 +1,55 @@
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+
+ import httpx
+
+ from .router import ModelRouter
+
+ logger = logging.getLogger(__name__)
+
+ _client: httpx.AsyncClient | None = None
+
+
+ def _get_client() -> httpx.AsyncClient:
+     global _client
+     if _client is None:
+         _client = httpx.AsyncClient()
+     return _client
+
+
+ async def close_healthcheck_client():
+     global _client
+     if _client is not None:
+         await _client.aclose()
+         _client = None
+
+
+ async def _check(url: str, path: str, timeout_seconds: int, client: httpx.AsyncClient) -> bool:
+     try:
+         r = await client.get(url.rstrip("/") + path, timeout=timeout_seconds)
+         return 200 <= r.status_code < 400
+     except asyncio.CancelledError:
+         raise
+     except Exception:
+         return False
+
+
+ async def loop(router: ModelRouter, interval_seconds: int, timeout_seconds: int, path: str):
+     client = _get_client()
+     while True:
+         try:
+             backends = router.all_backends()
+             checks = [_check(b.url, path, timeout_seconds, client) for b in backends]
+             results = await asyncio.gather(*checks, return_exceptions=True)
+             for backend, ok in zip(backends, results):
+                 backend.healthy = ok is True  # exceptions returned by gather() are truthy; don't count them as healthy
+             healthy_count = sum(1 for b in backends if b.healthy)
+             logger.info("Healthcheck: %d/%d backends healthy", healthy_count, len(backends))
+         except asyncio.CancelledError:
+             raise
+         except Exception:
+             logger.exception("Healthcheck loop error")
+
+         await asyncio.sleep(interval_seconds)
mlproxy_py/metrics.py ADDED
@@ -0,0 +1,27 @@
+ from __future__ import annotations
+
+ from prometheus_client import Counter, Histogram, Gauge, generate_latest, CONTENT_TYPE_LATEST
+ from starlette.responses import Response
+
+
+ REQ_COUNT = Counter(
+     "mlproxy_requests_total",
+     "Total inference requests",
+     ["model", "backend", "status"],
+ )
+
+ REQ_LATENCY = Histogram(
+     "mlproxy_request_latency_seconds",
+     "Inference request latency seconds",
+     ["model", "backend"],
+ )
+
+ BACKEND_LATENCY = Gauge(
+     "mlproxy_backend_latency_ms",
+     "Observed backend latency (ms)",
+     ["model", "backend"],
+ )
+
+
+ def metrics_response() -> Response:
+     return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST)
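A scrape of the metrics endpoint renders these collectors in the standard Prometheus text format, roughly as below (label values and numbers are illustrative):

```
# HELP mlproxy_requests_total Total inference requests
# TYPE mlproxy_requests_total counter
mlproxy_requests_total{model="modelA",backend="http://127.0.0.1:9001",status="200"} 42.0
# HELP mlproxy_backend_latency_ms Observed backend latency (ms)
# TYPE mlproxy_backend_latency_ms gauge
mlproxy_backend_latency_ms{model="modelA",backend="http://127.0.0.1:9001"} 17.3
```

The histogram additionally emits `_bucket`, `_sum`, and `_count` series per label set.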
mlproxy_py/proxy.py ADDED
@@ -0,0 +1,50 @@
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ import time
+ from typing import Any, Dict
+
+ import httpx
+
+ from .backends import Backend
+
+ logger = logging.getLogger(__name__)
+
+ _client: httpx.AsyncClient | None = None
+
+
+ def _get_client() -> httpx.AsyncClient:
+     global _client
+     if _client is None:
+         _client = httpx.AsyncClient(timeout=60)
+     return _client
+
+
+ async def close_client():
+     global _client
+     if _client is not None:
+         await _client.aclose()
+         _client = None
+
+
+ async def forward_json(backend: Backend, model: str, payload: Dict[str, Any]) -> Dict[str, Any]:
+     url = backend.url.rstrip("/") + f"/infer/{model}"
+     client = _get_client()
+
+     backend.active_requests += 1
+     start = time.perf_counter()
+     try:
+         r = await client.post(url, json=payload)
+         r.raise_for_status()
+         data = r.json()
+     except asyncio.CancelledError:
+         raise
+     except Exception:
+         logger.exception("Failed to forward request to %s for model %s", backend.url, model)
+         raise
+     finally:
+         backend.active_requests -= 1
+
+     backend.last_latency_ms = (time.perf_counter() - start) * 1000.0
+     return data
mlproxy_py/router.py ADDED
@@ -0,0 +1,25 @@
+ from __future__ import annotations
+
+ from typing import Dict, List, Optional
+ from .backends import Backend
+
+
+ class ModelRouter:
+     def __init__(self):
+         self.pools: Dict[str, List[Backend]] = {}
+
+     def register_model(self, model: str, backend_urls: List[str]):
+         self.pools[model] = [Backend(url=u) for u in backend_urls]
+
+     def choose_backend(self, model: str) -> Optional[Backend]:
+         pool = self.pools.get(model) or []
+         healthy = [b for b in pool if b.healthy]
+         if not healthy:
+             return None
+         return min(healthy, key=lambda b: b.score())
+
+     def all_backends(self) -> List[Backend]:
+         out = []
+         for pool in self.pools.values():
+             out.extend(pool)
+         return out
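A short usage sketch (URLs are placeholders): the healthcheck loop flips `Backend.healthy`, and routing reacts immediately because `choose_backend()` filters on it each call.

```python
from mlproxy_py.router import ModelRouter

router = ModelRouter()
router.register_model("modelA", ["http://10.0.0.1:9000", "http://10.0.0.2:9000"])

# Simulate a failed health check on the first backend.
router.pools["modelA"][0].healthy = False

backend = router.choose_backend("modelA")
print(backend.url)  # http://10.0.0.2:9000 (only healthy candidate)
```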
mlproxy_py-0.1.1.dist-info/METADATA ADDED
@@ -0,0 +1,131 @@
+ Metadata-Version: 2.4
+ Name: mlproxy-py
+ Version: 0.1.1
+ Summary: SLA/QoS-aware reverse proxy for ML inference workloads (batching, routing, latency metrics).
+ Author: Kubenew
+ License: MIT
+ License-File: LICENSE
+ Keywords: asyncio,batching,inference,llm,ml,qos,reverse-proxy
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Framework :: AsyncIO
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Internet :: WWW/HTTP :: HTTP Servers
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.10
+ Requires-Dist: fastapi>=0.111.0
+ Requires-Dist: httpx>=0.27.0
+ Requires-Dist: prometheus-client>=0.20.0
+ Requires-Dist: pydantic>=2.7.0
+ Requires-Dist: pyyaml>=6.0.1
+ Requires-Dist: typer>=0.12.3
+ Requires-Dist: uvicorn>=0.30.0
+ Provides-Extra: dev
+ Requires-Dist: httpx>=0.27.0; extra == 'dev'
+ Requires-Dist: pytest-asyncio>=0.21; extra == 'dev'
+ Requires-Dist: pytest>=7.0; extra == 'dev'
+ Requires-Dist: ruff>=0.1; extra == 'dev'
+ Provides-Extra: test
+ Requires-Dist: httpx>=0.27.0; extra == 'test'
+ Requires-Dist: pytest-asyncio>=0.21; extra == 'test'
+ Requires-Dist: pytest>=7.0; extra == 'test'
+ Description-Content-Type: text/markdown
+
+ # mlproxy-py
+
+ [![PyPI](https://img.shields.io/pypi/v/mlproxy-py)](https://pypi.org/project/mlproxy-py/)
+ [![Python Versions](https://img.shields.io/pypi/pyversions/mlproxy-py)](https://pypi.org/project/mlproxy-py/)
+ [![License](https://img.shields.io/pypi/l/mlproxy-py)](https://github.com/Kubenew/mlproxy-py/blob/main/LICENSE)
+ [![GitHub stars](https://img.shields.io/github/stars/Kubenew/mlproxy-py?style=flat&logo=github)](https://github.com/Kubenew/mlproxy-py)
+ [![Downloads](https://img.shields.io/pepy.tech/dt/mlproxy-py)](https://pepy.tech/project/mlproxy-py)
+
+ **mlproxy-py** is a minimal ML inference reverse proxy with QoS-aware routing.
+
+ Designed for LLM / ML inference workloads where routing decisions should be based on latency, SLA targets, backend health, queue depth, and batching potential.
+
+ ## Features
+
+ - Reverse proxy for JSON inference requests
+ - Backends grouped into model pools
+ - SLA-aware routing (choose lowest latency backend)
+ - Optional micro-batching (collect requests for N ms)
+ - Concurrent health checks with connection pooling
+ - Prometheus metrics (request count, latency, backend latency)
+
+ ## Quickstart
+
+ ### Install
+
+ ```bash
+ pip install mlproxy-py
+ ```
+
+ ### Run proxy
+
+ ```bash
+ mlproxy run -c examples/config.yml
+ ```
+
+ ### Send request
+
+ ```bash
+ curl -X POST http://localhost:7000/infer/modelA \
+   -H "Content-Type: application/json" \
+   -d '{"text":"hello"}'
+ ```
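Batched models expect a specific upstream contract: the proxy coalesces single-item requests into one POST whose body is `{"batch": [...]}` and reads back `{"results": [...]}` in the same order (see the batch worker in `app.py`). A minimal compatible backend sketch; only the route shapes come from the proxy code, the `echo` behavior is hypothetical:

```python
from fastapi import FastAPI

app = FastAPI()


@app.get("/health")
async def health():
    # The proxy's healthcheck accepts any 2xx/3xx response.
    return {"ok": True}


@app.post("/infer/{model}")
async def infer(model: str, payload: dict):
    if "batch" in payload:
        # One result per batched payload, same order.
        return {"results": [{"echo": p} for p in payload["batch"]]}
    return {"echo": payload}
```

Requests that already carry a top-level `batch` key skip the queue and are forwarded as-is.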
+
+ ## Architecture
+
+ ```
+ Client ──POST /infer/{model}──► FastAPI
+                │
+      ┌─────────▼──────────┐
+      │    ModelRouter     │
+      │  choose_backend()  │
+      │  (score = latency  │
+      │   + active_req*5)  │
+      └─────────┬──────────┘
+                │ backend URL
+      ┌─────────▼──────────┐
+      │   forward_json()   │
+      │  (httpx conn pool) │
+      └─────────┬──────────┘
+                │
+        Backend ML server
+
+ ┌──────────────────┐   ┌──────────────────┐
+ │    BatchQueue    │   │   Healthcheck    │
+ │  (optional per   │   │  (concurrent,    │
+ │   model pool)    │   │   per-backend)   │
+ └──────────────────┘   └──────────────────┘
+ ```
+
+ ## Config
+
+ See `examples/config.yml`.
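The example file is not included in the wheel itself; a representative config matching the schema in `config.py` (hosts, ports, and the model name are placeholders) would be:

```yaml
listen: "0.0.0.0:7000"

models:
  modelA:
    sla_ms: 300            # SLA target in milliseconds
    batching:
      enabled: true
      max_batch_size: 8    # flush when this many requests are queued...
      max_wait_ms: 20      # ...or after this many milliseconds
    backends:
      - url: "http://127.0.0.1:9001"
      - url: "http://127.0.0.1:9002"

metrics:
  enabled: true
  path: /metrics

healthcheck:
  enabled: true
  interval_seconds: 5
  timeout_seconds: 2
  path: /health
```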
+
+ ## Changelog
+
+ ### 0.1.1
+
+ - **Lifespan pattern**: Migrated from deprecated `@app.on_event("startup")` to FastAPI `lifespan` context manager.
+ - **Graceful shutdown**: Batch workers and healthcheck loop are properly cancelled on shutdown.
+ - **Connection pooling**: Shared `httpx.AsyncClient` singletons for proxy and healthcheck (was creating a client per request/check).
+ - **Concurrent health checks**: Backends checked in parallel via `asyncio.gather` (was sequential).
+ - **Logging**: Added structured `logging` throughout; `--log-level` CLI option.
+ - **Bare except fixes**: All `except Exception` blocks re-raise `asyncio.CancelledError`.
+ - **Deprecated API fixes**: Replaced `asyncio.get_event_loop()` with `asyncio.get_running_loop()` in batching module.
+ - **Build system**: Migrated from `setuptools` to `hatchling`. Added classifiers, keywords, optional dev/test deps, ruff/pytest config.
+ - **Tests**: Expanded from 1 test to 15+ tests covering config, router, batching, proxy, healthcheck, and backends.
+
+ ### 0.1.0
+
+ - Initial release: JSON inference proxy, model pools, SLA-aware routing, micro-batching, health checks, Prometheus metrics.
+
+ ## License
+
+ MIT
mlproxy_py-0.1.1.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+ mlproxy_py/__init__.py,sha256=rnObPjuBcEStqSO0S6gsdS_ot8ITOQjVj_-P1LUUYpg,22
+ mlproxy_py/app.py,sha256=87Tf2wdWftSzPBFKmE6Z830DG9LuDF_1e5GPZ4XGyFE,4074
+ mlproxy_py/backends.py,sha256=W4SccgOYpjaHHHjA3BENSpqvqhJA3KsPNkSlpcWVtYQ,433
+ mlproxy_py/batching.py,sha256=eS-5UOekkS3KJO6aSU4bcHp1IhuC7ZAK5zFt370SQeU,1564
+ mlproxy_py/cli.py,sha256=S7II83cmqQpt4a5oymSYFYragKMlglwIY5HQDNHkKeI,2591
+ mlproxy_py/config.py,sha256=QdHOGGoaHwinHhexVZD0q8fSebsZZLNDHufS1R1pCbI,1151
+ mlproxy_py/healthcheck.py,sha256=KsYQgX7CQjBZ5t9OH41Cg-asQq5q-9divCD0x-gAEc8,1595
+ mlproxy_py/metrics.py,sha256=-R_KAKm2bY6hneDw1tWQ3Y-2CdWJ4fjM4uJQtsOeMdA,697
+ mlproxy_py/proxy.py,sha256=9sev4XvTdkNcrdyVDCTDtLjakTy6PCA-17rMoW-Kshg,1184
+ mlproxy_py/router.py,sha256=G2pXMmcJ8uSxuFxdylY6sgyLvQljOZ5VKYr8rwJZLVo,770
+ mlproxy_py-0.1.1.dist-info/METADATA,sha256=cOPh380QHzWFA2L-joqIZNZNDDQ73IBaxdP6kwaAU8E,5406
+ mlproxy_py-0.1.1.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
+ mlproxy_py-0.1.1.dist-info/entry_points.txt,sha256=AqwSTT4aMX5pviguPmAZcQyBTNBjysYbJJ6fIcGNlSE,47
+ mlproxy_py-0.1.1.dist-info/licenses/LICENSE,sha256=3J2rBqh6X_suoDNzlioYNHzH04qbE-xgJsZEgFsYuds,1083
+ mlproxy_py-0.1.1.dist-info/RECORD,,
mlproxy_py-0.1.1.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.29.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
mlproxy_py-0.1.1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ mlproxy = mlproxy_py.cli:app
mlproxy_py-0.1.1.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Felix
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.