mlx-omni-server 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlx_omni_server-0.1.0/PKG-INFO +153 -0
- mlx_omni_server-0.1.0/README.md +120 -0
- mlx_omni_server-0.1.0/pyproject.toml +45 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/__init__.py +0 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/__init__.py +0 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/__init__.py +0 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/chat.py +44 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/images.py +29 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/models.py +65 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/stt.py +40 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/tts.py +48 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/api/routers.py +10 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/main.py +23 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/middleware/logging.py +113 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/schemas/__init__.py +0 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/schemas/chat_schema.py +133 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/schemas/images_schema.py +79 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/schemas/models_schema.py +32 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/schemas/stt_schema.py +137 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/schemas/tools_schema.py +69 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/schemas/tts_schema.py +45 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/__init__.py +0 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/base_models.py +39 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/mlx_model.py +237 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/models.py +41 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/tools/chat_tokenizer.py +52 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/tools/hugging_face.py +84 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/tools/llama3.py +62 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/tools/mistral.py +101 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat/tools/qwen2.py +82 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/chat_service.py +28 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/images_service.py +170 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/models_service.py +204 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/stt_service.py +146 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/services/tts_service.py +51 -0
- mlx_omni_server-0.1.0/src/mlx_omni_server/utils/logger.py +30 -0

+++ mlx_omni_server-0.1.0/PKG-INFO
@@ -0,0 +1,153 @@
+Metadata-Version: 2.1
+Name: mlx-omni-server
+Version: 0.1.0
+Summary:
+Home-page: https://github.com/madroidmaq/mlx-omni-server
+License: MIT
+Keywords: mlx,ai,agi,aigc,server,openai,tts,stt
+Author: madroid
+Author-email: madroidmaq@gmail.com
+Requires-Python: >=3.11,<4.0
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: diffusionkit (>=0.5.1,<0.6.0)
+Requires-Dist: f5-tts-mlx (>=0.1.7,<0.2.0)
+Requires-Dist: fastapi (>=0.115.4,<0.116.0)
+Requires-Dist: huggingface-hub (>=0.26.2,<0.27.0)
+Requires-Dist: mlx-lm (>=0.20.0,<0.21.0)
+Requires-Dist: mlx-whisper (>=0.4.1,<0.5.0)
+Requires-Dist: numba (>=0.57.0)
+Requires-Dist: pydantic (>=2.9.2,<3.0.0)
+Requires-Dist: python-multipart (>=0.0.17,<0.0.18)
+Requires-Dist: sse-starlette (>=2.1.3,<3.0.0)
+Requires-Dist: uvicorn (>=0.32.0,<0.33.0)
+Project-URL: Repository, https://github.com/madroidmaq/mlx-omni-server
+Description-Content-Type: text/markdown
+
+# MLX Omni Server
+
+MLX Omni Server is a local inference server powered by Apple's MLX framework, specifically designed for Apple Silicon (M-series) chips. It implements
+OpenAI-compatible API endpoints, enabling seamless integration with existing OpenAI SDK clients while leveraging the power of local ML inference.
+
+## Features
+
+- 🚀 **Apple Silicon Optimized**: Built on the MLX framework, optimized for M1/M2/M3/M4 series chips
+- 🔌 **OpenAI API Compatible**: Drop-in replacement for OpenAI API endpoints
+- 🎯 **Multiple AI Capabilities**:
+  - Audio Processing:
+    - Text-to-Speech (TTS)
+    - Speech-to-Text (STT/ASR)
+  - Chat Completion
+  - Image Generation
+- ⚡ **High Performance**: Local inference with hardware acceleration
+- 🔐 **Privacy-First**: All processing happens locally on your machine
+- 🛠 **SDK Support**: Works with the official OpenAI SDK and other compatible clients
+
+## Supported API Endpoints
+
+The server implements OpenAI-compatible endpoints:
+
+- [Chat](https://platform.openai.com/docs/api-reference/chat)
+  - 🚧 `/v1/chat/completions` - Chat completions
+- [Audio](https://platform.openai.com/docs/api-reference/audio)
+  - ✅ `/v1/audio/speech` - Text-to-Speech
+  - ✅ `/v1/audio/transcriptions` - Speech-to-Text
+- [Models](https://platform.openai.com/docs/api-reference/models/list)
+  - ✅ `/v1/models` - List models
+  - ✅ `/v1/models/{model}` - Retrieve or Delete model
+- [Images](https://platform.openai.com/docs/api-reference/images)
+  - ✅ `/v1/images/generations` - Image generation
+
+## Installation
+
+```bash
+# Install using pip
+pip install mlx-omni-server
+
+# Or install using poetry
+poetry add mlx-omni-server
+```
+
+## Quick Start
+
+1. Start the server:
+
+```bash
+# If installed via pip as a package
+mlx-omni-server start
+
+# If installed via poetry (recommended during development)
+poetry run start
+```
+
+2. Use with OpenAI SDK:
+
+```python
+from openai import OpenAI
+
+# Configure client to use local server
+client = OpenAI(
+    base_url="http://localhost:10240/v1",  # Point to local server
+    api_key="not-needed"  # API key is not required for local server
+)
+
+# Text-to-Speech Example
+response = client.audio.speech.create(
+    model="lucasnewman/f5-tts-mlx",
+    input="Hello, welcome to MLX Omni Server!"
+)
+
+# Speech-to-Text Example
+audio_file = open("speech.mp3", "rb")
+transcript = client.audio.transcriptions.create(
+    model="mlx-community/whisper-large-v3-turbo",
+    file=audio_file
+)
+
+# Chat Completion Example
+chat_completion = client.chat.completions.create(
+    model="meta-llama/Llama-3.2-3B-Instruct",
+    messages=[
+        {"role": "user", "content": "What can you do?"}
+    ]
+)
+
+# Image Generation Example
+image_response = client.images.generate(
+    model="argmaxinc/mlx-FLUX.1-schnell",
+    prompt="A serene landscape with mountains and a lake",
+    n=1,
+    size="512x512"
+)
+```
+
+You can view more examples in [examples](examples).
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to
+change.
+
+## License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+## Acknowledgments
+
+- Built with [MLX](https://github.com/ml-explore/mlx) by Apple
+- API design inspired by [OpenAI](https://openai.com)
+- Uses [FastAPI](https://fastapi.tiangolo.com/) for the server implementation
+- Text-to-Speech powered by [lucasnewman/f5-tts-mlx](https://github.com/lucasnewman/f5-tts-mlx)
+- Speech-to-Text powered by [mlx-whisper](https://github.com/ml-explore/mlx-examples/tree/main/whisper)
+
+## Disclaimer
+
+This project is not affiliated with or endorsed by OpenAI or Apple. It's an independent implementation that provides OpenAI-compatible APIs using
+Apple's MLX framework.
+
+++ mlx_omni_server-0.1.0/README.md
@@ -0,0 +1,120 @@
+# MLX Omni Server
+
+MLX Omni Server is a local inference server powered by Apple's MLX framework, specifically designed for Apple Silicon (M-series) chips. It implements
+OpenAI-compatible API endpoints, enabling seamless integration with existing OpenAI SDK clients while leveraging the power of local ML inference.
+
+## Features
+
+- 🚀 **Apple Silicon Optimized**: Built on the MLX framework, optimized for M1/M2/M3/M4 series chips
+- 🔌 **OpenAI API Compatible**: Drop-in replacement for OpenAI API endpoints
+- 🎯 **Multiple AI Capabilities**:
+  - Audio Processing:
+    - Text-to-Speech (TTS)
+    - Speech-to-Text (STT/ASR)
+  - Chat Completion
+  - Image Generation
+- ⚡ **High Performance**: Local inference with hardware acceleration
+- 🔐 **Privacy-First**: All processing happens locally on your machine
+- 🛠 **SDK Support**: Works with the official OpenAI SDK and other compatible clients
+
+## Supported API Endpoints
+
+The server implements OpenAI-compatible endpoints:
+
+- [Chat](https://platform.openai.com/docs/api-reference/chat)
+  - 🚧 `/v1/chat/completions` - Chat completions
+- [Audio](https://platform.openai.com/docs/api-reference/audio)
+  - ✅ `/v1/audio/speech` - Text-to-Speech
+  - ✅ `/v1/audio/transcriptions` - Speech-to-Text
+- [Models](https://platform.openai.com/docs/api-reference/models/list)
+  - ✅ `/v1/models` - List models
+  - ✅ `/v1/models/{model}` - Retrieve or Delete model
+- [Images](https://platform.openai.com/docs/api-reference/images)
+  - ✅ `/v1/images/generations` - Image generation
+
+## Installation
+
+```bash
+# Install using pip
+pip install mlx-omni-server
+
+# Or install using poetry
+poetry add mlx-omni-server
+```
+
+## Quick Start
+
+1. Start the server:
+
+```bash
+# If installed via pip as a package
+mlx-omni-server start
+
+# If installed via poetry (recommended during development)
+poetry run start
+```
+
+2. Use with OpenAI SDK:
+
+```python
+from openai import OpenAI
+
+# Configure client to use local server
+client = OpenAI(
+    base_url="http://localhost:10240/v1",  # Point to local server
+    api_key="not-needed"  # API key is not required for local server
+)
+
+# Text-to-Speech Example
+response = client.audio.speech.create(
+    model="lucasnewman/f5-tts-mlx",
+    input="Hello, welcome to MLX Omni Server!"
+)
+
+# Speech-to-Text Example
+audio_file = open("speech.mp3", "rb")
+transcript = client.audio.transcriptions.create(
+    model="mlx-community/whisper-large-v3-turbo",
+    file=audio_file
+)
+
+# Chat Completion Example
+chat_completion = client.chat.completions.create(
+    model="meta-llama/Llama-3.2-3B-Instruct",
+    messages=[
+        {"role": "user", "content": "What can you do?"}
+    ]
+)
+
+# Image Generation Example
+image_response = client.images.generate(
+    model="argmaxinc/mlx-FLUX.1-schnell",
+    prompt="A serene landscape with mountains and a lake",
+    n=1,
+    size="512x512"
+)
+```
+
+You can view more examples in [examples](examples).
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to
+change.
+
+## License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+## Acknowledgments
+
+- Built with [MLX](https://github.com/ml-explore/mlx) by Apple
+- API design inspired by [OpenAI](https://openai.com)
+- Uses [FastAPI](https://fastapi.tiangolo.com/) for the server implementation
+- Text-to-Speech powered by [lucasnewman/f5-tts-mlx](https://github.com/lucasnewman/f5-tts-mlx)
+- Speech-to-Text powered by [mlx-whisper](https://github.com/ml-explore/mlx-examples/tree/main/whisper)
+
+## Disclaimer
+
+This project is not affiliated with or endorsed by OpenAI or Apple. It's an independent implementation that provides OpenAI-compatible APIs using
+Apple's MLX framework.
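
As a quick sanity check of the Quick Start above (a sketch, not part of the package), the same client can list the locally available models through `/v1/models`:

```python
from openai import OpenAI

# Minimal sketch: verify the server started above is reachable.
client = OpenAI(base_url="http://localhost:10240/v1", api_key="not-needed")

# /v1/models lists the models the server can serve from the local cache.
for model in client.models.list():
    print(model.id)
```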
+++ mlx_omni_server-0.1.0/pyproject.toml
@@ -0,0 +1,45 @@
+[tool.poetry]
+name = "mlx-omni-server"
+version = "0.1.0"
+description = ""
+authors = ["madroid <madroidmaq@gmail.com>"]
+readme = "README.md"
+license = "MIT"
+repository = "https://github.com/madroidmaq/mlx-omni-server"
+keywords = ["mlx", "ai", "agi", "aigc", "server", "openai", "tts", "stt"]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Intended Audience :: Developers",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.11",
+]
+packages = [{ include = "mlx_omni_server", from = "src" }]
+
+[tool.poetry.dependencies]
+python = "^3.11"
+fastapi = "^0.115.4"
+python-multipart = "^0.0.17"
+pydantic = "^2.9.2"
+f5-tts-mlx = "^0.1.7"
+uvicorn = "^0.32.0"
+numba = ">=0.57.0"
+mlx-whisper = "^0.4.1"
+mlx-lm = "^0.20.0"
+huggingface-hub = "^0.26.2"
+diffusionkit = "^0.5.1"
+sse-starlette = "^2.1.3"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^8.3.3"
+httpx = "^0.27.2"
+pre-commit = "^4.0.1"
+black = "^24.10.0"
+isort = "^5.13.2"
+
+[tool.poetry.scripts]
+start = "mlx_omni_server.main:start"
+mlx-omni-server = "mlx_omni_server.main:start"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"

+++ mlx_omni_server-0.1.0/src/mlx_omni_server/__init__.py
File without changes

+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/__init__.py
File without changes

+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/__init__.py
File without changes

+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/chat.py
@@ -0,0 +1,44 @@
+import json
+from typing import AsyncGenerator
+
+from fastapi import APIRouter
+from fastapi.responses import JSONResponse, StreamingResponse
+
+from ...schemas.chat_schema import ChatCompletionRequest, ChatCompletionResponse
+from ...services.chat.models import load_model
+from ...services.chat_service import ChatService
+
+router = APIRouter(tags=["chat-completions"])
+
+
+@router.post("/chat/completions", response_model=ChatCompletionResponse)
+@router.post("/v1/chat/completions", response_model=ChatCompletionResponse)
+async def create_chat_completion(request: ChatCompletionRequest):
+    """Create a chat completion"""
+
+    chat_service = _create_chat_service(request.model)
+
+    if not request.stream:
+        completion = await chat_service.generate_completion(request)
+        return JSONResponse(content=completion.model_dump(exclude_none=True))
+
+    async def event_generator() -> AsyncGenerator[str, None]:
+        async for chunk in chat_service.generate_stream(request):
+            if chunk.choices[0].finish_reason == "stop":
+                yield "data: [DONE]\n\n"
+            else:
+                yield f"data: {json.dumps(chunk.model_dump(exclude_none=True))}\n\n"
+
+    return StreamingResponse(
+        event_generator(),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+        },
+    )
+
+
+def _create_chat_service(model_id: str):
+    model = load_model(model_id)
+    return ChatService(model)
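
The streaming branch above emits OpenAI-style server-sent events and ends the stream with a `data: [DONE]` sentinel, so the OpenAI SDK's standard streaming interface should be able to consume it. A minimal client sketch, assuming the server is running and the model from the README's Quick Start is in the local cache:

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:10240/v1", api_key="not-needed")

# stream=True makes the endpoint return the SSE stream produced by
# event_generator(); the SDK stops iterating at the [DONE] sentinel.
stream = client.chat.completions.create(
    model="meta-llama/Llama-3.2-3B-Instruct",
    messages=[{"role": "user", "content": "What can you do?"}],
    stream=True,
)
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```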
+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/images.py
@@ -0,0 +1,29 @@
+import time
+
+from fastapi import APIRouter, HTTPException
+
+from ...schemas.images_schema import ImageGenerationRequest, ImageGenerationResponse
+from ...services.images_service import ImagesService
+
+router = APIRouter(tags=["images"])
+
+
+@router.post("/images/generations")
+@router.post("/v1/images/generations")
+async def create_image(request: ImageGenerationRequest) -> ImageGenerationResponse:
+    """
+    Creates an image given a prompt.
+    """
+    try:
+        service = ImagesService()
+
+        # Generate images
+        images = service.generate_images(request)
+
+        # Create response
+        return ImageGenerationResponse(created=int(time.time()), data=images)
+
+    except ValueError as ve:
+        raise HTTPException(status_code=400, detail=str(ve))
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/models.py
@@ -0,0 +1,65 @@
+from fastapi import APIRouter, HTTPException, Request
+
+from ...schemas.models_schema import Model, ModelDeletion, ModelList
+from ...services.models_service import ModelsService
+
+router = APIRouter(tags=["models"])
+models_service = ModelsService()
+
+
+def extract_model_id_from_path(request: Request) -> str:
+    """Extract full model ID from request path"""
+    path = request.url.path
+    prefix = "/v1/models/" if "/v1/models/" in path else "/models/"
+    return path[len(prefix) :]
+
+
+def handle_model_error(e: Exception) -> None:
+    """Handle model-related errors and raise appropriate HTTP exceptions"""
+    if isinstance(e, ValueError):
+        raise HTTPException(status_code=404, detail=str(e))
+    print(f"Error processing request: {str(e)}")
+    raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get("/models", response_model=ModelList)
+@router.get("/v1/models", response_model=ModelList)
+async def list_models() -> ModelList:
+    """
+    Lists the currently available models, and provides basic information about each one
+    such as the owner and availability.
+    """
+    try:
+        return models_service.list_models()
+    except Exception as e:
+        handle_model_error(e)
+
+
+@router.get("/models/{model_id:path}", response_model=Model)
+@router.get("/v1/models/{model_id:path}", response_model=Model)
+async def get_model(request: Request) -> Model:
+    """
+    Retrieves a model instance, providing basic information about the model such as
+    the owner and permissioning.
+    """
+    try:
+        model_id = extract_model_id_from_path(request)
+        model = models_service.get_model(model_id)
+        if model is None:
+            raise ValueError(f"Model '{model_id}' not found")
+        return model
+    except Exception as e:
+        handle_model_error(e)
+
+
+@router.delete("/models/{model_id:path}", response_model=ModelDeletion)
+@router.delete("/v1/models/{model_id:path}", response_model=ModelDeletion)
+async def delete_model(request: Request) -> ModelDeletion:
+    """
+    Delete a fine-tuned model from local cache.
+    """
+    try:
+        model_id = extract_model_id_from_path(request)
+        return models_service.delete_model(model_id)
+    except Exception as e:
+        handle_model_error(e)
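
The `{model_id:path}` converter together with `extract_model_id_from_path` exists because Hugging Face model IDs contain a slash, which FastAPI's default path matching would otherwise split into separate segments. A sketch of what this enables, assuming the example model is present in the local cache:

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:10240/v1", api_key="not-needed")

# The slash in the ID survives routing thanks to {model_id:path}, so this
# resolves to GET /v1/models/mlx-community/whisper-large-v3-turbo.
model = client.models.retrieve("mlx-community/whisper-large-v3-turbo")
print(model.id, model.owned_by)

# Maps to DELETE /v1/models/{model_id}; per the docstring above, this
# removes the model from the local cache.
client.models.delete("mlx-community/whisper-large-v3-turbo")
```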
+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/stt.py
@@ -0,0 +1,40 @@
+from fastapi import APIRouter, Depends, HTTPException
+from fastapi.responses import JSONResponse, Response
+from starlette.responses import PlainTextResponse
+
+from mlx_omni_server.schemas.stt_schema import (
+    ResponseFormat,
+    STTRequestForm,
+    TranscriptionResponse,
+)
+from mlx_omni_server.services.stt_service import STTService
+
+router = APIRouter(tags=["speech-to-text"])
+
+
+@router.post("/audio/transcriptions", response_model=TranscriptionResponse)
+@router.post("/v1/audio/transcriptions", response_model=TranscriptionResponse)
+async def create_transcription(request: STTRequestForm = Depends()):
+    """
+    Transcribe audio file to text.
+    """
+    stt_service = STTService()
+    try:
+        result = await stt_service.transcribe(request)
+
+        # Return appropriate response based on format
+        if request.response_format == ResponseFormat.TEXT:
+            return PlainTextResponse(content=result)
+        elif request.response_format in (ResponseFormat.SRT, ResponseFormat.VTT):
+            return Response(
+                content=result,
+                media_type="text/plain",
+                headers={
+                    "Content-Disposition": f'attachment; filename="transcription.{request.response_format.value.lower()}"'
+                },
+            )
+        else:  # JSON and VERBOSE_JSON
+            return JSONResponse(content=result)
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
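
The SRT/VTT branch in the handler above returns subtitle text as a plain-text attachment rather than JSON. A sketch of requesting it through the OpenAI SDK; the audio file name is a placeholder, and the exact return type of `create` for non-JSON formats varies by SDK version:

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:10240/v1", api_key="not-needed")

# response_format="srt" selects the attachment branch above, so the
# body is subtitle text instead of a JSON transcription object.
with open("speech.mp3", "rb") as audio_file:
    subtitles = client.audio.transcriptions.create(
        model="mlx-community/whisper-large-v3-turbo",
        file=audio_file,
        response_format="srt",
    )

# Recent SDK versions return the raw text for non-JSON formats.
print(subtitles)
```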
+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/endpoints/tts.py
@@ -0,0 +1,48 @@
+import io
+
+from fastapi import APIRouter, HTTPException
+from fastapi.responses import StreamingResponse
+
+from mlx_omni_server.schemas.tts_schema import AudioFormat, TTSRequest
+from mlx_omni_server.services.tts_service import TTSService
+
+router = APIRouter(tags=["text-to-speech"])
+
+
+@router.post("/audio/speech")
+@router.post("/v1/audio/speech")
+async def create_speech(request: TTSRequest):
+    """
+    Generate audio from input text.
+
+    Returns:
+        StreamingResponse: Audio file content in the requested format
+    """
+    tts_service = TTSService()
+
+    try:
+        audio_content = await tts_service.generate_speech(
+            request=request,
+        )
+
+        # Create content type mapping
+        content_type_mapping = {
+            AudioFormat.MP3: "audio/mpeg",
+            AudioFormat.OPUS: "audio/opus",
+            AudioFormat.AAC: "audio/aac",
+            AudioFormat.FLAC: "audio/flac",
+            AudioFormat.WAV: "audio/wav",
+            AudioFormat.PCM: "audio/pcm",
+        }
+
+        # Create response
+        return StreamingResponse(
+            io.BytesIO(audio_content),
+            media_type=content_type_mapping[request.response_format],
+            headers={
+                "Content-Disposition": f'attachment; filename="speech.{request.response_format.value}"'
+            },
+        )
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
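
Because the endpoint above streams raw audio bytes with a format-specific content type, a client only needs to write the response body to disk. A sketch based on the README's TTS example; the `voice` argument is an assumption here (recent OpenAI SDKs require one, and how the schema treats it is not shown in this diff):

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:10240/v1", api_key="not-needed")

# For response_format="wav" the handler returns the bytes with media
# type audio/wav and an attachment filename of speech.wav.
response = client.audio.speech.create(
    model="lucasnewman/f5-tts-mlx",
    input="Hello, welcome to MLX Omni Server!",
    voice="alloy",  # assumption: required by the SDK signature
    response_format="wav",
)
with open("speech.wav", "wb") as f:
    f.write(response.content)
```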
+++ mlx_omni_server-0.1.0/src/mlx_omni_server/api/routers.py
@@ -0,0 +1,10 @@
+from fastapi import APIRouter
+
+from .endpoints import chat, images, models, stt, tts
+
+api_router = APIRouter()
+api_router.include_router(stt.router)
+api_router.include_router(tts.router)
+api_router.include_router(models.router)
+api_router.include_router(images.router)
+api_router.include_router(chat.router)
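
Each endpoint module above registers its routes twice, so everything this router aggregates is reachable both with and without the `/v1` prefix. A quick check sketch using httpx (a dev dependency of the package), against a server running on the default address from `main.py`:

```python
import httpx

# Each endpoint module registers its routes twice, so both of these
# should return the same model list from a running server.
for prefix in ("", "/v1"):
    r = httpx.get(f"http://localhost:10240{prefix}/models")
    print(prefix or "(no prefix)", r.status_code)
```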
+++ mlx_omni_server-0.1.0/src/mlx_omni_server/main.py
@@ -0,0 +1,23 @@
+import logging
+
+import uvicorn
+from fastapi import FastAPI
+
+from .api.routers import api_router
+from .middleware.logging import RequestResponseLoggingMiddleware
+
+app = FastAPI(title="MLX Omni Server")
+
+# Add request/response logging middleware with custom levels
+app.add_middleware(
+    RequestResponseLoggingMiddleware,
+    request_level=logging.DEBUG,
+    response_level=logging.DEBUG,
+    # exclude_paths=["/health"]
+)
+
+app.include_router(api_router)
+
+
+def start():
+    uvicorn.run("mlx_omni_server.main:app", host="0.0.0.0", port=10240, reload=True)
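
Both console scripts declared in `pyproject.toml` (`start` and `mlx-omni-server`) resolve to `mlx_omni_server.main:start`, so the same entry point can also be invoked programmatically; a minimal sketch:

```python
# Equivalent to `poetry run start` or the mlx-omni-server console script:
# runs uvicorn on 0.0.0.0:10240 with auto-reload, as defined in main.py.
from mlx_omni_server.main import start

if __name__ == "__main__":
    start()
```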