dwani 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dwani-0.1.1/LICENSE +21 -0
- dwani-0.1.1/PKG-INFO +193 -0
- dwani-0.1.1/README.md +152 -0
- dwani-0.1.1/pyproject.toml +28 -0
- dwani-0.1.1/setup.cfg +4 -0
- dwani-0.1.1/src/__init__.py +39 -0
- dwani-0.1.1/src/asr.py +34 -0
- dwani-0.1.1/src/audio.py +29 -0
- dwani-0.1.1/src/chat.py +17 -0
- dwani-0.1.1/src/client.py +41 -0
- dwani-0.1.1/src/docs.py +70 -0
- dwani-0.1.1/src/dwani.egg-info/PKG-INFO +193 -0
- dwani-0.1.1/src/dwani.egg-info/SOURCES.txt +16 -0
- dwani-0.1.1/src/dwani.egg-info/dependency_links.txt +1 -0
- dwani-0.1.1/src/dwani.egg-info/requires.txt +6 -0
- dwani-0.1.1/src/dwani.egg-info/top_level.txt +8 -0
- dwani-0.1.1/src/exceptions.py +5 -0
- dwani-0.1.1/src/vision.py +21 -0
dwani-0.1.1/LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
MIT License
|
2
|
+
|
3
|
+
Copyright (c) 2025 Sachin Shetty
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
13
|
+
copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21
|
+
SOFTWARE.
|
dwani-0.1.1/PKG-INFO
ADDED
@@ -0,0 +1,193 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: dwani
|
3
|
+
Version: 0.1.1
|
4
|
+
Summary: Multimodal AI server for Indian languages (speech, vision, LLMs, TTS, ASR, etc.)
|
5
|
+
Author-email: sachin <python@dwani.ai>
|
6
|
+
License: MIT License
|
7
|
+
|
8
|
+
Copyright (c) 2025 Sachin Shetty
|
9
|
+
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
12
|
+
in the Software without restriction, including without limitation the rights
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
15
|
+
furnished to do so, subject to the following conditions:
|
16
|
+
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
18
|
+
copies or substantial portions of the Software.
|
19
|
+
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
26
|
+
SOFTWARE.
|
27
|
+
|
28
|
+
Project-URL: Homepage, https://github.com/dwani-ai/dwani-server
|
29
|
+
Project-URL: Source, https://github.com/dwani-ai/dwani-server
|
30
|
+
Project-URL: Issues, https://github.com/dwani-ai/dwani-server/issues
|
31
|
+
Requires-Python: >=3.10
|
32
|
+
Description-Content-Type: text/markdown
|
33
|
+
License-File: LICENSE
|
34
|
+
Requires-Dist: fastapi>=0.95.0
|
35
|
+
Requires-Dist: uvicorn[standard]>=0.22.0
|
36
|
+
Requires-Dist: pydantic>=2.0.0
|
37
|
+
Requires-Dist: requests>=2.25.0
|
38
|
+
Requires-Dist: python-multipart>=0.0.5
|
39
|
+
Requires-Dist: pydantic-settings>=2.0.0
|
40
|
+
Dynamic: license-file
|
41
|
+
|
42
|
+
# Dhwani Server
|
43
|
+
|
44
|
+
Dhwani API is a FastAPI-based application providing AI-powered services for Indian languages, including text-to-speech (TTS), language model (LLM) chat, vision-language model (VLM) capabilities, and automatic speech recognition (ASR). It supports lazy loading of models for fast startup and includes endpoints for various tasks.
|
45
|
+
|
46
|
+
## Features
|
47
|
+
- **Text-to-Speech (TTS)**: Generate audio from text in Indian languages using Parler TTS.
|
48
|
+
- **Chat**: Process Kannada prompts and respond in Kannada via translation and LLM.
|
49
|
+
- **Vision-Language Model (VLM)**: Caption images, answer visual queries, detect, and point objects.
|
50
|
+
- **Automatic Speech Recognition (ASR)**: Transcribe audio files in multiple Indian languages.
|
51
|
+
- **Lazy Loading**: Models load on-demand or via an explicit endpoint for fast startup.
|
52
|
+
|
53
|
+
## Prerequisites
|
54
|
+
- **System Requirements - User **:
|
55
|
+
- **Python**: 3.10
|
56
|
+
- Ubuntu 22.04
|
57
|
+
- git
|
58
|
+
- vscode
|
59
|
+
- **System Requirements - Server **:
|
60
|
+
- Ubuntu with sufficient RAM (16GB+ recommended for models).
|
61
|
+
- Optional: NVIDIA GPU with CUDA support for faster inference.
|
62
|
+
- **FFmpeg**: Required for audio processing (ASR).
|
63
|
+
|
64
|
+
- Server Setup
|
65
|
+
```bash
|
66
|
+
export HF_HOME=/home/ubuntu/data-dhwani-models
|
67
|
+
export HF_TOKEN='Your-HF-token'
|
68
|
+
python src/server/main.py --host 0.0.0.0 --port 7860 --config config_two
|
69
|
+
```
|
70
|
+
## Installation
|
71
|
+
|
72
|
+
1. **Clone the Repository**:
|
73
|
+
```bash
|
74
|
+
git clone https://github.com/slabstech/dhwani-server
|
75
|
+
cd dhwani-server
|
76
|
+
```
|
77
|
+
|
78
|
+
2. Install Libraries:
|
79
|
+
- On Ubuntu: ```sudo apt-get install ffmpeg build-essential```
|
80
|
+
|
81
|
+
3. Set Up Virtual Environment:
|
82
|
+
```bash
|
83
|
+
python -m venv venv
|
84
|
+
source venv/bin/activate
|
85
|
+
```
|
86
|
+
4. Install Dependencies:
|
87
|
+
```bash
|
88
|
+
sudo apt-get install -y ffmpeg build-essential
|
89
|
+
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --profile minimal
|
90
|
+
. "$HOME/.cargo/env"
|
91
|
+
export CC=/usr/bin/gcc
|
92
|
+
export CXX=/usr/bin/g++
|
93
|
+
```
|
94
|
+
```bash
|
95
|
+
pip install --no-cache-dir --upgrade pip setuptools psutil setuptools-rust torch==2.6.0
|
96
|
+
pip install --no-cache-dir flash-attn --no-build-isolation
|
97
|
+
```
|
98
|
+
|
99
|
+
```bash
|
100
|
+
pip install -r requirements.txt
|
101
|
+
```
|
102
|
+
|
103
|
+
4. Set Environment Variable:
|
104
|
+
Create a .env file in the root directory and add your API key:
|
105
|
+
plaintext
|
106
|
+
```bash
|
107
|
+
API_KEY=your_secret_key
|
108
|
+
```
|
109
|
+
|
110
|
+
5. Running the Server
|
111
|
+
- Start the Server:
|
112
|
+
```bash
|
113
|
+
python src/server/main.py --host 0.0.0.0 --port 7860 --config config_two
|
114
|
+
```
|
115
|
+
|
116
|
+
- The server starts with models loaded on start
|
117
|
+
- Access the interactive API docs at http://localhost:7860/docs.
|
118
|
+
|
119
|
+
- (Optional) Load All Models:
|
120
|
+
Preload all models (LLM, Translation, TTS, VLM, ASR) with:
|
121
|
+
-
|
122
|
+
```bash
|
123
|
+
curl -X POST "http://localhost:7860/load_all_models" -H "X-API-Key: your_secret_key"
|
124
|
+
```
|
125
|
+
|
126
|
+
- Usage
|
127
|
+
- Endpoints
|
128
|
+
- All endpoints require the X-API-Key header with the value from your .env file.
|
129
|
+
|
130
|
+
- Health Check: GET /health
|
131
|
+
```bash
|
132
|
+
curl "http://localhost:7860/health"
|
133
|
+
```
|
134
|
+
- Response:
|
135
|
+
```bash
|
136
|
+
{"status": "healthy", "model": "Qwen/Qwen2.5-3B-Instruct"}
|
137
|
+
```
|
138
|
+
|
139
|
+
- Text-to-Speech: POST /v1/audio/speech
|
140
|
+
``` bash
|
141
|
+
curl -X POST "http://localhost:7860/v1/audio/speech" -H "X-API-Key: your_secret_key" -H "Content-Type: application/json" -d '{"input": "ನಮಸ್ಕಾರ", "voice": "Female voice", "model": "ai4bharat/indic-parler-tts", "response_format": "mp3"}' --output speech.mp3
|
142
|
+
```
|
143
|
+
- Chat: POST /chat
|
144
|
+
``` bash
|
145
|
+
curl -X POST "http://localhost:7860/chat" -H "X-API-Key: your_secret_key" -H "Content-Type: application/json" -d '{"prompt": "ನೀವು ಹೇಗಿದ್ದೀರಿ?"}'
|
146
|
+
```
|
147
|
+
|
148
|
+
- Response:
|
149
|
+
```{"response": "<Kannada response>"}```
|
150
|
+
- Image Captioning: POST /caption/
|
151
|
+
```bash
|
152
|
+
curl -X POST "http://localhost:7860/caption/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "length=short"
|
153
|
+
```
|
154
|
+
- Response:``` {"caption": "<short caption>"}```
|
155
|
+
- Visual Query: POST /visual_query/
|
156
|
+
```bash
|
157
|
+
curl -X POST "http://localhost:7860/visual_query/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "query=What is this?"
|
158
|
+
```
|
159
|
+
- Response: ```{"answer": "<answer>"}```
|
160
|
+
- Object Detection: POST /detect/
|
161
|
+
```bash
|
162
|
+
curl -X POST "http://localhost:7860/detect/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "object_type=face"
|
163
|
+
```
|
164
|
+
- Response: ```{"objects": [<list of detected objects>]}```
|
165
|
+
- Object Pointing: POST /point/
|
166
|
+
```bash
|
167
|
+
|
168
|
+
curl -X POST "http://localhost:7860/point/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "object_type=person"
|
169
|
+
```
|
170
|
+
- Response: ```{"points": [<list of points>]}```
|
171
|
+
- Transcription: POST /transcribe/
|
172
|
+
```bash
|
173
|
+
curl -X POST "http://localhost:7860/transcribe/?language=kannada" -H "X-API-Key: your_secret_key" -F "file=@audio.wav"
|
174
|
+
```
|
175
|
+
- Response: ```{"text": "<transcribed text>"}```
|
176
|
+
- Batch Transcription: POST /transcribe_batch/
|
177
|
+
```bash
|
178
|
+
curl -X POST "http://localhost:7860/transcribe_batch/?language=kannada" -H "X-API-Key: your_secret_key" -F "files=@audio1.wav" -F "files=@audio2.mp3"
|
179
|
+
```
|
180
|
+
- Response: ```{"transcriptions": ["<text1>", "<text2>"]}```
|
181
|
+
|
182
|
+
- Notes
|
183
|
+
- Lazy Loading: Models load on first use or via /load_all_models. Expect a delay on the first request for each model type.
|
184
|
+
Supported Languages: ASR supports multiple Indian languages (e.g., kannada, hindi, tamil); see models/asr.py for the full list.
|
185
|
+
Logs: Check dhwani_api.log for detailed logs (rotated at 10MB, 5 backups).
|
186
|
+
Performance: Use a GPU with flash-attn installed for faster TTS and ASR inference.
|
187
|
+
|
188
|
+
- Troubleshooting
|
189
|
+
|
190
|
+
- Module Errors: Ensure all dependencies are installed. Re-run pip install if needed.
|
191
|
+
FFmpeg Not Found: Install FFmpeg and ensure it’s in your PATH.
|
192
|
+
Permission Denied: Run with sudo if accessing restricted ports (e.g., < 1024).
|
193
|
+
|
dwani-0.1.1/README.md
ADDED
@@ -0,0 +1,152 @@
|
|
1
|
+
# Dhwani Server
|
2
|
+
|
3
|
+
Dhwani API is a FastAPI-based application providing AI-powered services for Indian languages, including text-to-speech (TTS), language model (LLM) chat, vision-language model (VLM) capabilities, and automatic speech recognition (ASR). It supports lazy loading of models for fast startup and includes endpoints for various tasks.
|
4
|
+
|
5
|
+
## Features
|
6
|
+
- **Text-to-Speech (TTS)**: Generate audio from text in Indian languages using Parler TTS.
|
7
|
+
- **Chat**: Process Kannada prompts and respond in Kannada via translation and LLM.
|
8
|
+
- **Vision-Language Model (VLM)**: Caption images, answer visual queries, detect, and point objects.
|
9
|
+
- **Automatic Speech Recognition (ASR)**: Transcribe audio files in multiple Indian languages.
|
10
|
+
- **Lazy Loading**: Models load on-demand or via an explicit endpoint for fast startup.
|
11
|
+
|
12
|
+
## Prerequisites
|
13
|
+
- **System Requirements - User **:
|
14
|
+
- **Python**: 3.10
|
15
|
+
- Ubuntu 22.04
|
16
|
+
- git
|
17
|
+
- vscode
|
18
|
+
- **System Requirements - Server **:
|
19
|
+
- Ubuntu with sufficient RAM (16GB+ recommended for models).
|
20
|
+
- Optional: NVIDIA GPU with CUDA support for faster inference.
|
21
|
+
- **FFmpeg**: Required for audio processing (ASR).
|
22
|
+
|
23
|
+
- Server Setup
|
24
|
+
```bash
|
25
|
+
export HF_HOME=/home/ubuntu/data-dhwani-models
|
26
|
+
export HF_TOKEN='Your-HF-token'
|
27
|
+
python src/server/main.py --host 0.0.0.0 --port 7860 --config config_two
|
28
|
+
```
|
29
|
+
## Installation
|
30
|
+
|
31
|
+
1. **Clone the Repository**:
|
32
|
+
```bash
|
33
|
+
git clone https://github.com/slabstech/dhwani-server
|
34
|
+
cd dhwani-server
|
35
|
+
```
|
36
|
+
|
37
|
+
2. Install Libraries:
|
38
|
+
- On Ubuntu: ```sudo apt-get install ffmpeg build-essential```
|
39
|
+
|
40
|
+
3. Set Up Virtual Environment:
|
41
|
+
```bash
|
42
|
+
python -m venv venv
|
43
|
+
source venv/bin/activate
|
44
|
+
```
|
45
|
+
4. Install Dependencies:
|
46
|
+
```bash
|
47
|
+
sudo apt-get install -y ffmpeg build-essential
|
48
|
+
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --profile minimal
|
49
|
+
. "$HOME/.cargo/env"
|
50
|
+
export CC=/usr/bin/gcc
|
51
|
+
export CXX=/usr/bin/g++
|
52
|
+
```
|
53
|
+
```bash
|
54
|
+
pip install --no-cache-dir --upgrade pip setuptools psutil setuptools-rust torch==2.6.0
|
55
|
+
pip install --no-cache-dir flash-attn --no-build-isolation
|
56
|
+
```
|
57
|
+
|
58
|
+
```bash
|
59
|
+
pip install -r requirements.txt
|
60
|
+
```
|
61
|
+
|
62
|
+
4. Set Environment Variable:
|
63
|
+
Create a .env file in the root directory and add your API key:
|
64
|
+
plaintext
|
65
|
+
```bash
|
66
|
+
API_KEY=your_secret_key
|
67
|
+
```
|
68
|
+
|
69
|
+
5. Running the Server
|
70
|
+
- Start the Server:
|
71
|
+
```bash
|
72
|
+
python src/server/main.py --host 0.0.0.0 --port 7860 --config config_two
|
73
|
+
```
|
74
|
+
|
75
|
+
- The server starts with models loaded on start
|
76
|
+
- Access the interactive API docs at http://localhost:7860/docs.
|
77
|
+
|
78
|
+
- (Optional) Load All Models:
|
79
|
+
Preload all models (LLM, Translation, TTS, VLM, ASR) with:
|
80
|
+
-
|
81
|
+
```bash
|
82
|
+
curl -X POST "http://localhost:7860/load_all_models" -H "X-API-Key: your_secret_key"
|
83
|
+
```
|
84
|
+
|
85
|
+
- Usage
|
86
|
+
- Endpoints
|
87
|
+
- All endpoints require the X-API-Key header with the value from your .env file.
|
88
|
+
|
89
|
+
- Health Check: GET /health
|
90
|
+
```bash
|
91
|
+
curl "http://localhost:7860/health"
|
92
|
+
```
|
93
|
+
- Response:
|
94
|
+
```bash
|
95
|
+
{"status": "healthy", "model": "Qwen/Qwen2.5-3B-Instruct"}
|
96
|
+
```
|
97
|
+
|
98
|
+
- Text-to-Speech: POST /v1/audio/speech
|
99
|
+
``` bash
|
100
|
+
curl -X POST "http://localhost:7860/v1/audio/speech" -H "X-API-Key: your_secret_key" -H "Content-Type: application/json" -d '{"input": "ನಮಸ್ಕಾರ", "voice": "Female voice", "model": "ai4bharat/indic-parler-tts", "response_format": "mp3"}' --output speech.mp3
|
101
|
+
```
|
102
|
+
- Chat: POST /chat
|
103
|
+
``` bash
|
104
|
+
curl -X POST "http://localhost:7860/chat" -H "X-API-Key: your_secret_key" -H "Content-Type: application/json" -d '{"prompt": "ನೀವು ಹೇಗಿದ್ದೀರಿ?"}'
|
105
|
+
```
|
106
|
+
|
107
|
+
- Response:
|
108
|
+
```{"response": "<Kannada response>"}```
|
109
|
+
- Image Captioning: POST /caption/
|
110
|
+
```bash
|
111
|
+
curl -X POST "http://localhost:7860/caption/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "length=short"
|
112
|
+
```
|
113
|
+
- Response:``` {"caption": "<short caption>"}```
|
114
|
+
- Visual Query: POST /visual_query/
|
115
|
+
```bash
|
116
|
+
curl -X POST "http://localhost:7860/visual_query/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "query=What is this?"
|
117
|
+
```
|
118
|
+
- Response: ```{"answer": "<answer>"}```
|
119
|
+
- Object Detection: POST /detect/
|
120
|
+
```bash
|
121
|
+
curl -X POST "http://localhost:7860/detect/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "object_type=face"
|
122
|
+
```
|
123
|
+
- Response: ```{"objects": [<list of detected objects>]}```
|
124
|
+
- Object Pointing: POST /point/
|
125
|
+
```bash
|
126
|
+
|
127
|
+
curl -X POST "http://localhost:7860/point/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "object_type=person"
|
128
|
+
```
|
129
|
+
- Response: ```{"points": [<list of points>]}```
|
130
|
+
- Transcription: POST /transcribe/
|
131
|
+
```bash
|
132
|
+
curl -X POST "http://localhost:7860/transcribe/?language=kannada" -H "X-API-Key: your_secret_key" -F "file=@audio.wav"
|
133
|
+
```
|
134
|
+
- Response: ```{"text": "<transcribed text>"}```
|
135
|
+
- Batch Transcription: POST /transcribe_batch/
|
136
|
+
```bash
|
137
|
+
curl -X POST "http://localhost:7860/transcribe_batch/?language=kannada" -H "X-API-Key: your_secret_key" -F "files=@audio1.wav" -F "files=@audio2.mp3"
|
138
|
+
```
|
139
|
+
- Response: ```{"transcriptions": ["<text1>", "<text2>"]}```
|
140
|
+
|
141
|
+
- Notes
|
142
|
+
- Lazy Loading: Models load on first use or via /load_all_models. Expect a delay on the first request for each model type.
|
143
|
+
Supported Languages: ASR supports multiple Indian languages (e.g., kannada, hindi, tamil); see models/asr.py for the full list.
|
144
|
+
Logs: Check dhwani_api.log for detailed logs (rotated at 10MB, 5 backups).
|
145
|
+
Performance: Use a GPU with flash-attn installed for faster TTS and ASR inference.
|
146
|
+
|
147
|
+
- Troubleshooting
|
148
|
+
|
149
|
+
- Module Errors: Ensure all dependencies are installed. Re-run pip install if needed.
|
150
|
+
FFmpeg Not Found: Install FFmpeg and ensure it’s in your PATH.
|
151
|
+
Permission Denied: Run with sudo if accessing restricted ports (e.g., < 1024).
|
152
|
+
|
@@ -0,0 +1,28 @@
|
|
1
|
+
[build-system]
|
2
|
+
requires = ["setuptools>=61.0", "wheel"]
|
3
|
+
build-backend = "setuptools.build_meta"
|
4
|
+
|
5
|
+
[project]
|
6
|
+
name = "dwani"
|
7
|
+
version = "0.1.1"
|
8
|
+
description = "Multimodal AI server for Indian languages (speech, vision, LLMs, TTS, ASR, etc.)"
|
9
|
+
authors = [
|
10
|
+
{ name="sachin", email="python@dwani.ai" }
|
11
|
+
]
|
12
|
+
readme = "README.md"
|
13
|
+
license = { file = "LICENSE" }
|
14
|
+
requires-python = ">=3.10"
|
15
|
+
|
16
|
+
dependencies = [
|
17
|
+
"fastapi>=0.95.0",
|
18
|
+
"uvicorn[standard]>=0.22.0",
|
19
|
+
"pydantic>=2.0.0",
|
20
|
+
"requests>=2.25.0",
|
21
|
+
"python-multipart>=0.0.5",
|
22
|
+
"pydantic-settings>=2.0.0"
|
23
|
+
]
|
24
|
+
|
25
|
+
[project.urls]
|
26
|
+
Homepage = "https://github.com/dwani-ai/dwani-server"
|
27
|
+
Source = "https://github.com/dwani-ai/dwani-server"
|
28
|
+
Issues = "https://github.com/dwani-ai/dwani-server/issues"
|
dwani-0.1.1/setup.cfg
ADDED
@@ -0,0 +1,39 @@
|
|
1
|
+
from .client import DhwaniClient
|
2
|
+
from .chat import Chat
|
3
|
+
from .audio import Audio
|
4
|
+
from .vision import Vision
|
5
|
+
from .asr import ASR
|
6
|
+
from .exceptions import DhwaniAPIError
|
7
|
+
|
8
|
+
__all__ = ["DhwaniClient", "Chat", "Audio", "Vision", "ASR", "DhwaniAPIError"]
|
9
|
+
|
10
|
+
# Optionally, instantiate a default client for convenience
|
11
|
+
api_key = None
|
12
|
+
api_base = "http://localhost:7860"
|
13
|
+
|
14
|
+
def _get_client():
    """Return the shared module-level :class:`DhwaniClient`, creating it lazily.

    The client is built on first use from the module-level ``api_key`` and
    ``api_base`` settings, so users may assign ``dwani.api_key = "..."``
    before their first API call. Subsequent calls reuse the same instance.

    Raises:
        ValueError: propagated from ``DhwaniClient`` when no API key is set.
    """
    global _client
    # globals().get() covers both "never created" and "explicitly reset to
    # None"; the original mixed a `global` declaration with direct
    # globals()-dict mutation and redundantly re-imported DhwaniClient,
    # which is already imported at module top.
    if globals().get("_client") is None:
        _client = DhwaniClient(api_key=api_key, api_base=api_base)
    return _client
|
20
|
+
|
21
|
+
class chat:
    """Lower-case convenience namespace: ``dwani.chat.create(...)``."""

    @staticmethod
    def create(prompt, **kwargs):
        """Send *prompt* to the chat endpoint through the default client."""
        client = _get_client()
        return client.chat(prompt, **kwargs)
|
25
|
+
|
26
|
+
class audio:
    """Lower-case convenience namespace: ``dwani.audio.speech(...)``."""

    @staticmethod
    def speech(*args, **kwargs):
        """Generate speech audio through the default client."""
        client = _get_client()
        return client.speech(*args, **kwargs)
|
30
|
+
|
31
|
+
class vision:
    """Lower-case convenience namespace: ``dwani.vision.caption(...)``."""

    @staticmethod
    def caption(*args, **kwargs):
        """Caption an image through the default client."""
        client = _get_client()
        return client.caption(*args, **kwargs)
|
35
|
+
|
36
|
+
class asr:
    """Lower-case convenience namespace: ``dwani.asr.transcribe(...)``."""

    @staticmethod
    def transcribe(*args, **kwargs):
        """Transcribe audio through the default client."""
        client = _get_client()
        return client.transcribe(*args, **kwargs)
|
dwani-0.1.1/src/asr.py
ADDED
@@ -0,0 +1,34 @@
|
|
1
|
+
from .exceptions import DhwaniAPIError
|
2
|
+
|
3
|
+
def asr_transcribe(client, file_path, language):
    """Upload an audio file to ``/transcribe/`` and return the JSON response.

    Args:
        client: A ``DhwaniClient`` providing ``api_base`` and ``_headers()``.
        file_path: Path of the audio file to transcribe.
        language: Language identifier understood by the server
            (e.g. ``"kannada"``).

    Returns:
        The decoded JSON body, e.g. ``{"text": "..."}``.

    Raises:
        DhwaniAPIError: If the server responds with a non-200 status.
    """
    # This module has no top-level `import requests`; the original code
    # therefore raised NameError on first call. Import locally to fix.
    import requests

    # Keep the file open while the multipart upload is in flight.
    with open(file_path, "rb") as f:
        resp = requests.post(
            f"{client.api_base}/transcribe/?language={language}",
            headers=client._headers(),
            files={"file": f},
        )
    if resp.status_code != 200:
        raise DhwaniAPIError(resp)
    return resp.json()
|
14
|
+
|
15
|
+
class ASR:
    """Static facade over the default client's transcription API."""

    @staticmethod
    def transcribe(*args, **kwargs):
        """Proxy to ``DhwaniClient.transcribe`` on the shared client."""
        # Imported lazily to avoid a circular import with the package root.
        from . import _get_client

        client = _get_client()
        return client.transcribe(*args, **kwargs)
|
20
|
+
|
21
|
+
from .docs import Documents
|
22
|
+
|
23
|
+
class documents:
    """Convenience namespace for document OCR, translation and summaries.

    Each method delegates to the shared module-level client. The original
    code referenced ``_get_client`` without importing it into this module,
    raising NameError at call time; it is now imported lazily, matching the
    pattern used by ``ASR.transcribe``.
    """

    @staticmethod
    def ocr(file_path, language=None):
        """OCR a document (image/PDF); *language* is an optional hint."""
        from . import _get_client
        return _get_client().document_ocr(file_path, language)

    @staticmethod
    def translate(file_path, src_lang, tgt_lang):
        """Translate a document from *src_lang* to *tgt_lang*."""
        from . import _get_client
        return _get_client().document_translate(file_path, src_lang, tgt_lang)

    @staticmethod
    def summarize(file_path, language=None):
        """Summarize a document; *language* is an optional hint."""
        from . import _get_client
        return _get_client().document_summarize(file_path, language)
|
dwani-0.1.1/src/audio.py
ADDED
@@ -0,0 +1,29 @@
|
|
1
|
+
from .exceptions import DhwaniAPIError
|
2
|
+
|
3
|
+
def audio_speech(client, input, voice, model, response_format="mp3", output_file=None):
    """Call the ``/v1/audio/speech`` TTS endpoint.

    Args:
        client: A ``DhwaniClient`` providing ``api_base`` and ``_headers()``.
        input: Text to synthesize. (Name shadows the builtin, but it is part
            of the public keyword interface and must stay unchanged.)
        voice: Voice description/name forwarded to the server.
        model: TTS model identifier, e.g. ``"ai4bharat/indic-parler-tts"``.
        response_format: Audio container format; defaults to ``"mp3"``.
        output_file: Optional path; when given the audio is streamed to this
            file and the path is returned instead of raw bytes.

    Returns:
        ``output_file`` when provided, otherwise the audio as ``bytes``.

    Raises:
        DhwaniAPIError: If the server responds with a non-200 status.
    """
    # This module has no top-level `import requests`; the original code
    # therefore raised NameError on first call. Import locally to fix.
    import requests

    payload = {
        "input": input,
        "voice": voice,
        "model": model,
        "response_format": response_format,
    }
    resp = requests.post(
        f"{client.api_base}/v1/audio/speech",
        headers={**client._headers(), "Content-Type": "application/json"},
        json=payload,
        stream=True,  # stream so large audio is written incrementally
    )
    if resp.status_code != 200:
        raise DhwaniAPIError(resp)
    if output_file:
        with open(output_file, "wb") as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)
        return output_file
    return resp.content
|
24
|
+
|
25
|
+
class Audio:
    """Static facade over the default client's text-to-speech API."""

    @staticmethod
    def speech(*args, **kwargs):
        """Proxy to ``DhwaniClient.speech`` on the shared client."""
        # Imported lazily to avoid a circular import with the package root.
        from . import _get_client

        client = _get_client()
        return client.speech(*args, **kwargs)
|
dwani-0.1.1/src/chat.py
ADDED
@@ -0,0 +1,17 @@
|
|
1
|
+
from .exceptions import DhwaniAPIError
|
2
|
+
|
3
|
+
def chat_create(client, prompt, **kwargs):
    """POST a chat *prompt* to ``/chat`` and return the JSON response.

    Args:
        client: A ``DhwaniClient`` providing ``api_base`` and ``_headers()``.
        prompt: The user prompt text.
        **kwargs: Extra fields merged into the JSON request body.

    Returns:
        The decoded JSON body, e.g. ``{"response": "..."}``.

    Raises:
        DhwaniAPIError: If the server responds with a non-200 status.
    """
    # This module has no top-level `import requests`; the original code
    # therefore raised NameError on first call. Import locally to fix.
    import requests

    resp = requests.post(
        f"{client.api_base}/chat",
        headers={**client._headers(), "Content-Type": "application/json"},
        json={"prompt": prompt, **kwargs},
    )
    if resp.status_code != 200:
        raise DhwaniAPIError(resp)
    return resp.json()
|
12
|
+
|
13
|
+
class Chat:
    """Static facade over the default client's chat API."""

    @staticmethod
    def create(prompt, **kwargs):
        """Proxy to ``DhwaniClient.chat`` on the shared client."""
        # Imported lazily to avoid a circular import with the package root.
        from . import _get_client

        client = _get_client()
        return client.chat(prompt, **kwargs)
|
@@ -0,0 +1,41 @@
|
|
1
|
+
import os
|
2
|
+
import requests
|
3
|
+
from .exceptions import DhwaniAPIError
|
4
|
+
|
5
|
+
class DhwaniClient:
    """Thin HTTP client for the Dhwani API.

    The API key and base URL come from the constructor, falling back to the
    ``DHWANI_API_KEY`` and ``DHWANI_API_BASE`` environment variables
    (base URL defaults to ``http://localhost:7860``). Every endpoint method
    delegates to a per-feature module, imported lazily to keep import-time
    cheap and avoid circular imports.
    """

    def __init__(self, api_key=None, api_base=None):
        self.api_key = api_key or os.getenv("DHWANI_API_KEY")
        self.api_base = api_base or os.getenv(
            "DHWANI_API_BASE", "http://localhost:7860"
        )
        if not self.api_key:
            raise ValueError("DHWANI_API_KEY not set")

    def _headers(self):
        """Authentication headers attached to every request."""
        return {"X-API-Key": self.api_key}

    def chat(self, prompt, **kwargs):
        """Send a chat prompt; returns the endpoint's JSON response."""
        from .chat import chat_create

        return chat_create(self, prompt, **kwargs)

    def speech(self, *args, **kwargs):
        """Synthesize speech; returns audio bytes or an output file path."""
        from .audio import audio_speech

        return audio_speech(self, *args, **kwargs)

    def caption(self, *args, **kwargs):
        """Caption an image; returns the endpoint's JSON response."""
        from .vision import vision_caption

        return vision_caption(self, *args, **kwargs)

    def transcribe(self, *args, **kwargs):
        """Transcribe audio; returns the endpoint's JSON response."""
        from .asr import asr_transcribe

        return asr_transcribe(self, *args, **kwargs)

    def document_ocr(self, file_path, language=None):
        """OCR a document; *language* is an optional hint."""
        from .docs import document_ocr

        return document_ocr(self, file_path, language)

    def document_translate(self, file_path, src_lang, tgt_lang):
        """Translate a document from *src_lang* to *tgt_lang*."""
        from .docs import document_translate

        return document_translate(self, file_path, src_lang, tgt_lang)

    def document_summarize(self, file_path, language=None):
        """Summarize a document; *language* is an optional hint."""
        from .docs import document_summarize

        return document_summarize(self, file_path, language)
|
41
|
+
|
dwani-0.1.1/src/docs.py
ADDED
@@ -0,0 +1,70 @@
|
|
1
|
+
import requests
|
2
|
+
from .exceptions import DhwaniAPIError
|
3
|
+
|
4
|
+
def document_ocr(client, file_path, language=None):
    """OCR a document (image/PDF) and return extracted text."""
    form = {}
    if language:
        form["language"] = language
    # Keep the file open while the multipart upload is in flight.
    with open(file_path, "rb") as f:
        resp = requests.post(
            f"{client.api_base}/v1/document/ocr",
            headers=client._headers(),
            files={"file": f},
            data=form,
        )
    if resp.status_code != 200:
        raise DhwaniAPIError(resp)
    return resp.json()
|
20
|
+
|
21
|
+
def document_translate(client, file_path, src_lang, tgt_lang):
    """Translate a document (image/PDF with text) from src_lang to tgt_lang."""
    form = {"src_lang": src_lang, "tgt_lang": tgt_lang}
    # Keep the file open while the multipart upload is in flight.
    with open(file_path, "rb") as f:
        resp = requests.post(
            f"{client.api_base}/v1/document/translate",
            headers=client._headers(),
            files={"file": f},
            data=form,
        )
    if resp.status_code != 200:
        raise DhwaniAPIError(resp)
    return resp.json()
|
38
|
+
|
39
|
+
def document_summarize(client, file_path, language=None):
    """Summarize a document (image/PDF/text)."""
    form = {}
    if language:
        form["language"] = language
    # Keep the file open while the multipart upload is in flight.
    with open(file_path, "rb") as f:
        resp = requests.post(
            f"{client.api_base}/v1/document/summarize",
            headers=client._headers(),
            files={"file": f},
            data=form,
        )
    if resp.status_code != 200:
        raise DhwaniAPIError(resp)
    return resp.json()
|
55
|
+
|
56
|
+
class Documents:
    """Static facade over the default client's document API."""

    @staticmethod
    def ocr(file_path, language=None):
        """OCR *file_path*; *language* is an optional hint."""
        from . import _get_client

        client = _get_client()
        return client.document_ocr(file_path, language)

    @staticmethod
    def translate(file_path, src_lang, tgt_lang):
        """Translate *file_path* from *src_lang* to *tgt_lang*."""
        from . import _get_client

        client = _get_client()
        return client.document_translate(file_path, src_lang, tgt_lang)

    @staticmethod
    def summarize(file_path, language=None):
        """Summarize *file_path*; *language* is an optional hint."""
        from . import _get_client

        client = _get_client()
        return client.document_summarize(file_path, language)
|
@@ -0,0 +1,193 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: dwani
|
3
|
+
Version: 0.1.1
|
4
|
+
Summary: Multimodal AI server for Indian languages (speech, vision, LLMs, TTS, ASR, etc.)
|
5
|
+
Author-email: sachin <python@dwani.ai>
|
6
|
+
License: MIT License
|
7
|
+
|
8
|
+
Copyright (c) 2025 Sachin Shetty
|
9
|
+
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
12
|
+
in the Software without restriction, including without limitation the rights
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
15
|
+
furnished to do so, subject to the following conditions:
|
16
|
+
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
18
|
+
copies or substantial portions of the Software.
|
19
|
+
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
26
|
+
SOFTWARE.
|
27
|
+
|
28
|
+
Project-URL: Homepage, https://github.com/dwani-ai/dwani-server
|
29
|
+
Project-URL: Source, https://github.com/dwani-ai/dwani-server
|
30
|
+
Project-URL: Issues, https://github.com/dwani-ai/dwani-server/issues
|
31
|
+
Requires-Python: >=3.10
|
32
|
+
Description-Content-Type: text/markdown
|
33
|
+
License-File: LICENSE
|
34
|
+
Requires-Dist: fastapi>=0.95.0
|
35
|
+
Requires-Dist: uvicorn[standard]>=0.22.0
|
36
|
+
Requires-Dist: pydantic>=2.0.0
|
37
|
+
Requires-Dist: requests>=2.25.0
|
38
|
+
Requires-Dist: python-multipart>=0.0.5
|
39
|
+
Requires-Dist: pydantic-settings>=2.0.0
|
40
|
+
Dynamic: license-file
|
41
|
+
|
42
|
+
# Dhwani Server
|
43
|
+
|
44
|
+
Dhwani API is a FastAPI-based application providing AI-powered services for Indian languages, including text-to-speech (TTS), language model (LLM) chat, vision-language model (VLM) capabilities, and automatic speech recognition (ASR). It supports lazy loading of models for fast startup and includes endpoints for various tasks.
|
45
|
+
|
46
|
+
## Features
|
47
|
+
- **Text-to-Speech (TTS)**: Generate audio from text in Indian languages using Parler TTS.
|
48
|
+
- **Chat**: Process Kannada prompts and respond in Kannada via translation and LLM.
|
49
|
+
- **Vision-Language Model (VLM)**: Caption images, answer visual queries, detect, and point objects.
|
50
|
+
- **Automatic Speech Recognition (ASR)**: Transcribe audio files in multiple Indian languages.
|
51
|
+
- **Lazy Loading**: Models load on-demand or via an explicit endpoint for fast startup.
|
52
|
+
|
53
|
+
## Prerequisites
|
54
|
+
- **System Requirements - User **:
|
55
|
+
- **Python**: 3.10
|
56
|
+
- Ubuntu 22.04
|
57
|
+
- git
|
58
|
+
- vscode
|
59
|
+
- **System Requirements - Server **:
|
60
|
+
- Ubuntu with sufficient RAM (16GB+ recommended for models).
|
61
|
+
- Optional: NVIDIA GPU with CUDA support for faster inference.
|
62
|
+
- **FFmpeg**: Required for audio processing (ASR).
|
63
|
+
|
64
|
+
- Server Setup
|
65
|
+
```bash
|
66
|
+
export HF_HOME=/home/ubuntu/data-dhwani-models
|
67
|
+
export HF_TOKEN='Your-HF-token'
|
68
|
+
python src/server/main.py --host 0.0.0.0 --port 7860 --config config_two
|
69
|
+
```
|
70
|
+
## Installation
|
71
|
+
|
72
|
+
1. **Clone the Repository**:
|
73
|
+
```bash
|
74
|
+
git clone https://github.com/slabstech/dhwani-server
|
75
|
+
cd dhwani-server
|
76
|
+
```
|
77
|
+
|
78
|
+
2. Install Libraries:
|
79
|
+
- On Ubuntu: ```sudo apt-get install ffmpeg build-essential```
|
80
|
+
|
81
|
+
3. Set Up Virtual Environment:
|
82
|
+
```bash
|
83
|
+
python -m venv venv
|
84
|
+
source venv/bin/activate
|
85
|
+
```
|
86
|
+
4. Install Dependencies:
|
87
|
+
```bash
|
88
|
+
sudo apt-get install -y ffmpeg build-essential
|
89
|
+
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --profile minimal
|
90
|
+
. "$HOME/.cargo/env"
|
91
|
+
export CC=/usr/bin/gcc
|
92
|
+
export CXX=/usr/bin/g++
|
93
|
+
```
|
94
|
+
```bash
|
95
|
+
pip install --no-cache-dir --upgrade pip setuptools psutil setuptools-rust torch==2.6.0
|
96
|
+
pip install --no-cache-dir flash-attn --no-build-isolation
|
97
|
+
```
|
98
|
+
|
99
|
+
```bash
|
100
|
+
pip install -r requirements.txt
|
101
|
+
```
|
102
|
+
|
103
|
+
4. Set Environment Variable:
|
104
|
+
Create a .env file in the root directory and add your API key:
|
105
|
+
plaintext
|
106
|
+
```bash
|
107
|
+
API_KEY=your_secret_key
|
108
|
+
```
|
109
|
+
|
110
|
+
5. Running the Server
|
111
|
+
- Start the Server:
|
112
|
+
```bash
|
113
|
+
python src/server/main.py --host 0.0.0.0 --port 7860 --config config_two
|
114
|
+
```
|
115
|
+
|
116
|
+
- The server loads all models at startup
|
117
|
+
- Access the interactive API docs at http://localhost:7860/docs.
|
118
|
+
|
119
|
+
- (Optional) Load All Models:
|
120
|
+
Preload all models (LLM, Translation, TTS, VLM, ASR) with:
|
121
|
+
-
|
122
|
+
```bash
|
123
|
+
curl -X POST "http://localhost:7860/load_all_models" -H "X-API-Key: your_secret_key"
|
124
|
+
```
|
125
|
+
|
126
|
+
- Usage
|
127
|
+
- Endpoints
|
128
|
+
- All endpoints require the X-API-Key header with the value from your .env file.
|
129
|
+
|
130
|
+
- Health Check: GET /health
|
131
|
+
```bash
|
132
|
+
curl "http://localhost:7860/health"
|
133
|
+
```
|
134
|
+
- Response:
|
135
|
+
```bash
|
136
|
+
{"status": "healthy", "model": "Qwen/Qwen2.5-3B-Instruct"}
|
137
|
+
```
|
138
|
+
|
139
|
+
- Text-to-Speech: POST /v1/audio/speech
|
140
|
+
``` bash
|
141
|
+
curl -X POST "http://localhost:7860/v1/audio/speech" -H "X-API-Key: your_secret_key" -H "Content-Type: application/json" -d '{"input": "ನಮಸ್ಕಾರ", "voice": "Female voice", "model": "ai4bharat/indic-parler-tts", "response_format": "mp3"}' --output speech.mp3
|
142
|
+
```
|
143
|
+
- Chat: POST /chat
|
144
|
+
``` bash
|
145
|
+
curl -X POST "http://localhost:7860/chat" -H "X-API-Key: your_secret_key" -H "Content-Type: application/json" -d '{"prompt": "ನೀವು ಹೇಗಿದ್ದೀರಿ?"}'
|
146
|
+
```
|
147
|
+
|
148
|
+
- Response:
|
149
|
+
```{"response": "<Kannada response>"}```
|
150
|
+
- Image Captioning: POST /caption/
|
151
|
+
```bash
|
152
|
+
curl -X POST "http://localhost:7860/caption/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "length=short"
|
153
|
+
```
|
154
|
+
- Response:``` {"caption": "<short caption>"}```
|
155
|
+
- Visual Query: POST /visual_query/
|
156
|
+
```bash
|
157
|
+
curl -X POST "http://localhost:7860/visual_query/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "query=What is this?"
|
158
|
+
```
|
159
|
+
- Response: ```{"answer": "<answer>"}```
|
160
|
+
- Object Detection: POST /detect/
|
161
|
+
```bash
|
162
|
+
curl -X POST "http://localhost:7860/detect/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "object_type=face"
|
163
|
+
```
|
164
|
+
- Response: ```{"objects": [<list of detected objects>]}```
|
165
|
+
- Object Pointing: POST /point/
|
166
|
+
```bash
|
167
|
+
|
168
|
+
curl -X POST "http://localhost:7860/point/" -H "X-API-Key: your_secret_key" -F "file=@image.jpg" -F "object_type=person"
|
169
|
+
```
|
170
|
+
- Response: ```{"points": [<list of points>]}```
|
171
|
+
- Transcription: POST /transcribe/
|
172
|
+
```bash
|
173
|
+
curl -X POST "http://localhost:7860/transcribe/?language=kannada" -H "X-API-Key: your_secret_key" -F "file=@audio.wav"
|
174
|
+
```
|
175
|
+
- Response: ```{"text": "<transcribed text>"}```
|
176
|
+
- Batch Transcription: POST /transcribe_batch/
|
177
|
+
```bash
|
178
|
+
curl -X POST "http://localhost:7860/transcribe_batch/?language=kannada" -H "X-API-Key: your_secret_key" -F "files=@audio1.wav" -F "files=@audio2.mp3"
|
179
|
+
```
|
180
|
+
- Response: ```{"transcriptions": ["<text1>", "<text2>"]}```
|
181
|
+
|
182
|
+
- Notes
|
183
|
+
- Lazy Loading: Models load on first use or via /load_all_models. Expect a delay on the first request for each model type.
|
184
|
+
- Supported Languages: ASR supports multiple Indian languages (e.g., kannada, hindi, tamil); see models/asr.py for the full list.
|
185
|
+
Logs: Check dhwani_api.log for detailed logs (rotated at 10MB, 5 backups).
|
186
|
+
Performance: Use a GPU with flash-attn installed for faster TTS and ASR inference.
|
187
|
+
|
188
|
+
- Troubleshooting
|
189
|
+
|
190
|
+
- Module Errors: Ensure all dependencies are installed. Re-run pip install if needed.
|
191
|
+
- FFmpeg Not Found: Install FFmpeg and ensure it's in your PATH.
|
192
|
+
Permission Denied: Run with sudo if accessing restricted ports (e.g., < 1024).
|
193
|
+
|
@@ -0,0 +1,16 @@
|
|
1
|
+
LICENSE
|
2
|
+
README.md
|
3
|
+
pyproject.toml
|
4
|
+
src/__init__.py
|
5
|
+
src/asr.py
|
6
|
+
src/audio.py
|
7
|
+
src/chat.py
|
8
|
+
src/client.py
|
9
|
+
src/docs.py
|
10
|
+
src/exceptions.py
|
11
|
+
src/vision.py
|
12
|
+
src/dwani.egg-info/PKG-INFO
|
13
|
+
src/dwani.egg-info/SOURCES.txt
|
14
|
+
src/dwani.egg-info/dependency_links.txt
|
15
|
+
src/dwani.egg-info/requires.txt
|
16
|
+
src/dwani.egg-info/top_level.txt
|
@@ -0,0 +1 @@
|
|
1
|
+
|
@@ -0,0 +1,21 @@
|
|
1
|
+
import requests

from .exceptions import DhwaniAPIError
|
2
|
+
|
3
|
+
def vision_caption(client, file_path, length="short"):
    """Upload an image to the Dhwani ``/caption/`` endpoint and return its caption.

    Args:
        client: Configured client exposing ``api_base`` and ``_headers()``
            (the latter supplies the ``X-API-Key`` header).
        file_path: Path to the image file to upload.
        length: Caption length hint forwarded to the server ("short" by default).

    Returns:
        dict: The decoded JSON response, e.g. ``{"caption": "<short caption>"}``.

    Raises:
        DhwaniAPIError: If the server responds with a non-200 status code.

    Note:
        The original module used ``requests`` without importing it, which
        raised ``NameError`` at call time; the missing import is now added
        at module level.
    """
    # Keep the file handle open for the duration of the upload only.
    with open(file_path, "rb") as f:
        resp = requests.post(
            f"{client.api_base}/caption/",
            headers=client._headers(),
            files={"file": f},
            data={"length": length},
        )
    if resp.status_code != 200:
        raise DhwaniAPIError(resp)
    return resp.json()
16
|
+
|
17
|
+
class Vision:
    """Static facade that forwards vision calls to the module-level client."""

    @staticmethod
    def caption(*args, **kwargs):
        """Delegate to the shared client's ``caption`` method."""
        # Imported lazily to avoid a circular import at package load time.
        from . import _get_client

        client = _get_client()
        return client.caption(*args, **kwargs)
|