paroquant-0.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paroquant-0.1.0/LICENSE +21 -0
- paroquant-0.1.0/PKG-INFO +151 -0
- paroquant-0.1.0/README.md +113 -0
- paroquant-0.1.0/paroquant/__init__.py +0 -0
- paroquant-0.1.0/paroquant/cli/__init__.py +0 -0
- paroquant-0.1.0/paroquant/cli/benchmark.py +53 -0
- paroquant-0.1.0/paroquant/cli/chat.py +235 -0
- paroquant-0.1.0/paroquant/cli/convert.py +221 -0
- paroquant-0.1.0/paroquant/cli/evaluate.py +78 -0
- paroquant-0.1.0/paroquant/cli/optimize.py +379 -0
- paroquant-0.1.0/paroquant/inference/__init__.py +10 -0
- paroquant-0.1.0/paroquant/inference/backends/__init__.py +0 -0
- paroquant-0.1.0/paroquant/inference/backends/mlx/__init__.py +3 -0
- paroquant-0.1.0/paroquant/inference/backends/mlx/generator.py +39 -0
- paroquant-0.1.0/paroquant/inference/backends/mlx/load.py +191 -0
- paroquant-0.1.0/paroquant/inference/backends/mlx/modules.py +90 -0
- paroquant-0.1.0/paroquant/inference/backends/transformers/__init__.py +3 -0
- paroquant-0.1.0/paroquant/inference/backends/transformers/generator.py +67 -0
- paroquant-0.1.0/paroquant/inference/backends/transformers/modules.py +71 -0
- paroquant-0.1.0/paroquant/inference/backends/transformers/quantizer.py +122 -0
- paroquant-0.1.0/paroquant/inference/backends/vllm/__init__.py +3 -0
- paroquant-0.1.0/paroquant/inference/backends/vllm/generator.py +36 -0
- paroquant-0.1.0/paroquant/inference/backends/vllm/plugin.py +241 -0
- paroquant-0.1.0/paroquant/inference/base.py +154 -0
- paroquant-0.1.0/paroquant/kernels/__init__.py +0 -0
- paroquant-0.1.0/paroquant/kernels/cuda/__init__.py +36 -0
- paroquant-0.1.0/paroquant/kernels/cuda/autograd.py +86 -0
- paroquant-0.1.0/paroquant/kernels/cuda/pybind.cpp +4 -0
- paroquant-0.1.0/paroquant/kernels/cuda/rotation.cu +235 -0
- paroquant-0.1.0/paroquant/kernels/cuda/rotation.cuh +173 -0
- paroquant-0.1.0/paroquant/kernels/metal/__init__.py +3 -0
- paroquant-0.1.0/paroquant/kernels/metal/rotation.metal +74 -0
- paroquant-0.1.0/paroquant/kernels/metal/rotation.py +23 -0
- paroquant-0.1.0/paroquant/optim/__init__.py +0 -0
- paroquant-0.1.0/paroquant/optim/qlinear.py +243 -0
- paroquant-0.1.0/paroquant/optim/quant.py +13 -0
- paroquant-0.1.0/paroquant/optim/quantizer.py +117 -0
- paroquant-0.1.0/paroquant/optim/rotation.py +87 -0
- paroquant-0.1.0/paroquant/optim/train.py +192 -0
- paroquant-0.1.0/paroquant/optim/util.py +363 -0
- paroquant-0.1.0/paroquant.egg-info/PKG-INFO +151 -0
- paroquant-0.1.0/paroquant.egg-info/SOURCES.txt +45 -0
- paroquant-0.1.0/paroquant.egg-info/dependency_links.txt +1 -0
- paroquant-0.1.0/paroquant.egg-info/requires.txt +28 -0
- paroquant-0.1.0/paroquant.egg-info/top_level.txt +1 -0
- paroquant-0.1.0/pyproject.toml +64 -0
- paroquant-0.1.0/setup.cfg +4 -0
paroquant-0.1.0/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Z Lab
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
paroquant-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,151 @@
+Metadata-Version: 2.4
+Name: paroquant
+Version: 0.1.0
+Summary: ParoQuant — Pairwise Rotation Quantization for LLMs
+Author: Z Lab
+License-Expression: MIT
+Project-URL: Homepage, https://paroquant.z-lab.ai
+Project-URL: Paper, https://arxiv.org/abs/2511.10645
+Project-URL: Models, https://huggingface.co/collections/z-lab/paroquant
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.11
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: rich
+Provides-Extra: transformers
+Requires-Dist: torch>=2.8; extra == "transformers"
+Requires-Dist: transformers>=4.55; extra == "transformers"
+Requires-Dist: autoawq; extra == "transformers"
+Provides-Extra: vllm
+Requires-Dist: vllm>=0.15; extra == "vllm"
+Provides-Extra: mlx
+Requires-Dist: mlx; extra == "mlx"
+Requires-Dist: mlx-lm; extra == "mlx"
+Requires-Dist: mlx-vlm; extra == "mlx"
+Provides-Extra: optim
+Requires-Dist: paroquant[transformers]; extra == "optim"
+Requires-Dist: datasets; extra == "optim"
+Requires-Dist: simple_parsing; extra == "optim"
+Requires-Dist: tqdm; extra == "optim"
+Provides-Extra: eval
+Requires-Dist: lm_eval; extra == "eval"
+Requires-Dist: zstandard; extra == "eval"
+Provides-Extra: dev
+Requires-Dist: pytest; extra == "dev"
+Requires-Dist: pre-commit; extra == "dev"
+Dynamic: license-file
+
+# ParoQuant
+
+**Pairwise Rotation Quantization for Efficient Reasoning LLM Inference**
+
+<p align="center">
+  <a href="https://arxiv.org/abs/2511.10645"><img src="https://img.shields.io/badge/arXiv-2511.10645-b31b1b.svg" alt="Paper"></a>
+  <a href="https://paroquant.z-lab.ai"><img src="https://img.shields.io/badge/Blog-ParoQuant-blue" alt="Blog"></a>
+  <a href="https://huggingface.co/collections/z-lab/paroquant"><img src="https://img.shields.io/badge/%F0%9F%A4%97-Models-yellow" alt="Models"></a>
+</p>
+
+State-of-the-art INT4 quantization for LLMs. ParoQuant uses learned pairwise rotations to suppress weight outliers, closing the accuracy gap with FP16 while running at near-AWQ speed. Supports NVIDIA GPUs (vLLM, Transformers) and Apple Silicon (MLX).
+
+<p align="center">
+  <a href="https://youtu.be/fISG4CkizLM">
+    <img src="https://img.youtube.com/vi/fISG4CkizLM/maxresdefault.jpg" width="80%">
+  </a>
+</p>
+
+## Quick Start
+
+**NVIDIA GPU:**
+
+```bash
+pip install paroquant[vllm]
+python -m paroquant.cli.chat --model z-lab/Qwen3-8B-PARO
+
+# or with Docker
+docker run --pull=always --rm -it --gpus all --ipc=host \
+  ghcr.io/z-lab/paroquant:chat --model z-lab/Qwen3-8B-PARO
+```
+
+**Apple Silicon:**
+
+```bash
+pip install paroquant[mlx]
+python -m paroquant.cli.chat --model z-lab/Qwen3-8B-PARO
+```
+
+## Models
+
+All models are available on [Hugging Face](https://huggingface.co/collections/z-lab/paroquant). Swap the model name in the commands above to try any of them.
+
+**Qwen3**
+
+| Model | Checkpoint |
+|---|---|
+| Qwen3-0.6B | [`z-lab/Qwen3-0.6B-PARO`](https://huggingface.co/z-lab/Qwen3-0.6B-PARO) |
+| Qwen3-1.7B | [`z-lab/Qwen3-1.7B-PARO`](https://huggingface.co/z-lab/Qwen3-1.7B-PARO) |
+| Qwen3-4B | [`z-lab/Qwen3-4B-PARO`](https://huggingface.co/z-lab/Qwen3-4B-PARO) |
+| Qwen3-8B | [`z-lab/Qwen3-8B-PARO`](https://huggingface.co/z-lab/Qwen3-8B-PARO) |
+| Qwen3-14B | [`z-lab/Qwen3-14B-PARO`](https://huggingface.co/z-lab/Qwen3-14B-PARO) |
+| Qwen3-4B-Thinking-2507 | [`z-lab/Qwen3-4B-Thinking-2507-PARO`](https://huggingface.co/z-lab/Qwen3-4B-Thinking-2507-PARO) |
+
+**Llama**
+
+| Model | Checkpoint |
+|---|---|
+| Llama-2-7B | [`z-lab/Llama-2-7b-hf-PARO`](https://huggingface.co/z-lab/Llama-2-7b-hf-PARO) |
+| Llama-3-8B | [`z-lab/Meta-Llama-3-8B-PARO`](https://huggingface.co/z-lab/Meta-Llama-3-8B-PARO) |
+| Llama-3-70B | [`z-lab/Meta-Llama-3-70B-PARO`](https://huggingface.co/z-lab/Meta-Llama-3-70B-PARO) |
+| Llama-3.1-8B-Instruct | [`z-lab/Llama-3.1-8B-Instruct-PARO`](https://huggingface.co/z-lab/Llama-3.1-8B-Instruct-PARO) |
+
+Want a model that's not listed? [Open an issue](https://github.com/z-lab/paroquant/issues/new) and let us know.
+
+## Installation
+
+```bash
+git clone https://github.com/z-lab/paroquant && cd paroquant
+
+pip install -e ".[vllm]"          # vLLM backend (GPU, recommended)
+pip install -e ".[transformers]"  # Transformers backend (GPU)
+pip install -e ".[mlx]"           # MLX backend (Apple Silicon)
+pip install -e ".[optim,eval]"    # Optimization & evaluation
+```
+
+Or use Docker: `docker run -it --gpus all --ipc=host ghcr.io/z-lab/paroquant:latest`
+
+## Quantize Your Own Model
+
+```bash
+# 1. Optimize rotation parameters
+experiments/optimize/4bit.sh Qwen/Qwen3-8B
+
+# 2. Export to HF checkpoint (--mode real for INT4, --mode pseudo for FP16)
+python -m paroquant.cli.convert \
+  --model Qwen/Qwen3-8B \
+  --result-dir output/Qwen3-8B \
+  --output-path models/Qwen3-8B-PARO
+```
+
+## Reproduction
+
+See [`experiments/README.md`](./experiments/README.md) for scripts to reproduce all results in the paper.
+
+## Docker Images
+
+| Image | Purpose |
+|---|---|
+| `ghcr.io/z-lab/paroquant:latest` | Optimization & evaluation |
+| `ghcr.io/z-lab/paroquant:chat` | Interactive chat |
+| `ghcr.io/z-lab/paroquant:chat-cu130` | Interactive chat (CUDA 13.0 / ARM64) |
+| `ghcr.io/z-lab/paroquant:eval-reasoning` | Reasoning task evaluation |
+
+## Citation
+
+```bibtex
+@inproceedings{liang2026paroquant,
+  title     = {{ParoQuant: Pairwise Rotation Quantization for Efficient Reasoning LLM Inference}},
+  author    = {Liang, Yesheng and Chen, Haisheng and Zhang, Zihan and Han, Song and Liu, Zhijian},
+  booktitle = {International Conference on Learning Representations (ICLR)},
+  year      = {2026}
+}
+```
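Note: the README above attributes ParoQuant's accuracy to learned pairwise rotations that suppress weight outliers before INT4 quantization. The snippet below is a toy NumPy sketch of why that helps, not the package's implementation: it applies a fixed butterfly cascade of 45° Givens (pairwise) rotations to spread one channel's outlier across all channels, shrinking the per-row quantization scale. ParoQuant learns the rotation angles per layer; every name here is illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)
n, cols = 8, 256
w = rng.normal(size=(n, cols))
w[0, 0] = 9.0  # one outlier inflates row 0's quantization scale

def int4_rtn(m):
    # Per-row symmetric round-to-nearest INT4: scale set by the row's max.
    scale = np.abs(m).max(axis=1, keepdims=True) / 7
    return np.clip(np.round(m / scale), -8, 7) * scale

def givens(n, i, j, theta):
    # Orthogonal rotation acting only on channels i and j.
    g = np.eye(n)
    c, s = np.cos(theta), np.sin(theta)
    g[i, i] = g[j, j] = c
    g[i, j], g[j, i] = -s, s
    return g

# Butterfly cascade of pairwise rotations; angles are fixed at 45° here,
# whereas ParoQuant learns them (and the pairing) during optimization.
r = np.eye(n)
for step in (1, 2, 4):
    for b in range(0, n, 2 * step):
        for k in range(step):
            r = givens(n, b + k, b + k + step, np.pi / 4) @ r

for name, m in (("plain", w), ("rotated", r @ w)):
    err = np.linalg.norm(int4_rtn(m) - m)
    print(f"{name:8s} max|w| = {np.abs(m).max():.2f}   INT4 error = {err:.2f}")
```

Spreading the outlier lowers the max that sets each row's scale, so the rotated copy quantizes with visibly lower error than the plain one.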
paroquant-0.1.0/README.md
ADDED
@@ -0,0 +1,113 @@
+# ParoQuant
+
+**Pairwise Rotation Quantization for Efficient Reasoning LLM Inference**
+
+<p align="center">
+  <a href="https://arxiv.org/abs/2511.10645"><img src="https://img.shields.io/badge/arXiv-2511.10645-b31b1b.svg" alt="Paper"></a>
+  <a href="https://paroquant.z-lab.ai"><img src="https://img.shields.io/badge/Blog-ParoQuant-blue" alt="Blog"></a>
+  <a href="https://huggingface.co/collections/z-lab/paroquant"><img src="https://img.shields.io/badge/%F0%9F%A4%97-Models-yellow" alt="Models"></a>
+</p>
+
+State-of-the-art INT4 quantization for LLMs. ParoQuant uses learned pairwise rotations to suppress weight outliers, closing the accuracy gap with FP16 while running at near-AWQ speed. Supports NVIDIA GPUs (vLLM, Transformers) and Apple Silicon (MLX).
+
+<p align="center">
+  <a href="https://youtu.be/fISG4CkizLM">
+    <img src="https://img.youtube.com/vi/fISG4CkizLM/maxresdefault.jpg" width="80%">
+  </a>
+</p>
+
+## Quick Start
+
+**NVIDIA GPU:**
+
+```bash
+pip install paroquant[vllm]
+python -m paroquant.cli.chat --model z-lab/Qwen3-8B-PARO
+
+# or with Docker
+docker run --pull=always --rm -it --gpus all --ipc=host \
+  ghcr.io/z-lab/paroquant:chat --model z-lab/Qwen3-8B-PARO
+```
+
+**Apple Silicon:**
+
+```bash
+pip install paroquant[mlx]
+python -m paroquant.cli.chat --model z-lab/Qwen3-8B-PARO
+```
+
+## Models
+
+All models are available on [Hugging Face](https://huggingface.co/collections/z-lab/paroquant). Swap the model name in the commands above to try any of them.
+
+**Qwen3**
+
+| Model | Checkpoint |
+|---|---|
+| Qwen3-0.6B | [`z-lab/Qwen3-0.6B-PARO`](https://huggingface.co/z-lab/Qwen3-0.6B-PARO) |
+| Qwen3-1.7B | [`z-lab/Qwen3-1.7B-PARO`](https://huggingface.co/z-lab/Qwen3-1.7B-PARO) |
+| Qwen3-4B | [`z-lab/Qwen3-4B-PARO`](https://huggingface.co/z-lab/Qwen3-4B-PARO) |
+| Qwen3-8B | [`z-lab/Qwen3-8B-PARO`](https://huggingface.co/z-lab/Qwen3-8B-PARO) |
+| Qwen3-14B | [`z-lab/Qwen3-14B-PARO`](https://huggingface.co/z-lab/Qwen3-14B-PARO) |
+| Qwen3-4B-Thinking-2507 | [`z-lab/Qwen3-4B-Thinking-2507-PARO`](https://huggingface.co/z-lab/Qwen3-4B-Thinking-2507-PARO) |
+
+**Llama**
+
+| Model | Checkpoint |
+|---|---|
+| Llama-2-7B | [`z-lab/Llama-2-7b-hf-PARO`](https://huggingface.co/z-lab/Llama-2-7b-hf-PARO) |
+| Llama-3-8B | [`z-lab/Meta-Llama-3-8B-PARO`](https://huggingface.co/z-lab/Meta-Llama-3-8B-PARO) |
+| Llama-3-70B | [`z-lab/Meta-Llama-3-70B-PARO`](https://huggingface.co/z-lab/Meta-Llama-3-70B-PARO) |
+| Llama-3.1-8B-Instruct | [`z-lab/Llama-3.1-8B-Instruct-PARO`](https://huggingface.co/z-lab/Llama-3.1-8B-Instruct-PARO) |
+
+Want a model that's not listed? [Open an issue](https://github.com/z-lab/paroquant/issues/new) and let us know.
+
+## Installation
+
+```bash
+git clone https://github.com/z-lab/paroquant && cd paroquant
+
+pip install -e ".[vllm]"          # vLLM backend (GPU, recommended)
+pip install -e ".[transformers]"  # Transformers backend (GPU)
+pip install -e ".[mlx]"           # MLX backend (Apple Silicon)
+pip install -e ".[optim,eval]"    # Optimization & evaluation
+```
+
+Or use Docker: `docker run -it --gpus all --ipc=host ghcr.io/z-lab/paroquant:latest`
+
+## Quantize Your Own Model
+
+```bash
+# 1. Optimize rotation parameters
+experiments/optimize/4bit.sh Qwen/Qwen3-8B
+
+# 2. Export to HF checkpoint (--mode real for INT4, --mode pseudo for FP16)
+python -m paroquant.cli.convert \
+  --model Qwen/Qwen3-8B \
+  --result-dir output/Qwen3-8B \
+  --output-path models/Qwen3-8B-PARO
+```
+
+## Reproduction
+
+See [`experiments/README.md`](./experiments/README.md) for scripts to reproduce all results in the paper.
+
+## Docker Images
+
+| Image | Purpose |
+|---|---|
+| `ghcr.io/z-lab/paroquant:latest` | Optimization & evaluation |
+| `ghcr.io/z-lab/paroquant:chat` | Interactive chat |
+| `ghcr.io/z-lab/paroquant:chat-cu130` | Interactive chat (CUDA 13.0 / ARM64) |
+| `ghcr.io/z-lab/paroquant:eval-reasoning` | Reasoning task evaluation |
+
+## Citation
+
+```bibtex
+@inproceedings{liang2026paroquant,
+  title     = {{ParoQuant: Pairwise Rotation Quantization for Efficient Reasoning LLM Inference}},
+  author    = {Liang, Yesheng and Chen, Haisheng and Zhang, Zihan and Han, Song and Liu, Zhijian},
+  booktitle = {International Conference on Learning Representations (ICLR)},
+  year      = {2026}
+}
+```
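Note: the convert step in the README above distinguishes `--mode real` (INT4) from `--mode pseudo` (FP16). The sketch below illustrates that storage tradeoff under standard group-quantization assumptions; the function names and nibble-packing layout are illustrative, not the package's internals or its kernels' actual format.

```python
import numpy as np

def int4_quantize(w: np.ndarray, group: int = 128):
    # Symmetric per-group INT4: one floating-point scale per `group` weights.
    g = w.reshape(-1, group)
    scale = np.abs(g).max(axis=1, keepdims=True) / 7
    q = np.clip(np.round(g / scale), -8, 7).astype(np.int8)
    return q, scale

w = np.random.default_rng(0).normal(size=(4, 256)).astype(np.float32)
q, scale = int4_quantize(w)

# "pseudo": dequantize back to FP16. Loads in any FP16 runtime and is useful
# for accuracy checks, but saves no memory.
w_pseudo = (q * scale).reshape(w.shape).astype(np.float16)

# "real": keep the INT4 codes plus scales, packing two codes per byte. This is
# the form an INT4 inference kernel would consume.
nib = q.astype(np.uint8) & 0x0F               # low-nibble two's-complement codes
packed = nib[:, ::2] | (nib[:, 1::2] << 4)    # two INT4 values per uint8
```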
paroquant-0.1.0/paroquant/__init__.py
File without changes
paroquant-0.1.0/paroquant/cli/__init__.py
File without changes
paroquant-0.1.0/paroquant/cli/benchmark.py
ADDED
@@ -0,0 +1,53 @@
+import argparse
+import asyncio
+import time
+
+from paroquant.inference import GenerationParams, build_prompt, create_generator
+
+
+async def bench(gen, prompt: str, max_tokens: int, warmup: int, runs: int):
+    params = GenerationParams(max_tokens=max_tokens, temperature=0.0)
+    formatted = build_prompt(gen.tokenizer, [{"role": "user", "content": prompt}])
+
+    for _ in range(warmup):
+        await gen.generate(formatted, params)
+
+    times, tokens = [], []
+    for _ in range(runs):
+        start = time.perf_counter()
+        result = await gen.generate(formatted, params)
+        elapsed = time.perf_counter() - start
+        times.append(elapsed)
+        tokens.append(result.stats.num_tokens)
+
+    avg_tokens = sum(tokens) / len(tokens)
+    avg_time = sum(times) / len(times)
+    avg_tps = avg_tokens / avg_time if avg_time > 0 else 0
+    return avg_tokens, avg_time, avg_tps
+
+
+async def main():
+    parser = argparse.ArgumentParser(description="Benchmark generation throughput")
+    parser.add_argument("--model", type=str, required=True)
+    parser.add_argument("--backend", type=str, default="auto", choices=["auto", "mlx", "vllm", "transformers"])
+    parser.add_argument("--max-tokens", type=int, default=128)
+    parser.add_argument("--warmup", type=int, default=2)
+    parser.add_argument("--runs", type=int, default=5)
+    parser.add_argument("--prompt", type=str, default="Write a short essay about the history of computing.")
+    args = parser.parse_args()
+
+    print(f"Loading {args.model} with {args.backend} backend...")
+    start = time.perf_counter()
+    gen = create_generator(args.backend, args.model)
+    load_time = time.perf_counter() - start
+    print(f"Loaded in {load_time:.1f}s")
+
+    print(f"Benchmarking: {args.warmup} warmup + {args.runs} runs, {args.max_tokens} tokens each")
+    avg_tokens, avg_time, avg_tps = await bench(gen, args.prompt, args.max_tokens, args.warmup, args.runs)
+    print(f"  Avg tokens: {avg_tokens:.0f}")
+    print(f"  Avg time:   {avg_time:.2f}s")
+    print(f"  Throughput: {avg_tps:.1f} tok/s")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
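Note: the benchmark above can also be driven programmatically. A minimal sketch, assuming one backend extra is installed and reusing `bench` and `create_generator` exactly as defined in this file; the explicit backend string and the checkpoint name (one of the published models) are example choices, and the `close()` call mirrors the cleanup done in `chat.py`.

```python
import asyncio

from paroquant.cli.benchmark import bench
from paroquant.inference import create_generator

async def run():
    # Pick an explicit backend; the CLI's default "auto" also works.
    gen = create_generator("mlx", "z-lab/Qwen3-0.6B-PARO")
    try:
        tokens, seconds, tps = await bench(gen, "Hello!", max_tokens=64, warmup=1, runs=3)
        print(f"{tokens:.0f} tokens in {seconds:.2f}s -> {tps:.1f} tok/s")
    finally:
        await gen.close()

asyncio.run(run())
```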
paroquant-0.1.0/paroquant/cli/chat.py
ADDED
@@ -0,0 +1,235 @@
+from __future__ import annotations
+
+import argparse
+import asyncio
+import contextlib
+import importlib
+import io
+import os
+import re
+import time
+import warnings
+
+from rich.console import Console
+from rich.live import Live
+from rich.markdown import Markdown
+from rich.panel import Panel
+from rich.theme import Theme
+
+from paroquant.inference import (
+    GenerationParams,
+    build_prompt,
+    create_generator,
+    detect_backend,
+)
+
+
+@contextlib.contextmanager
+def _silence_stderr():
+    with contextlib.redirect_stderr(io.StringIO()):
+        yield
+
+
+_THINKING_LINES = 4
+_SPECIAL_RE = re.compile(r"<\|[^|]+\|>")
+
+
+class _ThinkingTracker:
+    """Parses raw streamed text, shows thinking in a small live box, then folds it."""
+
+    _REFRESH_INTERVAL = 1.0 / 24
+
+    def __init__(self, console: Console, enable_thinking: bool):
+        self.console = console
+        self.enable_thinking = enable_thinking
+        self.raw = ""
+        self.in_thinking = enable_thinking
+        self._live: Live | None = None
+        self._think_start = time.perf_counter()
+        self._last_update = 0.0
+
+    def on_token(self, text: str):
+        self.raw += text
+
+        if self.in_thinking:
+            if "</think>" in self.raw:
+                self.in_thinking = False
+                self.stop()
+                self._start_response_live()
+            else:
+                self._update_thinking_box()
+        else:
+            if self._live is None:
+                self._start_response_live()
+            else:
+                self._update_response()
+
+    def _update_thinking_box(self):
+        now = time.perf_counter()
+        if self._live is not None and now - self._last_update < self._REFRESH_INTERVAL:
+            return
+        self._last_update = now
+
+        lines = self.raw.splitlines()
+        tail = "\n".join(lines[-_THINKING_LINES:])
+        panel = Panel(
+            tail or "...",
+            title=f"thinking ({now - self._think_start:.1f}s)",
+            border_style="dim",
+            width=min(self.console.width, 80),
+            height=_THINKING_LINES + 2,
+        )
+        if self._live is None:
+            self._live = Live(panel, console=self.console, transient=True)
+            self._live.start()
+        else:
+            self._live.update(panel)
+
+    def _start_response_live(self):
+        self._live = Live(Markdown(""), console=self.console, vertical_overflow="visible")
+        self._live.start()
+        self._update_response()
+
+    def _update_response(self):
+        now = time.perf_counter()
+        if now - self._last_update < self._REFRESH_INTERVAL:
+            return
+        self._last_update = now
+
+        response = self._get_response()
+        if response and self._live is not None:
+            self._live.update(Markdown(response))
+
+    def _get_response(self) -> str:
+        if self.enable_thinking and "</think>" in self.raw:
+            text = self.raw.split("</think>", 1)[1].lstrip("\n")
+        elif self.in_thinking:
+            text = ""
+        else:
+            text = self.raw
+        return _SPECIAL_RE.sub("", text)
+
+    def stop(self):
+        if self._live is not None:
+            response = self._get_response()
+            if response:
+                self._live.update(Markdown(response))
+            self._live.stop()
+            self._live = None
+
+    @property
+    def output_text(self) -> str:
+        return self._get_response().strip()
+
+
+def _suppress_library_noise():
+    warnings.filterwarnings("ignore")
+    os.environ.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "1")
+    os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
+    os.environ.setdefault("VLLM_LOGGING_LEVEL", "ERROR")
+    for mod, fn in [
+        ("huggingface_hub", "disable_progress_bars"),
+        ("transformers.utils.logging", "set_verbosity_error"),
+    ]:
+        try:
+            getattr(importlib.import_module(mod), fn)()
+        except Exception:
+            pass
+
+
+async def run_chat_app(model: str, backend: str, params: GenerationParams):
+    _suppress_library_noise()
+
+    console = Console(theme=Theme({"hint": "dim"}))
+
+    backend = detect_backend() if backend == "auto" else backend
+
+    console.print(f"[hint]Loading model ({backend})...[/hint]")
+    generator = create_generator(backend, model)
+    console.clear()
+
+    enable_thinking = False
+
+    banner = (
+        f"[bold]ParoQuant Chat[/bold]\n"
+        f"Backend: [bold]{backend}[/bold]\n"
+        f"Model: [bold]{model}[/bold]\n\n"
+        f"[bold]/think[/bold] · [bold]/clear[/bold] · [bold]/quit[/bold]"
+    )
+    console.print(Panel.fit(banner, border_style="bright_blue"))
+
+    history: list[dict[str, str]] = []
+
+    try:
+        while True:
+            try:
+                user_prompt = console.input(">>> ").strip()
+            except (KeyboardInterrupt, EOFError):
+                console.print("\n[hint]Session closed.[/hint]")
+                break
+
+            if not user_prompt:
+                continue
+            cmd = user_prompt.lower()
+            if cmd in {"/quit", "quit", "/exit", "exit"}:
+                break
+            if cmd == "/clear":
+                history.clear()
+                console.clear()
+                console.print("[hint]Conversation history cleared.[/hint]\n")
+                continue
+            if cmd == "/think":
+                enable_thinking = not enable_thinking
+                console.print(f"[hint]Thinking {'on' if enable_thinking else 'off'}.[/hint]\n")
+                continue
+
+            history.append({"role": "user", "content": user_prompt})
+
+            prompt = build_prompt(generator.tokenizer, history, enable_thinking)
+
+            tracker = _ThinkingTracker(console, enable_thinking)
+            with _silence_stderr():
+                result = await generator.generate(prompt, params, on_text=tracker.on_token)
+            tracker.stop()
+
+            history.append({"role": "assistant", "content": tracker.output_text})
+
+            s = result.stats
+            parts = []
+            if s.ttft is not None:
+                parts.append(f"ttft {s.ttft:.2f}s")
+            parts += [f"{s.num_tokens} tokens", f"{s.tps:.1f} tok/s", f"{s.latency:.1f}s total"]
+            console.print(f"  {' · '.join(parts)}", style="hint", highlight=False)
+            console.print()
+    finally:
+        await generator.close()
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model", type=str, required=True)
+    parser.add_argument("--backend", type=str, default="auto", choices=["auto", "vllm", "transformers", "mlx"])
+    parser.add_argument("--max-tokens", type=int, default=8192)
+    parser.add_argument("--temperature", type=float, default=0.6)
+    parser.add_argument("--top-p", type=float, default=0.95)
+    parser.add_argument("--top-k", type=int, default=0)
+    parser.add_argument("--repetition-penalty", type=float, default=1.0)
+    args = parser.parse_args()
+
+    asyncio.run(
+        run_chat_app(
+            model=args.model,
+            backend=args.backend,
+            params=GenerationParams(
+                max_tokens=args.max_tokens,
+                temperature=args.temperature,
+                top_p=args.top_p,
+                top_k=args.top_k,
+                repetition_penalty=args.repetition_penalty,
+            ),
+        )
+    )
+
+
+if __name__ == "__main__":
+    main()
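Note: the stream-folding rule implemented by `_ThinkingTracker._get_response` above can be checked in isolation. A minimal sketch restating that logic as a pure function, using the same `_SPECIAL_RE` pattern; `visible` is an illustrative name, not part of the package. While `</think>` has not arrived, nothing is rendered; afterwards only the text following the tag is shown, with `<|...|>` special tokens stripped.

```python
import re

_SPECIAL_RE = re.compile(r"<\|[^|]+\|>")

def visible(raw: str, enable_thinking: bool = True) -> str:
    # Mirrors _ThinkingTracker._get_response for a stream received so far.
    if enable_thinking and "</think>" in raw:
        text = raw.split("</think>", 1)[1].lstrip("\n")
    elif enable_thinking:
        text = ""  # still inside the thinking phase: render nothing yet
    else:
        text = raw
    return _SPECIAL_RE.sub("", text)

assert visible("step 1... step 2...") == ""
assert visible("step 1...</think>\nAnswer<|im_end|>") == "Answer"
```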