turboquant_pro-0.2.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- turboquant_pro-0.2.0/.github/workflows/ci.yml +37 -0
- turboquant_pro-0.2.0/.gitignore +13 -0
- turboquant_pro-0.2.0/CITATION.cff +35 -0
- turboquant_pro-0.2.0/LICENSE +21 -0
- turboquant_pro-0.2.0/PKG-INFO +290 -0
- turboquant_pro-0.2.0/README.md +248 -0
- turboquant_pro-0.2.0/articles/turboquant-linkedin.md +113 -0
- turboquant_pro-0.2.0/articles/turboquant-reddit-ml.md +108 -0
- turboquant_pro-0.2.0/benchmarks/benchmark.py +192 -0
- turboquant_pro-0.2.0/benchmarks/benchmark_llama.py +149 -0
- turboquant_pro-0.2.0/benchmarks/benchmark_pgvector.py +412 -0
- turboquant_pro-0.2.0/docs/turboquant-expansion-plan.md +244 -0
- turboquant_pro-0.2.0/examples/basic_usage.py +47 -0
- turboquant_pro-0.2.0/examples/llama_integration.py +115 -0
- turboquant_pro-0.2.0/examples/streaming_cache.py +58 -0
- turboquant_pro-0.2.0/experiments/deploy_and_run.py +81 -0
- turboquant_pro-0.2.0/experiments/run_reviewer_experiments.py +968 -0
- turboquant_pro-0.2.0/paper/fig_cosine_comparison.pdf +0 -0
- turboquant_pro-0.2.0/paper/fig_eigenspectrum.pdf +0 -0
- turboquant_pro-0.2.0/paper/fig_pareto_frontier.pdf +0 -0
- turboquant_pro-0.2.0/paper/fig_pipeline.pdf +0 -0
- turboquant_pro-0.2.0/paper/fig_recall_comparison.pdf +0 -0
- turboquant_pro-0.2.0/paper/generate_figures.py +284 -0
- turboquant_pro-0.2.0/paper/pca_matryoshka_ieee_tai.docx +0 -0
- turboquant_pro-0.2.0/paper/pca_matryoshka_ieee_tai.tex +689 -0
- turboquant_pro-0.2.0/paper/references.bib +181 -0
- turboquant_pro-0.2.0/paper/turboquant_pro.tex +286 -0
- turboquant_pro-0.2.0/pyproject.toml +69 -0
- turboquant_pro-0.2.0/tests/__init__.py +0 -0
- turboquant_pro-0.2.0/tests/test_cache.py +187 -0
- turboquant_pro-0.2.0/tests/test_core.py +477 -0
- turboquant_pro-0.2.0/tests/test_cuda.py +79 -0
- turboquant_pro-0.2.0/tests/test_nats_codec.py +267 -0
- turboquant_pro-0.2.0/tests/test_pgvector.py +344 -0
- turboquant_pro-0.2.0/turboquant_pro/__init__.py +28 -0
- turboquant_pro-0.2.0/turboquant_pro/core.py +900 -0
- turboquant_pro-0.2.0/turboquant_pro/cuda_kernels.py +194 -0
- turboquant_pro-0.2.0/turboquant_pro/nats_codec.py +239 -0
- turboquant_pro-0.2.0/turboquant_pro/pgvector.py +687 -0

turboquant_pro-0.2.0/.github/workflows/ci.yml

@@ -0,0 +1,37 @@
name: CI

on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12"]

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev]"

      - name: Lint with ruff
        run: ruff check turboquant_pro/ tests/ benchmarks/

      - name: Format check with black
        run: black --check turboquant_pro/ tests/ benchmarks/

      - name: Run tests
        run: pytest tests/test_core.py tests/test_cache.py tests/test_pgvector.py tests/test_nats_codec.py -v
        # GPU tests (test_cuda.py) are skipped in CI -- no CUDA available

turboquant_pro-0.2.0/CITATION.cff

@@ -0,0 +1,35 @@
cff-version: 1.2.0
message: "If you use TurboQuant Pro in your research, please cite it as below."
title: "TurboQuant Pro: Open-Source PolarQuant+QJL Implementation for LLM KV Cache Compression"
type: software
authors:
  - family-names: Bond
    given-names: Andrew H.
    email: andrew.bond@sjsu.edu
    affiliation: San Jose State University
version: 0.2.0
date-released: 2025-06-01
license: MIT
url: "https://github.com/andrewbond/turboquant-pro"
repository-code: "https://github.com/andrewbond/turboquant-pro"
keywords:
  - llm
  - kv-cache
  - quantization
  - compression
  - transformer
  - inference
references:
  - type: conference-paper
    title: "Sub-linear Memory Inference via PolarQuant and QJL"
    authors:
      - family-names: Zandieh
        given-names: Amir
      - family-names: Han
        given-names: Insu
      - family-names: Daliri
        given-names: Majid
      - family-names: Karbasi
        given-names: Amin
    year: 2026
    collection-title: "International Conference on Learning Representations (ICLR)"

turboquant_pro-0.2.0/LICENSE

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Andrew H. Bond

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

turboquant_pro-0.2.0/PKG-INFO

@@ -0,0 +1,290 @@
Metadata-Version: 2.4
Name: turboquant-pro
Version: 0.2.0
Summary: TurboQuant compression for LLM KV caches, pgvector embeddings, and NATS transport — 5-10x memory reduction
Project-URL: Homepage, https://github.com/ahb-sjsu/turboquant-pro
Project-URL: Documentation, https://github.com/ahb-sjsu/turboquant-pro#readme
Project-URL: Repository, https://github.com/ahb-sjsu/turboquant-pro
Project-URL: Issues, https://github.com/ahb-sjsu/turboquant-pro/issues
Author-email: "Andrew H. Bond" <andrew.bond@sjsu.edu>
License: MIT
License-File: LICENSE
Keywords: compression,cuda,embeddings,gpu,inference,kv-cache,llm,nats,pgvector,quantization,transformer,vector-database
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.9
Requires-Dist: numpy>=1.21
Provides-Extra: all
Requires-Dist: cupy-cuda12x; extra == 'all'
Requires-Dist: nats-py>=2.0; extra == 'all'
Requires-Dist: psycopg2-binary>=2.9; extra == 'all'
Provides-Extra: dev
Requires-Dist: black>=23.0; extra == 'dev'
Requires-Dist: pytest>=7.0; extra == 'dev'
Requires-Dist: ruff>=0.1.0; extra == 'dev'
Provides-Extra: gpu
Requires-Dist: cupy-cuda12x; extra == 'gpu'
Provides-Extra: nats
Requires-Dist: nats-py>=2.0; extra == 'nats'
Provides-Extra: pgvector
Requires-Dist: psycopg2-binary>=2.9; extra == 'pgvector'
Description-Content-Type: text/markdown

# TurboQuant Pro

[PyPI](https://pypi.org/project/turboquant-pro/)
[CI](https://github.com/ahb-sjsu/turboquant-pro/actions)
[License: MIT](LICENSE)
[Python 3.9+](https://python.org)

**First open-source implementation of TurboQuant (Zandieh et al., ICLR 2026) for LLM KV cache compression, pgvector embedding compression, and NATS transport.**

5-10x memory reduction with 0.978 cosine similarity at the 3-bit setting. Works on consumer GPUs (Volta and newer) and on CPU.

## Installation

```bash
pip install turboquant-pro

# With GPU support (CUDA 12.x); quotes keep zsh from globbing the brackets
pip install "turboquant-pro[gpu]"

# With pgvector support (PostgreSQL)
pip install "turboquant-pro[pgvector]"

# With NATS transport support
pip install "turboquant-pro[nats]"

# Everything
pip install "turboquant-pro[all]"
```

## Quick Start

```python
import numpy as np
from turboquant_pro import TurboQuantKV

# Any KV tensor in (batch, heads, seq, head_dim) layout
kv_tensor = np.random.randn(1, 16, 1024, 256).astype(np.float32)

tq = TurboQuantKV(head_dim=256, n_heads=16, bits=3, use_gpu=False)
compressed = tq.compress(kv_tensor, packed=True)  # 5.1x smaller
reconstructed = tq.decompress(compressed)         # cos_sim > 0.978
```

## How It Works

TurboQuant Pro implements the PolarQuant + QJL algorithm from Zandieh et al. (ICLR 2026) for compressing the key-value cache in transformer inference:

```
KV Tensor (B, H, S, D)
          |
  [L2 Norm Extract]
          |
  [Unit Normalize]
          |
  [Random Rotation Pi]         <-- QR of Gaussian matrix
          |
  [Lloyd-Max Scalar Quantize]  <-- b-bit per coordinate
          |
  [Bit-Pack Indices]           <-- 8x3-bit = 3 bytes
          |
  CompressedKV {indices, norms, bits}
          |
  [Unpack + Lookup]
          |
  [Inverse Rotation]
          |
  [Scale by Norms]
          |
  Reconstructed KV Tensor
```

**Key idea**: Unit normalization places each head-dimension vector on the unit hypersphere, and a random orthogonal rotation then makes its coordinates approximately i.i.d. Gaussian. This enables efficient scalar quantization with precomputed Lloyd-Max codebooks.
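
A rough way to see this end to end (an editorial sketch in plain NumPy, not the library's internals; the 3-bit levels are the standard Lloyd-Max output levels for a unit Gaussian, rescaled to the per-coordinate variance 1/D of a rotated unit vector):

```python
import numpy as np

rng = np.random.default_rng(0)
D = 256  # head dimension

# Random orthogonal rotation: QR decomposition of a Gaussian matrix
Q, _ = np.linalg.qr(rng.standard_normal((D, D)))

# 3-bit Lloyd-Max output levels for N(0, 1), scaled to std 1/sqrt(D)
levels = np.array([-2.152, -1.344, -0.756, -0.245,
                    0.245,  0.756,  1.344,  2.152]) / np.sqrt(D)

x = rng.standard_normal(D)
norm = np.linalg.norm(x)              # stored separately alongside the indices
u = (x / norm) @ Q                    # unit-normalize, then rotate

idx = np.abs(u[:, None] - levels[None, :]).argmin(axis=1)  # 3-bit indices
u_hat = levels[idx]                   # dequantize by codebook lookup
x_hat = (u_hat @ Q.T) * norm          # inverse rotation, rescale by norm

cos = x @ x_hat / (norm * np.linalg.norm(x_hat))
print(f"cosine similarity: {cos:.3f}")  # ~0.98, matching the 3-bit row below
```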

## Benchmark Results

Compression quality and ratios on random Gaussian KV tensors (head_dim=256, n_heads=16, fp16 baseline); a back-of-the-envelope check of the ratios follows the table:

| Bits | Compression Ratio | Cosine Similarity |      MSE |
|------|------------------:|------------------:|---------:|
| 2    |              7.5x |             0.926 | 0.001178 |
| 3    |              5.1x |             0.978 | 0.000349 |
| 4    |              3.9x |             0.995 | 0.000082 |
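
Those ratios track simple bit arithmetic (an editorial check; the small gap from the reported figures is codebook/metadata overhead that this sketch ignores):

```python
# fp16 baseline: 16 bits per coordinate. Packed: b bits per coordinate
# plus one fp16 norm per head_dim=256 vector.
D = 256
for b in (2, 3, 4):
    payload_bits = b * D + 16
    print(f"{b}-bit: ~{16 * D / payload_bits:.1f}x")  # ~7.8x, ~5.2x, ~3.9x
```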

Memory estimates for popular models at 8K context (3-bit, packed):

| Model         | Original | Compressed |    Saved | Ratio |
|---------------|---------:|-----------:|---------:|------:|
| Llama 3.1 8B  | 0.500 GB |   0.098 GB | 0.402 GB |  5.1x |
| Llama 3.1 70B | 1.250 GB |   0.244 GB | 1.006 GB |  5.1x |
| Gemma 4 27B   | 1.125 GB |   0.220 GB | 0.905 GB |  5.1x |
| Mistral 7B    | 2.000 GB |   0.391 GB | 1.609 GB |  5.1x |

## Streaming Cache

TurboQuant Pro includes a streaming tiered cache for autoregressive generation (a sizing sketch follows the example below):

- **L1 (hot window)**: Recent tokens stored uncompressed for zero-latency attention
- **L2 (cold storage)**: Older tokens bit-packed at b-bit precision (~5x compression)

```python
from turboquant_pro import TurboQuantKVCache

cache = TurboQuantKVCache(head_dim=256, n_heads=16, bits=3, hot_window=512)

for token in tokens:                            # `tokens` and `model` come from
    k, v = model.forward_one(token)             # your own inference loop
    cache.append(k, v)                          # auto-compresses old entries
    keys = cache.get_keys(0, cache.length)      # seamless hot+cold retrieval
    values = cache.get_values(0, cache.length)
```
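
For a feel of what the tiering costs relative to compressing everything (an editorial back-of-the-envelope; assumes fp16 hot entries and the ~5.2x packed ratio derived above):

```python
# Effective compression at 8K context with a 512-token fp16 hot window:
seq_len, hot = 8192, 512
bits_fp16 = 16.0
bits_cold = bits_fp16 / 5.2           # ~3.1 bits/coordinate once packed
avg_bits = (hot * bits_fp16 + (seq_len - hot) * bits_cold) / seq_len
print(f"effective ratio: ~{bits_fp16 / avg_bits:.1f}x")  # ~4.1x overall
```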

## pgvector Embedding Compression

TurboQuant Pro can compress high-dimensional embeddings stored in PostgreSQL pgvector, reducing storage by 10x (from float32) or 5x (from float16):

```python
import numpy as np
from turboquant_pro import TurboQuantPGVector

tq = TurboQuantPGVector(dim=1024, bits=3, seed=42)

# Compress a single embedding (4096 bytes -> 388 bytes)
embedding_float32 = np.random.randn(1024).astype(np.float32)
compressed = tq.compress_embedding(embedding_float32)

# Store as bytea in PostgreSQL
bytea_data = compressed.to_pgbytea()

# Batch compress for bulk operations
embeddings_array = np.random.randn(1000, 1024).astype(np.float32)
compressed_batch = tq.compress_batch(embeddings_array)

# Search compressed embeddings
query = np.random.randn(1024).astype(np.float32)
scores = tq.compressed_cosine_similarity(query, compressed_batch)

# PostgreSQL integration (`conn` is an open psycopg2 connection,
# `ids`/`embeddings` your own corpus)
tq.create_compressed_table(conn, "embeddings_compressed")
tq.insert_compressed(conn, "embeddings_compressed", ids, embeddings)
results = tq.search_compressed(conn, "embeddings_compressed", query, top_k=10)
```

**Storage savings for real workloads (1024-dim BGE-M3, 3-bit):**

| Dataset       | Vectors |  Float32 | Compressed | Ratio |    Saved |
|---------------|--------:|---------:|-----------:|------:|---------:|
| RAG chunks    |    112K |   437 MB |      41 MB | 10.5x |   396 MB |
| Ethics chunks |    2.4M | 9,375 MB |     893 MB | 10.5x | 8,482 MB |
| Publications  |    824K | 3,222 MB |     307 MB | 10.5x | 2,915 MB |

## NATS Transport Codec

Compress embeddings for transmission over NATS JetStream or any message bus:

```python
import numpy as np
from turboquant_pro import TurboQuantNATSCodec

codec = TurboQuantNATSCodec(dim=1024, bits=3, seed=42)

# Encode for transport (4096 bytes -> 392 bytes)
embedding_float32 = np.random.randn(1024).astype(np.float32)
payload = codec.encode(embedding_float32)

# Decode on the receiving end
embedding_approx = codec.decode(payload)

# Batch operations
embeddings_2d = np.random.randn(100, 1024).astype(np.float32)
payloads = codec.encode_batch(embeddings_2d)
embeddings = codec.decode_batch(payloads)

# Check compression stats
print(codec.stats())
# {'dim': 1024, 'bits': 3, 'payload_bytes': 392,
#  'float32_bytes': 4096, 'compression_ratio': 10.45, ...}
```
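
Putting the codec on the wire looks roughly like this (an editorial sketch against the public nats-py API; the subject name and server URL are placeholders, and both sides must build the codec with the same dim/bits/seed):

```python
import asyncio

import nats
import numpy as np

from turboquant_pro import TurboQuantNATSCodec


async def main():
    codec = TurboQuantNATSCodec(dim=1024, bits=3, seed=42)
    nc = await nats.connect("nats://localhost:4222")

    async def handler(msg):
        embedding = codec.decode(msg.data)  # 392-byte payload -> np.ndarray
        print("received embedding with norm", float(np.linalg.norm(embedding)))

    await nc.subscribe("embeddings.compressed", cb=handler)

    embedding = np.random.randn(1024).astype(np.float32)
    await nc.publish("embeddings.compressed", codec.encode(embedding))
    await nc.flush()
    await nc.drain()


asyncio.run(main())
```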

## Components

| Class | Purpose |
|-------|---------|
| `TurboQuantKV` | Stateless compress/decompress with optional bit-packing |
| `TurboQuantKVCache` | Streaming L1/L2 tiered cache for autoregressive inference |
| `CompressedKV` | Container dataclass for compressed tensors |
| `TurboQuantPGVector` | Compress pgvector embeddings for PostgreSQL storage |
| `CompressedEmbedding` | Container for a single compressed embedding |
| `TurboQuantNATSCodec` | Encode/decode embeddings for NATS transport |

## Integration Options

### llama.cpp / llama-cpp-python

See `examples/llama_integration.py` for a wrapper pattern that intercepts KV tensors and stores them in a `TurboQuantKVCache`.
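
The shape of that wrapper is roughly the following (an editorial sketch, not the shipped example; how you obtain per-token K/V tensors depends on your llama-cpp-python build):

```python
import numpy as np

from turboquant_pro import TurboQuantKVCache


class CompressedKVWrapper:
    """Routes intercepted per-token K/V tensors into a tiered cache."""

    def __init__(self, head_dim: int, n_heads: int, bits: int = 3):
        self.cache = TurboQuantKVCache(
            head_dim=head_dim, n_heads=n_heads, bits=bits, hot_window=512
        )

    def on_token(self, k: np.ndarray, v: np.ndarray) -> None:
        # Called once per generated token; entries that age out of the
        # hot window are compressed automatically by the cache.
        self.cache.append(k, v)

    def full_kv(self):
        # Transparent view over both the hot (raw) and cold (packed) tiers.
        n = self.cache.length
        return self.cache.get_keys(0, n), self.cache.get_values(0, n)
```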

### vLLM

TurboQuant Pro can be integrated into vLLM's PagedAttention by compressing cold KV pages:

```python
# Conceptual: compress a page of KV cache
tq = TurboQuantKV(head_dim=128, n_heads=8, bits=3)
compressed_page = tq.compress(kv_page, packed=True)
# Store compressed_page instead of raw fp16
```

### HuggingFace Transformers

Wrap the KV cache used by `generate()` by subclassing the model's attention or its cache class (a cache-based sketch follows the snippet below):

```python
# Conceptual: override the cache update in the attention layer
compressed_k = tq.compress(key_states, packed=True)
compressed_v = tq.compress(value_states, packed=True)
# Decompress when computing attention scores:
# key_states = tq.decompress(compressed_k)
```
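
One concrete way to prototype this on recent `transformers` releases (an editorial sketch using the `DynamicCache.update()` hook; it measures the accuracy impact of compression rather than saving memory, since `DynamicCache` still holds the raw tensors, and the tensor/NumPy conversion is simplified):

```python
import torch
from transformers import DynamicCache

from turboquant_pro import TurboQuantKV


class TurboQuantDynamicCache(DynamicCache):
    """Round-trips cached KV states through the quantizer on every update."""

    def __init__(self, head_dim: int, n_heads: int, bits: int = 3):
        super().__init__()
        self.tq = TurboQuantKV(head_dim=head_dim, n_heads=n_heads, bits=bits)

    def update(self, key_states, value_states, layer_idx, cache_kwargs=None):
        keys, values = super().update(key_states, value_states, layer_idx, cache_kwargs)
        # Compress + decompress so attention sees exactly what it would see
        # if the cache were stored in packed form.
        k_hat = self.tq.decompress(self.tq.compress(keys.float().cpu().numpy(), packed=True))
        v_hat = self.tq.decompress(self.tq.compress(values.float().cpu().numpy(), packed=True))
        keys = torch.from_numpy(k_hat).to(key_states.device, key_states.dtype)
        values = torch.from_numpy(v_hat).to(value_states.device, value_states.dtype)
        return keys, values
```

Recent `transformers` releases accept such a cache instance via `model.generate(..., past_key_values=cache)`.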

## GPU Acceleration

When CuPy is available, TurboQuant Pro uses CUDA RawKernels for bit-packing operations. All kernels are Volta-compatible (compute capability 7.0+).

```python
tq = TurboQuantKV(head_dim=256, n_heads=16, bits=3, use_gpu=True)
# Automatically uses CuPy for rotation, quantization, and bit-packing
```

Falls back to NumPy automatically when CuPy is not installed.
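
The fallback follows the usual optional-dependency idiom (an editorial sketch of the pattern, not the package's actual module layout):

```python
# Pick the array backend once at import time; downstream code calls `xp`
# so the same routines run on GPU (CuPy) or CPU (NumPy).
try:
    import cupy as xp
    HAS_GPU = True
except ImportError:
    import numpy as xp
    HAS_GPU = False


def rotate(vectors, rotation):
    # Identical call for cupy and numpy arrays.
    return xp.matmul(vectors, rotation)
```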

## Citation

If you use TurboQuant Pro in your research, please cite both this implementation and the original algorithm:

```bibtex
@software{bond2025turboquantkv,
  title={TurboQuant Pro: Open-Source PolarQuant+QJL Implementation for LLM KV Cache Compression},
  author={Bond, Andrew H.},
  year={2025},
  url={https://github.com/ahb-sjsu/turboquant-pro},
  license={MIT}
}

@inproceedings{zandieh2026sublinear,
  title={Sub-linear Memory Inference via PolarQuant and QJL},
  author={Zandieh, Amir and Han, Insu and Daliri, Majid and Karbasi, Amin},
  booktitle={International Conference on Learning Representations (ICLR)},
  year={2026}
}
```

## Acknowledgments

- **Algorithm**: Zandieh, Han, Daliri, and Karbasi -- "Sub-linear Memory Inference via PolarQuant and QJL" (ICLR 2026)
- **Origin**: Adapted from the Theory Radar project's TurboBeam beam-search compression, which first implemented PolarQuant+QJL in Python
- **Author**: Andrew H. Bond, San Jose State University

## License

MIT License. See [LICENSE](LICENSE) for details.

turboquant_pro-0.2.0/README.md

@@ -0,0 +1,248 @@
(identical to the Markdown long description embedded in PKG-INFO above)