tpu-inference 0.11.1.dev202511150811__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tpu-inference might be problematic.
- tests/__init__.py +0 -0
- tests/core/__init__.py +0 -0
- tests/core/test_core_tpu.py +513 -0
- tests/core/test_disagg_executor.py +60 -0
- tests/core/test_disagg_utils.py +53 -0
- tests/core/test_dp_scheduler.py +899 -0
- tests/core/test_init.py +49 -0
- tests/kernels/__init__.py +0 -0
- tests/kernels/fused_moe_v1_test.py +105 -0
- tests/kernels/mla_v1_test.py +396 -0
- tests/kernels/quantized_matmul_kernel_test.py +191 -0
- tests/kernels/ragged_kv_cache_update_v2_test.py +234 -0
- tests/kernels/ragged_paged_attention_kernel_v2_test.py +400 -0
- tests/kernels/ragged_paged_attention_kernel_v3_hd64_test.py +549 -0
- tests/kernels/ragged_paged_attention_kernel_v3_test.py +504 -0
- tests/lora/__init__.py +0 -0
- tests/lora/conftest.py +32 -0
- tests/lora/test_bgmv.py +43 -0
- tests/lora/test_layers.py +654 -0
- tests/lora/test_lora.py +133 -0
- tests/lora/utils.py +96 -0
- tests/test_base.py +201 -0
- tests/test_envs.py +182 -0
- tests/test_quantization.py +836 -0
- tests/test_tpu_info.py +120 -0
- tests/test_utils.py +236 -0
- tpu_inference/__init__.py +34 -0
- tpu_inference/core/__init__.py +0 -0
- tpu_inference/core/core_tpu.py +786 -0
- tpu_inference/core/disagg_executor.py +118 -0
- tpu_inference/core/disagg_utils.py +51 -0
- tpu_inference/core/sched/__init__.py +0 -0
- tpu_inference/core/sched/dp_scheduler.py +523 -0
- tpu_inference/distributed/__init__.py +0 -0
- tpu_inference/distributed/jax_parallel_state.py +67 -0
- tpu_inference/distributed/tpu_connector.py +728 -0
- tpu_inference/distributed/utils.py +59 -0
- tpu_inference/env_override.py +9 -0
- tpu_inference/envs.py +107 -0
- tpu_inference/executors/__init__.py +0 -0
- tpu_inference/executors/ray_distributed_executor.py +362 -0
- tpu_inference/experimental/__init__.py +0 -0
- tpu_inference/experimental/llama3_jax_stashed.py +258 -0
- tpu_inference/kernels/__init__.py +0 -0
- tpu_inference/kernels/collectives/__init__.py +0 -0
- tpu_inference/kernels/collectives/all_gather_matmul.py +735 -0
- tpu_inference/kernels/collectives/all_gather_matmul_tuned_block_sizes.py +60 -0
- tpu_inference/kernels/collectives/util.py +47 -0
- tpu_inference/kernels/flash_attention/__init__.py +0 -0
- tpu_inference/kernels/flash_attention/kernel.py +772 -0
- tpu_inference/kernels/fused_moe/__init__.py +0 -0
- tpu_inference/kernels/fused_moe/v1/__init__.py +0 -0
- tpu_inference/kernels/fused_moe/v1/kernel.py +1035 -0
- tpu_inference/kernels/mla/__init__.py +0 -0
- tpu_inference/kernels/mla/v1/__init__.py +0 -0
- tpu_inference/kernels/mla/v1/kernel.py +1349 -0
- tpu_inference/kernels/quantized_matmul/__init__.py +0 -0
- tpu_inference/kernels/quantized_matmul/kernel.py +395 -0
- tpu_inference/kernels/quantized_matmul/tuned_block_sizes.py +609 -0
- tpu_inference/kernels/quantized_matmul/util.py +58 -0
- tpu_inference/kernels/ragged_paged_attention/__init__.py +0 -0
- tpu_inference/kernels/ragged_paged_attention/v2/__init__.py +0 -0
- tpu_inference/kernels/ragged_paged_attention/v2/kernel.py +875 -0
- tpu_inference/kernels/ragged_paged_attention/v2/ragged_kv_cache_update.py +287 -0
- tpu_inference/kernels/ragged_paged_attention/v2/tuned_block_sizes.py +1482 -0
- tpu_inference/kernels/ragged_paged_attention/v3/__init__.py +0 -0
- tpu_inference/kernels/ragged_paged_attention/v3/kernel.py +1478 -0
- tpu_inference/kernels/ragged_paged_attention/v3/kernel_hd64.py +1482 -0
- tpu_inference/kernels/ragged_paged_attention/v3/tuned_block_sizes.py +4147 -0
- tpu_inference/kernels/ragged_paged_attention/v3/tuned_block_sizes_hd64.py +367 -0
- tpu_inference/kernels/ragged_paged_attention/v3/util.py +51 -0
- tpu_inference/layers/__init__.py +0 -0
- tpu_inference/layers/common/__init__.py +0 -0
- tpu_inference/layers/common/attention_interface.py +390 -0
- tpu_inference/layers/common/attention_metadata.py +34 -0
- tpu_inference/layers/common/binary_search.py +295 -0
- tpu_inference/layers/common/quant_methods.py +8 -0
- tpu_inference/layers/common/sharding.py +582 -0
- tpu_inference/layers/jax/__init__.py +0 -0
- tpu_inference/layers/jax/attention/__init__.py +0 -0
- tpu_inference/layers/jax/attention/attention.py +255 -0
- tpu_inference/layers/jax/attention/deepseek_v3_attention.py +354 -0
- tpu_inference/layers/jax/attention/gpt_oss_attention.py +262 -0
- tpu_inference/layers/jax/attention/llama4_attention.py +153 -0
- tpu_inference/layers/jax/base.py +151 -0
- tpu_inference/layers/jax/constants.py +88 -0
- tpu_inference/layers/jax/layers.py +301 -0
- tpu_inference/layers/jax/misc.py +16 -0
- tpu_inference/layers/jax/moe/__init__.py +0 -0
- tpu_inference/layers/jax/moe/deepseek_v3_moe.py +608 -0
- tpu_inference/layers/jax/moe/gpt_oss_moe.py +185 -0
- tpu_inference/layers/jax/moe/moe.py +209 -0
- tpu_inference/layers/jax/rope.py +280 -0
- tpu_inference/layers/jax/rope_interface.py +214 -0
- tpu_inference/layers/jax/sample/__init__.py +0 -0
- tpu_inference/layers/jax/sample/rejection_sampler.py +515 -0
- tpu_inference/layers/jax/sample/sampling.py +96 -0
- tpu_inference/layers/jax/sample/sampling_metadata.py +76 -0
- tpu_inference/layers/jax/transformer_block.py +107 -0
- tpu_inference/layers/vllm/__init__.py +0 -0
- tpu_inference/layers/vllm/attention.py +221 -0
- tpu_inference/layers/vllm/fused_moe.py +507 -0
- tpu_inference/layers/vllm/linear_common.py +186 -0
- tpu_inference/layers/vllm/quantization/__init__.py +39 -0
- tpu_inference/layers/vllm/quantization/awq.py +207 -0
- tpu_inference/layers/vllm/quantization/common.py +105 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/__init__.py +0 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/compressed_tensors.py +120 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/compressed_tensors_moe.py +203 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/__init__.py +0 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +208 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +136 -0
- tpu_inference/layers/vllm/quantization/mxfp4.py +266 -0
- tpu_inference/layers/vllm/quantization/unquantized.py +386 -0
- tpu_inference/layers/vllm/sharding.py +230 -0
- tpu_inference/logger.py +10 -0
- tpu_inference/lora/__init__.py +0 -0
- tpu_inference/lora/torch_lora_ops.py +103 -0
- tpu_inference/lora/torch_punica_tpu.py +311 -0
- tpu_inference/mock/__init__.py +0 -0
- tpu_inference/mock/vllm_config_utils.py +28 -0
- tpu_inference/mock/vllm_envs.py +1219 -0
- tpu_inference/mock/vllm_logger.py +212 -0
- tpu_inference/mock/vllm_logging_utils.py +15 -0
- tpu_inference/models/__init__.py +0 -0
- tpu_inference/models/common/__init__.py +0 -0
- tpu_inference/models/common/model_loader.py +444 -0
- tpu_inference/models/jax/__init__.py +0 -0
- tpu_inference/models/jax/deepseek_v3.py +868 -0
- tpu_inference/models/jax/gpt_oss.py +492 -0
- tpu_inference/models/jax/jax_intermediate_tensor.py +79 -0
- tpu_inference/models/jax/llama3.py +375 -0
- tpu_inference/models/jax/llama4.py +629 -0
- tpu_inference/models/jax/llama_eagle3.py +333 -0
- tpu_inference/models/jax/phi3.py +376 -0
- tpu_inference/models/jax/qwen2.py +375 -0
- tpu_inference/models/jax/qwen2_5_vl.py +1103 -0
- tpu_inference/models/jax/qwen3.py +302 -0
- tpu_inference/models/jax/utils/__init__.py +0 -0
- tpu_inference/models/jax/utils/file_utils.py +96 -0
- tpu_inference/models/jax/utils/multi_modal_utils.py +163 -0
- tpu_inference/models/jax/utils/quantization/__init__.py +0 -0
- tpu_inference/models/jax/utils/quantization/configs/fp8_all_modules_w_only.yaml +5 -0
- tpu_inference/models/jax/utils/quantization/configs/fp8_default.yaml +6 -0
- tpu_inference/models/jax/utils/quantization/configs/int8_all_modules_w_only.yaml +5 -0
- tpu_inference/models/jax/utils/quantization/configs/int8_default.yaml +6 -0
- tpu_inference/models/jax/utils/quantization/mxfp4_utils.py +105 -0
- tpu_inference/models/jax/utils/quantization/quantization_utils.py +653 -0
- tpu_inference/models/jax/utils/weight_utils.py +529 -0
- tpu_inference/models/vllm/__init__.py +0 -0
- tpu_inference/models/vllm/vllm_model_wrapper.py +286 -0
- tpu_inference/models/vllm/vllm_model_wrapper_context.py +45 -0
- tpu_inference/platforms/__init__.py +2 -0
- tpu_inference/platforms/tpu_platform.py +269 -0
- tpu_inference/runner/__init__.py +0 -0
- tpu_inference/runner/block_table.py +122 -0
- tpu_inference/runner/compilation_manager.py +780 -0
- tpu_inference/runner/input_batch.py +435 -0
- tpu_inference/runner/kv_cache.py +132 -0
- tpu_inference/runner/kv_cache_manager.py +479 -0
- tpu_inference/runner/lora_utils.py +92 -0
- tpu_inference/runner/multimodal_manager.py +217 -0
- tpu_inference/runner/persistent_batch_manager.py +244 -0
- tpu_inference/runner/speculative_decoding_manager.py +248 -0
- tpu_inference/runner/structured_decoding_manager.py +88 -0
- tpu_inference/runner/tpu_runner.py +1620 -0
- tpu_inference/runner/utils.py +426 -0
- tpu_inference/spec_decode/__init__.py +0 -0
- tpu_inference/spec_decode/jax/__init__.py +0 -0
- tpu_inference/spec_decode/jax/eagle3.py +367 -0
- tpu_inference/tpu_info.py +77 -0
- tpu_inference/utils.py +317 -0
- tpu_inference/worker/__init__.py +0 -0
- tpu_inference/worker/tpu_worker.py +321 -0
- tpu_inference-0.11.1.dev202511150811.dist-info/METADATA +107 -0
- tpu_inference-0.11.1.dev202511150811.dist-info/RECORD +179 -0
- tpu_inference-0.11.1.dev202511150811.dist-info/WHEEL +5 -0
- tpu_inference-0.11.1.dev202511150811.dist-info/licenses/LICENSE +201 -0
- tpu_inference-0.11.1.dev202511150811.dist-info/top_level.txt +2 -0

tpu_inference/kernels/ragged_paged_attention/v3/tuned_block_sizes_hd64.py
@@ -0,0 +1,367 @@
"""Auto-tuned block sizes for ragged paged attention."""

import jax.numpy as jnp

from tpu_inference.kernels.ragged_paged_attention.v3.util import (
    align_to, get_dtype_packing, get_tpu_version, next_power_of_2)
from tpu_inference.logger import init_logger
from tpu_inference.utils import get_device_name

logger = init_logger(__name__)

# key:
# - device_name
# - page_size
# - q_{q_dtype_name}_kv_{kv_dtype_name}
# - q_head-{num_q_heads}_kv_head-{num_kv_heads}_head-{head_dim}
# - max_model_len
# value:
# - (num_kv_pages_per_block, num_queries_per_block)
TUNED_BLOCK_SIZES = {
    'TPU v5e': {
        128: {
            'q_bfloat16_kv_bfloat16': {
                'q_head-8_kv_head-2_head-64': {
                    4096: (16, 32),
                    8192: (32, 128),
                    128: (1, 16),
                    256: (1, 64),
                    512: (4, 128),
                    1024: (4, 16),
                    2048: (16, 64),
                },
                'q_head-64_kv_head-8_head-64': {
                    128: (1, 16),
                    4096: (16, 16),
                    1024: (8, 8),
                    256: (2, 16),
                    8192: (16, 32),
                    2048: (8, 16),
                    512: (4, 8),
                },
                'q_head-32_kv_head-4_head-64': {
                    256: (2, 8),
                    512: (4, 32),
                    1024: (8, 8),
                    2048: (16, 8),
                    4096: (32, 32),
                    8192: (16, 32),
                    128: (1, 8),
                },
                'q_head-16_kv_head-2_head-64': {
                    128: (1, 128),
                    256: (2, 128),
                    512: (4, 32),
                    1024: (8, 16),
                    2048: (8, 32),
                    4096: (16, 32),
                    8192: (16, 32),
                },
            }
        },
        256: {
            'q_bfloat16_kv_bfloat16': {
                'q_head-16_kv_head-2_head-64': {
                    1024: (4, 32),
                    2048: (8, 16),
                    4096: (8, 32),
                    8192: (16, 16),
                    256: (1, 128),
                    512: (2, 128),
                },
                'q_head-64_kv_head-8_head-64': {
                    256: (1, 8),
                    512: (2, 32),
                    1024: (4, 16),
                    2048: (8, 8),
                    4096: (8, 32),
                    8192: (8, 32),
                },
                'q_head-8_kv_head-2_head-64': {
                    256: (1, 8),
                    512: (1, 32),
                    1024: (4, 32),
                    2048: (8, 64),
                    4096: (8, 16),
                    8192: (16, 32),
                },
                'q_head-32_kv_head-4_head-64': {
                    256: (1, 16),
                    512: (2, 16),
                    1024: (4, 32),
                    2048: (8, 16),
                    4096: (8, 16),
                    8192: (8, 32),
                },
            }
        },
    },
    'TPU v6e': {
        128: {
            'q_bfloat16_kv_bfloat16': {
                'q_head-8_kv_head-2_head-64': {
                    4096: (32, 32),
                    8192: (32, 128),
                    128: (1, 64),
                    256: (2, 128),
                    512: (4, 256),
                    1024: (8, 16),
                    2048: (16, 32),
                },
                'q_head-64_kv_head-8_head-64': {
                    128: (1, 32),
                    4096: (32, 16),
                    1024: (8, 32),
                    256: (2, 16),
                    8192: (32, 8),
                    2048: (16, 32),
                    512: (4, 32),
                },
                'q_head-32_kv_head-4_head-64': {
                    256: (2, 16),
                    512: (4, 128),
                    1024: (8, 64),
                    2048: (16, 32),
                    4096: (16, 16),
                    8192: (32, 32),
                    128: (1, 64),
                },
                'q_head-16_kv_head-2_head-64': {
                    128: (1, 128),
                    256: (2, 128),
                    512: (4, 128),
                    1024: (8, 64),
                    2048: (8, 32),
                    4096: (32, 32),
                    8192: (32, 32),
                },
            }
        },
        256: {
            'q_bfloat16_kv_bfloat16': {
                'q_head-16_kv_head-2_head-64': {
                    1024: (4, 128),
                    2048: (8, 32),
                    4096: (16, 16),
                    8192: (16, 16),
                    256: (1, 64),
                    512: (2, 32),
                },
                'q_head-64_kv_head-8_head-64': {
                    256: (1, 32),
                    512: (2, 32),
                    1024: (4, 32),
                    2048: (8, 16),
                    4096: (16, 16),
                    8192: (16, 16),
                },
                'q_head-8_kv_head-2_head-64': {
                    256: (1, 8),
                    512: (2, 128),
                    1024: (4, 64),
                    2048: (8, 32),
                    4096: (8, 32),
                    8192: (16, 128),
                },
                'q_head-32_kv_head-4_head-64': {
                    256: (1, 32),
                    512: (2, 8),
                    1024: (4, 8),
                    2048: (8, 16),
                    4096: (16, 16),
                    8192: (16, 16),
                },
            }
        },
    },
    'TPU v7': {
        128: {
            'q_bfloat16_kv_bfloat16': {
                'q_head-8_kv_head-2_head-64': {
                    4096: (32, 16),
                    8192: (32, 64),
                    128: (1, 16),
                    256: (2, 64),
                    512: (4, 16),
                    1024: (8, 32),
                    2048: (16, 32),
                },
                'q_head-64_kv_head-8_head-64': {
                    128: (1, 16),
                    4096: (32, 8),
                    1024: (8, 16),
                    256: (2, 16),
                    8192: (32, 16),
                    2048: (16, 16),
                    512: (4, 16),
                },
                'q_head-32_kv_head-4_head-64': {
                    256: (2, 8),
                    512: (4, 16),
                    1024: (8, 16),
                    2048: (16, 32),
                    4096: (32, 64),
                    8192: (32, 16),
                    128: (1, 16),
                },
                'q_head-16_kv_head-2_head-64': {
                    128: (1, 64),
                    256: (2, 8),
                    512: (4, 8),
                    1024: (8, 16),
                    2048: (16, 16),
                    4096: (32, 32),
                    8192: (32, 32),
                },
            }
        },
        256: {
            'q_bfloat16_kv_bfloat16': {
                'q_head-16_kv_head-2_head-64': {
                    1024: (4, 32),
                    2048: (8, 16),
                    4096: (16, 8),
                    8192: (16, 16),
                    256: (1, 64),
                    512: (2, 32),
                },
                'q_head-64_kv_head-8_head-64': {
                    256: (1, 8),
                    512: (2, 16),
                    1024: (4, 32),
                    2048: (8, 16),
                    4096: (16, 16),
                    8192: (16, 16),
                },
                'q_head-8_kv_head-2_head-64': {
                    256: (1, 256),
                    512: (2, 16),
                    1024: (4, 16),
                    2048: (8, 16),
                    4096: (16, 32),
                    8192: (16, 16),
                },
                'q_head-32_kv_head-4_head-64': {
                    256: (1, 64),
                    512: (2, 32),
                    1024: (4, 8),
                    2048: (8, 8),
                    4096: (16, 32),
                    8192: (16, 32),
                },
            }
        },
    },
}


def get_tuned_block_sizes(
    q_dtype,
    kv_dtype,
    actual_num_q_heads,
    actual_num_kv_heads,
    head_dim,
    page_size,
    max_num_tokens,
    pages_per_seq,
) -> tuple[int, int]:
    """Search tuned values for (num_kv_pages_per_blk, num_queries_per_blk)."""

    # Set default block sizes for each tpu_version.
    tpu_version = get_tpu_version()
    if tpu_version < 4:
        raise NotImplementedError('TPU version must be 4 or higher.')
    match tpu_version:
        case 4:
            # TPUv4 has much smaller VMEM size so we pick fixed block sizes.
            bkv_p, bq = (512 // page_size, 32)
        case 7:
            bkv_p, bq = (4096 // page_size, 32)
        case _:
            bkv_p, bq = (2048 // page_size, 32)

    keys = get_lookup_keys(
        page_size,
        q_dtype,
        kv_dtype,
        actual_num_q_heads,
        actual_num_kv_heads,
        head_dim,
        page_size * pages_per_seq,
    )
    device, page_size, dtypes, head_dims, max_model_len = keys

    try:
        bkv_p, bq = TUNED_BLOCK_SIZES[device][page_size][dtypes][head_dims][
            max_model_len]
    except KeyError:
        logger.warning(
            "Couldn't find tuned sizes for the RPA v3 kernel with %s", keys)

    return (min(pages_per_seq, bkv_p), min(max_num_tokens, bq))


def get_lookup_keys(
    page_size,
    q_dtype,
    kv_dtype,
    num_q_heads,
    num_kv_heads,
    head_dim,
    max_model_len,
):
    """Get the lookup keys for tuned block sizes."""
    (
        page_size,
        q_dtype_name,
        kv_dtype_name,
        num_q_heads,
        num_kv_heads,
        head_dim,
        max_model_len,
    ) = get_simplified_raw_key(
        page_size,
        q_dtype,
        kv_dtype,
        num_q_heads,
        num_kv_heads,
        head_dim,
        max_model_len,
    )

    return (
        get_device_name(),
        next_power_of_2(page_size),
        f'q_{q_dtype_name}_kv_{kv_dtype_name}',
        f'q_head-{num_q_heads}_kv_head-{num_kv_heads}_head-{head_dim}',
        next_power_of_2(max_model_len),
    )


def get_simplified_raw_key(
    page_size,
    q_dtype,
    kv_dtype,
    actual_num_q_heads,
    actual_num_kv_heads,
    head_dim,
    max_model_len,
):
    """Get the simplified key."""
    assert head_dim == 64
    assert actual_num_q_heads % actual_num_kv_heads == 0
    actual_num_q_heads_per_kv_head = actual_num_q_heads // actual_num_kv_heads
    q_packing = get_dtype_packing(q_dtype)
    kv_packing = get_dtype_packing(kv_dtype)
    num_kv_heads = align_to(actual_num_kv_heads, kv_packing)
    num_q_heads_per_kv_head = align_to(actual_num_q_heads_per_kv_head,
                                       q_packing)

    return (
        next_power_of_2(page_size),
        jnp.dtype(q_dtype).name,
        jnp.dtype(kv_dtype).name,
        next_power_of_2(num_q_heads_per_kv_head * actual_num_kv_heads),
        next_power_of_2(num_kv_heads),
        head_dim,
        next_power_of_2(max_model_len),
    )
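For reference, a minimal lookup sketch (not part of the package). It assumes the process runs on a TPU v6e host, so get_device_name() returns 'TPU v6e', and the model dimensions are hypothetical:

    # Hypothetical lookup sketch; assumes a TPU v6e host.
    import jax.numpy as jnp
    from tpu_inference.kernels.ragged_paged_attention.v3.tuned_block_sizes_hd64 import (
        get_tuned_block_sizes)

    # 32 q heads / 4 kv heads, head_dim 64, bf16 q and kv,
    # page_size 128 with 16 pages per sequence -> max_model_len 2048.
    bkv_p, bq = get_tuned_block_sizes(
        q_dtype=jnp.bfloat16,
        kv_dtype=jnp.bfloat16,
        actual_num_q_heads=32,
        actual_num_kv_heads=4,
        head_dim=64,
        page_size=128,
        max_num_tokens=1024,
        pages_per_seq=16,
    )
    # Lookup key: ('TPU v6e', 128, 'q_bfloat16_kv_bfloat16',
    #              'q_head-32_kv_head-4_head-64', 2048) -> (16, 32);
    # then clamped to (min(16, 16), min(1024, 32)) == (16, 32).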

tpu_inference/kernels/ragged_paged_attention/v3/util.py
@@ -0,0 +1,51 @@
"""Utility functions for ragged paged attention."""
import jax
from jax._src import dtypes


def cdiv(a, b):
    assert b != 0
    return (a + b - 1) // b


def align_to(x, a):
    return cdiv(x, a) * a


def get_dtype_bitwidth(dtype):
    return dtypes.bit_width(dtype)


def get_dtype_packing(dtype):
    bits = get_dtype_bitwidth(dtype)
    return 32 // bits


def next_power_of_2(x: int):
    """Finds the smallest power of 2 >= x using bit manipulation.

    Args:
      x: The input number (should be an integer).

    Returns:
      The smallest integer power of 2 that is >= x.
    """
    assert x > 0
    if x == 1:
        return 1
    return 1 << (x - 1).bit_length()


def get_tpu_version() -> int:
    """Returns the numeric version of the TPU, or -1 if not on TPU."""
    kind = jax.devices()[0].device_kind
    if 'TPU' not in kind:
        return -1
    if kind.endswith(' lite'):
        kind = kind[:-len(' lite')]
    if kind.endswith('p') or kind.endswith('e'):
        kind = kind[:-1]
    if kind == 'TPU7x':
        return 7
    assert kind[:-1] == 'TPU v', kind
    return int(kind[-1])
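A few illustrative checks of these helpers (not part of the package; values chosen for illustration):

    # Illustrative checks; get_dtype_packing counts elements per 32-bit word.
    import jax.numpy as jnp
    from tpu_inference.kernels.ragged_paged_attention.v3.util import (
        align_to, cdiv, get_dtype_packing, next_power_of_2)

    assert cdiv(10, 4) == 3            # ceiling division
    assert align_to(10, 4) == 12       # round up to a multiple of 4
    assert next_power_of_2(33) == 64   # smallest power of 2 >= 33
    assert get_dtype_packing(jnp.bfloat16) == 2  # 16-bit: 2 per 32-bit word
    assert get_dtype_packing(jnp.int8) == 4      # 8-bit: 4 per 32-bit word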