tpu-inference 0.11.1.dev202511220812__py3-none-any.whl → 0.13.2.dev20251230__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tpu-inference has been flagged as potentially problematic.
- tests/__init__.py +13 -0
- tests/core/__init__.py +13 -0
- tests/core/test_disagg_utils.py +14 -0
- tests/core/test_dp_scheduler.py +650 -768
- tests/core/test_init.py +14 -0
- tests/distributed/__init__.py +13 -0
- tests/distributed/test_distributed_utils.py +120 -0
- tests/distributed/test_tpu_connector.py +478 -0
- tests/e2e/__init__.py +13 -0
- tests/e2e/test_async_scheduler.py +211 -0
- tests/e2e/test_data_parallel.py +289 -0
- tests/e2e/test_hybrid_kvcache.py +219 -0
- tests/e2e/test_local_disagg.py +257 -0
- tests/e2e/test_model_loader.py +268 -0
- tests/e2e/test_multi_modal_inference.py +111 -0
- tests/e2e/test_pipeline_parallel.py +265 -0
- tests/e2e/test_runai_model_streamer_loader.py +104 -0
- tests/e2e/test_sampling_params.py +269 -0
- tests/e2e/test_speculative_decoding.py +311 -0
- tests/e2e/test_structured_decoding.py +46 -0
- tests/executors/__init__.py +13 -0
- tests/executors/test_ray_distributed_executor.py +199 -0
- tests/experimental/__init__.py +13 -0
- tests/experimental/test_llama3_jax_stashed.py +208 -0
- tests/kernels/__init__.py +13 -0
- tests/kernels/collectives/__init__.py +13 -0
- tests/kernels/collectives/all_gather_matmul_kernel_test.py +69 -0
- tests/kernels/fused_moe_v1_test.py +317 -34
- tests/kernels/gmm_test.py +205 -0
- tests/kernels/mla_v1_test.py +143 -41
- tests/kernels/quantized_matmul_kernel_test.py +2 -34
- tests/kernels/ragged_kv_cache_update_v2_test.py +14 -0
- tests/kernels/ragged_paged_attention_kernel_v2_test.py +14 -0
- tests/kernels/ragged_paged_attention_kernel_v3_hd64_test.py +17 -1
- tests/kernels/ragged_paged_attention_kernel_v3_test.py +17 -1
- tests/layers/__init__.py +13 -0
- tests/layers/common/__init__.py +13 -0
- tests/layers/common/test_attention_interface.py +156 -0
- tests/layers/common/test_quantization.py +149 -0
- tests/layers/jax/__init__.py +13 -0
- tests/layers/jax/attention/__init__.py +13 -0
- tests/layers/jax/attention/test_common_attention.py +103 -0
- tests/layers/jax/attention/test_deepseek_v3_attention.py +233 -0
- tests/layers/jax/attention/test_llama4_attention.py +135 -0
- tests/layers/jax/moe/__init__.py +13 -0
- tests/layers/jax/moe/test_deepseek_moe.py +235 -0
- tests/layers/jax/sample/__init__.py +13 -0
- tests/layers/jax/sample/test_rejection_sampler.py +1624 -0
- tests/layers/jax/sample/test_sampling.py +115 -0
- tests/layers/jax/sample/test_sampling_metadata.py +254 -0
- tests/layers/jax/test_layers.py +155 -0
- tests/{test_quantization.py → layers/jax/test_qwix.py} +183 -50
- tests/layers/jax/test_rope.py +93 -0
- tests/layers/jax/test_sharding.py +159 -0
- tests/layers/jax/test_transformer_block.py +152 -0
- tests/layers/vllm/__init__.py +13 -0
- tests/layers/vllm/test_attention.py +363 -0
- tests/layers/vllm/test_awq.py +406 -0
- tests/layers/vllm/test_compressed_tensors_moe.py +199 -0
- tests/layers/vllm/test_compressed_tensors_w8a8_fp8.py +441 -0
- tests/layers/vllm/test_compressed_tensors_w8a8_int8.py +443 -0
- tests/layers/vllm/test_fp8.py +17 -0
- tests/layers/vllm/test_mxfp4.py +320 -0
- tests/layers/vllm/test_unquantized.py +662 -0
- tests/layers/vllm/utils.py +87 -0
- tests/lora/__init__.py +13 -0
- tests/lora/conftest.py +14 -0
- tests/lora/test_bgmv.py +14 -0
- tests/lora/test_layers.py +26 -6
- tests/lora/test_lora.py +15 -1
- tests/lora/test_lora_perf.py +67 -0
- tests/models/__init__.py +13 -0
- tests/models/common/__init__.py +13 -0
- tests/models/common/test_model_loader.py +455 -0
- tests/models/jax/__init__.py +13 -0
- tests/models/jax/test_deepseek_v3.py +401 -0
- tests/models/jax/test_llama3.py +184 -0
- tests/models/jax/test_llama4.py +298 -0
- tests/models/jax/test_llama_eagle3.py +197 -0
- tests/models/jax/test_llama_guard_4.py +242 -0
- tests/models/jax/test_qwen2.py +172 -0
- tests/models/jax/test_qwen2_5_vl.py +605 -0
- tests/models/jax/test_qwen3.py +169 -0
- tests/models/jax/test_weight_loading.py +180 -0
- tests/models/jax/utils/__init__.py +13 -0
- tests/models/jax/utils/test_multi_modal_utils.py +212 -0
- tests/platforms/__init__.py +13 -0
- tests/platforms/test_tpu_platform.py +54 -0
- tests/runner/__init__.py +13 -0
- tests/runner/test_block_table.py +395 -0
- tests/runner/test_input_batch.py +226 -0
- tests/runner/test_kv_cache.py +220 -0
- tests/runner/test_kv_cache_manager.py +498 -0
- tests/runner/test_multimodal_manager.py +429 -0
- tests/runner/test_persistent_batch_manager.py +84 -0
- tests/runner/test_speculative_decoding_manager.py +368 -0
- tests/runner/test_structured_decoding_manager.py +220 -0
- tests/runner/test_tpu_runner.py +261 -0
- tests/runner/test_tpu_runner_dp.py +1099 -0
- tests/runner/test_tpu_runner_mesh.py +200 -0
- tests/runner/test_utils.py +411 -0
- tests/spec_decode/__init__.py +13 -0
- tests/spec_decode/test_eagle3.py +311 -0
- tests/test_base.py +14 -0
- tests/test_envs.py +110 -12
- tests/test_tpu_info.py +14 -0
- tests/test_utils.py +2 -45
- tests/worker/__init__.py +13 -0
- tests/worker/tpu_worker_test.py +414 -0
- tpu_inference/__init__.py +14 -0
- tpu_inference/core/__init__.py +13 -0
- tpu_inference/core/sched/__init__.py +13 -0
- tpu_inference/core/sched/dp_scheduler.py +372 -56
- tpu_inference/distributed/__init__.py +13 -0
- tpu_inference/distributed/jax_parallel_state.py +14 -0
- tpu_inference/distributed/tpu_connector.py +15 -10
- tpu_inference/distributed/utils.py +56 -4
- tpu_inference/envs.py +92 -8
- tpu_inference/executors/__init__.py +13 -0
- tpu_inference/executors/ray_distributed_executor.py +25 -4
- tpu_inference/experimental/__init__.py +13 -0
- tpu_inference/experimental/llama3_jax_stashed.py +14 -0
- tpu_inference/kernels/__init__.py +13 -0
- tpu_inference/kernels/collectives/__init__.py +13 -0
- tpu_inference/kernels/collectives/all_gather_matmul.py +12 -6
- tpu_inference/kernels/collectives/all_gather_matmul_tuned_block_sizes.py +7 -2
- tpu_inference/kernels/flash_attention/__init__.py +13 -0
- tpu_inference/kernels/fused_moe/__init__.py +13 -0
- tpu_inference/kernels/fused_moe/v1/__init__.py +13 -0
- tpu_inference/kernels/fused_moe/v1/kernel.py +807 -230
- tpu_inference/kernels/megablox/__init__.py +13 -0
- tpu_inference/kernels/megablox/common.py +54 -0
- tpu_inference/kernels/megablox/gmm.py +646 -0
- tpu_inference/kernels/mla/__init__.py +13 -0
- tpu_inference/kernels/mla/v1/__init__.py +13 -0
- tpu_inference/kernels/mla/v1/kernel.py +117 -145
- tpu_inference/kernels/quantized_matmul/__init__.py +13 -0
- tpu_inference/kernels/quantized_matmul/kernel.py +69 -8
- tpu_inference/kernels/ragged_paged_attention/__init__.py +13 -0
- tpu_inference/kernels/ragged_paged_attention/v2/__init__.py +13 -0
- tpu_inference/kernels/ragged_paged_attention/v2/kernel.py +2 -1
- tpu_inference/kernels/ragged_paged_attention/v2/ragged_kv_cache_update.py +2 -1
- tpu_inference/kernels/ragged_paged_attention/v3/__init__.py +13 -0
- tpu_inference/kernels/ragged_paged_attention/v3/kernel.py +194 -101
- tpu_inference/kernels/ragged_paged_attention/v3/kernel_hd64.py +218 -137
- tpu_inference/kernels/ragged_paged_attention/v3/tuned_block_sizes.py +3817 -3504
- tpu_inference/kernels/ragged_paged_attention/v3/tuned_block_sizes_hd64.py +376 -195
- tpu_inference/kernels/ragged_paged_attention/v3/util.py +15 -1
- tpu_inference/layers/__init__.py +13 -0
- tpu_inference/layers/common/__init__.py +13 -0
- tpu_inference/layers/common/attention_interface.py +25 -12
- tpu_inference/layers/common/attention_metadata.py +14 -0
- tpu_inference/layers/common/fused_moe_gmm.py +506 -0
- tpu_inference/layers/common/quant_methods.py +15 -0
- tpu_inference/layers/common/quantization.py +282 -0
- tpu_inference/layers/common/sharding.py +32 -9
- tpu_inference/layers/common/utils.py +94 -0
- tpu_inference/layers/jax/__init__.py +13 -0
- tpu_inference/layers/jax/attention/__init__.py +13 -0
- tpu_inference/layers/jax/attention/attention.py +19 -6
- tpu_inference/layers/jax/attention/deepseek_v3_attention.py +270 -77
- tpu_inference/layers/jax/attention/gpt_oss_attention.py +24 -11
- tpu_inference/layers/jax/attention/llama4_attention.py +17 -4
- tpu_inference/layers/jax/base.py +14 -0
- tpu_inference/layers/jax/constants.py +13 -0
- tpu_inference/layers/jax/layers.py +14 -0
- tpu_inference/layers/jax/misc.py +14 -0
- tpu_inference/layers/jax/moe/__init__.py +13 -0
- tpu_inference/layers/jax/moe/deepseek_v3_moe.py +20 -13
- tpu_inference/layers/jax/moe/gpt_oss_moe.py +14 -0
- tpu_inference/layers/jax/moe/moe.py +43 -3
- tpu_inference/layers/jax/pp_utils.py +53 -0
- tpu_inference/layers/jax/rope.py +14 -0
- tpu_inference/layers/jax/rope_interface.py +14 -0
- tpu_inference/layers/jax/sample/__init__.py +13 -0
- tpu_inference/layers/jax/sample/rejection_sampler.py +13 -0
- tpu_inference/layers/jax/sample/sampling.py +15 -1
- tpu_inference/layers/jax/sample/sampling_metadata.py +14 -0
- tpu_inference/layers/jax/transformer_block.py +14 -0
- tpu_inference/layers/vllm/__init__.py +13 -0
- tpu_inference/layers/vllm/attention.py +4 -4
- tpu_inference/layers/vllm/fused_moe.py +101 -494
- tpu_inference/layers/vllm/linear.py +64 -0
- tpu_inference/layers/vllm/process_weights/__init__.py +13 -0
- tpu_inference/layers/vllm/{sharding.py → process_weights/cleanup_sharding.py} +24 -15
- tpu_inference/layers/vllm/process_weights/fused_moe_weights.py +369 -0
- tpu_inference/layers/vllm/process_weights/linear_weights.py +174 -0
- tpu_inference/layers/vllm/quantization/__init__.py +19 -3
- tpu_inference/layers/vllm/quantization/awq.py +96 -82
- tpu_inference/layers/vllm/quantization/compressed_tensors/__init__.py +13 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/compressed_tensors.py +23 -8
- tpu_inference/layers/vllm/quantization/compressed_tensors/compressed_tensors_moe.py +172 -176
- tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/__init__.py +13 -0
- tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +111 -91
- tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +79 -43
- tpu_inference/layers/vllm/quantization/{common.py → configs.py} +42 -25
- tpu_inference/layers/vllm/quantization/fp8.py +119 -0
- tpu_inference/layers/vllm/quantization/mxfp4.py +137 -178
- tpu_inference/layers/vllm/quantization/unquantized.py +157 -233
- tpu_inference/lora/__init__.py +13 -0
- tpu_inference/lora/torch_lora_ops.py +8 -13
- tpu_inference/models/__init__.py +13 -0
- tpu_inference/models/common/__init__.py +13 -0
- tpu_inference/models/common/model_loader.py +112 -35
- tpu_inference/models/jax/__init__.py +13 -0
- tpu_inference/models/jax/deepseek_v3.py +267 -157
- tpu_inference/models/jax/gpt_oss.py +26 -10
- tpu_inference/models/jax/jax_intermediate_tensor.py +14 -0
- tpu_inference/models/jax/llama3.py +99 -36
- tpu_inference/models/jax/llama4.py +14 -0
- tpu_inference/models/jax/llama_eagle3.py +18 -5
- tpu_inference/models/jax/llama_guard_4.py +15 -1
- tpu_inference/models/jax/qwen2.py +17 -2
- tpu_inference/models/jax/qwen2_5_vl.py +179 -51
- tpu_inference/models/jax/qwen3.py +17 -2
- tpu_inference/models/jax/utils/__init__.py +13 -0
- tpu_inference/models/jax/utils/file_utils.py +14 -0
- tpu_inference/models/jax/utils/multi_modal_utils.py +18 -4
- tpu_inference/models/jax/utils/qwix/__init__.py +13 -0
- tpu_inference/models/jax/utils/{quantization/quantization_utils.py → qwix/qwix_utils.py} +92 -32
- tpu_inference/models/jax/utils/weight_utils.py +234 -155
- tpu_inference/models/vllm/__init__.py +13 -0
- tpu_inference/models/vllm/vllm_model_wrapper.py +32 -8
- tpu_inference/models/vllm/vllm_model_wrapper_context.py +14 -0
- tpu_inference/platforms/__init__.py +14 -0
- tpu_inference/platforms/tpu_platform.py +51 -72
- tpu_inference/runner/__init__.py +13 -0
- tpu_inference/runner/compilation_manager.py +180 -80
- tpu_inference/runner/kv_cache.py +54 -20
- tpu_inference/runner/kv_cache_manager.py +55 -33
- tpu_inference/runner/lora_utils.py +16 -1
- tpu_inference/runner/multimodal_manager.py +16 -2
- tpu_inference/runner/persistent_batch_manager.py +54 -2
- tpu_inference/runner/speculative_decoding_manager.py +14 -0
- tpu_inference/runner/structured_decoding_manager.py +16 -3
- tpu_inference/runner/tpu_runner.py +124 -61
- tpu_inference/runner/utils.py +2 -2
- tpu_inference/spec_decode/__init__.py +13 -0
- tpu_inference/spec_decode/jax/__init__.py +13 -0
- tpu_inference/spec_decode/jax/eagle3.py +84 -22
- tpu_inference/tpu_info.py +14 -0
- tpu_inference/utils.py +72 -44
- tpu_inference/worker/__init__.py +13 -0
- tpu_inference/worker/tpu_worker.py +66 -52
- {tpu_inference-0.11.1.dev202511220812.dist-info → tpu_inference-0.13.2.dev20251230.dist-info}/METADATA +8 -9
- tpu_inference-0.13.2.dev20251230.dist-info/RECORD +266 -0
- tpu_inference/layers/vllm/linear_common.py +0 -186
- tpu_inference/models/jax/utils/quantization/__init__.py +0 -0
- tpu_inference/models/jax/utils/quantization/configs/fp8_all_modules_w_only.yaml +0 -5
- tpu_inference/models/jax/utils/quantization/configs/fp8_default.yaml +0 -6
- tpu_inference/models/jax/utils/quantization/configs/int8_all_modules_w_only.yaml +0 -5
- tpu_inference/models/jax/utils/quantization/configs/int8_default.yaml +0 -6
- tpu_inference/models/jax/utils/quantization/mxfp4_utils.py +0 -105
- tpu_inference-0.11.1.dev202511220812.dist-info/RECORD +0 -174
- {tpu_inference-0.11.1.dev202511220812.dist-info → tpu_inference-0.13.2.dev20251230.dist-info}/WHEEL +0 -0
- {tpu_inference-0.11.1.dev202511220812.dist-info → tpu_inference-0.13.2.dev20251230.dist-info}/licenses/LICENSE +0 -0
- {tpu_inference-0.11.1.dev202511220812.dist-info → tpu_inference-0.13.2.dev20251230.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,455 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+from unittest.mock import MagicMock, patch
+
+import jax
+import jax.numpy as jnp
+import numpy as np
+import pytest
+import torch
+from jax.sharding import Mesh
+from transformers import PretrainedConfig
+from vllm.config import (ModelConfig, ParallelConfig, VllmConfig,
+                         set_current_vllm_config)
+from vllm.distributed.parallel_state import (ensure_model_parallel_initialized,
+                                             init_distributed_environment)
+from vllm.engine.arg_utils import EngineArgs
+from vllm.model_executor.models.registry import ModelRegistry
+
+from tpu_inference.models.common import model_loader
+from tpu_inference.models.jax.qwen3 import Qwen3ForCausalLM
+
+
+class MockModelA:
+
+    def __init__(self, vllm_config, rng=None, mesh=None):
+        pass
+
+    def __call__(self, kv_caches, input_ids, attention_metadata):
+        pass
+
+
+class MockModelB:
+
+    def __init__(self, vllm_config, rng=None, mesh=None):
+        pass
+
+    def __call__(self, kv_caches, input_ids, attention_metadata):
+        pass
+
+
+@pytest.fixture(scope="module")
+def mesh() -> Mesh:
+    """Provides a JAX device mesh for sharding."""
+    devices = np.array(jax.devices()[:1])
+    devices = devices.reshape((1, 1, 1, -1))
+    # Pass the 1D list of devices directly. Its ndim will match len(axis_names).
+    return Mesh(devices, axis_names=("data", "attn_dp", "expert", "model"))
+
+
+@pytest.fixture
+def vllm_config() -> MagicMock:
+    """Provides a mock VllmConfig object."""
+    model = "Qwen/Qwen3-0.6B"
+    mock_config = MagicMock(spec=VllmConfig)
+    mock_config.model_config = ModelConfig(model)
+    mock_config.model_config.dtype = jnp.bfloat16
+    mock_config.load_config = MagicMock()
+    mock_config.load_config.download_dir = None
+    mock_config.load_config.load_format = "auto"
+    mock_config.additional_config = {}
+    mock_config.cache_config = MagicMock(cache_dtype="auto")
+    mock_config.parallel_config = ParallelConfig(pipeline_parallel_size=1)
+    return mock_config
+
+
+# --- Added RNG Fixture ---
+@pytest.fixture
+def rng() -> jax.Array:
+    """Provides a JAX PRNGKey."""
+    return jax.random.PRNGKey(0)
+
+
+# ==============================================================================
+# >> Test Cases
+# ==============================================================================
+
+
+def test_get_model_architecture_supported(vllm_config):
+    """
+    Tests that _get_model_architecture returns the correct model class
+    for a supported architecture.
+    """
+    config = vllm_config.model_config.hf_config
+    model_class = model_loader._get_model_architecture(config)
+    assert model_class == Qwen3ForCausalLM
+
+
+def test_get_model_architecture_unsupported():
+    """
+    Tests that _get_model_architecture raises a ValueError for an
+    unsupported architecture.
+    """
+    config = PretrainedConfig(architectures=["UnsupportedModel"])
+    with pytest.raises(ValueError, match="not registered"):
+        model_loader._get_model_architecture(config)
+
+
+@pytest.fixture(autouse=True)
+def clear_model_registry_after_test():
+    """Clear the model registry after each test to prevent side effects."""
+    yield
+    model_loader._MODEL_REGISTRY.clear()
+
+
+def test_register_model_validation():
+    """Tests that register_model validates the model interface."""
+
+    class ValidModel:
+
+        def __init__(self, vllm_config, rng=None, mesh=None):
+            pass
+
+        def __call__(self, kv_caches, input_ids, attention_metadata, **kwargs):
+            pass
+
+    class MissingInitArgModel:
+
+        def __init__(self, rng=None, mesh=None):  # Missing vllm_config
+            pass
+
+        def __call__(self, kv_caches, input_ids, attention_metadata):
+            pass
+
+    class MissingCallArgModel:
+
+        def __init__(self, vllm_config, rng=None, mesh=None):
+            pass
+
+        def __call__(self, kv_caches, input_ids):  # Missing attention_metadata
+            pass
+
+    class NoCallModel:
+
+        def __init__(self, vllm_config, rng=None, mesh=None):
+            pass
+
+    # This should succeed
+    model_loader.register_model("ValidModel", ValidModel)
+
+    # These should fail
+    with pytest.raises(TypeError, match="vllm_config"):
+        model_loader.register_model("InvalidInit", MissingInitArgModel)
+
+    with pytest.raises(TypeError, match="attention_metadata"):
+        model_loader.register_model("InvalidCall", MissingCallArgModel)
+
+    with pytest.raises(TypeError, match="__call__ method"):
+        model_loader.register_model("NoCallModel", NoCallModel)
+
+
+def test_register_model_new_arch():
+    """Tests registering a new model architecture."""
+    arch = "NewArch"
+    model_loader.register_model(arch, MockModelA)
+
+    # Check tpu_inference registry
+    config = PretrainedConfig(architectures=[arch])
+    model_class = model_loader._get_model_architecture(config)
+    assert model_class == MockModelA
+
+    # Check vLLM registry
+    vllm_model_class = ModelRegistry._try_load_model_cls(arch)
+    assert vllm_model_class is not None
+    assert vllm_model_class.__name__ == f"VllmCompatible{MockModelA.__name__}"
+    assert issubclass(vllm_model_class, MockModelA)
+    assert issubclass(vllm_model_class, torch.nn.Module)
+
+
+def test_register_model_update_arch():
+    """Tests updating an existing registered model architecture."""
+    arch = "UpdatableArch"
+    config = PretrainedConfig(architectures=[arch])
+
+    # Register initial model
+    model_loader.register_model(arch, MockModelA)
+
+    # Verify initial registration in both registries
+    model_class_1 = model_loader._get_model_architecture(config)
+    assert model_class_1 == MockModelA
+    vllm_model_class_1 = ModelRegistry._try_load_model_cls(arch)
+    assert vllm_model_class_1.__name__ == f"VllmCompatible{MockModelA.__name__}"
+    assert issubclass(vllm_model_class_1, MockModelA)
+
+    # Update the registration
+    model_loader.register_model(arch, MockModelB)
+
+    # Verify the update in both registries
+    model_class_2 = model_loader._get_model_architecture(config)
+    assert model_class_2 == MockModelB
+    vllm_model_class_2 = ModelRegistry._try_load_model_cls(arch)
+    assert vllm_model_class_2.__name__ == f"VllmCompatible{MockModelB.__name__}"
+    assert issubclass(vllm_model_class_2, MockModelB)
+
+
+def test_register_model_vllm_wrapper_methods():
+    """Tests that the vLLM wrapper has correct dummy methods."""
+    arch = "WrapperMethodTestArch"
+    model_loader.register_model(arch, MockModelA)
+
+    vllm_model_class = ModelRegistry._try_load_model_cls(arch)
+    instance = vllm_model_class()
+
+    # `forward` should be unimplemented.
+    with pytest.raises(NotImplementedError, match="JAX model"):
+        instance.forward(input_ids=None, positions=None)
+
+    # `embed_input_ids` should be unimplemented.
+    with pytest.raises(NotImplementedError, match="JAX model"):
+        instance.embed_input_ids(input_ids=None, positions=None)
+
+    # `load_weights` should be a no-op that returns None.
+    assert instance.load_weights() is None
+
+
+def test_get_flax_model(vllm_config, mesh):
+    """
+    An integration test for the main public function `get_flax_model`.
+    It verifies that the function returns two valid, JIT-compiled functions
+    that execute correctly and produce outputs with the expected sharding.
+    """
+    rng = jax.random.PRNGKey(42)
+
+    # 1. Get the compiled model and logit computation functions
+    model_fn, compute_logits_fn, *_ = model_loader.get_flax_model(
+        vllm_config, rng, mesh)
+
+    assert callable(model_fn)
+    assert callable(compute_logits_fn)
+
+
+def test_get_vllm_model(mesh):
+    """
+    An integration test for the main public function `get_vllm_model`.
+    It verifies that the function returns two valid, JIT-compiled functions
+    that execute correctly and produce outputs with the expected sharding.
+    """
+    rng = jax.random.PRNGKey(42)
+
+    engine_args = EngineArgs(model="Qwen/Qwen3-0.6B")
+    vllm_config = engine_args.create_engine_config()
+    vllm_config.model_config.dtype = torch.bfloat16
+
+    with set_current_vllm_config(vllm_config):
+        temp_file = tempfile.mkstemp()[1]
+        init_distributed_environment(
+            world_size=1,
+            rank=0,
+            local_rank=0,
+            distributed_init_method=f"file://{temp_file}",
+            backend="gloo",
+        )
+        ensure_model_parallel_initialized(
+            tensor_model_parallel_size=1,
+            pipeline_model_parallel_size=1,
+        )
+
+        model_fn, compute_logits_fn, *_ = model_loader.get_vllm_model(
+            vllm_config, rng, mesh)
+
+        assert callable(model_fn)
+        assert callable(compute_logits_fn)
+
+
+def test_get_vllm_model_random_weights(mesh):
+    rng = jax.random.PRNGKey(42)
+
+    engine_args = EngineArgs(model="Qwen/Qwen3-0.6B")
+    vllm_config = engine_args.create_engine_config()
+    vllm_config.model_config.dtype = torch.bfloat16
+    vllm_config.load_config.load_format = "dummy"
+
+    with set_current_vllm_config(vllm_config):
+        temp_file = tempfile.mkstemp()[1]
+        init_distributed_environment(
+            world_size=1,
+            rank=0,
+            local_rank=0,
+            distributed_init_method=f"file://{temp_file}",
+            backend="gloo",
+        )
+        ensure_model_parallel_initialized(
+            tensor_model_parallel_size=1,
+            pipeline_model_parallel_size=1,
+        )
+
+        with patch(
+                "vllm.model_executor.model_loader.dummy_loader.DummyModelLoader.load_weights"
+        ) as mock_load:
+            model_fn, compute_logits_fn, *_ = model_loader.get_vllm_model(
+                vllm_config, rng, mesh)
+
+        assert callable(model_fn)
+        assert callable(compute_logits_fn)
+        mock_load.assert_called()
+
+
+# ==============================================================================
+# >> Test Suite for get_model Fallback Logic
+# ==============================================================================
+
+
+@pytest.mark.usefixtures("mesh")  # This fixture is module-scoped, but fine
+class TestGetModel:
+    """Tests the main get_model() entrypoint and its fallback logic."""
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "flax_nnx"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_flax_happy_path(self, mock_get_flax, mock_get_vllm,
+                                       vllm_config, rng, mesh):
+        """Tests that 'flax_nnx' impl calls get_flax_model."""
+        mock_get_flax.return_value = "flax_model_sentinel"
+
+        result = model_loader.get_model(vllm_config, rng, mesh)
+
+        mock_get_flax.assert_called_once_with(vllm_config, rng, mesh, False)
+        mock_get_vllm.assert_not_called()
+        assert result == "flax_model_sentinel"
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "flax_nnx"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_flax_happy_path_withPP(self, mock_get_flax,
+                                              mock_get_vllm, vllm_config, rng,
+                                              mesh):
+        """Tests that 'flax_nnx' impl calls get_vllm_model when PP is enabled."""
+        mock_get_flax.return_value = "flax_model_sentinel"
+        mock_get_vllm.return_value = "vllm_model_sentinel"
+        vllm_config.parallel_config.pipeline_parallel_size = 2
+        result = model_loader.get_model(vllm_config, rng, mesh)
+
+        mock_get_flax.assert_not_called()
+        mock_get_vllm.assert_called_once_with(vllm_config, rng, mesh)
+        assert result == "vllm_model_sentinel"
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "vllm"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_vllm_happy_path(self, mock_get_flax, mock_get_vllm,
+                                       vllm_config, rng, mesh):
+        """Tests that 'vllm' impl calls get_vllm_model."""
+        mock_get_vllm.return_value = "vllm_model_sentinel"
+
+        result = model_loader.get_model(vllm_config, rng, mesh)
+
+        mock_get_flax.assert_not_called()
+        mock_get_vllm.assert_called_once_with(vllm_config, rng, mesh)
+        assert result == "vllm_model_sentinel"
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "flax_nnx"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_flax_fallback_on_unsupported_arch(
+            self, mock_get_flax, mock_get_vllm, vllm_config, rng, mesh):
+        """
+        Tests that 'flax_nnx' falls back to get_vllm_model on
+        UnsupportedArchitectureError.
+        """
+        # Mock get_flax_model to raise the specific error
+        mock_get_flax.side_effect = model_loader.UnsupportedArchitectureError(
+            "Model not supported")
+        mock_get_vllm.return_value = "vllm_fallback_sentinel"
+
+        result = model_loader.get_model(vllm_config, rng, mesh)
+
+        # Check that both were called
+        mock_get_flax.assert_called_once_with(vllm_config, rng, mesh, False)
+        mock_get_vllm.assert_called_once_with(vllm_config, rng, mesh)
+        assert result == "vllm_fallback_sentinel"
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "flax_nnx"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_flax_reraises_other_errors(self, mock_get_flax,
+                                                  mock_get_vllm, vllm_config,
+                                                  rng, mesh):
+        """
+        Tests that 'flax_nnx' re-raises other ValueErrors
+        and does not fall back.
+        """
+        # Mock get_flax_model to raise a *different* error
+        mock_get_flax.side_effect = ValueError("A different error")
+
+        with pytest.raises(ValueError, match="A different error"):
+            model_loader.get_model(vllm_config, rng, mesh)
+
+        # Check that flax was called but vllm was not
+        mock_get_flax.assert_called_once_with(vllm_config, rng, mesh, False)
+        mock_get_vllm.assert_not_called()
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "jetpack"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_not_implemented(self, mock_get_flax, mock_get_vllm,
+                                       vllm_config, rng, mesh):
+        """Tests that an unknown impl raises NotImplementedError."""
+        with pytest.raises(NotImplementedError):
+            model_loader.get_model(vllm_config, rng, mesh)
+
+        mock_get_flax.assert_not_called()
+        mock_get_vllm.assert_not_called()
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "auto"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_auto_resolves_to_flax_nnx(self, mock_get_flax,
+                                                 mock_get_vllm, vllm_config,
+                                                 rng, mesh):
+        """
+        Tests that 'auto' resolves to 'flax_nnx' for standard architectures
+        (not in _VLLM_REQUIRED_ARCHITECTURES).
+        """
+        # vllm_config uses Qwen3 which is NOT in _VLLM_REQUIRED_ARCHITECTURES
+        mock_get_flax.return_value = "flax_model_sentinel"
+
+        result = model_loader.get_model(vllm_config, rng, mesh)
+
+        mock_get_flax.assert_called_once_with(vllm_config, rng, mesh, False)
+        mock_get_vllm.assert_not_called()
+        assert result == "flax_model_sentinel"
+
+    @patch.dict(os.environ, {"MODEL_IMPL_TYPE": "auto"}, clear=True)
+    @patch("tpu_inference.models.common.model_loader.get_vllm_model")
+    @patch("tpu_inference.models.common.model_loader.get_flax_model")
+    def test_get_model_auto_resolves_to_vllm_for_gpt_oss(
+            self, mock_get_flax, mock_get_vllm, vllm_config, rng, mesh):
+        """
+        Tests that 'auto' resolves to 'vllm' for architectures in
+        _VLLM_REQUIRED_ARCHITECTURES (e.g., GptOssForCausalLM).
+        """
+        # Mock the architecture to be GptOssForCausalLM
+        vllm_config.model_config.hf_config.architectures = [
+            "GptOssForCausalLM"
+        ]
+        mock_get_vllm.return_value = "vllm_model_sentinel"
+
+        result = model_loader.get_model(vllm_config, rng, mesh)
+
+        mock_get_flax.assert_not_called()
+        mock_get_vllm.assert_called_once_with(vllm_config, rng, mesh)
+        assert result == "vllm_model_sentinel"
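The hunk above (evidently tests/models/common/test_model_loader.py, the only +455-line file in the manifest) pins down the plug-in contract that model_loader.register_model enforces. A minimal sketch of a conforming model, assuming only the interface those tests validate (the class name MyTpuModel is hypothetical, not from the package):

from tpu_inference.models.common import model_loader

class MyTpuModel:
    # register_model requires __init__ to accept `vllm_config`; rng and
    # mesh may be keyword arguments (see ValidModel vs. MissingInitArgModel).
    def __init__(self, vllm_config, rng=None, mesh=None):
        self.vllm_config = vllm_config

    # __call__ must accept kv_caches, input_ids, and attention_metadata,
    # or registration fails with a TypeError.
    def __call__(self, kv_caches, input_ids, attention_metadata):
        ...

# Registers the class under its HF `architectures` name; per the tests this
# also installs a torch.nn.Module subclass named "VllmCompatibleMyTpuModel"
# in vLLM's ModelRegistry, with stubbed forward/embed_input_ids/load_weights.
model_loader.register_model("MyTpuModel", MyTpuModel)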
@@ -0,0 +1,13 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
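Taken together, the TestGetModel cases describe a dispatch on the MODEL_IMPL_TYPE environment variable. What follows is a hedged sketch of that control flow, inferred from the test assertions rather than taken from the package source; the real get_model may differ in signature and detail:

import os

from tpu_inference.models.common import model_loader

def get_model_sketch(vllm_config, rng, mesh):
    """Hypothetical reconstruction of get_model's dispatch, per the tests."""
    impl = os.environ.get("MODEL_IMPL_TYPE", "auto")
    if impl == "auto":
        # Per test_get_model_auto_resolves_*: architectures such as
        # GptOssForCausalLM require the vLLM path; others default to flax_nnx.
        arch = vllm_config.model_config.hf_config.architectures[0]
        impl = ("vllm" if arch in model_loader._VLLM_REQUIRED_ARCHITECTURES
                else "flax_nnx")
    if impl == "flax_nnx":
        # Pipeline parallelism routes to the vLLM path even under flax_nnx
        # (test_get_model_flax_happy_path_withPP).
        if vllm_config.parallel_config.pipeline_parallel_size > 1:
            return model_loader.get_vllm_model(vllm_config, rng, mesh)
        try:
            return model_loader.get_flax_model(vllm_config, rng, mesh, False)
        except model_loader.UnsupportedArchitectureError:
            # Unsupported architectures fall back to vLLM; any other error
            # propagates (test_get_model_flax_reraises_other_errors).
            return model_loader.get_vllm_model(vllm_config, rng, mesh)
    if impl == "vllm":
        return model_loader.get_vllm_model(vllm_config, rng, mesh)
    raise NotImplementedError(f"Unknown MODEL_IMPL_TYPE: {impl}")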