tpu-inference 0.11.1rc1__tar.gz → 0.11.1rc3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tpu-inference has been flagged as potentially problematic; consult the package registry's advisory page for details. (The original "Click here" link was lost during extraction.)
- {tpu_inference-0.11.1rc1/tpu_inference.egg-info → tpu_inference-0.11.1rc3}/PKG-INFO +6 -6
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/README.md +2 -2
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/requirements.txt +3 -3
- tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives/all_gather_matmul.py +735 -0
- tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives/all_gather_matmul_tuned_block_sizes.py +60 -0
- tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives/util.py +47 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/common/attention_metadata.py +34 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention/attention.py +254 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention/deepseek_v3_attention.py +354 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention/llama4_attention.py +153 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention_interface.py +356 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/base.py +151 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/binary_search.py +295 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/constants.py +88 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/layers.py +301 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/misc.py +16 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/moe/deepseek_v3_moe.py +608 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/moe/moe.py +209 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/rope.py +172 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/rope_interface.py +214 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample/rejection_sampler.py +515 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample/sampling.py +95 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample/sampling_metadata.py +69 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sharding.py +406 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/jax/transformer_block.py +76 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/attention.py +184 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/fused_moe.py +399 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/linear_common.py +186 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/__init__.py +34 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/awq.py +207 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/common.py +105 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/compressed_tensors.py +121 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +208 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +136 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/unquantized.py +263 -0
- tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/sharding.py +151 -0
- tpu_inference-0.11.1rc3/tpu_inference/mock/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/models/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/models/common/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/models/common/model_loader.py +433 -0
- tpu_inference-0.11.1rc3/tpu_inference/models/jax/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/models/jax/utils/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/models/jax/utils/quantization/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/models/vllm/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/runner/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/spec_decode/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/spec_decode/jax/__init__.py +0 -0
- tpu_inference-0.11.1rc3/tpu_inference/worker/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3/tpu_inference.egg-info}/PKG-INFO +6 -6
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/SOURCES.txt +45 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/requires.txt +3 -3
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/LICENSE +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/MANIFEST.in +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/pyproject.toml +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/setup.cfg +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/setup.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_adapters.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_core_tpu.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_disagg_executor.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_disagg_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_init.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/quantized_matmul_kernel_test.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/ragged_kv_cache_update_v2_test.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/ragged_paged_attention_kernel_v2_test.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/ragged_paged_attention_kernel_v3_test.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/lora/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/lora/test_lora.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_base.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_quantization.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_tpu_info.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/tpu_backend_test.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/adapters/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/adapters/vllm_adapters.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/adapters/vllm_config_adapters.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/backend.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/adapters.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/core_tpu.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/disagg_executor.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/disagg_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/abstracts.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/host.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/interfaces.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/distributed/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/distributed/tpu_connector.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/distributed/utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/executors/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/executors/ray_distributed_executor.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/experimental/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/experimental/llama3_jax_stashed.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/cache.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/config.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/config_parts.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/engine.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/outputs.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/params.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/platform.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/request.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/scheduler.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/kernels/flash_attention → tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/kernels/quantized_matmul → tpu_inference-0.11.1rc3/tpu_inference/kernels/flash_attention}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/flash_attention/kernel.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/kernels/ragged_paged_attention → tpu_inference-0.11.1rc3/tpu_inference/kernels/quantized_matmul}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/quantized_matmul/kernel.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/quantized_matmul/tuned_block_sizes.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/quantized_matmul/util.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/kernels/ragged_paged_attention/v2 → tpu_inference-0.11.1rc3/tpu_inference/kernels/ragged_paged_attention}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/kernels/ragged_paged_attention/v3 → tpu_inference-0.11.1rc3/tpu_inference/kernels/ragged_paged_attention/v2}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v2/kernel.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v2/ragged_kv_cache_update.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v2/tuned_block_sizes.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/lora → tpu_inference-0.11.1rc3/tpu_inference/kernels/ragged_paged_attention/v3}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v3/kernel.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v3/tuned_block_sizes.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v3/util.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/mock → tpu_inference-0.11.1rc3/tpu_inference/layers}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/models → tpu_inference-0.11.1rc3/tpu_inference/layers/common}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/models → tpu_inference-0.11.1rc3/tpu_inference/layers}/jax/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/models/jax/utils → tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/models/jax/utils/quantization → tpu_inference-0.11.1rc3/tpu_inference/layers/jax/moe}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/models/vllm → tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/runner → tpu_inference-0.11.1rc3/tpu_inference/layers/vllm}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/spec_decode → tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/spec_decode/jax → tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/schemes}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/logger.py +0 -0
- {tpu_inference-0.11.1rc1/tpu_inference/worker → tpu_inference-0.11.1rc3/tpu_inference/lora}/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/lora/torch_lora_ops.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/lora/torch_punica_tpu.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_config_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_envs.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_logger.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_logging_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/deepseek_v3.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/llama3.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/llama4.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/llama_eagle3.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/phi3.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/qwen2.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/qwen2_5_vl.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/qwen3.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/file_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/multi_modal_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/quantization/quantization_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/weight_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/vllm/vllm_model_wrapper.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/vllm/vllm_model_wrapper_context.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/platforms/__init__.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/platforms/tpu_jax.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/block_table_jax.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/compilation_manager.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/input_batch_jax.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/kv_cache.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/kv_cache_manager.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/lora_utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/multimodal_manager.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/persistent_batch_manager.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/speculative_decoding_manager.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/structured_decoding_manager.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/tpu_jax_runner.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/spec_decode/jax/eagle3.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/tpu_info.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/utils.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/worker/_temporary_vllm_compat.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/worker/base.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/worker/tpu_worker_jax.py +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/dependency_links.txt +0 -0
- {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: tpu_inference
|
|
3
|
-
Version: 0.11.1rc1
|
|
3
|
+
Version: 0.11.1rc3
|
|
4
4
|
Author: tpu_inference Contributors
|
|
5
5
|
Classifier: Development Status :: 3 - Alpha
|
|
6
6
|
Classifier: Intended Audience :: Developers
|
|
@@ -21,9 +21,9 @@ Requires-Dist: pytest-mock
|
|
|
21
21
|
Requires-Dist: absl-py
|
|
22
22
|
Requires-Dist: numpy
|
|
23
23
|
Requires-Dist: google-cloud-storage
|
|
24
|
-
Requires-Dist: jax
|
|
25
|
-
Requires-Dist: jaxlib
|
|
26
|
-
Requires-Dist: libtpu
|
|
24
|
+
Requires-Dist: jax==0.7.2
|
|
25
|
+
Requires-Dist: jaxlib==0.7.2
|
|
26
|
+
Requires-Dist: libtpu==0.0.23
|
|
27
27
|
Requires-Dist: jaxtyping
|
|
28
28
|
Requires-Dist: flax==0.11.1
|
|
29
29
|
Requires-Dist: torchax==0.0.7
|
|
@@ -47,7 +47,7 @@ Dynamic: requires-python
|
|
|
47
47
|
</p>
|
|
48
48
|
|
|
49
49
|
<p align="center">
|
|
50
|
-
| <a href="https://… [old README header line truncated during diff extraction]
|
|
50
|
+
| <a href="https://tpu.vllm.ai"><b>Documentation</b></a> | <a href="https://blog.vllm.ai/"><b>Blog</b></a> | <a href="https://discuss.vllm.ai/c/hardware-support/google-tpu-support/27"><b>User Forum</b></a> | <a href="https://join.slack.com/share/enQtOTY2OTUxMDIyNjY1OS00M2MxYWQwZjAyMGZjM2MyZjRjNTA0ZjRkNjkzOTRhMzg0NDM2OTlkZDAxOTAzYmJmNzdkNDc4OGZjYTUwMmRh"><b>Developer Slack</b></a> |
|
|
51
51
|
</p>
|
|
52
52
|
|
|
53
53
|
---
|
|
@@ -90,7 +90,7 @@ Visit our [documentation](https://github.com/vllm-project/tpu-inference/tree/mai
|
|
|
90
90
|
|
|
91
91
|
## Contribute
|
|
92
92
|
|
|
93
|
-
We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue%… [old line truncated during diff extraction]
|
|
93
|
+
We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue+state%3Aopen+label%3A%22good+first+issue%22) if it's your first time contributing.
|
|
94
94
|
|
|
95
95
|
## Contact us
|
|
96
96
|
|
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
</p>
|
|
7
7
|
|
|
8
8
|
<p align="center">
|
|
9
|
-
| <a href="https://… [old README header line truncated during diff extraction]
|
|
9
|
+
| <a href="https://tpu.vllm.ai"><b>Documentation</b></a> | <a href="https://blog.vllm.ai/"><b>Blog</b></a> | <a href="https://discuss.vllm.ai/c/hardware-support/google-tpu-support/27"><b>User Forum</b></a> | <a href="https://join.slack.com/share/enQtOTY2OTUxMDIyNjY1OS00M2MxYWQwZjAyMGZjM2MyZjRjNTA0ZjRkNjkzOTRhMzg0NDM2OTlkZDAxOTAzYmJmNzdkNDc4OGZjYTUwMmRh"><b>Developer Slack</b></a> |
|
|
10
10
|
</p>
|
|
11
11
|
|
|
12
12
|
---
|
|
@@ -49,7 +49,7 @@ Visit our [documentation](https://github.com/vllm-project/tpu-inference/tree/mai
|
|
|
49
49
|
|
|
50
50
|
## Contribute
|
|
51
51
|
|
|
52
|
-
We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue%… [old line truncated during diff extraction]
|
|
52
|
+
We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue+state%3Aopen+label%3A%22good+first+issue%22) if it's your first time contributing.
|
|
53
53
|
|
|
54
54
|
## Contact us
|
|
55
55
|
|