tpu-inference 0.11.1rc1__tar.gz → 0.11.1rc3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tpu-inference might be problematic; see the package registry's advisory page for more details.

Files changed (176)
  1. {tpu_inference-0.11.1rc1/tpu_inference.egg-info → tpu_inference-0.11.1rc3}/PKG-INFO +6 -6
  2. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/README.md +2 -2
  3. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/requirements.txt +3 -3
  4. tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives/all_gather_matmul.py +735 -0
  5. tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives/all_gather_matmul_tuned_block_sizes.py +60 -0
  6. tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives/util.py +47 -0
  7. tpu_inference-0.11.1rc3/tpu_inference/layers/common/attention_metadata.py +34 -0
  8. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention/attention.py +254 -0
  9. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention/deepseek_v3_attention.py +354 -0
  10. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention/llama4_attention.py +153 -0
  11. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention_interface.py +356 -0
  12. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/base.py +151 -0
  13. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/binary_search.py +295 -0
  14. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/constants.py +88 -0
  15. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/layers.py +301 -0
  16. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/misc.py +16 -0
  17. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/moe/deepseek_v3_moe.py +608 -0
  18. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/moe/moe.py +209 -0
  19. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/rope.py +172 -0
  20. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/rope_interface.py +214 -0
  21. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample/rejection_sampler.py +515 -0
  22. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample/sampling.py +95 -0
  23. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample/sampling_metadata.py +69 -0
  24. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sharding.py +406 -0
  25. tpu_inference-0.11.1rc3/tpu_inference/layers/jax/transformer_block.py +76 -0
  26. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/attention.py +184 -0
  27. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/fused_moe.py +399 -0
  28. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/linear_common.py +186 -0
  29. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/__init__.py +34 -0
  30. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/awq.py +207 -0
  31. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/common.py +105 -0
  32. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/compressed_tensors.py +121 -0
  33. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +208 -0
  34. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +136 -0
  35. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/unquantized.py +263 -0
  36. tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/sharding.py +151 -0
  37. tpu_inference-0.11.1rc3/tpu_inference/mock/__init__.py +0 -0
  38. tpu_inference-0.11.1rc3/tpu_inference/models/__init__.py +0 -0
  39. tpu_inference-0.11.1rc3/tpu_inference/models/common/__init__.py +0 -0
  40. tpu_inference-0.11.1rc3/tpu_inference/models/common/model_loader.py +433 -0
  41. tpu_inference-0.11.1rc3/tpu_inference/models/jax/__init__.py +0 -0
  42. tpu_inference-0.11.1rc3/tpu_inference/models/jax/utils/__init__.py +0 -0
  43. tpu_inference-0.11.1rc3/tpu_inference/models/jax/utils/quantization/__init__.py +0 -0
  44. tpu_inference-0.11.1rc3/tpu_inference/models/vllm/__init__.py +0 -0
  45. tpu_inference-0.11.1rc3/tpu_inference/runner/__init__.py +0 -0
  46. tpu_inference-0.11.1rc3/tpu_inference/spec_decode/__init__.py +0 -0
  47. tpu_inference-0.11.1rc3/tpu_inference/spec_decode/jax/__init__.py +0 -0
  48. tpu_inference-0.11.1rc3/tpu_inference/worker/__init__.py +0 -0
  49. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3/tpu_inference.egg-info}/PKG-INFO +6 -6
  50. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/SOURCES.txt +45 -0
  51. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/requires.txt +3 -3
  52. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/LICENSE +0 -0
  53. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/MANIFEST.in +0 -0
  54. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/pyproject.toml +0 -0
  55. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/setup.cfg +0 -0
  56. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/setup.py +0 -0
  57. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/__init__.py +0 -0
  58. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/__init__.py +0 -0
  59. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_adapters.py +0 -0
  60. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_core_tpu.py +0 -0
  61. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_disagg_executor.py +0 -0
  62. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_disagg_utils.py +0 -0
  63. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/core/test_init.py +0 -0
  64. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/__init__.py +0 -0
  65. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/quantized_matmul_kernel_test.py +0 -0
  66. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/ragged_kv_cache_update_v2_test.py +0 -0
  67. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/ragged_paged_attention_kernel_v2_test.py +0 -0
  68. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/kernels/ragged_paged_attention_kernel_v3_test.py +0 -0
  69. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/lora/__init__.py +0 -0
  70. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/lora/test_lora.py +0 -0
  71. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_base.py +0 -0
  72. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_quantization.py +0 -0
  73. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_tpu_info.py +0 -0
  74. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/test_utils.py +0 -0
  75. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tests/tpu_backend_test.py +0 -0
  76. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/__init__.py +0 -0
  77. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/adapters/__init__.py +0 -0
  78. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/adapters/vllm_adapters.py +0 -0
  79. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/adapters/vllm_config_adapters.py +0 -0
  80. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/backend.py +0 -0
  81. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/__init__.py +0 -0
  82. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/adapters.py +0 -0
  83. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/core_tpu.py +0 -0
  84. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/disagg_executor.py +0 -0
  85. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/core/disagg_utils.py +0 -0
  86. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/__init__.py +0 -0
  87. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/abstracts.py +0 -0
  88. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/host.py +0 -0
  89. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/di/interfaces.py +0 -0
  90. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/distributed/__init__.py +0 -0
  91. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/distributed/tpu_connector.py +0 -0
  92. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/distributed/utils.py +0 -0
  93. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/executors/__init__.py +0 -0
  94. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/executors/ray_distributed_executor.py +0 -0
  95. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/experimental/__init__.py +0 -0
  96. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/experimental/llama3_jax_stashed.py +0 -0
  97. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/__init__.py +0 -0
  98. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/cache.py +0 -0
  99. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/config.py +0 -0
  100. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/config_parts.py +0 -0
  101. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/engine.py +0 -0
  102. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/outputs.py +0 -0
  103. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/params.py +0 -0
  104. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/platform.py +0 -0
  105. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/request.py +0 -0
  106. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/interfaces/scheduler.py +0 -0
  107. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/__init__.py +0 -0
  108. {tpu_inference-0.11.1rc1/tpu_inference/kernels/flash_attention → tpu_inference-0.11.1rc3/tpu_inference/kernels/collectives}/__init__.py +0 -0
  109. {tpu_inference-0.11.1rc1/tpu_inference/kernels/quantized_matmul → tpu_inference-0.11.1rc3/tpu_inference/kernels/flash_attention}/__init__.py +0 -0
  110. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/flash_attention/kernel.py +0 -0
  111. {tpu_inference-0.11.1rc1/tpu_inference/kernels/ragged_paged_attention → tpu_inference-0.11.1rc3/tpu_inference/kernels/quantized_matmul}/__init__.py +0 -0
  112. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/quantized_matmul/kernel.py +0 -0
  113. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/quantized_matmul/tuned_block_sizes.py +0 -0
  114. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/quantized_matmul/util.py +0 -0
  115. {tpu_inference-0.11.1rc1/tpu_inference/kernels/ragged_paged_attention/v2 → tpu_inference-0.11.1rc3/tpu_inference/kernels/ragged_paged_attention}/__init__.py +0 -0
  116. {tpu_inference-0.11.1rc1/tpu_inference/kernels/ragged_paged_attention/v3 → tpu_inference-0.11.1rc3/tpu_inference/kernels/ragged_paged_attention/v2}/__init__.py +0 -0
  117. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v2/kernel.py +0 -0
  118. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v2/ragged_kv_cache_update.py +0 -0
  119. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v2/tuned_block_sizes.py +0 -0
  120. {tpu_inference-0.11.1rc1/tpu_inference/lora → tpu_inference-0.11.1rc3/tpu_inference/kernels/ragged_paged_attention/v3}/__init__.py +0 -0
  121. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v3/kernel.py +0 -0
  122. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v3/tuned_block_sizes.py +0 -0
  123. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/kernels/ragged_paged_attention/v3/util.py +0 -0
  124. {tpu_inference-0.11.1rc1/tpu_inference/mock → tpu_inference-0.11.1rc3/tpu_inference/layers}/__init__.py +0 -0
  125. {tpu_inference-0.11.1rc1/tpu_inference/models → tpu_inference-0.11.1rc3/tpu_inference/layers/common}/__init__.py +0 -0
  126. {tpu_inference-0.11.1rc1/tpu_inference/models → tpu_inference-0.11.1rc3/tpu_inference/layers}/jax/__init__.py +0 -0
  127. {tpu_inference-0.11.1rc1/tpu_inference/models/jax/utils → tpu_inference-0.11.1rc3/tpu_inference/layers/jax/attention}/__init__.py +0 -0
  128. {tpu_inference-0.11.1rc1/tpu_inference/models/jax/utils/quantization → tpu_inference-0.11.1rc3/tpu_inference/layers/jax/moe}/__init__.py +0 -0
  129. {tpu_inference-0.11.1rc1/tpu_inference/models/vllm → tpu_inference-0.11.1rc3/tpu_inference/layers/jax/sample}/__init__.py +0 -0
  130. {tpu_inference-0.11.1rc1/tpu_inference/runner → tpu_inference-0.11.1rc3/tpu_inference/layers/vllm}/__init__.py +0 -0
  131. {tpu_inference-0.11.1rc1/tpu_inference/spec_decode → tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors}/__init__.py +0 -0
  132. {tpu_inference-0.11.1rc1/tpu_inference/spec_decode/jax → tpu_inference-0.11.1rc3/tpu_inference/layers/vllm/quantization/compressed_tensors/schemes}/__init__.py +0 -0
  133. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/logger.py +0 -0
  134. {tpu_inference-0.11.1rc1/tpu_inference/worker → tpu_inference-0.11.1rc3/tpu_inference/lora}/__init__.py +0 -0
  135. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/lora/torch_lora_ops.py +0 -0
  136. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/lora/torch_punica_tpu.py +0 -0
  137. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_config_utils.py +0 -0
  138. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_envs.py +0 -0
  139. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_logger.py +0 -0
  140. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/mock/vllm_logging_utils.py +0 -0
  141. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/deepseek_v3.py +0 -0
  142. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/llama3.py +0 -0
  143. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/llama4.py +0 -0
  144. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/llama_eagle3.py +0 -0
  145. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/phi3.py +0 -0
  146. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/qwen2.py +0 -0
  147. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/qwen2_5_vl.py +0 -0
  148. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/qwen3.py +0 -0
  149. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/file_utils.py +0 -0
  150. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/multi_modal_utils.py +0 -0
  151. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/quantization/quantization_utils.py +0 -0
  152. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/jax/utils/weight_utils.py +0 -0
  153. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/vllm/vllm_model_wrapper.py +0 -0
  154. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/models/vllm/vllm_model_wrapper_context.py +0 -0
  155. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/platforms/__init__.py +0 -0
  156. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/platforms/tpu_jax.py +0 -0
  157. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/block_table_jax.py +0 -0
  158. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/compilation_manager.py +0 -0
  159. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/input_batch_jax.py +0 -0
  160. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/kv_cache.py +0 -0
  161. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/kv_cache_manager.py +0 -0
  162. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/lora_utils.py +0 -0
  163. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/multimodal_manager.py +0 -0
  164. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/persistent_batch_manager.py +0 -0
  165. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/speculative_decoding_manager.py +0 -0
  166. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/structured_decoding_manager.py +0 -0
  167. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/tpu_jax_runner.py +0 -0
  168. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/runner/utils.py +0 -0
  169. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/spec_decode/jax/eagle3.py +0 -0
  170. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/tpu_info.py +0 -0
  171. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/utils.py +0 -0
  172. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/worker/_temporary_vllm_compat.py +0 -0
  173. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/worker/base.py +0 -0
  174. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference/worker/tpu_worker_jax.py +0 -0
  175. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/dependency_links.txt +0 -0
  176. {tpu_inference-0.11.1rc1 → tpu_inference-0.11.1rc3}/tpu_inference.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tpu_inference
3
- Version: 0.11.1rc1
3
+ Version: 0.11.1rc3
4
4
  Author: tpu_inference Contributors
5
5
  Classifier: Development Status :: 3 - Alpha
6
6
  Classifier: Intended Audience :: Developers
@@ -21,9 +21,9 @@ Requires-Dist: pytest-mock
21
21
  Requires-Dist: absl-py
22
22
  Requires-Dist: numpy
23
23
  Requires-Dist: google-cloud-storage
24
- Requires-Dist: jax
25
- Requires-Dist: jaxlib
26
- Requires-Dist: libtpu
24
+ Requires-Dist: jax==0.7.2
25
+ Requires-Dist: jaxlib==0.7.2
26
+ Requires-Dist: libtpu==0.0.23
27
27
  Requires-Dist: jaxtyping
28
28
  Requires-Dist: flax==0.11.1
29
29
  Requires-Dist: torchax==0.0.7
@@ -47,7 +47,7 @@ Dynamic: requires-python
47
47
  </p>
48
48
 
49
49
  <p align="center">
50
- | <a href="https://github.com/vllm-project/tpu-inference/tree/main/docs"><b>Documentation</b></a> | <a href="https://blog.vllm.ai/"><b>Blog</b></a> | <a href="https://discuss.vllm.ai/c/hardware-support/google-tpu-support/27"><b>User Forum</b></a> | <a href="https://join.slack.com/share/enQtOTY2OTUxMDIyNjY1OS00M2MxYWQwZjAyMGZjM2MyZjRjNTA0ZjRkNjkzOTRhMzg0NDM2OTlkZDAxOTAzYmJmNzdkNDc4OGZjYTUwMmRh"><b>Developer Slack</b></a> |
50
+ | <a href="https://tpu.vllm.ai"><b>Documentation</b></a> | <a href="https://blog.vllm.ai/"><b>Blog</b></a> | <a href="https://discuss.vllm.ai/c/hardware-support/google-tpu-support/27"><b>User Forum</b></a> | <a href="https://join.slack.com/share/enQtOTY2OTUxMDIyNjY1OS00M2MxYWQwZjAyMGZjM2MyZjRjNTA0ZjRkNjkzOTRhMzg0NDM2OTlkZDAxOTAzYmJmNzdkNDc4OGZjYTUwMmRh"><b>Developer Slack</b></a> |
51
51
  </p>
52
52
 
53
53
  ---
@@ -90,7 +90,7 @@ Visit our [documentation](https://github.com/vllm-project/tpu-inference/tree/mai
90
90
 
91
91
  ## Contribute
92
92
 
93
- We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22good%20first%20issue%22) if it's your first time contributing.
93
+ We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue+state%3Aopen+label%3A%22good+first+issue%22) if it's your first time contributing.
94
94
 
95
95
  ## Contact us
96
96
 
@@ -6,7 +6,7 @@
6
6
  </p>
7
7
 
8
8
  <p align="center">
9
- | <a href="https://github.com/vllm-project/tpu-inference/tree/main/docs"><b>Documentation</b></a> | <a href="https://blog.vllm.ai/"><b>Blog</b></a> | <a href="https://discuss.vllm.ai/c/hardware-support/google-tpu-support/27"><b>User Forum</b></a> | <a href="https://join.slack.com/share/enQtOTY2OTUxMDIyNjY1OS00M2MxYWQwZjAyMGZjM2MyZjRjNTA0ZjRkNjkzOTRhMzg0NDM2OTlkZDAxOTAzYmJmNzdkNDc4OGZjYTUwMmRh"><b>Developer Slack</b></a> |
9
+ | <a href="https://tpu.vllm.ai"><b>Documentation</b></a> | <a href="https://blog.vllm.ai/"><b>Blog</b></a> | <a href="https://discuss.vllm.ai/c/hardware-support/google-tpu-support/27"><b>User Forum</b></a> | <a href="https://join.slack.com/share/enQtOTY2OTUxMDIyNjY1OS00M2MxYWQwZjAyMGZjM2MyZjRjNTA0ZjRkNjkzOTRhMzg0NDM2OTlkZDAxOTAzYmJmNzdkNDc4OGZjYTUwMmRh"><b>Developer Slack</b></a> |
10
10
  </p>
11
11
 
12
12
  ---
@@ -49,7 +49,7 @@ Visit our [documentation](https://github.com/vllm-project/tpu-inference/tree/mai
49
49
 
50
50
  ## Contribute
51
51
 
52
- We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22good%20first%20issue%22) if it's your first time contributing.
52
+ We're always looking for ways to partner with the community to accelerate vLLM TPU development. If you're interested in contributing to this effort, check out the [Contributing guide](https://github.com/vllm-project/tpu-inference/blob/main/CONTRIBUTING.md) and [Issues](https://github.com/vllm-project/tpu-inference/issues) to start. We recommend filtering Issues on the [**good first issue** tag](https://github.com/vllm-project/tpu-inference/issues?q=is%3Aissue+state%3Aopen+label%3A%22good+first+issue%22) if it's your first time contributing.
53
53
 
54
54
  ## Contact us
55
55
 
@@ -5,9 +5,9 @@ pytest-mock
5
5
  absl-py
6
6
  numpy
7
7
  google-cloud-storage
8
- jax #==0.7.2
9
- jaxlib #==0.7.2
10
- libtpu #==0.0.23
8
+ jax==0.7.2
9
+ jaxlib==0.7.2
10
+ libtpu==0.0.23
11
11
  jaxtyping
12
12
  flax==0.11.1
13
13
  torchax==0.0.7