flashinfer-python 0.2.3.tar.gz → 0.2.4.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {flashinfer_python-0.2.3/flashinfer_python.egg-info → flashinfer_python-0.2.4}/PKG-INFO +6 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/README.md +3 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/activation.cu +10 -7
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode.cu +16 -12
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_jit_pybind.cu +11 -9
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_mla_cute_sm80.cu +8 -12
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_mla_plan.cu +7 -6
- flashinfer_python-0.2.4/csrc/batch_decode_mla_pybind.cu +21 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_mla_run.cu +6 -7
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_mla_plan.cu +4 -3
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_mla_pybind.cu +2 -3
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_mla_run.cu +3 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_mla_sm90_plan.cu +4 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_mla_sm90_pybind.cu +2 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_mla_sm90_run.cu +3 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill.cu +24 -21
- flashinfer_python-0.2.4/csrc/batch_prefill_jit_pybind.cu +49 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_sm90.cu +23 -22
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_sm90_jit_pybind.cu +15 -16
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/bmm_fp8.cu +3 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/cascade.cu +9 -7
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_cascade_ops.cu +3 -4
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_gemm_ops.cu +2 -3
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_gemm_sm90_ops.cu +1 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_norm_ops.cu +4 -5
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_ops.cu +82 -66
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_ops_sm90.cu +14 -18
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_page_ops.cu +12 -8
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_quantization_ops.cu +2 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_rope_ops.cu +8 -9
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/flashinfer_sampling_ops.cu +9 -16
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/group_gemm.cu +3 -3
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/group_gemm_sm90.cu +4 -5
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/norm.cu +13 -9
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/page.cu +87 -10
- flashinfer_python-0.2.4/csrc/pod.cu +272 -0
- flashinfer_python-0.2.4/csrc/pod_config.inc +45 -0
- flashinfer_python-0.2.4/csrc/pod_customize_config.jinja +42 -0
- flashinfer_python-0.2.4/csrc/pod_jit_pybind.cu +39 -0
- flashinfer_python-0.2.4/csrc/pod_kernel_inst.jinja +32 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/pytorch_conversion_utils.h +1 -1
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/pytorch_extension_utils.h +20 -15
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/quantization.cu +6 -4
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/renorm.cu +9 -9
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/rope.cu +24 -20
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/sampling.cu +18 -16
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_decode.cu +3 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_decode_jit_pybind.cu +1 -1
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill.cu +3 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_jit_pybind.cu +1 -1
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_sm90.cu +3 -3
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_sm90_jit_pybind.cu +1 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/__init__.py +2 -0
- flashinfer_python-0.2.4/flashinfer/_build_meta.py +1 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/activation.py +7 -7
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/cascade.py +28 -28
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/decode.py +163 -179
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/gemm.py +36 -42
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/__init__.py +9 -4
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/activation.py +3 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/attention/__init__.py +3 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/attention/pytorch.py +195 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/attention/tvm.py +16 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/mla.py +41 -47
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/norm.py +19 -22
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/page.py +126 -40
- flashinfer_python-0.2.4/flashinfer/pod.py +631 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/prefill.py +241 -254
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/quantization.py +22 -18
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/rope.py +87 -87
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/sampling.py +151 -160
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/sparse.py +38 -44
- flashinfer_python-0.2.4/flashinfer/triton/__init__.py +1 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/utils.py +8 -18
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4/flashinfer_python.egg-info}/PKG-INFO +6 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer_python.egg-info/SOURCES.txt +19 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/decode.cuh +34 -24
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/decode_mla_cute_sm80.cuh +8 -9
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/default_prefill_params.cuh +1 -1
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/mla.cuh +24 -21
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/mla_hopper.cuh +207 -260
- flashinfer_python-0.2.4/include/flashinfer/attention/pod.cuh +456 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/prefill.cuh +176 -145
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/cp_async.cuh +2 -1
- flashinfer_python-0.2.4/include/flashinfer/gemm/group_gemm.cuh +115 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/page.cuh +80 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/pos_enc.cuh +38 -32
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/sampling.cuh +122 -58
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/vec_dtypes.cuh +2 -2
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/setup.py +5 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_activation.py +19 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_alibi.py +8 -8
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_batch_decode_kernels.py +69 -59
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_batch_prefill_kernels.py +168 -85
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_block_sparse.py +1 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_deepseek_mla.py +224 -44
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_group_gemm.py +39 -17
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_hopper.py +13 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_jit_example.py +7 -1
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_logits_cap.py +6 -6
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_mla_decode_kernel.py +6 -4
- flashinfer_python-0.2.4/tests/test_mla_page.py +124 -0
- flashinfer_python-0.2.4/tests/test_pod_kernels.py +289 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_rope.py +55 -16
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_sampling.py +79 -74
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_sliding_window.py +26 -14
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_tensor_cores_decode.py +80 -28
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_mla_run.cu +7 -3
- flashinfer_python-0.2.4/tvm_binding/sampling.cu +32 -0
- flashinfer_python-0.2.4/tvm_binding/sampling_jit_tvm_binding.cu +7 -0
- flashinfer_python-0.2.4/version.txt +1 -0
- flashinfer_python-0.2.3/csrc/batch_decode_mla_pybind.cu +0 -20
- flashinfer_python-0.2.3/csrc/batch_prefill_jit_pybind.cu +0 -49
- flashinfer_python-0.2.3/flashinfer/_build_meta.py +0 -1
- flashinfer_python-0.2.3/flashinfer/triton/__init__.py +0 -1
- flashinfer_python-0.2.3/include/flashinfer/gemm/group_gemm.cuh +0 -99
- flashinfer_python-0.2.3/version.txt +0 -1
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/axpby.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/clear.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/cooperative_copy.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/cooperative_gemm.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/copy.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/fill.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/functional.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/gemm.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/prefer.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/prefetch.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/tensor_algorithms.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/algorithm/tuple_algorithms.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/cluster_sm90.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/config.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/copy.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/copy_sm50.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/copy_sm75.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/copy_sm80.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/copy_sm90.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/copy_sm90_desc.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/copy_sm90_tma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm61.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm70.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm75.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm80.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm90.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm90_desc.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm90_gmma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/mma_sm90_gmma_sparse.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/arch/util.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_atom.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits_sm50.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits_sm75.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits_sm80.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits_sm90.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits_sm90_im2col.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits_sm90_tma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/copy_traits_sm90_tma_swizzle.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_atom.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits_sm61.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits_sm70.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits_sm75.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits_sm80.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits_sm90.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits_sm90_gmma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/atom/mma_traits_sm90_gmma_sparse.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/config.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/alignment.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/array.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/array_aligned.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/array_subbyte.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/bit_field.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/cuda_types.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/packed_tuple.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/tuple.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/container/type_list.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/int_tuple.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/layout.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/layout_composed.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/arithmetic_tuple.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/complex.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/int.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/integer_sequence.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/integral_constant.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/integral_ratio.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/math.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/numeric_types.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/numeric/real.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/pointer.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/pointer_base.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/pointer_flagged.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/pointer_sparse.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/pointer_swizzle.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/stride.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/swizzle.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/swizzle_layout.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/tensor.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/tensor_impl.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/tensor_predicate.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/tensor_zip.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/underscore.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/util/debug.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/util/print.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cute/util/type_traits.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/aligned_buffer.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/arch.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/barrier.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/cache_operation.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/config.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/grid_dependency_control.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/memory.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/memory_sm75.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/memory_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm50.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm60.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm61.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm70.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm75.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm89.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sm90.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sparse_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/mma_sparse_sm89.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/reg_reconfig.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/simd.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/simd_sm60.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/simd_sm61.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/synclog.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/wmma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/wmma_sm70.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/wmma_sm72.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/arch/wmma_sm75.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/array.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/array_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/array_subbyte.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/barrier.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/bfloat16.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/blas3.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/blas3_types.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/block_striped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/cluster_launch.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/constants.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/collective/builders/sm90_common.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/collective/builders/sm90_gmma_builder.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/collective/collective_builder.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/collective/collective_conv.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/collective/detail.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/collective/sm90_implicit_gemm_gmma_ss_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/conv2d_problem_size.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/conv3d_problem_size.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/convnd_problem_shape.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/convolution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/detail.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/device/conv_universal_adapter.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/device/direct_convolution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/device/implicit_gemm_convolution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/device/implicit_gemm_convolution_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/dispatch_policy.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/conv_universal.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_dgrad.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_fprop_with_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_group_fprop.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_wgrad.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv2d_wgrad_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv3d_dgrad.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv3d_fprop.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv3d_fprop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv3d_fprop_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_conv3d_wgrad.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_deconv2d.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_deconv2d_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_deconv3d.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_deconv3d_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/default_depthwise_fprop.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/direct_convolution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/implicit_gemm_convolution_with_fused_epilogue.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/kernel/sm90_implicit_gemm_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/thread/depthwise_mma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv2d_wgrad_output_gradient_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_filter_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_activation_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_analytic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_direct_conv_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_fixed_stride_dilation.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_activation_tile_access_iterator_direct_conv_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_direct_conv_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_filter_tile_access_iterator_direct_conv_optimized.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_fprop_pipelined.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_mma_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/implicit_gemm_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/implicit_gemm_pipelined.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/predicated_scale_bias_vector_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/threadblock/threadblock_swizzle.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/warp/mma_depthwise_simt.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/conv/warp/scale_bias_relu_transform.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/coord.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/core_io.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/cuda_host_adapter.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/cutlass.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/detail/collective.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/detail/dependent_false.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/detail/helper_macros.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/detail/layout.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/detail/mma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/device_kernel.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/builders/sm90_builder.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/builders/sm90_common.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/collective_builder.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/collective_epilogue.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/default_epilogue.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/default_epilogue_array.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/detail.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/sm70_epilogue_vectorized_array.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/sm90_epilogue_array_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized_bias_elementwise.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/dispatch_policy.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/callbacks.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/operations.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/sm90_visitor_compute_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/sm90_visitor_load_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/fusion/sm90_visitor_topk_softmax.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/activation.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/conversion_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/detail.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_bias_elementwise.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_bias_relu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_clamp.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_dgelu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_drelu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_gelu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_generic.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_generic_with_scaling.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_hardswish.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_leaky_relu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_relu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_relu0.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_residual_block.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_sigmoid.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_silu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_tensor_broadcast.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/linear_combination_with_elementwise.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/reduction_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/thread/scale_type.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op_blas3.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_direct_store.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_simt.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_with_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_simt.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/default_thread_map_wmma_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_base_streamk.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_depthwise.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_direct_store.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_smem_accumulator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_streamk_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_visitor_with_softmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/epilogue_workspace.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_2x.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_compute.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_load.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/fusion/visitor_store.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/fusion/visitors.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/interleaved_epilogue.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/output_iterator_parameter.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/output_tile_thread_map.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine_layout_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_blas3.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_conv.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_mixed.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/threadblock/shared_load_iterator_pitch_linear.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/fragment_iterator_gaussian_complex_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/fragment_iterator_simt.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/fragment_iterator_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/simt_policy.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/tensor_op_policy.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/tile_iterator_simt.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/tile_iterator_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/volta_tensor_op_policy.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/epilogue/warp/wmma_tensor_op_policy.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/fast_math.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/float8.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/floating_point_nvrtc.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/functional.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/builders/sm90_common.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/builders/sm90_gmma_builder.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/builders/sm90_sparse_config.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/builders/sm90_sparse_gmma_builder.inl +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/collective_builder.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/collective_builder_decl.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/collective_mma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/collective_mma_decl.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/fp8_accumulation.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm70_mma_twostage.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm80_mma_multistage.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_array_tma_gmma_ss_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_multistage_gmma_rs_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_multistage_gmma_ss_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_rs_warpspecialized_mixed_input.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_mma_tma_gmma_ss_warpspecialized_fp8.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/collective/sm90_sparse_mma_tma_gmma_ss_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/base_grouped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/default_gemm_configuration.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/ell_gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_array.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_batched.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_grouped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_layernorm_mainloop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_sparse.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_sparse_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_sparse_universal_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_sparse_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_sparse_with_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_splitk_parallel.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_universal_adapter.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_universal_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_universal_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_universal_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemm_with_k_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/gemv.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/rank_2k.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/rank_2k_grouped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/rank_k.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/symm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/device/trmm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/dispatch_policy.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/gemm_enumerated_types.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/group_array_problem_shape.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_ell_gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_grouped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_layernorm_mainloop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_planar_complex_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_sparse.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_sparse_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_sparse_universal_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_sparse_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_sparse_with_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_splitk_parallel.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_streamk_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_with_broadcast.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_with_k_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemm_with_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_gemv.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_rank_2k.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_rank_2k_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_rank_2k_grouped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_rank_2k_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_rank_k.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_rank_k_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_rank_k_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_symm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_symm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_symm_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_trmm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_trmm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/default_trmm_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/ell_gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_array.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_batched.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_grouped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_grouped_problem_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_pipelined.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_planar_complex_array.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_sparse_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_sparse_universal_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_splitk_parallel.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_transpose_operands.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_decl.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_streamk.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_fused_epilogue.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/gemv_batched_strided.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/grouped_problem_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/params_sparse_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/params_universal_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_2k_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/rank_k_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm70_gemm.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_array_tma_warpspecialized_cooperative.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_array_tma_warpspecialized_pingpong.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_pingpong.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm_with_absmax.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/sparse_gemm_with_visitor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/static_tile_scheduler.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/symm_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/tile_scheduler_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/kernel/trmm_universal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/thread/mma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/thread/mma_sm50.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/thread/mma_sm60.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/thread/mma_sm61.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_ell_mma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_gemv_core.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_simt.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm70.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm75.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_core_wmma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_layernorm_mainloop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_planar_complex_pipelined.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_mma_with_reduction.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_multistage_trmm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_sparse_mma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/default_trmm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/ell_mma_pipelined.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/gemv.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/index_remat.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_blas3_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_pipelined.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_planar_complex_pipelined.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_singlestage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_softmax_mainloop_fusion_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_base.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_sparse_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/mma_with_reduction_multistage.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_complex_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_sparse_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/default_mma_wmma_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/layernorm_scale_bias_transform.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_policy.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_simt_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_sparse_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_policy.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_sm70.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_sparse.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_tensor_op_wmma.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/scale_bias_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/softmax_scale_bias_transform.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm/warp/tile_iterator_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm_coord.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/gemm_coord.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/half.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/integer_subbyte.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/kernel_hardware_info.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/kernel_hardware_info.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/kernel_launch.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/layout.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/matrix.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/permute.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/pitch_linear.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/tensor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm70.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm75.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/tensor_op_multiplicand_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/layout/vector.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/matrix.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/matrix_coord.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/matrix_shape.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/numeric_conversion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/numeric_size.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/numeric_types.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/pipeline/pipeline.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/pipeline/sm90_pipeline.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/pitch_linear_coord.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/platform/platform.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/predicate_vector.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/quaternion.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/real.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/device/reduce_split_k.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_contiguous.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/device/tensor_reduce_affine_strided.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_softmax_final.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/kernel/reduce_split_k.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/kernel/tensor_reduce_affine_strided.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/thread/reduce.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/thread/reduction_operators.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/reduction/threadblock_swizzle.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/relatively_equal.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/semaphore.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/subbyte_reference.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/tensor_coord.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/tensor_ref.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/tensor_ref_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/tensor_view.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/tensor_view_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/tfloat32.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/thread/matrix.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/trace.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/collective/sm90_wgmma_transpose.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/device/transform_universal_adapter.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/kernel/filter_format_transformer.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/kernel/sm90_sparse_gemm_compressor.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/kernel/sparse_gemm_compressor.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/pitch_linear_thread_map.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/thread/transpose.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/thread/unary_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/ell_predicated_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_access_iterator_triangular_matrix.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_tile_iterator_triangular_matrix.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/predicated_vector_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_scale_bias_vector_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op_sm70.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/threadblock/vector_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/uint128.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/version.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/wmma_array.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/include/cutlass/workspace.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/GPU_Clock.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/command_line.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/debug.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_dump.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_groupnorm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_layernorm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_memory.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_nchw_to_nhwc.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_padding.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_nhwc_to_nchw.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_rmsnorm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/device_utils.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/distribution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/exceptions.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/gett_commandline.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/helper_cuda.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/host_reorder.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/host_tensor_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/host_uncompress.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/index_sequence.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/packed_stride.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/print_error.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/inner_product.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/detail/linear_to_coordinate.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/convolution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gemm_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/gett.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_foreach.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/rank_2k_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_compare.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_fill.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_foreach.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_reduce.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/tensor_relu.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/device/thread/gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/conv.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/convolution.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/error_metrics.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gemm_planar_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/gett.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/rank_k_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/symm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_copy.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_elementwise.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_foreach.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_norm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.hpp +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/reference/host/trmm_complex.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/tensor_view_io.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/3rdparty/cutlass/tools/util/include/cutlass/util/type_traits.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/LICENSE +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/aot_default_additional_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/aot_extension_utils.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_config.inc +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_decode_mla_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_mla_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_config.inc +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_paged_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_paged_sm90_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_ragged_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_ragged_sm90_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_sm90_config.inc +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/batch_prefill_sm90_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/aot_default_additional_params.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_e4m3_dtypekv_e4m3_dtypeout_e4m3_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_e5m2_dtypekv_e5m2_dtypeout_e5m2_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_e4m3_dtypekv_e4m3_dtypeout_e4m3_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_e5m2_dtypekv_e5m2_dtypeout_e5m2_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_paged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e4m3_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e5m2_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/batch_ragged_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_idtype_i32_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/dispatch.inc +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_e4m3_dtypekv_e4m3_dtypeout_e4m3.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_e5m2_dtypekv_e5m2_dtypeout_e5m2.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_128_head_vo_128_posenc_0_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_e4m3_dtypekv_e4m3_dtypeout_e4m3.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_e5m2_dtypekv_e5m2_dtypeout_e5m2.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_decode_head_qk_256_head_vo_256_posenc_0_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_128_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_192_head_vo_128_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_0_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_1_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_bf16_dtypeout_bf16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e4m3_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_bf16_dtypekv_e5m2_dtypeout_bf16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e4m3_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_e5m2_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/generated/single_prefill_head_qk_256_head_vo_256_posenc_0_fp16qkred_0_mask_2_dtypeq_f16_dtypekv_f16_dtypeout_f16_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/runtime_utils.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_decode_config.inc +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_decode_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_decode_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_config.inc +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_sm90_config.inc +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_sm90_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/csrc/single_prefill_sm90_kernel_inst.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/custom_backend.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/aot_config.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/attention/utils.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/core.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/env.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/jit/utils.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/py.typed +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/activation.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/cascade.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/gemm.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/kernels/__init__.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/kernels/activation.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/kernels/cascade.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/kernels/quant.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/page.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer/triton/utils.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer_python.egg-info/dependency_links.txt +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer_python.egg-info/requires.txt +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/flashinfer_python.egg-info/top_level.txt +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/activation.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/allocator.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/cascade.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/default_decode_params.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/heap.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/attention_updater.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/block_sparse_gather.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/default_params.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/epilogue.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/kernel_traits.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/mainloop.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/mainloop_mma.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/named_barrier.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/prefill_sm90.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/quantization/epilogue.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/quantization/kernel_traits.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/quantization/mainloop_load.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/quantization/mainloop_mma.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/quantization/prefill_sm90.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/sparse_mainloop.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/tile_scheduler.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/utils.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/variant_helper.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper/variants.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/hopper.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/mask.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/mla_params.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/scheduler.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/state.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/variant_helper.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention/variants.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/attention_impl.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/cutlass_utils.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/distributed/all_reduce.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/exception.h +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/fastdiv.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/frag_layout_swizzle.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/gemm/bmm_fp8.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/gemm/group_gemm_lora.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/gemm/group_gemm_sm90.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/gemm/group_gemv.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/layout.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/math.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/mma.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/norm.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/permuted_smem.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/profiler.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/quantization.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/include/flashinfer/utils.cuh +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/pyproject.toml +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/setup.cfg +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_block_sparse_indices_to_vector_sparse_offsets.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_bmm_fp8.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_decode_fp8_calibration_scale.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_decode_prefill_lse.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_fp8_prefill.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_jit_warmup.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_non_contiguous_decode.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_non_contiguous_prefill.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_norm.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_page.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_quantization.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_shared_prefix_kernels.py +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tests/test_triton_cascade.py +1 -1
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_decode.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_decode_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_decode_jit_tvm_binding.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_mla_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_mla_jit_tvm_binding.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_mla_plan.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_prefill.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_prefill_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_prefill_jit_tvm_binding.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_prefill_sm90.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_prefill_sm90_customize_config.jinja +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/batch_prefill_sm90_jit_tvm_binding.cu +0 -0
- {flashinfer_python-0.2.3 → flashinfer_python-0.2.4}/tvm_binding/tvm_binding_utils.h +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
Metadata-Version: 2.
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
2
|
Name: flashinfer-python
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.4
|
|
4
4
|
Summary: FlashInfer: Kernel Library for LLM Serving
|
|
5
5
|
Author: FlashInfer team
|
|
6
6
|
License: Apache License 2.0
|
|
@@ -11,6 +11,7 @@ License-File: LICENSE
|
|
|
11
11
|
Requires-Dist: numpy
|
|
12
12
|
Requires-Dist: torch
|
|
13
13
|
Requires-Dist: ninja
|
|
14
|
+
Dynamic: license-file
|
|
14
15
|
Dynamic: requires-dist
|
|
15
16
|
|
|
16
17
|
<p align="center">
|
|
@@ -27,6 +28,7 @@ Kernel Library for LLM Serving
|
|
|
27
28
|
| <a href="https://flashinfer.ai"><b>Blog</b></a> | <a href="https://docs.flashinfer.ai"><b>Documentation</b></a> | <a href="https://join.slack.com/t/flashinfer/shared_invite/zt-2r93kj2aq-wZnC2n_Z2~mf73N5qnVGGA"><b>Slack</b></a>| <a href="https://github.com/orgs/flashinfer-ai/discussions"><b>Discussion Forum</b></a> |
|
|
28
29
|
</p>
|
|
29
30
|
|
|
31
|
+
[](https://ci.tlcpack.ai/job/flashinfer-ci/job/main/)
|
|
30
32
|
[](https://github.com/flashinfer-ai/flashinfer/actions/workflows/release_wheel.yml)
|
|
31
33
|
[](https://github.com/flashinfer-ai/flashinfer/actions/workflows/build-doc.yml)
|
|
32
34
|
|
|
@@ -46,6 +48,8 @@ The core features of FlashInfer include:
|
|
|
46
48
|
FlashInfer supports PyTorch, TVM and C++ (header-only) APIs, and can be easily integrated into existing projects.
|
|
47
49
|
|
|
48
50
|
## News
|
|
51
|
+
- [Mar 10, 2025] [Blog Post](https://flashinfer.ai/2025/03/10/sampling.html) Sorting-Free GPU Kernels for LLM Sampling, which explains the design of sampling kernels in FlashInfer.
|
|
52
|
+
- [Mar 1, 2025] Checkout flashinfer's [intra-kernel profiler](https://github.com/flashinfer-ai/flashinfer/tree/main/profiler) for visualizing the timeline of each threadblock in GPU kernels.
|
|
49
53
|
- [Dec 16, 2024] [Blog Post](https://flashinfer.ai/2024/12/16/flashinfer-v02-release.html) FlashInfer 0.2 - Efficient and Customizable Kernels for LLM Inference Serving
|
|
50
54
|
- [Sept 2024] We've launched a [Slack](https://join.slack.com/t/flashinfer/shared_invite/zt-2r93kj2aq-wZnC2n_Z2~mf73N5qnVGGA) workspace for Flashinfer users and developers. Join us for timely support, discussions, updates and knowledge sharing!
|
|
51
55
|
- [Jan 31, 2024] [Blog Post](https://flashinfer.ai/2024/01/08/cascade-inference.html) Cascade Inference: Memory-Efficient Shared Prefix Batch Decoding
|
|
@@ -12,6 +12,7 @@ Kernel Library for LLM Serving
|
|
|
12
12
|
| <a href="https://flashinfer.ai"><b>Blog</b></a> | <a href="https://docs.flashinfer.ai"><b>Documentation</b></a> | <a href="https://join.slack.com/t/flashinfer/shared_invite/zt-2r93kj2aq-wZnC2n_Z2~mf73N5qnVGGA"><b>Slack</b></a>| <a href="https://github.com/orgs/flashinfer-ai/discussions"><b>Discussion Forum</b></a> |
|
|
13
13
|
</p>
|
|
14
14
|
|
|
15
|
+
[](https://ci.tlcpack.ai/job/flashinfer-ci/job/main/)
|
|
15
16
|
[](https://github.com/flashinfer-ai/flashinfer/actions/workflows/release_wheel.yml)
|
|
16
17
|
[](https://github.com/flashinfer-ai/flashinfer/actions/workflows/build-doc.yml)
|
|
17
18
|
|
|
@@ -31,6 +32,8 @@ The core features of FlashInfer include:
|
|
|
31
32
|
FlashInfer supports PyTorch, TVM and C++ (header-only) APIs, and can be easily integrated into existing projects.
|
|
32
33
|
|
|
33
34
|
## News
|
|
35
|
+
- [Mar 10, 2025] [Blog Post](https://flashinfer.ai/2025/03/10/sampling.html) Sorting-Free GPU Kernels for LLM Sampling, which explains the design of sampling kernels in FlashInfer.
|
|
36
|
+
- [Mar 1, 2025] Checkout flashinfer's [intra-kernel profiler](https://github.com/flashinfer-ai/flashinfer/tree/main/profiler) for visualizing the timeline of each threadblock in GPU kernels.
|
|
34
37
|
- [Dec 16, 2024] [Blog Post](https://flashinfer.ai/2024/12/16/flashinfer-v02-release.html) FlashInfer 0.2 - Efficient and Customizable Kernels for LLM Inference Serving
|
|
35
38
|
- [Sept 2024] We've launched a [Slack](https://join.slack.com/t/flashinfer/shared_invite/zt-2r93kj2aq-wZnC2n_Z2~mf73N5qnVGGA) workspace for Flashinfer users and developers. Join us for timely support, discussions, updates and knowledge sharing!
|
|
36
39
|
- [Jan 31, 2024] [Blog Post](https://flashinfer.ai/2024/01/08/cascade-inference.html) Cascade Inference: Memory-Efficient Shared Prefix Batch Decoding
|
|
@@ -32,11 +32,12 @@ __device__ __forceinline__ float gelu_tanh(const float& val) {
|
|
|
32
32
|
return val * cdf;
|
|
33
33
|
}
|
|
34
34
|
|
|
35
|
-
void silu_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl
|
|
35
|
+
void silu_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl) {
|
|
36
36
|
int d = input.size(-1) / 2;
|
|
37
37
|
int64_t num_tokens = input.numel() / input.size(-1);
|
|
38
38
|
|
|
39
|
-
|
|
39
|
+
const c10::cuda::OptionalCUDAGuard device_guard(out.device());
|
|
40
|
+
auto stream = at::cuda::getCurrentCUDAStream();
|
|
40
41
|
|
|
41
42
|
DISPATCH_PYTORCH_DTYPE_TO_CTYPE_FP16(input.scalar_type(), c_type, [&] {
|
|
42
43
|
uint32_t vec_size = 16 / sizeof(c_type);
|
|
@@ -63,11 +64,13 @@ void silu_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl, int64_t c
|
|
|
63
64
|
});
|
|
64
65
|
}
|
|
65
66
|
|
|
66
|
-
void gelu_tanh_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl
|
|
67
|
+
void gelu_tanh_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl) {
|
|
67
68
|
int d = input.size(-1) / 2;
|
|
68
69
|
int64_t num_tokens = input.numel() / input.size(-1);
|
|
69
70
|
|
|
70
|
-
|
|
71
|
+
const c10::cuda::OptionalCUDAGuard device_guard(out.device());
|
|
72
|
+
auto stream = at::cuda::getCurrentCUDAStream();
|
|
73
|
+
|
|
71
74
|
DISPATCH_PYTORCH_DTYPE_TO_CTYPE_FP16(input.scalar_type(), c_type, [&] {
|
|
72
75
|
uint32_t vec_size = 16 / sizeof(c_type);
|
|
73
76
|
cudaLaunchConfig_t config;
|
|
@@ -93,12 +96,12 @@ void gelu_tanh_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl, int6
|
|
|
93
96
|
});
|
|
94
97
|
}
|
|
95
98
|
|
|
96
|
-
void gelu_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl
|
|
99
|
+
void gelu_and_mul(at::Tensor& out, at::Tensor& input, bool enable_pdl) {
|
|
97
100
|
int d = input.size(-1) / 2;
|
|
98
101
|
int64_t num_tokens = input.numel() / input.size(-1);
|
|
99
|
-
|
|
102
|
+
const c10::cuda::OptionalCUDAGuard device_guard(out.device());
|
|
103
|
+
auto stream = at::cuda::getCurrentCUDAStream();
|
|
100
104
|
|
|
101
|
-
cudaStream_t stream = reinterpret_cast<cudaStream_t>(cuda_stream);
|
|
102
105
|
DISPATCH_PYTORCH_DTYPE_TO_CTYPE_FP16(input.scalar_type(), c_type, [&] {
|
|
103
106
|
uint32_t vec_size = 16 / sizeof(c_type);
|
|
104
107
|
cudaLaunchConfig_t config;
|
|
@@ -19,8 +19,8 @@
|
|
|
19
19
|
#include <optional>
|
|
20
20
|
|
|
21
21
|
#include "batch_decode_config.inc"
|
|
22
|
-
#include "pytorch_extension_utils.h"
|
|
23
22
|
#include "pytorch_conversion_utils.h"
|
|
23
|
+
#include "pytorch_extension_utils.h"
|
|
24
24
|
|
|
25
25
|
namespace flashinfer {
|
|
26
26
|
|
|
@@ -36,9 +36,9 @@ using namespace flashinfer;
|
|
|
36
36
|
at::Tensor BatchDecodeWithPagedKVCachePlan(
|
|
37
37
|
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer,
|
|
38
38
|
at::Tensor page_locked_int_workspace_buffer, at::Tensor indptr, int64_t batch_size,
|
|
39
|
-
int64_t num_qo_heads, int64_t num_kv_heads, int64_t page_size,
|
|
40
|
-
|
|
41
|
-
|
|
39
|
+
int64_t num_qo_heads, int64_t num_kv_heads, int64_t page_size, bool enable_cuda_graph,
|
|
40
|
+
int64_t window_left, double logits_soft_cap, int64_t head_dim_qk, int64_t head_dim_vo,
|
|
41
|
+
at::Tensor empty_q_data, at::Tensor empty_kv_data) {
|
|
42
42
|
size_t float_workspace_size_in_bytes =
|
|
43
43
|
float_workspace_buffer.size(0) * float_workspace_buffer.element_size();
|
|
44
44
|
size_t int_workspace_size_in_bytes =
|
|
@@ -53,7 +53,8 @@ at::Tensor BatchDecodeWithPagedKVCachePlan(
|
|
|
53
53
|
"CUDA cores template only supports equal head dim for QK and VO, please use tensor "
|
|
54
54
|
"cores template for different head dim");
|
|
55
55
|
|
|
56
|
-
|
|
56
|
+
const c10::cuda::OptionalCUDAGuard device_guard(float_workspace_buffer.device());
|
|
57
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
57
58
|
DISPATCH_context(
|
|
58
59
|
DTypeQ, DTypeKV, DTypeO, IdType, HEAD_DIM_QK, HEAD_DIM_VO, POS_ENCODING_MODE,
|
|
59
60
|
USE_SLIDING_WINDOW, USE_LOGITS_SOFT_CAP, AttentionVariant, Params, [&] {
|
|
@@ -77,12 +78,14 @@ at::Tensor BatchDecodeWithPagedKVCachePlan(
|
|
|
77
78
|
return vec_to_tensor(plan_info.ToVector());
|
|
78
79
|
}
|
|
79
80
|
|
|
80
|
-
void BatchDecodeWithPagedKVCacheRun(
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
81
|
+
void BatchDecodeWithPagedKVCacheRun(at::Tensor float_workspace_buffer,
|
|
82
|
+
at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
83
|
+
at::Tensor q, at::Tensor paged_k_cache,
|
|
84
|
+
at::Tensor paged_v_cache, at::Tensor paged_kv_indptr,
|
|
85
|
+
at::Tensor paged_kv_indices, at::Tensor paged_kv_last_page_len,
|
|
86
|
+
at::Tensor o, std::optional<at::Tensor> maybe_lse,
|
|
87
|
+
int64_t kv_layout_code,
|
|
88
|
+
int64_t window_left ADDITIONAL_FUNC_PARAMS) {
|
|
86
89
|
DecodePlanInfo plan_info;
|
|
87
90
|
plan_info.FromVector(tensor_to_vec(plan_info_vec));
|
|
88
91
|
QKVLayout kv_layout = static_cast<QKVLayout>(kv_layout_code);
|
|
@@ -129,7 +132,8 @@ void BatchDecodeWithPagedKVCacheRun(
|
|
|
129
132
|
TORCH_CHECK(k_strides == v_strides, "k/v strides must be identical");
|
|
130
133
|
kv_cache_strides = k_strides.data();
|
|
131
134
|
|
|
132
|
-
|
|
135
|
+
const c10::cuda::OptionalCUDAGuard device_guard(device);
|
|
136
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
133
137
|
|
|
134
138
|
DISPATCH_context(
|
|
135
139
|
DTypeQ, DTypeKV, DTypeO, IdType, HEAD_DIM_QK, HEAD_DIM_VO, POS_ENCODING_MODE,
|
|
@@ -19,16 +19,18 @@
|
|
|
19
19
|
at::Tensor BatchDecodeWithPagedKVCachePlan(
|
|
20
20
|
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer,
|
|
21
21
|
at::Tensor page_locked_int_workspace_buffer, at::Tensor indptr, int64_t batch_size,
|
|
22
|
-
int64_t num_qo_heads, int64_t num_kv_heads, int64_t page_size,
|
|
23
|
-
|
|
24
|
-
|
|
22
|
+
int64_t num_qo_heads, int64_t num_kv_heads, int64_t page_size, bool enable_cuda_graph,
|
|
23
|
+
int64_t window_left, double logits_soft_cap, int64_t head_dim_qk, int64_t head_dim_vo,
|
|
24
|
+
at::Tensor empty_q_data, at::Tensor empty_kv_data);
|
|
25
25
|
|
|
26
|
-
void BatchDecodeWithPagedKVCacheRun(
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
26
|
+
void BatchDecodeWithPagedKVCacheRun(at::Tensor float_workspace_buffer,
|
|
27
|
+
at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
28
|
+
at::Tensor q, at::Tensor paged_k_cache,
|
|
29
|
+
at::Tensor paged_v_cache, at::Tensor paged_kv_indptr,
|
|
30
|
+
at::Tensor paged_kv_indices, at::Tensor paged_kv_last_page_len,
|
|
31
|
+
at::Tensor o, std::optional<at::Tensor> maybe_lse,
|
|
32
|
+
int64_t kv_layout_code,
|
|
33
|
+
int64_t window_left ADDITIONAL_FUNC_PARAMS);
|
|
32
34
|
|
|
33
35
|
TORCH_LIBRARY_FRAGMENT(TORCH_EXTENSION_NAME, m) {
|
|
34
36
|
// Batched decode with paged KV-Cache plan
|
|
@@ -1,11 +1,9 @@
|
|
|
1
|
+
#include <flashinfer/attention/decode_mla_cute_sm80.cuh>
|
|
2
|
+
#include <flashinfer/attention/scheduler.cuh>
|
|
1
3
|
#include <optional>
|
|
2
4
|
|
|
3
|
-
#include "pytorch_extension_utils.h"
|
|
4
|
-
|
|
5
5
|
#include "mla_config.inc"
|
|
6
|
-
|
|
7
|
-
#include <flashinfer/attention/decode_mla_cute_sm80.cuh>
|
|
8
|
-
#include <flashinfer/attention/scheduler.cuh>
|
|
6
|
+
#include "pytorch_extension_utils.h"
|
|
9
7
|
|
|
10
8
|
using namespace flashinfer;
|
|
11
9
|
|
|
@@ -22,9 +20,8 @@ std::vector<int64_t> BatchDecodeWithPagedKVCachePlanMLA(
|
|
|
22
20
|
DecodePlanInfo plan_info;
|
|
23
21
|
cudaStream_t stream = reinterpret_cast<cudaStream_t>(cuda_stream);
|
|
24
22
|
|
|
25
|
-
auto work_estimation_func =
|
|
26
|
-
|
|
27
|
-
AttentionVariant, Params>;
|
|
23
|
+
auto work_estimation_func = BatchDecodeWithPagedKVCacheWorkEstimationDispatchedMlaCuteSM80<
|
|
24
|
+
HEAD_DIM_CKV, HEAD_DIM_KPE, QO_TILE_LEN, AttentionVariant, Params>;
|
|
28
25
|
cudaError_t status =
|
|
29
26
|
DecodePlan<HEAD_DIM_CKV, flashinfer::PosEncodingMode::kNone, AttentionVariant, Params>(
|
|
30
27
|
static_cast<void*>(float_workspace_buffer.data_ptr()), float_workspace_size_in_bytes,
|
|
@@ -40,7 +37,6 @@ std::vector<int64_t> BatchDecodeWithPagedKVCachePlanMLA(
|
|
|
40
37
|
return plan_info.ToVector();
|
|
41
38
|
}
|
|
42
39
|
|
|
43
|
-
|
|
44
40
|
void BatchDecodeWithPagedKVCacheRunMLA(
|
|
45
41
|
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer,
|
|
46
42
|
std::vector<int64_t> plan_info_vec, at::Tensor q_nope, at::Tensor q_pe,
|
|
@@ -99,9 +95,9 @@ void BatchDecodeWithPagedKVCacheRunMLA(
|
|
|
99
95
|
params.padded_batch_size = plan_info.padded_batch_size;
|
|
100
96
|
|
|
101
97
|
cudaStream_t stream = reinterpret_cast<cudaStream_t>(cuda_stream);
|
|
102
|
-
cudaError_t status =
|
|
103
|
-
|
|
104
|
-
|
|
98
|
+
cudaError_t status = BatchDecodeWithPagedKVCacheDispatchedMlaCuteSM80<HEAD_DIM_CKV, HEAD_DIM_KPE,
|
|
99
|
+
QO_TILE_LEN, Params>(
|
|
100
|
+
params, tmp_v, tmp_s, /*stream=*/stream);
|
|
105
101
|
TORCH_CHECK(status == cudaSuccess, "BatchDecodeWithPagedKVCache failed with error ",
|
|
106
102
|
cudaGetErrorString(status));
|
|
107
103
|
}
|
|
@@ -3,16 +3,17 @@
|
|
|
3
3
|
#include <optional>
|
|
4
4
|
|
|
5
5
|
#include "mla_config.inc"
|
|
6
|
-
#include "pytorch_extension_utils.h"
|
|
7
6
|
#include "pytorch_conversion_utils.h"
|
|
7
|
+
#include "pytorch_extension_utils.h"
|
|
8
8
|
|
|
9
9
|
using namespace flashinfer;
|
|
10
10
|
|
|
11
|
-
at::Tensor BatchDecodeWithPagedKVCachePlanMLA(
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
11
|
+
at::Tensor BatchDecodeWithPagedKVCachePlanMLA(at::Tensor float_workspace_buffer,
|
|
12
|
+
at::Tensor int_workspace_buffer,
|
|
13
|
+
at::Tensor page_locked_int_workspace_buffer,
|
|
14
|
+
at::Tensor indptr, int64_t batch_size,
|
|
15
|
+
int64_t num_qo_heads, int64_t page_size,
|
|
16
|
+
bool enable_cuda_graph, int64_t cuda_stream) {
|
|
16
17
|
size_t float_workspace_size_in_bytes =
|
|
17
18
|
float_workspace_buffer.size(0) * float_workspace_buffer.element_size();
|
|
18
19
|
size_t int_workspace_size_in_bytes =
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
#include "mla_config.inc"
|
|
2
|
+
#include "pytorch_extension_utils.h"
|
|
3
|
+
|
|
4
|
+
at::Tensor BatchDecodeWithPagedKVCachePlanMLA(at::Tensor float_workspace_buffer,
|
|
5
|
+
at::Tensor int_workspace_buffer,
|
|
6
|
+
at::Tensor page_locked_int_workspace_buffer,
|
|
7
|
+
at::Tensor indptr, int64_t batch_size,
|
|
8
|
+
int64_t num_qo_heads, int64_t page_size,
|
|
9
|
+
bool enable_cuda_graph, int64_t cuda_stream);
|
|
10
|
+
|
|
11
|
+
void BatchDecodeWithPagedKVCacheRunMLA(
|
|
12
|
+
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
13
|
+
at::Tensor q_nope, at::Tensor q_pe, at::Tensor paged_ckv_cache, at::Tensor paged_kpe_cache,
|
|
14
|
+
at::Tensor paged_kv_indptr, at::Tensor paged_kv_indices, at::Tensor paged_kv_last_page_len,
|
|
15
|
+
at::Tensor o, double sm_scale, int64_t window_left, double logits_soft_cap, double rope_scale,
|
|
16
|
+
double rope_theta, std::optional<at::Tensor> maybe_lse, int64_t cuda_stream);
|
|
17
|
+
|
|
18
|
+
TORCH_LIBRARY_FRAGMENT(TORCH_EXTENSION_NAME, m) {
|
|
19
|
+
m.def("plan", BatchDecodeWithPagedKVCachePlanMLA);
|
|
20
|
+
m.def("run", BatchDecodeWithPagedKVCacheRunMLA);
|
|
21
|
+
}
|
|
@@ -3,18 +3,17 @@
|
|
|
3
3
|
#include <optional>
|
|
4
4
|
|
|
5
5
|
#include "mla_config.inc"
|
|
6
|
-
#include "pytorch_extension_utils.h"
|
|
7
6
|
#include "pytorch_conversion_utils.h"
|
|
7
|
+
#include "pytorch_extension_utils.h"
|
|
8
8
|
|
|
9
9
|
using namespace flashinfer;
|
|
10
10
|
|
|
11
11
|
void BatchDecodeWithPagedKVCacheRunMLA(
|
|
12
|
-
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer,
|
|
13
|
-
at::Tensor
|
|
14
|
-
at::Tensor
|
|
15
|
-
at::Tensor
|
|
16
|
-
|
|
17
|
-
std::optional<at::Tensor> maybe_lse, int64_t cuda_stream) {
|
|
12
|
+
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
13
|
+
at::Tensor q_nope, at::Tensor q_pe, at::Tensor paged_ckv_cache, at::Tensor paged_kpe_cache,
|
|
14
|
+
at::Tensor paged_kv_indptr, at::Tensor paged_kv_indices, at::Tensor paged_kv_last_page_len,
|
|
15
|
+
at::Tensor o, double sm_scale, int64_t window_left, double logits_soft_cap, double rope_scale,
|
|
16
|
+
double rope_theta, std::optional<at::Tensor> maybe_lse, int64_t cuda_stream) {
|
|
18
17
|
DecodePlanInfo plan_info;
|
|
19
18
|
plan_info.FromVector(tensor_to_vec(plan_info_vec));
|
|
20
19
|
|
|
@@ -26,8 +26,7 @@ at::Tensor BatchMLAPagedAttentionPlan(at::Tensor float_workspace_buffer,
|
|
|
26
26
|
at::Tensor int_workspace_buffer,
|
|
27
27
|
at::Tensor page_locked_int_workspace_buffer,
|
|
28
28
|
at::Tensor qo_indptr, at::Tensor kv_indptr, at::Tensor kv_len,
|
|
29
|
-
int64_t num_heads, int64_t head_dim_o, bool causal
|
|
30
|
-
int64_t cuda_stream) {
|
|
29
|
+
int64_t num_heads, int64_t head_dim_o, bool causal) {
|
|
31
30
|
size_t float_workspace_size_in_bytes =
|
|
32
31
|
float_workspace_buffer.size(0) * float_workspace_buffer.element_size();
|
|
33
32
|
size_t int_workspace_size_in_bytes =
|
|
@@ -37,7 +36,9 @@ at::Tensor BatchMLAPagedAttentionPlan(at::Tensor float_workspace_buffer,
|
|
|
37
36
|
|
|
38
37
|
int batch_size = kv_len.size(0);
|
|
39
38
|
|
|
40
|
-
|
|
39
|
+
const c10::cuda::OptionalCUDAGuard device_guard(float_workspace_buffer.device());
|
|
40
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
41
|
+
|
|
41
42
|
cudaError_t status =
|
|
42
43
|
MLAPlan(float_workspace_buffer.data_ptr(), float_workspace_size_in_bytes,
|
|
43
44
|
int_workspace_buffer.data_ptr(), page_locked_int_workspace_buffer.data_ptr(),
|
|
@@ -20,15 +20,14 @@ at::Tensor BatchMLAPagedAttentionPlan(at::Tensor float_workspace_buffer,
|
|
|
20
20
|
at::Tensor int_workspace_buffer,
|
|
21
21
|
at::Tensor page_locked_int_workspace_buffer,
|
|
22
22
|
at::Tensor qo_indptr, at::Tensor kv_indptr, at::Tensor kv_len,
|
|
23
|
-
int64_t num_heads, int64_t head_dim_o, bool causal
|
|
24
|
-
int64_t cuda_stream);
|
|
23
|
+
int64_t num_heads, int64_t head_dim_o, bool causal);
|
|
25
24
|
|
|
26
25
|
void BatchMLAPagedAttentionRun(at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer,
|
|
27
26
|
at::Tensor plan_info_vec, at::Tensor q_nope, at::Tensor q_pe,
|
|
28
27
|
at::Tensor ckv_cache, at::Tensor kpe_cache, at::Tensor kv_indices,
|
|
29
28
|
at::Tensor o, std::optional<at::Tensor> maybe_lse,
|
|
30
29
|
int64_t mask_mode_code, int64_t num_heads, int64_t page_size,
|
|
31
|
-
double sm_scale
|
|
30
|
+
double sm_scale);
|
|
32
31
|
|
|
33
32
|
TORCH_LIBRARY_FRAGMENT(TORCH_EXTENSION_NAME, m) {
|
|
34
33
|
m.def("plan", &BatchMLAPagedAttentionPlan);
|
|
@@ -29,7 +29,7 @@ void BatchMLAPagedAttentionRun(at::Tensor float_workspace_buffer, at::Tensor int
|
|
|
29
29
|
at::Tensor ckv_cache, at::Tensor kpe_cache, at::Tensor kv_indices,
|
|
30
30
|
at::Tensor o, std::optional<at::Tensor> maybe_lse,
|
|
31
31
|
int64_t mask_mode_code, int64_t num_heads, int64_t page_size,
|
|
32
|
-
double sm_scale
|
|
32
|
+
double sm_scale) {
|
|
33
33
|
// q_nope: [n, num_heads, head_dim_ckv]
|
|
34
34
|
// q_pe: [n, num_heads, head_dim_kpe]
|
|
35
35
|
// ckv_cache: [num_pages, page_size, head_dim_ckv]
|
|
@@ -58,7 +58,8 @@ void BatchMLAPagedAttentionRun(at::Tensor float_workspace_buffer, at::Tensor int
|
|
|
58
58
|
unsigned int o_stride_n = o.stride(0);
|
|
59
59
|
unsigned int o_stride_h = o.stride(1);
|
|
60
60
|
|
|
61
|
-
|
|
61
|
+
const c10::cuda::OptionalCUDAGuard device_guard(device);
|
|
62
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
62
63
|
|
|
63
64
|
DISPATCH_context(
|
|
64
65
|
DTypeQ, DTypeKV, DTypeO, IdType, MASK_MODE, HEAD_DIM_CKV, HEAD_DIM_KPE, Params, [&] {
|
|
@@ -27,7 +27,7 @@ at::Tensor BatchMLAPagedAttentionSM90Plan(at::Tensor float_workspace_buffer,
|
|
|
27
27
|
at::Tensor page_locked_int_workspace_buffer,
|
|
28
28
|
at::Tensor qo_indptr, at::Tensor kv_indptr,
|
|
29
29
|
at::Tensor kv_len, int64_t num_heads, int64_t head_dim_o,
|
|
30
|
-
bool causal
|
|
30
|
+
bool causal) {
|
|
31
31
|
size_t float_workspace_size_in_bytes =
|
|
32
32
|
float_workspace_buffer.size(0) * float_workspace_buffer.element_size();
|
|
33
33
|
size_t int_workspace_size_in_bytes =
|
|
@@ -37,7 +37,9 @@ at::Tensor BatchMLAPagedAttentionSM90Plan(at::Tensor float_workspace_buffer,
|
|
|
37
37
|
|
|
38
38
|
int batch_size = kv_len.size(0);
|
|
39
39
|
|
|
40
|
-
|
|
40
|
+
const c10::cuda::OptionalCUDAGuard device_guard(float_workspace_buffer.device());
|
|
41
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
42
|
+
|
|
41
43
|
cudaError_t status =
|
|
42
44
|
MLAPlan(float_workspace_buffer.data_ptr(), float_workspace_size_in_bytes,
|
|
43
45
|
int_workspace_buffer.data_ptr(), page_locked_int_workspace_buffer.data_ptr(),
|
|
@@ -21,7 +21,7 @@ at::Tensor BatchMLAPagedAttentionSM90Plan(at::Tensor float_workspace_buffer,
|
|
|
21
21
|
at::Tensor page_locked_int_workspace_buffer,
|
|
22
22
|
at::Tensor qo_indptr, at::Tensor kv_indptr,
|
|
23
23
|
at::Tensor kv_len, int64_t num_heads, int64_t head_dim_o,
|
|
24
|
-
bool causal
|
|
24
|
+
bool causal);
|
|
25
25
|
|
|
26
26
|
void BatchMLAPagedAttentionSM90Run(at::Tensor float_workspace_buffer,
|
|
27
27
|
at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
@@ -29,7 +29,7 @@ void BatchMLAPagedAttentionSM90Run(at::Tensor float_workspace_buffer,
|
|
|
29
29
|
at::Tensor kpe_cache, at::Tensor kv_indices, at::Tensor o,
|
|
30
30
|
std::optional<at::Tensor> maybe_lse, int64_t mask_mode_code,
|
|
31
31
|
int64_t num_heads, int64_t page_size,
|
|
32
|
-
double sm_scale ADDITIONAL_FUNC_PARAMS
|
|
32
|
+
double sm_scale ADDITIONAL_FUNC_PARAMS);
|
|
33
33
|
|
|
34
34
|
TORCH_LIBRARY_FRAGMENT(TORCH_EXTENSION_NAME, m) {
|
|
35
35
|
m.def("plan", &BatchMLAPagedAttentionSM90Plan);
|
|
@@ -30,7 +30,7 @@ void BatchMLAPagedAttentionSM90Run(at::Tensor float_workspace_buffer,
|
|
|
30
30
|
at::Tensor kpe_cache, at::Tensor kv_indices, at::Tensor o,
|
|
31
31
|
std::optional<at::Tensor> maybe_lse, int64_t mask_mode_code,
|
|
32
32
|
int64_t num_heads, int64_t page_size,
|
|
33
|
-
double sm_scale ADDITIONAL_FUNC_PARAMS
|
|
33
|
+
double sm_scale ADDITIONAL_FUNC_PARAMS) {
|
|
34
34
|
// q_nope: [n, num_heads, head_dim_ckv]
|
|
35
35
|
// q_pe: [n, num_heads, head_dim_kpe]
|
|
36
36
|
// ckv_cache: [num_pages, page_size, head_dim_ckv]
|
|
@@ -59,7 +59,8 @@ void BatchMLAPagedAttentionSM90Run(at::Tensor float_workspace_buffer,
|
|
|
59
59
|
unsigned int o_stride_n = o.stride(0);
|
|
60
60
|
unsigned int o_stride_h = o.stride(1);
|
|
61
61
|
|
|
62
|
-
|
|
62
|
+
const c10::cuda::OptionalCUDAGuard device_guard(device);
|
|
63
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
63
64
|
|
|
64
65
|
DISPATCH_context(
|
|
65
66
|
DTypeQ, DTypeKV, DTypeO, IdType, MASK_MODE, HEAD_DIM_CKV, HEAD_DIM_KPE, Params, [&] {
|
|
@@ -19,8 +19,8 @@
|
|
|
19
19
|
#include <optional>
|
|
20
20
|
|
|
21
21
|
#include "batch_prefill_config.inc"
|
|
22
|
-
#include "pytorch_extension_utils.h"
|
|
23
22
|
#include "pytorch_conversion_utils.h"
|
|
23
|
+
#include "pytorch_extension_utils.h"
|
|
24
24
|
|
|
25
25
|
namespace flashinfer {
|
|
26
26
|
|
|
@@ -43,10 +43,9 @@ using namespace flashinfer;
|
|
|
43
43
|
at::Tensor BatchPrefillWithKVCachePlan(
|
|
44
44
|
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer,
|
|
45
45
|
at::Tensor page_locked_int_workspace_buffer, at::Tensor qo_indptr, at::Tensor kv_indptr,
|
|
46
|
-
at::Tensor kv_len_arr, int64_t total_num_rows, int64_t batch_size,
|
|
47
|
-
int64_t
|
|
48
|
-
|
|
49
|
-
int64_t cuda_stream) {
|
|
46
|
+
at::Tensor kv_len_arr, int64_t total_num_rows, int64_t batch_size, int64_t num_qo_heads,
|
|
47
|
+
int64_t num_kv_heads, int64_t page_size, bool enable_cuda_graph, int64_t head_dim_qk,
|
|
48
|
+
int64_t head_dim_vo, bool causal) {
|
|
50
49
|
size_t float_workspace_size_in_bytes =
|
|
51
50
|
float_workspace_buffer.size(0) * float_workspace_buffer.element_size();
|
|
52
51
|
size_t int_workspace_size_in_bytes =
|
|
@@ -54,7 +53,8 @@ at::Tensor BatchPrefillWithKVCachePlan(
|
|
|
54
53
|
|
|
55
54
|
PrefillPlanInfo plan_info;
|
|
56
55
|
|
|
57
|
-
|
|
56
|
+
const c10::cuda::OptionalCUDAGuard device_guard(float_workspace_buffer.device());
|
|
57
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
58
58
|
cudaError_t status = PrefillPlan<IdType>(
|
|
59
59
|
float_workspace_buffer.data_ptr(), float_workspace_size_in_bytes,
|
|
60
60
|
int_workspace_buffer.data_ptr(), page_locked_int_workspace_buffer.data_ptr(),
|
|
@@ -68,12 +68,12 @@ at::Tensor BatchPrefillWithKVCachePlan(
|
|
|
68
68
|
return vec_to_tensor(plan_info.ToVector());
|
|
69
69
|
}
|
|
70
70
|
|
|
71
|
-
void BatchPrefillWithRaggedKVCacheRun(
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
71
|
+
void BatchPrefillWithRaggedKVCacheRun(at::Tensor float_workspace_buffer,
|
|
72
|
+
at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
73
|
+
at::Tensor q, at::Tensor k, at::Tensor v,
|
|
74
|
+
at::Tensor qo_indptr, at::Tensor kv_indptr, at::Tensor o,
|
|
75
|
+
std::optional<at::Tensor> maybe_lse, int64_t mask_mode_code,
|
|
76
|
+
int64_t layout, int64_t window_left ADDITIONAL_FUNC_PARAMS) {
|
|
77
77
|
PrefillPlanInfo plan_info;
|
|
78
78
|
plan_info.FromVector(tensor_to_vec(plan_info_vec));
|
|
79
79
|
QKVLayout kv_layout = static_cast<QKVLayout>(layout);
|
|
@@ -109,7 +109,8 @@ void BatchPrefillWithRaggedKVCacheRun(
|
|
|
109
109
|
auto q_scalar_type = q.scalar_type();
|
|
110
110
|
auto kv_scalar_type = k.scalar_type();
|
|
111
111
|
|
|
112
|
-
|
|
112
|
+
const c10::cuda::OptionalCUDAGuard device_guard(float_workspace_buffer.device());
|
|
113
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
113
114
|
|
|
114
115
|
DISPATCH_context(
|
|
115
116
|
DTypeQ, DTypeKV, DTypeO, IdType, MASK_MODE, HEAD_DIM_QK, HEAD_DIM_VO, POS_ENCODING_MODE,
|
|
@@ -193,13 +194,14 @@ void BatchPrefillWithRaggedKVCacheRun(
|
|
|
193
194
|
});
|
|
194
195
|
}
|
|
195
196
|
|
|
196
|
-
void BatchPrefillWithPagedKVCacheRun(
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
197
|
+
void BatchPrefillWithPagedKVCacheRun(at::Tensor float_workspace_buffer,
|
|
198
|
+
at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
199
|
+
at::Tensor q, at::Tensor paged_k_cache,
|
|
200
|
+
at::Tensor paged_v_cache, at::Tensor qo_indptr,
|
|
201
|
+
at::Tensor paged_kv_indptr, at::Tensor paged_kv_indices,
|
|
202
|
+
at::Tensor paged_kv_last_page_len, at::Tensor o,
|
|
203
|
+
std::optional<at::Tensor> maybe_lse, int64_t mask_mode_code,
|
|
204
|
+
int64_t layout, int64_t window_left ADDITIONAL_FUNC_PARAMS) {
|
|
203
205
|
PrefillPlanInfo plan_info;
|
|
204
206
|
plan_info.FromVector(tensor_to_vec(plan_info_vec));
|
|
205
207
|
QKVLayout kv_layout = static_cast<QKVLayout>(layout);
|
|
@@ -240,7 +242,8 @@ void BatchPrefillWithPagedKVCacheRun(
|
|
|
240
242
|
TORCH_CHECK(k_strides == v_strides, "k/v strides must be identical");
|
|
241
243
|
kv_cache_strides = k_strides.data();
|
|
242
244
|
|
|
243
|
-
|
|
245
|
+
const c10::cuda::OptionalCUDAGuard device_guard(float_workspace_buffer.device());
|
|
246
|
+
const cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
|
|
244
247
|
|
|
245
248
|
DISPATCH_context(
|
|
246
249
|
DTypeQ, DTypeKV, DTypeO, IdType, MASK_MODE, HEAD_DIM_QK, HEAD_DIM_VO, POS_ENCODING_MODE,
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* Copyright (c) 2023-2025 by FlashInfer team.
|
|
3
|
+
*
|
|
4
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
* you may not use this file except in compliance with the License.
|
|
6
|
+
* You may obtain a copy of the License at
|
|
7
|
+
*
|
|
8
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
*
|
|
10
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
* See the License for the specific language governing permissions and
|
|
14
|
+
* limitations under the License.
|
|
15
|
+
*/
|
|
16
|
+
#include "batch_prefill_config.inc"
|
|
17
|
+
#include "pytorch_extension_utils.h"
|
|
18
|
+
|
|
19
|
+
at::Tensor BatchPrefillWithKVCachePlan(
|
|
20
|
+
at::Tensor float_workspace_buffer, at::Tensor int_workspace_buffer,
|
|
21
|
+
at::Tensor page_locked_int_workspace_buffer, at::Tensor qo_indptr, at::Tensor kv_indptr,
|
|
22
|
+
at::Tensor kv_len_arr, int64_t total_num_rows, int64_t batch_size, int64_t num_qo_heads,
|
|
23
|
+
int64_t num_kv_heads, int64_t page_size, bool enable_cuda_graph, int64_t head_dim_qk,
|
|
24
|
+
int64_t head_dim_vo, bool causal);
|
|
25
|
+
|
|
26
|
+
void BatchPrefillWithRaggedKVCacheRun(at::Tensor float_workspace_buffer,
|
|
27
|
+
at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
28
|
+
at::Tensor q, at::Tensor k, at::Tensor v,
|
|
29
|
+
at::Tensor qo_indptr, at::Tensor kv_indptr, at::Tensor o,
|
|
30
|
+
std::optional<at::Tensor> maybe_lse, int64_t mask_mode_code,
|
|
31
|
+
int64_t layout, int64_t window_left ADDITIONAL_FUNC_PARAMS);
|
|
32
|
+
|
|
33
|
+
void BatchPrefillWithPagedKVCacheRun(at::Tensor float_workspace_buffer,
|
|
34
|
+
at::Tensor int_workspace_buffer, at::Tensor plan_info_vec,
|
|
35
|
+
at::Tensor q, at::Tensor paged_k_cache,
|
|
36
|
+
at::Tensor paged_v_cache, at::Tensor qo_indptr,
|
|
37
|
+
at::Tensor paged_kv_indptr, at::Tensor paged_kv_indices,
|
|
38
|
+
at::Tensor paged_kv_last_page_len, at::Tensor o,
|
|
39
|
+
std::optional<at::Tensor> maybe_lse, int64_t mask_mode_code,
|
|
40
|
+
int64_t layout, int64_t window_left ADDITIONAL_FUNC_PARAMS);
|
|
41
|
+
|
|
42
|
+
TORCH_LIBRARY_FRAGMENT(TORCH_EXTENSION_NAME, m) {
|
|
43
|
+
// Batch-request prefill attention with KV-Cache plan
|
|
44
|
+
m.def("plan", BatchPrefillWithKVCachePlan);
|
|
45
|
+
// Batch-request prefill attention with KV-Cache operator
|
|
46
|
+
m.def("ragged_run", BatchPrefillWithRaggedKVCacheRun);
|
|
47
|
+
// Batch-request prefill attention with KV-Cache operator
|
|
48
|
+
m.def("paged_run", BatchPrefillWithPagedKVCacheRun);
|
|
49
|
+
}
|