mslk-cuda-nightly 2026.1.19__cp310-cp310-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116) hide show
  1. mslk/__init__.py +56 -0
  2. mslk/attention/__init__.py +7 -0
  3. mslk/attention/cutlass_blackwell_fmha/__init__.py +30 -0
  4. mslk/attention/cutlass_blackwell_fmha/cutlass_blackwell_fmha_custom_op.py +332 -0
  5. mslk/attention/cutlass_blackwell_fmha/cutlass_blackwell_fmha_interface.py +533 -0
  6. mslk/attention/flash_attn/__init__.py +22 -0
  7. mslk/attention/flash_attn/ampere_helpers.py +104 -0
  8. mslk/attention/flash_attn/barrier.py +72 -0
  9. mslk/attention/flash_attn/benchmark.py +269 -0
  10. mslk/attention/flash_attn/blackwell_helpers.py +754 -0
  11. mslk/attention/flash_attn/block_info.py +109 -0
  12. mslk/attention/flash_attn/block_sparse_utils.py +1452 -0
  13. mslk/attention/flash_attn/block_sparsity.py +219 -0
  14. mslk/attention/flash_attn/compute_block_sparsity.py +378 -0
  15. mslk/attention/flash_attn/copy_utils.py +341 -0
  16. mslk/attention/flash_attn/cute_dsl_utils.py +135 -0
  17. mslk/attention/flash_attn/fast_math.py +22 -0
  18. mslk/attention/flash_attn/flash_bwd.py +1262 -0
  19. mslk/attention/flash_attn/flash_bwd_postprocess.py +464 -0
  20. mslk/attention/flash_attn/flash_bwd_preprocess.py +366 -0
  21. mslk/attention/flash_attn/flash_bwd_sm100.py +2951 -0
  22. mslk/attention/flash_attn/flash_bwd_sm90.py +1703 -0
  23. mslk/attention/flash_attn/flash_fwd.py +2471 -0
  24. mslk/attention/flash_attn/flash_fwd_combine.py +705 -0
  25. mslk/attention/flash_attn/flash_fwd_sm100.py +2727 -0
  26. mslk/attention/flash_attn/hopper_helpers.py +102 -0
  27. mslk/attention/flash_attn/interface.py +1771 -0
  28. mslk/attention/flash_attn/mask.py +610 -0
  29. mslk/attention/flash_attn/mma_sm100_desc.py +292 -0
  30. mslk/attention/flash_attn/named_barrier.py +32 -0
  31. mslk/attention/flash_attn/pack_gqa.py +165 -0
  32. mslk/attention/flash_attn/paged_kv.py +176 -0
  33. mslk/attention/flash_attn/pipeline.py +273 -0
  34. mslk/attention/flash_attn/seqlen_info.py +139 -0
  35. mslk/attention/flash_attn/softmax.py +583 -0
  36. mslk/attention/flash_attn/testing.py +424 -0
  37. mslk/attention/flash_attn/tile_scheduler.py +720 -0
  38. mslk/attention/flash_attn/utils.py +860 -0
  39. mslk/attention/fmha/__init__.py +967 -0
  40. mslk/attention/fmha/_triton/__init__.py +6 -0
  41. mslk/attention/fmha/_triton/available.py +50 -0
  42. mslk/attention/fmha/_triton/splitk_kernels.py +1534 -0
  43. mslk/attention/fmha/_triton/vararg_kernel.py +262 -0
  44. mslk/attention/fmha/attn_bias.py +2186 -0
  45. mslk/attention/fmha/attn_bias_utils.py +536 -0
  46. mslk/attention/fmha/ck.py +508 -0
  47. mslk/attention/fmha/ck_decoder.py +141 -0
  48. mslk/attention/fmha/ck_splitk.py +204 -0
  49. mslk/attention/fmha/common.py +598 -0
  50. mslk/attention/fmha/cutlass.py +461 -0
  51. mslk/attention/fmha/cutlass_blackwell.py +560 -0
  52. mslk/attention/fmha/dispatch.py +224 -0
  53. mslk/attention/fmha/flash.py +862 -0
  54. mslk/attention/fmha/flash3.py +858 -0
  55. mslk/attention/fmha/flash_mtia.py +245 -0
  56. mslk/attention/fmha/merge_training.py +192 -0
  57. mslk/attention/fmha/split_blocks_fairinternal.py +329 -0
  58. mslk/attention/fmha/torch_attention_compat.py +154 -0
  59. mslk/attention/fmha/tree_attention.py +718 -0
  60. mslk/attention/fmha/triton_splitk.py +1378 -0
  61. mslk/attention/fmha/unbind.py +130 -0
  62. mslk/attention/fmha/utils/__init__.py +6 -0
  63. mslk/attention/fmha/utils/bench.py +74 -0
  64. mslk/attention/fmha/utils/cpp_lib.py +148 -0
  65. mslk/attention/fmha/utils/op_common.py +65 -0
  66. mslk/attention/gqa_attn_splitk/__init__.py +11 -0
  67. mslk/bench/comm/__init__.py +7 -0
  68. mslk/bench/comm/comm_bench.py +255 -0
  69. mslk/bench/common/__init__.py +5 -0
  70. mslk/bench/common/utils.py +148 -0
  71. mslk/bench/conv/__init__.py +7 -0
  72. mslk/bench/conv/conv_bench.py +551 -0
  73. mslk/bench/conv/conv_ops.py +213 -0
  74. mslk/bench/gemm/__init__.py +7 -0
  75. mslk/bench/gemm/gemm_bench.py +859 -0
  76. mslk/bench/gemm/gemm_ops.py +3342 -0
  77. mslk/bench/gemm/grouped_gemm_bias_scale_benchmark.py +177 -0
  78. mslk/bench/moe/__init__.py +7 -0
  79. mslk/bench/moe/gather_scatter_bench.py +356 -0
  80. mslk/bench/quantize/quantize_bench.py +345 -0
  81. mslk/bench/quantize/quantize_ops.py +266 -0
  82. mslk/comm/__init__.py +11 -0
  83. mslk/conv/__init__.py +11 -0
  84. mslk/gemm/__init__.py +18 -0
  85. mslk/gemm/triton/__init__.py +7 -0
  86. mslk/gemm/triton/fp8_gemm.py +2702 -0
  87. mslk/gemm/triton/grouped_gemm.py +1132 -0
  88. mslk/gemm/triton/matmul_perf_model.py +237 -0
  89. mslk/gemm/triton/utils.py +128 -0
  90. mslk/kv_cache/__init__.py +11 -0
  91. mslk/moe/__init__.py +26 -0
  92. mslk/moe/activation.py +291 -0
  93. mslk/moe/gather_scatter.py +739 -0
  94. mslk/moe/layers.py +1240 -0
  95. mslk/moe/shuffling.py +421 -0
  96. mslk/mslk.so +0 -0
  97. mslk/quantize/__init__.py +11 -0
  98. mslk/quantize/shuffle.py +306 -0
  99. mslk/quantize/triton/__init__.py +7 -0
  100. mslk/quantize/triton/fp4_quantize.py +5942 -0
  101. mslk/quantize/triton/fp8_quantize.py +1902 -0
  102. mslk/testing/__init__.py +7 -0
  103. mslk/testing/attributes.py +60 -0
  104. mslk/testing/rocm.py +91 -0
  105. mslk/utils/__init__.py +7 -0
  106. mslk/utils/torch/__init__.py +7 -0
  107. mslk/utils/torch/library.py +150 -0
  108. mslk/utils/triton/__init__.py +7 -0
  109. mslk/utils/triton/fp8_utils.py +72 -0
  110. mslk/utils/triton/utils.py +128 -0
  111. mslk/version.py +11 -0
  112. mslk_cuda_nightly-2026.1.19.dist-info/METADATA +102 -0
  113. mslk_cuda_nightly-2026.1.19.dist-info/RECORD +116 -0
  114. mslk_cuda_nightly-2026.1.19.dist-info/WHEEL +5 -0
  115. mslk_cuda_nightly-2026.1.19.dist-info/licenses/LICENSE +30 -0
  116. mslk_cuda_nightly-2026.1.19.dist-info/top_level.txt +1 -0
@@ -0,0 +1,72 @@
1
+ # @nolint # fbcode
2
+ import cutlass
3
+ import cutlass.cute as cute
4
+ from cutlass import Int32
5
+ from cutlass.cutlass_dsl import T, dsl_user_op
6
+ from cutlass._mlir.dialects import llvm
7
+
8
+
9
@dsl_user_op
def ld_acquire(lock_ptr: cute.Pointer, *, loc=None, ip=None) -> cutlass.Int32:
    """Acquire-load a 32-bit flag from global memory.

    Emits ``ld.global.acquire.gpu.b32``: a GPU-scope acquire load, so memory
    operations issued after this load by the calling thread cannot be reordered
    before it (per the PTX string below).

    Args:
        lock_ptr: Pointer to the 32-bit flag in global memory.
        loc, ip: DSL source-location plumbing forwarded by ``@dsl_user_op``.

    Returns:
        The loaded value as a ``cutlass.Int32``.
    """
    # Lower the pointer to a raw 64-bit integer so it can be passed as an
    # inline-asm address operand.
    lock_ptr_i64 = lock_ptr.toint(loc=loc, ip=ip).ir_value()
    state = llvm.inline_asm(
        T.i32(),  # result type: a single 32-bit register ($0)
        [lock_ptr_i64],
        "ld.global.acquire.gpu.b32 $0, [$1];",
        "=r,l",  # "=r": i32 output register; "l": 64-bit address input
        has_side_effects=True,  # prevent the compiler from eliding/reordering the load
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
    return cutlass.Int32(state)
22
+
23
+
24
@dsl_user_op
def red_relaxed(
    lock_ptr: cute.Pointer, val: cutlass.Constexpr[Int32], *, loc=None, ip=None
) -> None:
    """Relaxed atomic add of ``val`` to a 32-bit flag in global memory.

    Emits ``red.relaxed.gpu.global.add.s32``: a GPU-scope reduction (atomic
    add with no return value) with relaxed memory ordering — no ordering is
    imposed on surrounding memory operations.

    Args:
        lock_ptr: Pointer to the 32-bit flag in global memory.
        val: Compile-time constant increment.
        loc, ip: DSL source-location plumbing forwarded by ``@dsl_user_op``.
    """
    # Lower the pointer to a raw 64-bit integer for the inline-asm operand.
    lock_ptr_i64 = lock_ptr.toint(loc=loc, ip=ip).ir_value()
    llvm.inline_asm(
        None,  # no result: red.* does not return the prior value
        [lock_ptr_i64, Int32(val).ir_value(loc=loc, ip=ip)],
        "red.relaxed.gpu.global.add.s32 [$0], $1;",
        "l,r",  # "l": 64-bit address; "r": i32 addend
        has_side_effects=True,  # the atomic update must not be optimized away
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
38
+
39
+
40
@dsl_user_op
def red_release(
    lock_ptr: cute.Pointer, val: cutlass.Constexpr[Int32], *, loc=None, ip=None
) -> None:
    """Release atomic add of ``val`` to a 32-bit flag in global memory.

    Emits ``red.release.gpu.global.add.s32``: a GPU-scope reduction (atomic
    add with no return value) with release ordering, so the calling thread's
    prior memory operations are made visible before the flag update. Pairs
    with the acquire load in ``ld_acquire``.

    Args:
        lock_ptr: Pointer to the 32-bit flag in global memory.
        val: Compile-time constant increment.
        loc, ip: DSL source-location plumbing forwarded by ``@dsl_user_op``.
    """
    # Lower the pointer to a raw 64-bit integer for the inline-asm operand.
    lock_ptr_i64 = lock_ptr.toint(loc=loc, ip=ip).ir_value()
    llvm.inline_asm(
        None,  # no result: red.* does not return the prior value
        [lock_ptr_i64, Int32(val).ir_value(loc=loc, ip=ip)],
        "red.release.gpu.global.add.s32 [$0], $1;",
        "l,r",  # "l": 64-bit address; "r": i32 addend
        has_side_effects=True,  # the atomic update must not be optimized away
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
54
+
55
+
56
@cute.jit
def wait_eq(lock_ptr: cute.Pointer, thread_idx: int | Int32, flag_offset: int, val: Int32) -> None:
    """Spin until the flag at ``lock_ptr + flag_offset`` equals ``val``.

    Only thread 0 polls (with acquire loads via ``ld_acquire``); all other
    threads return immediately.
    NOTE(review): there is no barrier here, so other threads are NOT
    synchronized with the flag — callers presumably issue a block-level
    barrier after this; confirm at call sites.
    NOTE(review): ``read_val`` starts at 0, so if ``val`` is 0 the loop body
    never runs and the flag is never actually read — confirm callers only
    wait on non-zero values.
    """
    flag_ptr = lock_ptr + flag_offset
    if thread_idx == 0:
        read_val = Int32(0)
        # Poll with acquire semantics so that, once the flag matches, writes
        # released by the producer (see red_release) are visible.
        while read_val != val:
            read_val = ld_acquire(flag_ptr)
63
+
64
+
65
@cute.jit
def arrive_inc(
    lock_ptr: cute.Pointer, thread_idx: int | Int32, flag_offset: int, val: cutlass.Constexpr[Int32]
) -> None:
    """Signal arrival by adding ``val`` to the flag at ``lock_ptr + flag_offset``.

    Only thread 0 performs the update, using a release-ordered atomic add so
    the block's prior memory writes become visible to a consumer spinning in
    ``wait_eq``.
    NOTE(review): no barrier precedes the release add here — presumably the
    caller synchronizes the block before calling; confirm at call sites.
    """
    flag_ptr = lock_ptr + flag_offset
    if thread_idx == 0:
        # Release ordering pairs with the acquire load in wait_eq.
        red_release(flag_ptr, val)
        # Relaxed variant kept below for experimentation (no ordering guarantee).
        # red_relaxed(flag_ptr, val)
@@ -0,0 +1,269 @@
1
+ # @nolint # fbcode
2
+ # Copyright (c) 2023, Tri Dao.
3
+ """Useful functions for writing test code."""
4
+
5
+ import torch
6
+ import torch.utils.benchmark as benchmark
7
+
8
+
9
def benchmark_forward(
    fn, *inputs, repeats=10, desc="", verbose=True, amp=False, amp_dtype=torch.float16, **kwinputs
):
    """Time the forward pass of ``fn`` with ``torch.utils.benchmark``.

    Args:
        fn: Callable to benchmark; invoked as ``fn(*inputs, **kwinputs)``.
        inputs: Positional arguments forwarded to ``fn``.
        repeats: Number of timed invocations.
        desc: Label printed alongside results when ``verbose``.
        verbose: Print the measurement when True.
        amp: Run the call under CUDA autocast when True.
        amp_dtype: Autocast dtype used when ``amp`` is enabled.
        kwinputs: Keyword arguments forwarded to ``fn``.

    Returns:
        A ``(Timer, Measurement)`` pair from ``torch.utils.benchmark``.
    """
    if verbose:
        print(desc, "- Forward pass")

    def run_forward(*fwd_args, **fwd_kwargs):
        # Optionally wrap the call in autocast so mixed-precision timing
        # matches how the op runs during training.
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            fn(*fwd_args, **fwd_kwargs)

    timer = benchmark.Timer(
        stmt="fn_amp(*inputs, **kwinputs)",
        globals={"fn_amp": run_forward, "inputs": inputs, "kwinputs": kwinputs},
        num_threads=torch.get_num_threads(),
    )
    measurement = timer.timeit(repeats)
    if verbose:
        print(measurement)
    return timer, measurement
29
+
30
+
31
def benchmark_backward(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Time only the backward pass of ``fn`` with ``torch.utils.benchmark``.

    The forward graph is built once (untimed, ``retain_graph=True``); each
    timed iteration replays ``backward`` on it.

    Args:
        fn: Callable whose output's backward pass is measured.
        inputs: Positional arguments forwarded to ``fn``.
        grad: Optional upstream gradient; defaults to ``randn_like`` the output.
        repeats: Number of timed backward calls.
        desc: Label printed alongside results when ``verbose``.
        verbose: Print the measurement when True.
        amp: Run the forward pass under CUDA autocast when True.
        amp_dtype: Autocast dtype used when ``amp`` is enabled.
        kwinputs: Keyword arguments forwarded to ``fn``.

    Returns:
        A ``(Timer, Measurement)`` pair from ``torch.utils.benchmark``.

    Raises:
        RuntimeError: If a supplied ``grad`` does not match the output shape.
    """
    if verbose:
        print(desc, "- Backward pass")
    # Build the autograd graph once; only backward is timed below.
    with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
        y = fn(*inputs, **kwinputs)
        # Benchmark the first output of multi-output functions.
        if type(y) is tuple:
            y = y[0]
    if grad is None:
        grad = torch.randn_like(y)
    elif grad.shape != y.shape:
        raise RuntimeError("Grad shape does not match output shape")

    def run_backward(*bwd_inputs, y, grad):
        # Clear .grad so gradient accumulation cost is excluded from timing.
        for tensor in bwd_inputs:
            if isinstance(tensor, torch.Tensor):
                tensor.grad = None
        y.backward(grad, retain_graph=True)

    timer = benchmark.Timer(
        stmt="f(*inputs, y=y, grad=grad)",
        globals={"f": run_backward, "inputs": inputs, "y": y, "grad": grad},
        num_threads=torch.get_num_threads(),
    )
    measurement = timer.timeit(repeats)
    if verbose:
        print(measurement)
    return timer, measurement
71
+
72
+
73
def benchmark_combined(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Time a fused forward+backward pass of ``fn`` with ``torch.utils.benchmark``.

    Each timed iteration reruns the forward pass (under autocast if enabled)
    and then backward, so the measurement covers the full training step cost
    of ``fn``.

    Args:
        fn: Callable to benchmark end to end.
        inputs: Positional arguments forwarded to ``fn``.
        grad: Optional upstream gradient; defaults to ``randn_like`` the output.
        repeats: Number of timed forward+backward iterations.
        desc: Label printed alongside results when ``verbose``.
        verbose: Print the measurement when True.
        amp: Run the forward pass under CUDA autocast when True.
        amp_dtype: Autocast dtype used when ``amp`` is enabled.
        kwinputs: Keyword arguments forwarded to ``fn``.

    Returns:
        A ``(Timer, Measurement)`` pair from ``torch.utils.benchmark``.

    Raises:
        RuntimeError: If a supplied ``grad`` does not match the output shape.
    """
    if verbose:
        print(desc, "- Forward + Backward pass")
    # One untimed forward pass just to discover the output shape for grad.
    with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
        y = fn(*inputs, **kwinputs)
        if type(y) is tuple:
            y = y[0]
    if grad is None:
        grad = torch.randn_like(y)
    elif grad.shape != y.shape:
        raise RuntimeError("Grad shape does not match output shape")

    def run_fwd_bwd(grad, *step_inputs, **step_kwargs):
        # Clear .grad so gradient accumulation cost is excluded from timing.
        for tensor in step_inputs:
            if isinstance(tensor, torch.Tensor):
                tensor.grad = None
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            out = fn(*step_inputs, **step_kwargs)
            if type(out) is tuple:
                out = out[0]
        # Backward runs outside autocast.
        out.backward(grad, retain_graph=True)

    timer = benchmark.Timer(
        stmt="f(grad, *inputs, **kwinputs)",
        globals={"f": run_fwd_bwd, "fn": fn, "inputs": inputs, "grad": grad, "kwinputs": kwinputs},
        num_threads=torch.get_num_threads(),
    )
    measurement = timer.timeit(repeats)
    if verbose:
        print(measurement)
    return timer, measurement
116
+
117
+
118
def benchmark_fwd_bwd(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Benchmark the forward and backward passes of ``fn`` separately.

    Runs ``benchmark_forward`` then ``benchmark_backward`` with identical
    settings and returns both results.

    Returns:
        A 2-tuple of ``(Timer, Measurement)`` pairs:
        ``(forward_result, backward_result)``.
    """
    # Settings shared verbatim by both sub-benchmarks.
    common = dict(repeats=repeats, desc=desc, verbose=verbose, amp=amp, amp_dtype=amp_dtype)
    fwd_result = benchmark_forward(fn, *inputs, **common, **kwinputs)
    bwd_result = benchmark_backward(fn, *inputs, grad=grad, **common, **kwinputs)
    return fwd_result, bwd_result
153
+
154
+
155
def benchmark_all(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Benchmark forward, backward, and combined forward+backward of ``fn``.

    Runs ``benchmark_forward``, ``benchmark_backward``, and
    ``benchmark_combined`` with identical settings.

    Returns:
        A 3-tuple of ``(Timer, Measurement)`` pairs:
        ``(forward_result, backward_result, combined_result)``.
    """
    # Settings shared verbatim by all three sub-benchmarks.
    common = dict(repeats=repeats, desc=desc, verbose=verbose, amp=amp, amp_dtype=amp_dtype)
    return (
        benchmark_forward(fn, *inputs, **common, **kwinputs),
        benchmark_backward(fn, *inputs, grad=grad, **common, **kwinputs),
        benchmark_combined(fn, *inputs, grad=grad, **common, **kwinputs),
    )
201
+
202
+
203
def pytorch_profiler(
    fn,
    *inputs,
    trace_filename=None,
    backward=False,
    amp=False,
    amp_dtype=torch.float16,
    cpu=False,
    verbose=True,
    **kwinputs,
):
    """Profile ``fn`` with ``torch.profiler`` to inspect CUDA kernel activity.

    Performs 30 untimed warm-up iterations, then profiles a single iteration
    (forward, and optionally backward) and prints/export the results.

    Args:
        fn: Callable to profile; invoked as ``fn(*inputs, **kwinputs)``.
        inputs: Positional arguments forwarded to ``fn``.
        trace_filename: If given, write a Chrome trace to this path.
        backward: Also run backward on the (first) output when True.
        amp: Run the forward pass under CUDA autocast when True.
        amp_dtype: Autocast dtype used when ``amp`` is enabled.
        cpu: Include CPU activity in the profile in addition to CUDA.
        verbose: Print the key-averages table when True.
        kwinputs: Keyword arguments forwarded to ``fn``.
    """
    upstream_grad = None
    if backward:
        # One untimed forward pass just to size the upstream gradient.
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            out = fn(*inputs, **kwinputs)
            if type(out) is tuple:
                out = out[0]
        upstream_grad = torch.randn_like(out)

    def run_once():
        if backward:
            # Clear .grad so accumulation does not pollute the profile.
            for tensor in inputs:
                if isinstance(tensor, torch.Tensor):
                    tensor.grad = None
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            out = fn(*inputs, **kwinputs)
            if type(out) is tuple:
                out = out[0]
        # Backward should be done outside autocast.
        if backward:
            out.backward(upstream_grad, retain_graph=True)

    for _ in range(30):  # Warm up
        run_once()

    # CPU activity is optional; CUDA activity is always recorded.
    activities = [torch.profiler.ProfilerActivity.CUDA]
    if cpu:
        activities.insert(0, torch.profiler.ProfilerActivity.CPU)
    with torch.profiler.profile(
        activities=activities,
        record_shapes=True,
        # profile_memory=True,
        with_stack=True,
    ) as prof:
        run_once()

    if verbose:
        # print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=50))
        print(prof.key_averages().table(row_limit=50))
    if trace_filename is not None:
        prof.export_chrome_trace(trace_filename)
257
+
258
+
259
def benchmark_memory(fn, *inputs, desc="", verbose=True, **kwinputs):
    """Measure peak CUDA memory allocated by one call to ``fn``.

    Empties the CUDA cache and resets peak-memory stats, runs
    ``fn(*inputs, **kwinputs)`` once, and reports the peak allocation.

    Args:
        fn: Callable whose memory footprint is measured.
        inputs: Positional arguments forwarded to ``fn``.
        desc: Label printed alongside the result when ``verbose``.
        verbose: Print the peak memory when True.
        kwinputs: Keyword arguments forwarded to ``fn``.

    Returns:
        Peak CUDA memory allocated during the call, in gigabytes (1e9 bytes).
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.synchronize()
    fn(*inputs, **kwinputs)
    torch.cuda.synchronize()
    # Bug fix: the previous divisor ((2**20) * 1000 = 1,048,576,000) was
    # neither GB (1e9) nor GiB (2**30); use 1e9 so the printed "GB" label
    # is accurate.
    mem = torch.cuda.max_memory_allocated() / 1e9
    if verbose:
        print(f"{desc} max memory: {mem}GB")
    torch.cuda.empty_cache()
    return mem