mlx 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlx might be problematic.

Files changed (914)
  1. checksums.yaml +7 -0
  2. data/ext/mlx/CMakeLists.txt +7 -0
  3. data/ext/mlx/Makefile +273 -0
  4. data/ext/mlx/extconf.rb +94 -0
  5. data/ext/mlx/mkmf.log +44 -0
  6. data/ext/mlx/native.bundle +0 -0
  7. data/ext/mlx/native.bundle.dSYM/Contents/Info.plist +20 -0
  8. data/ext/mlx/native.bundle.dSYM/Contents/Resources/DWARF/native.bundle +0 -0
  9. data/ext/mlx/native.bundle.dSYM/Contents/Resources/Relocations/aarch64/native.bundle.yml +5 -0
  10. data/ext/mlx/native.cpp +8027 -0
  11. data/ext/mlx/native.o +0 -0
  12. data/lib/mlx/core.rb +1678 -0
  13. data/lib/mlx/distributed_utils/common.rb +116 -0
  14. data/lib/mlx/distributed_utils/config.rb +600 -0
  15. data/lib/mlx/distributed_utils/launch.rb +490 -0
  16. data/lib/mlx/extension.rb +24 -0
  17. data/lib/mlx/nn/base.rb +388 -0
  18. data/lib/mlx/nn/init.rb +140 -0
  19. data/lib/mlx/nn/layers/activations.rb +336 -0
  20. data/lib/mlx/nn/layers/base.rb +6 -0
  21. data/lib/mlx/nn/layers/containers.rb +20 -0
  22. data/lib/mlx/nn/layers/convolution.rb +120 -0
  23. data/lib/mlx/nn/layers/convolution_transpose.rb +114 -0
  24. data/lib/mlx/nn/layers/distributed.rb +309 -0
  25. data/lib/mlx/nn/layers/dropout.rb +75 -0
  26. data/lib/mlx/nn/layers/embedding.rb +28 -0
  27. data/lib/mlx/nn/layers/linear.rb +79 -0
  28. data/lib/mlx/nn/layers/normalization.rb +216 -0
  29. data/lib/mlx/nn/layers/pooling.rb +167 -0
  30. data/lib/mlx/nn/layers/positional_encoding.rb +126 -0
  31. data/lib/mlx/nn/layers/quantized.rb +215 -0
  32. data/lib/mlx/nn/layers/recurrent.rb +135 -0
  33. data/lib/mlx/nn/layers/transformer.rb +330 -0
  34. data/lib/mlx/nn/layers/upsample.rb +97 -0
  35. data/lib/mlx/nn/layers.rb +18 -0
  36. data/lib/mlx/nn/losses.rb +251 -0
  37. data/lib/mlx/nn/utils.rb +167 -0
  38. data/lib/mlx/nn.rb +12 -0
  39. data/lib/mlx/optimizers/optimizers.rb +808 -0
  40. data/lib/mlx/optimizers/schedulers.rb +62 -0
  41. data/lib/mlx/optimizers.rb +9 -0
  42. data/lib/mlx/utils.rb +171 -0
  43. data/lib/mlx/version +1 -0
  44. data/lib/mlx/version.rb +5 -0
  45. data/lib/mlx.rb +64 -0
  46. data/mlx/.clang-format +87 -0
  47. data/mlx/.git +1 -0
  48. data/mlx/.github/ISSUE_TEMPLATE/bug_report.md +28 -0
  49. data/mlx/.github/actions/build-cuda-release/action.yml +31 -0
  50. data/mlx/.github/actions/build-docs/action.yml +38 -0
  51. data/mlx/.github/actions/build-linux/action.yml +38 -0
  52. data/mlx/.github/actions/build-linux-release/action.yml +42 -0
  53. data/mlx/.github/actions/build-macos/action.yml +80 -0
  54. data/mlx/.github/actions/build-macos-release/action.yml +36 -0
  55. data/mlx/.github/actions/build-windows/action.yml +26 -0
  56. data/mlx/.github/actions/setup-linux/action.yml +93 -0
  57. data/mlx/.github/actions/setup-macos/action.yml +24 -0
  58. data/mlx/.github/actions/setup-windows/action.yml +42 -0
  59. data/mlx/.github/actions/test-linux/action.yml +69 -0
  60. data/mlx/.github/actions/test-windows/action.yml +20 -0
  61. data/mlx/.github/dependabot.yml +6 -0
  62. data/mlx/.github/pull_request_template.md +12 -0
  63. data/mlx/.github/scripts/build-sanitizer-tests.sh +48 -0
  64. data/mlx/.github/scripts/setup+build-cpp-linux-fedora-container.sh +27 -0
  65. data/mlx/.github/workflows/build_and_test.yml +152 -0
  66. data/mlx/.github/workflows/documentation.yml +28 -0
  67. data/mlx/.github/workflows/nightly.yml +104 -0
  68. data/mlx/.github/workflows/release.yml +256 -0
  69. data/mlx/.gitignore +81 -0
  70. data/mlx/.pre-commit-config.yaml +27 -0
  71. data/mlx/ACKNOWLEDGMENTS.md +268 -0
  72. data/mlx/CITATION.cff +24 -0
  73. data/mlx/CMakeLists.txt +437 -0
  74. data/mlx/CODE_OF_CONDUCT.md +132 -0
  75. data/mlx/CONTRIBUTING.md +38 -0
  76. data/mlx/LICENSE +21 -0
  77. data/mlx/MANIFEST.in +6 -0
  78. data/mlx/README.md +121 -0
  79. data/mlx/benchmarks/cpp/CMakeLists.txt +11 -0
  80. data/mlx/benchmarks/cpp/autograd.cpp +39 -0
  81. data/mlx/benchmarks/cpp/compare_devices.cpp +27 -0
  82. data/mlx/benchmarks/cpp/irregular_strides.cpp +201 -0
  83. data/mlx/benchmarks/cpp/single_ops.cpp +288 -0
  84. data/mlx/benchmarks/cpp/time_utils.h +39 -0
  85. data/mlx/benchmarks/numpy/single_ops.py +39 -0
  86. data/mlx/benchmarks/numpy/time_utils.py +20 -0
  87. data/mlx/benchmarks/python/batch_matmul_bench.py +62 -0
  88. data/mlx/benchmarks/python/blas/bench_gemm.py +191 -0
  89. data/mlx/benchmarks/python/blas/bench_gemv.py +220 -0
  90. data/mlx/benchmarks/python/comparative/README.md +15 -0
  91. data/mlx/benchmarks/python/comparative/bench_mlx.py +519 -0
  92. data/mlx/benchmarks/python/comparative/bench_torch.py +482 -0
  93. data/mlx/benchmarks/python/comparative/compare.py +284 -0
  94. data/mlx/benchmarks/python/compile_bench.py +107 -0
  95. data/mlx/benchmarks/python/conv1d_bench.py +123 -0
  96. data/mlx/benchmarks/python/conv2d_bench_cpu.py +127 -0
  97. data/mlx/benchmarks/python/conv2d_train_bench_cpu.py +143 -0
  98. data/mlx/benchmarks/python/conv2d_transpose_bench_cpu.py +129 -0
  99. data/mlx/benchmarks/python/conv3d_bench_cpu.py +110 -0
  100. data/mlx/benchmarks/python/conv3d_train_bench_cpu.py +143 -0
  101. data/mlx/benchmarks/python/conv3d_transpose_bench_cpu.py +116 -0
  102. data/mlx/benchmarks/python/conv_bench.py +135 -0
  103. data/mlx/benchmarks/python/conv_transpose_bench.py +135 -0
  104. data/mlx/benchmarks/python/conv_unaligned_bench.py +107 -0
  105. data/mlx/benchmarks/python/distributed_bench.py +66 -0
  106. data/mlx/benchmarks/python/einsum_bench.py +84 -0
  107. data/mlx/benchmarks/python/fft_bench.py +118 -0
  108. data/mlx/benchmarks/python/gather_bench.py +52 -0
  109. data/mlx/benchmarks/python/gather_mm_bench.py +74 -0
  110. data/mlx/benchmarks/python/gather_qmm_bench.py +84 -0
  111. data/mlx/benchmarks/python/hadamard_bench.py +70 -0
  112. data/mlx/benchmarks/python/large_gemm_bench.py +119 -0
  113. data/mlx/benchmarks/python/layer_norm_bench.py +82 -0
  114. data/mlx/benchmarks/python/masked_scatter.py +212 -0
  115. data/mlx/benchmarks/python/rms_norm_bench.py +63 -0
  116. data/mlx/benchmarks/python/rope_bench.py +35 -0
  117. data/mlx/benchmarks/python/scatter_bench.py +96 -0
  118. data/mlx/benchmarks/python/sdpa_bench.py +223 -0
  119. data/mlx/benchmarks/python/sdpa_vector_bench.py +95 -0
  120. data/mlx/benchmarks/python/single_ops.py +132 -0
  121. data/mlx/benchmarks/python/synchronize_bench.py +55 -0
  122. data/mlx/benchmarks/python/time_utils.py +38 -0
  123. data/mlx/cmake/FindCUDNN.cmake +177 -0
  124. data/mlx/cmake/FindNCCL.cmake +54 -0
  125. data/mlx/cmake/Findnvpl.cmake +3 -0
  126. data/mlx/cmake/extension.cmake +50 -0
  127. data/mlx/docs/.clang-format +2 -0
  128. data/mlx/docs/.gitignore +3 -0
  129. data/mlx/docs/.nojekyll +0 -0
  130. data/mlx/docs/Doxyfile +51 -0
  131. data/mlx/docs/Makefile +18 -0
  132. data/mlx/docs/README.md +54 -0
  133. data/mlx/docs/index.html +1 -0
  134. data/mlx/docs/requirements.txt +5 -0
  135. data/mlx/docs/src/_static/distributed/m3-ultra-mesh-broken.png +0 -0
  136. data/mlx/docs/src/_static/distributed/m3-ultra-mesh.png +0 -0
  137. data/mlx/docs/src/_static/metal_debugger/capture.png +0 -0
  138. data/mlx/docs/src/_static/metal_debugger/schema.png +0 -0
  139. data/mlx/docs/src/_static/mlx_logo.png +0 -0
  140. data/mlx/docs/src/_static/mlx_logo_dark.png +0 -0
  141. data/mlx/docs/src/_static/tp_inference/all-to-sharded-linear.png +0 -0
  142. data/mlx/docs/src/_static/tp_inference/column-row-tp.png +0 -0
  143. data/mlx/docs/src/_static/tp_inference/llama-transformer.png +0 -0
  144. data/mlx/docs/src/_static/tp_inference/sharded-to-all-linear.png +0 -0
  145. data/mlx/docs/src/_templates/module-base-class.rst +33 -0
  146. data/mlx/docs/src/_templates/nn-module-template.rst +20 -0
  147. data/mlx/docs/src/_templates/optimizers-template.rst +20 -0
  148. data/mlx/docs/src/conf.py +99 -0
  149. data/mlx/docs/src/cpp/ops.rst +7 -0
  150. data/mlx/docs/src/dev/custom_metal_kernels.rst +445 -0
  151. data/mlx/docs/src/dev/extensions.rst +811 -0
  152. data/mlx/docs/src/dev/metal_debugger.rst +68 -0
  153. data/mlx/docs/src/dev/metal_logging.rst +40 -0
  154. data/mlx/docs/src/dev/mlx_in_cpp.rst +121 -0
  155. data/mlx/docs/src/examples/data_parallelism.rst +91 -0
  156. data/mlx/docs/src/examples/linear_regression.rst +77 -0
  157. data/mlx/docs/src/examples/llama-inference.rst +382 -0
  158. data/mlx/docs/src/examples/mlp.rst +134 -0
  159. data/mlx/docs/src/examples/tensor_parallelism.rst +239 -0
  160. data/mlx/docs/src/index.rst +96 -0
  161. data/mlx/docs/src/install.rst +340 -0
  162. data/mlx/docs/src/python/array.rst +65 -0
  163. data/mlx/docs/src/python/cuda.rst +9 -0
  164. data/mlx/docs/src/python/data_types.rst +78 -0
  165. data/mlx/docs/src/python/devices_and_streams.rst +21 -0
  166. data/mlx/docs/src/python/distributed.rst +22 -0
  167. data/mlx/docs/src/python/export.rst +14 -0
  168. data/mlx/docs/src/python/fast.rst +16 -0
  169. data/mlx/docs/src/python/fft.rst +24 -0
  170. data/mlx/docs/src/python/linalg.rst +27 -0
  171. data/mlx/docs/src/python/memory_management.rst +16 -0
  172. data/mlx/docs/src/python/metal.rst +12 -0
  173. data/mlx/docs/src/python/nn/distributed.rst +30 -0
  174. data/mlx/docs/src/python/nn/functions.rst +40 -0
  175. data/mlx/docs/src/python/nn/init.rst +45 -0
  176. data/mlx/docs/src/python/nn/layers.rst +74 -0
  177. data/mlx/docs/src/python/nn/losses.rst +25 -0
  178. data/mlx/docs/src/python/nn/module.rst +38 -0
  179. data/mlx/docs/src/python/nn.rst +186 -0
  180. data/mlx/docs/src/python/ops.rst +184 -0
  181. data/mlx/docs/src/python/optimizers/common_optimizers.rst +22 -0
  182. data/mlx/docs/src/python/optimizers/optimizer.rst +23 -0
  183. data/mlx/docs/src/python/optimizers/schedulers.rst +15 -0
  184. data/mlx/docs/src/python/optimizers.rst +78 -0
  185. data/mlx/docs/src/python/random.rst +48 -0
  186. data/mlx/docs/src/python/transforms.rst +22 -0
  187. data/mlx/docs/src/python/tree_utils.rst +23 -0
  188. data/mlx/docs/src/usage/compile.rst +516 -0
  189. data/mlx/docs/src/usage/distributed.rst +572 -0
  190. data/mlx/docs/src/usage/export.rst +288 -0
  191. data/mlx/docs/src/usage/function_transforms.rst +191 -0
  192. data/mlx/docs/src/usage/indexing.rst +194 -0
  193. data/mlx/docs/src/usage/launching_distributed.rst +234 -0
  194. data/mlx/docs/src/usage/lazy_evaluation.rst +144 -0
  195. data/mlx/docs/src/usage/numpy.rst +124 -0
  196. data/mlx/docs/src/usage/quick_start.rst +67 -0
  197. data/mlx/docs/src/usage/saving_and_loading.rst +81 -0
  198. data/mlx/docs/src/usage/unified_memory.rst +78 -0
  199. data/mlx/docs/src/usage/using_streams.rst +18 -0
  200. data/mlx/examples/cmake_project/CMakeLists.txt +22 -0
  201. data/mlx/examples/cmake_project/README.md +26 -0
  202. data/mlx/examples/cmake_project/example.cpp +14 -0
  203. data/mlx/examples/cpp/CMakeLists.txt +12 -0
  204. data/mlx/examples/cpp/distributed.cpp +22 -0
  205. data/mlx/examples/cpp/linear_regression.cpp +54 -0
  206. data/mlx/examples/cpp/logistic_regression.cpp +54 -0
  207. data/mlx/examples/cpp/metal_capture.cpp +31 -0
  208. data/mlx/examples/cpp/timer.h +20 -0
  209. data/mlx/examples/cpp/tutorial.cpp +99 -0
  210. data/mlx/examples/export/CMakeLists.txt +22 -0
  211. data/mlx/examples/export/README.md +49 -0
  212. data/mlx/examples/export/eval_mlp.cpp +25 -0
  213. data/mlx/examples/export/eval_mlp.py +52 -0
  214. data/mlx/examples/export/train_mlp.cpp +35 -0
  215. data/mlx/examples/export/train_mlp.py +76 -0
  216. data/mlx/examples/extensions/CMakeLists.txt +78 -0
  217. data/mlx/examples/extensions/README.md +24 -0
  218. data/mlx/examples/extensions/axpby/axpby.cpp +306 -0
  219. data/mlx/examples/extensions/axpby/axpby.h +90 -0
  220. data/mlx/examples/extensions/axpby/axpby.metal +47 -0
  221. data/mlx/examples/extensions/bindings.cpp +39 -0
  222. data/mlx/examples/extensions/mlx_sample_extensions/__init__.py +5 -0
  223. data/mlx/examples/extensions/pyproject.toml +8 -0
  224. data/mlx/examples/extensions/requirements.txt +4 -0
  225. data/mlx/examples/extensions/setup.py +18 -0
  226. data/mlx/examples/extensions/test.py +12 -0
  227. data/mlx/examples/python/linear_regression.py +46 -0
  228. data/mlx/examples/python/logistic_regression.py +49 -0
  229. data/mlx/examples/python/qqmm.py +117 -0
  230. data/mlx/mlx/3rdparty/.clang-format +2 -0
  231. data/mlx/mlx/3rdparty/pocketfft.h +3581 -0
  232. data/mlx/mlx/CMakeLists.txt +107 -0
  233. data/mlx/mlx/allocator.h +75 -0
  234. data/mlx/mlx/api.h +29 -0
  235. data/mlx/mlx/array.cpp +354 -0
  236. data/mlx/mlx/array.h +647 -0
  237. data/mlx/mlx/backend/common/CMakeLists.txt +9 -0
  238. data/mlx/mlx/backend/common/binary.h +97 -0
  239. data/mlx/mlx/backend/common/broadcasting.cpp +24 -0
  240. data/mlx/mlx/backend/common/broadcasting.h +11 -0
  241. data/mlx/mlx/backend/common/buffer_cache.h +158 -0
  242. data/mlx/mlx/backend/common/common.cpp +305 -0
  243. data/mlx/mlx/backend/common/compiled.cpp +243 -0
  244. data/mlx/mlx/backend/common/compiled.h +77 -0
  245. data/mlx/mlx/backend/common/copy.h +50 -0
  246. data/mlx/mlx/backend/common/hadamard.h +109 -0
  247. data/mlx/mlx/backend/common/load.cpp +57 -0
  248. data/mlx/mlx/backend/common/matmul.h +67 -0
  249. data/mlx/mlx/backend/common/reduce.cpp +154 -0
  250. data/mlx/mlx/backend/common/reduce.h +59 -0
  251. data/mlx/mlx/backend/common/slicing.cpp +71 -0
  252. data/mlx/mlx/backend/common/slicing.h +20 -0
  253. data/mlx/mlx/backend/common/ternary.h +85 -0
  254. data/mlx/mlx/backend/common/unary.h +29 -0
  255. data/mlx/mlx/backend/common/utils.cpp +231 -0
  256. data/mlx/mlx/backend/common/utils.h +205 -0
  257. data/mlx/mlx/backend/cpu/CMakeLists.txt +88 -0
  258. data/mlx/mlx/backend/cpu/arange.h +28 -0
  259. data/mlx/mlx/backend/cpu/arg_reduce.cpp +124 -0
  260. data/mlx/mlx/backend/cpu/binary.cpp +269 -0
  261. data/mlx/mlx/backend/cpu/binary.h +517 -0
  262. data/mlx/mlx/backend/cpu/binary_ops.h +98 -0
  263. data/mlx/mlx/backend/cpu/binary_two.h +166 -0
  264. data/mlx/mlx/backend/cpu/cholesky.cpp +85 -0
  265. data/mlx/mlx/backend/cpu/compiled.cpp +357 -0
  266. data/mlx/mlx/backend/cpu/compiled_preamble.h +12 -0
  267. data/mlx/mlx/backend/cpu/conv.cpp +1351 -0
  268. data/mlx/mlx/backend/cpu/copy.cpp +386 -0
  269. data/mlx/mlx/backend/cpu/copy.h +36 -0
  270. data/mlx/mlx/backend/cpu/device_info.cpp +113 -0
  271. data/mlx/mlx/backend/cpu/device_info.h +28 -0
  272. data/mlx/mlx/backend/cpu/distributed.cpp +103 -0
  273. data/mlx/mlx/backend/cpu/eig.cpp +281 -0
  274. data/mlx/mlx/backend/cpu/eigh.cpp +241 -0
  275. data/mlx/mlx/backend/cpu/encoder.cpp +16 -0
  276. data/mlx/mlx/backend/cpu/encoder.h +67 -0
  277. data/mlx/mlx/backend/cpu/eval.cpp +40 -0
  278. data/mlx/mlx/backend/cpu/eval.h +12 -0
  279. data/mlx/mlx/backend/cpu/fft.cpp +120 -0
  280. data/mlx/mlx/backend/cpu/gemm.h +26 -0
  281. data/mlx/mlx/backend/cpu/gemms/bnns.cpp +214 -0
  282. data/mlx/mlx/backend/cpu/gemms/cblas.cpp +134 -0
  283. data/mlx/mlx/backend/cpu/gemms/simd_bf16.cpp +45 -0
  284. data/mlx/mlx/backend/cpu/gemms/simd_fp16.cpp +45 -0
  285. data/mlx/mlx/backend/cpu/gemms/simd_gemm.h +139 -0
  286. data/mlx/mlx/backend/cpu/hadamard.cpp +121 -0
  287. data/mlx/mlx/backend/cpu/indexing.cpp +854 -0
  288. data/mlx/mlx/backend/cpu/inverse.cpp +160 -0
  289. data/mlx/mlx/backend/cpu/jit_compiler.cpp +166 -0
  290. data/mlx/mlx/backend/cpu/jit_compiler.h +20 -0
  291. data/mlx/mlx/backend/cpu/lapack.h +80 -0
  292. data/mlx/mlx/backend/cpu/logsumexp.cpp +139 -0
  293. data/mlx/mlx/backend/cpu/luf.cpp +120 -0
  294. data/mlx/mlx/backend/cpu/make_compiled_preamble.ps1 +38 -0
  295. data/mlx/mlx/backend/cpu/make_compiled_preamble.sh +41 -0
  296. data/mlx/mlx/backend/cpu/masked_mm.cpp +608 -0
  297. data/mlx/mlx/backend/cpu/matmul.cpp +166 -0
  298. data/mlx/mlx/backend/cpu/primitives.cpp +478 -0
  299. data/mlx/mlx/backend/cpu/qrf.cpp +147 -0
  300. data/mlx/mlx/backend/cpu/quantized.cpp +1370 -0
  301. data/mlx/mlx/backend/cpu/reduce.cpp +587 -0
  302. data/mlx/mlx/backend/cpu/scan.cpp +338 -0
  303. data/mlx/mlx/backend/cpu/select.cpp +95 -0
  304. data/mlx/mlx/backend/cpu/simd/accelerate_fp16_simd.h +56 -0
  305. data/mlx/mlx/backend/cpu/simd/accelerate_simd.h +329 -0
  306. data/mlx/mlx/backend/cpu/simd/base_simd.h +319 -0
  307. data/mlx/mlx/backend/cpu/simd/math.h +193 -0
  308. data/mlx/mlx/backend/cpu/simd/neon_fp16_simd.h +212 -0
  309. data/mlx/mlx/backend/cpu/simd/simd.h +4 -0
  310. data/mlx/mlx/backend/cpu/simd/type.h +11 -0
  311. data/mlx/mlx/backend/cpu/slicing.h +21 -0
  312. data/mlx/mlx/backend/cpu/softmax.cpp +170 -0
  313. data/mlx/mlx/backend/cpu/sort.cpp +481 -0
  314. data/mlx/mlx/backend/cpu/svd.cpp +289 -0
  315. data/mlx/mlx/backend/cpu/ternary.h +154 -0
  316. data/mlx/mlx/backend/cpu/threefry.cpp +31 -0
  317. data/mlx/mlx/backend/cpu/threefry.h +21 -0
  318. data/mlx/mlx/backend/cpu/unary.cpp +238 -0
  319. data/mlx/mlx/backend/cpu/unary.h +281 -0
  320. data/mlx/mlx/backend/cpu/unary_ops.h +175 -0
  321. data/mlx/mlx/backend/cuda/CMakeLists.txt +265 -0
  322. data/mlx/mlx/backend/cuda/allocator.cpp +451 -0
  323. data/mlx/mlx/backend/cuda/allocator.h +94 -0
  324. data/mlx/mlx/backend/cuda/arange.cu +68 -0
  325. data/mlx/mlx/backend/cuda/arg_reduce.cu +189 -0
  326. data/mlx/mlx/backend/cuda/bin2h.cmake +150 -0
  327. data/mlx/mlx/backend/cuda/binary/CMakeLists.txt +21 -0
  328. data/mlx/mlx/backend/cuda/binary/add.cu +7 -0
  329. data/mlx/mlx/backend/cuda/binary/arctan2.cu +7 -0
  330. data/mlx/mlx/backend/cuda/binary/binary.cuh +383 -0
  331. data/mlx/mlx/backend/cuda/binary/bitwise_binary.cu +27 -0
  332. data/mlx/mlx/backend/cuda/binary/divide.cu +7 -0
  333. data/mlx/mlx/backend/cuda/binary/equal.cu +15 -0
  334. data/mlx/mlx/backend/cuda/binary/greater.cu +7 -0
  335. data/mlx/mlx/backend/cuda/binary/greater_equal.cu +7 -0
  336. data/mlx/mlx/backend/cuda/binary/less.cu +7 -0
  337. data/mlx/mlx/backend/cuda/binary/less_equal.cu +7 -0
  338. data/mlx/mlx/backend/cuda/binary/log_add_exp.cu +7 -0
  339. data/mlx/mlx/backend/cuda/binary/logical_and.cu +7 -0
  340. data/mlx/mlx/backend/cuda/binary/logical_or.cu +7 -0
  341. data/mlx/mlx/backend/cuda/binary/maximum.cu +7 -0
  342. data/mlx/mlx/backend/cuda/binary/minimum.cu +7 -0
  343. data/mlx/mlx/backend/cuda/binary/multiply.cu +7 -0
  344. data/mlx/mlx/backend/cuda/binary/not_equal.cu +7 -0
  345. data/mlx/mlx/backend/cuda/binary/power.cu +7 -0
  346. data/mlx/mlx/backend/cuda/binary/remainder.cu +7 -0
  347. data/mlx/mlx/backend/cuda/binary/subtract.cu +7 -0
  348. data/mlx/mlx/backend/cuda/binary_two.cu +412 -0
  349. data/mlx/mlx/backend/cuda/compiled.cpp +357 -0
  350. data/mlx/mlx/backend/cuda/conv/conv.h +126 -0
  351. data/mlx/mlx/backend/cuda/conv/gemm_conv.cu +217 -0
  352. data/mlx/mlx/backend/cuda/conv/gemm_grouped_conv.cu +231 -0
  353. data/mlx/mlx/backend/cuda/conv.cpp +403 -0
  354. data/mlx/mlx/backend/cuda/copy/copy.cuh +55 -0
  355. data/mlx/mlx/backend/cuda/copy/copy_contiguous.cu +88 -0
  356. data/mlx/mlx/backend/cuda/copy/copy_general.cu +171 -0
  357. data/mlx/mlx/backend/cuda/copy/copy_general_dynamic.cu +118 -0
  358. data/mlx/mlx/backend/cuda/copy/copy_general_input.cu +229 -0
  359. data/mlx/mlx/backend/cuda/copy.cu +132 -0
  360. data/mlx/mlx/backend/cuda/cublas_utils.cpp +222 -0
  361. data/mlx/mlx/backend/cuda/cublas_utils.h +95 -0
  362. data/mlx/mlx/backend/cuda/cuda.h +21 -0
  363. data/mlx/mlx/backend/cuda/cuda_utils.h +90 -0
  364. data/mlx/mlx/backend/cuda/cudnn_utils.cpp +133 -0
  365. data/mlx/mlx/backend/cuda/cudnn_utils.h +187 -0
  366. data/mlx/mlx/backend/cuda/custom_kernel.cpp +379 -0
  367. data/mlx/mlx/backend/cuda/cutlass_utils.cuh +46 -0
  368. data/mlx/mlx/backend/cuda/delayload.cpp +80 -0
  369. data/mlx/mlx/backend/cuda/device/atomic_ops.cuh +63 -0
  370. data/mlx/mlx/backend/cuda/device/binary_ops.cuh +300 -0
  371. data/mlx/mlx/backend/cuda/device/cast_op.cuh +118 -0
  372. data/mlx/mlx/backend/cuda/device/complex.cuh +60 -0
  373. data/mlx/mlx/backend/cuda/device/config.h +12 -0
  374. data/mlx/mlx/backend/cuda/device/fp16_math.cuh +96 -0
  375. data/mlx/mlx/backend/cuda/device/gather.cuh +53 -0
  376. data/mlx/mlx/backend/cuda/device/gather_axis.cuh +65 -0
  377. data/mlx/mlx/backend/cuda/device/indexing.cuh +30 -0
  378. data/mlx/mlx/backend/cuda/device/scatter.cuh +68 -0
  379. data/mlx/mlx/backend/cuda/device/scatter_axis.cuh +67 -0
  380. data/mlx/mlx/backend/cuda/device/scatter_ops.cuh +44 -0
  381. data/mlx/mlx/backend/cuda/device/ternary_ops.cuh +13 -0
  382. data/mlx/mlx/backend/cuda/device/unary_ops.cuh +350 -0
  383. data/mlx/mlx/backend/cuda/device/utils.cuh +464 -0
  384. data/mlx/mlx/backend/cuda/device.cpp +522 -0
  385. data/mlx/mlx/backend/cuda/device.h +195 -0
  386. data/mlx/mlx/backend/cuda/device_info.cpp +232 -0
  387. data/mlx/mlx/backend/cuda/distributed.cu +121 -0
  388. data/mlx/mlx/backend/cuda/eval.cpp +66 -0
  389. data/mlx/mlx/backend/cuda/event.cu +415 -0
  390. data/mlx/mlx/backend/cuda/event.h +79 -0
  391. data/mlx/mlx/backend/cuda/fence.cpp +42 -0
  392. data/mlx/mlx/backend/cuda/gemms/cublas_gemm.cpp +233 -0
  393. data/mlx/mlx/backend/cuda/gemms/cublas_gemm.h +114 -0
  394. data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_0.cpp +77 -0
  395. data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu +329 -0
  396. data/mlx/mlx/backend/cuda/gemms/gemv.cu +327 -0
  397. data/mlx/mlx/backend/cuda/gemms/gemv.h +34 -0
  398. data/mlx/mlx/backend/cuda/gemms/grouped_gemm.h +25 -0
  399. data/mlx/mlx/backend/cuda/gemms/grouped_gemm_unaligned.cu +358 -0
  400. data/mlx/mlx/backend/cuda/indexing.cpp +434 -0
  401. data/mlx/mlx/backend/cuda/jit_module.cpp +443 -0
  402. data/mlx/mlx/backend/cuda/jit_module.h +120 -0
  403. data/mlx/mlx/backend/cuda/kernel_utils.cu +52 -0
  404. data/mlx/mlx/backend/cuda/kernel_utils.cuh +148 -0
  405. data/mlx/mlx/backend/cuda/layer_norm.cu +417 -0
  406. data/mlx/mlx/backend/cuda/load.cpp +60 -0
  407. data/mlx/mlx/backend/cuda/logsumexp.cu +161 -0
  408. data/mlx/mlx/backend/cuda/lru_cache.h +190 -0
  409. data/mlx/mlx/backend/cuda/matmul.cpp +373 -0
  410. data/mlx/mlx/backend/cuda/no_cuda.cpp +47 -0
  411. data/mlx/mlx/backend/cuda/primitives.cpp +46 -0
  412. data/mlx/mlx/backend/cuda/quantized/affine_quantize.cu +329 -0
  413. data/mlx/mlx/backend/cuda/quantized/convert_fp8.cu +19 -0
  414. data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.cpp +206 -0
  415. data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.h +88 -0
  416. data/mlx/mlx/backend/cuda/quantized/cuda_fp4.h +100 -0
  417. data/mlx/mlx/backend/cuda/quantized/fp_quantize.cu +496 -0
  418. data/mlx/mlx/backend/cuda/quantized/mxfp8_quantize.cuh +32 -0
  419. data/mlx/mlx/backend/cuda/quantized/no_qqmm_impl.cpp +26 -0
  420. data/mlx/mlx/backend/cuda/quantized/nvfp4_quantize.cuh +334 -0
  421. data/mlx/mlx/backend/cuda/quantized/qmv.cu +304 -0
  422. data/mlx/mlx/backend/cuda/quantized/qmv.h +21 -0
  423. data/mlx/mlx/backend/cuda/quantized/qqmm.cpp +158 -0
  424. data/mlx/mlx/backend/cuda/quantized/qqmm_impl.cpp +50 -0
  425. data/mlx/mlx/backend/cuda/quantized/qqmm_impl.h +26 -0
  426. data/mlx/mlx/backend/cuda/quantized/qqmm_utils.cu +227 -0
  427. data/mlx/mlx/backend/cuda/quantized/qqmm_utils.h +30 -0
  428. data/mlx/mlx/backend/cuda/quantized/quantized.cpp +85 -0
  429. data/mlx/mlx/backend/cuda/quantized/quantized.h +53 -0
  430. data/mlx/mlx/backend/cuda/quantized/quantized_utils.cuh +88 -0
  431. data/mlx/mlx/backend/cuda/quantized/quantized_utils.h +50 -0
  432. data/mlx/mlx/backend/cuda/random.cu +202 -0
  433. data/mlx/mlx/backend/cuda/reduce/all_reduce.cu +159 -0
  434. data/mlx/mlx/backend/cuda/reduce/col_reduce.cu +510 -0
  435. data/mlx/mlx/backend/cuda/reduce/init_reduce.cu +50 -0
  436. data/mlx/mlx/backend/cuda/reduce/reduce.cuh +71 -0
  437. data/mlx/mlx/backend/cuda/reduce/reduce_ops.cuh +211 -0
  438. data/mlx/mlx/backend/cuda/reduce/reduce_utils.cuh +145 -0
  439. data/mlx/mlx/backend/cuda/reduce/row_reduce.cu +361 -0
  440. data/mlx/mlx/backend/cuda/reduce.cu +73 -0
  441. data/mlx/mlx/backend/cuda/rms_norm.cu +536 -0
  442. data/mlx/mlx/backend/cuda/rope.cu +429 -0
  443. data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cpp +681 -0
  444. data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu +796 -0
  445. data/mlx/mlx/backend/cuda/scan.cu +468 -0
  446. data/mlx/mlx/backend/cuda/slicing.cpp +111 -0
  447. data/mlx/mlx/backend/cuda/softmax.cu +162 -0
  448. data/mlx/mlx/backend/cuda/sort.cu +1076 -0
  449. data/mlx/mlx/backend/cuda/steel/defines.cuh +9 -0
  450. data/mlx/mlx/backend/cuda/steel/gemm.cuh +101 -0
  451. data/mlx/mlx/backend/cuda/steel/mma.cuh +117 -0
  452. data/mlx/mlx/backend/cuda/steel/tiles.cuh +450 -0
  453. data/mlx/mlx/backend/cuda/steel/utils.cuh +89 -0
  454. data/mlx/mlx/backend/cuda/ternary.cu +271 -0
  455. data/mlx/mlx/backend/cuda/unary/CMakeLists.txt +34 -0
  456. data/mlx/mlx/backend/cuda/unary/abs.cu +7 -0
  457. data/mlx/mlx/backend/cuda/unary/arccos.cu +7 -0
  458. data/mlx/mlx/backend/cuda/unary/arccosh.cu +7 -0
  459. data/mlx/mlx/backend/cuda/unary/arcsin.cu +7 -0
  460. data/mlx/mlx/backend/cuda/unary/arcsinh.cu +7 -0
  461. data/mlx/mlx/backend/cuda/unary/arctan.cu +7 -0
  462. data/mlx/mlx/backend/cuda/unary/arctanh.cu +7 -0
  463. data/mlx/mlx/backend/cuda/unary/bitwise_invert.cu +7 -0
  464. data/mlx/mlx/backend/cuda/unary/ceil.cu +7 -0
  465. data/mlx/mlx/backend/cuda/unary/conjugate.cu +7 -0
  466. data/mlx/mlx/backend/cuda/unary/cos.cu +7 -0
  467. data/mlx/mlx/backend/cuda/unary/cosh.cu +7 -0
  468. data/mlx/mlx/backend/cuda/unary/erf.cu +7 -0
  469. data/mlx/mlx/backend/cuda/unary/erf_inv.cu +7 -0
  470. data/mlx/mlx/backend/cuda/unary/exp.cu +7 -0
  471. data/mlx/mlx/backend/cuda/unary/expm1.cu +7 -0
  472. data/mlx/mlx/backend/cuda/unary/floor.cu +7 -0
  473. data/mlx/mlx/backend/cuda/unary/imag.cu +7 -0
  474. data/mlx/mlx/backend/cuda/unary/log.cu +21 -0
  475. data/mlx/mlx/backend/cuda/unary/log1p.cu +7 -0
  476. data/mlx/mlx/backend/cuda/unary/logical_not.cu +7 -0
  477. data/mlx/mlx/backend/cuda/unary/negative.cu +7 -0
  478. data/mlx/mlx/backend/cuda/unary/real.cu +7 -0
  479. data/mlx/mlx/backend/cuda/unary/round.cu +18 -0
  480. data/mlx/mlx/backend/cuda/unary/sigmoid.cu +7 -0
  481. data/mlx/mlx/backend/cuda/unary/sign.cu +7 -0
  482. data/mlx/mlx/backend/cuda/unary/sin.cu +7 -0
  483. data/mlx/mlx/backend/cuda/unary/sinh.cu +7 -0
  484. data/mlx/mlx/backend/cuda/unary/sqrt.cu +15 -0
  485. data/mlx/mlx/backend/cuda/unary/square.cu +7 -0
  486. data/mlx/mlx/backend/cuda/unary/tan.cu +7 -0
  487. data/mlx/mlx/backend/cuda/unary/tanh.cu +7 -0
  488. data/mlx/mlx/backend/cuda/unary/unary.cuh +224 -0
  489. data/mlx/mlx/backend/cuda/utils.cpp +116 -0
  490. data/mlx/mlx/backend/cuda/utils.h +49 -0
  491. data/mlx/mlx/backend/cuda/vector_types.cuh +48 -0
  492. data/mlx/mlx/backend/cuda/worker.cpp +79 -0
  493. data/mlx/mlx/backend/cuda/worker.h +55 -0
  494. data/mlx/mlx/backend/gpu/CMakeLists.txt +5 -0
  495. data/mlx/mlx/backend/gpu/copy.cpp +89 -0
  496. data/mlx/mlx/backend/gpu/copy.h +57 -0
  497. data/mlx/mlx/backend/gpu/device_info.h +36 -0
  498. data/mlx/mlx/backend/gpu/eval.h +18 -0
  499. data/mlx/mlx/backend/gpu/primitives.cpp +307 -0
  500. data/mlx/mlx/backend/gpu/slicing.cpp +44 -0
  501. data/mlx/mlx/backend/gpu/slicing.h +36 -0
  502. data/mlx/mlx/backend/metal/CMakeLists.txt +144 -0
  503. data/mlx/mlx/backend/metal/allocator.cpp +279 -0
  504. data/mlx/mlx/backend/metal/allocator.h +79 -0
  505. data/mlx/mlx/backend/metal/binary.cpp +257 -0
  506. data/mlx/mlx/backend/metal/binary.h +33 -0
  507. data/mlx/mlx/backend/metal/compiled.cpp +471 -0
  508. data/mlx/mlx/backend/metal/conv.cpp +1118 -0
  509. data/mlx/mlx/backend/metal/copy.cpp +235 -0
  510. data/mlx/mlx/backend/metal/custom_kernel.cpp +430 -0
  511. data/mlx/mlx/backend/metal/device.cpp +816 -0
  512. data/mlx/mlx/backend/metal/device.h +289 -0
  513. data/mlx/mlx/backend/metal/device_info.cpp +58 -0
  514. data/mlx/mlx/backend/metal/distributed.cpp +38 -0
  515. data/mlx/mlx/backend/metal/eval.cpp +97 -0
  516. data/mlx/mlx/backend/metal/event.cpp +62 -0
  517. data/mlx/mlx/backend/metal/fence.cpp +162 -0
  518. data/mlx/mlx/backend/metal/fft.cpp +807 -0
  519. data/mlx/mlx/backend/metal/hadamard.cpp +198 -0
  520. data/mlx/mlx/backend/metal/indexing.cpp +727 -0
  521. data/mlx/mlx/backend/metal/jit/includes.h +58 -0
  522. data/mlx/mlx/backend/metal/jit/indexing.h +76 -0
  523. data/mlx/mlx/backend/metal/jit_kernels.cpp +1118 -0
  524. data/mlx/mlx/backend/metal/kernels/CMakeLists.txt +193 -0
  525. data/mlx/mlx/backend/metal/kernels/arange.h +9 -0
  526. data/mlx/mlx/backend/metal/kernels/arange.metal +20 -0
  527. data/mlx/mlx/backend/metal/kernels/arg_reduce.metal +182 -0
  528. data/mlx/mlx/backend/metal/kernels/atomic.h +345 -0
  529. data/mlx/mlx/backend/metal/kernels/bf16.h +16 -0
  530. data/mlx/mlx/backend/metal/kernels/bf16_math.h +380 -0
  531. data/mlx/mlx/backend/metal/kernels/binary.h +199 -0
  532. data/mlx/mlx/backend/metal/kernels/binary.metal +109 -0
  533. data/mlx/mlx/backend/metal/kernels/binary_ops.h +330 -0
  534. data/mlx/mlx/backend/metal/kernels/binary_two.h +244 -0
  535. data/mlx/mlx/backend/metal/kernels/binary_two.metal +54 -0
  536. data/mlx/mlx/backend/metal/kernels/cexpf.h +134 -0
  537. data/mlx/mlx/backend/metal/kernels/complex.h +173 -0
  538. data/mlx/mlx/backend/metal/kernels/conv.metal +701 -0
  539. data/mlx/mlx/backend/metal/kernels/copy.h +276 -0
  540. data/mlx/mlx/backend/metal/kernels/copy.metal +75 -0
  541. data/mlx/mlx/backend/metal/kernels/defines.h +24 -0
  542. data/mlx/mlx/backend/metal/kernels/erf.h +69 -0
  543. data/mlx/mlx/backend/metal/kernels/expm1f.h +90 -0
  544. data/mlx/mlx/backend/metal/kernels/fence.metal +52 -0
  545. data/mlx/mlx/backend/metal/kernels/fft/radix.h +328 -0
  546. data/mlx/mlx/backend/metal/kernels/fft/readwrite.h +624 -0
  547. data/mlx/mlx/backend/metal/kernels/fft.h +486 -0
  548. data/mlx/mlx/backend/metal/kernels/fft.metal +67 -0
  549. data/mlx/mlx/backend/metal/kernels/fp4.h +48 -0
  550. data/mlx/mlx/backend/metal/kernels/fp8.h +80 -0
  551. data/mlx/mlx/backend/metal/kernels/fp_quantized.h +1850 -0
  552. data/mlx/mlx/backend/metal/kernels/fp_quantized.metal +153 -0
  553. data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.h +1044 -0
  554. data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.metal +79 -0
  555. data/mlx/mlx/backend/metal/kernels/gemv.metal +868 -0
  556. data/mlx/mlx/backend/metal/kernels/gemv_masked.h +827 -0
  557. data/mlx/mlx/backend/metal/kernels/gemv_masked.metal +76 -0
  558. data/mlx/mlx/backend/metal/kernels/hadamard.h +182 -0
  559. data/mlx/mlx/backend/metal/kernels/indexing/gather.h +51 -0
  560. data/mlx/mlx/backend/metal/kernels/indexing/gather_axis.h +44 -0
  561. data/mlx/mlx/backend/metal/kernels/indexing/gather_front.h +24 -0
  562. data/mlx/mlx/backend/metal/kernels/indexing/indexing.h +23 -0
  563. data/mlx/mlx/backend/metal/kernels/indexing/masked_scatter.h +41 -0
  564. data/mlx/mlx/backend/metal/kernels/indexing/scatter.h +59 -0
  565. data/mlx/mlx/backend/metal/kernels/indexing/scatter_axis.h +52 -0
  566. data/mlx/mlx/backend/metal/kernels/layer_norm.metal +433 -0
  567. data/mlx/mlx/backend/metal/kernels/logging.h +26 -0
  568. data/mlx/mlx/backend/metal/kernels/logsumexp.h +140 -0
  569. data/mlx/mlx/backend/metal/kernels/logsumexp.metal +18 -0
  570. data/mlx/mlx/backend/metal/kernels/quantized.h +2508 -0
  571. data/mlx/mlx/backend/metal/kernels/quantized.metal +144 -0
  572. data/mlx/mlx/backend/metal/kernels/quantized_nax.h +1705 -0
  573. data/mlx/mlx/backend/metal/kernels/quantized_nax.metal +106 -0
  574. data/mlx/mlx/backend/metal/kernels/quantized_utils.h +90 -0
  575. data/mlx/mlx/backend/metal/kernels/random.metal +103 -0
  576. data/mlx/mlx/backend/metal/kernels/reduce.h +5 -0
  577. data/mlx/mlx/backend/metal/kernels/reduce.metal +169 -0
  578. data/mlx/mlx/backend/metal/kernels/reduce_utils.h +6 -0
  579. data/mlx/mlx/backend/metal/kernels/reduction/ops.h +275 -0
  580. data/mlx/mlx/backend/metal/kernels/reduction/reduce_all.h +66 -0
  581. data/mlx/mlx/backend/metal/kernels/reduction/reduce_col.h +398 -0
  582. data/mlx/mlx/backend/metal/kernels/reduction/reduce_init.h +8 -0
  583. data/mlx/mlx/backend/metal/kernels/reduction/reduce_row.h +369 -0
  584. data/mlx/mlx/backend/metal/kernels/rms_norm.metal +391 -0
  585. data/mlx/mlx/backend/metal/kernels/rope.metal +229 -0
  586. data/mlx/mlx/backend/metal/kernels/scaled_dot_product_attention.metal +44 -0
  587. data/mlx/mlx/backend/metal/kernels/scan.h +514 -0
  588. data/mlx/mlx/backend/metal/kernels/scan.metal +109 -0
  589. data/mlx/mlx/backend/metal/kernels/sdpa_vector.h +394 -0
  590. data/mlx/mlx/backend/metal/kernels/softmax.h +190 -0
  591. data/mlx/mlx/backend/metal/kernels/softmax.metal +24 -0
  592. data/mlx/mlx/backend/metal/kernels/sort.h +719 -0
  593. data/mlx/mlx/backend/metal/kernels/sort.metal +80 -0
  594. data/mlx/mlx/backend/metal/kernels/steel/attn/attn.h +296 -0
  595. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.h +471 -0
  596. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.metal +27 -0
  597. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.h +481 -0
  598. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.metal +28 -0
  599. data/mlx/mlx/backend/metal/kernels/steel/attn/loader.h +264 -0
  600. data/mlx/mlx/backend/metal/kernels/steel/attn/mma.h +750 -0
  601. data/mlx/mlx/backend/metal/kernels/steel/attn/nax.h +1076 -0
  602. data/mlx/mlx/backend/metal/kernels/steel/attn/params.h +44 -0
  603. data/mlx/mlx/backend/metal/kernels/steel/attn/transforms.h +71 -0
  604. data/mlx/mlx/backend/metal/kernels/steel/conv/conv.h +13 -0
  605. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.h +176 -0
  606. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.metal +56 -0
  607. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.h +225 -0
  608. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.metal +47 -0
  609. data/mlx/mlx/backend/metal/kernels/steel/conv/loader.h +6 -0
  610. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_l.h +451 -0
  611. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_n.h +319 -0
  612. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_general.h +381 -0
  613. data/mlx/mlx/backend/metal/kernels/steel/conv/params.h +62 -0
  614. data/mlx/mlx/backend/metal/kernels/steel/defines.h +7 -0
  615. data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm.h +295 -0
  616. data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm_nax.h +157 -0
  617. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.h +346 -0
  618. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.metal +34 -0
  619. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.h +219 -0
  620. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.metal +30 -0
  621. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.h +459 -0
  622. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.metal +59 -0
  623. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.h +143 -0
  624. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.metal +37 -0
  625. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.h +719 -0
  626. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.metal +76 -0
  627. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.h +266 -0
  628. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.metal +43 -0
  629. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.h +227 -0
  630. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.metal +76 -0
  631. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.h +152 -0
  632. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.metal +30 -0
  633. data/mlx/mlx/backend/metal/kernels/steel/gemm/loader.h +137 -0
  634. data/mlx/mlx/backend/metal/kernels/steel/gemm/mma.h +1146 -0
  635. data/mlx/mlx/backend/metal/kernels/steel/gemm/nax.h +1084 -0
  636. data/mlx/mlx/backend/metal/kernels/steel/gemm/params.h +65 -0
  637. data/mlx/mlx/backend/metal/kernels/steel/gemm/transforms.h +72 -0
  638. data/mlx/mlx/backend/metal/kernels/steel/utils/integral_constant.h +134 -0
  639. data/mlx/mlx/backend/metal/kernels/steel/utils/type_traits.h +55 -0
  640. data/mlx/mlx/backend/metal/kernels/steel/utils.h +42 -0
  641. data/mlx/mlx/backend/metal/kernels/ternary.h +145 -0
  642. data/mlx/mlx/backend/metal/kernels/ternary.metal +48 -0
  643. data/mlx/mlx/backend/metal/kernels/ternary_ops.h +10 -0
  644. data/mlx/mlx/backend/metal/kernels/unary.h +63 -0
  645. data/mlx/mlx/backend/metal/kernels/unary.metal +115 -0
  646. data/mlx/mlx/backend/metal/kernels/unary_ops.h +454 -0
  647. data/mlx/mlx/backend/metal/kernels/utils.h +445 -0
  648. data/mlx/mlx/backend/metal/kernels.h +375 -0
  649. data/mlx/mlx/backend/metal/logsumexp.cpp +95 -0
  650. data/mlx/mlx/backend/metal/make_compiled_preamble.sh +120 -0
  651. data/mlx/mlx/backend/metal/matmul.cpp +2572 -0
  652. data/mlx/mlx/backend/metal/matmul.h +144 -0
  653. data/mlx/mlx/backend/metal/metal.cpp +50 -0
  654. data/mlx/mlx/backend/metal/metal.h +25 -0
  655. data/mlx/mlx/backend/metal/no_metal.cpp +42 -0
  656. data/mlx/mlx/backend/metal/nojit_kernels.cpp +414 -0
  657. data/mlx/mlx/backend/metal/normalization.cpp +433 -0
  658. data/mlx/mlx/backend/metal/primitives.cpp +242 -0
  659. data/mlx/mlx/backend/metal/quantized.cpp +1651 -0
  660. data/mlx/mlx/backend/metal/reduce.cpp +1038 -0
  661. data/mlx/mlx/backend/metal/reduce.h +41 -0
  662. data/mlx/mlx/backend/metal/resident.cpp +100 -0
  663. data/mlx/mlx/backend/metal/resident.h +32 -0
  664. data/mlx/mlx/backend/metal/rope.cpp +165 -0
  665. data/mlx/mlx/backend/metal/scaled_dot_product_attention.cpp +798 -0
  666. data/mlx/mlx/backend/metal/scan.cpp +145 -0
  667. data/mlx/mlx/backend/metal/scan.h +17 -0
  668. data/mlx/mlx/backend/metal/slicing.cpp +99 -0
  669. data/mlx/mlx/backend/metal/softmax.cpp +87 -0
  670. data/mlx/mlx/backend/metal/sort.cpp +368 -0
  671. data/mlx/mlx/backend/metal/ternary.cpp +160 -0
  672. data/mlx/mlx/backend/metal/ternary.h +21 -0
  673. data/mlx/mlx/backend/metal/unary.cpp +161 -0
  674. data/mlx/mlx/backend/metal/unary.h +21 -0
  675. data/mlx/mlx/backend/metal/utils.cpp +77 -0
  676. data/mlx/mlx/backend/metal/utils.h +99 -0
  677. data/mlx/mlx/backend/no_cpu/CMakeLists.txt +7 -0
  678. data/mlx/mlx/backend/no_cpu/compiled.cpp +24 -0
  679. data/mlx/mlx/backend/no_cpu/device_info.cpp +22 -0
  680. data/mlx/mlx/backend/no_cpu/primitives.cpp +146 -0
  681. data/mlx/mlx/backend/no_gpu/CMakeLists.txt +8 -0
  682. data/mlx/mlx/backend/no_gpu/allocator.cpp +134 -0
  683. data/mlx/mlx/backend/no_gpu/apple_memory.h +16 -0
  684. data/mlx/mlx/backend/no_gpu/device_info.cpp +22 -0
  685. data/mlx/mlx/backend/no_gpu/eval.cpp +24 -0
  686. data/mlx/mlx/backend/no_gpu/event.cpp +53 -0
  687. data/mlx/mlx/backend/no_gpu/fence.cpp +54 -0
  688. data/mlx/mlx/backend/no_gpu/linux_memory.h +22 -0
  689. data/mlx/mlx/backend/no_gpu/primitives.cpp +185 -0
  690. data/mlx/mlx/compile.cpp +1243 -0
  691. data/mlx/mlx/compile.h +45 -0
  692. data/mlx/mlx/compile_impl.h +70 -0
  693. data/mlx/mlx/device.cpp +72 -0
  694. data/mlx/mlx/device.h +56 -0
  695. data/mlx/mlx/distributed/CMakeLists.txt +14 -0
  696. data/mlx/mlx/distributed/distributed.cpp +197 -0
  697. data/mlx/mlx/distributed/distributed.h +61 -0
  698. data/mlx/mlx/distributed/distributed_impl.h +59 -0
  699. data/mlx/mlx/distributed/jaccl/CMakeLists.txt +12 -0
  700. data/mlx/mlx/distributed/jaccl/jaccl.cpp +178 -0
  701. data/mlx/mlx/distributed/jaccl/jaccl.h +12 -0
  702. data/mlx/mlx/distributed/jaccl/mesh.cpp +451 -0
  703. data/mlx/mlx/distributed/jaccl/mesh.h +122 -0
  704. data/mlx/mlx/distributed/jaccl/no_jaccl.cpp +20 -0
  705. data/mlx/mlx/distributed/jaccl/ring.cpp +692 -0
  706. data/mlx/mlx/distributed/jaccl/ring.h +178 -0
  707. data/mlx/mlx/distributed/jaccl/utils.cpp +329 -0
  708. data/mlx/mlx/distributed/jaccl/utils.h +342 -0
  709. data/mlx/mlx/distributed/mpi/CMakeLists.txt +5 -0
  710. data/mlx/mlx/distributed/mpi/mpi.cpp +501 -0
  711. data/mlx/mlx/distributed/mpi/mpi.h +12 -0
  712. data/mlx/mlx/distributed/mpi/mpi_declarations.h +28 -0
  713. data/mlx/mlx/distributed/mpi/no_mpi.cpp +20 -0
  714. data/mlx/mlx/distributed/nccl/CMakeLists.txt +26 -0
  715. data/mlx/mlx/distributed/nccl/nccl.cpp +443 -0
  716. data/mlx/mlx/distributed/nccl/nccl.h +12 -0
  717. data/mlx/mlx/distributed/nccl/nccl_stub/CMakeLists.txt +1 -0
  718. data/mlx/mlx/distributed/nccl/nccl_stub/nccl_stubs.cpp +54 -0
  719. data/mlx/mlx/distributed/nccl/no_nccl.cpp +20 -0
  720. data/mlx/mlx/distributed/ops.cpp +186 -0
  721. data/mlx/mlx/distributed/ops.h +57 -0
  722. data/mlx/mlx/distributed/primitives.cpp +95 -0
  723. data/mlx/mlx/distributed/primitives.h +156 -0
  724. data/mlx/mlx/distributed/reduction_ops.h +38 -0
  725. data/mlx/mlx/distributed/ring/CMakeLists.txt +5 -0
  726. data/mlx/mlx/distributed/ring/no_ring.cpp +20 -0
  727. data/mlx/mlx/distributed/ring/ring.cpp +870 -0
  728. data/mlx/mlx/distributed/ring/ring.h +12 -0
  729. data/mlx/mlx/distributed/utils.cpp +206 -0
  730. data/mlx/mlx/distributed/utils.h +67 -0
  731. data/mlx/mlx/dtype.cpp +197 -0
  732. data/mlx/mlx/dtype.h +116 -0
  733. data/mlx/mlx/dtype_utils.cpp +42 -0
  734. data/mlx/mlx/dtype_utils.h +119 -0
  735. data/mlx/mlx/einsum.cpp +941 -0
  736. data/mlx/mlx/einsum.h +23 -0
  737. data/mlx/mlx/event.h +58 -0
  738. data/mlx/mlx/export.cpp +1130 -0
  739. data/mlx/mlx/export.h +137 -0
  740. data/mlx/mlx/export_impl.h +99 -0
  741. data/mlx/mlx/fast.cpp +941 -0
  742. data/mlx/mlx/fast.h +103 -0
  743. data/mlx/mlx/fast_primitives.h +427 -0
  744. data/mlx/mlx/fence.h +39 -0
  745. data/mlx/mlx/fft.cpp +262 -0
  746. data/mlx/mlx/fft.h +159 -0
  747. data/mlx/mlx/graph_utils.cpp +175 -0
  748. data/mlx/mlx/graph_utils.h +67 -0
  749. data/mlx/mlx/io/CMakeLists.txt +25 -0
  750. data/mlx/mlx/io/gguf.cpp +470 -0
  751. data/mlx/mlx/io/gguf.h +20 -0
  752. data/mlx/mlx/io/gguf_quants.cpp +164 -0
  753. data/mlx/mlx/io/load.cpp +397 -0
  754. data/mlx/mlx/io/load.h +175 -0
  755. data/mlx/mlx/io/no_gguf.cpp +20 -0
  756. data/mlx/mlx/io/no_safetensors.cpp +37 -0
  757. data/mlx/mlx/io/safetensors.cpp +234 -0
  758. data/mlx/mlx/io.h +61 -0
  759. data/mlx/mlx/linalg.cpp +708 -0
  760. data/mlx/mlx/linalg.h +115 -0
  761. data/mlx/mlx/memory.h +80 -0
  762. data/mlx/mlx/mlx.h +25 -0
  763. data/mlx/mlx/ops.cpp +6094 -0
  764. data/mlx/mlx/ops.h +1610 -0
  765. data/mlx/mlx/primitives.cpp +5850 -0
  766. data/mlx/mlx/primitives.h +2525 -0
  767. data/mlx/mlx/random.cpp +492 -0
  768. data/mlx/mlx/random.h +283 -0
  769. data/mlx/mlx/scheduler.cpp +73 -0
  770. data/mlx/mlx/scheduler.h +189 -0
  771. data/mlx/mlx/small_vector.h +540 -0
  772. data/mlx/mlx/stream.h +42 -0
  773. data/mlx/mlx/threadpool.h +133 -0
  774. data/mlx/mlx/transforms.cpp +1065 -0
  775. data/mlx/mlx/transforms.h +231 -0
  776. data/mlx/mlx/transforms_impl.h +88 -0
  777. data/mlx/mlx/types/bf16.h +187 -0
  778. data/mlx/mlx/types/complex.h +113 -0
  779. data/mlx/mlx/types/fp16.h +234 -0
  780. data/mlx/mlx/types/half_types.h +58 -0
  781. data/mlx/mlx/types/limits.h +70 -0
  782. data/mlx/mlx/utils.cpp +302 -0
  783. data/mlx/mlx/utils.h +174 -0
  784. data/mlx/mlx/version.cpp +11 -0
  785. data/mlx/mlx/version.h +22 -0
  786. data/mlx/mlx.pc.in +52 -0
  787. data/mlx/pyproject.toml +7 -0
  788. data/mlx/python/mlx/__main__.py +27 -0
  789. data/mlx/python/mlx/_distributed_utils/common.py +135 -0
  790. data/mlx/python/mlx/_distributed_utils/config.py +631 -0
  791. data/mlx/python/mlx/_distributed_utils/launch.py +570 -0
  792. data/mlx/python/mlx/_reprlib_fix.py +16 -0
  793. data/mlx/python/mlx/_stub_patterns.txt +36 -0
  794. data/mlx/python/mlx/extension.py +88 -0
  795. data/mlx/python/mlx/nn/__init__.py +5 -0
  796. data/mlx/python/mlx/nn/init.py +441 -0
  797. data/mlx/python/mlx/nn/layers/__init__.py +105 -0
  798. data/mlx/python/mlx/nn/layers/activations.py +661 -0
  799. data/mlx/python/mlx/nn/layers/base.py +675 -0
  800. data/mlx/python/mlx/nn/layers/containers.py +24 -0
  801. data/mlx/python/mlx/nn/layers/convolution.py +232 -0
  802. data/mlx/python/mlx/nn/layers/convolution_transpose.py +242 -0
  803. data/mlx/python/mlx/nn/layers/distributed.py +601 -0
  804. data/mlx/python/mlx/nn/layers/dropout.py +137 -0
  805. data/mlx/python/mlx/nn/layers/embedding.py +53 -0
  806. data/mlx/python/mlx/nn/layers/linear.py +180 -0
  807. data/mlx/python/mlx/nn/layers/normalization.py +363 -0
  808. data/mlx/python/mlx/nn/layers/pooling.py +398 -0
  809. data/mlx/python/mlx/nn/layers/positional_encoding.py +162 -0
  810. data/mlx/python/mlx/nn/layers/quantized.py +426 -0
  811. data/mlx/python/mlx/nn/layers/recurrent.py +289 -0
  812. data/mlx/python/mlx/nn/layers/transformer.py +354 -0
  813. data/mlx/python/mlx/nn/layers/upsample.py +277 -0
  814. data/mlx/python/mlx/nn/losses.py +610 -0
  815. data/mlx/python/mlx/nn/utils.py +165 -0
  816. data/mlx/python/mlx/optimizers/__init__.py +4 -0
  817. data/mlx/python/mlx/optimizers/optimizers.py +976 -0
  818. data/mlx/python/mlx/optimizers/schedulers.py +158 -0
  819. data/mlx/python/mlx/py.typed +1 -0
  820. data/mlx/python/mlx/utils.py +325 -0
  821. data/mlx/python/src/CMakeLists.txt +96 -0
  822. data/mlx/python/src/array.cpp +1525 -0
  823. data/mlx/python/src/buffer.h +124 -0
  824. data/mlx/python/src/constants.cpp +15 -0
  825. data/mlx/python/src/convert.cpp +504 -0
  826. data/mlx/python/src/convert.h +50 -0
  827. data/mlx/python/src/cuda.cpp +19 -0
  828. data/mlx/python/src/device.cpp +98 -0
  829. data/mlx/python/src/distributed.cpp +352 -0
  830. data/mlx/python/src/export.cpp +356 -0
  831. data/mlx/python/src/fast.cpp +627 -0
  832. data/mlx/python/src/fft.cpp +514 -0
  833. data/mlx/python/src/indexing.cpp +1016 -0
  834. data/mlx/python/src/indexing.h +41 -0
  835. data/mlx/python/src/linalg.cpp +663 -0
  836. data/mlx/python/src/load.cpp +531 -0
  837. data/mlx/python/src/load.h +51 -0
  838. data/mlx/python/src/memory.cpp +125 -0
  839. data/mlx/python/src/metal.cpp +98 -0
  840. data/mlx/python/src/mlx.cpp +51 -0
  841. data/mlx/python/src/mlx_func.cpp +116 -0
  842. data/mlx/python/src/mlx_func.h +31 -0
  843. data/mlx/python/src/ops.cpp +5545 -0
  844. data/mlx/python/src/random.cpp +516 -0
  845. data/mlx/python/src/small_vector.h +76 -0
  846. data/mlx/python/src/stream.cpp +147 -0
  847. data/mlx/python/src/transforms.cpp +1542 -0
  848. data/mlx/python/src/trees.cpp +311 -0
  849. data/mlx/python/src/trees.h +62 -0
  850. data/mlx/python/src/utils.cpp +98 -0
  851. data/mlx/python/src/utils.h +78 -0
  852. data/mlx/python/tests/__main__.py +5 -0
  853. data/mlx/python/tests/cuda_skip.py +62 -0
  854. data/mlx/python/tests/mlx_distributed_tests.py +314 -0
  855. data/mlx/python/tests/mlx_tests.py +116 -0
  856. data/mlx/python/tests/mpi_test_distributed.py +142 -0
  857. data/mlx/python/tests/nccl_test_distributed.py +52 -0
  858. data/mlx/python/tests/ring_test_distributed.py +131 -0
  859. data/mlx/python/tests/test_array.py +2139 -0
  860. data/mlx/python/tests/test_autograd.py +880 -0
  861. data/mlx/python/tests/test_bf16.py +196 -0
  862. data/mlx/python/tests/test_blas.py +1429 -0
  863. data/mlx/python/tests/test_compile.py +1277 -0
  864. data/mlx/python/tests/test_constants.py +41 -0
  865. data/mlx/python/tests/test_conv.py +1198 -0
  866. data/mlx/python/tests/test_conv_transpose.py +810 -0
  867. data/mlx/python/tests/test_device.py +150 -0
  868. data/mlx/python/tests/test_double.py +306 -0
  869. data/mlx/python/tests/test_einsum.py +363 -0
  870. data/mlx/python/tests/test_eval.py +200 -0
  871. data/mlx/python/tests/test_export_import.py +614 -0
  872. data/mlx/python/tests/test_fast.py +923 -0
  873. data/mlx/python/tests/test_fast_sdpa.py +647 -0
  874. data/mlx/python/tests/test_fft.py +323 -0
  875. data/mlx/python/tests/test_graph.py +37 -0
  876. data/mlx/python/tests/test_init.py +139 -0
  877. data/mlx/python/tests/test_linalg.py +621 -0
  878. data/mlx/python/tests/test_load.py +447 -0
  879. data/mlx/python/tests/test_losses.py +427 -0
  880. data/mlx/python/tests/test_memory.py +77 -0
  881. data/mlx/python/tests/test_nn.py +1986 -0
  882. data/mlx/python/tests/test_ops.py +3261 -0
  883. data/mlx/python/tests/test_optimizers.py +584 -0
  884. data/mlx/python/tests/test_quantized.py +1160 -0
  885. data/mlx/python/tests/test_random.py +392 -0
  886. data/mlx/python/tests/test_reduce.py +223 -0
  887. data/mlx/python/tests/test_tree.py +96 -0
  888. data/mlx/python/tests/test_upsample.py +100 -0
  889. data/mlx/python/tests/test_vmap.py +860 -0
  890. data/mlx/setup.py +315 -0
  891. data/mlx/tests/CMakeLists.txt +44 -0
  892. data/mlx/tests/allocator_tests.cpp +41 -0
  893. data/mlx/tests/arg_reduce_tests.cpp +204 -0
  894. data/mlx/tests/array_tests.cpp +663 -0
  895. data/mlx/tests/autograd_tests.cpp +1399 -0
  896. data/mlx/tests/blas_tests.cpp +110 -0
  897. data/mlx/tests/compile_tests.cpp +818 -0
  898. data/mlx/tests/creations_tests.cpp +239 -0
  899. data/mlx/tests/custom_vjp_tests.cpp +55 -0
  900. data/mlx/tests/device_tests.cpp +35 -0
  901. data/mlx/tests/einsum_tests.cpp +85 -0
  902. data/mlx/tests/eval_tests.cpp +93 -0
  903. data/mlx/tests/export_import_tests.cpp +164 -0
  904. data/mlx/tests/fft_tests.cpp +366 -0
  905. data/mlx/tests/gpu_tests.cpp +523 -0
  906. data/mlx/tests/linalg_tests.cpp +639 -0
  907. data/mlx/tests/load_tests.cpp +270 -0
  908. data/mlx/tests/ops_tests.cpp +4159 -0
  909. data/mlx/tests/random_tests.cpp +716 -0
  910. data/mlx/tests/scheduler_tests.cpp +121 -0
  911. data/mlx/tests/tests.cpp +26 -0
  912. data/mlx/tests/utils_tests.cpp +67 -0
  913. data/mlx/tests/vmap_tests.cpp +547 -0
  914. metadata +958 -0
data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu
@@ -0,0 +1,796 @@
1
+ // Copyright © 2025 Apple Inc.
2
+
3
+ // Required for using M_LOG2E in MSVC.
4
+ #define _USE_MATH_DEFINES
5
+
6
+ #include "mlx/backend/cuda/device.h"
7
+ #include "mlx/backend/cuda/device/config.h"
8
+ #include "mlx/backend/cuda/device/utils.cuh"
9
+ #include "mlx/backend/cuda/kernel_utils.cuh"
10
+ #include "mlx/backend/gpu/copy.h"
11
+ #include "mlx/dtype_utils.h"
12
+
13
+ #include <cooperative_groups.h>
14
+ #include <cooperative_groups/reduce.h>
15
+
16
+ namespace mlx::core {
17
+
18
+ namespace cu {
19
+
20
+ namespace cg = cooperative_groups;
21
+
22
+ #define PRAGMA_LOOP_UNROLL #pragma unroll
23
+
24
+ struct AttnParams {
25
+ int B;
26
+ int H;
27
+ int D;
28
+
29
+ int qL;
30
+ int kL;
31
+
32
+ int gqa_factor;
33
+ float scale;
34
+
35
+ int64_t Q_strides[3];
36
+ int64_t K_strides[3];
37
+ int64_t V_strides[3];
38
+ int64_t O_strides[3];
39
+ };
40
+
41
+ template <typename T, bool do_causal, int D>
42
+ __global__ void kernel_sdpav_1pass(
43
+ const T* Q,
44
+ const T* K,
45
+ const T* V,
46
+ T* O,
47
+ const T* sinks,
48
+ __grid_constant__ const AttnParams params) {
49
+ constexpr int BN = 32;
50
+ constexpr int BD = 32;
51
+
52
+ constexpr int v_per_thread = D / BD;
53
+
54
+ const int inner_k_stride = BN * int(params.K_strides[2]);
55
+ const int inner_v_stride = BN * int(params.V_strides[2]);
56
+
57
+ typedef float U;
58
+
59
+ U q[v_per_thread];
60
+ U k[v_per_thread];
61
+ U o[v_per_thread];
62
+
63
+ __shared__ U outputs[BN][BD + 1];
64
+ __shared__ U max_scores[BN];
65
+ __shared__ U sum_exp_scores[BN];
66
+
67
+ const U scale_log2 = params.scale * M_LOG2E;
68
+
69
+ auto block = cg::this_thread_block();
70
+ auto warp = cg::tiled_partition<32>(block);
71
+
72
+ const int lane_idx = warp.thread_rank();
73
+ const int warp_idx = warp.meta_group_rank();
74
+
75
+ // Adjust to thread block and thread
76
+ const int batch_idx = blockIdx.z;
77
+ const int head_idx = blockIdx.x;
78
+ const int kv_head_idx = head_idx / params.gqa_factor;
79
+
80
+ const int q_seq_idx = blockIdx.y;
81
+ const int kv_seq_idx = warp_idx;
82
+
83
+ Q += batch_idx * params.Q_strides[0] + // Batch
84
+ head_idx * params.Q_strides[1] + // Head
85
+ q_seq_idx * params.Q_strides[2]; // Sequence
86
+
87
+ K += batch_idx * params.K_strides[0] + // Batch
88
+ kv_head_idx * params.K_strides[1] + // Head
89
+ kv_seq_idx * params.K_strides[2]; // Sequence
90
+
91
+ V += batch_idx * params.V_strides[0] + // Batch
92
+ kv_head_idx * params.V_strides[1] + // Head
93
+ kv_seq_idx * params.V_strides[2]; // Sequence
94
+
95
+ O += batch_idx * params.O_strides[0] + // Batch
96
+ head_idx * params.O_strides[1] + // Head
97
+ q_seq_idx * params.O_strides[2]; // Sequence
98
+
99
+ // Read the query and 0 the output accumulator
100
+ PRAGMA_LOOP_UNROLL
101
+ for (int i = 0; i < v_per_thread; i++) {
102
+ q[i] = scale_log2 * static_cast<U>(Q[v_per_thread * lane_idx + i]);
103
+ }
104
+
105
+ PRAGMA_LOOP_UNROLL
106
+ for (int i = 0; i < v_per_thread; i++) {
107
+ o[i] = 0.f;
108
+ }
109
+
110
+ U max_score = Limits<U>::finite_min();
111
+ U sum_exp_score = 0.f;
112
+ if (sinks && warp_idx == 0) {
113
+ max_score = M_LOG2E * static_cast<U>(sinks[head_idx]);
114
+ sum_exp_score = 1.f;
115
+ }
116
+
117
+ // For each key
118
+ for (int i = kv_seq_idx; i < params.kL; i += BN) {
119
+ bool use_key = true;
120
+ if constexpr (do_causal) {
121
+ use_key = i <= (params.kL - params.qL + q_seq_idx);
122
+ }
123
+
124
+ if (use_key) {
125
+ // Read the key
126
+ PRAGMA_LOOP_UNROLL
127
+ for (int j = 0; j < v_per_thread; j++) {
128
+ k[j] = K[v_per_thread * lane_idx + j];
129
+ }
130
+
131
+ // Compute the i-th score
132
+ U score = 0.f;
133
+ PRAGMA_LOOP_UNROLL
134
+ for (int j = 0; j < v_per_thread; j++) {
135
+ score += q[j] * k[j];
136
+ }
137
+
138
+ // Warp sum
139
+ score = cg::reduce(warp, score, cg::plus<U>());
140
+
141
+ // Update the accumulators
142
+ U new_max = max(max_score, score);
143
+ U factor = exp2f(max_score - new_max);
144
+ U exp_score = exp2f(score - new_max);
145
+
146
+ max_score = new_max;
147
+ sum_exp_score = sum_exp_score * factor + exp_score;
148
+
149
+ // Update the output accumulator
150
+ PRAGMA_LOOP_UNROLL
151
+ for (int j = 0; j < v_per_thread; j++) {
152
+ o[j] = o[j] * factor +
153
+ exp_score * static_cast<U>(V[v_per_thread * lane_idx + j]);
154
+ }
155
+ }
156
+
157
+ // Move the pointers to the next kv
158
+ K += inner_k_stride;
159
+ V += inner_v_stride;
160
+ }
161
+
162
+ if (lane_idx == 0) {
163
+ max_scores[warp_idx] = max_score;
164
+ sum_exp_scores[warp_idx] = sum_exp_score;
165
+ }
166
+ block.sync();
167
+
168
+ max_score = max_scores[lane_idx];
169
+ U new_max = cg::reduce(warp, max_score, cg::greater<U>());
170
+ U factor = exp2f(max_score - new_max);
171
+ sum_exp_score =
172
+ cg::reduce(warp, sum_exp_scores[lane_idx] * factor, cg::plus<U>());
173
+ sum_exp_score = sum_exp_score == 0 ? 0 : __frcp_rn(sum_exp_score);
174
+
175
+ // Now we need to aggregate all the outputs
176
+ PRAGMA_LOOP_UNROLL
177
+ for (int i = 0; i < v_per_thread; i++) {
178
+ outputs[lane_idx][warp_idx] = o[i];
179
+ block.sync();
180
+ U ot = outputs[warp_idx][lane_idx] * factor;
181
+ o[i] = cg::reduce(warp, ot, cg::plus<U>()) * sum_exp_score;
182
+ block.sync();
183
+ }
184
+
185
+ // And write the output
186
+ if (lane_idx == 0) {
187
+ PRAGMA_LOOP_UNROLL
188
+ for (int i = 0; i < v_per_thread; i++) {
189
+ O[v_per_thread * warp_idx + i] = static_cast<T>(o[i]);
190
+ }
191
+ }
192
+ }
193
+
+ template <typename T, bool do_causal, int D>
+ __global__ void kernel_sdpav_2pass_1(
+ const T* Q,
+ const T* K,
+ const T* V,
+ const T* sinks,
+ float* partials,
+ float* sums,
+ float* maxs,
+ __grid_constant__ const AttnParams params) {
+ constexpr int BN = 8;
+ constexpr int BD = 32;
+ constexpr int blocks = 32;
+
+ constexpr int v_per_thread = D / BD;
+
+ const int inner_k_stride = blocks * BN * int(params.K_strides[2]);
+ const int inner_v_stride = blocks * BN * int(params.V_strides[2]);
+
+ typedef float U;
+
+ U q[v_per_thread];
+ U k[v_per_thread];
+ U o[v_per_thread];
+
+ __shared__ U outputs[BN][BD + 1];
+ __shared__ U max_scores[BN];
+ __shared__ U sum_exp_scores[BN];
+
+ const U scale_log2 = params.scale * 1.44269504089f;
+
+ auto block = cg::this_thread_block();
+ auto warp = cg::tiled_partition<32>(block);
+
+ const int lane_idx = warp.thread_rank();
+ const int warp_idx = warp.meta_group_rank();
+
+ // Adjust to thread block and thread
+ const int batch_idx = blockIdx.z / blocks;
+ const int block_idx = blockIdx.z % blocks;
+ const int head_idx = blockIdx.x;
+ const int kv_head_idx = head_idx / params.gqa_factor;
+
+ const int q_seq_idx = blockIdx.y;
+ const int kv_seq_idx = block_idx * BN + warp_idx;
+
+ Q += batch_idx * params.Q_strides[0] + // Batch
+ head_idx * params.Q_strides[1] + // Head
+ q_seq_idx * params.Q_strides[2]; // Sequence
+
+ K += batch_idx * params.K_strides[0] + // Batch
+ kv_head_idx * params.K_strides[1] + // Head
+ kv_seq_idx * params.K_strides[2]; // Sequence
+
+ V += batch_idx * params.V_strides[0] + // Batch
+ kv_head_idx * params.V_strides[1] + // Head
+ kv_seq_idx * params.V_strides[2]; // Sequence
+
+ const int p_stride_s = blocks;
+ const int p_stride_h = params.qL * p_stride_s;
+ const int p_stride_b = params.H * p_stride_h;
+ const int p_offset = batch_idx * p_stride_b + // Batch
+ head_idx * p_stride_h + // Head
+ q_seq_idx * p_stride_s + // Sequence
+ block_idx; // Block
+
+ partials += p_offset * D;
+ sums += p_offset;
+ maxs += p_offset;
+
+ // Read the query and 0 the output accumulator
+ PRAGMA_LOOP_UNROLL
+ for (int i = 0; i < v_per_thread; i++) {
+ q[i] = scale_log2 * static_cast<U>(Q[v_per_thread * lane_idx + i]);
+ }
+
+ PRAGMA_LOOP_UNROLL
+ for (int i = 0; i < v_per_thread; i++) {
+ o[i] = 0.f;
+ }
+
+ U max_score = Limits<U>::finite_min();
+ U sum_exp_score = 0.f;
+ if (sinks && warp_idx == 0 && block_idx == 0) {
+ max_score = M_LOG2E * static_cast<U>(sinks[head_idx]);
+ sum_exp_score = 1.f;
+ }
+
+ // For each key
+ for (int i = kv_seq_idx; i < params.kL; i += blocks * BN) {
+ bool use_key = true;
+ if constexpr (do_causal) {
+ use_key = i <= (params.kL - params.qL + q_seq_idx);
+ }
+
+ if (use_key) {
+ // Read the key
+ PRAGMA_LOOP_UNROLL
+ for (int j = 0; j < v_per_thread; j++) {
+ k[j] = K[v_per_thread * lane_idx + j];
+ }
+
+ // Compute the i-th score
+ U score = 0.f;
+ PRAGMA_LOOP_UNROLL
+ for (int j = 0; j < v_per_thread; j++) {
+ score += q[j] * k[j];
+ }
+
+ // Warp sum
+ score = cg::reduce(warp, score, cg::plus<U>());
+
+ // Update the accumulators
+ U new_max = max(max_score, score);
+ U factor = exp2f(max_score - new_max);
+ U exp_score = exp2f(score - new_max);
+
+ max_score = new_max;
+ sum_exp_score = sum_exp_score * factor + exp_score;
+
+ // Update the output accumulator
+ PRAGMA_LOOP_UNROLL
+ for (int j = 0; j < v_per_thread; j++) {
+ o[j] = o[j] * factor +
+ exp_score * static_cast<U>(V[v_per_thread * lane_idx + j]);
+ }
+ }
+
+ // Move the pointers to the next kv
+ K += inner_k_stride;
+ V += inner_v_stride;
+ }
+
+ if (lane_idx == 0) {
+ max_scores[warp_idx] = max_score;
+ sum_exp_scores[warp_idx] = sum_exp_score;
+ }
+
+ block.sync();
+
+ max_score = (lane_idx < BN) ? max_scores[lane_idx] : -1e9;
+ U new_max = cg::reduce(warp, max_score, cg::greater<U>());
+ U factor = exp2f(max_score - new_max);
+ sum_exp_score = (lane_idx < BN) ? sum_exp_scores[lane_idx] : 0.f;
+ sum_exp_score = cg::reduce(warp, sum_exp_score * factor, cg::plus<U>());
+
+ // Write the sum and new max
+ if (warp_idx == 0) {
+ sums[0] = sum_exp_score;
+ maxs[0] = new_max;
+ }
+
+ // Now we need to aggregate all the outputs
+ auto ff = exp2f(max_scores[warp_idx] - new_max);
+ PRAGMA_LOOP_UNROLL
+ for (int i = 0; i < v_per_thread; i++) {
+ outputs[warp_idx][lane_idx] = o[i] * ff;
+ block.sync();
+
+ if (warp_idx == 0) {
+ U ot = outputs[0][lane_idx];
+ PRAGMA_LOOP_UNROLL
+ for (int j = 1; j < BN; j++) {
+ ot += outputs[j][lane_idx];
+ warp.sync();
+ }
+ o[i] = ot;
+ }
+ block.sync();
+ }
+
+ if (warp_idx == 0) {
+ PRAGMA_LOOP_UNROLL
+ for (int i = 0; i < v_per_thread; i++) {
+ partials[v_per_thread * lane_idx + i] = o[i];
+ }
+ }
+ }
+
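+ // Two-pass decode, pass 2: one thread block per (batch, head, query). Each
+ // lane loads one chunk's max and sum of exponentials, the warp reduces them
+ // to the global max and normalizer, and the per-chunk partial outputs are
+ // rescaled by exp2f(chunk_max - global_max), summed, and normalized before
+ // being written to O.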
+ template <typename T, bool do_causal, int D>
+ __global__ void kernel_sdpav_2pass_2(
+ const float* partials,
+ const float* sums,
+ const float* maxs,
+ T* O,
+ __grid_constant__ const AttnParams params) {
+ constexpr int BN = 32;
+ constexpr int BD = 32;
+ constexpr int blocks = 32;
+
+ constexpr int v_per_thread = D / BD;
+
+ typedef float U;
+
+ U o[v_per_thread];
+ __shared__ U outputs[BN][BD + 1];
+
+ auto block = cg::this_thread_block();
+ auto warp = cg::tiled_partition<32>(block);
+
+ const int lane_idx = warp.thread_rank();
+ const int warp_idx = warp.meta_group_rank();
+
+ // Adjust to thread block and thread
+ const int batch_idx = blockIdx.z;
+ const int head_idx = blockIdx.x;
+ const int q_seq_idx = blockIdx.y;
+
+ const int p_stride_s = blocks;
+ const int p_stride_h = params.qL * p_stride_s;
+ const int p_stride_b = params.H * p_stride_h;
+ const int p_offset = batch_idx * p_stride_b + // Batch
+ head_idx * p_stride_h + // Head
+ q_seq_idx * p_stride_s; // Sequence
+
+ partials += p_offset * D + warp_idx * D;
+ sums += p_offset;
+ maxs += p_offset;
+
+ O += batch_idx * params.O_strides[0] + // Batch
+ head_idx * params.O_strides[1] + // Head
+ q_seq_idx * params.O_strides[2]; // Sequence
+
+ U max_score = maxs[lane_idx];
+ U new_max = cg::reduce(warp, max_score, cg::greater<U>());
+ U factor = exp2f(max_score - new_max);
+ U sum_exp_score = cg::reduce(warp, sums[lane_idx] * factor, cg::plus<U>());
+ sum_exp_score = sum_exp_score == 0 ? 0 : __frcp_rn(sum_exp_score);
+
+ PRAGMA_LOOP_UNROLL
+ for (int i = 0; i < v_per_thread; i++) {
+ o[i] = partials[v_per_thread * lane_idx + i];
+ }
+
+ // Now we need to aggregate all the outputs
+ PRAGMA_LOOP_UNROLL
+ for (int i = 0; i < v_per_thread; i++) {
+ outputs[lane_idx][warp_idx] = o[i];
+ block.sync();
+ U ot = outputs[warp_idx][lane_idx] * factor;
+ o[i] = cg::reduce(warp, ot, cg::plus<U>()) * sum_exp_score;
+ block.sync();
+ }
+
+ // And write the output
+ if (lane_idx == 0) {
+ PRAGMA_LOOP_UNROLL
+ for (int i = 0; i < v_per_thread; i++) {
+ O[v_per_thread * warp_idx + i] = static_cast<T>(o[i]);
+ }
+ }
+ }
+
+ } // namespace cu
+
+ namespace {
+
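+ // Compile-time dispatch over the supported head dimensions (64, 96, 128).
+ // Other sizes fall through without invoking f; supports_sdpa_vector below
+ // only admits these sizes, so the switch is exhaustive in practice.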
+ template <typename F>
+ void dispatch_headdim(int n, F&& f) {
+ switch (n) {
+ case 64:
+ f(std::integral_constant<int, 64>{});
+ break;
+ case 96:
+ f(std::integral_constant<int, 96>{});
+ break;
+ case 128:
+ f(std::integral_constant<int, 128>{});
+ break;
+ }
+ }
+
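+ // Single-pass launch: one thread block per (head, query position, batch)
+ // element, grid (H, qL, B), with 1024 threads so the whole key/value
+ // sequence for that query is processed inside one block.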
+ void sdpa_vector_1pass_fallback(
+ const Stream& s,
+ cu::CommandEncoder& encoder,
+ const array& q,
+ const array& k,
+ const array& v,
+ const float scale,
+ array& o,
+ bool do_causal,
+ const std::optional<array>& sinks) {
+ encoder.set_input_array(q);
+ encoder.set_input_array(k);
+ encoder.set_input_array(v);
+ if (sinks) {
+ encoder.set_input_array(*sinks);
+ }
+ encoder.set_output_array(o);
+
+ cu::AttnParams params{
+ /* int B = */ q.shape(0),
+ /* int H = */ q.shape(1),
+ /* int D = */ q.shape(3),
+
+ /* int qL = */ q.shape(2),
+ /* int kL = */ k.shape(2),
+
+ /* int gqa_factor = */ q.shape(1) / k.shape(1),
+ /* float scale = */ scale,
+
+ /* int64_t Q_strides[3] = */ {q.strides(0), q.strides(1), q.strides(2)},
+ /* int64_t K_strides[3] = */ {k.strides(0), k.strides(1), k.strides(2)},
+ /* int64_t V_strides[3] = */ {v.strides(0), v.strides(1), v.strides(2)},
+ /* int64_t O_strides[3] = */ {o.strides(0), o.strides(1), o.strides(2)}};
+
+ dim3 grid_dim(params.H, params.qL, params.B);
+ dim3 block_dim(1024, 1, 1);
+
+ dispatch_float_types(o.dtype(), "kernel_sdpav_1pass", [&](auto type_tag) {
+ dispatch_bool(do_causal, [&](auto do_causal) {
+ dispatch_headdim(params.D, [&](auto headdim) {
+ using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
+
+ auto kernel =
+ cu::kernel_sdpav_1pass<DataType, do_causal.value, headdim.value>;
+ encoder.add_kernel_node(
+ kernel,
+ grid_dim,
+ block_dim,
+ 0,
+ gpu_ptr<DataType>(q),
+ gpu_ptr<DataType>(k),
+ gpu_ptr<DataType>(v),
+ gpu_ptr<DataType>(o),
+ sinks ? gpu_ptr<DataType>(*sinks) : nullptr,
+ params);
+ });
+ });
+ });
+ }
+
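+ // Two-pass launch: float32 intermediates of shape [B, H, qL, blocks, D]
+ // (plus per-chunk sums and maxs) are allocated as temporaries, pass 1 runs
+ // with gridDim.z == B * 32 and 8 * 32 threads per block, and pass 2 reduces
+ // the 32 chunks per query with a single 1024-thread block.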
+ void sdpa_vector_2pass_fallback(
+ const Stream& s,
+ cu::CommandEncoder& encoder,
+ const array& q,
+ const array& k,
+ const array& v,
+ const float scale,
+ array& o,
+ bool do_causal,
+ const std::optional<array>& sinks) {
+ cu::AttnParams params{
+ /* int B = */ q.shape(0),
+ /* int H = */ q.shape(1),
+ /* int D = */ q.shape(3),
+
+ /* int qL = */ q.shape(2),
+ /* int kL = */ k.shape(2),
+
+ /* int gqa_factor = */ q.shape(1) / k.shape(1),
+ /* float scale = */ scale,
+
+ /* int64_t Q_strides[3] = */ {q.strides(0), q.strides(1), q.strides(2)},
+ /* int64_t K_strides[3] = */ {k.strides(0), k.strides(1), k.strides(2)},
+ /* int64_t V_strides[3] = */ {v.strides(0), v.strides(1), v.strides(2)},
+ /* int64_t O_strides[3] = */ {o.strides(0), o.strides(1), o.strides(2)}};
+
+ // Allocate the intermediates
+ int blocks = 32;
+
+ Shape intermediate_shape;
+ intermediate_shape.reserve(o.ndim() + 1);
+ intermediate_shape.insert(
+ intermediate_shape.end(), o.shape().begin(), o.shape().end() - 1);
+ intermediate_shape.push_back(blocks);
+ intermediate_shape.push_back(o.shape().back());
+
+ array intermediate(intermediate_shape, float32, nullptr, {});
+ intermediate_shape.pop_back();
+ array sums(intermediate_shape, float32, nullptr, {});
+ array maxs(std::move(intermediate_shape), float32, nullptr, {});
+
+ intermediate.set_data(cu::malloc_async(intermediate.nbytes(), encoder));
+ sums.set_data(cu::malloc_async(sums.nbytes(), encoder));
+ maxs.set_data(cu::malloc_async(maxs.nbytes(), encoder));
+
+ encoder.add_temporary(intermediate);
+ encoder.add_temporary(sums);
+ encoder.add_temporary(maxs);
+
+ dispatch_float_types(o.dtype(), "kernel_sdpav_2pass", [&](auto type_tag) {
+ dispatch_bool(do_causal, [&](auto do_causal) {
+ dispatch_headdim(params.D, [&](auto headdim) {
+ using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
+
+ {
+ auto kernel = cu::
+ kernel_sdpav_2pass_1<DataType, do_causal.value, headdim.value>;
+
+ encoder.set_input_array(q);
+ encoder.set_input_array(k);
+ encoder.set_input_array(v);
+ if (sinks) {
+ encoder.set_input_array(*sinks);
+ }
+
+ encoder.set_output_array(intermediate);
+ encoder.set_output_array(sums);
+ encoder.set_output_array(maxs);
+
+ dim3 grid_dim(params.H, params.qL, params.B * 32);
+ dim3 block_dim(8 * 32, 1, 1);
+
+ encoder.add_kernel_node(
+ kernel,
+ grid_dim,
+ block_dim,
+ 0,
+ gpu_ptr<DataType>(q),
+ gpu_ptr<DataType>(k),
+ gpu_ptr<DataType>(v),
+ sinks ? gpu_ptr<DataType>(*sinks) : nullptr,
+ gpu_ptr<float>(intermediate),
+ gpu_ptr<float>(sums),
+ gpu_ptr<float>(maxs),
+ params);
+ }
+
+ {
+ auto kernel = cu::
+ kernel_sdpav_2pass_2<DataType, do_causal.value, headdim.value>;
+
+ encoder.set_input_array(intermediate);
+ encoder.set_input_array(sums);
+ encoder.set_input_array(maxs);
+ encoder.set_output_array(o);
+
+ dim3 grid_dim(params.H, params.qL, params.B);
+ dim3 block_dim(1024, 1, 1);
+
+ encoder.add_kernel_node(
+ kernel,
+ grid_dim,
+ block_dim,
+ 0,
+ gpu_ptr<float>(intermediate),
+ gpu_ptr<float>(sums),
+ gpu_ptr<float>(maxs),
+ gpu_ptr<DataType>(o),
+ params);
+ }
+ });
+ });
+ });
+ }
+
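+ // Heuristic: key/value sequences up to 1024 are handled by the single-pass
+ // kernel in one block per query; longer sequences take the two-pass path so
+ // the key/value dimension is split across 32 chunks processed in parallel.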
+ void sdpa_vector_fallback(
+ const Stream& s,
+ cu::CommandEncoder& encoder,
+ const array& q,
+ const array& k,
+ const array& v,
+ const float scale,
+ array& o,
+ bool do_causal,
+ const std::optional<array>& sinks) {
+ int kL = k.shape(2);
+
+ if (kL > 1024) {
+ return sdpa_vector_2pass_fallback(
+ s, encoder, q, k, v, scale, o, do_causal, sinks);
+ } else {
+ return sdpa_vector_1pass_fallback(
+ s, encoder, q, k, v, scale, o, do_causal, sinks);
+ }
+ }
+
+ } // namespace
+
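+ // The vector path is only taken when no logsumexp output is requested, the
+ // query and value head dims match and are one of 64/96/128, the query
+ // sequence is shorter than 4, and no explicit mask array is provided.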
+ bool supports_sdpa_vector(
+ const array& q,
+ const array& k,
+ const array& v,
+ bool has_arr_mask,
+ bool output_logsumexp) {
+ if (output_logsumexp) {
+ return false;
+ }
+
+ const int value_head_dim = v.shape(-1);
+ const int query_head_dim = q.shape(-1);
+ const int query_sequence_length = q.shape(2);
+ const int key_sequence_length = k.shape(2);
+
+ const bool sdpa_supported_head_dim = query_head_dim == value_head_dim &&
+ (query_head_dim == 64 || query_head_dim == 96 || query_head_dim == 128);
+
+ const bool supported_vector_config =
+ sdpa_supported_head_dim && query_sequence_length < 4;
+
+ return supported_vector_config && !has_arr_mask;
+ }
+
+ void sdpa_vector(
+ const array& q_pre,
+ const array& k_pre,
+ const array& v_pre,
+ float scale,
+ array& o,
+ bool do_causal,
+ const std::optional<array>& sinks_pre,
+ Stream s) {
+ auto& encoder = cu::get_command_encoder(s);
+ std::vector<array> copies;
+
+ // Define some copy functions to ensure the layout of the inputs is as
+ // expected.
+ copies.reserve(4);
+ auto copy_unless = [&copies, &s](
+ auto predicate, const array& arr) -> const array& {
+ if (!predicate(arr)) {
+ array arr_copy = contiguous_copy_gpu(arr, s);
+ copies.push_back(std::move(arr_copy));
+ return copies.back();
+ } else {
+ return arr;
+ }
+ };
+
+ // Checks that the headdim dimension has stride 1.
+ auto is_matrix_contiguous = [](const array& arr) {
+ return arr.strides(-1) == 1;
+ };
+
+ std::optional<array> sinks = std::nullopt;
+ if (sinks_pre) {
+ sinks = copy_unless(is_matrix_contiguous, sinks_pre.value());
+ }
+
+ // We are in vector mode ie single query
+ if (q_pre.shape(2) < 4) {
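+ // The query is used as-is when it is row contiguous, or when either the
+ // batch or the head dimension is a singleton and the sequence dimension is
+ // transposed with the other of the two; any other layout is copied to a
+ // contiguous buffer first.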
+ auto q_copy_unless = [](const array& arr) {
+ if (arr.flags().row_contiguous) {
+ return true;
+ }
+ auto& strides = arr.strides();
+ auto& shape = arr.shape();
+ if (shape[0] == 1 || shape[1] == 1) {
+ // If either the batch or head dimension is a singleton, the other can
+ // be transposed with the sequence dimension
+ auto bidx = shape[0] == 1 ? 1 : 0;
+ return (strides[3] == 1) && (strides[2] == shape[3] * shape[bidx]) &&
+ (strides[bidx] == shape[3]);
+ }
+ return false;
+ };
+
+ auto kv_copy_unless = [](const array& arr) {
+ // keys and values should be copied if:
+ // - the last dimension is not contiguous
+ // - the batch and head dim are not contiguous
+ auto& strides = arr.strides();
+ auto& shape = arr.shape();
+ if (strides.back() != 1) {
+ return false;
+ }
+ if (shape[0] == 1 || shape[1] == 1) {
+ return true;
+ }
+ return (strides[0] == strides[1] * shape[1]);
+ };
+
+ const auto& q = copy_unless(q_copy_unless, q_pre);
+ const auto& k = copy_unless(kv_copy_unless, k_pre);
+ const auto& v = copy_unless(kv_copy_unless, v_pre);
+
+ // Donate the query if possible
+ if (q.is_donatable() && q.flags().row_contiguous && q.size() == o.size()) {
+ o.copy_shared_buffer(q);
+ } else {
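+ // Otherwise allocate O with strides that lay it out as
+ // (batch, sequence, head, head_dim) in memory, i.e. the head dimension is
+ // innermost and heads are interleaved within each sequence position.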
+ int64_t str_oD = 1;
+ int64_t str_oH = o.shape(3);
+ int64_t str_oL = o.shape(1) * str_oH;
+ int64_t str_oB = o.shape(2) * str_oL;
+
+ array::Flags flags{
+ /* bool contiguous = */ 1,
+ /* bool row_contiguous = */ o.shape(2) == 1,
+ /* bool col_contiguous = */ o.size() == o.shape(3),
+ };
+
+ o.set_data(
+ cu::malloc_async(o.nbytes(), encoder),
+ o.size(),
+ {str_oB, str_oH, str_oL, str_oD},
+ flags);
+ }
+
+ for (const auto& cp : copies) {
+ encoder.add_temporary(cp);
+ }
+
+ sdpa_vector_fallback(s, encoder, q, k, v, scale, o, do_causal, sinks);
+ }
+
+ // Full attention mode should never reach here
+ else {
+ throw std::runtime_error("Doesn't support matrix yet.");
+ }
+ }
+
+ } // namespace mlx::core