mlx 1.0.0

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mlx might be problematic.

Files changed (914)
  1. checksums.yaml +7 -0
  2. data/ext/mlx/CMakeLists.txt +7 -0
  3. data/ext/mlx/Makefile +273 -0
  4. data/ext/mlx/extconf.rb +94 -0
  5. data/ext/mlx/mkmf.log +44 -0
  6. data/ext/mlx/native.bundle +0 -0
  7. data/ext/mlx/native.bundle.dSYM/Contents/Info.plist +20 -0
  8. data/ext/mlx/native.bundle.dSYM/Contents/Resources/DWARF/native.bundle +0 -0
  9. data/ext/mlx/native.bundle.dSYM/Contents/Resources/Relocations/aarch64/native.bundle.yml +5 -0
  10. data/ext/mlx/native.cpp +8027 -0
  11. data/ext/mlx/native.o +0 -0
  12. data/lib/mlx/core.rb +1678 -0
  13. data/lib/mlx/distributed_utils/common.rb +116 -0
  14. data/lib/mlx/distributed_utils/config.rb +600 -0
  15. data/lib/mlx/distributed_utils/launch.rb +490 -0
  16. data/lib/mlx/extension.rb +24 -0
  17. data/lib/mlx/nn/base.rb +388 -0
  18. data/lib/mlx/nn/init.rb +140 -0
  19. data/lib/mlx/nn/layers/activations.rb +336 -0
  20. data/lib/mlx/nn/layers/base.rb +6 -0
  21. data/lib/mlx/nn/layers/containers.rb +20 -0
  22. data/lib/mlx/nn/layers/convolution.rb +120 -0
  23. data/lib/mlx/nn/layers/convolution_transpose.rb +114 -0
  24. data/lib/mlx/nn/layers/distributed.rb +309 -0
  25. data/lib/mlx/nn/layers/dropout.rb +75 -0
  26. data/lib/mlx/nn/layers/embedding.rb +28 -0
  27. data/lib/mlx/nn/layers/linear.rb +79 -0
  28. data/lib/mlx/nn/layers/normalization.rb +216 -0
  29. data/lib/mlx/nn/layers/pooling.rb +167 -0
  30. data/lib/mlx/nn/layers/positional_encoding.rb +126 -0
  31. data/lib/mlx/nn/layers/quantized.rb +215 -0
  32. data/lib/mlx/nn/layers/recurrent.rb +135 -0
  33. data/lib/mlx/nn/layers/transformer.rb +330 -0
  34. data/lib/mlx/nn/layers/upsample.rb +97 -0
  35. data/lib/mlx/nn/layers.rb +18 -0
  36. data/lib/mlx/nn/losses.rb +251 -0
  37. data/lib/mlx/nn/utils.rb +167 -0
  38. data/lib/mlx/nn.rb +12 -0
  39. data/lib/mlx/optimizers/optimizers.rb +808 -0
  40. data/lib/mlx/optimizers/schedulers.rb +62 -0
  41. data/lib/mlx/optimizers.rb +9 -0
  42. data/lib/mlx/utils.rb +171 -0
  43. data/lib/mlx/version +1 -0
  44. data/lib/mlx/version.rb +5 -0
  45. data/lib/mlx.rb +64 -0
  46. data/mlx/.clang-format +87 -0
  47. data/mlx/.git +1 -0
  48. data/mlx/.github/ISSUE_TEMPLATE/bug_report.md +28 -0
  49. data/mlx/.github/actions/build-cuda-release/action.yml +31 -0
  50. data/mlx/.github/actions/build-docs/action.yml +38 -0
  51. data/mlx/.github/actions/build-linux/action.yml +38 -0
  52. data/mlx/.github/actions/build-linux-release/action.yml +42 -0
  53. data/mlx/.github/actions/build-macos/action.yml +80 -0
  54. data/mlx/.github/actions/build-macos-release/action.yml +36 -0
  55. data/mlx/.github/actions/build-windows/action.yml +26 -0
  56. data/mlx/.github/actions/setup-linux/action.yml +93 -0
  57. data/mlx/.github/actions/setup-macos/action.yml +24 -0
  58. data/mlx/.github/actions/setup-windows/action.yml +42 -0
  59. data/mlx/.github/actions/test-linux/action.yml +69 -0
  60. data/mlx/.github/actions/test-windows/action.yml +20 -0
  61. data/mlx/.github/dependabot.yml +6 -0
  62. data/mlx/.github/pull_request_template.md +12 -0
  63. data/mlx/.github/scripts/build-sanitizer-tests.sh +48 -0
  64. data/mlx/.github/scripts/setup+build-cpp-linux-fedora-container.sh +27 -0
  65. data/mlx/.github/workflows/build_and_test.yml +152 -0
  66. data/mlx/.github/workflows/documentation.yml +28 -0
  67. data/mlx/.github/workflows/nightly.yml +104 -0
  68. data/mlx/.github/workflows/release.yml +256 -0
  69. data/mlx/.gitignore +81 -0
  70. data/mlx/.pre-commit-config.yaml +27 -0
  71. data/mlx/ACKNOWLEDGMENTS.md +268 -0
  72. data/mlx/CITATION.cff +24 -0
  73. data/mlx/CMakeLists.txt +437 -0
  74. data/mlx/CODE_OF_CONDUCT.md +132 -0
  75. data/mlx/CONTRIBUTING.md +38 -0
  76. data/mlx/LICENSE +21 -0
  77. data/mlx/MANIFEST.in +6 -0
  78. data/mlx/README.md +121 -0
  79. data/mlx/benchmarks/cpp/CMakeLists.txt +11 -0
  80. data/mlx/benchmarks/cpp/autograd.cpp +39 -0
  81. data/mlx/benchmarks/cpp/compare_devices.cpp +27 -0
  82. data/mlx/benchmarks/cpp/irregular_strides.cpp +201 -0
  83. data/mlx/benchmarks/cpp/single_ops.cpp +288 -0
  84. data/mlx/benchmarks/cpp/time_utils.h +39 -0
  85. data/mlx/benchmarks/numpy/single_ops.py +39 -0
  86. data/mlx/benchmarks/numpy/time_utils.py +20 -0
  87. data/mlx/benchmarks/python/batch_matmul_bench.py +62 -0
  88. data/mlx/benchmarks/python/blas/bench_gemm.py +191 -0
  89. data/mlx/benchmarks/python/blas/bench_gemv.py +220 -0
  90. data/mlx/benchmarks/python/comparative/README.md +15 -0
  91. data/mlx/benchmarks/python/comparative/bench_mlx.py +519 -0
  92. data/mlx/benchmarks/python/comparative/bench_torch.py +482 -0
  93. data/mlx/benchmarks/python/comparative/compare.py +284 -0
  94. data/mlx/benchmarks/python/compile_bench.py +107 -0
  95. data/mlx/benchmarks/python/conv1d_bench.py +123 -0
  96. data/mlx/benchmarks/python/conv2d_bench_cpu.py +127 -0
  97. data/mlx/benchmarks/python/conv2d_train_bench_cpu.py +143 -0
  98. data/mlx/benchmarks/python/conv2d_transpose_bench_cpu.py +129 -0
  99. data/mlx/benchmarks/python/conv3d_bench_cpu.py +110 -0
  100. data/mlx/benchmarks/python/conv3d_train_bench_cpu.py +143 -0
  101. data/mlx/benchmarks/python/conv3d_transpose_bench_cpu.py +116 -0
  102. data/mlx/benchmarks/python/conv_bench.py +135 -0
  103. data/mlx/benchmarks/python/conv_transpose_bench.py +135 -0
  104. data/mlx/benchmarks/python/conv_unaligned_bench.py +107 -0
  105. data/mlx/benchmarks/python/distributed_bench.py +66 -0
  106. data/mlx/benchmarks/python/einsum_bench.py +84 -0
  107. data/mlx/benchmarks/python/fft_bench.py +118 -0
  108. data/mlx/benchmarks/python/gather_bench.py +52 -0
  109. data/mlx/benchmarks/python/gather_mm_bench.py +74 -0
  110. data/mlx/benchmarks/python/gather_qmm_bench.py +84 -0
  111. data/mlx/benchmarks/python/hadamard_bench.py +70 -0
  112. data/mlx/benchmarks/python/large_gemm_bench.py +119 -0
  113. data/mlx/benchmarks/python/layer_norm_bench.py +82 -0
  114. data/mlx/benchmarks/python/masked_scatter.py +212 -0
  115. data/mlx/benchmarks/python/rms_norm_bench.py +63 -0
  116. data/mlx/benchmarks/python/rope_bench.py +35 -0
  117. data/mlx/benchmarks/python/scatter_bench.py +96 -0
  118. data/mlx/benchmarks/python/sdpa_bench.py +223 -0
  119. data/mlx/benchmarks/python/sdpa_vector_bench.py +95 -0
  120. data/mlx/benchmarks/python/single_ops.py +132 -0
  121. data/mlx/benchmarks/python/synchronize_bench.py +55 -0
  122. data/mlx/benchmarks/python/time_utils.py +38 -0
  123. data/mlx/cmake/FindCUDNN.cmake +177 -0
  124. data/mlx/cmake/FindNCCL.cmake +54 -0
  125. data/mlx/cmake/Findnvpl.cmake +3 -0
  126. data/mlx/cmake/extension.cmake +50 -0
  127. data/mlx/docs/.clang-format +2 -0
  128. data/mlx/docs/.gitignore +3 -0
  129. data/mlx/docs/.nojekyll +0 -0
  130. data/mlx/docs/Doxyfile +51 -0
  131. data/mlx/docs/Makefile +18 -0
  132. data/mlx/docs/README.md +54 -0
  133. data/mlx/docs/index.html +1 -0
  134. data/mlx/docs/requirements.txt +5 -0
  135. data/mlx/docs/src/_static/distributed/m3-ultra-mesh-broken.png +0 -0
  136. data/mlx/docs/src/_static/distributed/m3-ultra-mesh.png +0 -0
  137. data/mlx/docs/src/_static/metal_debugger/capture.png +0 -0
  138. data/mlx/docs/src/_static/metal_debugger/schema.png +0 -0
  139. data/mlx/docs/src/_static/mlx_logo.png +0 -0
  140. data/mlx/docs/src/_static/mlx_logo_dark.png +0 -0
  141. data/mlx/docs/src/_static/tp_inference/all-to-sharded-linear.png +0 -0
  142. data/mlx/docs/src/_static/tp_inference/column-row-tp.png +0 -0
  143. data/mlx/docs/src/_static/tp_inference/llama-transformer.png +0 -0
  144. data/mlx/docs/src/_static/tp_inference/sharded-to-all-linear.png +0 -0
  145. data/mlx/docs/src/_templates/module-base-class.rst +33 -0
  146. data/mlx/docs/src/_templates/nn-module-template.rst +20 -0
  147. data/mlx/docs/src/_templates/optimizers-template.rst +20 -0
  148. data/mlx/docs/src/conf.py +99 -0
  149. data/mlx/docs/src/cpp/ops.rst +7 -0
  150. data/mlx/docs/src/dev/custom_metal_kernels.rst +445 -0
  151. data/mlx/docs/src/dev/extensions.rst +811 -0
  152. data/mlx/docs/src/dev/metal_debugger.rst +68 -0
  153. data/mlx/docs/src/dev/metal_logging.rst +40 -0
  154. data/mlx/docs/src/dev/mlx_in_cpp.rst +121 -0
  155. data/mlx/docs/src/examples/data_parallelism.rst +91 -0
  156. data/mlx/docs/src/examples/linear_regression.rst +77 -0
  157. data/mlx/docs/src/examples/llama-inference.rst +382 -0
  158. data/mlx/docs/src/examples/mlp.rst +134 -0
  159. data/mlx/docs/src/examples/tensor_parallelism.rst +239 -0
  160. data/mlx/docs/src/index.rst +96 -0
  161. data/mlx/docs/src/install.rst +340 -0
  162. data/mlx/docs/src/python/array.rst +65 -0
  163. data/mlx/docs/src/python/cuda.rst +9 -0
  164. data/mlx/docs/src/python/data_types.rst +78 -0
  165. data/mlx/docs/src/python/devices_and_streams.rst +21 -0
  166. data/mlx/docs/src/python/distributed.rst +22 -0
  167. data/mlx/docs/src/python/export.rst +14 -0
  168. data/mlx/docs/src/python/fast.rst +16 -0
  169. data/mlx/docs/src/python/fft.rst +24 -0
  170. data/mlx/docs/src/python/linalg.rst +27 -0
  171. data/mlx/docs/src/python/memory_management.rst +16 -0
  172. data/mlx/docs/src/python/metal.rst +12 -0
  173. data/mlx/docs/src/python/nn/distributed.rst +30 -0
  174. data/mlx/docs/src/python/nn/functions.rst +40 -0
  175. data/mlx/docs/src/python/nn/init.rst +45 -0
  176. data/mlx/docs/src/python/nn/layers.rst +74 -0
  177. data/mlx/docs/src/python/nn/losses.rst +25 -0
  178. data/mlx/docs/src/python/nn/module.rst +38 -0
  179. data/mlx/docs/src/python/nn.rst +186 -0
  180. data/mlx/docs/src/python/ops.rst +184 -0
  181. data/mlx/docs/src/python/optimizers/common_optimizers.rst +22 -0
  182. data/mlx/docs/src/python/optimizers/optimizer.rst +23 -0
  183. data/mlx/docs/src/python/optimizers/schedulers.rst +15 -0
  184. data/mlx/docs/src/python/optimizers.rst +78 -0
  185. data/mlx/docs/src/python/random.rst +48 -0
  186. data/mlx/docs/src/python/transforms.rst +22 -0
  187. data/mlx/docs/src/python/tree_utils.rst +23 -0
  188. data/mlx/docs/src/usage/compile.rst +516 -0
  189. data/mlx/docs/src/usage/distributed.rst +572 -0
  190. data/mlx/docs/src/usage/export.rst +288 -0
  191. data/mlx/docs/src/usage/function_transforms.rst +191 -0
  192. data/mlx/docs/src/usage/indexing.rst +194 -0
  193. data/mlx/docs/src/usage/launching_distributed.rst +234 -0
  194. data/mlx/docs/src/usage/lazy_evaluation.rst +144 -0
  195. data/mlx/docs/src/usage/numpy.rst +124 -0
  196. data/mlx/docs/src/usage/quick_start.rst +67 -0
  197. data/mlx/docs/src/usage/saving_and_loading.rst +81 -0
  198. data/mlx/docs/src/usage/unified_memory.rst +78 -0
  199. data/mlx/docs/src/usage/using_streams.rst +18 -0
  200. data/mlx/examples/cmake_project/CMakeLists.txt +22 -0
  201. data/mlx/examples/cmake_project/README.md +26 -0
  202. data/mlx/examples/cmake_project/example.cpp +14 -0
  203. data/mlx/examples/cpp/CMakeLists.txt +12 -0
  204. data/mlx/examples/cpp/distributed.cpp +22 -0
  205. data/mlx/examples/cpp/linear_regression.cpp +54 -0
  206. data/mlx/examples/cpp/logistic_regression.cpp +54 -0
  207. data/mlx/examples/cpp/metal_capture.cpp +31 -0
  208. data/mlx/examples/cpp/timer.h +20 -0
  209. data/mlx/examples/cpp/tutorial.cpp +99 -0
  210. data/mlx/examples/export/CMakeLists.txt +22 -0
  211. data/mlx/examples/export/README.md +49 -0
  212. data/mlx/examples/export/eval_mlp.cpp +25 -0
  213. data/mlx/examples/export/eval_mlp.py +52 -0
  214. data/mlx/examples/export/train_mlp.cpp +35 -0
  215. data/mlx/examples/export/train_mlp.py +76 -0
  216. data/mlx/examples/extensions/CMakeLists.txt +78 -0
  217. data/mlx/examples/extensions/README.md +24 -0
  218. data/mlx/examples/extensions/axpby/axpby.cpp +306 -0
  219. data/mlx/examples/extensions/axpby/axpby.h +90 -0
  220. data/mlx/examples/extensions/axpby/axpby.metal +47 -0
  221. data/mlx/examples/extensions/bindings.cpp +39 -0
  222. data/mlx/examples/extensions/mlx_sample_extensions/__init__.py +5 -0
  223. data/mlx/examples/extensions/pyproject.toml +8 -0
  224. data/mlx/examples/extensions/requirements.txt +4 -0
  225. data/mlx/examples/extensions/setup.py +18 -0
  226. data/mlx/examples/extensions/test.py +12 -0
  227. data/mlx/examples/python/linear_regression.py +46 -0
  228. data/mlx/examples/python/logistic_regression.py +49 -0
  229. data/mlx/examples/python/qqmm.py +117 -0
  230. data/mlx/mlx/3rdparty/.clang-format +2 -0
  231. data/mlx/mlx/3rdparty/pocketfft.h +3581 -0
  232. data/mlx/mlx/CMakeLists.txt +107 -0
  233. data/mlx/mlx/allocator.h +75 -0
  234. data/mlx/mlx/api.h +29 -0
  235. data/mlx/mlx/array.cpp +354 -0
  236. data/mlx/mlx/array.h +647 -0
  237. data/mlx/mlx/backend/common/CMakeLists.txt +9 -0
  238. data/mlx/mlx/backend/common/binary.h +97 -0
  239. data/mlx/mlx/backend/common/broadcasting.cpp +24 -0
  240. data/mlx/mlx/backend/common/broadcasting.h +11 -0
  241. data/mlx/mlx/backend/common/buffer_cache.h +158 -0
  242. data/mlx/mlx/backend/common/common.cpp +305 -0
  243. data/mlx/mlx/backend/common/compiled.cpp +243 -0
  244. data/mlx/mlx/backend/common/compiled.h +77 -0
  245. data/mlx/mlx/backend/common/copy.h +50 -0
  246. data/mlx/mlx/backend/common/hadamard.h +109 -0
  247. data/mlx/mlx/backend/common/load.cpp +57 -0
  248. data/mlx/mlx/backend/common/matmul.h +67 -0
  249. data/mlx/mlx/backend/common/reduce.cpp +154 -0
  250. data/mlx/mlx/backend/common/reduce.h +59 -0
  251. data/mlx/mlx/backend/common/slicing.cpp +71 -0
  252. data/mlx/mlx/backend/common/slicing.h +20 -0
  253. data/mlx/mlx/backend/common/ternary.h +85 -0
  254. data/mlx/mlx/backend/common/unary.h +29 -0
  255. data/mlx/mlx/backend/common/utils.cpp +231 -0
  256. data/mlx/mlx/backend/common/utils.h +205 -0
  257. data/mlx/mlx/backend/cpu/CMakeLists.txt +88 -0
  258. data/mlx/mlx/backend/cpu/arange.h +28 -0
  259. data/mlx/mlx/backend/cpu/arg_reduce.cpp +124 -0
  260. data/mlx/mlx/backend/cpu/binary.cpp +269 -0
  261. data/mlx/mlx/backend/cpu/binary.h +517 -0
  262. data/mlx/mlx/backend/cpu/binary_ops.h +98 -0
  263. data/mlx/mlx/backend/cpu/binary_two.h +166 -0
  264. data/mlx/mlx/backend/cpu/cholesky.cpp +85 -0
  265. data/mlx/mlx/backend/cpu/compiled.cpp +357 -0
  266. data/mlx/mlx/backend/cpu/compiled_preamble.h +12 -0
  267. data/mlx/mlx/backend/cpu/conv.cpp +1351 -0
  268. data/mlx/mlx/backend/cpu/copy.cpp +386 -0
  269. data/mlx/mlx/backend/cpu/copy.h +36 -0
  270. data/mlx/mlx/backend/cpu/device_info.cpp +113 -0
  271. data/mlx/mlx/backend/cpu/device_info.h +28 -0
  272. data/mlx/mlx/backend/cpu/distributed.cpp +103 -0
  273. data/mlx/mlx/backend/cpu/eig.cpp +281 -0
  274. data/mlx/mlx/backend/cpu/eigh.cpp +241 -0
  275. data/mlx/mlx/backend/cpu/encoder.cpp +16 -0
  276. data/mlx/mlx/backend/cpu/encoder.h +67 -0
  277. data/mlx/mlx/backend/cpu/eval.cpp +40 -0
  278. data/mlx/mlx/backend/cpu/eval.h +12 -0
  279. data/mlx/mlx/backend/cpu/fft.cpp +120 -0
  280. data/mlx/mlx/backend/cpu/gemm.h +26 -0
  281. data/mlx/mlx/backend/cpu/gemms/bnns.cpp +214 -0
  282. data/mlx/mlx/backend/cpu/gemms/cblas.cpp +134 -0
  283. data/mlx/mlx/backend/cpu/gemms/simd_bf16.cpp +45 -0
  284. data/mlx/mlx/backend/cpu/gemms/simd_fp16.cpp +45 -0
  285. data/mlx/mlx/backend/cpu/gemms/simd_gemm.h +139 -0
  286. data/mlx/mlx/backend/cpu/hadamard.cpp +121 -0
  287. data/mlx/mlx/backend/cpu/indexing.cpp +854 -0
  288. data/mlx/mlx/backend/cpu/inverse.cpp +160 -0
  289. data/mlx/mlx/backend/cpu/jit_compiler.cpp +166 -0
  290. data/mlx/mlx/backend/cpu/jit_compiler.h +20 -0
  291. data/mlx/mlx/backend/cpu/lapack.h +80 -0
  292. data/mlx/mlx/backend/cpu/logsumexp.cpp +139 -0
  293. data/mlx/mlx/backend/cpu/luf.cpp +120 -0
  294. data/mlx/mlx/backend/cpu/make_compiled_preamble.ps1 +38 -0
  295. data/mlx/mlx/backend/cpu/make_compiled_preamble.sh +41 -0
  296. data/mlx/mlx/backend/cpu/masked_mm.cpp +608 -0
  297. data/mlx/mlx/backend/cpu/matmul.cpp +166 -0
  298. data/mlx/mlx/backend/cpu/primitives.cpp +478 -0
  299. data/mlx/mlx/backend/cpu/qrf.cpp +147 -0
  300. data/mlx/mlx/backend/cpu/quantized.cpp +1370 -0
  301. data/mlx/mlx/backend/cpu/reduce.cpp +587 -0
  302. data/mlx/mlx/backend/cpu/scan.cpp +338 -0
  303. data/mlx/mlx/backend/cpu/select.cpp +95 -0
  304. data/mlx/mlx/backend/cpu/simd/accelerate_fp16_simd.h +56 -0
  305. data/mlx/mlx/backend/cpu/simd/accelerate_simd.h +329 -0
  306. data/mlx/mlx/backend/cpu/simd/base_simd.h +319 -0
  307. data/mlx/mlx/backend/cpu/simd/math.h +193 -0
  308. data/mlx/mlx/backend/cpu/simd/neon_fp16_simd.h +212 -0
  309. data/mlx/mlx/backend/cpu/simd/simd.h +4 -0
  310. data/mlx/mlx/backend/cpu/simd/type.h +11 -0
  311. data/mlx/mlx/backend/cpu/slicing.h +21 -0
  312. data/mlx/mlx/backend/cpu/softmax.cpp +170 -0
  313. data/mlx/mlx/backend/cpu/sort.cpp +481 -0
  314. data/mlx/mlx/backend/cpu/svd.cpp +289 -0
  315. data/mlx/mlx/backend/cpu/ternary.h +154 -0
  316. data/mlx/mlx/backend/cpu/threefry.cpp +31 -0
  317. data/mlx/mlx/backend/cpu/threefry.h +21 -0
  318. data/mlx/mlx/backend/cpu/unary.cpp +238 -0
  319. data/mlx/mlx/backend/cpu/unary.h +281 -0
  320. data/mlx/mlx/backend/cpu/unary_ops.h +175 -0
  321. data/mlx/mlx/backend/cuda/CMakeLists.txt +265 -0
  322. data/mlx/mlx/backend/cuda/allocator.cpp +451 -0
  323. data/mlx/mlx/backend/cuda/allocator.h +94 -0
  324. data/mlx/mlx/backend/cuda/arange.cu +68 -0
  325. data/mlx/mlx/backend/cuda/arg_reduce.cu +189 -0
  326. data/mlx/mlx/backend/cuda/bin2h.cmake +150 -0
  327. data/mlx/mlx/backend/cuda/binary/CMakeLists.txt +21 -0
  328. data/mlx/mlx/backend/cuda/binary/add.cu +7 -0
  329. data/mlx/mlx/backend/cuda/binary/arctan2.cu +7 -0
  330. data/mlx/mlx/backend/cuda/binary/binary.cuh +383 -0
  331. data/mlx/mlx/backend/cuda/binary/bitwise_binary.cu +27 -0
  332. data/mlx/mlx/backend/cuda/binary/divide.cu +7 -0
  333. data/mlx/mlx/backend/cuda/binary/equal.cu +15 -0
  334. data/mlx/mlx/backend/cuda/binary/greater.cu +7 -0
  335. data/mlx/mlx/backend/cuda/binary/greater_equal.cu +7 -0
  336. data/mlx/mlx/backend/cuda/binary/less.cu +7 -0
  337. data/mlx/mlx/backend/cuda/binary/less_equal.cu +7 -0
  338. data/mlx/mlx/backend/cuda/binary/log_add_exp.cu +7 -0
  339. data/mlx/mlx/backend/cuda/binary/logical_and.cu +7 -0
  340. data/mlx/mlx/backend/cuda/binary/logical_or.cu +7 -0
  341. data/mlx/mlx/backend/cuda/binary/maximum.cu +7 -0
  342. data/mlx/mlx/backend/cuda/binary/minimum.cu +7 -0
  343. data/mlx/mlx/backend/cuda/binary/multiply.cu +7 -0
  344. data/mlx/mlx/backend/cuda/binary/not_equal.cu +7 -0
  345. data/mlx/mlx/backend/cuda/binary/power.cu +7 -0
  346. data/mlx/mlx/backend/cuda/binary/remainder.cu +7 -0
  347. data/mlx/mlx/backend/cuda/binary/subtract.cu +7 -0
  348. data/mlx/mlx/backend/cuda/binary_two.cu +412 -0
  349. data/mlx/mlx/backend/cuda/compiled.cpp +357 -0
  350. data/mlx/mlx/backend/cuda/conv/conv.h +126 -0
  351. data/mlx/mlx/backend/cuda/conv/gemm_conv.cu +217 -0
  352. data/mlx/mlx/backend/cuda/conv/gemm_grouped_conv.cu +231 -0
  353. data/mlx/mlx/backend/cuda/conv.cpp +403 -0
  354. data/mlx/mlx/backend/cuda/copy/copy.cuh +55 -0
  355. data/mlx/mlx/backend/cuda/copy/copy_contiguous.cu +88 -0
  356. data/mlx/mlx/backend/cuda/copy/copy_general.cu +171 -0
  357. data/mlx/mlx/backend/cuda/copy/copy_general_dynamic.cu +118 -0
  358. data/mlx/mlx/backend/cuda/copy/copy_general_input.cu +229 -0
  359. data/mlx/mlx/backend/cuda/copy.cu +132 -0
  360. data/mlx/mlx/backend/cuda/cublas_utils.cpp +222 -0
  361. data/mlx/mlx/backend/cuda/cublas_utils.h +95 -0
  362. data/mlx/mlx/backend/cuda/cuda.h +21 -0
  363. data/mlx/mlx/backend/cuda/cuda_utils.h +90 -0
  364. data/mlx/mlx/backend/cuda/cudnn_utils.cpp +133 -0
  365. data/mlx/mlx/backend/cuda/cudnn_utils.h +187 -0
  366. data/mlx/mlx/backend/cuda/custom_kernel.cpp +379 -0
  367. data/mlx/mlx/backend/cuda/cutlass_utils.cuh +46 -0
  368. data/mlx/mlx/backend/cuda/delayload.cpp +80 -0
  369. data/mlx/mlx/backend/cuda/device/atomic_ops.cuh +63 -0
  370. data/mlx/mlx/backend/cuda/device/binary_ops.cuh +300 -0
  371. data/mlx/mlx/backend/cuda/device/cast_op.cuh +118 -0
  372. data/mlx/mlx/backend/cuda/device/complex.cuh +60 -0
  373. data/mlx/mlx/backend/cuda/device/config.h +12 -0
  374. data/mlx/mlx/backend/cuda/device/fp16_math.cuh +96 -0
  375. data/mlx/mlx/backend/cuda/device/gather.cuh +53 -0
  376. data/mlx/mlx/backend/cuda/device/gather_axis.cuh +65 -0
  377. data/mlx/mlx/backend/cuda/device/indexing.cuh +30 -0
  378. data/mlx/mlx/backend/cuda/device/scatter.cuh +68 -0
  379. data/mlx/mlx/backend/cuda/device/scatter_axis.cuh +67 -0
  380. data/mlx/mlx/backend/cuda/device/scatter_ops.cuh +44 -0
  381. data/mlx/mlx/backend/cuda/device/ternary_ops.cuh +13 -0
  382. data/mlx/mlx/backend/cuda/device/unary_ops.cuh +350 -0
  383. data/mlx/mlx/backend/cuda/device/utils.cuh +464 -0
  384. data/mlx/mlx/backend/cuda/device.cpp +522 -0
  385. data/mlx/mlx/backend/cuda/device.h +195 -0
  386. data/mlx/mlx/backend/cuda/device_info.cpp +232 -0
  387. data/mlx/mlx/backend/cuda/distributed.cu +121 -0
  388. data/mlx/mlx/backend/cuda/eval.cpp +66 -0
  389. data/mlx/mlx/backend/cuda/event.cu +415 -0
  390. data/mlx/mlx/backend/cuda/event.h +79 -0
  391. data/mlx/mlx/backend/cuda/fence.cpp +42 -0
  392. data/mlx/mlx/backend/cuda/gemms/cublas_gemm.cpp +233 -0
  393. data/mlx/mlx/backend/cuda/gemms/cublas_gemm.h +114 -0
  394. data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_0.cpp +77 -0
  395. data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu +329 -0
  396. data/mlx/mlx/backend/cuda/gemms/gemv.cu +327 -0
  397. data/mlx/mlx/backend/cuda/gemms/gemv.h +34 -0
  398. data/mlx/mlx/backend/cuda/gemms/grouped_gemm.h +25 -0
  399. data/mlx/mlx/backend/cuda/gemms/grouped_gemm_unaligned.cu +358 -0
  400. data/mlx/mlx/backend/cuda/indexing.cpp +434 -0
  401. data/mlx/mlx/backend/cuda/jit_module.cpp +443 -0
  402. data/mlx/mlx/backend/cuda/jit_module.h +120 -0
  403. data/mlx/mlx/backend/cuda/kernel_utils.cu +52 -0
  404. data/mlx/mlx/backend/cuda/kernel_utils.cuh +148 -0
  405. data/mlx/mlx/backend/cuda/layer_norm.cu +417 -0
  406. data/mlx/mlx/backend/cuda/load.cpp +60 -0
  407. data/mlx/mlx/backend/cuda/logsumexp.cu +161 -0
  408. data/mlx/mlx/backend/cuda/lru_cache.h +190 -0
  409. data/mlx/mlx/backend/cuda/matmul.cpp +373 -0
  410. data/mlx/mlx/backend/cuda/no_cuda.cpp +47 -0
  411. data/mlx/mlx/backend/cuda/primitives.cpp +46 -0
  412. data/mlx/mlx/backend/cuda/quantized/affine_quantize.cu +329 -0
  413. data/mlx/mlx/backend/cuda/quantized/convert_fp8.cu +19 -0
  414. data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.cpp +206 -0
  415. data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.h +88 -0
  416. data/mlx/mlx/backend/cuda/quantized/cuda_fp4.h +100 -0
  417. data/mlx/mlx/backend/cuda/quantized/fp_quantize.cu +496 -0
  418. data/mlx/mlx/backend/cuda/quantized/mxfp8_quantize.cuh +32 -0
  419. data/mlx/mlx/backend/cuda/quantized/no_qqmm_impl.cpp +26 -0
  420. data/mlx/mlx/backend/cuda/quantized/nvfp4_quantize.cuh +334 -0
  421. data/mlx/mlx/backend/cuda/quantized/qmv.cu +304 -0
  422. data/mlx/mlx/backend/cuda/quantized/qmv.h +21 -0
  423. data/mlx/mlx/backend/cuda/quantized/qqmm.cpp +158 -0
  424. data/mlx/mlx/backend/cuda/quantized/qqmm_impl.cpp +50 -0
  425. data/mlx/mlx/backend/cuda/quantized/qqmm_impl.h +26 -0
  426. data/mlx/mlx/backend/cuda/quantized/qqmm_utils.cu +227 -0
  427. data/mlx/mlx/backend/cuda/quantized/qqmm_utils.h +30 -0
  428. data/mlx/mlx/backend/cuda/quantized/quantized.cpp +85 -0
  429. data/mlx/mlx/backend/cuda/quantized/quantized.h +53 -0
  430. data/mlx/mlx/backend/cuda/quantized/quantized_utils.cuh +88 -0
  431. data/mlx/mlx/backend/cuda/quantized/quantized_utils.h +50 -0
  432. data/mlx/mlx/backend/cuda/random.cu +202 -0
  433. data/mlx/mlx/backend/cuda/reduce/all_reduce.cu +159 -0
  434. data/mlx/mlx/backend/cuda/reduce/col_reduce.cu +510 -0
  435. data/mlx/mlx/backend/cuda/reduce/init_reduce.cu +50 -0
  436. data/mlx/mlx/backend/cuda/reduce/reduce.cuh +71 -0
  437. data/mlx/mlx/backend/cuda/reduce/reduce_ops.cuh +211 -0
  438. data/mlx/mlx/backend/cuda/reduce/reduce_utils.cuh +145 -0
  439. data/mlx/mlx/backend/cuda/reduce/row_reduce.cu +361 -0
  440. data/mlx/mlx/backend/cuda/reduce.cu +73 -0
  441. data/mlx/mlx/backend/cuda/rms_norm.cu +536 -0
  442. data/mlx/mlx/backend/cuda/rope.cu +429 -0
  443. data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cpp +681 -0
  444. data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu +796 -0
  445. data/mlx/mlx/backend/cuda/scan.cu +468 -0
  446. data/mlx/mlx/backend/cuda/slicing.cpp +111 -0
  447. data/mlx/mlx/backend/cuda/softmax.cu +162 -0
  448. data/mlx/mlx/backend/cuda/sort.cu +1076 -0
  449. data/mlx/mlx/backend/cuda/steel/defines.cuh +9 -0
  450. data/mlx/mlx/backend/cuda/steel/gemm.cuh +101 -0
  451. data/mlx/mlx/backend/cuda/steel/mma.cuh +117 -0
  452. data/mlx/mlx/backend/cuda/steel/tiles.cuh +450 -0
  453. data/mlx/mlx/backend/cuda/steel/utils.cuh +89 -0
  454. data/mlx/mlx/backend/cuda/ternary.cu +271 -0
  455. data/mlx/mlx/backend/cuda/unary/CMakeLists.txt +34 -0
  456. data/mlx/mlx/backend/cuda/unary/abs.cu +7 -0
  457. data/mlx/mlx/backend/cuda/unary/arccos.cu +7 -0
  458. data/mlx/mlx/backend/cuda/unary/arccosh.cu +7 -0
  459. data/mlx/mlx/backend/cuda/unary/arcsin.cu +7 -0
  460. data/mlx/mlx/backend/cuda/unary/arcsinh.cu +7 -0
  461. data/mlx/mlx/backend/cuda/unary/arctan.cu +7 -0
  462. data/mlx/mlx/backend/cuda/unary/arctanh.cu +7 -0
  463. data/mlx/mlx/backend/cuda/unary/bitwise_invert.cu +7 -0
  464. data/mlx/mlx/backend/cuda/unary/ceil.cu +7 -0
  465. data/mlx/mlx/backend/cuda/unary/conjugate.cu +7 -0
  466. data/mlx/mlx/backend/cuda/unary/cos.cu +7 -0
  467. data/mlx/mlx/backend/cuda/unary/cosh.cu +7 -0
  468. data/mlx/mlx/backend/cuda/unary/erf.cu +7 -0
  469. data/mlx/mlx/backend/cuda/unary/erf_inv.cu +7 -0
  470. data/mlx/mlx/backend/cuda/unary/exp.cu +7 -0
  471. data/mlx/mlx/backend/cuda/unary/expm1.cu +7 -0
  472. data/mlx/mlx/backend/cuda/unary/floor.cu +7 -0
  473. data/mlx/mlx/backend/cuda/unary/imag.cu +7 -0
  474. data/mlx/mlx/backend/cuda/unary/log.cu +21 -0
  475. data/mlx/mlx/backend/cuda/unary/log1p.cu +7 -0
  476. data/mlx/mlx/backend/cuda/unary/logical_not.cu +7 -0
  477. data/mlx/mlx/backend/cuda/unary/negative.cu +7 -0
  478. data/mlx/mlx/backend/cuda/unary/real.cu +7 -0
  479. data/mlx/mlx/backend/cuda/unary/round.cu +18 -0
  480. data/mlx/mlx/backend/cuda/unary/sigmoid.cu +7 -0
  481. data/mlx/mlx/backend/cuda/unary/sign.cu +7 -0
  482. data/mlx/mlx/backend/cuda/unary/sin.cu +7 -0
  483. data/mlx/mlx/backend/cuda/unary/sinh.cu +7 -0
  484. data/mlx/mlx/backend/cuda/unary/sqrt.cu +15 -0
  485. data/mlx/mlx/backend/cuda/unary/square.cu +7 -0
  486. data/mlx/mlx/backend/cuda/unary/tan.cu +7 -0
  487. data/mlx/mlx/backend/cuda/unary/tanh.cu +7 -0
  488. data/mlx/mlx/backend/cuda/unary/unary.cuh +224 -0
  489. data/mlx/mlx/backend/cuda/utils.cpp +116 -0
  490. data/mlx/mlx/backend/cuda/utils.h +49 -0
  491. data/mlx/mlx/backend/cuda/vector_types.cuh +48 -0
  492. data/mlx/mlx/backend/cuda/worker.cpp +79 -0
  493. data/mlx/mlx/backend/cuda/worker.h +55 -0
  494. data/mlx/mlx/backend/gpu/CMakeLists.txt +5 -0
  495. data/mlx/mlx/backend/gpu/copy.cpp +89 -0
  496. data/mlx/mlx/backend/gpu/copy.h +57 -0
  497. data/mlx/mlx/backend/gpu/device_info.h +36 -0
  498. data/mlx/mlx/backend/gpu/eval.h +18 -0
  499. data/mlx/mlx/backend/gpu/primitives.cpp +307 -0
  500. data/mlx/mlx/backend/gpu/slicing.cpp +44 -0
  501. data/mlx/mlx/backend/gpu/slicing.h +36 -0
  502. data/mlx/mlx/backend/metal/CMakeLists.txt +144 -0
  503. data/mlx/mlx/backend/metal/allocator.cpp +279 -0
  504. data/mlx/mlx/backend/metal/allocator.h +79 -0
  505. data/mlx/mlx/backend/metal/binary.cpp +257 -0
  506. data/mlx/mlx/backend/metal/binary.h +33 -0
  507. data/mlx/mlx/backend/metal/compiled.cpp +471 -0
  508. data/mlx/mlx/backend/metal/conv.cpp +1118 -0
  509. data/mlx/mlx/backend/metal/copy.cpp +235 -0
  510. data/mlx/mlx/backend/metal/custom_kernel.cpp +430 -0
  511. data/mlx/mlx/backend/metal/device.cpp +816 -0
  512. data/mlx/mlx/backend/metal/device.h +289 -0
  513. data/mlx/mlx/backend/metal/device_info.cpp +58 -0
  514. data/mlx/mlx/backend/metal/distributed.cpp +38 -0
  515. data/mlx/mlx/backend/metal/eval.cpp +97 -0
  516. data/mlx/mlx/backend/metal/event.cpp +62 -0
  517. data/mlx/mlx/backend/metal/fence.cpp +162 -0
  518. data/mlx/mlx/backend/metal/fft.cpp +807 -0
  519. data/mlx/mlx/backend/metal/hadamard.cpp +198 -0
  520. data/mlx/mlx/backend/metal/indexing.cpp +727 -0
  521. data/mlx/mlx/backend/metal/jit/includes.h +58 -0
  522. data/mlx/mlx/backend/metal/jit/indexing.h +76 -0
  523. data/mlx/mlx/backend/metal/jit_kernels.cpp +1118 -0
  524. data/mlx/mlx/backend/metal/kernels/CMakeLists.txt +193 -0
  525. data/mlx/mlx/backend/metal/kernels/arange.h +9 -0
  526. data/mlx/mlx/backend/metal/kernels/arange.metal +20 -0
  527. data/mlx/mlx/backend/metal/kernels/arg_reduce.metal +182 -0
  528. data/mlx/mlx/backend/metal/kernels/atomic.h +345 -0
  529. data/mlx/mlx/backend/metal/kernels/bf16.h +16 -0
  530. data/mlx/mlx/backend/metal/kernels/bf16_math.h +380 -0
  531. data/mlx/mlx/backend/metal/kernels/binary.h +199 -0
  532. data/mlx/mlx/backend/metal/kernels/binary.metal +109 -0
  533. data/mlx/mlx/backend/metal/kernels/binary_ops.h +330 -0
  534. data/mlx/mlx/backend/metal/kernels/binary_two.h +244 -0
  535. data/mlx/mlx/backend/metal/kernels/binary_two.metal +54 -0
  536. data/mlx/mlx/backend/metal/kernels/cexpf.h +134 -0
  537. data/mlx/mlx/backend/metal/kernels/complex.h +173 -0
  538. data/mlx/mlx/backend/metal/kernels/conv.metal +701 -0
  539. data/mlx/mlx/backend/metal/kernels/copy.h +276 -0
  540. data/mlx/mlx/backend/metal/kernels/copy.metal +75 -0
  541. data/mlx/mlx/backend/metal/kernels/defines.h +24 -0
  542. data/mlx/mlx/backend/metal/kernels/erf.h +69 -0
  543. data/mlx/mlx/backend/metal/kernels/expm1f.h +90 -0
  544. data/mlx/mlx/backend/metal/kernels/fence.metal +52 -0
  545. data/mlx/mlx/backend/metal/kernels/fft/radix.h +328 -0
  546. data/mlx/mlx/backend/metal/kernels/fft/readwrite.h +624 -0
  547. data/mlx/mlx/backend/metal/kernels/fft.h +486 -0
  548. data/mlx/mlx/backend/metal/kernels/fft.metal +67 -0
  549. data/mlx/mlx/backend/metal/kernels/fp4.h +48 -0
  550. data/mlx/mlx/backend/metal/kernels/fp8.h +80 -0
  551. data/mlx/mlx/backend/metal/kernels/fp_quantized.h +1850 -0
  552. data/mlx/mlx/backend/metal/kernels/fp_quantized.metal +153 -0
  553. data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.h +1044 -0
  554. data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.metal +79 -0
  555. data/mlx/mlx/backend/metal/kernels/gemv.metal +868 -0
  556. data/mlx/mlx/backend/metal/kernels/gemv_masked.h +827 -0
  557. data/mlx/mlx/backend/metal/kernels/gemv_masked.metal +76 -0
  558. data/mlx/mlx/backend/metal/kernels/hadamard.h +182 -0
  559. data/mlx/mlx/backend/metal/kernels/indexing/gather.h +51 -0
  560. data/mlx/mlx/backend/metal/kernels/indexing/gather_axis.h +44 -0
  561. data/mlx/mlx/backend/metal/kernels/indexing/gather_front.h +24 -0
  562. data/mlx/mlx/backend/metal/kernels/indexing/indexing.h +23 -0
  563. data/mlx/mlx/backend/metal/kernels/indexing/masked_scatter.h +41 -0
  564. data/mlx/mlx/backend/metal/kernels/indexing/scatter.h +59 -0
  565. data/mlx/mlx/backend/metal/kernels/indexing/scatter_axis.h +52 -0
  566. data/mlx/mlx/backend/metal/kernels/layer_norm.metal +433 -0
  567. data/mlx/mlx/backend/metal/kernels/logging.h +26 -0
  568. data/mlx/mlx/backend/metal/kernels/logsumexp.h +140 -0
  569. data/mlx/mlx/backend/metal/kernels/logsumexp.metal +18 -0
  570. data/mlx/mlx/backend/metal/kernels/quantized.h +2508 -0
  571. data/mlx/mlx/backend/metal/kernels/quantized.metal +144 -0
  572. data/mlx/mlx/backend/metal/kernels/quantized_nax.h +1705 -0
  573. data/mlx/mlx/backend/metal/kernels/quantized_nax.metal +106 -0
  574. data/mlx/mlx/backend/metal/kernels/quantized_utils.h +90 -0
  575. data/mlx/mlx/backend/metal/kernels/random.metal +103 -0
  576. data/mlx/mlx/backend/metal/kernels/reduce.h +5 -0
  577. data/mlx/mlx/backend/metal/kernels/reduce.metal +169 -0
  578. data/mlx/mlx/backend/metal/kernels/reduce_utils.h +6 -0
  579. data/mlx/mlx/backend/metal/kernels/reduction/ops.h +275 -0
  580. data/mlx/mlx/backend/metal/kernels/reduction/reduce_all.h +66 -0
  581. data/mlx/mlx/backend/metal/kernels/reduction/reduce_col.h +398 -0
  582. data/mlx/mlx/backend/metal/kernels/reduction/reduce_init.h +8 -0
  583. data/mlx/mlx/backend/metal/kernels/reduction/reduce_row.h +369 -0
  584. data/mlx/mlx/backend/metal/kernels/rms_norm.metal +391 -0
  585. data/mlx/mlx/backend/metal/kernels/rope.metal +229 -0
  586. data/mlx/mlx/backend/metal/kernels/scaled_dot_product_attention.metal +44 -0
  587. data/mlx/mlx/backend/metal/kernels/scan.h +514 -0
  588. data/mlx/mlx/backend/metal/kernels/scan.metal +109 -0
  589. data/mlx/mlx/backend/metal/kernels/sdpa_vector.h +394 -0
  590. data/mlx/mlx/backend/metal/kernels/softmax.h +190 -0
  591. data/mlx/mlx/backend/metal/kernels/softmax.metal +24 -0
  592. data/mlx/mlx/backend/metal/kernels/sort.h +719 -0
  593. data/mlx/mlx/backend/metal/kernels/sort.metal +80 -0
  594. data/mlx/mlx/backend/metal/kernels/steel/attn/attn.h +296 -0
  595. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.h +471 -0
  596. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.metal +27 -0
  597. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.h +481 -0
  598. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.metal +28 -0
  599. data/mlx/mlx/backend/metal/kernels/steel/attn/loader.h +264 -0
  600. data/mlx/mlx/backend/metal/kernels/steel/attn/mma.h +750 -0
  601. data/mlx/mlx/backend/metal/kernels/steel/attn/nax.h +1076 -0
  602. data/mlx/mlx/backend/metal/kernels/steel/attn/params.h +44 -0
  603. data/mlx/mlx/backend/metal/kernels/steel/attn/transforms.h +71 -0
  604. data/mlx/mlx/backend/metal/kernels/steel/conv/conv.h +13 -0
  605. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.h +176 -0
  606. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.metal +56 -0
  607. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.h +225 -0
  608. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.metal +47 -0
  609. data/mlx/mlx/backend/metal/kernels/steel/conv/loader.h +6 -0
  610. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_l.h +451 -0
  611. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_n.h +319 -0
  612. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_general.h +381 -0
  613. data/mlx/mlx/backend/metal/kernels/steel/conv/params.h +62 -0
  614. data/mlx/mlx/backend/metal/kernels/steel/defines.h +7 -0
  615. data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm.h +295 -0
  616. data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm_nax.h +157 -0
  617. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.h +346 -0
  618. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.metal +34 -0
  619. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.h +219 -0
  620. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.metal +30 -0
  621. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.h +459 -0
  622. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.metal +59 -0
  623. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.h +143 -0
  624. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.metal +37 -0
  625. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.h +719 -0
  626. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.metal +76 -0
  627. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.h +266 -0
  628. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.metal +43 -0
  629. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.h +227 -0
  630. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.metal +76 -0
  631. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.h +152 -0
  632. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.metal +30 -0
  633. data/mlx/mlx/backend/metal/kernels/steel/gemm/loader.h +137 -0
  634. data/mlx/mlx/backend/metal/kernels/steel/gemm/mma.h +1146 -0
  635. data/mlx/mlx/backend/metal/kernels/steel/gemm/nax.h +1084 -0
  636. data/mlx/mlx/backend/metal/kernels/steel/gemm/params.h +65 -0
  637. data/mlx/mlx/backend/metal/kernels/steel/gemm/transforms.h +72 -0
  638. data/mlx/mlx/backend/metal/kernels/steel/utils/integral_constant.h +134 -0
  639. data/mlx/mlx/backend/metal/kernels/steel/utils/type_traits.h +55 -0
  640. data/mlx/mlx/backend/metal/kernels/steel/utils.h +42 -0
  641. data/mlx/mlx/backend/metal/kernels/ternary.h +145 -0
  642. data/mlx/mlx/backend/metal/kernels/ternary.metal +48 -0
  643. data/mlx/mlx/backend/metal/kernels/ternary_ops.h +10 -0
  644. data/mlx/mlx/backend/metal/kernels/unary.h +63 -0
  645. data/mlx/mlx/backend/metal/kernels/unary.metal +115 -0
  646. data/mlx/mlx/backend/metal/kernels/unary_ops.h +454 -0
  647. data/mlx/mlx/backend/metal/kernels/utils.h +445 -0
  648. data/mlx/mlx/backend/metal/kernels.h +375 -0
  649. data/mlx/mlx/backend/metal/logsumexp.cpp +95 -0
  650. data/mlx/mlx/backend/metal/make_compiled_preamble.sh +120 -0
  651. data/mlx/mlx/backend/metal/matmul.cpp +2572 -0
  652. data/mlx/mlx/backend/metal/matmul.h +144 -0
  653. data/mlx/mlx/backend/metal/metal.cpp +50 -0
  654. data/mlx/mlx/backend/metal/metal.h +25 -0
  655. data/mlx/mlx/backend/metal/no_metal.cpp +42 -0
  656. data/mlx/mlx/backend/metal/nojit_kernels.cpp +414 -0
  657. data/mlx/mlx/backend/metal/normalization.cpp +433 -0
  658. data/mlx/mlx/backend/metal/primitives.cpp +242 -0
  659. data/mlx/mlx/backend/metal/quantized.cpp +1651 -0
  660. data/mlx/mlx/backend/metal/reduce.cpp +1038 -0
  661. data/mlx/mlx/backend/metal/reduce.h +41 -0
  662. data/mlx/mlx/backend/metal/resident.cpp +100 -0
  663. data/mlx/mlx/backend/metal/resident.h +32 -0
  664. data/mlx/mlx/backend/metal/rope.cpp +165 -0
  665. data/mlx/mlx/backend/metal/scaled_dot_product_attention.cpp +798 -0
  666. data/mlx/mlx/backend/metal/scan.cpp +145 -0
  667. data/mlx/mlx/backend/metal/scan.h +17 -0
  668. data/mlx/mlx/backend/metal/slicing.cpp +99 -0
  669. data/mlx/mlx/backend/metal/softmax.cpp +87 -0
  670. data/mlx/mlx/backend/metal/sort.cpp +368 -0
  671. data/mlx/mlx/backend/metal/ternary.cpp +160 -0
  672. data/mlx/mlx/backend/metal/ternary.h +21 -0
  673. data/mlx/mlx/backend/metal/unary.cpp +161 -0
  674. data/mlx/mlx/backend/metal/unary.h +21 -0
  675. data/mlx/mlx/backend/metal/utils.cpp +77 -0
  676. data/mlx/mlx/backend/metal/utils.h +99 -0
  677. data/mlx/mlx/backend/no_cpu/CMakeLists.txt +7 -0
  678. data/mlx/mlx/backend/no_cpu/compiled.cpp +24 -0
  679. data/mlx/mlx/backend/no_cpu/device_info.cpp +22 -0
  680. data/mlx/mlx/backend/no_cpu/primitives.cpp +146 -0
  681. data/mlx/mlx/backend/no_gpu/CMakeLists.txt +8 -0
  682. data/mlx/mlx/backend/no_gpu/allocator.cpp +134 -0
  683. data/mlx/mlx/backend/no_gpu/apple_memory.h +16 -0
  684. data/mlx/mlx/backend/no_gpu/device_info.cpp +22 -0
  685. data/mlx/mlx/backend/no_gpu/eval.cpp +24 -0
  686. data/mlx/mlx/backend/no_gpu/event.cpp +53 -0
  687. data/mlx/mlx/backend/no_gpu/fence.cpp +54 -0
  688. data/mlx/mlx/backend/no_gpu/linux_memory.h +22 -0
  689. data/mlx/mlx/backend/no_gpu/primitives.cpp +185 -0
  690. data/mlx/mlx/compile.cpp +1243 -0
  691. data/mlx/mlx/compile.h +45 -0
  692. data/mlx/mlx/compile_impl.h +70 -0
  693. data/mlx/mlx/device.cpp +72 -0
  694. data/mlx/mlx/device.h +56 -0
  695. data/mlx/mlx/distributed/CMakeLists.txt +14 -0
  696. data/mlx/mlx/distributed/distributed.cpp +197 -0
  697. data/mlx/mlx/distributed/distributed.h +61 -0
  698. data/mlx/mlx/distributed/distributed_impl.h +59 -0
  699. data/mlx/mlx/distributed/jaccl/CMakeLists.txt +12 -0
  700. data/mlx/mlx/distributed/jaccl/jaccl.cpp +178 -0
  701. data/mlx/mlx/distributed/jaccl/jaccl.h +12 -0
  702. data/mlx/mlx/distributed/jaccl/mesh.cpp +451 -0
  703. data/mlx/mlx/distributed/jaccl/mesh.h +122 -0
  704. data/mlx/mlx/distributed/jaccl/no_jaccl.cpp +20 -0
  705. data/mlx/mlx/distributed/jaccl/ring.cpp +692 -0
  706. data/mlx/mlx/distributed/jaccl/ring.h +178 -0
  707. data/mlx/mlx/distributed/jaccl/utils.cpp +329 -0
  708. data/mlx/mlx/distributed/jaccl/utils.h +342 -0
  709. data/mlx/mlx/distributed/mpi/CMakeLists.txt +5 -0
  710. data/mlx/mlx/distributed/mpi/mpi.cpp +501 -0
  711. data/mlx/mlx/distributed/mpi/mpi.h +12 -0
  712. data/mlx/mlx/distributed/mpi/mpi_declarations.h +28 -0
  713. data/mlx/mlx/distributed/mpi/no_mpi.cpp +20 -0
  714. data/mlx/mlx/distributed/nccl/CMakeLists.txt +26 -0
  715. data/mlx/mlx/distributed/nccl/nccl.cpp +443 -0
  716. data/mlx/mlx/distributed/nccl/nccl.h +12 -0
  717. data/mlx/mlx/distributed/nccl/nccl_stub/CMakeLists.txt +1 -0
  718. data/mlx/mlx/distributed/nccl/nccl_stub/nccl_stubs.cpp +54 -0
  719. data/mlx/mlx/distributed/nccl/no_nccl.cpp +20 -0
  720. data/mlx/mlx/distributed/ops.cpp +186 -0
  721. data/mlx/mlx/distributed/ops.h +57 -0
  722. data/mlx/mlx/distributed/primitives.cpp +95 -0
  723. data/mlx/mlx/distributed/primitives.h +156 -0
  724. data/mlx/mlx/distributed/reduction_ops.h +38 -0
  725. data/mlx/mlx/distributed/ring/CMakeLists.txt +5 -0
  726. data/mlx/mlx/distributed/ring/no_ring.cpp +20 -0
  727. data/mlx/mlx/distributed/ring/ring.cpp +870 -0
  728. data/mlx/mlx/distributed/ring/ring.h +12 -0
  729. data/mlx/mlx/distributed/utils.cpp +206 -0
  730. data/mlx/mlx/distributed/utils.h +67 -0
  731. data/mlx/mlx/dtype.cpp +197 -0
  732. data/mlx/mlx/dtype.h +116 -0
  733. data/mlx/mlx/dtype_utils.cpp +42 -0
  734. data/mlx/mlx/dtype_utils.h +119 -0
  735. data/mlx/mlx/einsum.cpp +941 -0
  736. data/mlx/mlx/einsum.h +23 -0
  737. data/mlx/mlx/event.h +58 -0
  738. data/mlx/mlx/export.cpp +1130 -0
  739. data/mlx/mlx/export.h +137 -0
  740. data/mlx/mlx/export_impl.h +99 -0
  741. data/mlx/mlx/fast.cpp +941 -0
  742. data/mlx/mlx/fast.h +103 -0
  743. data/mlx/mlx/fast_primitives.h +427 -0
  744. data/mlx/mlx/fence.h +39 -0
  745. data/mlx/mlx/fft.cpp +262 -0
  746. data/mlx/mlx/fft.h +159 -0
  747. data/mlx/mlx/graph_utils.cpp +175 -0
  748. data/mlx/mlx/graph_utils.h +67 -0
  749. data/mlx/mlx/io/CMakeLists.txt +25 -0
  750. data/mlx/mlx/io/gguf.cpp +470 -0
  751. data/mlx/mlx/io/gguf.h +20 -0
  752. data/mlx/mlx/io/gguf_quants.cpp +164 -0
  753. data/mlx/mlx/io/load.cpp +397 -0
  754. data/mlx/mlx/io/load.h +175 -0
  755. data/mlx/mlx/io/no_gguf.cpp +20 -0
  756. data/mlx/mlx/io/no_safetensors.cpp +37 -0
  757. data/mlx/mlx/io/safetensors.cpp +234 -0
  758. data/mlx/mlx/io.h +61 -0
  759. data/mlx/mlx/linalg.cpp +708 -0
  760. data/mlx/mlx/linalg.h +115 -0
  761. data/mlx/mlx/memory.h +80 -0
  762. data/mlx/mlx/mlx.h +25 -0
  763. data/mlx/mlx/ops.cpp +6094 -0
  764. data/mlx/mlx/ops.h +1610 -0
  765. data/mlx/mlx/primitives.cpp +5850 -0
  766. data/mlx/mlx/primitives.h +2525 -0
  767. data/mlx/mlx/random.cpp +492 -0
  768. data/mlx/mlx/random.h +283 -0
  769. data/mlx/mlx/scheduler.cpp +73 -0
  770. data/mlx/mlx/scheduler.h +189 -0
  771. data/mlx/mlx/small_vector.h +540 -0
  772. data/mlx/mlx/stream.h +42 -0
  773. data/mlx/mlx/threadpool.h +133 -0
  774. data/mlx/mlx/transforms.cpp +1065 -0
  775. data/mlx/mlx/transforms.h +231 -0
  776. data/mlx/mlx/transforms_impl.h +88 -0
  777. data/mlx/mlx/types/bf16.h +187 -0
  778. data/mlx/mlx/types/complex.h +113 -0
  779. data/mlx/mlx/types/fp16.h +234 -0
  780. data/mlx/mlx/types/half_types.h +58 -0
  781. data/mlx/mlx/types/limits.h +70 -0
  782. data/mlx/mlx/utils.cpp +302 -0
  783. data/mlx/mlx/utils.h +174 -0
  784. data/mlx/mlx/version.cpp +11 -0
  785. data/mlx/mlx/version.h +22 -0
  786. data/mlx/mlx.pc.in +52 -0
  787. data/mlx/pyproject.toml +7 -0
  788. data/mlx/python/mlx/__main__.py +27 -0
  789. data/mlx/python/mlx/_distributed_utils/common.py +135 -0
  790. data/mlx/python/mlx/_distributed_utils/config.py +631 -0
  791. data/mlx/python/mlx/_distributed_utils/launch.py +570 -0
  792. data/mlx/python/mlx/_reprlib_fix.py +16 -0
  793. data/mlx/python/mlx/_stub_patterns.txt +36 -0
  794. data/mlx/python/mlx/extension.py +88 -0
  795. data/mlx/python/mlx/nn/__init__.py +5 -0
  796. data/mlx/python/mlx/nn/init.py +441 -0
  797. data/mlx/python/mlx/nn/layers/__init__.py +105 -0
  798. data/mlx/python/mlx/nn/layers/activations.py +661 -0
  799. data/mlx/python/mlx/nn/layers/base.py +675 -0
  800. data/mlx/python/mlx/nn/layers/containers.py +24 -0
  801. data/mlx/python/mlx/nn/layers/convolution.py +232 -0
  802. data/mlx/python/mlx/nn/layers/convolution_transpose.py +242 -0
  803. data/mlx/python/mlx/nn/layers/distributed.py +601 -0
  804. data/mlx/python/mlx/nn/layers/dropout.py +137 -0
  805. data/mlx/python/mlx/nn/layers/embedding.py +53 -0
  806. data/mlx/python/mlx/nn/layers/linear.py +180 -0
  807. data/mlx/python/mlx/nn/layers/normalization.py +363 -0
  808. data/mlx/python/mlx/nn/layers/pooling.py +398 -0
  809. data/mlx/python/mlx/nn/layers/positional_encoding.py +162 -0
  810. data/mlx/python/mlx/nn/layers/quantized.py +426 -0
  811. data/mlx/python/mlx/nn/layers/recurrent.py +289 -0
  812. data/mlx/python/mlx/nn/layers/transformer.py +354 -0
  813. data/mlx/python/mlx/nn/layers/upsample.py +277 -0
  814. data/mlx/python/mlx/nn/losses.py +610 -0
  815. data/mlx/python/mlx/nn/utils.py +165 -0
  816. data/mlx/python/mlx/optimizers/__init__.py +4 -0
  817. data/mlx/python/mlx/optimizers/optimizers.py +976 -0
  818. data/mlx/python/mlx/optimizers/schedulers.py +158 -0
  819. data/mlx/python/mlx/py.typed +1 -0
  820. data/mlx/python/mlx/utils.py +325 -0
  821. data/mlx/python/src/CMakeLists.txt +96 -0
  822. data/mlx/python/src/array.cpp +1525 -0
  823. data/mlx/python/src/buffer.h +124 -0
  824. data/mlx/python/src/constants.cpp +15 -0
  825. data/mlx/python/src/convert.cpp +504 -0
  826. data/mlx/python/src/convert.h +50 -0
  827. data/mlx/python/src/cuda.cpp +19 -0
  828. data/mlx/python/src/device.cpp +98 -0
  829. data/mlx/python/src/distributed.cpp +352 -0
  830. data/mlx/python/src/export.cpp +356 -0
  831. data/mlx/python/src/fast.cpp +627 -0
  832. data/mlx/python/src/fft.cpp +514 -0
  833. data/mlx/python/src/indexing.cpp +1016 -0
  834. data/mlx/python/src/indexing.h +41 -0
  835. data/mlx/python/src/linalg.cpp +663 -0
  836. data/mlx/python/src/load.cpp +531 -0
  837. data/mlx/python/src/load.h +51 -0
  838. data/mlx/python/src/memory.cpp +125 -0
  839. data/mlx/python/src/metal.cpp +98 -0
  840. data/mlx/python/src/mlx.cpp +51 -0
  841. data/mlx/python/src/mlx_func.cpp +116 -0
  842. data/mlx/python/src/mlx_func.h +31 -0
  843. data/mlx/python/src/ops.cpp +5545 -0
  844. data/mlx/python/src/random.cpp +516 -0
  845. data/mlx/python/src/small_vector.h +76 -0
  846. data/mlx/python/src/stream.cpp +147 -0
  847. data/mlx/python/src/transforms.cpp +1542 -0
  848. data/mlx/python/src/trees.cpp +311 -0
  849. data/mlx/python/src/trees.h +62 -0
  850. data/mlx/python/src/utils.cpp +98 -0
  851. data/mlx/python/src/utils.h +78 -0
  852. data/mlx/python/tests/__main__.py +5 -0
  853. data/mlx/python/tests/cuda_skip.py +62 -0
  854. data/mlx/python/tests/mlx_distributed_tests.py +314 -0
  855. data/mlx/python/tests/mlx_tests.py +116 -0
  856. data/mlx/python/tests/mpi_test_distributed.py +142 -0
  857. data/mlx/python/tests/nccl_test_distributed.py +52 -0
  858. data/mlx/python/tests/ring_test_distributed.py +131 -0
  859. data/mlx/python/tests/test_array.py +2139 -0
  860. data/mlx/python/tests/test_autograd.py +880 -0
  861. data/mlx/python/tests/test_bf16.py +196 -0
  862. data/mlx/python/tests/test_blas.py +1429 -0
  863. data/mlx/python/tests/test_compile.py +1277 -0
  864. data/mlx/python/tests/test_constants.py +41 -0
  865. data/mlx/python/tests/test_conv.py +1198 -0
  866. data/mlx/python/tests/test_conv_transpose.py +810 -0
  867. data/mlx/python/tests/test_device.py +150 -0
  868. data/mlx/python/tests/test_double.py +306 -0
  869. data/mlx/python/tests/test_einsum.py +363 -0
  870. data/mlx/python/tests/test_eval.py +200 -0
  871. data/mlx/python/tests/test_export_import.py +614 -0
  872. data/mlx/python/tests/test_fast.py +923 -0
  873. data/mlx/python/tests/test_fast_sdpa.py +647 -0
  874. data/mlx/python/tests/test_fft.py +323 -0
  875. data/mlx/python/tests/test_graph.py +37 -0
  876. data/mlx/python/tests/test_init.py +139 -0
  877. data/mlx/python/tests/test_linalg.py +621 -0
  878. data/mlx/python/tests/test_load.py +447 -0
  879. data/mlx/python/tests/test_losses.py +427 -0
  880. data/mlx/python/tests/test_memory.py +77 -0
  881. data/mlx/python/tests/test_nn.py +1986 -0
  882. data/mlx/python/tests/test_ops.py +3261 -0
  883. data/mlx/python/tests/test_optimizers.py +584 -0
  884. data/mlx/python/tests/test_quantized.py +1160 -0
  885. data/mlx/python/tests/test_random.py +392 -0
  886. data/mlx/python/tests/test_reduce.py +223 -0
  887. data/mlx/python/tests/test_tree.py +96 -0
  888. data/mlx/python/tests/test_upsample.py +100 -0
  889. data/mlx/python/tests/test_vmap.py +860 -0
  890. data/mlx/setup.py +315 -0
  891. data/mlx/tests/CMakeLists.txt +44 -0
  892. data/mlx/tests/allocator_tests.cpp +41 -0
  893. data/mlx/tests/arg_reduce_tests.cpp +204 -0
  894. data/mlx/tests/array_tests.cpp +663 -0
  895. data/mlx/tests/autograd_tests.cpp +1399 -0
  896. data/mlx/tests/blas_tests.cpp +110 -0
  897. data/mlx/tests/compile_tests.cpp +818 -0
  898. data/mlx/tests/creations_tests.cpp +239 -0
  899. data/mlx/tests/custom_vjp_tests.cpp +55 -0
  900. data/mlx/tests/device_tests.cpp +35 -0
  901. data/mlx/tests/einsum_tests.cpp +85 -0
  902. data/mlx/tests/eval_tests.cpp +93 -0
  903. data/mlx/tests/export_import_tests.cpp +164 -0
  904. data/mlx/tests/fft_tests.cpp +366 -0
  905. data/mlx/tests/gpu_tests.cpp +523 -0
  906. data/mlx/tests/linalg_tests.cpp +639 -0
  907. data/mlx/tests/load_tests.cpp +270 -0
  908. data/mlx/tests/ops_tests.cpp +4159 -0
  909. data/mlx/tests/random_tests.cpp +716 -0
  910. data/mlx/tests/scheduler_tests.cpp +121 -0
  911. data/mlx/tests/tests.cpp +26 -0
  912. data/mlx/tests/utils_tests.cpp +67 -0
  913. data/mlx/tests/vmap_tests.cpp +547 -0
  914. metadata +958 -0
data/mlx/python/tests/test_quantized.py
@@ -0,0 +1,1160 @@
1
+ # Copyright © 2023 Apple Inc.
2
+
3
+ import unittest
4
+ from itertools import product
5
+
6
+ import mlx.core as mx
7
+ import mlx_tests
8
+
9
+
10
+ class TestQuantized(mlx_tests.MLXTestCase):
11
+ def test_quantize_dequantize(self):
12
+ w = mx.random.normal(shape=(128, 512))
13
+ for gs in [32, 64, 128]:
14
+ for b in [2, 3, 5, 6, 4, 8]:
15
+ with self.subTest(gs=gs, b=b):
16
+ w_q, scales, biases = mx.quantize(w, group_size=gs, bits=b)
17
+ w_hat = mx.dequantize(w_q, scales, biases, gs, b)
18
+ errors = (w - w_hat).abs().reshape(*scales.shape, -1)
19
+ eps = 1e-6
20
+ self.assertTrue((errors <= (scales[..., None] + eps).abs()).all())
21
+
22
+ # test quantize/dequantize 0s
23
+ a = mx.zeros((256, 512))
24
+ for gs in [32, 64, 128]:
25
+ for b in [2, 3, 4, 5, 6, 8]:
26
+ w_q, scales, biases = mx.quantize(a, gs, b)
27
+ a_hat = mx.dequantize(w_q, scales, biases, gs, b)
28
+ self.assertTrue(mx.all(a_hat == 0))
29
+
30
+ def test_mxfp4_quantize_dequantize(self):
31
+ lut = mx.array(
32
+ [
33
+ +0.0,
34
+ +0.5,
35
+ +1.0,
36
+ +1.5,
37
+ +2.0,
38
+ +3.0,
39
+ +4.0,
40
+ +6.0,
41
+ -0.0,
42
+ -0.5,
43
+ -1.0,
44
+ -1.5,
45
+ -2.0,
46
+ -3.0,
47
+ -4.0,
48
+ -6.0,
49
+ ]
50
+ )
51
+ w = lut[mx.random.randint(0, 16, shape=(128, 512))]
52
+ w = w.reshape(-1, 32)
53
+ w[:, 0] = 6
54
+ w = (w + 3e-6).astype(mx.bfloat16)
55
+
56
+ # Invalid bits / group size
57
+ with self.assertRaises(ValueError):
58
+ mx.quantize(w, bits=3, mode="mxfp4")
59
+
60
+ with self.assertRaises(ValueError):
61
+ mx.quantize(w, group_size=64, mode="mxfp4")
62
+
63
+ w_q, scales = mx.quantize(w, mode="mxfp4")
64
+ with self.assertRaises(ValueError):
65
+ mx.dequantize(w_q, scales, bits=3, mode="mxfp4")
66
+
67
+ with self.assertRaises(ValueError):
68
+ mx.dequantize(w_q, scales, group_size=64, mode="mxfp4")
69
+
70
+ # Invalid output type
71
+ with self.assertRaises(ValueError):
72
+ mx.dequantize(
73
+ w_q, scales, group_size=32, bits=4, mode="mxfp4", dtype=mx.int32
74
+ )
75
+
76
+ w_hat = mx.dequantize(w_q, scales, mode="mxfp4")
77
+ self.assertTrue(mx.allclose(w, w_hat, rtol=1e-5, atol=1e-5))
78
+
79
+ # test quantize/dequantize 0s
80
+ a = mx.zeros((256, 512))
81
+ w_q, scales = mx.quantize(a, mode="mxfp4")
82
+ w_hat = mx.dequantize(w_q, scales, mode="mxfp4")
83
+ self.assertTrue(mx.all(w_hat == 0))
84
+
85
+ def test_mxfp8_quantize_dequantize(self):
86
+ w = 2 * mx.random.uniform(shape=(512, 32)) - 1
87
+ w = w.astype(mx.bfloat16)
88
+
89
+ # Invalid bits / group size
90
+ with self.assertRaises(ValueError):
91
+ mx.quantize(w, bits=3, mode="mxfp8")
92
+
93
+ with self.assertRaises(ValueError):
94
+ mx.quantize(w, group_size=32, bits=7, mode="mxfp8")
95
+ w_q, scales = mx.quantize(w, group_size=32, mode="mxfp8")
96
+
97
+ with self.assertRaises(ValueError):
98
+ mx.dequantize(w_q, scales, group_size=16, mode="mxfp8")
99
+
100
+ with self.assertRaises(ValueError):
101
+ mx.dequantize(w_q, scales, bits=4, mode="mxfp8")
102
+
103
+ w_hat = mx.dequantize(w_q, scales, mode="mxfp8")
104
+
105
+ self.assertTrue(mx.allclose(w, w_hat, rtol=1e-1, atol=1e-1))
106
+
107
+ # test quantize/dequantize 0s
108
+ a = mx.zeros((256, 512))
109
+ w_q, scales = mx.quantize(a, mode="mxfp8")
110
+ w_hat = mx.dequantize(w_q, scales, mode="mxfp8")
111
+ self.assertTrue(mx.all(w_hat == 0))
112
+
113
+ def test_nvfp4_quantize_dequantize(self):
114
+ lut = mx.array(
115
+ [
116
+ +0.0,
117
+ +0.5,
118
+ +1.0,
119
+ +1.5,
120
+ +2.0,
121
+ +3.0,
122
+ +4.0,
123
+ +6.0,
124
+ -0.0,
125
+ -0.5,
126
+ -1.0,
127
+ -1.5,
128
+ -2.0,
129
+ -3.0,
130
+ -4.0,
131
+ -6.0,
132
+ ]
133
+ )
134
+ w = lut[mx.random.randint(0, 16, shape=(128, 512))]
135
+ w = w.reshape(-1, 16)
136
+ w[:, 0] = 6
137
+ w = (w + 3e-6).astype(mx.bfloat16)
138
+
139
+ # Invalid bits / group size
140
+ with self.assertRaises(ValueError):
141
+ mx.quantize(w, bits=3, mode="nvfp4")
142
+
143
+ with self.assertRaises(ValueError):
144
+ mx.quantize(w, group_size=64, mode="nvfp4")
145
+
146
+ w_q, scales = mx.quantize(w, mode="nvfp4")
147
+
148
+ with self.assertRaises(ValueError):
149
+ mx.dequantize(w_q, scales, bits=3, mode="nvfp4")
150
+
151
+ with self.assertRaises(ValueError):
152
+ mx.dequantize(w_q, scales, group_size=32, mode="nvfp4")
153
+
154
+ w_hat = mx.dequantize(w_q, scales, mode="nvfp4")
155
+ self.assertTrue(mx.allclose(w, w_hat, rtol=1e-5, atol=1e-5))
156
+
157
+ # test quantize/dequantize 0s
158
+ a = mx.zeros((256, 512))
159
+ w_q, scales = mx.quantize(a, mode="nvfp4")
160
+ w_hat = mx.dequantize(w_q, scales, mode="nvfp4")
161
+ self.assertTrue(mx.all(w_hat == 0))
162
+
163
+ def test_qqmv(self):
164
+ key = mx.random.key(0)
165
+ k1, k2 = mx.random.split(key)
166
+ tests = product(
167
+ [256, 512, 67], # M
168
+ [64, 256], # N
169
+ )
170
+ modes = ["nvfp4", "mxfp8"]
171
+ for M, N in tests:
172
+ for mode in modes:
173
+ with self.subTest(shape=(M, N), mode=mode):
174
+ x_shape = (1, N)
175
+ w_shape = (M, N)
176
+
177
+ x = mx.random.normal(shape=x_shape, key=k1)
178
+ x_hat = mx.dequantize(
179
+ *mx.quantize(x, mode=mode), mode=mode, dtype=mx.float32
180
+ )
181
+
182
+ w = mx.random.normal(shape=w_shape, key=k2)
183
+ w_q, scales = mx.quantize(w, mode=mode)
184
+ w_hat = mx.dequantize(w_q, scales, mode=mode, dtype=mx.float32)
185
+ y_q = mx.qqmm(
186
+ x,
187
+ w_q,
188
+ scales,
189
+ mode=mode,
190
+ )
191
+ y_hat = x_hat @ mx.swapaxes(w_hat, -1, -2)
192
+ self.assertEqual(y_q.shape, y_hat.shape)
193
+ self.assertLess((y_q - y_hat).abs().max(), 1e-3)
194
+
+    def test_qmm(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        dtype = mx.float16 if (mx.default_device() == mx.gpu) else mx.float32
+        tests = product(
+            [128, 64, 32],  # group_size
+            [2, 4, 8],  # bits
+            [8, 32, 33, 64],  # M
+            [128, 256],  # N
+            [128, 256],  # K
+            [True, False],  # transposed
+        )
+        for group_size, bits, M, N, K, transposed in tests:
+            with self.subTest(
+                shape=(M, N, K),
+                group_size=group_size,
+                bits=bits,
+                transposed=transposed,
+            ):
+                x = mx.random.normal(shape=(M, K), key=k1) / K**0.5
+                w = (
+                    mx.random.normal(shape=(N, K) if transposed else (K, N), key=k2)
+                    / K**0.5
+                )
+                x = x.astype(dtype)
+                w = w.astype(dtype)
+                w_q, scales, biases = mx.quantize(w, group_size, bits)
+                w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+                y_q = mx.quantized_matmul(
+                    x, w_q, scales, biases, transposed, group_size, bits
+                )
+                y_hat = (x @ w_hat.T) if transposed else (x @ w_hat)
+                self.assertEqual(y_q.shape, y_hat.shape)
+
+                tol = 1e-3 if dtype == mx.float32 else 1.5e-3
+                self.assertLess((y_q - y_hat).abs().max(), tol)
+
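For reference, the three-output affine mode stores per-group scales and biases alongside the packed integer codes, and dequantization is the per-group affine map ŵ = scale · q + bias. A worked single-group sketch in pure Python (`affine_dequant_group` is a hypothetical helper; the scale is an exact binary fraction so the assertion holds bit-for-bit):

    def affine_dequant_group(q_codes, scale, bias):
        # q_codes are small non-negative integers, e.g. 0..15 for bits=4;
        # the group's representable range is [bias, bias + (2**bits - 1) * scale].
        return [scale * q + bias for q in q_codes]

    # 4-bit example: code 0 maps to the bias, code 15 to the top of the range.
    assert affine_dequant_group([0, 15], 0.125, -0.75) == [-0.75, 1.125]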
+    def test_qmm_vjp(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+
+        bits = 8
+        group_size = 64
+        M = 64
+        N = 1024
+        K = 512
+
+        x = mx.random.normal(shape=(2, M, K), key=k1)
+        c = mx.ones(shape=(2, M, N))
+
+        transposes = [True, False]
+        for transposed in transposes:
+            w = mx.random.normal(shape=(N, K) if transposed else (K, N), key=k2)
+            w_q, scales, biases = mx.quantize(w, group_size, bits)
+
+            def fn(x):
+                return mx.quantized_matmul(
+                    x, w_q, scales, biases, transposed, group_size, bits
+                )
+
+            _, vjp_out = mx.vjp(fn, primals=(x,), cotangents=(c,))
+
+            expected_out = mx.quantized_matmul(
+                c, w_q, scales, biases, not transposed, group_size, bits
+            )
+            self.assertTrue(mx.allclose(vjp_out[0], expected_out))
+
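The expected_out above relies on the identity that for the linear map f(x) = x Wᵀ the vector-Jacobian product with cotangent c is c W, i.e. the same quantized matmul with the transpose flag flipped. A minimal sanity check of that identity with plain matmuls (no quantization), using the same mx.vjp call shape as the test:

    W = mx.random.normal(shape=(8, 4))
    x0 = mx.random.normal(shape=(2, 4))
    c0 = mx.ones(shape=(2, 8))
    _, vjps = mx.vjp(lambda x: x @ W.T, primals=(x0,), cotangents=(c0,))
    assert mx.allclose(vjps[0], c0 @ W)  # pull back the cotangent through W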
+    def test_qmm_jvp(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+
+        bits = 8
+        group_size = 64
+        M = 64
+        N = 128
+        K = 128
+
+        x = mx.random.normal(shape=(2, M, K), key=k1)
+        x_tan = mx.ones(shape=(2, M, K))  # tangent must match the primal's shape
+
+        transposes = [True, False]
+        for transposed in transposes:
+            w = mx.random.normal(shape=(N, K) if transposed else (K, N), key=k2)
+            w_q, scales, biases = mx.quantize(w, group_size, bits)
+
+            def fn(x):
+                return mx.quantized_matmul(
+                    x, w_q, scales, biases, transposed, group_size, bits
+                )
+
+            _, jvp_out = mx.jvp(fn, primals=(x,), tangents=(x_tan,))
+
+            expected_out = mx.quantized_matmul(
+                x_tan, w_q, scales, biases, transposed, group_size, bits
+            )
+            self.assertTrue(mx.allclose(jvp_out[0], expected_out))
+
+    def test_qmm_shapes(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        group_size = 64
+        bits = 4
+        w = mx.random.normal(shape=(32, 256), key=k2)
+        w_q, scales, biases = mx.quantize(w, group_size, bits)
+        w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+        for s in [(3, 256), (2, 1, 7, 256)]:
+            x = mx.random.normal(shape=s, key=k1)
+            y_q = mx.quantized_matmul(x, w_q, scales, biases, True, group_size, bits)
+            y_hat = x @ w_hat.T
+            self.assertEqual(y_q.shape, y_hat.shape)
+            self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        w = mx.random.normal(shape=(256, 256), key=k2)
+        w_q, scales, biases = mx.quantize(w, group_size, bits)
+        w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+        for s in [(3, 256), (2, 1, 7, 256)]:
+            x = mx.random.normal(shape=s, key=k1)
+            y_q = mx.quantized_matmul(x, w_q, scales, biases, False, group_size, bits)
+            y_hat = x @ w_hat
+            self.assertEqual(y_q.shape, y_hat.shape)
+            self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+    def test_qmv(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        tests = product(
+            [128, 64, 32],  # group_size
+            [2, 3, 4, 5, 6, 8],  # bits
+            [256, 512, 67],  # M
+            [64, 256],  # N
+            [0, 1, 3, 8],  # B
+        )
+        for group_size, bits, M, N, B in tests:
+            if group_size > N:
+                continue
+            with self.subTest(shape=(B, M, N), group_size=group_size, bits=bits):
+                x_shape = (3, 1, N) if B == 0 else (B, 1, N)
+                w_shape = (M, N) if B == 0 else (B, M, N)
+                x = mx.random.normal(shape=x_shape, key=k1)
+                w = mx.random.normal(shape=w_shape, key=k2)
+                w_q, scales, biases = mx.quantize(w, group_size, bits)
+                w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+                y_q = mx.quantized_matmul(
+                    x, w_q, scales, biases, True, group_size, bits
+                )
+                y_hat = x @ mx.swapaxes(w_hat, -1, -2)
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+    def test_fp_qmv(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        tests = product(
+            [256, 512, 67],  # M
+            [64, 256],  # N
+            [0, 1, 3, 8],  # B
+        )
+        modes = ["mxfp4", "nvfp4", "mxfp8"]
+        for M, N, B in tests:
+            for mode in modes:
+                with self.subTest(shape=(B, M, N), mode=mode):
+                    x_shape = (3, 1, N) if B == 0 else (B, 1, N)
+                    w_shape = (M, N) if B == 0 else (B, M, N)
+                    x = mx.random.normal(shape=x_shape, key=k1)
+                    w = mx.random.normal(shape=w_shape, key=k2)
+                    w_q, scales = mx.quantize(w, mode=mode)
+                    w_hat = mx.dequantize(w_q, scales, mode=mode)
+                    y_q = mx.quantized_matmul(
+                        x,
+                        w_q,
+                        scales,
+                        transpose=True,
+                        mode=mode,
+                    )
+                    y_hat = x @ mx.swapaxes(w_hat, -1, -2)
+                    self.assertEqual(y_q.shape, y_hat.shape)
+                    self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test an N that is a multiple of 16 but not of 32
+        M = 128
+        N = 48
+        mode = "nvfp4"
+        with self.subTest(shape=(B, M, N), mode=mode):
+            x_shape = (1, N)
+            w_shape = (M, N)
+            x = mx.random.normal(shape=x_shape, key=k1)
+            w = mx.random.normal(shape=w_shape, key=k2)
+            w_q, scales = mx.quantize(w, mode=mode)
+            w_hat = mx.dequantize(w_q, scales, mode=mode)
+            y_q = mx.quantized_matmul(
+                x,
+                w_q,
+                scales,
+                transpose=True,
+                mode=mode,
+            )
+            y_hat = x @ mx.swapaxes(w_hat, -1, -2)
+            self.assertEqual(y_q.shape, y_hat.shape)
+            self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
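The trailing N = 48 case pins down the group-size difference between the floating-point modes: nvfp4 scales every 16 elements while mxfp4/mxfp8 scale every 32 (consistent with the group_size=32 used in the mxfp8 test and the 16-wide rows in the nvfp4 test), so 48 divides evenly only for nvfp4. The constraint as a hedged sketch (the table is inferred from these tests, not quoted from documentation):

    # Inferred per-mode group sizes: the inner dimension must be a
    # multiple of the group size so every scale covers a complete group.
    GROUP_SIZE = {"mxfp4": 32, "mxfp8": 32, "nvfp4": 16}

    def inner_dim_ok(n, mode):
        return n % GROUP_SIZE[mode] == 0

    assert inner_dim_ok(48, "nvfp4") and not inner_dim_ok(48, "mxfp8")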
+    def test_qvm(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        tests = product(
+            [128, 64, 32],  # group_size
+            [2, 3, 4, 5, 6, 8],  # bits
+            [32, 128, 256],  # M
+            [128, 256, 67],  # N
+            [0, 1, 3, 8],  # B
+        )
+        for group_size, bits, M, N, B in tests:
+            with self.subTest(shape=(B, M, N), group_size=group_size, bits=bits):
+                if M < group_size:
+                    continue
+                x_shape = (1, N) if B == 0 else (B, 1, N)
+                w_shape = (N, M) if B == 0 else (B, N, M)
+                x = mx.random.normal(shape=x_shape, key=k1)
+                w = mx.random.normal(shape=w_shape, key=k2)
+                w_q, scales, biases = mx.quantize(w, group_size, bits)
+                w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+                y_q = mx.quantized_matmul(
+                    x, w_q, scales, biases, False, group_size, bits
+                )
+                y_hat = x @ w_hat
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+    def test_qvm_splitk(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        tests = product(
+            [128, 64, 32],  # group_size
+            [2, 4, 8],  # bits
+            [128],  # M
+            [16384],  # N
+            [1, 3],  # B
+        )
+        for group_size, bits, M, N, B in tests:
+            with self.subTest(shape=(B, M, N), group_size=group_size, bits=bits):
+                x_shape = (1, N) if B == 0 else (B, 1, N)
+                w_shape = (N, M) if B == 0 else (B, N, M)
+                x = 1e-1 * mx.random.normal(shape=x_shape, key=k1)
+                w = 1e-1 * mx.random.normal(shape=w_shape, key=k2)
+                w_q, scales, biases = mx.quantize(w, group_size, bits)
+                w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+                y_q = mx.quantized_matmul(
+                    x, w_q, scales, biases, False, group_size, bits
+                )
+                y_hat = x @ w_hat
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 2e-3)
+
+        # Test with a 1D vector
+        group_size = 32
+        bits = 8
+        N = 2048
+        x = 1e-1 * mx.random.normal(shape=(N,), key=k1)
+        w = 1e-1 * mx.random.normal(shape=(N, N), key=k2)
+        w_q, scales, biases = mx.quantize(w, group_size, bits)
+        w_hat = mx.dequantize(w_q, scales, biases, group_size, bits)
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, False, group_size, bits)
+        y_hat = x @ w_hat
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 2e-3)
+
+    def test_fp_qvm(self):
+        key = mx.random.key(0)
+        k1, k2 = mx.random.split(key)
+        tests = product(
+            [32, 128, 256],  # M
+            [128, 256, 67],  # N
+            [0, 1, 3, 8],  # B
+        )
+        # Add a split-k case
+        tests = list(tests)
+        tests.append((128, 16384, 0))
+        modes = ["mxfp4", "nvfp4", "mxfp8"]
+
+        for M, N, B in tests:
+            for mode in modes:
+                with self.subTest(shape=(B, M, N), mode=mode):
+                    x_shape = (1, N) if B == 0 else (B, 1, N)
+                    w_shape = (N, M) if B == 0 else (B, N, M)
+                    x = mx.random.normal(shape=x_shape, key=k1)
+                    w = mx.random.normal(shape=w_shape, key=k2)
+                    w_q, scales = mx.quantize(w, mode=mode)
+                    w_hat = mx.dequantize(w_q, scales, mode=mode)
+                    y_q = mx.quantized_matmul(
+                        x,
+                        w_q,
+                        scales,
+                        transpose=False,
+                        mode=mode,
+                    )
+                    y_hat = x @ w_hat
+                    self.assertEqual(y_q.shape, y_hat.shape)
+                    self.assertLess((y_q - y_hat).abs().max(), 2e-3)
+
+    def test_mode_error_cases(self):
+        w = mx.random.normal(shape=(256, 256))
+        x = mx.random.normal(shape=(1, 256))
+
+        # Invalid mode
+        with self.assertRaises(ValueError):
+            mx.quantize(w, mode="xyz")
+
+        wq, scales, biases = mx.quantize(w, bits=4, group_size=32)
+
+        with self.assertRaises(ValueError):
+            mx.dequantize(wq, scales, biases, bits=4, group_size=32, mode="xyz")
+
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(
+                x, wq, scales, biases, bits=4, group_size=32, mode="xyz"
+            )
+
+        rhs_indices = mx.array(0)
+        with self.assertRaises(ValueError):
+            mx.gather_qmm(
+                x,
+                wq,
+                scales,
+                biases,
+                rhs_indices=rhs_indices,
+                bits=4,
+                group_size=32,
+                mode="xyz",
+            )
+
+        # Only floating point types can be quantized
+        with self.assertRaises(ValueError):
+            mx.quantize(mx.zeros((128, 128), mx.int32))
+
+        with self.assertRaises(ValueError):
+            mx.quantize(mx.zeros((128, 128), mx.int32), mode="mxfp4")
+
+        # Affine mode requires biases
+        with self.assertRaises(ValueError):
+            mx.dequantize(wq, scales, None, bits=4, group_size=32)
+
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(x, wq, scales, None, bits=4, group_size=32)
+
+        with self.assertRaises(ValueError):
+            mx.gather_qmm(
+                x, wq, scales, None, rhs_indices=rhs_indices, bits=4, group_size=32
+            )
+
+        # Inputs, scales, and biases must be floating point
+        x = mx.zeros(shape=(256,), dtype=mx.int32)
+        scales = mx.zeros(scales.shape, dtype=mx.int32)
+        biases = mx.zeros(biases.shape, dtype=mx.int32)
+        with self.assertRaises(ValueError):
+            mx.dequantize(wq, scales, biases, bits=4, group_size=32)
+
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(x, wq, scales, biases, bits=4, group_size=32)
+
+        with self.assertRaises(ValueError):
+            mx.gather_qmm(
+                x, wq, scales, biases, rhs_indices=rhs_indices, bits=4, group_size=32
+            )
+
+    def test_throw(self):
+        x = mx.random.normal(shape=(10, 512))
+        w = mx.random.normal(shape=(32, 512))
+        w_q, scales, biases = mx.quantize(w)
+
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(x, w_q.T, scales, biases)
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(x, w_q.T, scales.T, biases)
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(x, w_q, scales, biases, False)
+        with self.assertRaises(ValueError):
+            mx.quantized_matmul(x, w_q, scales.T, biases.T)
+        y = mx.quantized_matmul(x, w_q, scales, biases, True)
+        mx.eval(y)
+
+    def test_small_matrix(self):
+        for w_shape in [(8, 256), (1, 8, 256), (3, 8, 256)]:
+            with self.subTest(w_shape=w_shape):
+                w = mx.random.normal(shape=w_shape)
+                w_q, scales, biases = mx.quantize(w)
+                w_hat = mx.dequantize(w_q, scales, biases)
+
+                # Test qmv
+                for shape in [(3, 1, 256), (3, 4, 256)]:
+                    x = mx.random.normal(shape=shape)
+                    y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=True)
+                    y_hat = x @ mx.swapaxes(w_hat, -1, -2)
+                    self.assertEqual(y_q.shape, y_hat.shape)
+                    self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+                # Test qmm_t
+                x = mx.random.normal(shape=(3, 10, 256))
+                y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=True)
+                y_hat = x @ mx.swapaxes(w_hat, -1, -2)
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+                # Test qvm
+                x = mx.random.normal(shape=(3, 1, 8))
+                y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=False)
+                y_hat = x @ w_hat
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+                # Test qmm
+                x = mx.random.normal(shape=(3, 10, 8))
+                y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=False)
+                y_hat = x @ w_hat
+                self.assertEqual(y_q.shape, y_hat.shape)
+                self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+    def test_non_multiples(self):
+        w = mx.random.normal(shape=(33, 256))
+        w_q, scales, biases = mx.quantize(w)
+        w_hat = mx.dequantize(w_q, scales, biases)
+
+        # Test qmv
+        x = mx.random.normal(shape=(1, 256))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=True)
+        y_hat = x @ w_hat.T
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test qmm_t
+        x = mx.random.normal(shape=(10, 256))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=True)
+        y_hat = x @ w_hat.T
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test qvm
+        x = mx.random.normal(shape=(1, 33))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=False)
+        y_hat = x @ w_hat
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test qmm
+        x = mx.random.normal(shape=(10, 33))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=False)
+        y_hat = x @ w_hat
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Smaller than 8
+        w = mx.random.normal(shape=(3, 256))
+        w_q, scales, biases = mx.quantize(w)
+        w_hat = mx.dequantize(w_q, scales, biases)
+
+        # Test qmv
+        x = mx.random.normal(shape=(1, 256))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=True)
+        y_hat = x @ w_hat.T
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test qmm_t
+        x = mx.random.normal(shape=(10, 256))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=True)
+        y_hat = x @ w_hat.T
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test qvm
+        x = mx.random.normal(shape=(1, 3))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=False)
+        y_hat = x @ w_hat
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test qmm
+        x = mx.random.normal(shape=(10, 3))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=False)
+        y_hat = x @ w_hat
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+        # Test with unaligned sizes larger than 128
+        w = mx.random.normal(shape=(99, 256))
+        w_q, scales, biases = mx.quantize(w)
+        w_hat = mx.dequantize(w_q, scales, biases)
+        x = mx.random.normal(shape=(129, 256))
+        y_q = mx.quantized_matmul(x, w_q, scales, biases, transpose=True)
+        y_hat = x @ w_hat.T
+        self.assertEqual(y_q.shape, y_hat.shape)
+        self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+    def test_qmv_small_non_multiples(self):
+        # Test very small K and N dimensions (e.g., [MxK] x [NxK].T = [MxN])
+        # Each tuple is (M, K, N): input rows, inner dimension, weight rows
+        test_cases = [
+            (1, 32, 3),
+            (2, 32, 10),
+            (1, 32, 5),
+            (4, 32, 7),
+        ]
+
+        # Test different quantization settings (bits, group_size, mode)
+        quantization_settings = [
+            (4, 32, "affine"),
+            (6, 32, "affine"),
+            (4, 16, "nvfp4"),
+        ]
+
+        for M, K, N in test_cases:
+            for bits, group_size, mode in quantization_settings:
+                # Test without a batch dimension
+                with self.subTest(
+                    M=M,
+                    K=K,
+                    N=N,
+                    batch=None,
+                    group_size=group_size,
+                    bits=bits,
+                    mode=mode,
+                ):
+                    w = mx.random.normal(shape=(N, K))
+                    w_q, *sb = mx.quantize(
+                        w,
+                        group_size=group_size,
+                        bits=bits,
+                        mode=mode,
+                    )
+                    w_hat = mx.dequantize(
+                        w_q,
+                        *sb,
+                        group_size=group_size,
+                        bits=bits,
+                        mode=mode,
+                    )
+
+                    # Test qmv/qmm_t (transpose=True): [MxK] @ [NxK].T = [MxN]
+                    x = mx.random.normal(shape=(M, K))
+                    y_q = mx.quantized_matmul(
+                        x,
+                        w_q,
+                        *sb,
+                        transpose=True,
+                        group_size=group_size,
+                        bits=bits,
+                        mode=mode,
+                    )
+                    y_hat = x @ mx.swapaxes(w_hat, -1, -2)
+                    self.assertEqual(y_q.shape, y_hat.shape)
+                    self.assertLess((y_q - y_hat).abs().max(), 1e-3)
+
+    def test_gather_qmm(self):
+        def quantize(w, transpose=True, group_size=None, bits=None, mode="affine"):
+            if mode == "affine":
+                qw, s, b = mx.quantize(w, group_size=group_size, bits=bits, mode=mode)
+            else:
+                qw, s = mx.quantize(w, group_size=group_size, bits=bits, mode=mode)
+                b = None
+            w_hat = mx.dequantize(qw, s, b, group_size=group_size, bits=bits, mode=mode)
+            if transpose:
+                w_hat = w_hat.swapaxes(-1, -2)
+            return w_hat, qw, s, b
+
+        def test_shape(
+            M,
+            N,
+            K,
+            dtype=mx.float32,
+            batch_A=(),
+            batch_B=(),
+            lhs_indices=None,
+            rhs_indices=None,
+            transpose=True,
+            group_size=None,
+            bits=None,
+            mode="affine",
+        ):
+            with self.subTest(
+                M=M,
+                N=N,
+                K=K,
+                dtype=dtype,
+                batch_A=batch_A,
+                batch_B=batch_B,
+                lhs_indices=lhs_indices,
+                rhs_indices=rhs_indices,
+                transpose=transpose,
+                group_size=group_size,
+                bits=bits,
+                mode=mode,
+            ):
+                x = mx.random.normal(shape=batch_A + (M, K)).astype(dtype)
+                w = mx.random.normal(
+                    shape=batch_B + ((N, K) if transpose else (K, N))
+                ).astype(dtype)
+                w_hat, qw, s, b = quantize(w, transpose, group_size, bits, mode=mode)
+
+                if lhs_indices is not None:
+                    lhs_indices = mx.array(lhs_indices)
+                if rhs_indices is not None:
+                    rhs_indices = mx.array(rhs_indices)
+
+                c1 = mx.gather_mm(x, w_hat, lhs_indices, rhs_indices)
+                c2 = mx.gather_qmm(
+                    x,
+                    qw,
+                    s,
+                    b,
+                    lhs_indices,
+                    rhs_indices,
+                    transpose=transpose,
+                    group_size=group_size,
+                    bits=bits,
+                    mode=mode,
+                )
+                self.assertTrue(mx.allclose(c1, c2, atol=1e-4))
+
+        inputs = (
+            {
+                "batch_A": (1,),
+                "lhs_indices": (0,),
+                "batch_B": (3,),
+                "rhs_indices": (2, 1),
+            },
+            {
+                "batch_A": (1,),
+                "lhs_indices": None,
+                "batch_B": (3,),
+                "rhs_indices": (2, 1),
+            },
+            {
+                "batch_A": (2,),
+                "lhs_indices": None,
+                "batch_B": (3,),
+                "rhs_indices": (2, 1),
+            },
+            {
+                "batch_A": (3,),
+                "lhs_indices": (0, 2),
+                "batch_B": (1,),
+                "rhs_indices": (0,),
+            },
+            {
+                "batch_A": (5,),
+                "lhs_indices": (0, 2),
+                "batch_B": (3,),
+                "rhs_indices": (2, 1),
+            },
+            {
+                "batch_A": (4, 2),
+                "lhs_indices": (
+                    (7, 6),
+                    (5, 4),
+                    (1, 2),
+                ),
+                "batch_B": (4, 1),
+                "rhs_indices": ((2,), (0,), (1,)),
+            },
+            {
+                "batch_A": (1,),
+                "lhs_indices": (0,),
+                "batch_B": (3,),
+                "rhs_indices": (2, 1),
+                "mode": "nvfp4",
+            },
+            {
+                "batch_A": (1,),
+                "lhs_indices": (0,),
+                "batch_B": (3,),
+                "rhs_indices": (2, 1),
+                "mode": "mxfp4",
+            },
+            {
+                "batch_A": (1,),
+                "lhs_indices": (0,),
+                "batch_B": (3,),
+                "rhs_indices": (2, 1),
+                "mode": "mxfp8",
+            },
+        )
+
+        for kwargs in inputs:
+            test_shape(1, 32, 128, **kwargs)
+            test_shape(32, 32, 256, **kwargs)
+            test_shape(1, 32, 256, **kwargs)
+            test_shape(32, 256, 32, transpose=False, **kwargs)
+            test_shape(1, 256, 32, transpose=False, **kwargs)
+            test_shape(32, 32, 512, **kwargs)
+            test_shape(1, 32, 512, **kwargs)
+            test_shape(32, 512, 32, transpose=False, **kwargs)
+            test_shape(1, 512, 32, transpose=False, **kwargs)
+
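gather_mm and gather_qmm perform a batched matmul after selecting batch entries of each operand by index, which is what makes them useful for mixture-of-experts layers; each case above checks the quantized op against gather_mm on the dequantized weights. A minimal NumPy sketch of the gathered-batch semantics (hypothetical helper; index-shape broadcasting and the None defaults are omitted for brevity):

    import numpy as np

    def gather_mm_sketch(x, w, lhs_indices, rhs_indices):
        # x: (A, M, K), w: (B, K, N); for each index pair (i, j), multiply
        # batch i of x with batch j of w. Assumes two equal-length flat
        # index lists.
        return np.stack([x[i] @ w[j] for i, j in zip(lhs_indices, rhs_indices)])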
+    def test_qmm_fp_type(self):
+        indices = mx.array([[2], [0], [1]], dtype=mx.uint32)
+
+        modes = ["mxfp8", "mxfp4"]
+        for mode in modes:
+            for t in [mx.bfloat16, mx.float16, mx.float32]:
+                x = mx.random.normal((32, 256)).astype(t)
+
+                w = mx.random.normal((32, 256))
+                wq, s = mx.quantize(w, mode=mode)
+                out = mx.quantized_matmul(x, wq, s, mode=mode)
+                self.assertEqual(out.dtype, t)
+
+                w = mx.random.normal((4, 32, 256))
+                wq, s = mx.quantize(w, mode=mode)
+
+                out = mx.gather_qmm(x, wq, s, rhs_indices=indices, mode=mode)
+                self.assertEqual(out.dtype, t)
+
+    def test_gather_matmul_grad(self):
+        def quantize(w, transpose=True, group_size=64, bits=4):
+            qw, s, b = mx.quantize(w, group_size=group_size, bits=bits)
+            w_hat = mx.dequantize(qw, s, b, group_size=group_size, bits=bits)
+            if transpose:
+                w_hat = w_hat.swapaxes(-1, -2)
+            return w_hat, qw, s, b
+
+        lhs_indices = mx.array([[7, 6], [4, 1], [0, 2]], dtype=mx.uint32)
+        rhs_indices = mx.array([[2], [0], [1]], dtype=mx.uint32)
+
+        x = mx.random.normal((4, 2, 32, 256))
+        w = mx.random.normal((4, 1, 32, 256))
+        w_hat, qw, s, b = quantize(w)
+
+        def f_ref(x, w, i1, i2):
+            return mx.gather_mm(x, w, i1, i2).sum()
+
+        def f_test(x, qw, s, b, i1, i2):
+            return mx.gather_qmm(x, qw, s, b, i1, i2, transpose=True).sum()
+
+        r1 = f_ref(x, w_hat, lhs_indices, rhs_indices)
+        r2 = f_test(x, qw, s, b, lhs_indices, rhs_indices)
+        self.assertTrue(mx.allclose(r1, r2, atol=1e-4))
+
+        g1 = mx.grad(f_ref)(x, w_hat, lhs_indices, rhs_indices)
+        g2 = mx.grad(f_test)(x, qw, s, b, lhs_indices, rhs_indices)
+        self.assertTrue(mx.allclose(g1, g2, atol=1e-4))
+
+    def test_gather_qmm_sorted(self):
+        def quantize(w, transpose=True, group_size=None, mode="affine"):
+            if mode == "affine":
+                qw, s, b = mx.quantize(w, group_size=group_size, mode=mode)
+            else:
+                qw, s = mx.quantize(w, mode=mode)
+                b = None
+
+            w_hat = mx.dequantize(qw, s, b, group_size=group_size, mode=mode)
+            if transpose:
+                w_hat = w_hat.swapaxes(-1, -2)
+            return w_hat, qw, s, b
+
+        def gather_sort(x, indices):
+            N, M = indices.shape
+            indices = indices.flatten()
+            order = mx.argsort(indices)
+            inv_order = mx.argsort(order)
+            return x.flatten(0, -3)[order // M], indices[order], inv_order
+
+        def scatter_unsort(x, inv_order, shape=None):
+            x = x[inv_order]
+            if shape is not None:
+                x = mx.unflatten(x, 0, shape)
+            return x
+
+        parameters = [
+            # L, K, D, E, I, transpose, mode
+            (32, 512, 512, 4, 2, True, "affine"),
+            (32, 512, 544, 4, 2, True, "mxfp4"),
+            (32, 512, 544, 4, 2, True, "nvfp4"),
+            (32, 512, 544, 4, 2, True, "mxfp8"),
+            (133, 512, 512, 4, 2, True, "affine"),
+            (133, 512, 555, 4, 2, True, "affine"),
+            (133, 512, 512, 4, 2, True, "affine"),
+            (64, 512, 512, 4, 2, False, "affine"),
+            (64, 512, 544, 4, 2, False, "mxfp4"),
+            (64, 512, 544, 4, 2, False, "nvfp4"),
+            (64, 512, 544, 4, 2, False, "mxfp8"),
+            (133, 512, 512, 4, 2, False, "affine"),
+            (133, 512, 544, 4, 2, False, "affine"),
+            (133, 512, 555, 4, 2, False, "affine"),
+            (64, 512, 512, 4, 2, False, "affine"),
+        ]
+
+        key = mx.random.key(0)
+        k1, k2, k3 = mx.random.split(key, 3)
+
+        for L, K, D, E, I, transpose, mode in parameters:
+            with self.subTest(L=L, K=K, D=D, E=E, I=I, transpose=transpose, mode=mode):
+                if mode != "affine":
+                    group_size = None
+                    dtype = (
+                        mx.bfloat16 if (mx.default_device() == mx.gpu) else mx.float32
+                    )
+                else:
+                    group_size = 64
+                    dtype = (
+                        mx.float16 if (mx.default_device() == mx.gpu) else mx.float32
+                    )
+
+                K, D = (K, D) if transpose else (D, K)
+                ishape = (L, I)
+                xshape = (L, 1, 1, K)
+                wshape = (E, D, K) if transpose else (E, K, D)
+
+                indices = (mx.random.uniform(shape=ishape, key=k1) * E).astype(
+                    mx.uint32
+                )
+                x = mx.random.normal(xshape, key=k2) / K**0.5
+                w = mx.random.normal(wshape, key=k3) / K**0.5
+
+                x = x.astype(dtype)
+                w = w.astype(dtype)
+
+                w, *wq = quantize(
+                    w, group_size=group_size, mode=mode, transpose=transpose
+                )
+
+                y1 = mx.gather_mm(x, w, rhs_indices=indices)
+                y2 = mx.gather_qmm(
+                    x,
+                    *wq,
+                    group_size=group_size,
+                    mode=mode,
+                    transpose=transpose,
+                    rhs_indices=indices,
+                )
+                xs, idx, inv_order = gather_sort(x, indices)
+                y3 = mx.gather_mm(xs, w, rhs_indices=idx, sorted_indices=True)
+
+                y4 = mx.gather_qmm(
+                    xs,
+                    *wq,
+                    group_size=group_size,
+                    mode=mode,
+                    rhs_indices=idx,
+                    transpose=transpose,
+                    sorted_indices=True,
+                )
+                y3 = scatter_unsort(y3, inv_order, indices.shape)
+                y4 = scatter_unsort(y4, inv_order, indices.shape)
+
+                tol = 1.5e-5 if (dtype == mx.float32) else 2.5e-4
+
+                self.assertLess((y1 - y2).abs().max(), tol)
+                self.assertLess((y1 - y3).abs().max(), tol)
+                self.assertLess((y1 - y4).abs().max(), tol)
+
+                self.assertTrue(mx.allclose(y1, y2, atol=tol))
+                self.assertTrue(mx.allclose(y1, y3, atol=tol))
+                self.assertTrue(mx.allclose(y1, y4, atol=tol))
+
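sorted_indices=True is a fast-path promise: when rows are pre-grouped by expert index, the kernel can process all rows that share a weight matrix contiguously. gather_sort builds that ordering and scatter_unsort undoes it via the argsort-of-argsort trick, which this small check illustrates:

    # If `order` sorts the indices, argsort(order) is the inverse
    # permutation, so gathering and then un-gathering is the identity.
    idx = mx.array([2, 0, 1, 0])
    order = mx.argsort(idx)
    inv_order = mx.argsort(order)
    assert mx.all(idx[order][inv_order] == idx)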
+    def test_gather_qmm_grad(self):
+        def gather_qmm_ref(x, w, s, b, lhs, rhs, trans, sort):
+            if lhs is not None:
+                x = x[lhs]
+            if rhs is not None:
+                w = w[rhs]
+                s = s[rhs]
+                b = b[rhs]
+            return mx.quantized_matmul(x, w, s, b, transpose=trans)
+
+        def gather_qmm(x, w, s, b, lhs, rhs, trans, sort):
+            return mx.gather_qmm(
+                x,
+                w,
+                s,
+                b,
+                transpose=trans,
+                lhs_indices=lhs,
+                rhs_indices=rhs,
+                sorted_indices=sort,
+            )
+
+        key = mx.random.key(0)
+        k1, k2, k3, k4 = mx.random.split(key, 4)
+        dtype = mx.float32
+
+        x = mx.random.normal((16, 1, 256), key=k1).astype(dtype)
+        w, s, b = mx.quantize(mx.random.normal((4, 256, 256), key=k2).astype(dtype))
+        indices = mx.sort(mx.random.randint(0, 4, shape=(16,), key=k3))
+        cotan = mx.random.normal((16, 1, 256), key=k4).astype(dtype)
+
+        (o1,), (dx1, ds1, db1) = mx.vjp(
+            lambda x, s, b: gather_qmm_ref(x, w, s, b, None, indices, True, True),
+            [x, s, b],
+            [cotan],
+        )
+        (o2,), (dx2, ds2, db2) = mx.vjp(
+            lambda x, s, b: gather_qmm(x, w, s, b, None, indices, True, True),
+            [x, s, b],
+            [cotan],
+        )
+
+        self.assertLess((o1 - o2).abs().max(), 1e-4)
+        self.assertTrue(mx.allclose(o1, o2, atol=1e-4))
+        self.assertTrue(mx.allclose(dx1, dx2, atol=1e-4))
+        self.assertTrue(mx.allclose(ds1, ds2, atol=1e-3))
+        self.assertTrue(mx.allclose(db1, db2, atol=1e-3))
+
+    def test_vjp_scales_biases(self):
+        mx.random.seed(0)
+        x = mx.random.normal(shape=(2, 2, 512))
+        w = mx.random.normal(shape=(512, 512))
+        wq, s, b = mx.quantize(w, bits=4, group_size=64)
+
+        def mm(sb, x, wq):
+            return mx.quantized_matmul(x, wq, *sb, bits=4, group_size=64).sum()
+
+        params = (s, b)
+        dparams = mx.grad(mm)((s, b), x, wq)
+
+        eps = 8e-3
+        # Numerical gradient check at a few indices
+        indices = [(0, 0), (11, 4), (22, 7)]
+        for idx in indices:
+            for p in [0, 1]:
+                params[p][idx] += eps
+                out_up = mm(params, x, wq)
+                params[p][idx] -= 2 * eps
+                out_down = mm(params, x, wq)
+                params[p][idx] += eps
+                num_ds = (out_up - out_down) / (2 * eps)
+                self.assertAlmostEqual(dparams[p][idx], num_ds, delta=2e-2)
+
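The loop above is a central-difference check: each scale and bias entry is nudged by ±eps and the symmetric estimate (f(p + eps) - f(p - eps)) / (2 * eps) is compared against the analytic gradient, which is accurate to O(eps^2) for smooth f. The same estimator as a standalone sketch (`central_difference` is a hypothetical helper name):

    def central_difference(f, params, idx, eps=8e-3):
        # Estimate the partial derivative of f with respect to params[idx]
        # by perturbing only that entry, then restoring it.
        params[idx] += eps
        up = f(params)
        params[idx] -= 2 * eps
        down = f(params)
        params[idx] += eps  # restore the original value
        return (up - down) / (2 * eps)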
+    def test_fp_vjp_scales_throws(self):
+        mx.random.seed(0)
+        x = mx.random.normal(shape=(2, 512))
+        w = mx.random.normal(shape=(512, 512))
+        for mode in ["mxfp4", "mxfp8", "nvfp4"]:
+            wq, s = mx.quantize(w, mode=mode)
+
+            def mm(s, x, wq):
+                return mx.quantized_matmul(x, wq, s, mode=mode).sum()
+
+            # Should raise
+            with self.assertRaises(ValueError):
+                ds = mx.grad(mm)(s, x, wq)
+
+            rhs_indices = mx.array(0)
+            with self.assertRaises(ValueError):
+
+                def gmm(s, x, wq):
+                    return mx.gather_qmm(
+                        x,
+                        wq,
+                        s,
+                        rhs_indices=rhs_indices,
+                        mode=mode,
+                    ).sum()
+
+                ds = mx.grad(gmm)(s, x, wq)
+
+    def test_quantize_strided(self):
+        N = 64
+        mode = "nvfp4"
+        w = mx.random.normal(shape=(N, N))
+        w_q, scales = mx.quantize(w, mode=mode)
+
+        scales = mx.broadcast_to(mx.array(56, mx.uint8), scales.shape)
+        w_hat = mx.dequantize(w_q, scales, mode=mode)
+        expected = mx.dequantize(w_q, mx.contiguous(scales), mode=mode)
+        self.assertTrue(mx.allclose(w_hat, expected))
+
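mx.broadcast_to yields a strided (zero-stride) view, so the final test feeds dequantize non-contiguous scales and compares against the same call on a densely materialized copy; mx.contiguous, used by the test itself, forces that copy without changing any values:

    s = mx.broadcast_to(mx.array(56, mx.uint8), (4, 4))  # zero-stride view
    s_dense = mx.contiguous(s)  # packed copy with the same values
    assert s.shape == s_dense.shape and mx.all(s == s_dense)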
+
+if __name__ == "__main__":
+    mlx_tests.MLXTestRunner()