mlx 0.30.7

This diff shows the content of publicly available package versions released to one of the supported registries; it is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (599)
  1. checksums.yaml +7 -0
  2. data/ext/mlx/extconf.rb +94 -0
  3. data/ext/mlx/native.cpp +8027 -0
  4. data/lib/mlx/core.rb +1678 -0
  5. data/lib/mlx/distributed_utils/common.rb +116 -0
  6. data/lib/mlx/distributed_utils/config.rb +600 -0
  7. data/lib/mlx/distributed_utils/launch.rb +490 -0
  8. data/lib/mlx/extension.rb +24 -0
  9. data/lib/mlx/nn/base.rb +388 -0
  10. data/lib/mlx/nn/init.rb +140 -0
  11. data/lib/mlx/nn/layers/activations.rb +336 -0
  12. data/lib/mlx/nn/layers/base.rb +6 -0
  13. data/lib/mlx/nn/layers/containers.rb +20 -0
  14. data/lib/mlx/nn/layers/convolution.rb +120 -0
  15. data/lib/mlx/nn/layers/convolution_transpose.rb +114 -0
  16. data/lib/mlx/nn/layers/distributed.rb +309 -0
  17. data/lib/mlx/nn/layers/dropout.rb +75 -0
  18. data/lib/mlx/nn/layers/embedding.rb +28 -0
  19. data/lib/mlx/nn/layers/linear.rb +79 -0
  20. data/lib/mlx/nn/layers/normalization.rb +216 -0
  21. data/lib/mlx/nn/layers/pooling.rb +167 -0
  22. data/lib/mlx/nn/layers/positional_encoding.rb +126 -0
  23. data/lib/mlx/nn/layers/quantized.rb +215 -0
  24. data/lib/mlx/nn/layers/recurrent.rb +135 -0
  25. data/lib/mlx/nn/layers/transformer.rb +330 -0
  26. data/lib/mlx/nn/layers/upsample.rb +97 -0
  27. data/lib/mlx/nn/layers.rb +18 -0
  28. data/lib/mlx/nn/losses.rb +251 -0
  29. data/lib/mlx/nn/utils.rb +167 -0
  30. data/lib/mlx/nn.rb +12 -0
  31. data/lib/mlx/optimizers/optimizers.rb +808 -0
  32. data/lib/mlx/optimizers/schedulers.rb +62 -0
  33. data/lib/mlx/optimizers.rb +9 -0
  34. data/lib/mlx/utils.rb +171 -0
  35. data/lib/mlx/version.rb +5 -0
  36. data/lib/mlx.rb +64 -0
  37. data/mlx/CMakeLists.txt +449 -0
  38. data/mlx/cmake/FindCUDNN.cmake +177 -0
  39. data/mlx/cmake/FindNCCL.cmake +54 -0
  40. data/mlx/cmake/Findnvpl.cmake +3 -0
  41. data/mlx/cmake/extension.cmake +50 -0
  42. data/mlx/mlx/3rdparty/.clang-format +2 -0
  43. data/mlx/mlx/3rdparty/pocketfft.h +3581 -0
  44. data/mlx/mlx/CMakeLists.txt +107 -0
  45. data/mlx/mlx/allocator.h +75 -0
  46. data/mlx/mlx/api.h +29 -0
  47. data/mlx/mlx/array.cpp +354 -0
  48. data/mlx/mlx/array.h +647 -0
  49. data/mlx/mlx/backend/common/CMakeLists.txt +9 -0
  50. data/mlx/mlx/backend/common/binary.h +97 -0
  51. data/mlx/mlx/backend/common/broadcasting.cpp +24 -0
  52. data/mlx/mlx/backend/common/broadcasting.h +11 -0
  53. data/mlx/mlx/backend/common/buffer_cache.h +158 -0
  54. data/mlx/mlx/backend/common/common.cpp +305 -0
  55. data/mlx/mlx/backend/common/compiled.cpp +243 -0
  56. data/mlx/mlx/backend/common/compiled.h +77 -0
  57. data/mlx/mlx/backend/common/copy.h +50 -0
  58. data/mlx/mlx/backend/common/hadamard.h +109 -0
  59. data/mlx/mlx/backend/common/load.cpp +57 -0
  60. data/mlx/mlx/backend/common/matmul.h +67 -0
  61. data/mlx/mlx/backend/common/reduce.cpp +154 -0
  62. data/mlx/mlx/backend/common/reduce.h +59 -0
  63. data/mlx/mlx/backend/common/slicing.cpp +71 -0
  64. data/mlx/mlx/backend/common/slicing.h +20 -0
  65. data/mlx/mlx/backend/common/ternary.h +85 -0
  66. data/mlx/mlx/backend/common/unary.h +29 -0
  67. data/mlx/mlx/backend/common/utils.cpp +231 -0
  68. data/mlx/mlx/backend/common/utils.h +205 -0
  69. data/mlx/mlx/backend/cpu/CMakeLists.txt +88 -0
  70. data/mlx/mlx/backend/cpu/arange.h +28 -0
  71. data/mlx/mlx/backend/cpu/arg_reduce.cpp +124 -0
  72. data/mlx/mlx/backend/cpu/binary.cpp +269 -0
  73. data/mlx/mlx/backend/cpu/binary.h +517 -0
  74. data/mlx/mlx/backend/cpu/binary_ops.h +98 -0
  75. data/mlx/mlx/backend/cpu/binary_two.h +166 -0
  76. data/mlx/mlx/backend/cpu/cholesky.cpp +85 -0
  77. data/mlx/mlx/backend/cpu/compiled.cpp +357 -0
  78. data/mlx/mlx/backend/cpu/compiled_preamble.h +12 -0
  79. data/mlx/mlx/backend/cpu/conv.cpp +1351 -0
  80. data/mlx/mlx/backend/cpu/copy.cpp +386 -0
  81. data/mlx/mlx/backend/cpu/copy.h +36 -0
  82. data/mlx/mlx/backend/cpu/device_info.cpp +113 -0
  83. data/mlx/mlx/backend/cpu/device_info.h +28 -0
  84. data/mlx/mlx/backend/cpu/distributed.cpp +103 -0
  85. data/mlx/mlx/backend/cpu/eig.cpp +281 -0
  86. data/mlx/mlx/backend/cpu/eigh.cpp +241 -0
  87. data/mlx/mlx/backend/cpu/encoder.cpp +16 -0
  88. data/mlx/mlx/backend/cpu/encoder.h +67 -0
  89. data/mlx/mlx/backend/cpu/eval.cpp +40 -0
  90. data/mlx/mlx/backend/cpu/eval.h +12 -0
  91. data/mlx/mlx/backend/cpu/fft.cpp +120 -0
  92. data/mlx/mlx/backend/cpu/gemm.h +26 -0
  93. data/mlx/mlx/backend/cpu/gemms/bnns.cpp +214 -0
  94. data/mlx/mlx/backend/cpu/gemms/cblas.cpp +134 -0
  95. data/mlx/mlx/backend/cpu/gemms/simd_bf16.cpp +45 -0
  96. data/mlx/mlx/backend/cpu/gemms/simd_fp16.cpp +45 -0
  97. data/mlx/mlx/backend/cpu/gemms/simd_gemm.h +139 -0
  98. data/mlx/mlx/backend/cpu/hadamard.cpp +121 -0
  99. data/mlx/mlx/backend/cpu/indexing.cpp +854 -0
  100. data/mlx/mlx/backend/cpu/inverse.cpp +160 -0
  101. data/mlx/mlx/backend/cpu/jit_compiler.cpp +166 -0
  102. data/mlx/mlx/backend/cpu/jit_compiler.h +20 -0
  103. data/mlx/mlx/backend/cpu/lapack.h +80 -0
  104. data/mlx/mlx/backend/cpu/logsumexp.cpp +139 -0
  105. data/mlx/mlx/backend/cpu/luf.cpp +120 -0
  106. data/mlx/mlx/backend/cpu/make_compiled_preamble.ps1 +38 -0
  107. data/mlx/mlx/backend/cpu/make_compiled_preamble.sh +41 -0
  108. data/mlx/mlx/backend/cpu/masked_mm.cpp +608 -0
  109. data/mlx/mlx/backend/cpu/matmul.cpp +166 -0
  110. data/mlx/mlx/backend/cpu/primitives.cpp +478 -0
  111. data/mlx/mlx/backend/cpu/qrf.cpp +147 -0
  112. data/mlx/mlx/backend/cpu/quantized.cpp +1370 -0
  113. data/mlx/mlx/backend/cpu/reduce.cpp +587 -0
  114. data/mlx/mlx/backend/cpu/scan.cpp +338 -0
  115. data/mlx/mlx/backend/cpu/select.cpp +95 -0
  116. data/mlx/mlx/backend/cpu/simd/accelerate_fp16_simd.h +56 -0
  117. data/mlx/mlx/backend/cpu/simd/accelerate_simd.h +329 -0
  118. data/mlx/mlx/backend/cpu/simd/base_simd.h +319 -0
  119. data/mlx/mlx/backend/cpu/simd/math.h +193 -0
  120. data/mlx/mlx/backend/cpu/simd/neon_fp16_simd.h +212 -0
  121. data/mlx/mlx/backend/cpu/simd/simd.h +4 -0
  122. data/mlx/mlx/backend/cpu/simd/type.h +11 -0
  123. data/mlx/mlx/backend/cpu/slicing.h +21 -0
  124. data/mlx/mlx/backend/cpu/softmax.cpp +170 -0
  125. data/mlx/mlx/backend/cpu/sort.cpp +481 -0
  126. data/mlx/mlx/backend/cpu/svd.cpp +289 -0
  127. data/mlx/mlx/backend/cpu/ternary.h +154 -0
  128. data/mlx/mlx/backend/cpu/threefry.cpp +31 -0
  129. data/mlx/mlx/backend/cpu/threefry.h +21 -0
  130. data/mlx/mlx/backend/cpu/unary.cpp +238 -0
  131. data/mlx/mlx/backend/cpu/unary.h +281 -0
  132. data/mlx/mlx/backend/cpu/unary_ops.h +175 -0
  133. data/mlx/mlx/backend/cuda/CMakeLists.txt +265 -0
  134. data/mlx/mlx/backend/cuda/allocator.cpp +451 -0
  135. data/mlx/mlx/backend/cuda/allocator.h +94 -0
  136. data/mlx/mlx/backend/cuda/arange.cu +68 -0
  137. data/mlx/mlx/backend/cuda/arg_reduce.cu +189 -0
  138. data/mlx/mlx/backend/cuda/bin2h.cmake +150 -0
  139. data/mlx/mlx/backend/cuda/binary/CMakeLists.txt +21 -0
  140. data/mlx/mlx/backend/cuda/binary/add.cu +7 -0
  141. data/mlx/mlx/backend/cuda/binary/arctan2.cu +7 -0
  142. data/mlx/mlx/backend/cuda/binary/binary.cuh +383 -0
  143. data/mlx/mlx/backend/cuda/binary/bitwise_binary.cu +27 -0
  144. data/mlx/mlx/backend/cuda/binary/divide.cu +7 -0
  145. data/mlx/mlx/backend/cuda/binary/equal.cu +15 -0
  146. data/mlx/mlx/backend/cuda/binary/greater.cu +7 -0
  147. data/mlx/mlx/backend/cuda/binary/greater_equal.cu +7 -0
  148. data/mlx/mlx/backend/cuda/binary/less.cu +7 -0
  149. data/mlx/mlx/backend/cuda/binary/less_equal.cu +7 -0
  150. data/mlx/mlx/backend/cuda/binary/log_add_exp.cu +7 -0
  151. data/mlx/mlx/backend/cuda/binary/logical_and.cu +7 -0
  152. data/mlx/mlx/backend/cuda/binary/logical_or.cu +7 -0
  153. data/mlx/mlx/backend/cuda/binary/maximum.cu +7 -0
  154. data/mlx/mlx/backend/cuda/binary/minimum.cu +7 -0
  155. data/mlx/mlx/backend/cuda/binary/multiply.cu +7 -0
  156. data/mlx/mlx/backend/cuda/binary/not_equal.cu +7 -0
  157. data/mlx/mlx/backend/cuda/binary/power.cu +7 -0
  158. data/mlx/mlx/backend/cuda/binary/remainder.cu +7 -0
  159. data/mlx/mlx/backend/cuda/binary/subtract.cu +7 -0
  160. data/mlx/mlx/backend/cuda/binary_two.cu +412 -0
  161. data/mlx/mlx/backend/cuda/compiled.cpp +357 -0
  162. data/mlx/mlx/backend/cuda/conv/conv.h +126 -0
  163. data/mlx/mlx/backend/cuda/conv/gemm_conv.cu +217 -0
  164. data/mlx/mlx/backend/cuda/conv/gemm_grouped_conv.cu +231 -0
  165. data/mlx/mlx/backend/cuda/conv.cpp +403 -0
  166. data/mlx/mlx/backend/cuda/copy/copy.cuh +55 -0
  167. data/mlx/mlx/backend/cuda/copy/copy_contiguous.cu +88 -0
  168. data/mlx/mlx/backend/cuda/copy/copy_general.cu +171 -0
  169. data/mlx/mlx/backend/cuda/copy/copy_general_dynamic.cu +118 -0
  170. data/mlx/mlx/backend/cuda/copy/copy_general_input.cu +229 -0
  171. data/mlx/mlx/backend/cuda/copy.cu +132 -0
  172. data/mlx/mlx/backend/cuda/cublas_utils.cpp +222 -0
  173. data/mlx/mlx/backend/cuda/cublas_utils.h +95 -0
  174. data/mlx/mlx/backend/cuda/cuda.h +21 -0
  175. data/mlx/mlx/backend/cuda/cuda_utils.h +90 -0
  176. data/mlx/mlx/backend/cuda/cudnn_utils.cpp +133 -0
  177. data/mlx/mlx/backend/cuda/cudnn_utils.h +187 -0
  178. data/mlx/mlx/backend/cuda/custom_kernel.cpp +379 -0
  179. data/mlx/mlx/backend/cuda/cutlass_utils.cuh +46 -0
  180. data/mlx/mlx/backend/cuda/delayload.cpp +80 -0
  181. data/mlx/mlx/backend/cuda/device/atomic_ops.cuh +63 -0
  182. data/mlx/mlx/backend/cuda/device/binary_ops.cuh +300 -0
  183. data/mlx/mlx/backend/cuda/device/cast_op.cuh +118 -0
  184. data/mlx/mlx/backend/cuda/device/complex.cuh +60 -0
  185. data/mlx/mlx/backend/cuda/device/config.h +12 -0
  186. data/mlx/mlx/backend/cuda/device/fp16_math.cuh +96 -0
  187. data/mlx/mlx/backend/cuda/device/gather.cuh +53 -0
  188. data/mlx/mlx/backend/cuda/device/gather_axis.cuh +65 -0
  189. data/mlx/mlx/backend/cuda/device/indexing.cuh +30 -0
  190. data/mlx/mlx/backend/cuda/device/scatter.cuh +68 -0
  191. data/mlx/mlx/backend/cuda/device/scatter_axis.cuh +67 -0
  192. data/mlx/mlx/backend/cuda/device/scatter_ops.cuh +44 -0
  193. data/mlx/mlx/backend/cuda/device/ternary_ops.cuh +13 -0
  194. data/mlx/mlx/backend/cuda/device/unary_ops.cuh +350 -0
  195. data/mlx/mlx/backend/cuda/device/utils.cuh +464 -0
  196. data/mlx/mlx/backend/cuda/device.cpp +522 -0
  197. data/mlx/mlx/backend/cuda/device.h +195 -0
  198. data/mlx/mlx/backend/cuda/device_info.cpp +232 -0
  199. data/mlx/mlx/backend/cuda/distributed.cu +121 -0
  200. data/mlx/mlx/backend/cuda/eval.cpp +66 -0
  201. data/mlx/mlx/backend/cuda/event.cu +415 -0
  202. data/mlx/mlx/backend/cuda/event.h +79 -0
  203. data/mlx/mlx/backend/cuda/fence.cpp +42 -0
  204. data/mlx/mlx/backend/cuda/gemms/cublas_gemm.cpp +233 -0
  205. data/mlx/mlx/backend/cuda/gemms/cublas_gemm.h +114 -0
  206. data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_0.cpp +77 -0
  207. data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu +329 -0
  208. data/mlx/mlx/backend/cuda/gemms/gemv.cu +327 -0
  209. data/mlx/mlx/backend/cuda/gemms/gemv.h +34 -0
  210. data/mlx/mlx/backend/cuda/gemms/grouped_gemm.h +25 -0
  211. data/mlx/mlx/backend/cuda/gemms/grouped_gemm_unaligned.cu +358 -0
  212. data/mlx/mlx/backend/cuda/indexing.cpp +434 -0
  213. data/mlx/mlx/backend/cuda/jit_module.cpp +443 -0
  214. data/mlx/mlx/backend/cuda/jit_module.h +120 -0
  215. data/mlx/mlx/backend/cuda/kernel_utils.cu +52 -0
  216. data/mlx/mlx/backend/cuda/kernel_utils.cuh +148 -0
  217. data/mlx/mlx/backend/cuda/layer_norm.cu +417 -0
  218. data/mlx/mlx/backend/cuda/load.cpp +60 -0
  219. data/mlx/mlx/backend/cuda/logsumexp.cu +161 -0
  220. data/mlx/mlx/backend/cuda/lru_cache.h +190 -0
  221. data/mlx/mlx/backend/cuda/matmul.cpp +373 -0
  222. data/mlx/mlx/backend/cuda/no_cuda.cpp +47 -0
  223. data/mlx/mlx/backend/cuda/primitives.cpp +46 -0
  224. data/mlx/mlx/backend/cuda/quantized/affine_quantize.cu +329 -0
  225. data/mlx/mlx/backend/cuda/quantized/convert_fp8.cu +19 -0
  226. data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.cpp +206 -0
  227. data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.h +88 -0
  228. data/mlx/mlx/backend/cuda/quantized/cuda_fp4.h +100 -0
  229. data/mlx/mlx/backend/cuda/quantized/fp_quantize.cu +496 -0
  230. data/mlx/mlx/backend/cuda/quantized/mxfp8_quantize.cuh +32 -0
  231. data/mlx/mlx/backend/cuda/quantized/no_qqmm_impl.cpp +26 -0
  232. data/mlx/mlx/backend/cuda/quantized/nvfp4_quantize.cuh +334 -0
  233. data/mlx/mlx/backend/cuda/quantized/qmv.cu +304 -0
  234. data/mlx/mlx/backend/cuda/quantized/qmv.h +21 -0
  235. data/mlx/mlx/backend/cuda/quantized/qqmm.cpp +158 -0
  236. data/mlx/mlx/backend/cuda/quantized/qqmm_impl.cpp +50 -0
  237. data/mlx/mlx/backend/cuda/quantized/qqmm_impl.h +26 -0
  238. data/mlx/mlx/backend/cuda/quantized/qqmm_utils.cu +227 -0
  239. data/mlx/mlx/backend/cuda/quantized/qqmm_utils.h +30 -0
  240. data/mlx/mlx/backend/cuda/quantized/quantized.cpp +85 -0
  241. data/mlx/mlx/backend/cuda/quantized/quantized.h +53 -0
  242. data/mlx/mlx/backend/cuda/quantized/quantized_utils.cuh +88 -0
  243. data/mlx/mlx/backend/cuda/quantized/quantized_utils.h +50 -0
  244. data/mlx/mlx/backend/cuda/random.cu +202 -0
  245. data/mlx/mlx/backend/cuda/reduce/all_reduce.cu +159 -0
  246. data/mlx/mlx/backend/cuda/reduce/col_reduce.cu +510 -0
  247. data/mlx/mlx/backend/cuda/reduce/init_reduce.cu +50 -0
  248. data/mlx/mlx/backend/cuda/reduce/reduce.cuh +71 -0
  249. data/mlx/mlx/backend/cuda/reduce/reduce_ops.cuh +211 -0
  250. data/mlx/mlx/backend/cuda/reduce/reduce_utils.cuh +145 -0
  251. data/mlx/mlx/backend/cuda/reduce/row_reduce.cu +361 -0
  252. data/mlx/mlx/backend/cuda/reduce.cu +73 -0
  253. data/mlx/mlx/backend/cuda/rms_norm.cu +536 -0
  254. data/mlx/mlx/backend/cuda/rope.cu +429 -0
  255. data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cpp +681 -0
  256. data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu +796 -0
  257. data/mlx/mlx/backend/cuda/scan.cu +468 -0
  258. data/mlx/mlx/backend/cuda/slicing.cpp +111 -0
  259. data/mlx/mlx/backend/cuda/softmax.cu +162 -0
  260. data/mlx/mlx/backend/cuda/sort.cu +1076 -0
  261. data/mlx/mlx/backend/cuda/steel/defines.cuh +9 -0
  262. data/mlx/mlx/backend/cuda/steel/gemm.cuh +101 -0
  263. data/mlx/mlx/backend/cuda/steel/mma.cuh +117 -0
  264. data/mlx/mlx/backend/cuda/steel/tiles.cuh +450 -0
  265. data/mlx/mlx/backend/cuda/steel/utils.cuh +89 -0
  266. data/mlx/mlx/backend/cuda/ternary.cu +271 -0
  267. data/mlx/mlx/backend/cuda/unary/CMakeLists.txt +34 -0
  268. data/mlx/mlx/backend/cuda/unary/abs.cu +7 -0
  269. data/mlx/mlx/backend/cuda/unary/arccos.cu +7 -0
  270. data/mlx/mlx/backend/cuda/unary/arccosh.cu +7 -0
  271. data/mlx/mlx/backend/cuda/unary/arcsin.cu +7 -0
  272. data/mlx/mlx/backend/cuda/unary/arcsinh.cu +7 -0
  273. data/mlx/mlx/backend/cuda/unary/arctan.cu +7 -0
  274. data/mlx/mlx/backend/cuda/unary/arctanh.cu +7 -0
  275. data/mlx/mlx/backend/cuda/unary/bitwise_invert.cu +7 -0
  276. data/mlx/mlx/backend/cuda/unary/ceil.cu +7 -0
  277. data/mlx/mlx/backend/cuda/unary/conjugate.cu +7 -0
  278. data/mlx/mlx/backend/cuda/unary/cos.cu +7 -0
  279. data/mlx/mlx/backend/cuda/unary/cosh.cu +7 -0
  280. data/mlx/mlx/backend/cuda/unary/erf.cu +7 -0
  281. data/mlx/mlx/backend/cuda/unary/erf_inv.cu +7 -0
  282. data/mlx/mlx/backend/cuda/unary/exp.cu +7 -0
  283. data/mlx/mlx/backend/cuda/unary/expm1.cu +7 -0
  284. data/mlx/mlx/backend/cuda/unary/floor.cu +7 -0
  285. data/mlx/mlx/backend/cuda/unary/imag.cu +7 -0
  286. data/mlx/mlx/backend/cuda/unary/log.cu +21 -0
  287. data/mlx/mlx/backend/cuda/unary/log1p.cu +7 -0
  288. data/mlx/mlx/backend/cuda/unary/logical_not.cu +7 -0
  289. data/mlx/mlx/backend/cuda/unary/negative.cu +7 -0
  290. data/mlx/mlx/backend/cuda/unary/real.cu +7 -0
  291. data/mlx/mlx/backend/cuda/unary/round.cu +18 -0
  292. data/mlx/mlx/backend/cuda/unary/sigmoid.cu +7 -0
  293. data/mlx/mlx/backend/cuda/unary/sign.cu +7 -0
  294. data/mlx/mlx/backend/cuda/unary/sin.cu +7 -0
  295. data/mlx/mlx/backend/cuda/unary/sinh.cu +7 -0
  296. data/mlx/mlx/backend/cuda/unary/sqrt.cu +15 -0
  297. data/mlx/mlx/backend/cuda/unary/square.cu +7 -0
  298. data/mlx/mlx/backend/cuda/unary/tan.cu +7 -0
  299. data/mlx/mlx/backend/cuda/unary/tanh.cu +7 -0
  300. data/mlx/mlx/backend/cuda/unary/unary.cuh +224 -0
  301. data/mlx/mlx/backend/cuda/utils.cpp +116 -0
  302. data/mlx/mlx/backend/cuda/utils.h +49 -0
  303. data/mlx/mlx/backend/cuda/vector_types.cuh +48 -0
  304. data/mlx/mlx/backend/cuda/worker.cpp +79 -0
  305. data/mlx/mlx/backend/cuda/worker.h +55 -0
  306. data/mlx/mlx/backend/gpu/CMakeLists.txt +5 -0
  307. data/mlx/mlx/backend/gpu/copy.cpp +89 -0
  308. data/mlx/mlx/backend/gpu/copy.h +57 -0
  309. data/mlx/mlx/backend/gpu/device_info.h +36 -0
  310. data/mlx/mlx/backend/gpu/eval.h +18 -0
  311. data/mlx/mlx/backend/gpu/primitives.cpp +307 -0
  312. data/mlx/mlx/backend/gpu/slicing.cpp +44 -0
  313. data/mlx/mlx/backend/gpu/slicing.h +36 -0
  314. data/mlx/mlx/backend/metal/CMakeLists.txt +144 -0
  315. data/mlx/mlx/backend/metal/allocator.cpp +279 -0
  316. data/mlx/mlx/backend/metal/allocator.h +79 -0
  317. data/mlx/mlx/backend/metal/binary.cpp +257 -0
  318. data/mlx/mlx/backend/metal/binary.h +33 -0
  319. data/mlx/mlx/backend/metal/compiled.cpp +471 -0
  320. data/mlx/mlx/backend/metal/conv.cpp +1118 -0
  321. data/mlx/mlx/backend/metal/copy.cpp +235 -0
  322. data/mlx/mlx/backend/metal/custom_kernel.cpp +430 -0
  323. data/mlx/mlx/backend/metal/device.cpp +816 -0
  324. data/mlx/mlx/backend/metal/device.h +289 -0
  325. data/mlx/mlx/backend/metal/device_info.cpp +58 -0
  326. data/mlx/mlx/backend/metal/distributed.cpp +38 -0
  327. data/mlx/mlx/backend/metal/eval.cpp +97 -0
  328. data/mlx/mlx/backend/metal/event.cpp +62 -0
  329. data/mlx/mlx/backend/metal/fence.cpp +162 -0
  330. data/mlx/mlx/backend/metal/fft.cpp +807 -0
  331. data/mlx/mlx/backend/metal/hadamard.cpp +198 -0
  332. data/mlx/mlx/backend/metal/indexing.cpp +727 -0
  333. data/mlx/mlx/backend/metal/jit/includes.h +58 -0
  334. data/mlx/mlx/backend/metal/jit/indexing.h +76 -0
  335. data/mlx/mlx/backend/metal/jit_kernels.cpp +1118 -0
  336. data/mlx/mlx/backend/metal/kernels/CMakeLists.txt +193 -0
  337. data/mlx/mlx/backend/metal/kernels/arange.h +9 -0
  338. data/mlx/mlx/backend/metal/kernels/arange.metal +20 -0
  339. data/mlx/mlx/backend/metal/kernels/arg_reduce.metal +182 -0
  340. data/mlx/mlx/backend/metal/kernels/atomic.h +345 -0
  341. data/mlx/mlx/backend/metal/kernels/bf16.h +16 -0
  342. data/mlx/mlx/backend/metal/kernels/bf16_math.h +380 -0
  343. data/mlx/mlx/backend/metal/kernels/binary.h +199 -0
  344. data/mlx/mlx/backend/metal/kernels/binary.metal +109 -0
  345. data/mlx/mlx/backend/metal/kernels/binary_ops.h +330 -0
  346. data/mlx/mlx/backend/metal/kernels/binary_two.h +244 -0
  347. data/mlx/mlx/backend/metal/kernels/binary_two.metal +54 -0
  348. data/mlx/mlx/backend/metal/kernels/cexpf.h +134 -0
  349. data/mlx/mlx/backend/metal/kernels/complex.h +173 -0
  350. data/mlx/mlx/backend/metal/kernels/conv.metal +701 -0
  351. data/mlx/mlx/backend/metal/kernels/copy.h +276 -0
  352. data/mlx/mlx/backend/metal/kernels/copy.metal +75 -0
  353. data/mlx/mlx/backend/metal/kernels/defines.h +24 -0
  354. data/mlx/mlx/backend/metal/kernels/erf.h +69 -0
  355. data/mlx/mlx/backend/metal/kernels/expm1f.h +90 -0
  356. data/mlx/mlx/backend/metal/kernels/fence.metal +52 -0
  357. data/mlx/mlx/backend/metal/kernels/fft/radix.h +328 -0
  358. data/mlx/mlx/backend/metal/kernels/fft/readwrite.h +624 -0
  359. data/mlx/mlx/backend/metal/kernels/fft.h +486 -0
  360. data/mlx/mlx/backend/metal/kernels/fft.metal +67 -0
  361. data/mlx/mlx/backend/metal/kernels/fp4.h +48 -0
  362. data/mlx/mlx/backend/metal/kernels/fp8.h +80 -0
  363. data/mlx/mlx/backend/metal/kernels/fp_quantized.h +1850 -0
  364. data/mlx/mlx/backend/metal/kernels/fp_quantized.metal +153 -0
  365. data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.h +1044 -0
  366. data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.metal +79 -0
  367. data/mlx/mlx/backend/metal/kernels/gemv.metal +868 -0
  368. data/mlx/mlx/backend/metal/kernels/gemv_masked.h +827 -0
  369. data/mlx/mlx/backend/metal/kernels/gemv_masked.metal +76 -0
  370. data/mlx/mlx/backend/metal/kernels/hadamard.h +182 -0
  371. data/mlx/mlx/backend/metal/kernels/indexing/gather.h +51 -0
  372. data/mlx/mlx/backend/metal/kernels/indexing/gather_axis.h +44 -0
  373. data/mlx/mlx/backend/metal/kernels/indexing/gather_front.h +24 -0
  374. data/mlx/mlx/backend/metal/kernels/indexing/indexing.h +23 -0
  375. data/mlx/mlx/backend/metal/kernels/indexing/masked_scatter.h +41 -0
  376. data/mlx/mlx/backend/metal/kernels/indexing/scatter.h +59 -0
  377. data/mlx/mlx/backend/metal/kernels/indexing/scatter_axis.h +52 -0
  378. data/mlx/mlx/backend/metal/kernels/layer_norm.metal +433 -0
  379. data/mlx/mlx/backend/metal/kernels/logging.h +26 -0
  380. data/mlx/mlx/backend/metal/kernels/logsumexp.h +140 -0
  381. data/mlx/mlx/backend/metal/kernels/logsumexp.metal +18 -0
  382. data/mlx/mlx/backend/metal/kernels/quantized.h +2508 -0
  383. data/mlx/mlx/backend/metal/kernels/quantized.metal +144 -0
  384. data/mlx/mlx/backend/metal/kernels/quantized_nax.h +1705 -0
  385. data/mlx/mlx/backend/metal/kernels/quantized_nax.metal +106 -0
  386. data/mlx/mlx/backend/metal/kernels/quantized_utils.h +90 -0
  387. data/mlx/mlx/backend/metal/kernels/random.metal +103 -0
  388. data/mlx/mlx/backend/metal/kernels/reduce.h +5 -0
  389. data/mlx/mlx/backend/metal/kernels/reduce.metal +169 -0
  390. data/mlx/mlx/backend/metal/kernels/reduce_utils.h +6 -0
  391. data/mlx/mlx/backend/metal/kernels/reduction/ops.h +275 -0
  392. data/mlx/mlx/backend/metal/kernels/reduction/reduce_all.h +66 -0
  393. data/mlx/mlx/backend/metal/kernels/reduction/reduce_col.h +398 -0
  394. data/mlx/mlx/backend/metal/kernels/reduction/reduce_init.h +8 -0
  395. data/mlx/mlx/backend/metal/kernels/reduction/reduce_row.h +369 -0
  396. data/mlx/mlx/backend/metal/kernels/rms_norm.metal +391 -0
  397. data/mlx/mlx/backend/metal/kernels/rope.metal +229 -0
  398. data/mlx/mlx/backend/metal/kernels/scaled_dot_product_attention.metal +44 -0
  399. data/mlx/mlx/backend/metal/kernels/scan.h +514 -0
  400. data/mlx/mlx/backend/metal/kernels/scan.metal +109 -0
  401. data/mlx/mlx/backend/metal/kernels/sdpa_vector.h +394 -0
  402. data/mlx/mlx/backend/metal/kernels/softmax.h +190 -0
  403. data/mlx/mlx/backend/metal/kernels/softmax.metal +24 -0
  404. data/mlx/mlx/backend/metal/kernels/sort.h +719 -0
  405. data/mlx/mlx/backend/metal/kernels/sort.metal +80 -0
  406. data/mlx/mlx/backend/metal/kernels/steel/attn/attn.h +296 -0
  407. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.h +471 -0
  408. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.metal +27 -0
  409. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.h +481 -0
  410. data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.metal +28 -0
  411. data/mlx/mlx/backend/metal/kernels/steel/attn/loader.h +264 -0
  412. data/mlx/mlx/backend/metal/kernels/steel/attn/mma.h +750 -0
  413. data/mlx/mlx/backend/metal/kernels/steel/attn/nax.h +1076 -0
  414. data/mlx/mlx/backend/metal/kernels/steel/attn/params.h +44 -0
  415. data/mlx/mlx/backend/metal/kernels/steel/attn/transforms.h +71 -0
  416. data/mlx/mlx/backend/metal/kernels/steel/conv/conv.h +13 -0
  417. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.h +176 -0
  418. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.metal +56 -0
  419. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.h +225 -0
  420. data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.metal +47 -0
  421. data/mlx/mlx/backend/metal/kernels/steel/conv/loader.h +6 -0
  422. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_l.h +451 -0
  423. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_n.h +319 -0
  424. data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_general.h +381 -0
  425. data/mlx/mlx/backend/metal/kernels/steel/conv/params.h +62 -0
  426. data/mlx/mlx/backend/metal/kernels/steel/defines.h +7 -0
  427. data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm.h +295 -0
  428. data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm_nax.h +157 -0
  429. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.h +346 -0
  430. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.metal +34 -0
  431. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.h +219 -0
  432. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.metal +30 -0
  433. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.h +459 -0
  434. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.metal +59 -0
  435. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.h +143 -0
  436. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.metal +37 -0
  437. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.h +719 -0
  438. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.metal +76 -0
  439. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.h +266 -0
  440. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.metal +43 -0
  441. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.h +227 -0
  442. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.metal +76 -0
  443. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.h +152 -0
  444. data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.metal +30 -0
  445. data/mlx/mlx/backend/metal/kernels/steel/gemm/loader.h +137 -0
  446. data/mlx/mlx/backend/metal/kernels/steel/gemm/mma.h +1146 -0
  447. data/mlx/mlx/backend/metal/kernels/steel/gemm/nax.h +1084 -0
  448. data/mlx/mlx/backend/metal/kernels/steel/gemm/params.h +65 -0
  449. data/mlx/mlx/backend/metal/kernels/steel/gemm/transforms.h +72 -0
  450. data/mlx/mlx/backend/metal/kernels/steel/utils/integral_constant.h +134 -0
  451. data/mlx/mlx/backend/metal/kernels/steel/utils/type_traits.h +55 -0
  452. data/mlx/mlx/backend/metal/kernels/steel/utils.h +42 -0
  453. data/mlx/mlx/backend/metal/kernels/ternary.h +145 -0
  454. data/mlx/mlx/backend/metal/kernels/ternary.metal +48 -0
  455. data/mlx/mlx/backend/metal/kernels/ternary_ops.h +10 -0
  456. data/mlx/mlx/backend/metal/kernels/unary.h +63 -0
  457. data/mlx/mlx/backend/metal/kernels/unary.metal +115 -0
  458. data/mlx/mlx/backend/metal/kernels/unary_ops.h +454 -0
  459. data/mlx/mlx/backend/metal/kernels/utils.h +445 -0
  460. data/mlx/mlx/backend/metal/kernels.h +375 -0
  461. data/mlx/mlx/backend/metal/logsumexp.cpp +95 -0
  462. data/mlx/mlx/backend/metal/make_compiled_preamble.sh +120 -0
  463. data/mlx/mlx/backend/metal/matmul.cpp +2572 -0
  464. data/mlx/mlx/backend/metal/matmul.h +144 -0
  465. data/mlx/mlx/backend/metal/metal.cpp +50 -0
  466. data/mlx/mlx/backend/metal/metal.h +25 -0
  467. data/mlx/mlx/backend/metal/no_metal.cpp +42 -0
  468. data/mlx/mlx/backend/metal/nojit_kernels.cpp +414 -0
  469. data/mlx/mlx/backend/metal/normalization.cpp +433 -0
  470. data/mlx/mlx/backend/metal/primitives.cpp +242 -0
  471. data/mlx/mlx/backend/metal/quantized.cpp +1651 -0
  472. data/mlx/mlx/backend/metal/reduce.cpp +1038 -0
  473. data/mlx/mlx/backend/metal/reduce.h +41 -0
  474. data/mlx/mlx/backend/metal/resident.cpp +100 -0
  475. data/mlx/mlx/backend/metal/resident.h +32 -0
  476. data/mlx/mlx/backend/metal/rope.cpp +165 -0
  477. data/mlx/mlx/backend/metal/scaled_dot_product_attention.cpp +798 -0
  478. data/mlx/mlx/backend/metal/scan.cpp +145 -0
  479. data/mlx/mlx/backend/metal/scan.h +17 -0
  480. data/mlx/mlx/backend/metal/slicing.cpp +99 -0
  481. data/mlx/mlx/backend/metal/softmax.cpp +87 -0
  482. data/mlx/mlx/backend/metal/sort.cpp +368 -0
  483. data/mlx/mlx/backend/metal/ternary.cpp +160 -0
  484. data/mlx/mlx/backend/metal/ternary.h +21 -0
  485. data/mlx/mlx/backend/metal/unary.cpp +161 -0
  486. data/mlx/mlx/backend/metal/unary.h +21 -0
  487. data/mlx/mlx/backend/metal/utils.cpp +77 -0
  488. data/mlx/mlx/backend/metal/utils.h +99 -0
  489. data/mlx/mlx/backend/no_cpu/CMakeLists.txt +7 -0
  490. data/mlx/mlx/backend/no_cpu/compiled.cpp +24 -0
  491. data/mlx/mlx/backend/no_cpu/device_info.cpp +22 -0
  492. data/mlx/mlx/backend/no_cpu/primitives.cpp +146 -0
  493. data/mlx/mlx/backend/no_gpu/CMakeLists.txt +8 -0
  494. data/mlx/mlx/backend/no_gpu/allocator.cpp +134 -0
  495. data/mlx/mlx/backend/no_gpu/apple_memory.h +16 -0
  496. data/mlx/mlx/backend/no_gpu/device_info.cpp +22 -0
  497. data/mlx/mlx/backend/no_gpu/eval.cpp +24 -0
  498. data/mlx/mlx/backend/no_gpu/event.cpp +53 -0
  499. data/mlx/mlx/backend/no_gpu/fence.cpp +54 -0
  500. data/mlx/mlx/backend/no_gpu/linux_memory.h +22 -0
  501. data/mlx/mlx/backend/no_gpu/primitives.cpp +185 -0
  502. data/mlx/mlx/compile.cpp +1243 -0
  503. data/mlx/mlx/compile.h +45 -0
  504. data/mlx/mlx/compile_impl.h +70 -0
  505. data/mlx/mlx/device.cpp +72 -0
  506. data/mlx/mlx/device.h +56 -0
  507. data/mlx/mlx/distributed/CMakeLists.txt +14 -0
  508. data/mlx/mlx/distributed/distributed.cpp +197 -0
  509. data/mlx/mlx/distributed/distributed.h +61 -0
  510. data/mlx/mlx/distributed/distributed_impl.h +59 -0
  511. data/mlx/mlx/distributed/jaccl/CMakeLists.txt +12 -0
  512. data/mlx/mlx/distributed/jaccl/jaccl.cpp +178 -0
  513. data/mlx/mlx/distributed/jaccl/jaccl.h +12 -0
  514. data/mlx/mlx/distributed/jaccl/mesh.cpp +451 -0
  515. data/mlx/mlx/distributed/jaccl/mesh.h +122 -0
  516. data/mlx/mlx/distributed/jaccl/no_jaccl.cpp +20 -0
  517. data/mlx/mlx/distributed/jaccl/ring.cpp +692 -0
  518. data/mlx/mlx/distributed/jaccl/ring.h +178 -0
  519. data/mlx/mlx/distributed/jaccl/utils.cpp +329 -0
  520. data/mlx/mlx/distributed/jaccl/utils.h +342 -0
  521. data/mlx/mlx/distributed/mpi/CMakeLists.txt +5 -0
  522. data/mlx/mlx/distributed/mpi/mpi.cpp +501 -0
  523. data/mlx/mlx/distributed/mpi/mpi.h +12 -0
  524. data/mlx/mlx/distributed/mpi/mpi_declarations.h +28 -0
  525. data/mlx/mlx/distributed/mpi/no_mpi.cpp +20 -0
  526. data/mlx/mlx/distributed/nccl/CMakeLists.txt +26 -0
  527. data/mlx/mlx/distributed/nccl/nccl.cpp +443 -0
  528. data/mlx/mlx/distributed/nccl/nccl.h +12 -0
  529. data/mlx/mlx/distributed/nccl/nccl_stub/CMakeLists.txt +1 -0
  530. data/mlx/mlx/distributed/nccl/nccl_stub/nccl_stubs.cpp +54 -0
  531. data/mlx/mlx/distributed/nccl/no_nccl.cpp +20 -0
  532. data/mlx/mlx/distributed/ops.cpp +186 -0
  533. data/mlx/mlx/distributed/ops.h +57 -0
  534. data/mlx/mlx/distributed/primitives.cpp +95 -0
  535. data/mlx/mlx/distributed/primitives.h +156 -0
  536. data/mlx/mlx/distributed/reduction_ops.h +38 -0
  537. data/mlx/mlx/distributed/ring/CMakeLists.txt +5 -0
  538. data/mlx/mlx/distributed/ring/no_ring.cpp +20 -0
  539. data/mlx/mlx/distributed/ring/ring.cpp +870 -0
  540. data/mlx/mlx/distributed/ring/ring.h +12 -0
  541. data/mlx/mlx/distributed/utils.cpp +206 -0
  542. data/mlx/mlx/distributed/utils.h +67 -0
  543. data/mlx/mlx/dtype.cpp +197 -0
  544. data/mlx/mlx/dtype.h +116 -0
  545. data/mlx/mlx/dtype_utils.cpp +42 -0
  546. data/mlx/mlx/dtype_utils.h +119 -0
  547. data/mlx/mlx/einsum.cpp +941 -0
  548. data/mlx/mlx/einsum.h +23 -0
  549. data/mlx/mlx/event.h +58 -0
  550. data/mlx/mlx/export.cpp +1130 -0
  551. data/mlx/mlx/export.h +137 -0
  552. data/mlx/mlx/export_impl.h +99 -0
  553. data/mlx/mlx/fast.cpp +941 -0
  554. data/mlx/mlx/fast.h +103 -0
  555. data/mlx/mlx/fast_primitives.h +427 -0
  556. data/mlx/mlx/fence.h +39 -0
  557. data/mlx/mlx/fft.cpp +262 -0
  558. data/mlx/mlx/fft.h +159 -0
  559. data/mlx/mlx/graph_utils.cpp +175 -0
  560. data/mlx/mlx/graph_utils.h +67 -0
  561. data/mlx/mlx/io/CMakeLists.txt +25 -0
  562. data/mlx/mlx/io/gguf.cpp +470 -0
  563. data/mlx/mlx/io/gguf.h +20 -0
  564. data/mlx/mlx/io/gguf_quants.cpp +164 -0
  565. data/mlx/mlx/io/load.cpp +397 -0
  566. data/mlx/mlx/io/load.h +175 -0
  567. data/mlx/mlx/io/no_gguf.cpp +20 -0
  568. data/mlx/mlx/io/no_safetensors.cpp +37 -0
  569. data/mlx/mlx/io/safetensors.cpp +234 -0
  570. data/mlx/mlx/io.h +61 -0
  571. data/mlx/mlx/linalg.cpp +708 -0
  572. data/mlx/mlx/linalg.h +115 -0
  573. data/mlx/mlx/memory.h +80 -0
  574. data/mlx/mlx/mlx.h +25 -0
  575. data/mlx/mlx/ops.cpp +6094 -0
  576. data/mlx/mlx/ops.h +1610 -0
  577. data/mlx/mlx/primitives.cpp +5850 -0
  578. data/mlx/mlx/primitives.h +2525 -0
  579. data/mlx/mlx/random.cpp +492 -0
  580. data/mlx/mlx/random.h +283 -0
  581. data/mlx/mlx/scheduler.cpp +73 -0
  582. data/mlx/mlx/scheduler.h +189 -0
  583. data/mlx/mlx/small_vector.h +540 -0
  584. data/mlx/mlx/stream.h +42 -0
  585. data/mlx/mlx/threadpool.h +133 -0
  586. data/mlx/mlx/transforms.cpp +1065 -0
  587. data/mlx/mlx/transforms.h +231 -0
  588. data/mlx/mlx/transforms_impl.h +88 -0
  589. data/mlx/mlx/types/bf16.h +187 -0
  590. data/mlx/mlx/types/complex.h +113 -0
  591. data/mlx/mlx/types/fp16.h +234 -0
  592. data/mlx/mlx/types/half_types.h +58 -0
  593. data/mlx/mlx/types/limits.h +70 -0
  594. data/mlx/mlx/utils.cpp +302 -0
  595. data/mlx/mlx/utils.h +174 -0
  596. data/mlx/mlx/version.cpp +11 -0
  597. data/mlx/mlx/version.h +22 -0
  598. data/mlx/mlx.pc.in +52 -0
  599. metadata +643 -0

data/mlx/mlx/backend/cuda/unary/sqrt.cu
@@ -0,0 +1,15 @@
+ // Copyright © 2025 Apple Inc.
+
+ #include "mlx/backend/cuda/unary/unary.cuh"
+
+ namespace mlx::core {
+ void Sqrt::eval_gpu(const std::vector<array>& inputs, array& out) {
+   nvtx3::scoped_range r("Sqrt::eval_gpu");
+   auto& s = out.primitive().stream();
+   if (recip_) {
+     unary_op_gpu<cu::Rsqrt>(inputs, out, "Rsqrt", s);
+   } else {
+     unary_op_gpu<cu::Sqrt>(inputs, out, "Sqrt", s);
+   }
+ }
+ } // namespace mlx::core

data/mlx/mlx/backend/cuda/unary/square.cu
@@ -0,0 +1,7 @@
+ // Copyright © 2025 Apple Inc.
+
+ #include "mlx/backend/cuda/unary/unary.cuh"
+
+ namespace mlx::core {
+ UNARY_GPU(Square)
+ } // namespace mlx::core

data/mlx/mlx/backend/cuda/unary/tan.cu
@@ -0,0 +1,7 @@
+ // Copyright © 2025 Apple Inc.
+
+ #include "mlx/backend/cuda/unary/unary.cuh"
+
+ namespace mlx::core {
+ UNARY_GPU(Tan)
+ } // namespace mlx::core

data/mlx/mlx/backend/cuda/unary/tanh.cu
@@ -0,0 +1,7 @@
+ // Copyright © 2025 Apple Inc.
+
+ #include "mlx/backend/cuda/unary/unary.cuh"
+
+ namespace mlx::core {
+ UNARY_GPU(Tanh)
+ } // namespace mlx::core
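
The single-line files above (square.cu, tan.cu, tanh.cu) rely on the UNARY_GPU macro defined in unary.cuh, which appears next in this diff. For reference, UNARY_GPU(Tanh) expands to roughly the following, mirroring the hand-written Sqrt::eval_gpu shown earlier:

void Tanh::eval_gpu(const std::vector<array>& inputs, array& out) {
  nvtx3::scoped_range r("Tanh::eval_gpu");
  auto& s = out.primitive().stream();
  unary_op_gpu<cu::Tanh>(inputs, out, name(), s);
}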

data/mlx/mlx/backend/cuda/unary/unary.cuh
@@ -0,0 +1,224 @@
+ // Copyright © 2025 Apple Inc.
+
+ #include "mlx/backend/common/unary.h"
+ #include "mlx/backend/cuda/device.h"
+ #include "mlx/backend/cuda/device/unary_ops.cuh"
+ #include "mlx/backend/cuda/kernel_utils.cuh"
+ #include "mlx/dtype_utils.h"
+ #include "mlx/primitives.h"
+
+ #include <cooperative_groups.h>
+ #include <nvtx3/nvtx3.hpp>
+
+ namespace mlx::core {
+
+ namespace cu {
+
+ namespace cg = cooperative_groups;
+
+ template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
+ __global__ void unary_v(const In* in, Out* out, IdxT size) {
+   IdxT index = cg::this_grid().thread_rank();
+
+   if ((index + 1) * N_READS > size) {
+     for (IdxT i = index * N_READS; i < size; ++i) {
+       out[i] = Op{}(in[i]);
+     }
+   } else {
+     auto in_vec = load_vector<N_READS>(in, index);
+
+     AlignedVector<Out, N_READS> out_vec;
+ #pragma unroll
+     for (int i = 0; i < N_READS; ++i) {
+       out_vec[i] = Op{}(in_vec[i]);
+     }
+
+     store_vector<N_READS>(out, index, out_vec);
+   }
+ }
+
+ template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
+ __global__ void unary_g(
+     const In* in,
+     Out* out,
+     IdxT size_rest,
+     const __grid_constant__ Shape shape,
+     const __grid_constant__ Strides strides,
+     int ndim) {
+   auto block = cg::this_thread_block();
+   auto grid = cg::this_grid();
+   IdxT index_rest =
+       grid.block_index().y * block.dim_threads().y + block.thread_index().y;
+   if (index_rest >= size_rest) {
+     return;
+   }
+
+   auto shape_x = shape[ndim - 1];
+   auto stride_x = strides[ndim - 1];
+   IdxT index_x =
+       grid.block_index().x * block.dim_threads().x + block.thread_index().x;
+   auto idx =
+       elem_to_loc(index_rest * shape_x, shape.data(), strides.data(), ndim);
+   auto in_vec =
+       load_vector<N_READS>(in + idx, index_x, shape_x, stride_x, In(0));
+   AlignedVector<Out, N_READS> out_vec;
+ #pragma unroll
+   for (int i = 0; i < N_READS; ++i) {
+     out_vec[i] = Op{}(in_vec[i]);
+   }
+   store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
+ }
+
+ template <typename Op, typename In, typename Out>
+ constexpr bool supports_unary_op() {
+   if (std::is_same_v<Op, Abs> || std::is_same_v<Op, Negative> ||
+       std::is_same_v<Op, Sign> || std::is_same_v<Op, Square>) {
+     return std::is_same_v<In, Out>;
+   }
+   if (std::is_same_v<Op, ArcCosh> || std::is_same_v<Op, ArcSinh> ||
+       std::is_same_v<Op, ArcTanh> || std::is_same_v<Op, Erf> ||
+       std::is_same_v<Op, ErfInv> || std::is_same_v<Op, Expm1> ||
+       std::is_same_v<Op, Sigmoid>) {
+     return std::is_same_v<In, Out> && is_floating_v<In>;
+   }
+   if (std::is_same_v<Op, BitwiseInvert>) {
+     return std::is_same_v<In, Out> && std::is_integral_v<In> &&
+         !std::is_same_v<In, bool>;
+   }
+   if (std::is_same_v<Op, Ceil> || std::is_same_v<Op, Floor>) {
+     return std::is_same_v<In, Out> && !mlx::core::is_complex_v<In>;
+   }
+   if (std::is_same_v<Op, Conjugate>) {
+     return std::is_same_v<In, Out> && mlx::core::is_complex_v<In>;
+   }
+   if (std::is_same_v<Op, ArcCos> || std::is_same_v<Op, ArcSin> ||
+       std::is_same_v<Op, ArcTan> || std::is_same_v<Op, Cos> ||
+       std::is_same_v<Op, Cosh> || std::is_same_v<Op, Exp> ||
+       std::is_same_v<Op, Log> || std::is_same_v<Op, Log2> ||
+       std::is_same_v<Op, Log10> || std::is_same_v<Op, Log1p> ||
+       std::is_same_v<Op, Round> || std::is_same_v<Op, Rsqrt> ||
+       std::is_same_v<Op, Sqrt> || std::is_same_v<Op, Sin> ||
+       std::is_same_v<Op, Sinh> || std::is_same_v<Op, Tan> ||
+       std::is_same_v<Op, Tanh>) {
+     return std::is_same_v<In, Out> && is_inexact_v<In>;
+   }
+   if (std::is_same_v<Op, Imag> || std::is_same_v<Op, Real>) {
+     return mlx::core::is_complex_v<In> && std::is_same_v<Out, float>;
+   }
+   if (std::is_same_v<Op, LogicalNot>) {
+     return std::is_same_v<In, Out> && std::is_same_v<In, bool>;
+   }
+   if (std::is_same_v<Op, ToFP8>) {
+     return std::is_same_v<Out, uint8_t> && is_floating_v<In>;
+   }
+   if (std::is_same_v<Op, FromFP8>) {
+     return std::is_same_v<In, uint8_t> && is_floating_v<Out>;
+   }
+   return false;
+ }
+
+ } // namespace cu
+
+ template <typename Op>
+ void unary_op_gpu_inplace(
+     const std::vector<array>& inputs,
+     array& out,
+     const char* op,
+     const Stream& s) {
+   auto& in = inputs[0];
+   if (in.size() == 0) {
+     return;
+   }
+   bool contig = in.flags().contiguous;
+   bool large;
+   if (!contig) {
+     large = in.data_size() > INT32_MAX || out.size() > INT32_MAX;
+   } else {
+     large = in.data_size() > UINT32_MAX;
+   }
+
+   auto& encoder = cu::get_command_encoder(s);
+   encoder.set_input_array(in);
+   encoder.set_output_array(out);
+   dispatch_all_types(in.dtype(), [&](auto in_type_tag) {
+     dispatch_all_types(out.dtype(), [&](auto out_type_tag) {
+       using CTYPE_IN = MLX_GET_TYPE(in_type_tag);
+       using CTYPE_OUT = MLX_GET_TYPE(out_type_tag);
+       if constexpr (cu::supports_unary_op<Op, CTYPE_IN, CTYPE_OUT>()) {
+         dispatch_bool(large, [&](auto large) {
+           using InType = cuda_type_t<CTYPE_IN>;
+           using OutType = cuda_type_t<CTYPE_OUT>;
+           if (contig) {
+             using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
+             constexpr int N_READS = 16 / sizeof(OutType);
+             auto [num_blocks, block_dims] = get_launch_args(
+                 out.data_size(), out.shape(), out.strides(), large, N_READS);
+             encoder.add_kernel_node(
+                 cu::unary_v<Op, InType, OutType, IdxT, N_READS>,
+                 num_blocks,
+                 block_dims,
+                 0,
+                 gpu_ptr<InType>(in),
+                 gpu_ptr<OutType>(out),
+                 out.data_size());
+           } else {
+             using IdxT = std::conditional_t<large(), int64_t, int32_t>;
+             auto [shape, strides] = collapse_contiguous_dims(in);
+             auto ndim = shape.size();
+             int work_per_thread = 1;
+             auto kernel = cu::unary_g<Op, InType, OutType, IdxT, 1>;
+             auto dim0 = ndim > 0 ? shape.back() : 1;
+             auto rest = out.size() / dim0;
+             if (dim0 >= 4) {
+               kernel = cu::unary_g<Op, InType, OutType, IdxT, 4>;
+               work_per_thread = 4;
+             }
+             dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
+             auto block_dims = get_block_dims(dim0, rest, 1);
+             uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
+             uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
+             encoder.add_kernel_node(
+                 kernel,
+                 {num_blocks_x, num_blocks_y},
+                 block_dims,
+                 0,
+                 gpu_ptr<InType>(in),
+                 gpu_ptr<OutType>(out),
+                 rest,
+                 const_param(shape),
+                 const_param(strides),
+                 ndim);
+           }
+         });
+       } else {
+         throw std::runtime_error(
+             fmt::format(
+                 "Can not do unary op {} on input of {} with output of {}.",
+                 op,
+                 dtype_to_string(in.dtype()),
+                 dtype_to_string(out.dtype())));
+       }
+     });
+   });
+ }
+
+ template <typename Op>
+ void unary_op_gpu(
+     const std::vector<array>& inputs,
+     array& out,
+     const char* op,
+     const Stream& s) {
+   auto& encoder = cu::get_command_encoder(s);
+   set_unary_output_data(
+       inputs[0], out, [&](auto n) { return cu::malloc_async(n, encoder); });
+   unary_op_gpu_inplace<Op>(inputs, out, op, s);
+ }
+
+ #define UNARY_GPU(func)                                                \
+   void func::eval_gpu(const std::vector<array>& inputs, array& out) { \
+     nvtx3::scoped_range r(#func "::eval_gpu");                        \
+     auto& s = out.primitive().stream();                               \
+     unary_op_gpu<cu::func>(inputs, out, name(), s);                   \
+   }
+
+ } // namespace mlx::core
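
In the contiguous path above, unary_v lets each thread apply the op to N_READS packed elements, with only the ragged tail handled one element at a time. A minimal self-contained sketch of that pattern, using plain loops in place of MLX's load_vector/store_vector helpers (the kernel and op names here are illustrative, not part of the gem):

#include <cuda_runtime.h>
#include <cstddef>

// Each thread handles N_READS consecutive elements; a thread whose span
// would run past the end falls back to a scalar loop over the remainder.
template <typename Op, typename T, int N_READS>
__global__ void unary_sketch(const T* in, T* out, size_t size) {
  size_t index = blockIdx.x * static_cast<size_t>(blockDim.x) + threadIdx.x;
  size_t start = index * N_READS;
  if ((index + 1) * N_READS > size) {
    for (size_t i = start; i < size; ++i) {
      out[i] = Op{}(in[i]);
    }
  } else {
#pragma unroll
    for (int i = 0; i < N_READS; ++i) {
      out[start + i] = Op{}(in[start + i]);
    }
  }
}

struct SquareOp {
  __device__ float operator()(float x) const {
    return x * x;
  }
};

// Launch with one thread per N_READS elements, e.g. for N_READS = 4:
//   size_t threads_needed = (size + 3) / 4;
//   unary_sketch<SquareOp, float, 4><<<num_blocks, block_size>>>(in, out, size);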

data/mlx/mlx/backend/cuda/utils.cpp
@@ -0,0 +1,116 @@
+ // Copyright © 2025 Apple Inc.
+
+ #include "mlx/backend/cuda/utils.h"
+ #include "mlx/backend/cuda/device.h"
+ #include "mlx/dtype_utils.h"
+
+ #include <fmt/format.h>
+ #include <cuda/cmath>
+ #include <vector>
+
+ namespace mlx::core {
+
+ void check_cublas_error(const char* name, cublasStatus_t err) {
+   if (err != CUBLAS_STATUS_SUCCESS) {
+     // TODO: Use cublasGetStatusString when it is widely available.
+     throw std::runtime_error(
+         fmt::format("{} failed with code: {}.", name, static_cast<int>(err)));
+   }
+ }
+
+ void check_cuda_error(const char* name, cudaError_t err) {
+   if (err != cudaSuccess) {
+     throw std::runtime_error(
+         fmt::format("{} failed: {}", name, cudaGetErrorString(err)));
+   }
+ }
+
+ void check_cuda_error(const char* name, CUresult err) {
+   if (err != CUDA_SUCCESS) {
+     const char* err_str = "Unknown error";
+     cuGetErrorString(err, &err_str);
+     throw std::runtime_error(fmt::format("{} failed: {}", name, err_str));
+   }
+ }
+
+ void check_cudnn_error(const char* name, cudnnStatus_t err) {
+   if (err != CUDNN_STATUS_SUCCESS) {
+     throw std::runtime_error(
+         fmt::format("{} failed: {}.", name, cudnnGetErrorString(err)));
+   }
+ }
+
+ const char* dtype_to_cuda_type(const Dtype& dtype) {
+   switch (dtype) {
+     case bool_:
+       return "bool";
+     case int8:
+       return "int8_t";
+     case int16:
+       return "int16_t";
+     case int32:
+       return "int32_t";
+     case int64:
+       return "int64_t";
+     case uint8:
+       return "uint8_t";
+     case uint16:
+       return "uint16_t";
+     case uint32:
+       return "uint32_t";
+     case uint64:
+       return "uint64_t";
+     case float16:
+       return "__half";
+     case bfloat16:
+       return "__nv_bfloat16";
+     case float32:
+       return "float";
+     case float64:
+       return "double";
+     case complex64:
+       return "mlx::core::cu::complex64_t";
+     default:
+       return "unknown";
+   }
+ }
+
+ CudaGraph::CudaGraph(cu::Device& device) {
+   device.make_current();
+   CHECK_CUDA_ERROR(cudaGraphCreate(&handle_, 0));
+ }
+
+ void CudaGraph::end_capture(cudaStream_t stream) {
+   CHECK_CUDA_ERROR(cudaStreamEndCapture(stream, &handle_));
+ }
+
+ void CudaGraphExec::instantiate(cudaGraph_t graph) {
+   assert(handle_ == nullptr);
+   CHECK_CUDA_ERROR(cudaGraphInstantiate(&handle_, graph, nullptr, nullptr, 0));
+ }
+
+ CudaStream::CudaStream(cu::Device& device) {
+   device.make_current();
+   CHECK_CUDA_ERROR(cudaStreamCreateWithFlags(&handle_, cudaStreamNonBlocking));
+ }
+
+ void* allocate_workspace(cu::CommandEncoder& encoder, size_t workspace_size) {
+   if (workspace_size == 0) {
+     return nullptr;
+   }
+
+   // Workspace allocation should not be captured.
+ #ifndef NDEBUG
+   cudaStreamCaptureStatus status;
+   CHECK_CUDA_ERROR(cudaStreamIsCapturing(encoder.stream(), &status));
+   assert(status == cudaStreamCaptureStatusNone);
+ #endif
+
+   // Ensure workspace is 256-byte aligned.
+   int nbytes = cuda::ceil_div(workspace_size, 256) * 256;
+   array workspace(cu::malloc_async(nbytes, encoder), {nbytes}, int8);
+   encoder.add_temporary(workspace);
+   return gpu_ptr<void>(workspace);
+ }
+
+ } // namespace mlx::core
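
The checkers above are normally invoked through the CHECK_CUDA_ERROR macro used throughout this backend; its actual definition lives in mlx/backend/cuda/cuda_utils.h (listed in the files above) and is not part of this excerpt. A plausible sketch of such a macro, shown only to illustrate how the overloads are selected:

// Sketch only; the real macro in cuda_utils.h may differ.
// Stringizes the call for the error message and forwards the returned status
// to whichever check_cuda_error overload matches (cudaError_t or CUresult).
#define CHECK_CUDA_ERROR_SKETCH(cmd) check_cuda_error(#cmd, (cmd))

// Usage: on failure this throws std::runtime_error with the call text and
// the driver/runtime error string, e.g.
//   CHECK_CUDA_ERROR_SKETCH(cudaStreamSynchronize(stream));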

data/mlx/mlx/backend/cuda/utils.h
@@ -0,0 +1,49 @@
+ // Copyright © 2025 Apple Inc.
+
+ // This file includes utilities that are used by C++ code (i.e. .cpp files).
+
+ #pragma once
+
+ #include "mlx/array.h"
+ #include "mlx/backend/cuda/allocator.h"
+ #include "mlx/backend/cuda/cuda_utils.h"
+
+ namespace mlx::core {
+
+ template <typename T>
+ inline uint32_t max_occupancy_block_dim(T kernel) {
+   int _, block_dim;
+   if constexpr (std::is_same_v<T, CUfunction>) {
+     CHECK_CUDA_ERROR(
+         cuOccupancyMaxPotentialBlockSize(&_, &block_dim, kernel, 0, 0, 0));
+   } else {
+     CHECK_CUDA_ERROR(
+         cudaOccupancyMaxPotentialBlockSize(&_, &block_dim, kernel));
+   }
+   return block_dim;
+ }
+
+ template <typename T>
+ inline T* gpu_ptr(array& arr) {
+   return reinterpret_cast<T*>(
+       static_cast<char*>(
+           static_cast<cu::CudaBuffer*>(arr.buffer().ptr())->data) +
+       arr.offset());
+ }
+
+ // For a const array, keep constness in the pointer unless it is untyped.
+ template <typename T>
+ inline std::conditional_t<std::is_same_v<T, void>, void*, const T*> gpu_ptr(
+     const array& arr) {
+   return gpu_ptr<T>(const_cast<array&>(arr));
+ }
+
+ struct Dtype;
+
+ // Convert Dtype to CUDA C++ types.
+ const char* dtype_to_cuda_type(const Dtype& dtype);
+
+ // Allocate an empty array and add it as temporary.
+ void* allocate_workspace(cu::CommandEncoder& encoder, size_t workspace_size);
+
+ } // namespace mlx::core

data/mlx/mlx/backend/cuda/vector_types.cuh
@@ -0,0 +1,48 @@
+ // Copyright © 2025 Apple Inc.
+
+ #pragma once
+
+ #include <cuda_bf16.h>
+ #include <cuda_fp16.h>
+
+ namespace mlx::core::cu {
+
+ template <typename T>
+ struct Vector2;
+
+ template <>
+ struct Vector2<double> {
+   using type = double2;
+ };
+
+ template <>
+ struct Vector2<float> {
+   using type = float2;
+ };
+
+ template <>
+ struct Vector2<__half> {
+   using type = __half2;
+ };
+
+ template <>
+ struct Vector2<__nv_bfloat16> {
+   using type = __nv_bfloat162;
+ };
+
+ template <typename T>
+ using Vector2_t = typename Vector2<T>::type;
+
+ template <typename T>
+ struct Vector4 {
+   T x, y, z, w;
+ };
+
+ template <typename T>
+ using Vector4_t = Vector4<T>;
+
+ using bf16x4 = Vector4_t<__nv_bfloat16>;
+ using fp16x4 = Vector4_t<__half>;
+ using fp32x4 = Vector4_t<float>;
+
+ } // namespace mlx::core::cu
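
Vector2 maps scalar types to CUDA's native 2-wide vector types, while Vector4 supplies a uniform 4-wide struct even for element types with no native 4-wide equivalent such as __nv_bfloat16. A small illustrative use of the aliases, assuming this header is available (the helper to_fp32x4 is not part of the gem):

#include <cuda_bf16.h>
#include "mlx/backend/cuda/vector_types.cuh"

// Widen a packed bf16x4 into an fp32x4, one lane at a time.
__device__ inline mlx::core::cu::fp32x4 to_fp32x4(mlx::core::cu::bf16x4 v) {
  return {__bfloat162float(v.x),
          __bfloat162float(v.y),
          __bfloat162float(v.z),
          __bfloat162float(v.w)};
}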

data/mlx/mlx/backend/cuda/worker.cpp
@@ -0,0 +1,79 @@
+ // Copyright © 2025 Apple Inc.
+
+ #include "mlx/backend/cuda/worker.h"
+ #include "mlx/backend/cuda/device.h"
+
+ namespace mlx::core::cu {
+
+ Worker::Worker(Device& d)
+     : signal_stream_(d),
+       signal_event_(d, cudaEventDisableTiming | cudaEventBlockingSync),
+       worker_(&Worker::thread_fn, this) {}
+
+ Worker::~Worker() {
+   {
+     std::lock_guard lock(mtx_);
+     stop_ = true;
+   }
+   cond_.notify_one();
+   worker_.join();
+ }
+
+ void Worker::add_task(std::function<void()> task) {
+   pending_tasks_.push_back(std::move(task));
+ }
+
+ void Worker::signal(void* data) {
+   auto w = static_cast<Worker*>(data);
+   {
+     std::lock_guard lock(w->mtx_);
+     w->signaled_batch_++;
+   }
+   w->cond_.notify_one();
+ }
+
+ void Worker::commit(cudaStream_t stream) {
+   // Move pending tasks into tasks
+   if (pending_tasks_.empty()) {
+     return;
+   }
+   {
+     std::lock_guard lock(mtx_);
+     // Move pending tasks into ready tasks
+     worker_tasks_[++committed_batch_] = std::move(pending_tasks_);
+   }
+   signal_event_.record(stream);
+   signal_event_.wait(signal_stream_);
+   CHECK_CUDA_ERROR(cudaLaunchHostFunc(signal_stream_, signal, this));
+ }
+
+ void Worker::thread_fn() {
+   while (!stop_) {
+     uint64_t current_batch = 0;
+     Tasks tasks;
+     {
+       std::unique_lock<std::mutex> lk(mtx_);
+       cond_.wait(lk, [this, &current_batch] {
+         return this->signaled_batch_ > current_batch || this->stop_;
+       });
+       current_batch = signaled_batch_;
+       auto end = worker_tasks_.upper_bound(current_batch);
+       for (auto it = worker_tasks_.begin(); it != end; ++it) {
+         if (tasks.empty()) {
+           tasks = std::move(it->second);
+         } else {
+           std::move(
+               it->second.begin(), it->second.end(), std::back_inserter(tasks));
+         }
+       }
+       worker_tasks_.erase(worker_tasks_.begin(), end);
+     }
+     // Make sure tasks are cleared before the next wait
+     for (int i = 0; i < tasks.size(); ++i) {
+       auto task = std::move(tasks[i]);
+       task();
+     }
+   }
+ }
+
+ } // namespace mlx::core::cu

data/mlx/mlx/backend/cuda/worker.h
@@ -0,0 +1,55 @@
+ // Copyright © 2025 Apple Inc.
+
+ #pragma once
+
+ #include "mlx/backend/cuda/event.h"
+
+ #include <condition_variable>
+ #include <functional>
+ #include <map>
+ #include <mutex>
+ #include <thread>
+
+ namespace mlx::core::cu {
+
+ // Run tasks in worker thread, synchronized with cuda stream.
+ class Worker {
+  public:
+   explicit Worker(Device& d);
+   ~Worker();
+
+   Worker(const Worker&) = delete;
+   Worker& operator=(const Worker&) = delete;
+
+   // Add a pending |task| that will run when consumed or committed.
+   void add_task(std::function<void()> task);
+
+   // Inform worker thread to run current batches after kernels in |stream|
+   // finish running.
+   void commit(cudaStream_t stream);
+
+  private:
+   static void signal(void*);
+
+   void thread_fn();
+   std::mutex mtx_;
+   std::condition_variable cond_;
+
+   uint64_t committed_batch_{0};
+   uint64_t signaled_batch_{0};
+
+   // Cuda stream and event for signaling kernel completion.
+   CudaStream signal_stream_;
+   CudaEvent signal_event_;
+
+   bool stop_{false};
+
+   // Tasks are put in |pending_tasks_| first, and then moved to
+   // |worker_tasks_| when commit() is called.
+   using Tasks = std::vector<std::function<void()>>;
+   Tasks pending_tasks_;
+   std::map<uint64_t, Tasks> worker_tasks_;
+   std::thread worker_;
+ };
+
+ } // namespace mlx::core::cu
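
Based on the interface above and its implementation in worker.cpp, a hedged usage sketch: host-side callbacks are queued with add_task() and commit(stream) schedules the queued batch to run on the worker thread only after the work already submitted to that stream has finished. The function and variable names below are illustrative, and the sketch assumes a long-lived Worker (presumably owned by the per-device command encoder):

#include <cuda_runtime.h>
#include "mlx/backend/cuda/worker.h"

void run_cleanup_after_kernels(
    mlx::core::cu::Worker& worker, // long-lived, not scoped to this call
    cudaStream_t stream,
    void* staging_buffer) {
  // ... kernels that produce results are launched on `stream` here ...

  // Queue a host-side task; it does not run yet.
  worker.add_task([staging_buffer]() {
    // e.g. release a pinned staging buffer or notify a waiter.
  });

  // commit() records an event on `stream`; once the work ahead of it
  // completes, cudaLaunchHostFunc on the internal signal stream wakes the
  // worker thread, which then runs the queued batch.
  worker.commit(stream);
}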

data/mlx/mlx/backend/gpu/CMakeLists.txt
@@ -0,0 +1,5 @@
+ target_sources(
+   mlx
+   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/copy.cpp
+           ${CMAKE_CURRENT_SOURCE_DIR}/primitives.cpp
+           ${CMAKE_CURRENT_SOURCE_DIR}/slicing.cpp)