mlx 0.30.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/mlx/extconf.rb +94 -0
- data/ext/mlx/native.cpp +8027 -0
- data/lib/mlx/core.rb +1678 -0
- data/lib/mlx/distributed_utils/common.rb +116 -0
- data/lib/mlx/distributed_utils/config.rb +600 -0
- data/lib/mlx/distributed_utils/launch.rb +490 -0
- data/lib/mlx/extension.rb +24 -0
- data/lib/mlx/nn/base.rb +388 -0
- data/lib/mlx/nn/init.rb +140 -0
- data/lib/mlx/nn/layers/activations.rb +336 -0
- data/lib/mlx/nn/layers/base.rb +6 -0
- data/lib/mlx/nn/layers/containers.rb +20 -0
- data/lib/mlx/nn/layers/convolution.rb +120 -0
- data/lib/mlx/nn/layers/convolution_transpose.rb +114 -0
- data/lib/mlx/nn/layers/distributed.rb +309 -0
- data/lib/mlx/nn/layers/dropout.rb +75 -0
- data/lib/mlx/nn/layers/embedding.rb +28 -0
- data/lib/mlx/nn/layers/linear.rb +79 -0
- data/lib/mlx/nn/layers/normalization.rb +216 -0
- data/lib/mlx/nn/layers/pooling.rb +167 -0
- data/lib/mlx/nn/layers/positional_encoding.rb +126 -0
- data/lib/mlx/nn/layers/quantized.rb +215 -0
- data/lib/mlx/nn/layers/recurrent.rb +135 -0
- data/lib/mlx/nn/layers/transformer.rb +330 -0
- data/lib/mlx/nn/layers/upsample.rb +97 -0
- data/lib/mlx/nn/layers.rb +18 -0
- data/lib/mlx/nn/losses.rb +251 -0
- data/lib/mlx/nn/utils.rb +167 -0
- data/lib/mlx/nn.rb +12 -0
- data/lib/mlx/optimizers/optimizers.rb +808 -0
- data/lib/mlx/optimizers/schedulers.rb +62 -0
- data/lib/mlx/optimizers.rb +9 -0
- data/lib/mlx/utils.rb +171 -0
- data/lib/mlx/version.rb +5 -0
- data/lib/mlx.rb +64 -0
- data/mlx/CMakeLists.txt +449 -0
- data/mlx/cmake/FindCUDNN.cmake +177 -0
- data/mlx/cmake/FindNCCL.cmake +54 -0
- data/mlx/cmake/Findnvpl.cmake +3 -0
- data/mlx/cmake/extension.cmake +50 -0
- data/mlx/mlx/3rdparty/.clang-format +2 -0
- data/mlx/mlx/3rdparty/pocketfft.h +3581 -0
- data/mlx/mlx/CMakeLists.txt +107 -0
- data/mlx/mlx/allocator.h +75 -0
- data/mlx/mlx/api.h +29 -0
- data/mlx/mlx/array.cpp +354 -0
- data/mlx/mlx/array.h +647 -0
- data/mlx/mlx/backend/common/CMakeLists.txt +9 -0
- data/mlx/mlx/backend/common/binary.h +97 -0
- data/mlx/mlx/backend/common/broadcasting.cpp +24 -0
- data/mlx/mlx/backend/common/broadcasting.h +11 -0
- data/mlx/mlx/backend/common/buffer_cache.h +158 -0
- data/mlx/mlx/backend/common/common.cpp +305 -0
- data/mlx/mlx/backend/common/compiled.cpp +243 -0
- data/mlx/mlx/backend/common/compiled.h +77 -0
- data/mlx/mlx/backend/common/copy.h +50 -0
- data/mlx/mlx/backend/common/hadamard.h +109 -0
- data/mlx/mlx/backend/common/load.cpp +57 -0
- data/mlx/mlx/backend/common/matmul.h +67 -0
- data/mlx/mlx/backend/common/reduce.cpp +154 -0
- data/mlx/mlx/backend/common/reduce.h +59 -0
- data/mlx/mlx/backend/common/slicing.cpp +71 -0
- data/mlx/mlx/backend/common/slicing.h +20 -0
- data/mlx/mlx/backend/common/ternary.h +85 -0
- data/mlx/mlx/backend/common/unary.h +29 -0
- data/mlx/mlx/backend/common/utils.cpp +231 -0
- data/mlx/mlx/backend/common/utils.h +205 -0
- data/mlx/mlx/backend/cpu/CMakeLists.txt +88 -0
- data/mlx/mlx/backend/cpu/arange.h +28 -0
- data/mlx/mlx/backend/cpu/arg_reduce.cpp +124 -0
- data/mlx/mlx/backend/cpu/binary.cpp +269 -0
- data/mlx/mlx/backend/cpu/binary.h +517 -0
- data/mlx/mlx/backend/cpu/binary_ops.h +98 -0
- data/mlx/mlx/backend/cpu/binary_two.h +166 -0
- data/mlx/mlx/backend/cpu/cholesky.cpp +85 -0
- data/mlx/mlx/backend/cpu/compiled.cpp +357 -0
- data/mlx/mlx/backend/cpu/compiled_preamble.h +12 -0
- data/mlx/mlx/backend/cpu/conv.cpp +1351 -0
- data/mlx/mlx/backend/cpu/copy.cpp +386 -0
- data/mlx/mlx/backend/cpu/copy.h +36 -0
- data/mlx/mlx/backend/cpu/device_info.cpp +113 -0
- data/mlx/mlx/backend/cpu/device_info.h +28 -0
- data/mlx/mlx/backend/cpu/distributed.cpp +103 -0
- data/mlx/mlx/backend/cpu/eig.cpp +281 -0
- data/mlx/mlx/backend/cpu/eigh.cpp +241 -0
- data/mlx/mlx/backend/cpu/encoder.cpp +16 -0
- data/mlx/mlx/backend/cpu/encoder.h +67 -0
- data/mlx/mlx/backend/cpu/eval.cpp +40 -0
- data/mlx/mlx/backend/cpu/eval.h +12 -0
- data/mlx/mlx/backend/cpu/fft.cpp +120 -0
- data/mlx/mlx/backend/cpu/gemm.h +26 -0
- data/mlx/mlx/backend/cpu/gemms/bnns.cpp +214 -0
- data/mlx/mlx/backend/cpu/gemms/cblas.cpp +134 -0
- data/mlx/mlx/backend/cpu/gemms/simd_bf16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_fp16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_gemm.h +139 -0
- data/mlx/mlx/backend/cpu/hadamard.cpp +121 -0
- data/mlx/mlx/backend/cpu/indexing.cpp +854 -0
- data/mlx/mlx/backend/cpu/inverse.cpp +160 -0
- data/mlx/mlx/backend/cpu/jit_compiler.cpp +166 -0
- data/mlx/mlx/backend/cpu/jit_compiler.h +20 -0
- data/mlx/mlx/backend/cpu/lapack.h +80 -0
- data/mlx/mlx/backend/cpu/logsumexp.cpp +139 -0
- data/mlx/mlx/backend/cpu/luf.cpp +120 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.ps1 +38 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.sh +41 -0
- data/mlx/mlx/backend/cpu/masked_mm.cpp +608 -0
- data/mlx/mlx/backend/cpu/matmul.cpp +166 -0
- data/mlx/mlx/backend/cpu/primitives.cpp +478 -0
- data/mlx/mlx/backend/cpu/qrf.cpp +147 -0
- data/mlx/mlx/backend/cpu/quantized.cpp +1370 -0
- data/mlx/mlx/backend/cpu/reduce.cpp +587 -0
- data/mlx/mlx/backend/cpu/scan.cpp +338 -0
- data/mlx/mlx/backend/cpu/select.cpp +95 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_fp16_simd.h +56 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_simd.h +329 -0
- data/mlx/mlx/backend/cpu/simd/base_simd.h +319 -0
- data/mlx/mlx/backend/cpu/simd/math.h +193 -0
- data/mlx/mlx/backend/cpu/simd/neon_fp16_simd.h +212 -0
- data/mlx/mlx/backend/cpu/simd/simd.h +4 -0
- data/mlx/mlx/backend/cpu/simd/type.h +11 -0
- data/mlx/mlx/backend/cpu/slicing.h +21 -0
- data/mlx/mlx/backend/cpu/softmax.cpp +170 -0
- data/mlx/mlx/backend/cpu/sort.cpp +481 -0
- data/mlx/mlx/backend/cpu/svd.cpp +289 -0
- data/mlx/mlx/backend/cpu/ternary.h +154 -0
- data/mlx/mlx/backend/cpu/threefry.cpp +31 -0
- data/mlx/mlx/backend/cpu/threefry.h +21 -0
- data/mlx/mlx/backend/cpu/unary.cpp +238 -0
- data/mlx/mlx/backend/cpu/unary.h +281 -0
- data/mlx/mlx/backend/cpu/unary_ops.h +175 -0
- data/mlx/mlx/backend/cuda/CMakeLists.txt +265 -0
- data/mlx/mlx/backend/cuda/allocator.cpp +451 -0
- data/mlx/mlx/backend/cuda/allocator.h +94 -0
- data/mlx/mlx/backend/cuda/arange.cu +68 -0
- data/mlx/mlx/backend/cuda/arg_reduce.cu +189 -0
- data/mlx/mlx/backend/cuda/bin2h.cmake +150 -0
- data/mlx/mlx/backend/cuda/binary/CMakeLists.txt +21 -0
- data/mlx/mlx/backend/cuda/binary/add.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/arctan2.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/binary.cuh +383 -0
- data/mlx/mlx/backend/cuda/binary/bitwise_binary.cu +27 -0
- data/mlx/mlx/backend/cuda/binary/divide.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/equal.cu +15 -0
- data/mlx/mlx/backend/cuda/binary/greater.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/greater_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/log_add_exp.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_and.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_or.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/maximum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/minimum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/multiply.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/not_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/power.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/remainder.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/subtract.cu +7 -0
- data/mlx/mlx/backend/cuda/binary_two.cu +412 -0
- data/mlx/mlx/backend/cuda/compiled.cpp +357 -0
- data/mlx/mlx/backend/cuda/conv/conv.h +126 -0
- data/mlx/mlx/backend/cuda/conv/gemm_conv.cu +217 -0
- data/mlx/mlx/backend/cuda/conv/gemm_grouped_conv.cu +231 -0
- data/mlx/mlx/backend/cuda/conv.cpp +403 -0
- data/mlx/mlx/backend/cuda/copy/copy.cuh +55 -0
- data/mlx/mlx/backend/cuda/copy/copy_contiguous.cu +88 -0
- data/mlx/mlx/backend/cuda/copy/copy_general.cu +171 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_dynamic.cu +118 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_input.cu +229 -0
- data/mlx/mlx/backend/cuda/copy.cu +132 -0
- data/mlx/mlx/backend/cuda/cublas_utils.cpp +222 -0
- data/mlx/mlx/backend/cuda/cublas_utils.h +95 -0
- data/mlx/mlx/backend/cuda/cuda.h +21 -0
- data/mlx/mlx/backend/cuda/cuda_utils.h +90 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.cpp +133 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.h +187 -0
- data/mlx/mlx/backend/cuda/custom_kernel.cpp +379 -0
- data/mlx/mlx/backend/cuda/cutlass_utils.cuh +46 -0
- data/mlx/mlx/backend/cuda/delayload.cpp +80 -0
- data/mlx/mlx/backend/cuda/device/atomic_ops.cuh +63 -0
- data/mlx/mlx/backend/cuda/device/binary_ops.cuh +300 -0
- data/mlx/mlx/backend/cuda/device/cast_op.cuh +118 -0
- data/mlx/mlx/backend/cuda/device/complex.cuh +60 -0
- data/mlx/mlx/backend/cuda/device/config.h +12 -0
- data/mlx/mlx/backend/cuda/device/fp16_math.cuh +96 -0
- data/mlx/mlx/backend/cuda/device/gather.cuh +53 -0
- data/mlx/mlx/backend/cuda/device/gather_axis.cuh +65 -0
- data/mlx/mlx/backend/cuda/device/indexing.cuh +30 -0
- data/mlx/mlx/backend/cuda/device/scatter.cuh +68 -0
- data/mlx/mlx/backend/cuda/device/scatter_axis.cuh +67 -0
- data/mlx/mlx/backend/cuda/device/scatter_ops.cuh +44 -0
- data/mlx/mlx/backend/cuda/device/ternary_ops.cuh +13 -0
- data/mlx/mlx/backend/cuda/device/unary_ops.cuh +350 -0
- data/mlx/mlx/backend/cuda/device/utils.cuh +464 -0
- data/mlx/mlx/backend/cuda/device.cpp +522 -0
- data/mlx/mlx/backend/cuda/device.h +195 -0
- data/mlx/mlx/backend/cuda/device_info.cpp +232 -0
- data/mlx/mlx/backend/cuda/distributed.cu +121 -0
- data/mlx/mlx/backend/cuda/eval.cpp +66 -0
- data/mlx/mlx/backend/cuda/event.cu +415 -0
- data/mlx/mlx/backend/cuda/event.h +79 -0
- data/mlx/mlx/backend/cuda/fence.cpp +42 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.cpp +233 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.h +114 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_0.cpp +77 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu +329 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.cu +327 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.h +34 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm.h +25 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm_unaligned.cu +358 -0
- data/mlx/mlx/backend/cuda/indexing.cpp +434 -0
- data/mlx/mlx/backend/cuda/jit_module.cpp +443 -0
- data/mlx/mlx/backend/cuda/jit_module.h +120 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cu +52 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cuh +148 -0
- data/mlx/mlx/backend/cuda/layer_norm.cu +417 -0
- data/mlx/mlx/backend/cuda/load.cpp +60 -0
- data/mlx/mlx/backend/cuda/logsumexp.cu +161 -0
- data/mlx/mlx/backend/cuda/lru_cache.h +190 -0
- data/mlx/mlx/backend/cuda/matmul.cpp +373 -0
- data/mlx/mlx/backend/cuda/no_cuda.cpp +47 -0
- data/mlx/mlx/backend/cuda/primitives.cpp +46 -0
- data/mlx/mlx/backend/cuda/quantized/affine_quantize.cu +329 -0
- data/mlx/mlx/backend/cuda/quantized/convert_fp8.cu +19 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.cpp +206 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.h +88 -0
- data/mlx/mlx/backend/cuda/quantized/cuda_fp4.h +100 -0
- data/mlx/mlx/backend/cuda/quantized/fp_quantize.cu +496 -0
- data/mlx/mlx/backend/cuda/quantized/mxfp8_quantize.cuh +32 -0
- data/mlx/mlx/backend/cuda/quantized/no_qqmm_impl.cpp +26 -0
- data/mlx/mlx/backend/cuda/quantized/nvfp4_quantize.cuh +334 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.cu +304 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.h +21 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm.cpp +158 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.cpp +50 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.h +26 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.cu +227 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.h +30 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.cpp +85 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.h +53 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.cuh +88 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.h +50 -0
- data/mlx/mlx/backend/cuda/random.cu +202 -0
- data/mlx/mlx/backend/cuda/reduce/all_reduce.cu +159 -0
- data/mlx/mlx/backend/cuda/reduce/col_reduce.cu +510 -0
- data/mlx/mlx/backend/cuda/reduce/init_reduce.cu +50 -0
- data/mlx/mlx/backend/cuda/reduce/reduce.cuh +71 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_ops.cuh +211 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_utils.cuh +145 -0
- data/mlx/mlx/backend/cuda/reduce/row_reduce.cu +361 -0
- data/mlx/mlx/backend/cuda/reduce.cu +73 -0
- data/mlx/mlx/backend/cuda/rms_norm.cu +536 -0
- data/mlx/mlx/backend/cuda/rope.cu +429 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cpp +681 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu +796 -0
- data/mlx/mlx/backend/cuda/scan.cu +468 -0
- data/mlx/mlx/backend/cuda/slicing.cpp +111 -0
- data/mlx/mlx/backend/cuda/softmax.cu +162 -0
- data/mlx/mlx/backend/cuda/sort.cu +1076 -0
- data/mlx/mlx/backend/cuda/steel/defines.cuh +9 -0
- data/mlx/mlx/backend/cuda/steel/gemm.cuh +101 -0
- data/mlx/mlx/backend/cuda/steel/mma.cuh +117 -0
- data/mlx/mlx/backend/cuda/steel/tiles.cuh +450 -0
- data/mlx/mlx/backend/cuda/steel/utils.cuh +89 -0
- data/mlx/mlx/backend/cuda/ternary.cu +271 -0
- data/mlx/mlx/backend/cuda/unary/CMakeLists.txt +34 -0
- data/mlx/mlx/backend/cuda/unary/abs.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/bitwise_invert.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/ceil.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/conjugate.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf_inv.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/exp.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/expm1.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/floor.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/imag.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/log.cu +21 -0
- data/mlx/mlx/backend/cuda/unary/log1p.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/logical_not.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/negative.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/real.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/round.cu +18 -0
- data/mlx/mlx/backend/cuda/unary/sigmoid.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sign.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sqrt.cu +15 -0
- data/mlx/mlx/backend/cuda/unary/square.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/unary.cuh +224 -0
- data/mlx/mlx/backend/cuda/utils.cpp +116 -0
- data/mlx/mlx/backend/cuda/utils.h +49 -0
- data/mlx/mlx/backend/cuda/vector_types.cuh +48 -0
- data/mlx/mlx/backend/cuda/worker.cpp +79 -0
- data/mlx/mlx/backend/cuda/worker.h +55 -0
- data/mlx/mlx/backend/gpu/CMakeLists.txt +5 -0
- data/mlx/mlx/backend/gpu/copy.cpp +89 -0
- data/mlx/mlx/backend/gpu/copy.h +57 -0
- data/mlx/mlx/backend/gpu/device_info.h +36 -0
- data/mlx/mlx/backend/gpu/eval.h +18 -0
- data/mlx/mlx/backend/gpu/primitives.cpp +307 -0
- data/mlx/mlx/backend/gpu/slicing.cpp +44 -0
- data/mlx/mlx/backend/gpu/slicing.h +36 -0
- data/mlx/mlx/backend/metal/CMakeLists.txt +144 -0
- data/mlx/mlx/backend/metal/allocator.cpp +279 -0
- data/mlx/mlx/backend/metal/allocator.h +79 -0
- data/mlx/mlx/backend/metal/binary.cpp +257 -0
- data/mlx/mlx/backend/metal/binary.h +33 -0
- data/mlx/mlx/backend/metal/compiled.cpp +471 -0
- data/mlx/mlx/backend/metal/conv.cpp +1118 -0
- data/mlx/mlx/backend/metal/copy.cpp +235 -0
- data/mlx/mlx/backend/metal/custom_kernel.cpp +430 -0
- data/mlx/mlx/backend/metal/device.cpp +816 -0
- data/mlx/mlx/backend/metal/device.h +289 -0
- data/mlx/mlx/backend/metal/device_info.cpp +58 -0
- data/mlx/mlx/backend/metal/distributed.cpp +38 -0
- data/mlx/mlx/backend/metal/eval.cpp +97 -0
- data/mlx/mlx/backend/metal/event.cpp +62 -0
- data/mlx/mlx/backend/metal/fence.cpp +162 -0
- data/mlx/mlx/backend/metal/fft.cpp +807 -0
- data/mlx/mlx/backend/metal/hadamard.cpp +198 -0
- data/mlx/mlx/backend/metal/indexing.cpp +727 -0
- data/mlx/mlx/backend/metal/jit/includes.h +58 -0
- data/mlx/mlx/backend/metal/jit/indexing.h +76 -0
- data/mlx/mlx/backend/metal/jit_kernels.cpp +1118 -0
- data/mlx/mlx/backend/metal/kernels/CMakeLists.txt +193 -0
- data/mlx/mlx/backend/metal/kernels/arange.h +9 -0
- data/mlx/mlx/backend/metal/kernels/arange.metal +20 -0
- data/mlx/mlx/backend/metal/kernels/arg_reduce.metal +182 -0
- data/mlx/mlx/backend/metal/kernels/atomic.h +345 -0
- data/mlx/mlx/backend/metal/kernels/bf16.h +16 -0
- data/mlx/mlx/backend/metal/kernels/bf16_math.h +380 -0
- data/mlx/mlx/backend/metal/kernels/binary.h +199 -0
- data/mlx/mlx/backend/metal/kernels/binary.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/binary_ops.h +330 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.h +244 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.metal +54 -0
- data/mlx/mlx/backend/metal/kernels/cexpf.h +134 -0
- data/mlx/mlx/backend/metal/kernels/complex.h +173 -0
- data/mlx/mlx/backend/metal/kernels/conv.metal +701 -0
- data/mlx/mlx/backend/metal/kernels/copy.h +276 -0
- data/mlx/mlx/backend/metal/kernels/copy.metal +75 -0
- data/mlx/mlx/backend/metal/kernels/defines.h +24 -0
- data/mlx/mlx/backend/metal/kernels/erf.h +69 -0
- data/mlx/mlx/backend/metal/kernels/expm1f.h +90 -0
- data/mlx/mlx/backend/metal/kernels/fence.metal +52 -0
- data/mlx/mlx/backend/metal/kernels/fft/radix.h +328 -0
- data/mlx/mlx/backend/metal/kernels/fft/readwrite.h +624 -0
- data/mlx/mlx/backend/metal/kernels/fft.h +486 -0
- data/mlx/mlx/backend/metal/kernels/fft.metal +67 -0
- data/mlx/mlx/backend/metal/kernels/fp4.h +48 -0
- data/mlx/mlx/backend/metal/kernels/fp8.h +80 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.h +1850 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.metal +153 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.h +1044 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.metal +79 -0
- data/mlx/mlx/backend/metal/kernels/gemv.metal +868 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.h +827 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/hadamard.h +182 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather.h +51 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_axis.h +44 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_front.h +24 -0
- data/mlx/mlx/backend/metal/kernels/indexing/indexing.h +23 -0
- data/mlx/mlx/backend/metal/kernels/indexing/masked_scatter.h +41 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter.h +59 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter_axis.h +52 -0
- data/mlx/mlx/backend/metal/kernels/layer_norm.metal +433 -0
- data/mlx/mlx/backend/metal/kernels/logging.h +26 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.h +140 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.metal +18 -0
- data/mlx/mlx/backend/metal/kernels/quantized.h +2508 -0
- data/mlx/mlx/backend/metal/kernels/quantized.metal +144 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.h +1705 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.metal +106 -0
- data/mlx/mlx/backend/metal/kernels/quantized_utils.h +90 -0
- data/mlx/mlx/backend/metal/kernels/random.metal +103 -0
- data/mlx/mlx/backend/metal/kernels/reduce.h +5 -0
- data/mlx/mlx/backend/metal/kernels/reduce.metal +169 -0
- data/mlx/mlx/backend/metal/kernels/reduce_utils.h +6 -0
- data/mlx/mlx/backend/metal/kernels/reduction/ops.h +275 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_all.h +66 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_col.h +398 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_init.h +8 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_row.h +369 -0
- data/mlx/mlx/backend/metal/kernels/rms_norm.metal +391 -0
- data/mlx/mlx/backend/metal/kernels/rope.metal +229 -0
- data/mlx/mlx/backend/metal/kernels/scaled_dot_product_attention.metal +44 -0
- data/mlx/mlx/backend/metal/kernels/scan.h +514 -0
- data/mlx/mlx/backend/metal/kernels/scan.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/sdpa_vector.h +394 -0
- data/mlx/mlx/backend/metal/kernels/softmax.h +190 -0
- data/mlx/mlx/backend/metal/kernels/softmax.metal +24 -0
- data/mlx/mlx/backend/metal/kernels/sort.h +719 -0
- data/mlx/mlx/backend/metal/kernels/sort.metal +80 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/attn.h +296 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.h +471 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.metal +27 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.h +481 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.metal +28 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/loader.h +264 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/mma.h +750 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/nax.h +1076 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/params.h +44 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/transforms.h +71 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/conv.h +13 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.h +176 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.metal +56 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.h +225 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.metal +47 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loader.h +6 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_l.h +451 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_n.h +319 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_general.h +381 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/params.h +62 -0
- data/mlx/mlx/backend/metal/kernels/steel/defines.h +7 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm.h +295 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm_nax.h +157 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.h +346 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.metal +34 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.h +219 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.h +459 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.metal +59 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.h +143 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.metal +37 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.h +719 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.h +266 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.metal +43 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.h +227 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.h +152 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/loader.h +137 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/mma.h +1146 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/nax.h +1084 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/params.h +65 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/transforms.h +72 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/integral_constant.h +134 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/type_traits.h +55 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils.h +42 -0
- data/mlx/mlx/backend/metal/kernels/ternary.h +145 -0
- data/mlx/mlx/backend/metal/kernels/ternary.metal +48 -0
- data/mlx/mlx/backend/metal/kernels/ternary_ops.h +10 -0
- data/mlx/mlx/backend/metal/kernels/unary.h +63 -0
- data/mlx/mlx/backend/metal/kernels/unary.metal +115 -0
- data/mlx/mlx/backend/metal/kernels/unary_ops.h +454 -0
- data/mlx/mlx/backend/metal/kernels/utils.h +445 -0
- data/mlx/mlx/backend/metal/kernels.h +375 -0
- data/mlx/mlx/backend/metal/logsumexp.cpp +95 -0
- data/mlx/mlx/backend/metal/make_compiled_preamble.sh +120 -0
- data/mlx/mlx/backend/metal/matmul.cpp +2572 -0
- data/mlx/mlx/backend/metal/matmul.h +144 -0
- data/mlx/mlx/backend/metal/metal.cpp +50 -0
- data/mlx/mlx/backend/metal/metal.h +25 -0
- data/mlx/mlx/backend/metal/no_metal.cpp +42 -0
- data/mlx/mlx/backend/metal/nojit_kernels.cpp +414 -0
- data/mlx/mlx/backend/metal/normalization.cpp +433 -0
- data/mlx/mlx/backend/metal/primitives.cpp +242 -0
- data/mlx/mlx/backend/metal/quantized.cpp +1651 -0
- data/mlx/mlx/backend/metal/reduce.cpp +1038 -0
- data/mlx/mlx/backend/metal/reduce.h +41 -0
- data/mlx/mlx/backend/metal/resident.cpp +100 -0
- data/mlx/mlx/backend/metal/resident.h +32 -0
- data/mlx/mlx/backend/metal/rope.cpp +165 -0
- data/mlx/mlx/backend/metal/scaled_dot_product_attention.cpp +798 -0
- data/mlx/mlx/backend/metal/scan.cpp +145 -0
- data/mlx/mlx/backend/metal/scan.h +17 -0
- data/mlx/mlx/backend/metal/slicing.cpp +99 -0
- data/mlx/mlx/backend/metal/softmax.cpp +87 -0
- data/mlx/mlx/backend/metal/sort.cpp +368 -0
- data/mlx/mlx/backend/metal/ternary.cpp +160 -0
- data/mlx/mlx/backend/metal/ternary.h +21 -0
- data/mlx/mlx/backend/metal/unary.cpp +161 -0
- data/mlx/mlx/backend/metal/unary.h +21 -0
- data/mlx/mlx/backend/metal/utils.cpp +77 -0
- data/mlx/mlx/backend/metal/utils.h +99 -0
- data/mlx/mlx/backend/no_cpu/CMakeLists.txt +7 -0
- data/mlx/mlx/backend/no_cpu/compiled.cpp +24 -0
- data/mlx/mlx/backend/no_cpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_cpu/primitives.cpp +146 -0
- data/mlx/mlx/backend/no_gpu/CMakeLists.txt +8 -0
- data/mlx/mlx/backend/no_gpu/allocator.cpp +134 -0
- data/mlx/mlx/backend/no_gpu/apple_memory.h +16 -0
- data/mlx/mlx/backend/no_gpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_gpu/eval.cpp +24 -0
- data/mlx/mlx/backend/no_gpu/event.cpp +53 -0
- data/mlx/mlx/backend/no_gpu/fence.cpp +54 -0
- data/mlx/mlx/backend/no_gpu/linux_memory.h +22 -0
- data/mlx/mlx/backend/no_gpu/primitives.cpp +185 -0
- data/mlx/mlx/compile.cpp +1243 -0
- data/mlx/mlx/compile.h +45 -0
- data/mlx/mlx/compile_impl.h +70 -0
- data/mlx/mlx/device.cpp +72 -0
- data/mlx/mlx/device.h +56 -0
- data/mlx/mlx/distributed/CMakeLists.txt +14 -0
- data/mlx/mlx/distributed/distributed.cpp +197 -0
- data/mlx/mlx/distributed/distributed.h +61 -0
- data/mlx/mlx/distributed/distributed_impl.h +59 -0
- data/mlx/mlx/distributed/jaccl/CMakeLists.txt +12 -0
- data/mlx/mlx/distributed/jaccl/jaccl.cpp +178 -0
- data/mlx/mlx/distributed/jaccl/jaccl.h +12 -0
- data/mlx/mlx/distributed/jaccl/mesh.cpp +451 -0
- data/mlx/mlx/distributed/jaccl/mesh.h +122 -0
- data/mlx/mlx/distributed/jaccl/no_jaccl.cpp +20 -0
- data/mlx/mlx/distributed/jaccl/ring.cpp +692 -0
- data/mlx/mlx/distributed/jaccl/ring.h +178 -0
- data/mlx/mlx/distributed/jaccl/utils.cpp +329 -0
- data/mlx/mlx/distributed/jaccl/utils.h +342 -0
- data/mlx/mlx/distributed/mpi/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/mpi/mpi.cpp +501 -0
- data/mlx/mlx/distributed/mpi/mpi.h +12 -0
- data/mlx/mlx/distributed/mpi/mpi_declarations.h +28 -0
- data/mlx/mlx/distributed/mpi/no_mpi.cpp +20 -0
- data/mlx/mlx/distributed/nccl/CMakeLists.txt +26 -0
- data/mlx/mlx/distributed/nccl/nccl.cpp +443 -0
- data/mlx/mlx/distributed/nccl/nccl.h +12 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/CMakeLists.txt +1 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/nccl_stubs.cpp +54 -0
- data/mlx/mlx/distributed/nccl/no_nccl.cpp +20 -0
- data/mlx/mlx/distributed/ops.cpp +186 -0
- data/mlx/mlx/distributed/ops.h +57 -0
- data/mlx/mlx/distributed/primitives.cpp +95 -0
- data/mlx/mlx/distributed/primitives.h +156 -0
- data/mlx/mlx/distributed/reduction_ops.h +38 -0
- data/mlx/mlx/distributed/ring/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/ring/no_ring.cpp +20 -0
- data/mlx/mlx/distributed/ring/ring.cpp +870 -0
- data/mlx/mlx/distributed/ring/ring.h +12 -0
- data/mlx/mlx/distributed/utils.cpp +206 -0
- data/mlx/mlx/distributed/utils.h +67 -0
- data/mlx/mlx/dtype.cpp +197 -0
- data/mlx/mlx/dtype.h +116 -0
- data/mlx/mlx/dtype_utils.cpp +42 -0
- data/mlx/mlx/dtype_utils.h +119 -0
- data/mlx/mlx/einsum.cpp +941 -0
- data/mlx/mlx/einsum.h +23 -0
- data/mlx/mlx/event.h +58 -0
- data/mlx/mlx/export.cpp +1130 -0
- data/mlx/mlx/export.h +137 -0
- data/mlx/mlx/export_impl.h +99 -0
- data/mlx/mlx/fast.cpp +941 -0
- data/mlx/mlx/fast.h +103 -0
- data/mlx/mlx/fast_primitives.h +427 -0
- data/mlx/mlx/fence.h +39 -0
- data/mlx/mlx/fft.cpp +262 -0
- data/mlx/mlx/fft.h +159 -0
- data/mlx/mlx/graph_utils.cpp +175 -0
- data/mlx/mlx/graph_utils.h +67 -0
- data/mlx/mlx/io/CMakeLists.txt +25 -0
- data/mlx/mlx/io/gguf.cpp +470 -0
- data/mlx/mlx/io/gguf.h +20 -0
- data/mlx/mlx/io/gguf_quants.cpp +164 -0
- data/mlx/mlx/io/load.cpp +397 -0
- data/mlx/mlx/io/load.h +175 -0
- data/mlx/mlx/io/no_gguf.cpp +20 -0
- data/mlx/mlx/io/no_safetensors.cpp +37 -0
- data/mlx/mlx/io/safetensors.cpp +234 -0
- data/mlx/mlx/io.h +61 -0
- data/mlx/mlx/linalg.cpp +708 -0
- data/mlx/mlx/linalg.h +115 -0
- data/mlx/mlx/memory.h +80 -0
- data/mlx/mlx/mlx.h +25 -0
- data/mlx/mlx/ops.cpp +6094 -0
- data/mlx/mlx/ops.h +1610 -0
- data/mlx/mlx/primitives.cpp +5850 -0
- data/mlx/mlx/primitives.h +2525 -0
- data/mlx/mlx/random.cpp +492 -0
- data/mlx/mlx/random.h +283 -0
- data/mlx/mlx/scheduler.cpp +73 -0
- data/mlx/mlx/scheduler.h +189 -0
- data/mlx/mlx/small_vector.h +540 -0
- data/mlx/mlx/stream.h +42 -0
- data/mlx/mlx/threadpool.h +133 -0
- data/mlx/mlx/transforms.cpp +1065 -0
- data/mlx/mlx/transforms.h +231 -0
- data/mlx/mlx/transforms_impl.h +88 -0
- data/mlx/mlx/types/bf16.h +187 -0
- data/mlx/mlx/types/complex.h +113 -0
- data/mlx/mlx/types/fp16.h +234 -0
- data/mlx/mlx/types/half_types.h +58 -0
- data/mlx/mlx/types/limits.h +70 -0
- data/mlx/mlx/utils.cpp +302 -0
- data/mlx/mlx/utils.h +174 -0
- data/mlx/mlx/version.cpp +11 -0
- data/mlx/mlx/version.h +22 -0
- data/mlx/mlx.pc.in +52 -0
- metadata +643 -0
data/mlx/mlx/linalg.cpp
ADDED
@@ -0,0 +1,708 @@
+// Copyright © 2023 Apple Inc.
+
+#include <numeric>
+#include <ostream>
+#include <vector>
+
+#include "mlx/linalg.h"
+#include "mlx/primitives.h"
+#include "mlx/utils.h"
+
+namespace mlx::core::linalg {
+
+void check_cpu_stream(const StreamOrDevice& s, const std::string& prefix) {
+  if (to_stream(s).device == Device::gpu) {
+    throw std::invalid_argument(
+        prefix +
+        " This op is not yet supported on the GPU. "
+        "Explicitly pass a CPU stream to run it.");
+  }
+}
+void check_float(Dtype dtype, const std::string& prefix) {
+  if (dtype != float32 && dtype != float64) {
+    std::ostringstream msg;
+    msg << prefix << " Arrays must have type float32 or float64. "
+        << "Received array with type " << dtype << ".";
+    throw std::invalid_argument(msg.str());
+  }
+}
+
+void check_float_or_complex(Dtype dtype, const std::string& prefix) {
+  if (dtype != float32 && dtype != float64 && dtype != complex64) {
+    std::ostringstream msg;
+    msg << prefix << " Arrays must have type float32, float64 or complex64. "
+        << "Received array with type " << dtype << ".";
+    throw std::invalid_argument(msg.str());
+  }
+}
+
+Dtype at_least_float(const Dtype& d) {
+  return issubdtype(d, inexact) ? d : promote_types(d, float32);
+}
+
+inline array l2_norm(
+    const array& a,
+    const std::vector<int>& axis,
+    bool keepdims,
+    StreamOrDevice s) {
+  if (issubdtype(a.dtype(), complexfloating)) {
+    return sqrt(sum(abs(a, s) * abs(a, s), axis, keepdims, s), s);
+  } else {
+    return sqrt(sum(square(a, s), axis, keepdims, s), s);
+  }
+}
+
+inline array vector_norm(
+    const array& a,
+    const double ord,
+    const std::vector<int>& axis,
+    bool keepdims,
+    StreamOrDevice s) {
+  auto dtype = at_least_float(a.dtype());
+  if (ord == 0.0) {
+    return astype(sum(not_equal(a, array(0), s), axis, keepdims, s), dtype, s);
+  } else if (ord == 1.0) {
+    return astype(sum(abs(a, s), axis, keepdims, s), dtype, s);
+  } else if (ord == 2.0) {
+    return l2_norm(a, axis, keepdims, s);
+  } else if (ord == std::numeric_limits<double>::infinity()) {
+    return astype(max(abs(a, s), axis, keepdims, s), dtype, s);
+  } else if (ord == -std::numeric_limits<double>::infinity()) {
+    return astype(min(abs(a, s), axis, keepdims, s), dtype, s);
+  } else {
+    return power(
+        sum(power(abs(a, s), array(ord, dtype), s), axis, keepdims, s),
+        array(1.0 / ord, dtype),
+        s);
+  }
+}
+
+inline array matrix_norm(
+    const array& a,
+    const double ord,
+    const std::vector<int>& axis,
+    bool keepdims,
+    StreamOrDevice s) {
+  auto dtype = at_least_float(a.dtype());
+  auto row_axis = axis[0];
+  auto col_axis = axis[1];
+  if (ord == -1.0) {
+    col_axis -= (!keepdims && col_axis > row_axis && col_axis > 0);
+    return astype(
+        min(sum(abs(a, s), row_axis, keepdims, s), col_axis, keepdims, s),
+        dtype,
+        s);
+  } else if (ord == 1.0) {
+    col_axis -= (!keepdims && col_axis > row_axis && col_axis > 0);
+    return astype(
+        max(sum(abs(a, s), row_axis, keepdims, s), col_axis, keepdims, s),
+        dtype,
+        s);
+  } else if (ord == std::numeric_limits<double>::infinity()) {
+    row_axis -= (!keepdims && row_axis > col_axis && row_axis > 0);
+    return astype(
+        max(sum(abs(a, s), col_axis, keepdims, s), row_axis, keepdims, s),
+        dtype,
+        s);
+  } else if (ord == -std::numeric_limits<double>::infinity()) {
+    row_axis -= (!keepdims && row_axis > col_axis && row_axis > 0);
+    return astype(
+        min(sum(abs(a, s), col_axis, keepdims, s), row_axis, keepdims, s),
+        dtype,
+        s);
+  } else if (ord == 2.0 || ord == -2.0) {
+    row_axis = (axis[0] < 0) ? axis[0] + a.ndim() : axis[0];
+    col_axis = (axis[1] < 0) ? axis[1] + a.ndim() : axis[1];
+    auto a_matrix = (row_axis > col_axis)
+        ? moveaxis(moveaxis(a, row_axis, -1, s), col_axis, -1, s)
+        : moveaxis(moveaxis(a, col_axis, -1, s), row_axis, -2, s);
+    a_matrix = svd(a_matrix, false, s).at(0);
+    a_matrix = (ord == 2.0) ? max(a_matrix, -1, false, s)
+                            : min(a_matrix, -1, false, s);
+    if (keepdims) {
+      std::vector<int> sorted_axes = (row_axis < col_axis)
+          ? std::vector<int>{row_axis, col_axis}
+          : std::vector<int>{col_axis, row_axis};
+      a_matrix = expand_dims(a_matrix, sorted_axes, s);
+    }
+    return astype(a_matrix, dtype, s);
+  } else {
+    std::ostringstream msg;
+    msg << "[linalg::norm] Invalid ord " << ord << " for matrix norm.";
+    throw std::invalid_argument(msg.str());
+  }
+}
+
+inline array matrix_norm(
+    const array& a,
+    const std::string& ord,
+    const std::vector<int>& axis,
+    bool keepdims,
+    StreamOrDevice s) {
+  if (ord == "f" || ord == "fro") {
+    return l2_norm(a, axis, keepdims, s);
+  } else if (ord == "nuc") {
+    int row_axis = (axis[0] < 0) ? axis[0] + a.ndim() : axis[0];
+    int col_axis = (axis[1] < 0) ? axis[1] + a.ndim() : axis[1];
+    auto a_matrix = (row_axis > col_axis)
+        ? moveaxis(moveaxis(a, row_axis, -1, s), col_axis, -1, s)
+        : moveaxis(moveaxis(a, col_axis, -1, s), row_axis, -2, s);
+    a_matrix = sum(svd(a_matrix, false, s).at(0), -1, false, s);
+    if (keepdims) {
+      std::vector<int> sorted_axes = (row_axis < col_axis)
+          ? std::vector<int>{row_axis, col_axis}
+          : std::vector<int>{col_axis, row_axis};
+      a_matrix = expand_dims(a_matrix, sorted_axes, s);
+    }
+    return a_matrix;
+  } else {
+    std::ostringstream msg;
+    msg << "[linalg::norm] Invalid ord value '" << ord << "' for matrix norm.";
+    throw std::invalid_argument(msg.str());
+  }
+}
+
+array norm(
+    const array& a,
+    const std::optional<std::vector<int>>& axis /* = std::nullopt */,
+    bool keepdims /* = false */,
+    StreamOrDevice s /* = {} */) {
+  if (!axis) {
+    return norm(flatten(a, s), std::vector<int>{0}, keepdims, s);
+  }
+
+  if (axis.value().size() > 2) {
+    throw std::invalid_argument(
+        "[linalg::norm] Received too many axes for norm.");
+  }
+  return l2_norm(a, axis.value(), keepdims, s);
+}
+
+array norm(
+    const array& a,
+    const double ord,
+    const std::optional<std::vector<int>>& axis /* = std::nullopt */,
+    bool keepdims /* = false */,
+    StreamOrDevice s /* = {} */) {
+  std::vector<int> ax;
+  if (!axis) {
+    ax.resize(a.ndim());
+    std::iota(ax.begin(), ax.end(), 0);
+  } else {
+    ax = axis.value();
+  }
+  if (ax.size() == 1) {
+    return vector_norm(a, ord, ax, keepdims, s);
+  } else if (ax.size() == 2) {
+    return matrix_norm(a, ord, ax, keepdims, s);
+  } else {
+    throw std::invalid_argument(
+        "[linalg::norm] Received too many axes for norm.");
+  }
+}
+
+array norm(
+    const array& a,
+    const std::string& ord,
+    const std::optional<std::vector<int>>& axis /* = std::nullopt */,
+    bool keepdims /* = false */,
+    StreamOrDevice s /* = {} */) {
+  std::vector<int> ax;
+  if (!axis) {
+    ax.resize(a.ndim());
+    std::iota(ax.begin(), ax.end(), 0);
+  } else {
+    ax = axis.value();
+  }
+  if (ax.size() != 2) {
+    std::ostringstream msg;
+    msg << "[linalg::norm] Norm '" << ord << "' only supported for matrices,"
+        << " but received " << ax.size() << " axis/axes.";
+    throw std::invalid_argument(msg.str());
+  }
+  return matrix_norm(a, ord, ax, keepdims, s);
+}
+
+std::pair<array, array> qr(const array& a, StreamOrDevice s /* = {} */) {
+  check_cpu_stream(s, "[linalg::qr]");
+  check_float(a.dtype(), "[linalg::qr]");
+
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << "[linalg::qr] Arrays must have >= 2 dimensions. Received array "
+           "with "
+        << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+  int k = std::min(a.shape(-2), a.shape(-1));
+  auto q_shape = a.shape();
+  q_shape.back() = k;
+  auto r_shape = a.shape();
+  r_shape[r_shape.size() - 2] = k;
+  auto out = array::make_arrays(
+      {std::move(q_shape), std::move(r_shape)},
+      {a.dtype(), a.dtype()},
+      std::make_shared<QRF>(to_stream(s)),
+      {astype(a, a.dtype(), s)});
+  return std::make_pair(out[0], out[1]);
+}
+
+std::vector<array>
+svd(const array& a, bool compute_uv, StreamOrDevice s /* = {} */) {
+  check_cpu_stream(s, "[linalg::svd]");
+  check_float_or_complex(a.dtype(), "[linalg::svd]");
+
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << "[linalg::svd] Input array must have >= 2 dimensions. Received array "
+           "with "
+        << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+
+  const auto m = a.shape(-2);
+  const auto n = a.shape(-1);
+  const auto rank = a.ndim();
+
+  auto s_shape = a.shape();
+  s_shape.pop_back();
+  s_shape[rank - 2] = std::min(m, n);
+
+  auto s_dtype = a.dtype() == complex64 ? float32 : a.dtype();
+
+  if (!compute_uv) {
+    return {array(
+        std::move(s_shape),
+        s_dtype,
+        std::make_shared<SVD>(to_stream(s), compute_uv),
+        {a})};
+  }
+
+  auto u_shape = a.shape();
+  u_shape[rank - 2] = m;
+  u_shape[rank - 1] = m;
+
+  auto vt_shape = a.shape();
+  vt_shape[rank - 2] = n;
+  vt_shape[rank - 1] = n;
+
+  return array::make_arrays(
+      {u_shape, s_shape, vt_shape},
+      {a.dtype(), s_dtype, a.dtype()},
+      std::make_shared<SVD>(to_stream(s), compute_uv),
+      {a});
+}
+
+array inv_impl(const array& a, bool tri, bool upper, StreamOrDevice s) {
+  check_cpu_stream(s, "[linalg::inv]");
+  check_float(a.dtype(), "[linalg::inv]");
+
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << "[linalg::inv] Arrays must have >= 2 dimensions. Received array "
+           "with "
+        << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+  if (a.shape(-1) != a.shape(-2)) {
+    throw std::invalid_argument(
+        "[linalg::inv] Inverses are only defined for square matrices.");
+  }
+
+  return array(
+      a.shape(),
+      a.dtype(),
+      std::make_shared<Inverse>(to_stream(s), tri, upper),
+      {a});
+}
+
+array inv(const array& a, StreamOrDevice s /* = {} */) {
+  return inv_impl(a, /*tri=*/false, /*upper=*/true, s);
+}
+
+array tri_inv(
+    const array& a,
+    bool upper /* = false */,
+    StreamOrDevice s /* = {} */) {
+  return inv_impl(a, /*tri=*/true, upper, s);
+}
+
+array cholesky(
+    const array& a,
+    bool upper /* = false */,
+    StreamOrDevice s /* = {} */) {
+  check_cpu_stream(s, "[linalg::cholesky]");
+  check_float(a.dtype(), "[linalg::cholesky]");
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << "[linalg::cholesky] Arrays must have >= 2 dimensions. Received array "
+           "with "
+        << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+
+  if (a.shape(-1) != a.shape(-2)) {
+    throw std::invalid_argument(
+        "[linalg::cholesky] Cholesky decomposition is only defined for square "
+        "matrices.");
+  }
+  return array(
+      a.shape(),
+      a.dtype(),
+      std::make_shared<Cholesky>(to_stream(s), upper),
+      {a});
+}
+
+array pinv(const array& a, StreamOrDevice s /* = {} */) {
+  check_cpu_stream(s, "[linalg::pinv]");
+  check_float(a.dtype(), "[linalg::pinv]");
+
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << "[linalg::pinv] Arrays must have >= 2 dimensions. Received array "
+        << "with " << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+
+  int m = a.shape(-2);
+  int n = a.shape(-1);
+  int k = std::min(m, n);
+  auto outs = linalg::svd(a, true, s);
+  array U = outs[0];
+  array S = outs[1];
+  array V = outs[2];
+
+  Shape starts(a.ndim(), 0);
+  auto ends = a.shape();
+  int i = a.ndim() - 2;
+  int j = a.ndim() - 1;
+
+  // Prepare U
+  ends[i] = m;
+  ends[j] = k;
+  U = swapaxes(slice(U, starts, ends, s), -1, -2, s);
+
+  // Prepare V
+  ends[i] = k;
+  ends[j] = n;
+  V = swapaxes(slice(V, starts, ends, s), -1, -2, s);
+
+  // Prepare S
+  S = expand_dims(S, -2, s);
+
+  auto rcond = 10. * std::max(m, n) * finfo(a.dtype()).eps;
+  auto cutoff = multiply(array(rcond, a.dtype()), max(S, -1, true, s), s);
+  auto rS =
+      where(greater(S, cutoff, s), reciprocal(S, s), array(0.0f, a.dtype()), s);
+
+  return matmul(multiply(V, rS, s), U, s);
+}
+
+array cholesky_inv(
+    const array& L,
+    bool upper /* = false */,
+    StreamOrDevice s /* = {} */) {
+  check_cpu_stream(s, "[linalg::cholesky_inv]");
+  check_float(L.dtype(), "[linalg::cholesky_inv]");
+
+  if (L.ndim() < 2) {
+    std::ostringstream msg;
+    msg << "[linalg::cholesky_inv] Arrays must have >= 2 dimensions. Received array "
+           "with "
+        << L.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+
+  if (L.shape(-1) != L.shape(-2)) {
+    throw std::invalid_argument(
+        "[linalg::cholesky_inv] Cholesky inverse is only defined for square "
+        "matrices.");
+  }
+
+  array L_inv = tri_inv(L, upper, s);
+  if (upper) {
+    return matmul(L_inv, swapaxes(L_inv, -1, -2, s), s);
+  } else {
+    return matmul(swapaxes(L_inv, -1, -2, s), L_inv, s);
+  }
+}
+
+array cross(
+    const array& a,
+    const array& b,
+    int axis /* = -1 */,
+    StreamOrDevice s /* = {} */) {
+  auto check_ax = [axis](const array& arr) {
+    if (axis >= static_cast<int>(arr.ndim()) || axis + arr.ndim() < 0) {
+      std::ostringstream msg;
+      msg << "[linalg::cross] axis " << axis << " invalid for array with "
+          << arr.ndim() << " dimensions.";
+      throw std::invalid_argument(msg.str());
+    }
+    if (arr.shape(axis) < 2 || arr.shape(axis) > 3) {
+      throw std::invalid_argument(
+          "[linalg::cross] The specified axis must have size 2 or 3.");
+    }
+  };
+  check_ax(a);
+  check_ax(b);
+
+  bool a_2d = a.shape(axis) == 2;
+  bool b_2d = b.shape(axis) == 2;
+
+  auto out_type = promote_types(a.dtype(), b.dtype());
+  auto ashape = a.shape();
+  auto bshape = b.shape();
+
+  ashape[axis < 0 ? axis + a.ndim() : axis] = 3;
+  bshape[axis < 0 ? axis + b.ndim() : axis] = 3;
+  auto out_shape = broadcast_shapes(ashape, bshape);
+
+  if (axis < 0) {
+    axis += out_shape.size();
+  }
+
+  out_shape[axis] = a_2d ? 2 : 3;
+  auto a_ = broadcast_to(astype(a, out_type, s), out_shape, s);
+
+  out_shape[axis] = b_2d ? 2 : 3;
+  auto b_ = broadcast_to(astype(b, out_type, s), out_shape, s);
+
+  auto a_splits = split(a_, a_2d ? 2 : 3, axis);
+  auto b_splits = split(b_, b_2d ? 2 : 3, axis);
+
+  std::vector<array> outputs;
+  if (a_2d && b_2d) {
+    auto z = zeros_like(a_splits[0], s);
+    outputs.push_back(z);
+    outputs.push_back(z);
+  } else if (b_2d) {
+    outputs.push_back(negative(multiply(a_splits[2], b_splits[1], s), s));
+    outputs.push_back(multiply(a_splits[2], b_splits[0], s));
+  } else if (a_2d) {
+    outputs.push_back(multiply(a_splits[1], b_splits[2], s));
+    outputs.push_back(negative(multiply(a_splits[0], b_splits[2], s), s));
+  } else {
+    outputs.push_back(subtract(
+        multiply(a_splits[1], b_splits[2], s),
+        multiply(a_splits[2], b_splits[1], s),
+        s));
+    outputs.push_back(subtract(
+        multiply(a_splits[2], b_splits[0], s),
+        multiply(a_splits[0], b_splits[2], s),
+        s));
+  }
+  outputs.push_back(subtract(
+      multiply(a_splits[0], b_splits[1], s),
+      multiply(a_splits[1], b_splits[0], s),
+      s));
+  return concatenate(outputs, axis, s);
+}
+
+void validate_eig(
+    const array& a,
+    const StreamOrDevice& stream,
+    const std::string& fname) {
+  check_cpu_stream(stream, fname);
+  check_float_or_complex(a.dtype(), fname);
+
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << fname << " Arrays must have >= 2 dimensions. Received array with "
+        << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+
+  if (a.shape(-1) != a.shape(-2)) {
+    throw std::invalid_argument(fname + " Only defined for square matrices.");
+  }
+}
+
+array eigvalsh(
+    const array& a,
+    std::string UPLO /* = "L" */,
+    StreamOrDevice s /* = {} */) {
+  validate_eig(a, s, "[linalg::eigvalsh]");
+  Shape out_shape(a.shape().begin(), a.shape().end() - 1);
+  Dtype eigval_type = a.dtype() == complex64 ? float32 : a.dtype();
+  return array(
+      std::move(out_shape),
+      eigval_type,
+      std::make_shared<Eigh>(to_stream(s), UPLO, false),
+      {a});
+}
+
+std::pair<array, array> eigh(
+    const array& a,
+    std::string UPLO /* = "L" */,
+    StreamOrDevice s /* = {} */) {
+  validate_eig(a, s, "[linalg::eigh]");
+  Dtype eigval_type = a.dtype() == complex64 ? float32 : a.dtype();
+  auto out = array::make_arrays(
+      {Shape(a.shape().begin(), a.shape().end() - 1), a.shape()},
+      {eigval_type, a.dtype()},
+      std::make_shared<Eigh>(to_stream(s), UPLO, true),
+      {a});
+  return std::make_pair(out[0], out[1]);
+}
+
+array eigvals(const array& a, StreamOrDevice s /* = {} */) {
+  validate_eig(a, s, "[linalg::eigvals]");
+  Shape out_shape(a.shape().begin(), a.shape().end() - 1);
+  return array(
+      std::move(out_shape),
+      complex64,
+      std::make_shared<Eig>(to_stream(s), false),
+      {a});
+}
+
+std::pair<array, array> eig(const array& a, StreamOrDevice s /* = {} */) {
+  validate_eig(a, s, "[linalg::eig]");
+  auto out = array::make_arrays(
+      {Shape(a.shape().begin(), a.shape().end() - 1), a.shape()},
+      {complex64, complex64},
+      std::make_shared<Eig>(to_stream(s), true),
+      {a});
+  return std::make_pair(out[0], out[1]);
+}
+
+void validate_lu(
+    const array& a,
+    const StreamOrDevice& stream,
+    const std::string& fname) {
+  check_cpu_stream(stream, fname);
+  check_float(a.dtype(), fname);
+
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << fname
+        << " Arrays must have >= 2 dimensions. Received array "
+           "with "
+        << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+}
+
+std::vector<array> lu_helper(const array& a, StreamOrDevice s /* = {} */) {
+  int m = a.shape()[a.shape().size() - 2];
+  int n = a.shape()[a.shape().size() - 1];
+
+  Shape pivots_shape(a.shape().begin(), a.shape().end() - 2);
+  pivots_shape.push_back(std::min(m, n));
+
+  Shape row_idx_shape(a.shape().begin(), a.shape().end() - 1);
+
+  return array::make_arrays(
+      {a.shape(), pivots_shape, row_idx_shape},
+      {a.dtype(), uint32, uint32},
+      std::make_shared<LUF>(to_stream(s)),
+      {astype(a, a.dtype(), s)});
+}
+
+std::vector<array> lu(const array& a, StreamOrDevice s /* = {} */) {
+  validate_lu(a, s, "[linalg::lu]");
+
+  auto out = lu_helper(a, s);
+  auto& LU = out[0];
+  auto& row_pivots = out[2];
+  auto L = tril(LU, /* k = */ -1, s);
+  auto U = triu(LU, /* k = */ 0, s);
+
+  int M = a.shape(-2);
+  int N = a.shape(-1);
+  int K = std::min(M, N);
+  if (N != K) {
+    auto start = Shape(L.ndim(), 0);
+    auto stop = L.shape();
+    stop.back() = K;
+    L = slice(L, std::move(start), std::move(stop), s);
+  } else if (M != K) {
+    auto start = Shape(U.ndim(), 0);
+    auto stop = U.shape();
+    stop[U.ndim() - 2] = K;
+    U = slice(U, std::move(start), std::move(stop), s);
+  }
+  L = add(L, eye(M, K, s), s);
+  return {row_pivots, L, U};
+}
+
+std::pair<array, array> lu_factor(const array& a, StreamOrDevice s /* = {} */) {
+  validate_lu(a, s, "[linalg::lu_factor]");
+  auto out = lu_helper(a, s);
+  return std::make_pair(out[0], out[1]);
+}
+
+void validate_solve(
+    const array& a,
+    const array& b,
+    const StreamOrDevice& stream,
+    const std::string& fname) {
+  check_cpu_stream(stream, fname);
+  if (a.ndim() < 2) {
+    std::ostringstream msg;
+    msg << fname << " First input must have >= 2 dimensions. "
+        << "Received array with " << a.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+
+  if (b.ndim() < 1) {
+    std::ostringstream msg;
+    msg << fname << " Second input must have >= 1 dimensions. "
+        << "Received array with " << b.ndim() << " dimensions.";
+    throw std::invalid_argument(msg.str());
+  }
+
+  if (a.shape(-1) != a.shape(-2)) {
+    std::ostringstream msg;
+    msg << fname << " First input must be a square matrix. "
+        << "Received array with shape " << a.shape() << ".";
+    throw std::invalid_argument(msg.str());
+  }
+
+  int lastDim = b.ndim() > 1 ? -2 : -1;
+  if (a.shape(-1) != b.shape(lastDim)) {
+    std::ostringstream msg;
+    msg << fname << " Last dimension of first input with shape " << a.shape()
+        << " must match second to last dimension of"
+        << " second input with shape " << b.shape() << ".";
+    throw std::invalid_argument(msg.str());
+  }
+
+  auto out_type = promote_types(a.dtype(), b.dtype());
+  if (out_type != float32 && out_type != float64) {
+    std::ostringstream msg;
+    msg << fname
+        << " Input arrays must promote to float32 or float64. "
+           " Received arrays with type "
+        << a.dtype() << " and " << b.dtype() << ".";
+    throw std::invalid_argument(msg.str());
+  }
+}
+
+array solve(const array& a, const array& b, StreamOrDevice s /* = {} */) {
+  validate_solve(a, b, s, "[linalg::solve]");
+
+  // P, L, U matrices
+  const auto luf = lu(a, s);
+  auto perm = argsort(luf[0], -1, s);
+  int take_axis = -1;
+  if (b.ndim() >= 2) {
+    perm = expand_dims(perm, -1, s);
+    take_axis -= 1;
+  }
+  auto pb = take_along_axis(b, perm, take_axis, s);
+  auto y = solve_triangular(luf[1], pb, /* upper = */ false, s);
+  return solve_triangular(luf[2], y, /* upper = */ true, s);
+}
+
+array solve_triangular(
+    const array& a,
+    const array& b,
+    bool upper /* = false */,
+    StreamOrDevice s /* = {} */) {
+  validate_solve(a, b, s, "[linalg::solve_triangular]");
+  auto a_inv = tri_inv(a, upper, s);
+  return matmul(a_inv, b, s);
+}
+
+} // namespace mlx::core::linalg