mlx 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mlx might be problematic. Click here for more details.
- checksums.yaml +7 -0
- data/ext/mlx/CMakeLists.txt +7 -0
- data/ext/mlx/Makefile +273 -0
- data/ext/mlx/extconf.rb +94 -0
- data/ext/mlx/mkmf.log +44 -0
- data/ext/mlx/native.bundle +0 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Info.plist +20 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Resources/DWARF/native.bundle +0 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Resources/Relocations/aarch64/native.bundle.yml +5 -0
- data/ext/mlx/native.cpp +8027 -0
- data/ext/mlx/native.o +0 -0
- data/lib/mlx/core.rb +1678 -0
- data/lib/mlx/distributed_utils/common.rb +116 -0
- data/lib/mlx/distributed_utils/config.rb +600 -0
- data/lib/mlx/distributed_utils/launch.rb +490 -0
- data/lib/mlx/extension.rb +24 -0
- data/lib/mlx/nn/base.rb +388 -0
- data/lib/mlx/nn/init.rb +140 -0
- data/lib/mlx/nn/layers/activations.rb +336 -0
- data/lib/mlx/nn/layers/base.rb +6 -0
- data/lib/mlx/nn/layers/containers.rb +20 -0
- data/lib/mlx/nn/layers/convolution.rb +120 -0
- data/lib/mlx/nn/layers/convolution_transpose.rb +114 -0
- data/lib/mlx/nn/layers/distributed.rb +309 -0
- data/lib/mlx/nn/layers/dropout.rb +75 -0
- data/lib/mlx/nn/layers/embedding.rb +28 -0
- data/lib/mlx/nn/layers/linear.rb +79 -0
- data/lib/mlx/nn/layers/normalization.rb +216 -0
- data/lib/mlx/nn/layers/pooling.rb +167 -0
- data/lib/mlx/nn/layers/positional_encoding.rb +126 -0
- data/lib/mlx/nn/layers/quantized.rb +215 -0
- data/lib/mlx/nn/layers/recurrent.rb +135 -0
- data/lib/mlx/nn/layers/transformer.rb +330 -0
- data/lib/mlx/nn/layers/upsample.rb +97 -0
- data/lib/mlx/nn/layers.rb +18 -0
- data/lib/mlx/nn/losses.rb +251 -0
- data/lib/mlx/nn/utils.rb +167 -0
- data/lib/mlx/nn.rb +12 -0
- data/lib/mlx/optimizers/optimizers.rb +808 -0
- data/lib/mlx/optimizers/schedulers.rb +62 -0
- data/lib/mlx/optimizers.rb +9 -0
- data/lib/mlx/utils.rb +171 -0
- data/lib/mlx/version +1 -0
- data/lib/mlx/version.rb +5 -0
- data/lib/mlx.rb +64 -0
- data/mlx/.clang-format +87 -0
- data/mlx/.git +1 -0
- data/mlx/.github/ISSUE_TEMPLATE/bug_report.md +28 -0
- data/mlx/.github/actions/build-cuda-release/action.yml +31 -0
- data/mlx/.github/actions/build-docs/action.yml +38 -0
- data/mlx/.github/actions/build-linux/action.yml +38 -0
- data/mlx/.github/actions/build-linux-release/action.yml +42 -0
- data/mlx/.github/actions/build-macos/action.yml +80 -0
- data/mlx/.github/actions/build-macos-release/action.yml +36 -0
- data/mlx/.github/actions/build-windows/action.yml +26 -0
- data/mlx/.github/actions/setup-linux/action.yml +93 -0
- data/mlx/.github/actions/setup-macos/action.yml +24 -0
- data/mlx/.github/actions/setup-windows/action.yml +42 -0
- data/mlx/.github/actions/test-linux/action.yml +69 -0
- data/mlx/.github/actions/test-windows/action.yml +20 -0
- data/mlx/.github/dependabot.yml +6 -0
- data/mlx/.github/pull_request_template.md +12 -0
- data/mlx/.github/scripts/build-sanitizer-tests.sh +48 -0
- data/mlx/.github/scripts/setup+build-cpp-linux-fedora-container.sh +27 -0
- data/mlx/.github/workflows/build_and_test.yml +152 -0
- data/mlx/.github/workflows/documentation.yml +28 -0
- data/mlx/.github/workflows/nightly.yml +104 -0
- data/mlx/.github/workflows/release.yml +256 -0
- data/mlx/.gitignore +81 -0
- data/mlx/.pre-commit-config.yaml +27 -0
- data/mlx/ACKNOWLEDGMENTS.md +268 -0
- data/mlx/CITATION.cff +24 -0
- data/mlx/CMakeLists.txt +437 -0
- data/mlx/CODE_OF_CONDUCT.md +132 -0
- data/mlx/CONTRIBUTING.md +38 -0
- data/mlx/LICENSE +21 -0
- data/mlx/MANIFEST.in +6 -0
- data/mlx/README.md +121 -0
- data/mlx/benchmarks/cpp/CMakeLists.txt +11 -0
- data/mlx/benchmarks/cpp/autograd.cpp +39 -0
- data/mlx/benchmarks/cpp/compare_devices.cpp +27 -0
- data/mlx/benchmarks/cpp/irregular_strides.cpp +201 -0
- data/mlx/benchmarks/cpp/single_ops.cpp +288 -0
- data/mlx/benchmarks/cpp/time_utils.h +39 -0
- data/mlx/benchmarks/numpy/single_ops.py +39 -0
- data/mlx/benchmarks/numpy/time_utils.py +20 -0
- data/mlx/benchmarks/python/batch_matmul_bench.py +62 -0
- data/mlx/benchmarks/python/blas/bench_gemm.py +191 -0
- data/mlx/benchmarks/python/blas/bench_gemv.py +220 -0
- data/mlx/benchmarks/python/comparative/README.md +15 -0
- data/mlx/benchmarks/python/comparative/bench_mlx.py +519 -0
- data/mlx/benchmarks/python/comparative/bench_torch.py +482 -0
- data/mlx/benchmarks/python/comparative/compare.py +284 -0
- data/mlx/benchmarks/python/compile_bench.py +107 -0
- data/mlx/benchmarks/python/conv1d_bench.py +123 -0
- data/mlx/benchmarks/python/conv2d_bench_cpu.py +127 -0
- data/mlx/benchmarks/python/conv2d_train_bench_cpu.py +143 -0
- data/mlx/benchmarks/python/conv2d_transpose_bench_cpu.py +129 -0
- data/mlx/benchmarks/python/conv3d_bench_cpu.py +110 -0
- data/mlx/benchmarks/python/conv3d_train_bench_cpu.py +143 -0
- data/mlx/benchmarks/python/conv3d_transpose_bench_cpu.py +116 -0
- data/mlx/benchmarks/python/conv_bench.py +135 -0
- data/mlx/benchmarks/python/conv_transpose_bench.py +135 -0
- data/mlx/benchmarks/python/conv_unaligned_bench.py +107 -0
- data/mlx/benchmarks/python/distributed_bench.py +66 -0
- data/mlx/benchmarks/python/einsum_bench.py +84 -0
- data/mlx/benchmarks/python/fft_bench.py +118 -0
- data/mlx/benchmarks/python/gather_bench.py +52 -0
- data/mlx/benchmarks/python/gather_mm_bench.py +74 -0
- data/mlx/benchmarks/python/gather_qmm_bench.py +84 -0
- data/mlx/benchmarks/python/hadamard_bench.py +70 -0
- data/mlx/benchmarks/python/large_gemm_bench.py +119 -0
- data/mlx/benchmarks/python/layer_norm_bench.py +82 -0
- data/mlx/benchmarks/python/masked_scatter.py +212 -0
- data/mlx/benchmarks/python/rms_norm_bench.py +63 -0
- data/mlx/benchmarks/python/rope_bench.py +35 -0
- data/mlx/benchmarks/python/scatter_bench.py +96 -0
- data/mlx/benchmarks/python/sdpa_bench.py +223 -0
- data/mlx/benchmarks/python/sdpa_vector_bench.py +95 -0
- data/mlx/benchmarks/python/single_ops.py +132 -0
- data/mlx/benchmarks/python/synchronize_bench.py +55 -0
- data/mlx/benchmarks/python/time_utils.py +38 -0
- data/mlx/cmake/FindCUDNN.cmake +177 -0
- data/mlx/cmake/FindNCCL.cmake +54 -0
- data/mlx/cmake/Findnvpl.cmake +3 -0
- data/mlx/cmake/extension.cmake +50 -0
- data/mlx/docs/.clang-format +2 -0
- data/mlx/docs/.gitignore +3 -0
- data/mlx/docs/.nojekyll +0 -0
- data/mlx/docs/Doxyfile +51 -0
- data/mlx/docs/Makefile +18 -0
- data/mlx/docs/README.md +54 -0
- data/mlx/docs/index.html +1 -0
- data/mlx/docs/requirements.txt +5 -0
- data/mlx/docs/src/_static/distributed/m3-ultra-mesh-broken.png +0 -0
- data/mlx/docs/src/_static/distributed/m3-ultra-mesh.png +0 -0
- data/mlx/docs/src/_static/metal_debugger/capture.png +0 -0
- data/mlx/docs/src/_static/metal_debugger/schema.png +0 -0
- data/mlx/docs/src/_static/mlx_logo.png +0 -0
- data/mlx/docs/src/_static/mlx_logo_dark.png +0 -0
- data/mlx/docs/src/_static/tp_inference/all-to-sharded-linear.png +0 -0
- data/mlx/docs/src/_static/tp_inference/column-row-tp.png +0 -0
- data/mlx/docs/src/_static/tp_inference/llama-transformer.png +0 -0
- data/mlx/docs/src/_static/tp_inference/sharded-to-all-linear.png +0 -0
- data/mlx/docs/src/_templates/module-base-class.rst +33 -0
- data/mlx/docs/src/_templates/nn-module-template.rst +20 -0
- data/mlx/docs/src/_templates/optimizers-template.rst +20 -0
- data/mlx/docs/src/conf.py +99 -0
- data/mlx/docs/src/cpp/ops.rst +7 -0
- data/mlx/docs/src/dev/custom_metal_kernels.rst +445 -0
- data/mlx/docs/src/dev/extensions.rst +811 -0
- data/mlx/docs/src/dev/metal_debugger.rst +68 -0
- data/mlx/docs/src/dev/metal_logging.rst +40 -0
- data/mlx/docs/src/dev/mlx_in_cpp.rst +121 -0
- data/mlx/docs/src/examples/data_parallelism.rst +91 -0
- data/mlx/docs/src/examples/linear_regression.rst +77 -0
- data/mlx/docs/src/examples/llama-inference.rst +382 -0
- data/mlx/docs/src/examples/mlp.rst +134 -0
- data/mlx/docs/src/examples/tensor_parallelism.rst +239 -0
- data/mlx/docs/src/index.rst +96 -0
- data/mlx/docs/src/install.rst +340 -0
- data/mlx/docs/src/python/array.rst +65 -0
- data/mlx/docs/src/python/cuda.rst +9 -0
- data/mlx/docs/src/python/data_types.rst +78 -0
- data/mlx/docs/src/python/devices_and_streams.rst +21 -0
- data/mlx/docs/src/python/distributed.rst +22 -0
- data/mlx/docs/src/python/export.rst +14 -0
- data/mlx/docs/src/python/fast.rst +16 -0
- data/mlx/docs/src/python/fft.rst +24 -0
- data/mlx/docs/src/python/linalg.rst +27 -0
- data/mlx/docs/src/python/memory_management.rst +16 -0
- data/mlx/docs/src/python/metal.rst +12 -0
- data/mlx/docs/src/python/nn/distributed.rst +30 -0
- data/mlx/docs/src/python/nn/functions.rst +40 -0
- data/mlx/docs/src/python/nn/init.rst +45 -0
- data/mlx/docs/src/python/nn/layers.rst +74 -0
- data/mlx/docs/src/python/nn/losses.rst +25 -0
- data/mlx/docs/src/python/nn/module.rst +38 -0
- data/mlx/docs/src/python/nn.rst +186 -0
- data/mlx/docs/src/python/ops.rst +184 -0
- data/mlx/docs/src/python/optimizers/common_optimizers.rst +22 -0
- data/mlx/docs/src/python/optimizers/optimizer.rst +23 -0
- data/mlx/docs/src/python/optimizers/schedulers.rst +15 -0
- data/mlx/docs/src/python/optimizers.rst +78 -0
- data/mlx/docs/src/python/random.rst +48 -0
- data/mlx/docs/src/python/transforms.rst +22 -0
- data/mlx/docs/src/python/tree_utils.rst +23 -0
- data/mlx/docs/src/usage/compile.rst +516 -0
- data/mlx/docs/src/usage/distributed.rst +572 -0
- data/mlx/docs/src/usage/export.rst +288 -0
- data/mlx/docs/src/usage/function_transforms.rst +191 -0
- data/mlx/docs/src/usage/indexing.rst +194 -0
- data/mlx/docs/src/usage/launching_distributed.rst +234 -0
- data/mlx/docs/src/usage/lazy_evaluation.rst +144 -0
- data/mlx/docs/src/usage/numpy.rst +124 -0
- data/mlx/docs/src/usage/quick_start.rst +67 -0
- data/mlx/docs/src/usage/saving_and_loading.rst +81 -0
- data/mlx/docs/src/usage/unified_memory.rst +78 -0
- data/mlx/docs/src/usage/using_streams.rst +18 -0
- data/mlx/examples/cmake_project/CMakeLists.txt +22 -0
- data/mlx/examples/cmake_project/README.md +26 -0
- data/mlx/examples/cmake_project/example.cpp +14 -0
- data/mlx/examples/cpp/CMakeLists.txt +12 -0
- data/mlx/examples/cpp/distributed.cpp +22 -0
- data/mlx/examples/cpp/linear_regression.cpp +54 -0
- data/mlx/examples/cpp/logistic_regression.cpp +54 -0
- data/mlx/examples/cpp/metal_capture.cpp +31 -0
- data/mlx/examples/cpp/timer.h +20 -0
- data/mlx/examples/cpp/tutorial.cpp +99 -0
- data/mlx/examples/export/CMakeLists.txt +22 -0
- data/mlx/examples/export/README.md +49 -0
- data/mlx/examples/export/eval_mlp.cpp +25 -0
- data/mlx/examples/export/eval_mlp.py +52 -0
- data/mlx/examples/export/train_mlp.cpp +35 -0
- data/mlx/examples/export/train_mlp.py +76 -0
- data/mlx/examples/extensions/CMakeLists.txt +78 -0
- data/mlx/examples/extensions/README.md +24 -0
- data/mlx/examples/extensions/axpby/axpby.cpp +306 -0
- data/mlx/examples/extensions/axpby/axpby.h +90 -0
- data/mlx/examples/extensions/axpby/axpby.metal +47 -0
- data/mlx/examples/extensions/bindings.cpp +39 -0
- data/mlx/examples/extensions/mlx_sample_extensions/__init__.py +5 -0
- data/mlx/examples/extensions/pyproject.toml +8 -0
- data/mlx/examples/extensions/requirements.txt +4 -0
- data/mlx/examples/extensions/setup.py +18 -0
- data/mlx/examples/extensions/test.py +12 -0
- data/mlx/examples/python/linear_regression.py +46 -0
- data/mlx/examples/python/logistic_regression.py +49 -0
- data/mlx/examples/python/qqmm.py +117 -0
- data/mlx/mlx/3rdparty/.clang-format +2 -0
- data/mlx/mlx/3rdparty/pocketfft.h +3581 -0
- data/mlx/mlx/CMakeLists.txt +107 -0
- data/mlx/mlx/allocator.h +75 -0
- data/mlx/mlx/api.h +29 -0
- data/mlx/mlx/array.cpp +354 -0
- data/mlx/mlx/array.h +647 -0
- data/mlx/mlx/backend/common/CMakeLists.txt +9 -0
- data/mlx/mlx/backend/common/binary.h +97 -0
- data/mlx/mlx/backend/common/broadcasting.cpp +24 -0
- data/mlx/mlx/backend/common/broadcasting.h +11 -0
- data/mlx/mlx/backend/common/buffer_cache.h +158 -0
- data/mlx/mlx/backend/common/common.cpp +305 -0
- data/mlx/mlx/backend/common/compiled.cpp +243 -0
- data/mlx/mlx/backend/common/compiled.h +77 -0
- data/mlx/mlx/backend/common/copy.h +50 -0
- data/mlx/mlx/backend/common/hadamard.h +109 -0
- data/mlx/mlx/backend/common/load.cpp +57 -0
- data/mlx/mlx/backend/common/matmul.h +67 -0
- data/mlx/mlx/backend/common/reduce.cpp +154 -0
- data/mlx/mlx/backend/common/reduce.h +59 -0
- data/mlx/mlx/backend/common/slicing.cpp +71 -0
- data/mlx/mlx/backend/common/slicing.h +20 -0
- data/mlx/mlx/backend/common/ternary.h +85 -0
- data/mlx/mlx/backend/common/unary.h +29 -0
- data/mlx/mlx/backend/common/utils.cpp +231 -0
- data/mlx/mlx/backend/common/utils.h +205 -0
- data/mlx/mlx/backend/cpu/CMakeLists.txt +88 -0
- data/mlx/mlx/backend/cpu/arange.h +28 -0
- data/mlx/mlx/backend/cpu/arg_reduce.cpp +124 -0
- data/mlx/mlx/backend/cpu/binary.cpp +269 -0
- data/mlx/mlx/backend/cpu/binary.h +517 -0
- data/mlx/mlx/backend/cpu/binary_ops.h +98 -0
- data/mlx/mlx/backend/cpu/binary_two.h +166 -0
- data/mlx/mlx/backend/cpu/cholesky.cpp +85 -0
- data/mlx/mlx/backend/cpu/compiled.cpp +357 -0
- data/mlx/mlx/backend/cpu/compiled_preamble.h +12 -0
- data/mlx/mlx/backend/cpu/conv.cpp +1351 -0
- data/mlx/mlx/backend/cpu/copy.cpp +386 -0
- data/mlx/mlx/backend/cpu/copy.h +36 -0
- data/mlx/mlx/backend/cpu/device_info.cpp +113 -0
- data/mlx/mlx/backend/cpu/device_info.h +28 -0
- data/mlx/mlx/backend/cpu/distributed.cpp +103 -0
- data/mlx/mlx/backend/cpu/eig.cpp +281 -0
- data/mlx/mlx/backend/cpu/eigh.cpp +241 -0
- data/mlx/mlx/backend/cpu/encoder.cpp +16 -0
- data/mlx/mlx/backend/cpu/encoder.h +67 -0
- data/mlx/mlx/backend/cpu/eval.cpp +40 -0
- data/mlx/mlx/backend/cpu/eval.h +12 -0
- data/mlx/mlx/backend/cpu/fft.cpp +120 -0
- data/mlx/mlx/backend/cpu/gemm.h +26 -0
- data/mlx/mlx/backend/cpu/gemms/bnns.cpp +214 -0
- data/mlx/mlx/backend/cpu/gemms/cblas.cpp +134 -0
- data/mlx/mlx/backend/cpu/gemms/simd_bf16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_fp16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_gemm.h +139 -0
- data/mlx/mlx/backend/cpu/hadamard.cpp +121 -0
- data/mlx/mlx/backend/cpu/indexing.cpp +854 -0
- data/mlx/mlx/backend/cpu/inverse.cpp +160 -0
- data/mlx/mlx/backend/cpu/jit_compiler.cpp +166 -0
- data/mlx/mlx/backend/cpu/jit_compiler.h +20 -0
- data/mlx/mlx/backend/cpu/lapack.h +80 -0
- data/mlx/mlx/backend/cpu/logsumexp.cpp +139 -0
- data/mlx/mlx/backend/cpu/luf.cpp +120 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.ps1 +38 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.sh +41 -0
- data/mlx/mlx/backend/cpu/masked_mm.cpp +608 -0
- data/mlx/mlx/backend/cpu/matmul.cpp +166 -0
- data/mlx/mlx/backend/cpu/primitives.cpp +478 -0
- data/mlx/mlx/backend/cpu/qrf.cpp +147 -0
- data/mlx/mlx/backend/cpu/quantized.cpp +1370 -0
- data/mlx/mlx/backend/cpu/reduce.cpp +587 -0
- data/mlx/mlx/backend/cpu/scan.cpp +338 -0
- data/mlx/mlx/backend/cpu/select.cpp +95 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_fp16_simd.h +56 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_simd.h +329 -0
- data/mlx/mlx/backend/cpu/simd/base_simd.h +319 -0
- data/mlx/mlx/backend/cpu/simd/math.h +193 -0
- data/mlx/mlx/backend/cpu/simd/neon_fp16_simd.h +212 -0
- data/mlx/mlx/backend/cpu/simd/simd.h +4 -0
- data/mlx/mlx/backend/cpu/simd/type.h +11 -0
- data/mlx/mlx/backend/cpu/slicing.h +21 -0
- data/mlx/mlx/backend/cpu/softmax.cpp +170 -0
- data/mlx/mlx/backend/cpu/sort.cpp +481 -0
- data/mlx/mlx/backend/cpu/svd.cpp +289 -0
- data/mlx/mlx/backend/cpu/ternary.h +154 -0
- data/mlx/mlx/backend/cpu/threefry.cpp +31 -0
- data/mlx/mlx/backend/cpu/threefry.h +21 -0
- data/mlx/mlx/backend/cpu/unary.cpp +238 -0
- data/mlx/mlx/backend/cpu/unary.h +281 -0
- data/mlx/mlx/backend/cpu/unary_ops.h +175 -0
- data/mlx/mlx/backend/cuda/CMakeLists.txt +265 -0
- data/mlx/mlx/backend/cuda/allocator.cpp +451 -0
- data/mlx/mlx/backend/cuda/allocator.h +94 -0
- data/mlx/mlx/backend/cuda/arange.cu +68 -0
- data/mlx/mlx/backend/cuda/arg_reduce.cu +189 -0
- data/mlx/mlx/backend/cuda/bin2h.cmake +150 -0
- data/mlx/mlx/backend/cuda/binary/CMakeLists.txt +21 -0
- data/mlx/mlx/backend/cuda/binary/add.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/arctan2.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/binary.cuh +383 -0
- data/mlx/mlx/backend/cuda/binary/bitwise_binary.cu +27 -0
- data/mlx/mlx/backend/cuda/binary/divide.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/equal.cu +15 -0
- data/mlx/mlx/backend/cuda/binary/greater.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/greater_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/log_add_exp.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_and.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_or.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/maximum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/minimum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/multiply.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/not_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/power.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/remainder.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/subtract.cu +7 -0
- data/mlx/mlx/backend/cuda/binary_two.cu +412 -0
- data/mlx/mlx/backend/cuda/compiled.cpp +357 -0
- data/mlx/mlx/backend/cuda/conv/conv.h +126 -0
- data/mlx/mlx/backend/cuda/conv/gemm_conv.cu +217 -0
- data/mlx/mlx/backend/cuda/conv/gemm_grouped_conv.cu +231 -0
- data/mlx/mlx/backend/cuda/conv.cpp +403 -0
- data/mlx/mlx/backend/cuda/copy/copy.cuh +55 -0
- data/mlx/mlx/backend/cuda/copy/copy_contiguous.cu +88 -0
- data/mlx/mlx/backend/cuda/copy/copy_general.cu +171 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_dynamic.cu +118 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_input.cu +229 -0
- data/mlx/mlx/backend/cuda/copy.cu +132 -0
- data/mlx/mlx/backend/cuda/cublas_utils.cpp +222 -0
- data/mlx/mlx/backend/cuda/cublas_utils.h +95 -0
- data/mlx/mlx/backend/cuda/cuda.h +21 -0
- data/mlx/mlx/backend/cuda/cuda_utils.h +90 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.cpp +133 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.h +187 -0
- data/mlx/mlx/backend/cuda/custom_kernel.cpp +379 -0
- data/mlx/mlx/backend/cuda/cutlass_utils.cuh +46 -0
- data/mlx/mlx/backend/cuda/delayload.cpp +80 -0
- data/mlx/mlx/backend/cuda/device/atomic_ops.cuh +63 -0
- data/mlx/mlx/backend/cuda/device/binary_ops.cuh +300 -0
- data/mlx/mlx/backend/cuda/device/cast_op.cuh +118 -0
- data/mlx/mlx/backend/cuda/device/complex.cuh +60 -0
- data/mlx/mlx/backend/cuda/device/config.h +12 -0
- data/mlx/mlx/backend/cuda/device/fp16_math.cuh +96 -0
- data/mlx/mlx/backend/cuda/device/gather.cuh +53 -0
- data/mlx/mlx/backend/cuda/device/gather_axis.cuh +65 -0
- data/mlx/mlx/backend/cuda/device/indexing.cuh +30 -0
- data/mlx/mlx/backend/cuda/device/scatter.cuh +68 -0
- data/mlx/mlx/backend/cuda/device/scatter_axis.cuh +67 -0
- data/mlx/mlx/backend/cuda/device/scatter_ops.cuh +44 -0
- data/mlx/mlx/backend/cuda/device/ternary_ops.cuh +13 -0
- data/mlx/mlx/backend/cuda/device/unary_ops.cuh +350 -0
- data/mlx/mlx/backend/cuda/device/utils.cuh +464 -0
- data/mlx/mlx/backend/cuda/device.cpp +522 -0
- data/mlx/mlx/backend/cuda/device.h +195 -0
- data/mlx/mlx/backend/cuda/device_info.cpp +232 -0
- data/mlx/mlx/backend/cuda/distributed.cu +121 -0
- data/mlx/mlx/backend/cuda/eval.cpp +66 -0
- data/mlx/mlx/backend/cuda/event.cu +415 -0
- data/mlx/mlx/backend/cuda/event.h +79 -0
- data/mlx/mlx/backend/cuda/fence.cpp +42 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.cpp +233 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.h +114 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_0.cpp +77 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu +329 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.cu +327 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.h +34 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm.h +25 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm_unaligned.cu +358 -0
- data/mlx/mlx/backend/cuda/indexing.cpp +434 -0
- data/mlx/mlx/backend/cuda/jit_module.cpp +443 -0
- data/mlx/mlx/backend/cuda/jit_module.h +120 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cu +52 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cuh +148 -0
- data/mlx/mlx/backend/cuda/layer_norm.cu +417 -0
- data/mlx/mlx/backend/cuda/load.cpp +60 -0
- data/mlx/mlx/backend/cuda/logsumexp.cu +161 -0
- data/mlx/mlx/backend/cuda/lru_cache.h +190 -0
- data/mlx/mlx/backend/cuda/matmul.cpp +373 -0
- data/mlx/mlx/backend/cuda/no_cuda.cpp +47 -0
- data/mlx/mlx/backend/cuda/primitives.cpp +46 -0
- data/mlx/mlx/backend/cuda/quantized/affine_quantize.cu +329 -0
- data/mlx/mlx/backend/cuda/quantized/convert_fp8.cu +19 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.cpp +206 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.h +88 -0
- data/mlx/mlx/backend/cuda/quantized/cuda_fp4.h +100 -0
- data/mlx/mlx/backend/cuda/quantized/fp_quantize.cu +496 -0
- data/mlx/mlx/backend/cuda/quantized/mxfp8_quantize.cuh +32 -0
- data/mlx/mlx/backend/cuda/quantized/no_qqmm_impl.cpp +26 -0
- data/mlx/mlx/backend/cuda/quantized/nvfp4_quantize.cuh +334 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.cu +304 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.h +21 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm.cpp +158 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.cpp +50 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.h +26 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.cu +227 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.h +30 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.cpp +85 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.h +53 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.cuh +88 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.h +50 -0
- data/mlx/mlx/backend/cuda/random.cu +202 -0
- data/mlx/mlx/backend/cuda/reduce/all_reduce.cu +159 -0
- data/mlx/mlx/backend/cuda/reduce/col_reduce.cu +510 -0
- data/mlx/mlx/backend/cuda/reduce/init_reduce.cu +50 -0
- data/mlx/mlx/backend/cuda/reduce/reduce.cuh +71 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_ops.cuh +211 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_utils.cuh +145 -0
- data/mlx/mlx/backend/cuda/reduce/row_reduce.cu +361 -0
- data/mlx/mlx/backend/cuda/reduce.cu +73 -0
- data/mlx/mlx/backend/cuda/rms_norm.cu +536 -0
- data/mlx/mlx/backend/cuda/rope.cu +429 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cpp +681 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu +796 -0
- data/mlx/mlx/backend/cuda/scan.cu +468 -0
- data/mlx/mlx/backend/cuda/slicing.cpp +111 -0
- data/mlx/mlx/backend/cuda/softmax.cu +162 -0
- data/mlx/mlx/backend/cuda/sort.cu +1076 -0
- data/mlx/mlx/backend/cuda/steel/defines.cuh +9 -0
- data/mlx/mlx/backend/cuda/steel/gemm.cuh +101 -0
- data/mlx/mlx/backend/cuda/steel/mma.cuh +117 -0
- data/mlx/mlx/backend/cuda/steel/tiles.cuh +450 -0
- data/mlx/mlx/backend/cuda/steel/utils.cuh +89 -0
- data/mlx/mlx/backend/cuda/ternary.cu +271 -0
- data/mlx/mlx/backend/cuda/unary/CMakeLists.txt +34 -0
- data/mlx/mlx/backend/cuda/unary/abs.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/bitwise_invert.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/ceil.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/conjugate.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf_inv.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/exp.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/expm1.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/floor.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/imag.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/log.cu +21 -0
- data/mlx/mlx/backend/cuda/unary/log1p.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/logical_not.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/negative.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/real.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/round.cu +18 -0
- data/mlx/mlx/backend/cuda/unary/sigmoid.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sign.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sqrt.cu +15 -0
- data/mlx/mlx/backend/cuda/unary/square.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/unary.cuh +224 -0
- data/mlx/mlx/backend/cuda/utils.cpp +116 -0
- data/mlx/mlx/backend/cuda/utils.h +49 -0
- data/mlx/mlx/backend/cuda/vector_types.cuh +48 -0
- data/mlx/mlx/backend/cuda/worker.cpp +79 -0
- data/mlx/mlx/backend/cuda/worker.h +55 -0
- data/mlx/mlx/backend/gpu/CMakeLists.txt +5 -0
- data/mlx/mlx/backend/gpu/copy.cpp +89 -0
- data/mlx/mlx/backend/gpu/copy.h +57 -0
- data/mlx/mlx/backend/gpu/device_info.h +36 -0
- data/mlx/mlx/backend/gpu/eval.h +18 -0
- data/mlx/mlx/backend/gpu/primitives.cpp +307 -0
- data/mlx/mlx/backend/gpu/slicing.cpp +44 -0
- data/mlx/mlx/backend/gpu/slicing.h +36 -0
- data/mlx/mlx/backend/metal/CMakeLists.txt +144 -0
- data/mlx/mlx/backend/metal/allocator.cpp +279 -0
- data/mlx/mlx/backend/metal/allocator.h +79 -0
- data/mlx/mlx/backend/metal/binary.cpp +257 -0
- data/mlx/mlx/backend/metal/binary.h +33 -0
- data/mlx/mlx/backend/metal/compiled.cpp +471 -0
- data/mlx/mlx/backend/metal/conv.cpp +1118 -0
- data/mlx/mlx/backend/metal/copy.cpp +235 -0
- data/mlx/mlx/backend/metal/custom_kernel.cpp +430 -0
- data/mlx/mlx/backend/metal/device.cpp +816 -0
- data/mlx/mlx/backend/metal/device.h +289 -0
- data/mlx/mlx/backend/metal/device_info.cpp +58 -0
- data/mlx/mlx/backend/metal/distributed.cpp +38 -0
- data/mlx/mlx/backend/metal/eval.cpp +97 -0
- data/mlx/mlx/backend/metal/event.cpp +62 -0
- data/mlx/mlx/backend/metal/fence.cpp +162 -0
- data/mlx/mlx/backend/metal/fft.cpp +807 -0
- data/mlx/mlx/backend/metal/hadamard.cpp +198 -0
- data/mlx/mlx/backend/metal/indexing.cpp +727 -0
- data/mlx/mlx/backend/metal/jit/includes.h +58 -0
- data/mlx/mlx/backend/metal/jit/indexing.h +76 -0
- data/mlx/mlx/backend/metal/jit_kernels.cpp +1118 -0
- data/mlx/mlx/backend/metal/kernels/CMakeLists.txt +193 -0
- data/mlx/mlx/backend/metal/kernels/arange.h +9 -0
- data/mlx/mlx/backend/metal/kernels/arange.metal +20 -0
- data/mlx/mlx/backend/metal/kernels/arg_reduce.metal +182 -0
- data/mlx/mlx/backend/metal/kernels/atomic.h +345 -0
- data/mlx/mlx/backend/metal/kernels/bf16.h +16 -0
- data/mlx/mlx/backend/metal/kernels/bf16_math.h +380 -0
- data/mlx/mlx/backend/metal/kernels/binary.h +199 -0
- data/mlx/mlx/backend/metal/kernels/binary.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/binary_ops.h +330 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.h +244 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.metal +54 -0
- data/mlx/mlx/backend/metal/kernels/cexpf.h +134 -0
- data/mlx/mlx/backend/metal/kernels/complex.h +173 -0
- data/mlx/mlx/backend/metal/kernels/conv.metal +701 -0
- data/mlx/mlx/backend/metal/kernels/copy.h +276 -0
- data/mlx/mlx/backend/metal/kernels/copy.metal +75 -0
- data/mlx/mlx/backend/metal/kernels/defines.h +24 -0
- data/mlx/mlx/backend/metal/kernels/erf.h +69 -0
- data/mlx/mlx/backend/metal/kernels/expm1f.h +90 -0
- data/mlx/mlx/backend/metal/kernels/fence.metal +52 -0
- data/mlx/mlx/backend/metal/kernels/fft/radix.h +328 -0
- data/mlx/mlx/backend/metal/kernels/fft/readwrite.h +624 -0
- data/mlx/mlx/backend/metal/kernels/fft.h +486 -0
- data/mlx/mlx/backend/metal/kernels/fft.metal +67 -0
- data/mlx/mlx/backend/metal/kernels/fp4.h +48 -0
- data/mlx/mlx/backend/metal/kernels/fp8.h +80 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.h +1850 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.metal +153 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.h +1044 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.metal +79 -0
- data/mlx/mlx/backend/metal/kernels/gemv.metal +868 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.h +827 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/hadamard.h +182 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather.h +51 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_axis.h +44 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_front.h +24 -0
- data/mlx/mlx/backend/metal/kernels/indexing/indexing.h +23 -0
- data/mlx/mlx/backend/metal/kernels/indexing/masked_scatter.h +41 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter.h +59 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter_axis.h +52 -0
- data/mlx/mlx/backend/metal/kernels/layer_norm.metal +433 -0
- data/mlx/mlx/backend/metal/kernels/logging.h +26 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.h +140 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.metal +18 -0
- data/mlx/mlx/backend/metal/kernels/quantized.h +2508 -0
- data/mlx/mlx/backend/metal/kernels/quantized.metal +144 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.h +1705 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.metal +106 -0
- data/mlx/mlx/backend/metal/kernels/quantized_utils.h +90 -0
- data/mlx/mlx/backend/metal/kernels/random.metal +103 -0
- data/mlx/mlx/backend/metal/kernels/reduce.h +5 -0
- data/mlx/mlx/backend/metal/kernels/reduce.metal +169 -0
- data/mlx/mlx/backend/metal/kernels/reduce_utils.h +6 -0
- data/mlx/mlx/backend/metal/kernels/reduction/ops.h +275 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_all.h +66 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_col.h +398 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_init.h +8 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_row.h +369 -0
- data/mlx/mlx/backend/metal/kernels/rms_norm.metal +391 -0
- data/mlx/mlx/backend/metal/kernels/rope.metal +229 -0
- data/mlx/mlx/backend/metal/kernels/scaled_dot_product_attention.metal +44 -0
- data/mlx/mlx/backend/metal/kernels/scan.h +514 -0
- data/mlx/mlx/backend/metal/kernels/scan.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/sdpa_vector.h +394 -0
- data/mlx/mlx/backend/metal/kernels/softmax.h +190 -0
- data/mlx/mlx/backend/metal/kernels/softmax.metal +24 -0
- data/mlx/mlx/backend/metal/kernels/sort.h +719 -0
- data/mlx/mlx/backend/metal/kernels/sort.metal +80 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/attn.h +296 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.h +471 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.metal +27 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.h +481 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.metal +28 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/loader.h +264 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/mma.h +750 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/nax.h +1076 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/params.h +44 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/transforms.h +71 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/conv.h +13 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.h +176 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.metal +56 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.h +225 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.metal +47 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loader.h +6 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_l.h +451 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_n.h +319 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_general.h +381 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/params.h +62 -0
- data/mlx/mlx/backend/metal/kernels/steel/defines.h +7 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm.h +295 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm_nax.h +157 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.h +346 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.metal +34 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.h +219 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.h +459 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.metal +59 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.h +143 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.metal +37 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.h +719 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.h +266 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.metal +43 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.h +227 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.h +152 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/loader.h +137 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/mma.h +1146 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/nax.h +1084 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/params.h +65 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/transforms.h +72 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/integral_constant.h +134 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/type_traits.h +55 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils.h +42 -0
- data/mlx/mlx/backend/metal/kernels/ternary.h +145 -0
- data/mlx/mlx/backend/metal/kernels/ternary.metal +48 -0
- data/mlx/mlx/backend/metal/kernels/ternary_ops.h +10 -0
- data/mlx/mlx/backend/metal/kernels/unary.h +63 -0
- data/mlx/mlx/backend/metal/kernels/unary.metal +115 -0
- data/mlx/mlx/backend/metal/kernels/unary_ops.h +454 -0
- data/mlx/mlx/backend/metal/kernels/utils.h +445 -0
- data/mlx/mlx/backend/metal/kernels.h +375 -0
- data/mlx/mlx/backend/metal/logsumexp.cpp +95 -0
- data/mlx/mlx/backend/metal/make_compiled_preamble.sh +120 -0
- data/mlx/mlx/backend/metal/matmul.cpp +2572 -0
- data/mlx/mlx/backend/metal/matmul.h +144 -0
- data/mlx/mlx/backend/metal/metal.cpp +50 -0
- data/mlx/mlx/backend/metal/metal.h +25 -0
- data/mlx/mlx/backend/metal/no_metal.cpp +42 -0
- data/mlx/mlx/backend/metal/nojit_kernels.cpp +414 -0
- data/mlx/mlx/backend/metal/normalization.cpp +433 -0
- data/mlx/mlx/backend/metal/primitives.cpp +242 -0
- data/mlx/mlx/backend/metal/quantized.cpp +1651 -0
- data/mlx/mlx/backend/metal/reduce.cpp +1038 -0
- data/mlx/mlx/backend/metal/reduce.h +41 -0
- data/mlx/mlx/backend/metal/resident.cpp +100 -0
- data/mlx/mlx/backend/metal/resident.h +32 -0
- data/mlx/mlx/backend/metal/rope.cpp +165 -0
- data/mlx/mlx/backend/metal/scaled_dot_product_attention.cpp +798 -0
- data/mlx/mlx/backend/metal/scan.cpp +145 -0
- data/mlx/mlx/backend/metal/scan.h +17 -0
- data/mlx/mlx/backend/metal/slicing.cpp +99 -0
- data/mlx/mlx/backend/metal/softmax.cpp +87 -0
- data/mlx/mlx/backend/metal/sort.cpp +368 -0
- data/mlx/mlx/backend/metal/ternary.cpp +160 -0
- data/mlx/mlx/backend/metal/ternary.h +21 -0
- data/mlx/mlx/backend/metal/unary.cpp +161 -0
- data/mlx/mlx/backend/metal/unary.h +21 -0
- data/mlx/mlx/backend/metal/utils.cpp +77 -0
- data/mlx/mlx/backend/metal/utils.h +99 -0
- data/mlx/mlx/backend/no_cpu/CMakeLists.txt +7 -0
- data/mlx/mlx/backend/no_cpu/compiled.cpp +24 -0
- data/mlx/mlx/backend/no_cpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_cpu/primitives.cpp +146 -0
- data/mlx/mlx/backend/no_gpu/CMakeLists.txt +8 -0
- data/mlx/mlx/backend/no_gpu/allocator.cpp +134 -0
- data/mlx/mlx/backend/no_gpu/apple_memory.h +16 -0
- data/mlx/mlx/backend/no_gpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_gpu/eval.cpp +24 -0
- data/mlx/mlx/backend/no_gpu/event.cpp +53 -0
- data/mlx/mlx/backend/no_gpu/fence.cpp +54 -0
- data/mlx/mlx/backend/no_gpu/linux_memory.h +22 -0
- data/mlx/mlx/backend/no_gpu/primitives.cpp +185 -0
- data/mlx/mlx/compile.cpp +1243 -0
- data/mlx/mlx/compile.h +45 -0
- data/mlx/mlx/compile_impl.h +70 -0
- data/mlx/mlx/device.cpp +72 -0
- data/mlx/mlx/device.h +56 -0
- data/mlx/mlx/distributed/CMakeLists.txt +14 -0
- data/mlx/mlx/distributed/distributed.cpp +197 -0
- data/mlx/mlx/distributed/distributed.h +61 -0
- data/mlx/mlx/distributed/distributed_impl.h +59 -0
- data/mlx/mlx/distributed/jaccl/CMakeLists.txt +12 -0
- data/mlx/mlx/distributed/jaccl/jaccl.cpp +178 -0
- data/mlx/mlx/distributed/jaccl/jaccl.h +12 -0
- data/mlx/mlx/distributed/jaccl/mesh.cpp +451 -0
- data/mlx/mlx/distributed/jaccl/mesh.h +122 -0
- data/mlx/mlx/distributed/jaccl/no_jaccl.cpp +20 -0
- data/mlx/mlx/distributed/jaccl/ring.cpp +692 -0
- data/mlx/mlx/distributed/jaccl/ring.h +178 -0
- data/mlx/mlx/distributed/jaccl/utils.cpp +329 -0
- data/mlx/mlx/distributed/jaccl/utils.h +342 -0
- data/mlx/mlx/distributed/mpi/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/mpi/mpi.cpp +501 -0
- data/mlx/mlx/distributed/mpi/mpi.h +12 -0
- data/mlx/mlx/distributed/mpi/mpi_declarations.h +28 -0
- data/mlx/mlx/distributed/mpi/no_mpi.cpp +20 -0
- data/mlx/mlx/distributed/nccl/CMakeLists.txt +26 -0
- data/mlx/mlx/distributed/nccl/nccl.cpp +443 -0
- data/mlx/mlx/distributed/nccl/nccl.h +12 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/CMakeLists.txt +1 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/nccl_stubs.cpp +54 -0
- data/mlx/mlx/distributed/nccl/no_nccl.cpp +20 -0
- data/mlx/mlx/distributed/ops.cpp +186 -0
- data/mlx/mlx/distributed/ops.h +57 -0
- data/mlx/mlx/distributed/primitives.cpp +95 -0
- data/mlx/mlx/distributed/primitives.h +156 -0
- data/mlx/mlx/distributed/reduction_ops.h +38 -0
- data/mlx/mlx/distributed/ring/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/ring/no_ring.cpp +20 -0
- data/mlx/mlx/distributed/ring/ring.cpp +870 -0
- data/mlx/mlx/distributed/ring/ring.h +12 -0
- data/mlx/mlx/distributed/utils.cpp +206 -0
- data/mlx/mlx/distributed/utils.h +67 -0
- data/mlx/mlx/dtype.cpp +197 -0
- data/mlx/mlx/dtype.h +116 -0
- data/mlx/mlx/dtype_utils.cpp +42 -0
- data/mlx/mlx/dtype_utils.h +119 -0
- data/mlx/mlx/einsum.cpp +941 -0
- data/mlx/mlx/einsum.h +23 -0
- data/mlx/mlx/event.h +58 -0
- data/mlx/mlx/export.cpp +1130 -0
- data/mlx/mlx/export.h +137 -0
- data/mlx/mlx/export_impl.h +99 -0
- data/mlx/mlx/fast.cpp +941 -0
- data/mlx/mlx/fast.h +103 -0
- data/mlx/mlx/fast_primitives.h +427 -0
- data/mlx/mlx/fence.h +39 -0
- data/mlx/mlx/fft.cpp +262 -0
- data/mlx/mlx/fft.h +159 -0
- data/mlx/mlx/graph_utils.cpp +175 -0
- data/mlx/mlx/graph_utils.h +67 -0
- data/mlx/mlx/io/CMakeLists.txt +25 -0
- data/mlx/mlx/io/gguf.cpp +470 -0
- data/mlx/mlx/io/gguf.h +20 -0
- data/mlx/mlx/io/gguf_quants.cpp +164 -0
- data/mlx/mlx/io/load.cpp +397 -0
- data/mlx/mlx/io/load.h +175 -0
- data/mlx/mlx/io/no_gguf.cpp +20 -0
- data/mlx/mlx/io/no_safetensors.cpp +37 -0
- data/mlx/mlx/io/safetensors.cpp +234 -0
- data/mlx/mlx/io.h +61 -0
- data/mlx/mlx/linalg.cpp +708 -0
- data/mlx/mlx/linalg.h +115 -0
- data/mlx/mlx/memory.h +80 -0
- data/mlx/mlx/mlx.h +25 -0
- data/mlx/mlx/ops.cpp +6094 -0
- data/mlx/mlx/ops.h +1610 -0
- data/mlx/mlx/primitives.cpp +5850 -0
- data/mlx/mlx/primitives.h +2525 -0
- data/mlx/mlx/random.cpp +492 -0
- data/mlx/mlx/random.h +283 -0
- data/mlx/mlx/scheduler.cpp +73 -0
- data/mlx/mlx/scheduler.h +189 -0
- data/mlx/mlx/small_vector.h +540 -0
- data/mlx/mlx/stream.h +42 -0
- data/mlx/mlx/threadpool.h +133 -0
- data/mlx/mlx/transforms.cpp +1065 -0
- data/mlx/mlx/transforms.h +231 -0
- data/mlx/mlx/transforms_impl.h +88 -0
- data/mlx/mlx/types/bf16.h +187 -0
- data/mlx/mlx/types/complex.h +113 -0
- data/mlx/mlx/types/fp16.h +234 -0
- data/mlx/mlx/types/half_types.h +58 -0
- data/mlx/mlx/types/limits.h +70 -0
- data/mlx/mlx/utils.cpp +302 -0
- data/mlx/mlx/utils.h +174 -0
- data/mlx/mlx/version.cpp +11 -0
- data/mlx/mlx/version.h +22 -0
- data/mlx/mlx.pc.in +52 -0
- data/mlx/pyproject.toml +7 -0
- data/mlx/python/mlx/__main__.py +27 -0
- data/mlx/python/mlx/_distributed_utils/common.py +135 -0
- data/mlx/python/mlx/_distributed_utils/config.py +631 -0
- data/mlx/python/mlx/_distributed_utils/launch.py +570 -0
- data/mlx/python/mlx/_reprlib_fix.py +16 -0
- data/mlx/python/mlx/_stub_patterns.txt +36 -0
- data/mlx/python/mlx/extension.py +88 -0
- data/mlx/python/mlx/nn/__init__.py +5 -0
- data/mlx/python/mlx/nn/init.py +441 -0
- data/mlx/python/mlx/nn/layers/__init__.py +105 -0
- data/mlx/python/mlx/nn/layers/activations.py +661 -0
- data/mlx/python/mlx/nn/layers/base.py +675 -0
- data/mlx/python/mlx/nn/layers/containers.py +24 -0
- data/mlx/python/mlx/nn/layers/convolution.py +232 -0
- data/mlx/python/mlx/nn/layers/convolution_transpose.py +242 -0
- data/mlx/python/mlx/nn/layers/distributed.py +601 -0
- data/mlx/python/mlx/nn/layers/dropout.py +137 -0
- data/mlx/python/mlx/nn/layers/embedding.py +53 -0
- data/mlx/python/mlx/nn/layers/linear.py +180 -0
- data/mlx/python/mlx/nn/layers/normalization.py +363 -0
- data/mlx/python/mlx/nn/layers/pooling.py +398 -0
- data/mlx/python/mlx/nn/layers/positional_encoding.py +162 -0
- data/mlx/python/mlx/nn/layers/quantized.py +426 -0
- data/mlx/python/mlx/nn/layers/recurrent.py +289 -0
- data/mlx/python/mlx/nn/layers/transformer.py +354 -0
- data/mlx/python/mlx/nn/layers/upsample.py +277 -0
- data/mlx/python/mlx/nn/losses.py +610 -0
- data/mlx/python/mlx/nn/utils.py +165 -0
- data/mlx/python/mlx/optimizers/__init__.py +4 -0
- data/mlx/python/mlx/optimizers/optimizers.py +976 -0
- data/mlx/python/mlx/optimizers/schedulers.py +158 -0
- data/mlx/python/mlx/py.typed +1 -0
- data/mlx/python/mlx/utils.py +325 -0
- data/mlx/python/src/CMakeLists.txt +96 -0
- data/mlx/python/src/array.cpp +1525 -0
- data/mlx/python/src/buffer.h +124 -0
- data/mlx/python/src/constants.cpp +15 -0
- data/mlx/python/src/convert.cpp +504 -0
- data/mlx/python/src/convert.h +50 -0
- data/mlx/python/src/cuda.cpp +19 -0
- data/mlx/python/src/device.cpp +98 -0
- data/mlx/python/src/distributed.cpp +352 -0
- data/mlx/python/src/export.cpp +356 -0
- data/mlx/python/src/fast.cpp +627 -0
- data/mlx/python/src/fft.cpp +514 -0
- data/mlx/python/src/indexing.cpp +1016 -0
- data/mlx/python/src/indexing.h +41 -0
- data/mlx/python/src/linalg.cpp +663 -0
- data/mlx/python/src/load.cpp +531 -0
- data/mlx/python/src/load.h +51 -0
- data/mlx/python/src/memory.cpp +125 -0
- data/mlx/python/src/metal.cpp +98 -0
- data/mlx/python/src/mlx.cpp +51 -0
- data/mlx/python/src/mlx_func.cpp +116 -0
- data/mlx/python/src/mlx_func.h +31 -0
- data/mlx/python/src/ops.cpp +5545 -0
- data/mlx/python/src/random.cpp +516 -0
- data/mlx/python/src/small_vector.h +76 -0
- data/mlx/python/src/stream.cpp +147 -0
- data/mlx/python/src/transforms.cpp +1542 -0
- data/mlx/python/src/trees.cpp +311 -0
- data/mlx/python/src/trees.h +62 -0
- data/mlx/python/src/utils.cpp +98 -0
- data/mlx/python/src/utils.h +78 -0
- data/mlx/python/tests/__main__.py +5 -0
- data/mlx/python/tests/cuda_skip.py +62 -0
- data/mlx/python/tests/mlx_distributed_tests.py +314 -0
- data/mlx/python/tests/mlx_tests.py +116 -0
- data/mlx/python/tests/mpi_test_distributed.py +142 -0
- data/mlx/python/tests/nccl_test_distributed.py +52 -0
- data/mlx/python/tests/ring_test_distributed.py +131 -0
- data/mlx/python/tests/test_array.py +2139 -0
- data/mlx/python/tests/test_autograd.py +880 -0
- data/mlx/python/tests/test_bf16.py +196 -0
- data/mlx/python/tests/test_blas.py +1429 -0
- data/mlx/python/tests/test_compile.py +1277 -0
- data/mlx/python/tests/test_constants.py +41 -0
- data/mlx/python/tests/test_conv.py +1198 -0
- data/mlx/python/tests/test_conv_transpose.py +810 -0
- data/mlx/python/tests/test_device.py +150 -0
- data/mlx/python/tests/test_double.py +306 -0
- data/mlx/python/tests/test_einsum.py +363 -0
- data/mlx/python/tests/test_eval.py +200 -0
- data/mlx/python/tests/test_export_import.py +614 -0
- data/mlx/python/tests/test_fast.py +923 -0
- data/mlx/python/tests/test_fast_sdpa.py +647 -0
- data/mlx/python/tests/test_fft.py +323 -0
- data/mlx/python/tests/test_graph.py +37 -0
- data/mlx/python/tests/test_init.py +139 -0
- data/mlx/python/tests/test_linalg.py +621 -0
- data/mlx/python/tests/test_load.py +447 -0
- data/mlx/python/tests/test_losses.py +427 -0
- data/mlx/python/tests/test_memory.py +77 -0
- data/mlx/python/tests/test_nn.py +1986 -0
- data/mlx/python/tests/test_ops.py +3261 -0
- data/mlx/python/tests/test_optimizers.py +584 -0
- data/mlx/python/tests/test_quantized.py +1160 -0
- data/mlx/python/tests/test_random.py +392 -0
- data/mlx/python/tests/test_reduce.py +223 -0
- data/mlx/python/tests/test_tree.py +96 -0
- data/mlx/python/tests/test_upsample.py +100 -0
- data/mlx/python/tests/test_vmap.py +860 -0
- data/mlx/setup.py +315 -0
- data/mlx/tests/CMakeLists.txt +44 -0
- data/mlx/tests/allocator_tests.cpp +41 -0
- data/mlx/tests/arg_reduce_tests.cpp +204 -0
- data/mlx/tests/array_tests.cpp +663 -0
- data/mlx/tests/autograd_tests.cpp +1399 -0
- data/mlx/tests/blas_tests.cpp +110 -0
- data/mlx/tests/compile_tests.cpp +818 -0
- data/mlx/tests/creations_tests.cpp +239 -0
- data/mlx/tests/custom_vjp_tests.cpp +55 -0
- data/mlx/tests/device_tests.cpp +35 -0
- data/mlx/tests/einsum_tests.cpp +85 -0
- data/mlx/tests/eval_tests.cpp +93 -0
- data/mlx/tests/export_import_tests.cpp +164 -0
- data/mlx/tests/fft_tests.cpp +366 -0
- data/mlx/tests/gpu_tests.cpp +523 -0
- data/mlx/tests/linalg_tests.cpp +639 -0
- data/mlx/tests/load_tests.cpp +270 -0
- data/mlx/tests/ops_tests.cpp +4159 -0
- data/mlx/tests/random_tests.cpp +716 -0
- data/mlx/tests/scheduler_tests.cpp +121 -0
- data/mlx/tests/tests.cpp +26 -0
- data/mlx/tests/utils_tests.cpp +67 -0
- data/mlx/tests/vmap_tests.cpp +547 -0
- metadata +958 -0
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
// Copyright © 2023-2024 Apple Inc.
|
|
2
|
+
|
|
3
|
+
#pragma once
|
|
4
|
+
|
|
5
|
+
#include <nanobind/nanobind.h>
|
|
6
|
+
|
|
7
|
+
#include "mlx/array.h"
|
|
8
|
+
#include "python/src/utils.h"
|
|
9
|
+
|
|
10
|
+
namespace mx = mlx::core;
|
|
11
|
+
namespace nb = nanobind;
|
|
12
|
+
|
|
13
|
+
mx::array mlx_get_item(const mx::array& src, const nb::object& obj);
|
|
14
|
+
void mlx_set_item(
|
|
15
|
+
mx::array& src,
|
|
16
|
+
const nb::object& obj,
|
|
17
|
+
const ScalarOrArray& v);
|
|
18
|
+
mx::array mlx_add_item(
|
|
19
|
+
const mx::array& src,
|
|
20
|
+
const nb::object& obj,
|
|
21
|
+
const ScalarOrArray& v);
|
|
22
|
+
mx::array mlx_subtract_item(
|
|
23
|
+
const mx::array& src,
|
|
24
|
+
const nb::object& obj,
|
|
25
|
+
const ScalarOrArray& v);
|
|
26
|
+
mx::array mlx_multiply_item(
|
|
27
|
+
const mx::array& src,
|
|
28
|
+
const nb::object& obj,
|
|
29
|
+
const ScalarOrArray& v);
|
|
30
|
+
mx::array mlx_divide_item(
|
|
31
|
+
const mx::array& src,
|
|
32
|
+
const nb::object& obj,
|
|
33
|
+
const ScalarOrArray& v);
|
|
34
|
+
mx::array mlx_maximum_item(
|
|
35
|
+
const mx::array& src,
|
|
36
|
+
const nb::object& obj,
|
|
37
|
+
const ScalarOrArray& v);
|
|
38
|
+
mx::array mlx_minimum_item(
|
|
39
|
+
const mx::array& src,
|
|
40
|
+
const nb::object& obj,
|
|
41
|
+
const ScalarOrArray& v);
|
|
@@ -0,0 +1,663 @@
|
|
|
1
|
+
// Copyright © 2023-2024 Apple Inc.
|
|
2
|
+
|
|
3
|
+
#include <variant>
|
|
4
|
+
|
|
5
|
+
#include <nanobind/nanobind.h>
|
|
6
|
+
#include <nanobind/stl/pair.h>
|
|
7
|
+
#include <nanobind/stl/string.h>
|
|
8
|
+
#include <nanobind/stl/variant.h>
|
|
9
|
+
#include <nanobind/stl/vector.h>
|
|
10
|
+
|
|
11
|
+
#include "mlx/linalg.h"
|
|
12
|
+
#include "python/src/small_vector.h"
|
|
13
|
+
|
|
14
|
+
namespace mx = mlx::core;
|
|
15
|
+
namespace nb = nanobind;
|
|
16
|
+
using namespace nb::literals;
|
|
17
|
+
|
|
18
|
+
void init_linalg(nb::module_& parent_module) {
|
|
19
|
+
auto m = parent_module.def_submodule(
|
|
20
|
+
"linalg", "mlx.core.linalg: linear algebra routines.");
|
|
21
|
+
|
|
22
|
+
m.def(
|
|
23
|
+
"norm",
|
|
24
|
+
[](const mx::array& a,
|
|
25
|
+
const std::variant<std::monostate, int, double, std::string>& ord_,
|
|
26
|
+
const std::variant<std::monostate, int, std::vector<int>>& axis_,
|
|
27
|
+
const bool keepdims,
|
|
28
|
+
const mx::StreamOrDevice stream) {
|
|
29
|
+
std::optional<std::vector<int>> axis = std::nullopt;
|
|
30
|
+
if (auto pv = std::get_if<int>(&axis_); pv) {
|
|
31
|
+
axis = std::vector<int>{*pv};
|
|
32
|
+
} else if (auto pv = std::get_if<std::vector<int>>(&axis_); pv) {
|
|
33
|
+
axis = *pv;
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
if (std::holds_alternative<std::monostate>(ord_)) {
|
|
37
|
+
return mx::linalg::norm(a, axis, keepdims, stream);
|
|
38
|
+
} else {
|
|
39
|
+
if (auto pv = std::get_if<std::string>(&ord_); pv) {
|
|
40
|
+
return mx::linalg::norm(a, *pv, axis, keepdims, stream);
|
|
41
|
+
}
|
|
42
|
+
double ord;
|
|
43
|
+
if (auto pv = std::get_if<int>(&ord_); pv) {
|
|
44
|
+
ord = *pv;
|
|
45
|
+
} else {
|
|
46
|
+
ord = std::get<double>(ord_);
|
|
47
|
+
}
|
|
48
|
+
return mx::linalg::norm(a, ord, axis, keepdims, stream);
|
|
49
|
+
}
|
|
50
|
+
},
|
|
51
|
+
nb::arg(),
|
|
52
|
+
"ord"_a = nb::none(),
|
|
53
|
+
"axis"_a = nb::none(),
|
|
54
|
+
"keepdims"_a = false,
|
|
55
|
+
nb::kw_only(),
|
|
56
|
+
"stream"_a = nb::none(),
|
|
57
|
+
nb::sig(
|
|
58
|
+
"def norm(a: array, /, ord: Union[None, int, float, str] = None, axis: Union[None, int, list[int]] = None, keepdims: bool = False, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
59
|
+
R"pbdoc(
|
|
60
|
+
Matrix or vector norm.
|
|
61
|
+
|
|
62
|
+
This function computes vector or matrix norms depending on the value of
|
|
63
|
+
the ``ord`` and ``axis`` parameters.
|
|
64
|
+
|
|
65
|
+
Args:
|
|
66
|
+
a (array): Input array. If ``axis`` is ``None``, ``a`` must be 1-D or 2-D,
|
|
67
|
+
unless ``ord`` is ``None``. If both ``axis`` and ``ord`` are ``None``, the
|
|
68
|
+
2-norm of ``a.flatten`` will be returned.
|
|
69
|
+
ord (int, float or str, optional): Order of the norm (see table under ``Notes``).
|
|
70
|
+
If ``None``, the 2-norm (or Frobenius norm for matrices) will be computed
|
|
71
|
+
along the given ``axis``. Default: ``None``.
|
|
72
|
+
axis (int or list(int), optional): If ``axis`` is an integer, it specifies the
|
|
73
|
+
axis of ``a`` along which to compute the vector norms. If ``axis`` is a
|
|
74
|
+
2-tuple, it specifies the axes that hold 2-D matrices, and the matrix
|
|
75
|
+
norms of these matrices are computed. If `axis` is ``None`` then
|
|
76
|
+
either a vector norm (when ``a`` is 1-D) or a matrix norm (when ``a`` is
|
|
77
|
+
2-D) is returned. Default: ``None``.
|
|
78
|
+
keepdims (bool, optional): If ``True``, the axes which are normed over are
|
|
79
|
+
left in the result as dimensions with size one. Default ``False``.
|
|
80
|
+
|
|
81
|
+
Returns:
|
|
82
|
+
array: The output containing the norm(s).
|
|
83
|
+
|
|
84
|
+
Notes:
|
|
85
|
+
For values of ``ord < 1``, the result is, strictly speaking, not a
|
|
86
|
+
mathematical norm, but it may still be useful for various numerical
|
|
87
|
+
purposes.
|
|
88
|
+
|
|
89
|
+
The following norms can be calculated:
|
|
90
|
+
|
|
91
|
+
===== ============================ ==========================
|
|
92
|
+
ord norm for matrices norm for vectors
|
|
93
|
+
===== ============================ ==========================
|
|
94
|
+
None Frobenius norm 2-norm
|
|
95
|
+
'fro' Frobenius norm --
|
|
96
|
+
'nuc' nuclear norm --
|
|
97
|
+
inf max(sum(abs(x), axis=1)) max(abs(x))
|
|
98
|
+
-inf min(sum(abs(x), axis=1)) min(abs(x))
|
|
99
|
+
0 -- sum(x != 0)
|
|
100
|
+
1 max(sum(abs(x), axis=0)) as below
|
|
101
|
+
-1 min(sum(abs(x), axis=0)) as below
|
|
102
|
+
2 2-norm (largest sing. value) as below
|
|
103
|
+
-2 smallest singular value as below
|
|
104
|
+
other -- sum(abs(x)**ord)**(1./ord)
|
|
105
|
+
===== ============================ ==========================
|
|
106
|
+
|
|
107
|
+
The Frobenius norm is given by [1]_:
|
|
108
|
+
|
|
109
|
+
:math:`||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
|
|
110
|
+
|
|
111
|
+
The nuclear norm is the sum of the singular values.
|
|
112
|
+
|
|
113
|
+
Both the Frobenius and nuclear norm orders are only defined for
|
|
114
|
+
matrices and raise a ``ValueError`` when ``a.ndim != 2``.
|
|
115
|
+
|
|
116
|
+
References:
|
|
117
|
+
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
|
|
118
|
+
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
|
|
119
|
+
|
|
120
|
+
Examples:
|
|
121
|
+
>>> import mlx.core as mx
|
|
122
|
+
>>> from mlx.core import linalg as la
|
|
123
|
+
>>> a = mx.arange(9) - 4
|
|
124
|
+
>>> a
|
|
125
|
+
array([-4, -3, -2, ..., 2, 3, 4], dtype=int32)
|
|
126
|
+
>>> b = a.reshape((3,3))
|
|
127
|
+
>>> b
|
|
128
|
+
array([[-4, -3, -2],
|
|
129
|
+
[-1, 0, 1],
|
|
130
|
+
[ 2, 3, 4]], dtype=int32)
|
|
131
|
+
>>> la.norm(a)
|
|
132
|
+
array(7.74597, dtype=float32)
|
|
133
|
+
>>> la.norm(b)
|
|
134
|
+
array(7.74597, dtype=float32)
|
|
135
|
+
>>> la.norm(b, 'fro')
|
|
136
|
+
array(7.74597, dtype=float32)
|
|
137
|
+
>>> la.norm(a, float("inf"))
|
|
138
|
+
array(4, dtype=float32)
|
|
139
|
+
>>> la.norm(b, float("inf"))
|
|
140
|
+
array(9, dtype=float32)
|
|
141
|
+
>>> la.norm(a, -float("inf"))
|
|
142
|
+
array(0, dtype=float32)
|
|
143
|
+
>>> la.norm(b, -float("inf"))
|
|
144
|
+
array(2, dtype=float32)
|
|
145
|
+
>>> la.norm(a, 1)
|
|
146
|
+
array(20, dtype=float32)
|
|
147
|
+
>>> la.norm(b, 1)
|
|
148
|
+
array(7, dtype=float32)
|
|
149
|
+
>>> la.norm(a, -1)
|
|
150
|
+
array(0, dtype=float32)
|
|
151
|
+
>>> la.norm(b, -1)
|
|
152
|
+
array(6, dtype=float32)
|
|
153
|
+
>>> la.norm(a, 2)
|
|
154
|
+
array(7.74597, dtype=float32)
|
|
155
|
+
>>> la.norm(a, 3)
|
|
156
|
+
array(5.84804, dtype=float32)
|
|
157
|
+
>>> la.norm(a, -3)
|
|
158
|
+
array(0, dtype=float32)
|
|
159
|
+
>>> c = mx.array([[ 1, 2, 3],
|
|
160
|
+
... [-1, 1, 4]])
|
|
161
|
+
>>> la.norm(c, axis=0)
|
|
162
|
+
array([1.41421, 2.23607, 5], dtype=float32)
|
|
163
|
+
>>> la.norm(c, axis=1)
|
|
164
|
+
array([3.74166, 4.24264], dtype=float32)
|
|
165
|
+
>>> la.norm(c, ord=1, axis=1)
|
|
166
|
+
array([6, 6], dtype=float32)
|
|
167
|
+
>>> m = mx.arange(8).reshape(2,2,2)
|
|
168
|
+
>>> la.norm(m, axis=(1,2))
|
|
169
|
+
array([3.74166, 11.225], dtype=float32)
|
|
170
|
+
>>> la.norm(m[0, :, :]), LA.norm(m[1, :, :])
|
|
171
|
+
(array(3.74166, dtype=float32), array(11.225, dtype=float32))
|
|
172
|
+
)pbdoc");
|
|
173
|
+
m.def(
|
|
174
|
+
"qr",
|
|
175
|
+
&mx::linalg::qr,
|
|
176
|
+
"a"_a,
|
|
177
|
+
nb::kw_only(),
|
|
178
|
+
"stream"_a = nb::none(),
|
|
179
|
+
nb::sig(
|
|
180
|
+
"def qr(a: array, *, stream: Union[None, Stream, Device] = None) -> Tuple[array, array]"),
|
|
181
|
+
R"pbdoc(
|
|
182
|
+
The QR factorization of the input matrix.
|
|
183
|
+
|
|
184
|
+
This function supports arrays with at least 2 dimensions. The matrices
|
|
185
|
+
which are factorized are assumed to be in the last two dimensions of
|
|
186
|
+
the input.
|
|
187
|
+
|
|
188
|
+
Args:
|
|
189
|
+
a (array): Input array.
|
|
190
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
191
|
+
in which case the default stream of the default device is used.
|
|
192
|
+
|
|
193
|
+
Returns:
|
|
194
|
+
tuple(array, array): ``Q`` and ``R`` matrices such that ``Q @ R = a``.
|
|
195
|
+
|
|
196
|
+
Example:
|
|
197
|
+
>>> A = mx.array([[2., 3.], [1., 2.]])
|
|
198
|
+
>>> Q, R = mx.linalg.qr(A, stream=mx.cpu)
|
|
199
|
+
>>> Q
|
|
200
|
+
array([[-0.894427, -0.447214],
|
|
201
|
+
[-0.447214, 0.894427]], dtype=float32)
|
|
202
|
+
>>> R
|
|
203
|
+
array([[-2.23607, -3.57771],
|
|
204
|
+
[0, 0.447214]], dtype=float32)
|
|
205
|
+
)pbdoc");
|
|
206
|
+
m.def(
|
|
207
|
+
"svd",
|
|
208
|
+
[](const mx::array& a,
|
|
209
|
+
bool compute_uv /* = true */,
|
|
210
|
+
mx::StreamOrDevice s /* = {} */) -> nb::object {
|
|
211
|
+
const auto result = mx::linalg::svd(a, compute_uv, s);
|
|
212
|
+
if (result.size() == 1) {
|
|
213
|
+
return nb::cast(result.at(0));
|
|
214
|
+
} else {
|
|
215
|
+
return nb::make_tuple(result.at(0), result.at(1), result.at(2));
|
|
216
|
+
}
|
|
217
|
+
},
|
|
218
|
+
"a"_a,
|
|
219
|
+
"compute_uv"_a = true,
|
|
220
|
+
nb::kw_only(),
|
|
221
|
+
"stream"_a = nb::none(),
|
|
222
|
+
nb::sig(
|
|
223
|
+
"def svd(a: array, compute_uv: bool = True, *, stream: Union[None, Stream, Device] = None) -> Tuple[array, array, array]"),
|
|
224
|
+
R"pbdoc(
|
|
225
|
+
The Singular Value Decomposition (SVD) of the input matrix.
|
|
226
|
+
|
|
227
|
+
This function supports arrays with at least 2 dimensions. When the input
|
|
228
|
+
has more than two dimensions, the function iterates over all indices of the first
|
|
229
|
+
a.ndim - 2 dimensions and for each combination SVD is applied to the last two indices.
|
|
230
|
+
|
|
231
|
+
Args:
|
|
232
|
+
a (array): Input array.
|
|
233
|
+
compute_uv (bool, optional): If ``True``, return the ``U``, ``S``, and ``Vt`` components.
|
|
234
|
+
If ``False``, return only the ``S`` array. Default: ``True``.
|
|
235
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
236
|
+
in which case the default stream of the default device is used.
|
|
237
|
+
|
|
238
|
+
Returns:
|
|
239
|
+
Union[tuple(array, ...), array]:
|
|
240
|
+
If compute_uv is ``True`` returns the ``U``, ``S``, and ``Vt`` matrices, such that
|
|
241
|
+
``A = U @ diag(S) @ Vt``. If compute_uv is ``False`` returns singular values array ``S``.
|
|
242
|
+
)pbdoc");
|
|
243
|
+
m.def(
|
|
244
|
+
"inv",
|
|
245
|
+
&mx::linalg::inv,
|
|
246
|
+
"a"_a,
|
|
247
|
+
nb::kw_only(),
|
|
248
|
+
"stream"_a = nb::none(),
|
|
249
|
+
nb::sig(
|
|
250
|
+
"def inv(a: array, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
251
|
+
R"pbdoc(
|
|
252
|
+
Compute the inverse of a square matrix.
|
|
253
|
+
|
|
254
|
+
This function supports arrays with at least 2 dimensions. When the input
|
|
255
|
+
has more than two dimensions, the inverse is computed for each matrix
|
|
256
|
+
in the last two dimensions of ``a``.
|
|
257
|
+
|
|
258
|
+
Args:
|
|
259
|
+
a (array): Input array.
|
|
260
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
261
|
+
in which case the default stream of the default device is used.
|
|
262
|
+
|
|
263
|
+
Returns:
|
|
264
|
+
array: ``ainv`` such that ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``
|
|
265
|
+
)pbdoc");
|
|
266
|
+
m.def(
|
|
267
|
+
"tri_inv",
|
|
268
|
+
&mx::linalg::tri_inv,
|
|
269
|
+
"a"_a,
|
|
270
|
+
"upper"_a = false,
|
|
271
|
+
nb::kw_only(),
|
|
272
|
+
"stream"_a = nb::none(),
|
|
273
|
+
nb::sig(
|
|
274
|
+
"def tri_inv(a: array, upper: bool = False, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
275
|
+
R"pbdoc(
|
|
276
|
+
Compute the inverse of a triangular square matrix.
|
|
277
|
+
|
|
278
|
+
This function supports arrays with at least 2 dimensions. When the input
|
|
279
|
+
has more than two dimensions, the inverse is computed for each matrix
|
|
280
|
+
in the last two dimensions of ``a``.
|
|
281
|
+
|
|
282
|
+
Args:
|
|
283
|
+
a (array): Input array.
|
|
284
|
+
upper (bool, optional): Whether the array is upper or lower triangular. Defaults to ``False``.
|
|
285
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
286
|
+
in which case the default stream of the default device is used.
|
|
287
|
+
|
|
288
|
+
Returns:
|
|
289
|
+
array: ``ainv`` such that ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``
|
|
290
|
+
)pbdoc");
|
|
291
|
+
m.def(
|
|
292
|
+
"cholesky",
|
|
293
|
+
&mx::linalg::cholesky,
|
|
294
|
+
"a"_a,
|
|
295
|
+
"upper"_a = false,
|
|
296
|
+
nb::kw_only(),
|
|
297
|
+
"stream"_a = nb::none(),
|
|
298
|
+
nb::sig(
|
|
299
|
+
"def cholesky(a: array, upper: bool = False, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
300
|
+
R"pbdoc(
|
|
301
|
+
Compute the Cholesky decomposition of a real symmetric positive semi-definite matrix.
|
|
302
|
+
|
|
303
|
+
This function supports arrays with at least 2 dimensions. When the input
|
|
304
|
+
has more than two dimensions, the Cholesky decomposition is computed for each matrix
|
|
305
|
+
in the last two dimensions of ``a``.
|
|
306
|
+
|
|
307
|
+
If the input matrix is not symmetric positive semi-definite, behaviour is undefined.
|
|
308
|
+
|
|
309
|
+
Args:
|
|
310
|
+
a (array): Input array.
|
|
311
|
+
upper (bool, optional): If ``True``, return the upper triangular Cholesky factor.
|
|
312
|
+
If ``False``, return the lower triangular Cholesky factor. Default: ``False``.
|
|
313
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
314
|
+
in which case the default stream of the default device is used.
|
|
315
|
+
|
|
316
|
+
Returns:
|
|
317
|
+
array: If ``upper = False``, it returns a lower triangular ``L`` matrix such
|
|
318
|
+
that ``L @ L.T = a``. If ``upper = True``, it returns an upper triangular
|
|
319
|
+
``U`` matrix such that ``U.T @ U = a``.
|
|
320
|
+
)pbdoc");
|
|
321
|
+
m.def(
|
|
322
|
+
"cholesky_inv",
|
|
323
|
+
&mx::linalg::cholesky_inv,
|
|
324
|
+
"a"_a,
|
|
325
|
+
"upper"_a = false,
|
|
326
|
+
nb::kw_only(),
|
|
327
|
+
"stream"_a = nb::none(),
|
|
328
|
+
nb::sig(
|
|
329
|
+
"def cholesky_inv(L: array, upper: bool = False, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
330
|
+
R"pbdoc(
|
|
331
|
+
Compute the inverse of a real symmetric positive semi-definite matrix using it's Cholesky decomposition.
|
|
332
|
+
|
|
333
|
+
Let :math:`\mathbf{A}` be a real symmetric positive semi-definite matrix and :math:`\mathbf{L}` its Cholesky decomposition such that:
|
|
334
|
+
|
|
335
|
+
.. math::
|
|
336
|
+
|
|
337
|
+
\begin{aligned}
|
|
338
|
+
\mathbf{A} = \mathbf{L}\mathbf{L}^T
|
|
339
|
+
\end{aligned}
|
|
340
|
+
|
|
341
|
+
This function computes :math:`\mathbf{A}^{-1}`.
|
|
342
|
+
|
|
343
|
+
This function supports arrays with at least 2 dimensions. When the input
|
|
344
|
+
has more than two dimensions, the Cholesky inverse is computed for each matrix
|
|
345
|
+
in the last two dimensions of :math:`\mathbf{L}`.
|
|
346
|
+
|
|
347
|
+
If the input matrix is not a triangular matrix behaviour is undefined.
|
|
348
|
+
|
|
349
|
+
Args:
|
|
350
|
+
L (array): Input array.
|
|
351
|
+
upper (bool, optional): If ``True``, return the upper triangular Cholesky factor.
|
|
352
|
+
If ``False``, return the lower triangular Cholesky factor. Default: ``False``.
|
|
353
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
354
|
+
in which case the default stream of the default device is used.
|
|
355
|
+
|
|
356
|
+
Returns:
|
|
357
|
+
array: :math:`\mathbf{A^{-1}}` where :math:`\mathbf{A} = \mathbf{L}\mathbf{L}^T`.
|
|
358
|
+
)pbdoc");
|
|
359
|
+
m.def(
|
|
360
|
+
"pinv",
|
|
361
|
+
&mx::linalg::pinv,
|
|
362
|
+
"a"_a,
|
|
363
|
+
nb::kw_only(),
|
|
364
|
+
"stream"_a = nb::none(),
|
|
365
|
+
nb::sig(
|
|
366
|
+
"def pinv(a: array, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
367
|
+
R"pbdoc(
|
|
368
|
+
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
|
|
369
|
+
|
|
370
|
+
This function calculates a generalized inverse of a matrix using its
|
|
371
|
+
singular-value decomposition. This function supports arrays with at least 2 dimensions.
|
|
372
|
+
When the input has more than two dimensions, the inverse is computed for each
|
|
373
|
+
matrix in the last two dimensions of ``a``.
|
|
374
|
+
|
|
375
|
+
Args:
|
|
376
|
+
a (array): Input array.
|
|
377
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
378
|
+
in which case the default stream of the default device is used.
|
|
379
|
+
|
|
380
|
+
Returns:
|
|
381
|
+
array: ``aplus`` such that ``a @ aplus @ a = a``
|
|
382
|
+
)pbdoc");
|
|
383
|
+
m.def(
|
|
384
|
+
"cross",
|
|
385
|
+
&mx::linalg::cross,
|
|
386
|
+
"a"_a,
|
|
387
|
+
"b"_a,
|
|
388
|
+
"axis"_a = -1,
|
|
389
|
+
nb::kw_only(),
|
|
390
|
+
"stream"_a = nb::none(),
|
|
391
|
+
nb::sig(
|
|
392
|
+
"def cross(a: array, b: array, axis: int = -1, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
393
|
+
R"pbdoc(
|
|
394
|
+
Compute the cross product of two arrays along a specified axis.
|
|
395
|
+
|
|
396
|
+
The cross product is defined for arrays with size 2 or 3 in the
|
|
397
|
+
specified axis. If the size is 2 then the third value is assumed
|
|
398
|
+
to be zero.
|
|
399
|
+
|
|
400
|
+
Args:
|
|
401
|
+
a (array): Input array.
|
|
402
|
+
b (array): Input array.
|
|
403
|
+
axis (int, optional): Axis along which to compute the cross
|
|
404
|
+
product. Default: ``-1``.
|
|
405
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
406
|
+
in which case the default stream of the default device is used.
|
|
407
|
+
|
|
408
|
+
Returns:
|
|
409
|
+
array: The cross product of ``a`` and ``b`` along the specified axis.
|
|
410
|
+
)pbdoc");
|
|
411
|
+
m.def(
|
|
412
|
+
"eigvals",
|
|
413
|
+
&mx::linalg::eigvals,
|
|
414
|
+
"a"_a,
|
|
415
|
+
nb::kw_only(),
|
|
416
|
+
"stream"_a = nb::none(),
|
|
417
|
+
R"pbdoc(
|
|
418
|
+
Compute the eigenvalues of a square matrix.
|
|
419
|
+
|
|
420
|
+
This function differs from :func:`numpy.linalg.eigvals` in that the
|
|
421
|
+
return type is always complex even if the eigenvalues are all real.
|
|
422
|
+
|
|
423
|
+
This function supports arrays with at least 2 dimensions. When the
|
|
424
|
+
input has more than two dimensions, the eigenvalues are computed for
|
|
425
|
+
each matrix in the last two dimensions.
|
|
426
|
+
|
|
427
|
+
Args:
|
|
428
|
+
a (array): The input array.
|
|
429
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
430
|
+
in which case the default stream of the default device is used.
|
|
431
|
+
|
|
432
|
+
Returns:
|
|
433
|
+
array: The eigenvalues (not necessarily in order).
|
|
434
|
+
|
|
435
|
+
Example:
|
|
436
|
+
>>> A = mx.array([[1., -2.], [-2., 1.]])
|
|
437
|
+
>>> eigenvalues = mx.linalg.eigvals(A, stream=mx.cpu)
|
|
438
|
+
>>> eigenvalues
|
|
439
|
+
array([3+0j, -1+0j], dtype=complex64)
|
|
440
|
+
)pbdoc");
|
|
441
|
+
m.def(
|
|
442
|
+
"eig",
|
|
443
|
+
[](const mx::array& a, mx::StreamOrDevice s) {
|
|
444
|
+
auto result = mx::linalg::eig(a, s);
|
|
445
|
+
return nb::make_tuple(result.first, result.second);
|
|
446
|
+
},
|
|
447
|
+
"a"_a,
|
|
448
|
+
nb::kw_only(),
|
|
449
|
+
"stream"_a = nb::none(),
|
|
450
|
+
nb::sig(
|
|
451
|
+
"def eig(a: array, *, stream: Union[None, Stream, Device] = None) -> Tuple[array, array]"),
|
|
452
|
+
R"pbdoc(
|
|
453
|
+
Compute the eigenvalues and eigenvectors of a square matrix.
|
|
454
|
+
|
|
455
|
+
This function differs from :func:`numpy.linalg.eig` in that the
|
|
456
|
+
return type is always complex even if the eigenvalues are all real.
|
|
457
|
+
|
|
458
|
+
This function supports arrays with at least 2 dimensions. When the input
|
|
459
|
+
has more than two dimensions, the eigenvalues and eigenvectors are
|
|
460
|
+
computed for each matrix in the last two dimensions.
|
|
461
|
+
|
|
462
|
+
Args:
|
|
463
|
+
a (array): The input array.
|
|
464
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
465
|
+
in which case the default stream of the default device is used.
|
|
466
|
+
|
|
467
|
+
Returns:
|
|
468
|
+
Tuple[array, array]:
|
|
469
|
+
A tuple containing the eigenvalues and the normalized right
|
|
470
|
+
eigenvectors. The column ``v[:, i]`` is the eigenvector
|
|
471
|
+
corresponding to the i-th eigenvalue.
|
|
472
|
+
|
|
473
|
+
Example:
|
|
474
|
+
>>> A = mx.array([[1., -2.], [-2., 1.]])
|
|
475
|
+
>>> w, v = mx.linalg.eig(A, stream=mx.cpu)
|
|
476
|
+
>>> w
|
|
477
|
+
array([3+0j, -1+0j], dtype=complex64)
|
|
478
|
+
>>> v
|
|
479
|
+
array([[0.707107+0j, 0.707107+0j],
|
|
480
|
+
[-0.707107+0j, 0.707107+0j]], dtype=complex64)
|
|
481
|
+
)pbdoc");
|
|
482
|
+
|
|
483
|
+
m.def(
|
|
484
|
+
"eigvalsh",
|
|
485
|
+
&mx::linalg::eigvalsh,
|
|
486
|
+
"a"_a,
|
|
487
|
+
"UPLO"_a = "L",
|
|
488
|
+
nb::kw_only(),
|
|
489
|
+
"stream"_a = nb::none(),
|
|
490
|
+
R"pbdoc(
|
|
491
|
+
Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
|
|
492
|
+
|
|
493
|
+
This function supports arrays with at least 2 dimensions. When the
|
|
494
|
+
input has more than two dimensions, the eigenvalues are computed for
|
|
495
|
+
each matrix in the last two dimensions.
|
|
496
|
+
|
|
497
|
+
Args:
|
|
498
|
+
a (array): Input array. Must be a real symmetric or complex
|
|
499
|
+
Hermitian matrix.
|
|
500
|
+
UPLO (str, optional): Whether to use the upper (``"U"``) or
|
|
501
|
+
lower (``"L"``) triangle of the matrix. Default: ``"L"``.
|
|
502
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
503
|
+
in which case the default stream of the default device is used.
|
|
504
|
+
|
|
505
|
+
Returns:
|
|
506
|
+
array: The eigenvalues in ascending order.
|
|
507
|
+
|
|
508
|
+
Note:
|
|
509
|
+
The input matrix is assumed to be symmetric (or Hermitian). Only
|
|
510
|
+
the selected triangle is used. No checks for symmetry are performed.
|
|
511
|
+
|
|
512
|
+
Example:
|
|
513
|
+
>>> A = mx.array([[1., -2.], [-2., 1.]])
|
|
514
|
+
>>> eigenvalues = mx.linalg.eigvalsh(A, stream=mx.cpu)
|
|
515
|
+
>>> eigenvalues
|
|
516
|
+
array([-1., 3.], dtype=float32)
|
|
517
|
+
)pbdoc");
|
|
518
|
+
m.def(
|
|
519
|
+
"eigh",
|
|
520
|
+
[](const mx::array& a, const std::string& UPLO, mx::StreamOrDevice s) {
|
|
521
|
+
auto result = mx::linalg::eigh(a, UPLO, s);
|
|
522
|
+
return nb::make_tuple(result.first, result.second);
|
|
523
|
+
},
|
|
524
|
+
"a"_a,
|
|
525
|
+
"UPLO"_a = "L",
|
|
526
|
+
nb::kw_only(),
|
|
527
|
+
"stream"_a = nb::none(),
|
|
528
|
+
nb::sig(
|
|
529
|
+
"def eigh(a: array, UPLO: str = 'L', *, stream: Union[None, Stream, Device] = None) -> Tuple[array, array]"),
|
|
530
|
+
R"pbdoc(
|
|
531
|
+
Compute the eigenvalues and eigenvectors of a complex Hermitian or
|
|
532
|
+
real symmetric matrix.
|
|
533
|
+
|
|
534
|
+
This function supports arrays with at least 2 dimensions. When the input
|
|
535
|
+
has more than two dimensions, the eigenvalues and eigenvectors are
|
|
536
|
+
computed for each matrix in the last two dimensions.
|
|
537
|
+
|
|
538
|
+
Args:
|
|
539
|
+
a (array): Input array. Must be a real symmetric or complex
|
|
540
|
+
Hermitian matrix.
|
|
541
|
+
UPLO (str, optional): Whether to use the upper (``"U"``) or
|
|
542
|
+
lower (``"L"``) triangle of the matrix. Default: ``"L"``.
|
|
543
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
544
|
+
in which case the default stream of the default device is used.
|
|
545
|
+
|
|
546
|
+
Returns:
|
|
547
|
+
Tuple[array, array]:
|
|
548
|
+
A tuple containing the eigenvalues in ascending order and
|
|
549
|
+
the normalized eigenvectors. The column ``v[:, i]`` is the
|
|
550
|
+
eigenvector corresponding to the i-th eigenvalue.
|
|
551
|
+
|
|
552
|
+
Note:
|
|
553
|
+
The input matrix is assumed to be symmetric (or Hermitian). Only
|
|
554
|
+
the selected triangle is used. No checks for symmetry are performed.
|
|
555
|
+
|
|
556
|
+
Example:
|
|
557
|
+
>>> A = mx.array([[1., -2.], [-2., 1.]])
|
|
558
|
+
>>> w, v = mx.linalg.eigh(A, stream=mx.cpu)
|
|
559
|
+
>>> w
|
|
560
|
+
array([-1., 3.], dtype=float32)
|
|
561
|
+
>>> v
|
|
562
|
+
array([[ 0.707107, -0.707107],
|
|
563
|
+
[ 0.707107, 0.707107]], dtype=float32)
|
|
564
|
+
)pbdoc");
|
|
565
|
+
m.def(
|
|
566
|
+
"lu",
|
|
567
|
+
[](const mx::array& a, mx::StreamOrDevice s /* = {} */) {
|
|
568
|
+
auto result = mx::linalg::lu(a, s);
|
|
569
|
+
return nb::make_tuple(result.at(0), result.at(1), result.at(2));
|
|
570
|
+
},
|
|
571
|
+
"a"_a,
|
|
572
|
+
nb::kw_only(),
|
|
573
|
+
"stream"_a = nb::none(),
|
|
574
|
+
nb::sig(
|
|
575
|
+
"def lu(a: array, *, stream: Union[None, Stream, Device] = None) -> Tuple[array, array, array]"),
|
|
576
|
+
R"pbdoc(
|
|
577
|
+
Compute the LU factorization of the given matrix ``A``.
|
|
578
|
+
|
|
579
|
+
Note, unlike the default behavior of ``scipy.linalg.lu``, the pivots
|
|
580
|
+
are indices. To reconstruct the input use ``L[P, :] @ U`` for 2
|
|
581
|
+
dimensions or ``mx.take_along_axis(L, P[..., None], axis=-2) @ U``
|
|
582
|
+
for more than 2 dimensions.
|
|
583
|
+
|
|
584
|
+
            To construct the full permutation matrix do:
|
|
585
|
+
|
|
586
|
+
.. code-block::
|
|
587
|
+
|
|
588
|
+
P = mx.put_along_axis(mx.zeros_like(L), p[..., None], mx.array(1.0), axis=-1)
|
|
589
|
+
|
|
590
|
+
Args:
|
|
591
|
+
a (array): Input array.
|
|
592
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
593
|
+
in which case the default stream of the default device is used.
|
|
594
|
+
|
|
595
|
+
Returns:
|
|
596
|
+
tuple(array, array, array):
|
|
597
|
+
The ``p``, ``L``, and ``U`` arrays, such that ``A = L[P, :] @ U``
|
|
598
|
+
)pbdoc");
|
|
599
|
+
m.def(
|
|
600
|
+
"lu_factor",
|
|
601
|
+
&mx::linalg::lu_factor,
|
|
602
|
+
"a"_a,
|
|
603
|
+
nb::kw_only(),
|
|
604
|
+
"stream"_a = nb::none(),
|
|
605
|
+
nb::sig(
|
|
606
|
+
"def lu_factor(a: array, *, stream: Union[None, Stream, Device] = None) -> Tuple[array, array]"),
|
|
607
|
+
R"pbdoc(
|
|
608
|
+
Computes a compact representation of the LU factorization.
|
|
609
|
+
|
|
610
|
+
Args:
|
|
611
|
+
a (array): Input array.
|
|
612
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
613
|
+
in which case the default stream of the default device is used.
|
|
614
|
+
|
|
615
|
+
Returns:
|
|
616
|
+
tuple(array, array): The ``LU`` matrix and ``pivots`` array.
|
|
617
|
+
)pbdoc");
|
|
618
|
+
m.def(
|
|
619
|
+
"solve",
|
|
620
|
+
&mx::linalg::solve,
|
|
621
|
+
"a"_a,
|
|
622
|
+
"b"_a,
|
|
623
|
+
nb::kw_only(),
|
|
624
|
+
"stream"_a = nb::none(),
|
|
625
|
+
nb::sig(
|
|
626
|
+
"def solve(a: array, b: array, *, stream: Union[None, Stream, Device] = None) -> array"),
|
|
627
|
+
R"pbdoc(
|
|
628
|
+
Compute the solution to a system of linear equations ``AX = B``.
|
|
629
|
+
|
|
630
|
+
Args:
|
|
631
|
+
a (array): Input array.
|
|
632
|
+
b (array): Input array.
|
|
633
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
634
|
+
in which case the default stream of the default device is used.
|
|
635
|
+
|
|
636
|
+
Returns:
|
|
637
|
+
array: The unique solution to the system ``AX = B``.
|
|
638
|
+
)pbdoc");
|
|
639
|
+
m.def(
|
|
640
|
+
"solve_triangular",
|
|
641
|
+
&mx::linalg::solve_triangular,
|
|
642
|
+
"a"_a,
|
|
643
|
+
"b"_a,
|
|
644
|
+
nb::kw_only(),
|
|
645
|
+
"upper"_a = false,
|
|
646
|
+
"stream"_a = nb::none(),
|
|
647
|
+
nb::sig(
|
|
648
|
+
"def solve_triangular(a: array, b: array, *, upper: bool = False, stream: Union[None, Stream, Device] = None) -> array"),
|
|
649
|
+
R"pbdoc(
|
|
650
|
+
Computes the solution of a triangular system of linear equations ``AX = B``.
|
|
651
|
+
|
|
652
|
+
Args:
|
|
653
|
+
a (array): Input array.
|
|
654
|
+
b (array): Input array.
|
|
655
|
+
upper (bool, optional): Whether the array is upper or lower
|
|
656
|
+
triangular. Default: ``False``.
|
|
657
|
+
stream (Stream, optional): Stream or device. Defaults to ``None``
|
|
658
|
+
in which case the default stream of the default device is used.
|
|
659
|
+
|
|
660
|
+
Returns:
|
|
661
|
+
array: The unique solution to the system ``AX = B``.
|
|
662
|
+
)pbdoc");
|
|
663
|
+
}
|