mlx 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mlx might be problematic. Click here for more details.
- checksums.yaml +7 -0
- data/ext/mlx/CMakeLists.txt +7 -0
- data/ext/mlx/Makefile +273 -0
- data/ext/mlx/extconf.rb +94 -0
- data/ext/mlx/mkmf.log +44 -0
- data/ext/mlx/native.bundle +0 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Info.plist +20 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Resources/DWARF/native.bundle +0 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Resources/Relocations/aarch64/native.bundle.yml +5 -0
- data/ext/mlx/native.cpp +8027 -0
- data/ext/mlx/native.o +0 -0
- data/lib/mlx/core.rb +1678 -0
- data/lib/mlx/distributed_utils/common.rb +116 -0
- data/lib/mlx/distributed_utils/config.rb +600 -0
- data/lib/mlx/distributed_utils/launch.rb +490 -0
- data/lib/mlx/extension.rb +24 -0
- data/lib/mlx/nn/base.rb +388 -0
- data/lib/mlx/nn/init.rb +140 -0
- data/lib/mlx/nn/layers/activations.rb +336 -0
- data/lib/mlx/nn/layers/base.rb +6 -0
- data/lib/mlx/nn/layers/containers.rb +20 -0
- data/lib/mlx/nn/layers/convolution.rb +120 -0
- data/lib/mlx/nn/layers/convolution_transpose.rb +114 -0
- data/lib/mlx/nn/layers/distributed.rb +309 -0
- data/lib/mlx/nn/layers/dropout.rb +75 -0
- data/lib/mlx/nn/layers/embedding.rb +28 -0
- data/lib/mlx/nn/layers/linear.rb +79 -0
- data/lib/mlx/nn/layers/normalization.rb +216 -0
- data/lib/mlx/nn/layers/pooling.rb +167 -0
- data/lib/mlx/nn/layers/positional_encoding.rb +126 -0
- data/lib/mlx/nn/layers/quantized.rb +215 -0
- data/lib/mlx/nn/layers/recurrent.rb +135 -0
- data/lib/mlx/nn/layers/transformer.rb +330 -0
- data/lib/mlx/nn/layers/upsample.rb +97 -0
- data/lib/mlx/nn/layers.rb +18 -0
- data/lib/mlx/nn/losses.rb +251 -0
- data/lib/mlx/nn/utils.rb +167 -0
- data/lib/mlx/nn.rb +12 -0
- data/lib/mlx/optimizers/optimizers.rb +808 -0
- data/lib/mlx/optimizers/schedulers.rb +62 -0
- data/lib/mlx/optimizers.rb +9 -0
- data/lib/mlx/utils.rb +171 -0
- data/lib/mlx/version +1 -0
- data/lib/mlx/version.rb +5 -0
- data/lib/mlx.rb +64 -0
- data/mlx/.clang-format +87 -0
- data/mlx/.git +1 -0
- data/mlx/.github/ISSUE_TEMPLATE/bug_report.md +28 -0
- data/mlx/.github/actions/build-cuda-release/action.yml +31 -0
- data/mlx/.github/actions/build-docs/action.yml +38 -0
- data/mlx/.github/actions/build-linux/action.yml +38 -0
- data/mlx/.github/actions/build-linux-release/action.yml +42 -0
- data/mlx/.github/actions/build-macos/action.yml +80 -0
- data/mlx/.github/actions/build-macos-release/action.yml +36 -0
- data/mlx/.github/actions/build-windows/action.yml +26 -0
- data/mlx/.github/actions/setup-linux/action.yml +93 -0
- data/mlx/.github/actions/setup-macos/action.yml +24 -0
- data/mlx/.github/actions/setup-windows/action.yml +42 -0
- data/mlx/.github/actions/test-linux/action.yml +69 -0
- data/mlx/.github/actions/test-windows/action.yml +20 -0
- data/mlx/.github/dependabot.yml +6 -0
- data/mlx/.github/pull_request_template.md +12 -0
- data/mlx/.github/scripts/build-sanitizer-tests.sh +48 -0
- data/mlx/.github/scripts/setup+build-cpp-linux-fedora-container.sh +27 -0
- data/mlx/.github/workflows/build_and_test.yml +152 -0
- data/mlx/.github/workflows/documentation.yml +28 -0
- data/mlx/.github/workflows/nightly.yml +104 -0
- data/mlx/.github/workflows/release.yml +256 -0
- data/mlx/.gitignore +81 -0
- data/mlx/.pre-commit-config.yaml +27 -0
- data/mlx/ACKNOWLEDGMENTS.md +268 -0
- data/mlx/CITATION.cff +24 -0
- data/mlx/CMakeLists.txt +437 -0
- data/mlx/CODE_OF_CONDUCT.md +132 -0
- data/mlx/CONTRIBUTING.md +38 -0
- data/mlx/LICENSE +21 -0
- data/mlx/MANIFEST.in +6 -0
- data/mlx/README.md +121 -0
- data/mlx/benchmarks/cpp/CMakeLists.txt +11 -0
- data/mlx/benchmarks/cpp/autograd.cpp +39 -0
- data/mlx/benchmarks/cpp/compare_devices.cpp +27 -0
- data/mlx/benchmarks/cpp/irregular_strides.cpp +201 -0
- data/mlx/benchmarks/cpp/single_ops.cpp +288 -0
- data/mlx/benchmarks/cpp/time_utils.h +39 -0
- data/mlx/benchmarks/numpy/single_ops.py +39 -0
- data/mlx/benchmarks/numpy/time_utils.py +20 -0
- data/mlx/benchmarks/python/batch_matmul_bench.py +62 -0
- data/mlx/benchmarks/python/blas/bench_gemm.py +191 -0
- data/mlx/benchmarks/python/blas/bench_gemv.py +220 -0
- data/mlx/benchmarks/python/comparative/README.md +15 -0
- data/mlx/benchmarks/python/comparative/bench_mlx.py +519 -0
- data/mlx/benchmarks/python/comparative/bench_torch.py +482 -0
- data/mlx/benchmarks/python/comparative/compare.py +284 -0
- data/mlx/benchmarks/python/compile_bench.py +107 -0
- data/mlx/benchmarks/python/conv1d_bench.py +123 -0
- data/mlx/benchmarks/python/conv2d_bench_cpu.py +127 -0
- data/mlx/benchmarks/python/conv2d_train_bench_cpu.py +143 -0
- data/mlx/benchmarks/python/conv2d_transpose_bench_cpu.py +129 -0
- data/mlx/benchmarks/python/conv3d_bench_cpu.py +110 -0
- data/mlx/benchmarks/python/conv3d_train_bench_cpu.py +143 -0
- data/mlx/benchmarks/python/conv3d_transpose_bench_cpu.py +116 -0
- data/mlx/benchmarks/python/conv_bench.py +135 -0
- data/mlx/benchmarks/python/conv_transpose_bench.py +135 -0
- data/mlx/benchmarks/python/conv_unaligned_bench.py +107 -0
- data/mlx/benchmarks/python/distributed_bench.py +66 -0
- data/mlx/benchmarks/python/einsum_bench.py +84 -0
- data/mlx/benchmarks/python/fft_bench.py +118 -0
- data/mlx/benchmarks/python/gather_bench.py +52 -0
- data/mlx/benchmarks/python/gather_mm_bench.py +74 -0
- data/mlx/benchmarks/python/gather_qmm_bench.py +84 -0
- data/mlx/benchmarks/python/hadamard_bench.py +70 -0
- data/mlx/benchmarks/python/large_gemm_bench.py +119 -0
- data/mlx/benchmarks/python/layer_norm_bench.py +82 -0
- data/mlx/benchmarks/python/masked_scatter.py +212 -0
- data/mlx/benchmarks/python/rms_norm_bench.py +63 -0
- data/mlx/benchmarks/python/rope_bench.py +35 -0
- data/mlx/benchmarks/python/scatter_bench.py +96 -0
- data/mlx/benchmarks/python/sdpa_bench.py +223 -0
- data/mlx/benchmarks/python/sdpa_vector_bench.py +95 -0
- data/mlx/benchmarks/python/single_ops.py +132 -0
- data/mlx/benchmarks/python/synchronize_bench.py +55 -0
- data/mlx/benchmarks/python/time_utils.py +38 -0
- data/mlx/cmake/FindCUDNN.cmake +177 -0
- data/mlx/cmake/FindNCCL.cmake +54 -0
- data/mlx/cmake/Findnvpl.cmake +3 -0
- data/mlx/cmake/extension.cmake +50 -0
- data/mlx/docs/.clang-format +2 -0
- data/mlx/docs/.gitignore +3 -0
- data/mlx/docs/.nojekyll +0 -0
- data/mlx/docs/Doxyfile +51 -0
- data/mlx/docs/Makefile +18 -0
- data/mlx/docs/README.md +54 -0
- data/mlx/docs/index.html +1 -0
- data/mlx/docs/requirements.txt +5 -0
- data/mlx/docs/src/_static/distributed/m3-ultra-mesh-broken.png +0 -0
- data/mlx/docs/src/_static/distributed/m3-ultra-mesh.png +0 -0
- data/mlx/docs/src/_static/metal_debugger/capture.png +0 -0
- data/mlx/docs/src/_static/metal_debugger/schema.png +0 -0
- data/mlx/docs/src/_static/mlx_logo.png +0 -0
- data/mlx/docs/src/_static/mlx_logo_dark.png +0 -0
- data/mlx/docs/src/_static/tp_inference/all-to-sharded-linear.png +0 -0
- data/mlx/docs/src/_static/tp_inference/column-row-tp.png +0 -0
- data/mlx/docs/src/_static/tp_inference/llama-transformer.png +0 -0
- data/mlx/docs/src/_static/tp_inference/sharded-to-all-linear.png +0 -0
- data/mlx/docs/src/_templates/module-base-class.rst +33 -0
- data/mlx/docs/src/_templates/nn-module-template.rst +20 -0
- data/mlx/docs/src/_templates/optimizers-template.rst +20 -0
- data/mlx/docs/src/conf.py +99 -0
- data/mlx/docs/src/cpp/ops.rst +7 -0
- data/mlx/docs/src/dev/custom_metal_kernels.rst +445 -0
- data/mlx/docs/src/dev/extensions.rst +811 -0
- data/mlx/docs/src/dev/metal_debugger.rst +68 -0
- data/mlx/docs/src/dev/metal_logging.rst +40 -0
- data/mlx/docs/src/dev/mlx_in_cpp.rst +121 -0
- data/mlx/docs/src/examples/data_parallelism.rst +91 -0
- data/mlx/docs/src/examples/linear_regression.rst +77 -0
- data/mlx/docs/src/examples/llama-inference.rst +382 -0
- data/mlx/docs/src/examples/mlp.rst +134 -0
- data/mlx/docs/src/examples/tensor_parallelism.rst +239 -0
- data/mlx/docs/src/index.rst +96 -0
- data/mlx/docs/src/install.rst +340 -0
- data/mlx/docs/src/python/array.rst +65 -0
- data/mlx/docs/src/python/cuda.rst +9 -0
- data/mlx/docs/src/python/data_types.rst +78 -0
- data/mlx/docs/src/python/devices_and_streams.rst +21 -0
- data/mlx/docs/src/python/distributed.rst +22 -0
- data/mlx/docs/src/python/export.rst +14 -0
- data/mlx/docs/src/python/fast.rst +16 -0
- data/mlx/docs/src/python/fft.rst +24 -0
- data/mlx/docs/src/python/linalg.rst +27 -0
- data/mlx/docs/src/python/memory_management.rst +16 -0
- data/mlx/docs/src/python/metal.rst +12 -0
- data/mlx/docs/src/python/nn/distributed.rst +30 -0
- data/mlx/docs/src/python/nn/functions.rst +40 -0
- data/mlx/docs/src/python/nn/init.rst +45 -0
- data/mlx/docs/src/python/nn/layers.rst +74 -0
- data/mlx/docs/src/python/nn/losses.rst +25 -0
- data/mlx/docs/src/python/nn/module.rst +38 -0
- data/mlx/docs/src/python/nn.rst +186 -0
- data/mlx/docs/src/python/ops.rst +184 -0
- data/mlx/docs/src/python/optimizers/common_optimizers.rst +22 -0
- data/mlx/docs/src/python/optimizers/optimizer.rst +23 -0
- data/mlx/docs/src/python/optimizers/schedulers.rst +15 -0
- data/mlx/docs/src/python/optimizers.rst +78 -0
- data/mlx/docs/src/python/random.rst +48 -0
- data/mlx/docs/src/python/transforms.rst +22 -0
- data/mlx/docs/src/python/tree_utils.rst +23 -0
- data/mlx/docs/src/usage/compile.rst +516 -0
- data/mlx/docs/src/usage/distributed.rst +572 -0
- data/mlx/docs/src/usage/export.rst +288 -0
- data/mlx/docs/src/usage/function_transforms.rst +191 -0
- data/mlx/docs/src/usage/indexing.rst +194 -0
- data/mlx/docs/src/usage/launching_distributed.rst +234 -0
- data/mlx/docs/src/usage/lazy_evaluation.rst +144 -0
- data/mlx/docs/src/usage/numpy.rst +124 -0
- data/mlx/docs/src/usage/quick_start.rst +67 -0
- data/mlx/docs/src/usage/saving_and_loading.rst +81 -0
- data/mlx/docs/src/usage/unified_memory.rst +78 -0
- data/mlx/docs/src/usage/using_streams.rst +18 -0
- data/mlx/examples/cmake_project/CMakeLists.txt +22 -0
- data/mlx/examples/cmake_project/README.md +26 -0
- data/mlx/examples/cmake_project/example.cpp +14 -0
- data/mlx/examples/cpp/CMakeLists.txt +12 -0
- data/mlx/examples/cpp/distributed.cpp +22 -0
- data/mlx/examples/cpp/linear_regression.cpp +54 -0
- data/mlx/examples/cpp/logistic_regression.cpp +54 -0
- data/mlx/examples/cpp/metal_capture.cpp +31 -0
- data/mlx/examples/cpp/timer.h +20 -0
- data/mlx/examples/cpp/tutorial.cpp +99 -0
- data/mlx/examples/export/CMakeLists.txt +22 -0
- data/mlx/examples/export/README.md +49 -0
- data/mlx/examples/export/eval_mlp.cpp +25 -0
- data/mlx/examples/export/eval_mlp.py +52 -0
- data/mlx/examples/export/train_mlp.cpp +35 -0
- data/mlx/examples/export/train_mlp.py +76 -0
- data/mlx/examples/extensions/CMakeLists.txt +78 -0
- data/mlx/examples/extensions/README.md +24 -0
- data/mlx/examples/extensions/axpby/axpby.cpp +306 -0
- data/mlx/examples/extensions/axpby/axpby.h +90 -0
- data/mlx/examples/extensions/axpby/axpby.metal +47 -0
- data/mlx/examples/extensions/bindings.cpp +39 -0
- data/mlx/examples/extensions/mlx_sample_extensions/__init__.py +5 -0
- data/mlx/examples/extensions/pyproject.toml +8 -0
- data/mlx/examples/extensions/requirements.txt +4 -0
- data/mlx/examples/extensions/setup.py +18 -0
- data/mlx/examples/extensions/test.py +12 -0
- data/mlx/examples/python/linear_regression.py +46 -0
- data/mlx/examples/python/logistic_regression.py +49 -0
- data/mlx/examples/python/qqmm.py +117 -0
- data/mlx/mlx/3rdparty/.clang-format +2 -0
- data/mlx/mlx/3rdparty/pocketfft.h +3581 -0
- data/mlx/mlx/CMakeLists.txt +107 -0
- data/mlx/mlx/allocator.h +75 -0
- data/mlx/mlx/api.h +29 -0
- data/mlx/mlx/array.cpp +354 -0
- data/mlx/mlx/array.h +647 -0
- data/mlx/mlx/backend/common/CMakeLists.txt +9 -0
- data/mlx/mlx/backend/common/binary.h +97 -0
- data/mlx/mlx/backend/common/broadcasting.cpp +24 -0
- data/mlx/mlx/backend/common/broadcasting.h +11 -0
- data/mlx/mlx/backend/common/buffer_cache.h +158 -0
- data/mlx/mlx/backend/common/common.cpp +305 -0
- data/mlx/mlx/backend/common/compiled.cpp +243 -0
- data/mlx/mlx/backend/common/compiled.h +77 -0
- data/mlx/mlx/backend/common/copy.h +50 -0
- data/mlx/mlx/backend/common/hadamard.h +109 -0
- data/mlx/mlx/backend/common/load.cpp +57 -0
- data/mlx/mlx/backend/common/matmul.h +67 -0
- data/mlx/mlx/backend/common/reduce.cpp +154 -0
- data/mlx/mlx/backend/common/reduce.h +59 -0
- data/mlx/mlx/backend/common/slicing.cpp +71 -0
- data/mlx/mlx/backend/common/slicing.h +20 -0
- data/mlx/mlx/backend/common/ternary.h +85 -0
- data/mlx/mlx/backend/common/unary.h +29 -0
- data/mlx/mlx/backend/common/utils.cpp +231 -0
- data/mlx/mlx/backend/common/utils.h +205 -0
- data/mlx/mlx/backend/cpu/CMakeLists.txt +88 -0
- data/mlx/mlx/backend/cpu/arange.h +28 -0
- data/mlx/mlx/backend/cpu/arg_reduce.cpp +124 -0
- data/mlx/mlx/backend/cpu/binary.cpp +269 -0
- data/mlx/mlx/backend/cpu/binary.h +517 -0
- data/mlx/mlx/backend/cpu/binary_ops.h +98 -0
- data/mlx/mlx/backend/cpu/binary_two.h +166 -0
- data/mlx/mlx/backend/cpu/cholesky.cpp +85 -0
- data/mlx/mlx/backend/cpu/compiled.cpp +357 -0
- data/mlx/mlx/backend/cpu/compiled_preamble.h +12 -0
- data/mlx/mlx/backend/cpu/conv.cpp +1351 -0
- data/mlx/mlx/backend/cpu/copy.cpp +386 -0
- data/mlx/mlx/backend/cpu/copy.h +36 -0
- data/mlx/mlx/backend/cpu/device_info.cpp +113 -0
- data/mlx/mlx/backend/cpu/device_info.h +28 -0
- data/mlx/mlx/backend/cpu/distributed.cpp +103 -0
- data/mlx/mlx/backend/cpu/eig.cpp +281 -0
- data/mlx/mlx/backend/cpu/eigh.cpp +241 -0
- data/mlx/mlx/backend/cpu/encoder.cpp +16 -0
- data/mlx/mlx/backend/cpu/encoder.h +67 -0
- data/mlx/mlx/backend/cpu/eval.cpp +40 -0
- data/mlx/mlx/backend/cpu/eval.h +12 -0
- data/mlx/mlx/backend/cpu/fft.cpp +120 -0
- data/mlx/mlx/backend/cpu/gemm.h +26 -0
- data/mlx/mlx/backend/cpu/gemms/bnns.cpp +214 -0
- data/mlx/mlx/backend/cpu/gemms/cblas.cpp +134 -0
- data/mlx/mlx/backend/cpu/gemms/simd_bf16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_fp16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_gemm.h +139 -0
- data/mlx/mlx/backend/cpu/hadamard.cpp +121 -0
- data/mlx/mlx/backend/cpu/indexing.cpp +854 -0
- data/mlx/mlx/backend/cpu/inverse.cpp +160 -0
- data/mlx/mlx/backend/cpu/jit_compiler.cpp +166 -0
- data/mlx/mlx/backend/cpu/jit_compiler.h +20 -0
- data/mlx/mlx/backend/cpu/lapack.h +80 -0
- data/mlx/mlx/backend/cpu/logsumexp.cpp +139 -0
- data/mlx/mlx/backend/cpu/luf.cpp +120 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.ps1 +38 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.sh +41 -0
- data/mlx/mlx/backend/cpu/masked_mm.cpp +608 -0
- data/mlx/mlx/backend/cpu/matmul.cpp +166 -0
- data/mlx/mlx/backend/cpu/primitives.cpp +478 -0
- data/mlx/mlx/backend/cpu/qrf.cpp +147 -0
- data/mlx/mlx/backend/cpu/quantized.cpp +1370 -0
- data/mlx/mlx/backend/cpu/reduce.cpp +587 -0
- data/mlx/mlx/backend/cpu/scan.cpp +338 -0
- data/mlx/mlx/backend/cpu/select.cpp +95 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_fp16_simd.h +56 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_simd.h +329 -0
- data/mlx/mlx/backend/cpu/simd/base_simd.h +319 -0
- data/mlx/mlx/backend/cpu/simd/math.h +193 -0
- data/mlx/mlx/backend/cpu/simd/neon_fp16_simd.h +212 -0
- data/mlx/mlx/backend/cpu/simd/simd.h +4 -0
- data/mlx/mlx/backend/cpu/simd/type.h +11 -0
- data/mlx/mlx/backend/cpu/slicing.h +21 -0
- data/mlx/mlx/backend/cpu/softmax.cpp +170 -0
- data/mlx/mlx/backend/cpu/sort.cpp +481 -0
- data/mlx/mlx/backend/cpu/svd.cpp +289 -0
- data/mlx/mlx/backend/cpu/ternary.h +154 -0
- data/mlx/mlx/backend/cpu/threefry.cpp +31 -0
- data/mlx/mlx/backend/cpu/threefry.h +21 -0
- data/mlx/mlx/backend/cpu/unary.cpp +238 -0
- data/mlx/mlx/backend/cpu/unary.h +281 -0
- data/mlx/mlx/backend/cpu/unary_ops.h +175 -0
- data/mlx/mlx/backend/cuda/CMakeLists.txt +265 -0
- data/mlx/mlx/backend/cuda/allocator.cpp +451 -0
- data/mlx/mlx/backend/cuda/allocator.h +94 -0
- data/mlx/mlx/backend/cuda/arange.cu +68 -0
- data/mlx/mlx/backend/cuda/arg_reduce.cu +189 -0
- data/mlx/mlx/backend/cuda/bin2h.cmake +150 -0
- data/mlx/mlx/backend/cuda/binary/CMakeLists.txt +21 -0
- data/mlx/mlx/backend/cuda/binary/add.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/arctan2.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/binary.cuh +383 -0
- data/mlx/mlx/backend/cuda/binary/bitwise_binary.cu +27 -0
- data/mlx/mlx/backend/cuda/binary/divide.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/equal.cu +15 -0
- data/mlx/mlx/backend/cuda/binary/greater.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/greater_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/log_add_exp.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_and.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_or.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/maximum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/minimum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/multiply.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/not_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/power.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/remainder.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/subtract.cu +7 -0
- data/mlx/mlx/backend/cuda/binary_two.cu +412 -0
- data/mlx/mlx/backend/cuda/compiled.cpp +357 -0
- data/mlx/mlx/backend/cuda/conv/conv.h +126 -0
- data/mlx/mlx/backend/cuda/conv/gemm_conv.cu +217 -0
- data/mlx/mlx/backend/cuda/conv/gemm_grouped_conv.cu +231 -0
- data/mlx/mlx/backend/cuda/conv.cpp +403 -0
- data/mlx/mlx/backend/cuda/copy/copy.cuh +55 -0
- data/mlx/mlx/backend/cuda/copy/copy_contiguous.cu +88 -0
- data/mlx/mlx/backend/cuda/copy/copy_general.cu +171 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_dynamic.cu +118 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_input.cu +229 -0
- data/mlx/mlx/backend/cuda/copy.cu +132 -0
- data/mlx/mlx/backend/cuda/cublas_utils.cpp +222 -0
- data/mlx/mlx/backend/cuda/cublas_utils.h +95 -0
- data/mlx/mlx/backend/cuda/cuda.h +21 -0
- data/mlx/mlx/backend/cuda/cuda_utils.h +90 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.cpp +133 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.h +187 -0
- data/mlx/mlx/backend/cuda/custom_kernel.cpp +379 -0
- data/mlx/mlx/backend/cuda/cutlass_utils.cuh +46 -0
- data/mlx/mlx/backend/cuda/delayload.cpp +80 -0
- data/mlx/mlx/backend/cuda/device/atomic_ops.cuh +63 -0
- data/mlx/mlx/backend/cuda/device/binary_ops.cuh +300 -0
- data/mlx/mlx/backend/cuda/device/cast_op.cuh +118 -0
- data/mlx/mlx/backend/cuda/device/complex.cuh +60 -0
- data/mlx/mlx/backend/cuda/device/config.h +12 -0
- data/mlx/mlx/backend/cuda/device/fp16_math.cuh +96 -0
- data/mlx/mlx/backend/cuda/device/gather.cuh +53 -0
- data/mlx/mlx/backend/cuda/device/gather_axis.cuh +65 -0
- data/mlx/mlx/backend/cuda/device/indexing.cuh +30 -0
- data/mlx/mlx/backend/cuda/device/scatter.cuh +68 -0
- data/mlx/mlx/backend/cuda/device/scatter_axis.cuh +67 -0
- data/mlx/mlx/backend/cuda/device/scatter_ops.cuh +44 -0
- data/mlx/mlx/backend/cuda/device/ternary_ops.cuh +13 -0
- data/mlx/mlx/backend/cuda/device/unary_ops.cuh +350 -0
- data/mlx/mlx/backend/cuda/device/utils.cuh +464 -0
- data/mlx/mlx/backend/cuda/device.cpp +522 -0
- data/mlx/mlx/backend/cuda/device.h +195 -0
- data/mlx/mlx/backend/cuda/device_info.cpp +232 -0
- data/mlx/mlx/backend/cuda/distributed.cu +121 -0
- data/mlx/mlx/backend/cuda/eval.cpp +66 -0
- data/mlx/mlx/backend/cuda/event.cu +415 -0
- data/mlx/mlx/backend/cuda/event.h +79 -0
- data/mlx/mlx/backend/cuda/fence.cpp +42 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.cpp +233 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.h +114 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_0.cpp +77 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu +329 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.cu +327 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.h +34 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm.h +25 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm_unaligned.cu +358 -0
- data/mlx/mlx/backend/cuda/indexing.cpp +434 -0
- data/mlx/mlx/backend/cuda/jit_module.cpp +443 -0
- data/mlx/mlx/backend/cuda/jit_module.h +120 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cu +52 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cuh +148 -0
- data/mlx/mlx/backend/cuda/layer_norm.cu +417 -0
- data/mlx/mlx/backend/cuda/load.cpp +60 -0
- data/mlx/mlx/backend/cuda/logsumexp.cu +161 -0
- data/mlx/mlx/backend/cuda/lru_cache.h +190 -0
- data/mlx/mlx/backend/cuda/matmul.cpp +373 -0
- data/mlx/mlx/backend/cuda/no_cuda.cpp +47 -0
- data/mlx/mlx/backend/cuda/primitives.cpp +46 -0
- data/mlx/mlx/backend/cuda/quantized/affine_quantize.cu +329 -0
- data/mlx/mlx/backend/cuda/quantized/convert_fp8.cu +19 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.cpp +206 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.h +88 -0
- data/mlx/mlx/backend/cuda/quantized/cuda_fp4.h +100 -0
- data/mlx/mlx/backend/cuda/quantized/fp_quantize.cu +496 -0
- data/mlx/mlx/backend/cuda/quantized/mxfp8_quantize.cuh +32 -0
- data/mlx/mlx/backend/cuda/quantized/no_qqmm_impl.cpp +26 -0
- data/mlx/mlx/backend/cuda/quantized/nvfp4_quantize.cuh +334 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.cu +304 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.h +21 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm.cpp +158 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.cpp +50 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.h +26 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.cu +227 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.h +30 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.cpp +85 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.h +53 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.cuh +88 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.h +50 -0
- data/mlx/mlx/backend/cuda/random.cu +202 -0
- data/mlx/mlx/backend/cuda/reduce/all_reduce.cu +159 -0
- data/mlx/mlx/backend/cuda/reduce/col_reduce.cu +510 -0
- data/mlx/mlx/backend/cuda/reduce/init_reduce.cu +50 -0
- data/mlx/mlx/backend/cuda/reduce/reduce.cuh +71 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_ops.cuh +211 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_utils.cuh +145 -0
- data/mlx/mlx/backend/cuda/reduce/row_reduce.cu +361 -0
- data/mlx/mlx/backend/cuda/reduce.cu +73 -0
- data/mlx/mlx/backend/cuda/rms_norm.cu +536 -0
- data/mlx/mlx/backend/cuda/rope.cu +429 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cpp +681 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu +796 -0
- data/mlx/mlx/backend/cuda/scan.cu +468 -0
- data/mlx/mlx/backend/cuda/slicing.cpp +111 -0
- data/mlx/mlx/backend/cuda/softmax.cu +162 -0
- data/mlx/mlx/backend/cuda/sort.cu +1076 -0
- data/mlx/mlx/backend/cuda/steel/defines.cuh +9 -0
- data/mlx/mlx/backend/cuda/steel/gemm.cuh +101 -0
- data/mlx/mlx/backend/cuda/steel/mma.cuh +117 -0
- data/mlx/mlx/backend/cuda/steel/tiles.cuh +450 -0
- data/mlx/mlx/backend/cuda/steel/utils.cuh +89 -0
- data/mlx/mlx/backend/cuda/ternary.cu +271 -0
- data/mlx/mlx/backend/cuda/unary/CMakeLists.txt +34 -0
- data/mlx/mlx/backend/cuda/unary/abs.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/bitwise_invert.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/ceil.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/conjugate.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf_inv.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/exp.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/expm1.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/floor.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/imag.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/log.cu +21 -0
- data/mlx/mlx/backend/cuda/unary/log1p.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/logical_not.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/negative.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/real.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/round.cu +18 -0
- data/mlx/mlx/backend/cuda/unary/sigmoid.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sign.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sqrt.cu +15 -0
- data/mlx/mlx/backend/cuda/unary/square.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/unary.cuh +224 -0
- data/mlx/mlx/backend/cuda/utils.cpp +116 -0
- data/mlx/mlx/backend/cuda/utils.h +49 -0
- data/mlx/mlx/backend/cuda/vector_types.cuh +48 -0
- data/mlx/mlx/backend/cuda/worker.cpp +79 -0
- data/mlx/mlx/backend/cuda/worker.h +55 -0
- data/mlx/mlx/backend/gpu/CMakeLists.txt +5 -0
- data/mlx/mlx/backend/gpu/copy.cpp +89 -0
- data/mlx/mlx/backend/gpu/copy.h +57 -0
- data/mlx/mlx/backend/gpu/device_info.h +36 -0
- data/mlx/mlx/backend/gpu/eval.h +18 -0
- data/mlx/mlx/backend/gpu/primitives.cpp +307 -0
- data/mlx/mlx/backend/gpu/slicing.cpp +44 -0
- data/mlx/mlx/backend/gpu/slicing.h +36 -0
- data/mlx/mlx/backend/metal/CMakeLists.txt +144 -0
- data/mlx/mlx/backend/metal/allocator.cpp +279 -0
- data/mlx/mlx/backend/metal/allocator.h +79 -0
- data/mlx/mlx/backend/metal/binary.cpp +257 -0
- data/mlx/mlx/backend/metal/binary.h +33 -0
- data/mlx/mlx/backend/metal/compiled.cpp +471 -0
- data/mlx/mlx/backend/metal/conv.cpp +1118 -0
- data/mlx/mlx/backend/metal/copy.cpp +235 -0
- data/mlx/mlx/backend/metal/custom_kernel.cpp +430 -0
- data/mlx/mlx/backend/metal/device.cpp +816 -0
- data/mlx/mlx/backend/metal/device.h +289 -0
- data/mlx/mlx/backend/metal/device_info.cpp +58 -0
- data/mlx/mlx/backend/metal/distributed.cpp +38 -0
- data/mlx/mlx/backend/metal/eval.cpp +97 -0
- data/mlx/mlx/backend/metal/event.cpp +62 -0
- data/mlx/mlx/backend/metal/fence.cpp +162 -0
- data/mlx/mlx/backend/metal/fft.cpp +807 -0
- data/mlx/mlx/backend/metal/hadamard.cpp +198 -0
- data/mlx/mlx/backend/metal/indexing.cpp +727 -0
- data/mlx/mlx/backend/metal/jit/includes.h +58 -0
- data/mlx/mlx/backend/metal/jit/indexing.h +76 -0
- data/mlx/mlx/backend/metal/jit_kernels.cpp +1118 -0
- data/mlx/mlx/backend/metal/kernels/CMakeLists.txt +193 -0
- data/mlx/mlx/backend/metal/kernels/arange.h +9 -0
- data/mlx/mlx/backend/metal/kernels/arange.metal +20 -0
- data/mlx/mlx/backend/metal/kernels/arg_reduce.metal +182 -0
- data/mlx/mlx/backend/metal/kernels/atomic.h +345 -0
- data/mlx/mlx/backend/metal/kernels/bf16.h +16 -0
- data/mlx/mlx/backend/metal/kernels/bf16_math.h +380 -0
- data/mlx/mlx/backend/metal/kernels/binary.h +199 -0
- data/mlx/mlx/backend/metal/kernels/binary.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/binary_ops.h +330 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.h +244 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.metal +54 -0
- data/mlx/mlx/backend/metal/kernels/cexpf.h +134 -0
- data/mlx/mlx/backend/metal/kernels/complex.h +173 -0
- data/mlx/mlx/backend/metal/kernels/conv.metal +701 -0
- data/mlx/mlx/backend/metal/kernels/copy.h +276 -0
- data/mlx/mlx/backend/metal/kernels/copy.metal +75 -0
- data/mlx/mlx/backend/metal/kernels/defines.h +24 -0
- data/mlx/mlx/backend/metal/kernels/erf.h +69 -0
- data/mlx/mlx/backend/metal/kernels/expm1f.h +90 -0
- data/mlx/mlx/backend/metal/kernels/fence.metal +52 -0
- data/mlx/mlx/backend/metal/kernels/fft/radix.h +328 -0
- data/mlx/mlx/backend/metal/kernels/fft/readwrite.h +624 -0
- data/mlx/mlx/backend/metal/kernels/fft.h +486 -0
- data/mlx/mlx/backend/metal/kernels/fft.metal +67 -0
- data/mlx/mlx/backend/metal/kernels/fp4.h +48 -0
- data/mlx/mlx/backend/metal/kernels/fp8.h +80 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.h +1850 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.metal +153 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.h +1044 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.metal +79 -0
- data/mlx/mlx/backend/metal/kernels/gemv.metal +868 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.h +827 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/hadamard.h +182 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather.h +51 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_axis.h +44 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_front.h +24 -0
- data/mlx/mlx/backend/metal/kernels/indexing/indexing.h +23 -0
- data/mlx/mlx/backend/metal/kernels/indexing/masked_scatter.h +41 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter.h +59 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter_axis.h +52 -0
- data/mlx/mlx/backend/metal/kernels/layer_norm.metal +433 -0
- data/mlx/mlx/backend/metal/kernels/logging.h +26 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.h +140 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.metal +18 -0
- data/mlx/mlx/backend/metal/kernels/quantized.h +2508 -0
- data/mlx/mlx/backend/metal/kernels/quantized.metal +144 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.h +1705 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.metal +106 -0
- data/mlx/mlx/backend/metal/kernels/quantized_utils.h +90 -0
- data/mlx/mlx/backend/metal/kernels/random.metal +103 -0
- data/mlx/mlx/backend/metal/kernels/reduce.h +5 -0
- data/mlx/mlx/backend/metal/kernels/reduce.metal +169 -0
- data/mlx/mlx/backend/metal/kernels/reduce_utils.h +6 -0
- data/mlx/mlx/backend/metal/kernels/reduction/ops.h +275 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_all.h +66 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_col.h +398 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_init.h +8 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_row.h +369 -0
- data/mlx/mlx/backend/metal/kernels/rms_norm.metal +391 -0
- data/mlx/mlx/backend/metal/kernels/rope.metal +229 -0
- data/mlx/mlx/backend/metal/kernels/scaled_dot_product_attention.metal +44 -0
- data/mlx/mlx/backend/metal/kernels/scan.h +514 -0
- data/mlx/mlx/backend/metal/kernels/scan.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/sdpa_vector.h +394 -0
- data/mlx/mlx/backend/metal/kernels/softmax.h +190 -0
- data/mlx/mlx/backend/metal/kernels/softmax.metal +24 -0
- data/mlx/mlx/backend/metal/kernels/sort.h +719 -0
- data/mlx/mlx/backend/metal/kernels/sort.metal +80 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/attn.h +296 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.h +471 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.metal +27 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.h +481 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.metal +28 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/loader.h +264 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/mma.h +750 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/nax.h +1076 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/params.h +44 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/transforms.h +71 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/conv.h +13 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.h +176 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.metal +56 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.h +225 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.metal +47 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loader.h +6 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_l.h +451 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_n.h +319 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_general.h +381 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/params.h +62 -0
- data/mlx/mlx/backend/metal/kernels/steel/defines.h +7 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm.h +295 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm_nax.h +157 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.h +346 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.metal +34 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.h +219 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.h +459 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.metal +59 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.h +143 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.metal +37 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.h +719 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.h +266 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.metal +43 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.h +227 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.h +152 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/loader.h +137 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/mma.h +1146 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/nax.h +1084 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/params.h +65 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/transforms.h +72 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/integral_constant.h +134 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/type_traits.h +55 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils.h +42 -0
- data/mlx/mlx/backend/metal/kernels/ternary.h +145 -0
- data/mlx/mlx/backend/metal/kernels/ternary.metal +48 -0
- data/mlx/mlx/backend/metal/kernels/ternary_ops.h +10 -0
- data/mlx/mlx/backend/metal/kernels/unary.h +63 -0
- data/mlx/mlx/backend/metal/kernels/unary.metal +115 -0
- data/mlx/mlx/backend/metal/kernels/unary_ops.h +454 -0
- data/mlx/mlx/backend/metal/kernels/utils.h +445 -0
- data/mlx/mlx/backend/metal/kernels.h +375 -0
- data/mlx/mlx/backend/metal/logsumexp.cpp +95 -0
- data/mlx/mlx/backend/metal/make_compiled_preamble.sh +120 -0
- data/mlx/mlx/backend/metal/matmul.cpp +2572 -0
- data/mlx/mlx/backend/metal/matmul.h +144 -0
- data/mlx/mlx/backend/metal/metal.cpp +50 -0
- data/mlx/mlx/backend/metal/metal.h +25 -0
- data/mlx/mlx/backend/metal/no_metal.cpp +42 -0
- data/mlx/mlx/backend/metal/nojit_kernels.cpp +414 -0
- data/mlx/mlx/backend/metal/normalization.cpp +433 -0
- data/mlx/mlx/backend/metal/primitives.cpp +242 -0
- data/mlx/mlx/backend/metal/quantized.cpp +1651 -0
- data/mlx/mlx/backend/metal/reduce.cpp +1038 -0
- data/mlx/mlx/backend/metal/reduce.h +41 -0
- data/mlx/mlx/backend/metal/resident.cpp +100 -0
- data/mlx/mlx/backend/metal/resident.h +32 -0
- data/mlx/mlx/backend/metal/rope.cpp +165 -0
- data/mlx/mlx/backend/metal/scaled_dot_product_attention.cpp +798 -0
- data/mlx/mlx/backend/metal/scan.cpp +145 -0
- data/mlx/mlx/backend/metal/scan.h +17 -0
- data/mlx/mlx/backend/metal/slicing.cpp +99 -0
- data/mlx/mlx/backend/metal/softmax.cpp +87 -0
- data/mlx/mlx/backend/metal/sort.cpp +368 -0
- data/mlx/mlx/backend/metal/ternary.cpp +160 -0
- data/mlx/mlx/backend/metal/ternary.h +21 -0
- data/mlx/mlx/backend/metal/unary.cpp +161 -0
- data/mlx/mlx/backend/metal/unary.h +21 -0
- data/mlx/mlx/backend/metal/utils.cpp +77 -0
- data/mlx/mlx/backend/metal/utils.h +99 -0
- data/mlx/mlx/backend/no_cpu/CMakeLists.txt +7 -0
- data/mlx/mlx/backend/no_cpu/compiled.cpp +24 -0
- data/mlx/mlx/backend/no_cpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_cpu/primitives.cpp +146 -0
- data/mlx/mlx/backend/no_gpu/CMakeLists.txt +8 -0
- data/mlx/mlx/backend/no_gpu/allocator.cpp +134 -0
- data/mlx/mlx/backend/no_gpu/apple_memory.h +16 -0
- data/mlx/mlx/backend/no_gpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_gpu/eval.cpp +24 -0
- data/mlx/mlx/backend/no_gpu/event.cpp +53 -0
- data/mlx/mlx/backend/no_gpu/fence.cpp +54 -0
- data/mlx/mlx/backend/no_gpu/linux_memory.h +22 -0
- data/mlx/mlx/backend/no_gpu/primitives.cpp +185 -0
- data/mlx/mlx/compile.cpp +1243 -0
- data/mlx/mlx/compile.h +45 -0
- data/mlx/mlx/compile_impl.h +70 -0
- data/mlx/mlx/device.cpp +72 -0
- data/mlx/mlx/device.h +56 -0
- data/mlx/mlx/distributed/CMakeLists.txt +14 -0
- data/mlx/mlx/distributed/distributed.cpp +197 -0
- data/mlx/mlx/distributed/distributed.h +61 -0
- data/mlx/mlx/distributed/distributed_impl.h +59 -0
- data/mlx/mlx/distributed/jaccl/CMakeLists.txt +12 -0
- data/mlx/mlx/distributed/jaccl/jaccl.cpp +178 -0
- data/mlx/mlx/distributed/jaccl/jaccl.h +12 -0
- data/mlx/mlx/distributed/jaccl/mesh.cpp +451 -0
- data/mlx/mlx/distributed/jaccl/mesh.h +122 -0
- data/mlx/mlx/distributed/jaccl/no_jaccl.cpp +20 -0
- data/mlx/mlx/distributed/jaccl/ring.cpp +692 -0
- data/mlx/mlx/distributed/jaccl/ring.h +178 -0
- data/mlx/mlx/distributed/jaccl/utils.cpp +329 -0
- data/mlx/mlx/distributed/jaccl/utils.h +342 -0
- data/mlx/mlx/distributed/mpi/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/mpi/mpi.cpp +501 -0
- data/mlx/mlx/distributed/mpi/mpi.h +12 -0
- data/mlx/mlx/distributed/mpi/mpi_declarations.h +28 -0
- data/mlx/mlx/distributed/mpi/no_mpi.cpp +20 -0
- data/mlx/mlx/distributed/nccl/CMakeLists.txt +26 -0
- data/mlx/mlx/distributed/nccl/nccl.cpp +443 -0
- data/mlx/mlx/distributed/nccl/nccl.h +12 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/CMakeLists.txt +1 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/nccl_stubs.cpp +54 -0
- data/mlx/mlx/distributed/nccl/no_nccl.cpp +20 -0
- data/mlx/mlx/distributed/ops.cpp +186 -0
- data/mlx/mlx/distributed/ops.h +57 -0
- data/mlx/mlx/distributed/primitives.cpp +95 -0
- data/mlx/mlx/distributed/primitives.h +156 -0
- data/mlx/mlx/distributed/reduction_ops.h +38 -0
- data/mlx/mlx/distributed/ring/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/ring/no_ring.cpp +20 -0
- data/mlx/mlx/distributed/ring/ring.cpp +870 -0
- data/mlx/mlx/distributed/ring/ring.h +12 -0
- data/mlx/mlx/distributed/utils.cpp +206 -0
- data/mlx/mlx/distributed/utils.h +67 -0
- data/mlx/mlx/dtype.cpp +197 -0
- data/mlx/mlx/dtype.h +116 -0
- data/mlx/mlx/dtype_utils.cpp +42 -0
- data/mlx/mlx/dtype_utils.h +119 -0
- data/mlx/mlx/einsum.cpp +941 -0
- data/mlx/mlx/einsum.h +23 -0
- data/mlx/mlx/event.h +58 -0
- data/mlx/mlx/export.cpp +1130 -0
- data/mlx/mlx/export.h +137 -0
- data/mlx/mlx/export_impl.h +99 -0
- data/mlx/mlx/fast.cpp +941 -0
- data/mlx/mlx/fast.h +103 -0
- data/mlx/mlx/fast_primitives.h +427 -0
- data/mlx/mlx/fence.h +39 -0
- data/mlx/mlx/fft.cpp +262 -0
- data/mlx/mlx/fft.h +159 -0
- data/mlx/mlx/graph_utils.cpp +175 -0
- data/mlx/mlx/graph_utils.h +67 -0
- data/mlx/mlx/io/CMakeLists.txt +25 -0
- data/mlx/mlx/io/gguf.cpp +470 -0
- data/mlx/mlx/io/gguf.h +20 -0
- data/mlx/mlx/io/gguf_quants.cpp +164 -0
- data/mlx/mlx/io/load.cpp +397 -0
- data/mlx/mlx/io/load.h +175 -0
- data/mlx/mlx/io/no_gguf.cpp +20 -0
- data/mlx/mlx/io/no_safetensors.cpp +37 -0
- data/mlx/mlx/io/safetensors.cpp +234 -0
- data/mlx/mlx/io.h +61 -0
- data/mlx/mlx/linalg.cpp +708 -0
- data/mlx/mlx/linalg.h +115 -0
- data/mlx/mlx/memory.h +80 -0
- data/mlx/mlx/mlx.h +25 -0
- data/mlx/mlx/ops.cpp +6094 -0
- data/mlx/mlx/ops.h +1610 -0
- data/mlx/mlx/primitives.cpp +5850 -0
- data/mlx/mlx/primitives.h +2525 -0
- data/mlx/mlx/random.cpp +492 -0
- data/mlx/mlx/random.h +283 -0
- data/mlx/mlx/scheduler.cpp +73 -0
- data/mlx/mlx/scheduler.h +189 -0
- data/mlx/mlx/small_vector.h +540 -0
- data/mlx/mlx/stream.h +42 -0
- data/mlx/mlx/threadpool.h +133 -0
- data/mlx/mlx/transforms.cpp +1065 -0
- data/mlx/mlx/transforms.h +231 -0
- data/mlx/mlx/transforms_impl.h +88 -0
- data/mlx/mlx/types/bf16.h +187 -0
- data/mlx/mlx/types/complex.h +113 -0
- data/mlx/mlx/types/fp16.h +234 -0
- data/mlx/mlx/types/half_types.h +58 -0
- data/mlx/mlx/types/limits.h +70 -0
- data/mlx/mlx/utils.cpp +302 -0
- data/mlx/mlx/utils.h +174 -0
- data/mlx/mlx/version.cpp +11 -0
- data/mlx/mlx/version.h +22 -0
- data/mlx/mlx.pc.in +52 -0
- data/mlx/pyproject.toml +7 -0
- data/mlx/python/mlx/__main__.py +27 -0
- data/mlx/python/mlx/_distributed_utils/common.py +135 -0
- data/mlx/python/mlx/_distributed_utils/config.py +631 -0
- data/mlx/python/mlx/_distributed_utils/launch.py +570 -0
- data/mlx/python/mlx/_reprlib_fix.py +16 -0
- data/mlx/python/mlx/_stub_patterns.txt +36 -0
- data/mlx/python/mlx/extension.py +88 -0
- data/mlx/python/mlx/nn/__init__.py +5 -0
- data/mlx/python/mlx/nn/init.py +441 -0
- data/mlx/python/mlx/nn/layers/__init__.py +105 -0
- data/mlx/python/mlx/nn/layers/activations.py +661 -0
- data/mlx/python/mlx/nn/layers/base.py +675 -0
- data/mlx/python/mlx/nn/layers/containers.py +24 -0
- data/mlx/python/mlx/nn/layers/convolution.py +232 -0
- data/mlx/python/mlx/nn/layers/convolution_transpose.py +242 -0
- data/mlx/python/mlx/nn/layers/distributed.py +601 -0
- data/mlx/python/mlx/nn/layers/dropout.py +137 -0
- data/mlx/python/mlx/nn/layers/embedding.py +53 -0
- data/mlx/python/mlx/nn/layers/linear.py +180 -0
- data/mlx/python/mlx/nn/layers/normalization.py +363 -0
- data/mlx/python/mlx/nn/layers/pooling.py +398 -0
- data/mlx/python/mlx/nn/layers/positional_encoding.py +162 -0
- data/mlx/python/mlx/nn/layers/quantized.py +426 -0
- data/mlx/python/mlx/nn/layers/recurrent.py +289 -0
- data/mlx/python/mlx/nn/layers/transformer.py +354 -0
- data/mlx/python/mlx/nn/layers/upsample.py +277 -0
- data/mlx/python/mlx/nn/losses.py +610 -0
- data/mlx/python/mlx/nn/utils.py +165 -0
- data/mlx/python/mlx/optimizers/__init__.py +4 -0
- data/mlx/python/mlx/optimizers/optimizers.py +976 -0
- data/mlx/python/mlx/optimizers/schedulers.py +158 -0
- data/mlx/python/mlx/py.typed +1 -0
- data/mlx/python/mlx/utils.py +325 -0
- data/mlx/python/src/CMakeLists.txt +96 -0
- data/mlx/python/src/array.cpp +1525 -0
- data/mlx/python/src/buffer.h +124 -0
- data/mlx/python/src/constants.cpp +15 -0
- data/mlx/python/src/convert.cpp +504 -0
- data/mlx/python/src/convert.h +50 -0
- data/mlx/python/src/cuda.cpp +19 -0
- data/mlx/python/src/device.cpp +98 -0
- data/mlx/python/src/distributed.cpp +352 -0
- data/mlx/python/src/export.cpp +356 -0
- data/mlx/python/src/fast.cpp +627 -0
- data/mlx/python/src/fft.cpp +514 -0
- data/mlx/python/src/indexing.cpp +1016 -0
- data/mlx/python/src/indexing.h +41 -0
- data/mlx/python/src/linalg.cpp +663 -0
- data/mlx/python/src/load.cpp +531 -0
- data/mlx/python/src/load.h +51 -0
- data/mlx/python/src/memory.cpp +125 -0
- data/mlx/python/src/metal.cpp +98 -0
- data/mlx/python/src/mlx.cpp +51 -0
- data/mlx/python/src/mlx_func.cpp +116 -0
- data/mlx/python/src/mlx_func.h +31 -0
- data/mlx/python/src/ops.cpp +5545 -0
- data/mlx/python/src/random.cpp +516 -0
- data/mlx/python/src/small_vector.h +76 -0
- data/mlx/python/src/stream.cpp +147 -0
- data/mlx/python/src/transforms.cpp +1542 -0
- data/mlx/python/src/trees.cpp +311 -0
- data/mlx/python/src/trees.h +62 -0
- data/mlx/python/src/utils.cpp +98 -0
- data/mlx/python/src/utils.h +78 -0
- data/mlx/python/tests/__main__.py +5 -0
- data/mlx/python/tests/cuda_skip.py +62 -0
- data/mlx/python/tests/mlx_distributed_tests.py +314 -0
- data/mlx/python/tests/mlx_tests.py +116 -0
- data/mlx/python/tests/mpi_test_distributed.py +142 -0
- data/mlx/python/tests/nccl_test_distributed.py +52 -0
- data/mlx/python/tests/ring_test_distributed.py +131 -0
- data/mlx/python/tests/test_array.py +2139 -0
- data/mlx/python/tests/test_autograd.py +880 -0
- data/mlx/python/tests/test_bf16.py +196 -0
- data/mlx/python/tests/test_blas.py +1429 -0
- data/mlx/python/tests/test_compile.py +1277 -0
- data/mlx/python/tests/test_constants.py +41 -0
- data/mlx/python/tests/test_conv.py +1198 -0
- data/mlx/python/tests/test_conv_transpose.py +810 -0
- data/mlx/python/tests/test_device.py +150 -0
- data/mlx/python/tests/test_double.py +306 -0
- data/mlx/python/tests/test_einsum.py +363 -0
- data/mlx/python/tests/test_eval.py +200 -0
- data/mlx/python/tests/test_export_import.py +614 -0
- data/mlx/python/tests/test_fast.py +923 -0
- data/mlx/python/tests/test_fast_sdpa.py +647 -0
- data/mlx/python/tests/test_fft.py +323 -0
- data/mlx/python/tests/test_graph.py +37 -0
- data/mlx/python/tests/test_init.py +139 -0
- data/mlx/python/tests/test_linalg.py +621 -0
- data/mlx/python/tests/test_load.py +447 -0
- data/mlx/python/tests/test_losses.py +427 -0
- data/mlx/python/tests/test_memory.py +77 -0
- data/mlx/python/tests/test_nn.py +1986 -0
- data/mlx/python/tests/test_ops.py +3261 -0
- data/mlx/python/tests/test_optimizers.py +584 -0
- data/mlx/python/tests/test_quantized.py +1160 -0
- data/mlx/python/tests/test_random.py +392 -0
- data/mlx/python/tests/test_reduce.py +223 -0
- data/mlx/python/tests/test_tree.py +96 -0
- data/mlx/python/tests/test_upsample.py +100 -0
- data/mlx/python/tests/test_vmap.py +860 -0
- data/mlx/setup.py +315 -0
- data/mlx/tests/CMakeLists.txt +44 -0
- data/mlx/tests/allocator_tests.cpp +41 -0
- data/mlx/tests/arg_reduce_tests.cpp +204 -0
- data/mlx/tests/array_tests.cpp +663 -0
- data/mlx/tests/autograd_tests.cpp +1399 -0
- data/mlx/tests/blas_tests.cpp +110 -0
- data/mlx/tests/compile_tests.cpp +818 -0
- data/mlx/tests/creations_tests.cpp +239 -0
- data/mlx/tests/custom_vjp_tests.cpp +55 -0
- data/mlx/tests/device_tests.cpp +35 -0
- data/mlx/tests/einsum_tests.cpp +85 -0
- data/mlx/tests/eval_tests.cpp +93 -0
- data/mlx/tests/export_import_tests.cpp +164 -0
- data/mlx/tests/fft_tests.cpp +366 -0
- data/mlx/tests/gpu_tests.cpp +523 -0
- data/mlx/tests/linalg_tests.cpp +639 -0
- data/mlx/tests/load_tests.cpp +270 -0
- data/mlx/tests/ops_tests.cpp +4159 -0
- data/mlx/tests/random_tests.cpp +716 -0
- data/mlx/tests/scheduler_tests.cpp +121 -0
- data/mlx/tests/tests.cpp +26 -0
- data/mlx/tests/utils_tests.cpp +67 -0
- data/mlx/tests/vmap_tests.cpp +547 -0
- metadata +958 -0
|
@@ -0,0 +1,3261 @@
|
|
|
1
|
+
# Copyright © 2023-2024 Apple Inc.
|
|
2
|
+
|
|
3
|
+
import math
|
|
4
|
+
import os
|
|
5
|
+
import unittest
|
|
6
|
+
from itertools import permutations, product
|
|
7
|
+
|
|
8
|
+
import mlx.core as mx
|
|
9
|
+
import mlx_tests
|
|
10
|
+
import numpy as np
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def np_wrap_between(x, a):
    """Wraps `x` between `[-a, a]`.

    Computed as ((x + a) mod 2a) - a for scalars and ndarrays alike.
    `np.remainder` with a positive modulus already yields a non-negative
    result, so the negative-remainder correction is a defensive guard.
    """
    two_a = 2 * a
    zero = 0
    rem = np.remainder(np.add(x, a), two_a)
    if isinstance(rem, np.ndarray):
        # Use np.where for elementwise selection. np.select would treat
        # the boolean array as a *list* of separate conditions and could
        # broadcast a single choice over the entire output.
        rem = np.where(rem < zero, np.add(rem, two_a), rem)
    else:
        rem = np.add(rem, two_a) if rem < zero else rem
    return np.subtract(rem, a)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def np_logaddexp(x1: np.ndarray, x2: np.ndarray):
    """Reference log(exp(x1) + exp(x2)) that also supports complex input.

    For floating dtypes this mirrors ``np.logaddexp`` and propagates NaN
    (e.g. from ``inf - inf``) elementwise. For non-floating (complex)
    dtypes the imaginary part of the result is wrapped into [-pi, pi].
    """
    amax = np.maximum(x1, x2)
    if np.issubdtype(x1.dtype, np.floating):
        delta = np.subtract(x1, x2)
        nan_fallback = np.add(x1, x2)  # NaN in delta -> NaN result
        stable = np.add(amax, np.log1p(np.exp(np.negative(np.abs(delta)))))
        if isinstance(delta, np.ndarray):
            # np.where, not np.select: np.select interprets a boolean
            # array as a list of conditions, so a single NaN element
            # would have clobbered the whole output with one choice.
            return np.where(np.isnan(delta), nan_fallback, stable)
        else:
            return nan_fallback if np.isnan(delta) else stable
    else:
        # Complex branch: numerically stable form with the imaginary
        # part of the log wrapped back into the principal range.
        delta = np.subtract(np.add(x1, x2), np.multiply(amax, 2))
        out = np.add(amax, np.log1p(np.exp(delta)))
        return np.real(out) + 1j * np_wrap_between(np.imag(out), np.pi)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def np_cumlogaddexp(x1: np.ndarray, axis: int = -1):
    """Cumulative logaddexp of `x1` along `axis`.

    The previous implementation read ``out.shape[axis]`` but always
    indexed ``out[i]`` along axis 0, so `axis` was only honored for 1-D
    input. Scanning through a moved-axis view makes it correct for any
    rank while leaving the 1-D default-axis behavior unchanged.
    """
    out = x1.copy()
    view = np.moveaxis(out, axis, 0)  # a view: writes land in `out`
    for i in range(1, view.shape[0]):
        view[i] = np_logaddexp(view[i], view[i - 1])
    return out
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class TestOps(mlx_tests.MLXTestCase):
|
|
55
|
+
def test_full_ones_zeros(self):
    """mx.full / mx.zeros / mx.ones and their *_like variants."""
    # Integer shape with a scalar fill value.
    arr = mx.full(2, 3.0)
    self.assertEqual(arr.shape, (2,))
    self.assertEqual(arr.tolist(), [3.0, 3.0])

    # Tuple shape; float fill defaults to float32.
    arr = mx.full((2, 3), 2.0)
    self.assertEqual(arr.dtype, mx.float32)
    self.assertEqual(arr.shape, (2, 3))
    self.assertEqual(arr.tolist(), [[2, 2, 2], [2, 2, 2]])

    # An array fill value is broadcast along the new leading dim and
    # determines the dtype.
    arr = mx.full([3, 2], mx.array([False, True]))
    self.assertEqual(arr.dtype, mx.bool_)
    self.assertEqual(arr.tolist(), [[False, True], [False, True], [False, True]])

    arr = mx.full([3, 2], mx.array([2.0, 3.0]))
    self.assertEqual(arr.tolist(), [[2, 3], [2, 3], [2, 3]])

    arr = mx.zeros(2)
    self.assertEqual(arr.shape, (2,))
    self.assertEqual(arr.tolist(), [0.0, 0.0])

    arr = mx.ones(2)
    self.assertEqual(arr.shape, (2,))
    self.assertEqual(arr.tolist(), [1.0, 1.0])

    # Explicit dtypes plus the *_like constructors.
    for dtype in [mx.bool_, mx.int32, mx.float32]:
        for ctor, like_ctor, fill in (
            (mx.zeros, mx.zeros_like, 0),
            (mx.ones, mx.ones_like, 1),
        ):
            arr = ctor([2, 2], dtype)
            self.assertEqual(arr.dtype, dtype)
            self.assertTrue(mx.array_equal(arr, mx.array([[fill, fill], [fill, fill]])))
            like = like_ctor(arr)
            self.assertEqual(like.dtype, dtype)
            self.assertTrue(mx.array_equal(like, arr))
|
|
94
|
+
|
|
95
|
+
def test_scalar_inputs(self):
    """Type promotion when binary ops receive python scalars / arrays."""
    # (lhs, rhs, expected dtype, expected value) for pure python scalars.
    scalar_cases = [
        (False, True, mx.bool_, True),
        (1, 2, mx.int32, 3),
        (1.0, 2.0, mx.float32, 3.0),
        (True, 2, mx.int32, 3),
        (True, 2.0, mx.float32, 3.0),
        (1, 2.0, mx.float32, 3.0),
        (2, True, mx.int32, 3),
        (2.0, True, mx.float32, 3.0),
        (2.0, 1, mx.float32, 3.0),
    ]
    for lhs, rhs, dtype, value in scalar_cases:
        res = mx.add(lhs, rhs)
        self.assertEqual(res.dtype, dtype)
        self.assertEqual(res.item(), value)

    # Combinations with mlx arrays. Edge case: a python int scalar
    # promotes a bool array (the scalar's type wins).
    array_cases = [
        (mx.array(True), False, mx.bool_, True),
        (mx.array(1), False, mx.int32, 1.0),
        (mx.array(True), 1, mx.int32, 2),
        (mx.array(1.0), 1, mx.float32, 2.0),
        (1, mx.array(1.0), mx.float32, 2.0),
    ]
    for lhs, rhs, dtype, value in array_cases:
        res = mx.add(lhs, rhs)
        self.assertEqual(res.dtype, dtype)
        self.assertEqual(res.item(), value)

    binary_ops = [
        "add",
        "subtract",
        "multiply",
        "divide",
        "floor_divide",
        "remainder",
        "equal",
        "not_equal",
        "less",
        "greater",
        "less_equal",
        "greater_equal",
        "maximum",
        "minimum",
    ]

    # Every op must agree with numpy on scalar inputs.
    for op in binary_ops:
        np_fn = getattr(np, op)
        mx_fn = getattr(mx, op)

        # Avoid subtract from bool and divide by 0.
        for lhs in [-1, 0, 1, -1.0, 1.0]:
            for rhs in [True, -1, 1, -1.0, 1.0]:
                self.assertEqual(np_fn(lhs, rhs).item(), mx_fn(lhs, rhs).item())
|
|
180
|
+
|
|
181
|
+
def test_add(self):
    """Addition between arrays and python scalars with dtype promotion."""
    total = mx.add(mx.array(1), mx.array(1))
    self.assertEqual(total.item(), 2)

    # Bool arrays promote to the scalar's integer type.
    flag = mx.array(False, mx.bool_)
    res = flag + 1
    self.assertEqual(res.dtype, mx.int32)
    self.assertEqual(res.item(), 1)
    res = 2 + flag
    self.assertEqual(res.dtype, mx.int32)
    self.assertEqual(res.item(), 2)

    # Integer arrays keep their dtype with int scalars and promote to
    # float32 with python floats, in both operand orders.
    for int_dtype in (mx.uint32, mx.int64):
        one = mx.array(1, int_dtype)
        for res in (one + 3, 3 + one):
            self.assertEqual(res.dtype, int_dtype)
            self.assertEqual(res.item(), 4)
        for res in (one + 3.0, 3.0 + one):
            self.assertEqual(res.dtype, mx.float32)
            self.assertEqual(res.item(), 4.0)

    # Float arrays absorb integer scalars.
    one_f = mx.array(1, mx.float32)
    for res in (one_f + 3, 3 + one_f):
        self.assertEqual(res.dtype, mx.float32)
        self.assertEqual(res.item(), 4)
|
|
233
|
+
|
|
234
|
+
def test_subtract(self):
    """mx.subtract and the - operator, including the reflected form."""
    minuend = mx.array(4.0)
    subtrahend = mx.array(3.0)

    # All three spellings produce the same float32 result.
    for diff in (mx.subtract(minuend, subtrahend), minuend - 3.0, 5.0 - minuend):
        self.assertEqual(diff.dtype, mx.float32)
        self.assertEqual(diff.item(), 1.0)
|
|
249
|
+
|
|
250
|
+
def test_multiply(self):
    """mx.multiply and the * operator, including the reflected form."""
    lhs = mx.array(2.0)
    rhs = mx.array(3.0)

    for prod in (mx.multiply(lhs, rhs), lhs * 3.0, 3.0 * lhs):
        self.assertEqual(prod.dtype, mx.float32)
        self.assertEqual(prod.item(), 6.0)
|
|
265
|
+
|
|
266
|
+
def test_divide(self):
    """True division, float16 preservation, and floor division."""
    num = mx.array(2.0)
    den = mx.array(4.0)

    for quot in (mx.divide(num, den), num / 4.0, 1.0 / num):
        self.assertEqual(quot.dtype, mx.float32)
        self.assertEqual(quot.item(), 0.5)

    # float16 operands keep float16 with python float scalars.
    num = num.astype(mx.float16)
    self.assertEqual((num / 4.0).dtype, mx.float16)

    num = num.astype(mx.float16)
    self.assertEqual((4.0 / num).dtype, mx.float16)

    # int / int performs true division; // floors and stays integral.
    five = mx.array(5)
    two = mx.array(2)
    quot = five / two
    self.assertEqual(quot.dtype, mx.float32)
    self.assertEqual(quot.item(), 2.5)

    quot = five // two
    self.assertEqual(quot.dtype, mx.int32)
    self.assertEqual(quot.item(), 2)
|
|
299
|
+
|
|
300
|
+
def test_remainder(self):
    """Python-style modulo: the result takes the divisor's sign."""
    for dtype in [mx.int32, mx.float32]:
        two = mx.array(2, dtype=dtype)
        four = mx.array(4, dtype=dtype)

        small = mx.remainder(two, four)
        exact = mx.remainder(four, two)
        self.assertEqual(small.dtype, dtype)
        self.assertEqual(small.item(), 2)
        self.assertEqual(exact.item(), 0)

        # Operator form with scalars on either side.
        for res, expected in (
            (two % 4, 2),
            (1 % two, 1),
            (-1 % two, 1),  # sign follows the (positive) divisor
            (-1 % -two, -1),  # sign follows the (negative) divisor
        ):
            self.assertEqual(res.dtype, dtype)
            self.assertEqual(res.item(), expected)

        ramp = mx.arange(10).astype(dtype) - 5
        pos = ramp % 5
        neg = ramp % -5
        self.assertEqual(pos.tolist(), [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
        self.assertEqual(neg.tolist(), [0, -4, -3, -2, -1, 0, -4, -3, -2, -1])

    # Vectorized case: (-1) % 2 == 1 everywhere.
    res = -mx.ones(64) % mx.full(64, 2)
    self.assertTrue(mx.array_equal(res, mx.ones(64)))
|
|
335
|
+
|
|
336
|
+
def test_comparisons(self):
    """Elementwise comparison ops against arrays and broadcast scalars."""
    lhs = mx.array([0.0, 1.0, 5.0])
    rhs = mx.array([-1.0, 2.0, 5.0])

    self.assertEqual(mx.less(lhs, rhs).tolist(), [False, True, False])
    self.assertEqual(mx.less_equal(lhs, rhs).tolist(), [False, True, True])
    self.assertEqual(mx.greater(lhs, rhs).tolist(), [True, False, False])
    self.assertEqual(mx.greater_equal(lhs, rhs).tolist(), [True, False, True])

    # Scalars broadcast on either side of the comparison.
    self.assertEqual(mx.less(lhs, 5).tolist(), [True, True, False])
    self.assertEqual(mx.less(5, lhs).tolist(), [False, False, False])
    self.assertEqual(mx.less_equal(5, lhs).tolist(), [False, False, True])
    self.assertEqual(mx.greater(lhs, 1).tolist(), [False, False, True])
    self.assertEqual(mx.greater_equal(lhs, 1).tolist(), [False, True, True])

    lhs = mx.array([0.0, 1.0, 5.0, -1.0])
    rhs = mx.array([0.0, 2.0, 5.0, 3.0])
    self.assertEqual(mx.equal(lhs, rhs).tolist(), [True, False, True, False])
    self.assertEqual(mx.not_equal(lhs, rhs).tolist(), [False, True, False, True])
|
|
355
|
+
|
|
356
|
+
def test_array_equal(self):
    """mx.array_equal: values, shapes, dtype coercion, NaN handling."""
    base = mx.array([1, 2, 3, 4])
    self.assertTrue(mx.array_equal(base, mx.array([1, 2, 3, 4])))

    # Differing values.
    self.assertFalse(mx.array_equal(base, mx.array([1, 2, 4, 5])))

    # Differing shapes.
    self.assertFalse(mx.array_equal(base, mx.array([1, 2, 3])))

    # Can still be equal with different types.
    self.assertTrue(mx.array_equal(base, mx.array([1.0, 2.0, 3.0, 4.0])))

    # NaN != NaN unless equal_nan is requested.
    lhs = mx.array([0.0, float("nan")])
    rhs = mx.array([0.0, float("nan")])
    self.assertFalse(mx.array_equal(lhs, rhs))
    self.assertTrue(mx.array_equal(lhs, rhs, equal_nan=True))

    for dtype in [mx.float32, mx.float16, mx.bfloat16, mx.complex64]:
        with self.subTest(type=dtype):
            lhs = mx.array([0.0, float("nan")]).astype(dtype)
            rhs = mx.array([0.0, float("nan")]).astype(dtype)
            self.assertFalse(mx.array_equal(lhs, rhs))
            self.assertTrue(mx.array_equal(lhs, rhs, equal_nan=True))
|
|
382
|
+
|
|
383
|
+
def test_isnan(self):
    """mx.isnan across float32, half, bfloat16 and complex dtypes."""
    values = mx.array([0.0, float("nan")])
    self.assertEqual(mx.isnan(values).tolist(), [False, True])

    for dtype in (mx.float16, mx.bfloat16, mx.complex64):
        cast = mx.array([0.0, float("nan")]).astype(dtype)
        self.assertEqual(mx.isnan(cast).tolist(), [False, True])

    # 0 * inf produces NaN.
    self.assertEqual(mx.isnan(0 * mx.array(float("inf"))).tolist(), True)
|
|
397
|
+
|
|
398
|
+
def test_isinf(self):
    """mx.isinf on floating dtypes; integer values are never infinite."""
    values = mx.array([0.0, float("inf")])
    self.assertEqual(mx.isinf(values).tolist(), [False, True])

    for dtype in (mx.float16, mx.bfloat16, mx.complex64):
        cast = mx.array([0.0, float("inf")]).astype(dtype)
        self.assertEqual(mx.isinf(cast).tolist(), [False, True])

    # 0 * inf is NaN, not inf.
    self.assertEqual(mx.isinf(0 * mx.array(float("inf"))).tolist(), False)

    # Extremes of integer dtypes are ordinary finite values.
    for extremes, dtype in (
        ([-2147483648, 0, 2147483647], mx.int32),
        ([-32768, 0, 32767], mx.int16),
    ):
        result = mx.isinf(mx.array(extremes, dtype=dtype))
        self.assertEqual(result.tolist(), [False, False, False])
|
|
420
|
+
|
|
421
|
+
def test_isfinite(self):
    """mx.isfinite flags both inf and NaN as non-finite."""
    values = mx.array([0.0, float("inf"), float("nan")])
    self.assertEqual(mx.isfinite(values).tolist(), [True, False, False])

    # Casting (chained float32 -> float16 -> bfloat16) preserves the
    # non-finite elements.
    values = values.astype(mx.float16)
    self.assertEqual(mx.isfinite(values).tolist(), [True, False, False])

    values = values.astype(mx.bfloat16)
    self.assertEqual(mx.isfinite(values).tolist(), [True, False, False])
|
|
430
|
+
|
|
431
|
+
def test_tri(self):
    """mx.tri matches np.tri over shapes and diagonal offsets."""
    for shape in [[4], [4, 4], [2, 10]]:
        for offset in [-1, 0, 1, -2]:
            self.assertCmpNumpy(shape, mx.tri, np.tri, k=offset)

    # Default dtype is float32; an explicit dtype is honored.
    self.assertEqual(mx.tri(1, 1).dtype, mx.float32)
    self.assertEqual(mx.tri(1, 1, dtype=mx.bfloat16).dtype, mx.bfloat16)
|
|
437
|
+
|
|
438
|
+
def test_tril(self):
    """mx.tril matches np.tril; 1-D input is rejected."""
    for offset in [-1, 0, 1, -2]:
        self.assertCmpNumpy([(10, 10)], mx.tril, np.tril, k=offset)

    with self.assertRaises(Exception):
        mx.tril(mx.zeros((1)))
|
|
444
|
+
|
|
445
|
+
def test_triu(self):
    """mx.triu matches np.triu; 1-D input is rejected."""
    for offset in [-1, 0, 1, -2]:
        self.assertCmpNumpy([(10, 10)], mx.triu, np.triu, k=offset)

    with self.assertRaises(Exception):
        mx.triu(mx.zeros((1)))
|
|
450
|
+
|
|
451
|
+
def test_minimum(self):
    """Elementwise minimum; NaN propagates from either operand."""
    lhs = mx.array([0.0, -5, 10.0])
    rhs = mx.array([1.0, -7.0, 3.0])

    self.assertListEqual(mx.minimum(lhs, rhs).tolist(), [0, -7, 3])

    nan = mx.array([float("nan")])
    zero = mx.array([0.0])
    self.assertTrue(math.isnan(mx.minimum(nan, zero).item()))
    self.assertTrue(math.isnan(mx.minimum(zero, nan).item()))
|
|
462
|
+
|
|
463
|
+
def test_maximum(self):
    """Elementwise maximum; NaN propagates from either operand."""
    lhs = mx.array([0.0, -5, 10.0])
    rhs = mx.array([1.0, -7.0, 3.0])

    self.assertListEqual(mx.maximum(lhs, rhs).tolist(), [1, -5, 10])

    nan = mx.array([float("nan")])
    zero = mx.array([0.0])
    self.assertTrue(math.isnan(mx.maximum(nan, zero).item()))
    self.assertTrue(math.isnan(mx.maximum(zero, nan).item()))
|
|
474
|
+
|
|
475
|
+
def test_floor(self):
|
|
476
|
+
x = mx.array([-22.03, 19.98, -27, 9, 0.0, -np.inf, np.inf])
|
|
477
|
+
expected = [-23, 19, -27, 9, 0, -np.inf, np.inf]
|
|
478
|
+
self.assertListEqual(mx.floor(x).tolist(), expected)
|
|
479
|
+
|
|
480
|
+
with self.assertRaises(ValueError):
|
|
481
|
+
mx.floor(mx.array([22 + 3j, 19 + 98j]))
|
|
482
|
+
|
|
483
|
+
def test_ceil(self):
|
|
484
|
+
x = mx.array([-22.03, 19.98, -27, 9, 0.0, -np.inf, np.inf])
|
|
485
|
+
expected = [-22, 20, -27, 9, 0, -np.inf, np.inf]
|
|
486
|
+
self.assertListEqual(mx.ceil(x).tolist(), expected)
|
|
487
|
+
|
|
488
|
+
with self.assertRaises(ValueError):
|
|
489
|
+
mx.ceil(mx.array([22 + 3j, 19 + 98j]))
|
|
490
|
+
|
|
491
|
+
def test_isposinf(self):
|
|
492
|
+
x = mx.array([0.0, float("-inf")])
|
|
493
|
+
self.assertEqual(mx.isposinf(x).tolist(), [False, False])
|
|
494
|
+
|
|
495
|
+
x = mx.array([0.0, float("-inf")]).astype(mx.float16)
|
|
496
|
+
self.assertEqual(mx.isposinf(x).tolist(), [False, False])
|
|
497
|
+
|
|
498
|
+
x = mx.array([0.0, float("-inf")]).astype(mx.bfloat16)
|
|
499
|
+
self.assertEqual(mx.isposinf(x).tolist(), [False, False])
|
|
500
|
+
|
|
501
|
+
x = mx.array([0.0, float("-inf")]).astype(mx.complex64)
|
|
502
|
+
self.assertEqual(mx.isposinf(x).tolist(), [False, False])
|
|
503
|
+
|
|
504
|
+
self.assertEqual(mx.isposinf(0 * mx.array(float("inf"))).tolist(), False)
|
|
505
|
+
|
|
506
|
+
x = mx.array([-2147483648, 0, 2147483647], dtype=mx.int32)
|
|
507
|
+
result = mx.isposinf(x)
|
|
508
|
+
self.assertEqual(result.tolist(), [False, False, False])
|
|
509
|
+
|
|
510
|
+
x = mx.array([-32768, 0, 32767], dtype=mx.int16)
|
|
511
|
+
result = mx.isposinf(x)
|
|
512
|
+
self.assertEqual(result.tolist(), [False, False, False])
|
|
513
|
+
|
|
514
|
+
def test_isneginf(self):
|
|
515
|
+
x = mx.array([0.0, float("-inf")])
|
|
516
|
+
self.assertEqual(mx.isneginf(x).tolist(), [False, True])
|
|
517
|
+
|
|
518
|
+
x = mx.array([0.0, float("-inf")]).astype(mx.float16)
|
|
519
|
+
self.assertEqual(mx.isneginf(x).tolist(), [False, True])
|
|
520
|
+
|
|
521
|
+
x = mx.array([0.0, float("-inf")]).astype(mx.bfloat16)
|
|
522
|
+
self.assertEqual(mx.isneginf(x).tolist(), [False, True])
|
|
523
|
+
|
|
524
|
+
x = mx.array([0.0, float("-inf")]).astype(mx.complex64)
|
|
525
|
+
self.assertEqual(mx.isneginf(x).tolist(), [False, True])
|
|
526
|
+
|
|
527
|
+
self.assertEqual(mx.isneginf(0 * mx.array(float("inf"))).tolist(), False)
|
|
528
|
+
|
|
529
|
+
x = mx.array([-2147483648, 0, 2147483647], dtype=mx.int32)
|
|
530
|
+
result = mx.isneginf(x)
|
|
531
|
+
self.assertEqual(result.tolist(), [False, False, False])
|
|
532
|
+
|
|
533
|
+
x = mx.array([-32768, 0, 32767], dtype=mx.int16)
|
|
534
|
+
result = mx.isneginf(x)
|
|
535
|
+
self.assertEqual(result.tolist(), [False, False, False])
|
|
536
|
+
|
|
537
|
+
def test_round(self):
|
|
538
|
+
# float
|
|
539
|
+
x = mx.array(
|
|
540
|
+
[0.5, -0.5, 1.5, -1.5, -21.03, 19.98, -27, 9, 0.0, -np.inf, np.inf]
|
|
541
|
+
)
|
|
542
|
+
expected = [0, -0, 2, -2, -21, 20, -27, 9, 0, -np.inf, np.inf]
|
|
543
|
+
self.assertListEqual(mx.round(x).tolist(), expected)
|
|
544
|
+
|
|
545
|
+
# complex
|
|
546
|
+
y = mx.round(mx.array([22.2 + 3.6j, 18.5 + 98.2j]))
|
|
547
|
+
self.assertListEqual(y.tolist(), [22 + 4j, 18 + 98j])
|
|
548
|
+
|
|
549
|
+
# decimals
|
|
550
|
+
y0 = mx.round(mx.array([15, 122], mx.int32), decimals=0)
|
|
551
|
+
y1 = mx.round(mx.array([15, 122], mx.int32), decimals=-1)
|
|
552
|
+
y2 = mx.round(mx.array([15, 122], mx.int32), decimals=-2)
|
|
553
|
+
self.assertEqual(y0.dtype, mx.int32)
|
|
554
|
+
self.assertEqual(y1.dtype, mx.int32)
|
|
555
|
+
self.assertEqual(y2.dtype, mx.int32)
|
|
556
|
+
self.assertListEqual(y0.tolist(), [15, 122])
|
|
557
|
+
self.assertListEqual(y1.tolist(), [20, 120])
|
|
558
|
+
self.assertListEqual(y2.tolist(), [0, 100])
|
|
559
|
+
|
|
560
|
+
y1 = mx.round(mx.array([1.537, 1.471], mx.float32), decimals=1)
|
|
561
|
+
y2 = mx.round(mx.array([1.537, 1.471], mx.float32), decimals=2)
|
|
562
|
+
self.assertTrue(mx.allclose(y1, mx.array([1.5, 1.5])))
|
|
563
|
+
self.assertTrue(mx.allclose(y2, mx.array([1.54, 1.47])))
|
|
564
|
+
|
|
565
|
+
# check round to nearest for different types
|
|
566
|
+
dtypes = [mx.bfloat16, mx.float16, mx.float32]
|
|
567
|
+
for dtype in dtypes:
|
|
568
|
+
x = mx.arange(10, dtype=dtype) - 4.5
|
|
569
|
+
x = mx.round(x)
|
|
570
|
+
self.assertEqual(
|
|
571
|
+
x.astype(mx.float32).tolist(),
|
|
572
|
+
[-4.0, -4.0, -2.0, -2.0, -0.0, 0.0, 2.0, 2.0, 4.0, 4.0],
|
|
573
|
+
)
|
|
574
|
+
|
|
575
|
+
def test_transpose_noargs(self):
|
|
576
|
+
x = mx.array([[0, 1, 1], [1, 0, 0]])
|
|
577
|
+
|
|
578
|
+
expected = [
|
|
579
|
+
[0, 1],
|
|
580
|
+
[1, 0],
|
|
581
|
+
[1, 0],
|
|
582
|
+
]
|
|
583
|
+
|
|
584
|
+
self.assertListEqual(mx.transpose(x).tolist(), expected)
|
|
585
|
+
|
|
586
|
+
def test_transpose_axis(self):
|
|
587
|
+
x = mx.array(
|
|
588
|
+
[
|
|
589
|
+
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]],
|
|
590
|
+
[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]],
|
|
591
|
+
]
|
|
592
|
+
)
|
|
593
|
+
expected = [
|
|
594
|
+
[[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]],
|
|
595
|
+
[[12, 16, 20], [13, 17, 21], [14, 18, 22], [15, 19, 23]],
|
|
596
|
+
]
|
|
597
|
+
|
|
598
|
+
self.assertListEqual(mx.transpose(x, axes=(0, 2, 1)).tolist(), expected)
|
|
599
|
+
|
|
600
|
+
def test_move_swap_axes(self):
|
|
601
|
+
x = mx.zeros((2, 3, 4))
|
|
602
|
+
self.assertEqual(mx.moveaxis(x, 0, 2).shape, (3, 4, 2))
|
|
603
|
+
self.assertEqual(x.moveaxis(0, 2).shape, (3, 4, 2))
|
|
604
|
+
self.assertEqual(mx.swapaxes(x, 0, 2).shape, (4, 3, 2))
|
|
605
|
+
self.assertEqual(x.swapaxes(0, 2).shape, (4, 3, 2))
|
|
606
|
+
|
|
607
|
+
def test_sum(self):
|
|
608
|
+
x = mx.array(
|
|
609
|
+
[
|
|
610
|
+
[1, 2],
|
|
611
|
+
[3, 3],
|
|
612
|
+
]
|
|
613
|
+
)
|
|
614
|
+
self.assertEqual(mx.sum(x).item(), 9)
|
|
615
|
+
y = mx.sum(x, keepdims=True)
|
|
616
|
+
self.assertEqual(y, mx.array(9))
|
|
617
|
+
self.assertEqual(y.shape, (1, 1))
|
|
618
|
+
|
|
619
|
+
self.assertEqual(mx.sum(x, axis=0).tolist(), [4, 5])
|
|
620
|
+
self.assertEqual(mx.sum(x, axis=1).tolist(), [3, 6])
|
|
621
|
+
|
|
622
|
+
x_npy = np.arange(3 * 5 * 4 * 7).astype(np.float32)
|
|
623
|
+
x_npy = np.reshape(x_npy, (3, 5, 4, 7))
|
|
624
|
+
x_mlx = mx.array(x_npy)
|
|
625
|
+
|
|
626
|
+
for axis in (None, 0, 1, 2, 3, (0, 1), (2, 3), (1, 2, 3)):
|
|
627
|
+
sum_npy = np.sum(x_npy, axis=axis)
|
|
628
|
+
sum_mlx = np.asarray(mx.sum(x_mlx, axis=axis))
|
|
629
|
+
self.assertListEqual(list(sum_npy.shape), list(sum_mlx.shape))
|
|
630
|
+
self.assertTrue(np.all(sum_npy == sum_mlx))
|
|
631
|
+
|
|
632
|
+
x_npy = np.array([1.0, 2.0, 3.0, 4.0]).astype(np.float32)
|
|
633
|
+
x_mlx = mx.array(x_npy)
|
|
634
|
+
|
|
635
|
+
y_npy = x_npy[0:4:2]
|
|
636
|
+
y_npy = np.broadcast_to(y_npy, (2, 2))
|
|
637
|
+
|
|
638
|
+
y_mlx = x_mlx[0:4:2]
|
|
639
|
+
y_mlx = mx.broadcast_to(y_mlx, (2, 2))
|
|
640
|
+
|
|
641
|
+
for axis in (None, 0, 1, (0, 1)):
|
|
642
|
+
sum_npy = np.sum(y_npy, axis=axis)
|
|
643
|
+
sum_mlx = np.asarray(mx.sum(y_mlx, axis=axis))
|
|
644
|
+
self.assertListEqual(list(sum_npy.shape), list(sum_mlx.shape))
|
|
645
|
+
self.assertTrue(np.all(sum_npy == sum_mlx))
|
|
646
|
+
|
|
647
|
+
x_npy = (
|
|
648
|
+
np.arange(3 * 2 * 3 * 3 * 3 * 3)
|
|
649
|
+
.reshape(3, 2, 3, 3, 3, 3)
|
|
650
|
+
.astype(np.float32)
|
|
651
|
+
)
|
|
652
|
+
x_mlx = mx.array(x_npy)
|
|
653
|
+
|
|
654
|
+
y_mlx = x_mlx.sum(axis=(0, 1, 3, 4, 5))
|
|
655
|
+
y_npy = x_npy.sum(axis=(0, 1, 3, 4, 5))
|
|
656
|
+
|
|
657
|
+
self.assertTrue(np.array_equal(y_mlx, y_npy))
|
|
658
|
+
|
|
659
|
+
def test_prod(self):
|
|
660
|
+
x = mx.array(
|
|
661
|
+
[
|
|
662
|
+
[1, 2],
|
|
663
|
+
[3, 3],
|
|
664
|
+
]
|
|
665
|
+
)
|
|
666
|
+
self.assertEqual(mx.prod(x).item(), 18)
|
|
667
|
+
y = mx.prod(x, keepdims=True)
|
|
668
|
+
self.assertEqual(y, mx.array(18))
|
|
669
|
+
self.assertEqual(y.shape, (1, 1))
|
|
670
|
+
|
|
671
|
+
self.assertEqual(mx.prod(x, axis=0).tolist(), [3, 6])
|
|
672
|
+
self.assertEqual(mx.prod(x, axis=1).tolist(), [2, 9])
|
|
673
|
+
|
|
674
|
+
def test_min_and_max(self):
|
|
675
|
+
x = mx.array(
|
|
676
|
+
[
|
|
677
|
+
[1, 2],
|
|
678
|
+
[3, 4],
|
|
679
|
+
]
|
|
680
|
+
)
|
|
681
|
+
self.assertEqual(mx.min(x).item(), 1)
|
|
682
|
+
self.assertEqual(mx.max(x).item(), 4)
|
|
683
|
+
y = mx.min(x, keepdims=True)
|
|
684
|
+
self.assertEqual(y.shape, (1, 1))
|
|
685
|
+
self.assertEqual(y, mx.array(1))
|
|
686
|
+
|
|
687
|
+
y = mx.max(x, keepdims=True)
|
|
688
|
+
self.assertEqual(y.shape, (1, 1))
|
|
689
|
+
self.assertEqual(y, mx.array(4))
|
|
690
|
+
|
|
691
|
+
self.assertEqual(mx.min(x, axis=0).tolist(), [1, 2])
|
|
692
|
+
self.assertEqual(mx.min(x, axis=1).tolist(), [1, 3])
|
|
693
|
+
self.assertEqual(mx.max(x, axis=0).tolist(), [3, 4])
|
|
694
|
+
self.assertEqual(mx.max(x, axis=1).tolist(), [2, 4])
|
|
695
|
+
|
|
696
|
+
def test_argmin_argmax(self):
|
|
697
|
+
data = np.random.rand(10, 12, 13)
|
|
698
|
+
x = mx.array(data)
|
|
699
|
+
for op in ["argmin", "argmax"]:
|
|
700
|
+
for axis in range(3):
|
|
701
|
+
for kd in [True, False]:
|
|
702
|
+
a = getattr(mx, op)(x, axis, kd)
|
|
703
|
+
b = getattr(np, op)(data, axis, keepdims=kd)
|
|
704
|
+
self.assertEqual(a.tolist(), b.tolist())
|
|
705
|
+
|
|
706
|
+
for op in ["argmin", "argmax"]:
|
|
707
|
+
a = getattr(mx, op)(x, keepdims=True)
|
|
708
|
+
b = getattr(np, op)(data, keepdims=True)
|
|
709
|
+
self.assertEqual(a.tolist(), b.tolist())
|
|
710
|
+
a = getattr(mx, op)(x)
|
|
711
|
+
b = getattr(np, op)(data)
|
|
712
|
+
self.assertEqual(a.item(), b)
|
|
713
|
+
|
|
714
|
+
def test_broadcast(self):
|
|
715
|
+
a_npy = np.reshape(np.arange(200), (10, 20))
|
|
716
|
+
a_mlx = mx.array(a_npy)
|
|
717
|
+
|
|
718
|
+
b_npy = np.broadcast_to(a_npy, (30, 10, 20))
|
|
719
|
+
b_mlx = mx.broadcast_to(a_mlx, (30, 10, 20))
|
|
720
|
+
self.assertListEqual(list(b_npy.shape), list(b_mlx.shape))
|
|
721
|
+
self.assertTrue(np.array_equal(b_npy, b_mlx))
|
|
722
|
+
|
|
723
|
+
b_npy = np.broadcast_to(a_npy, (1, 10, 20))
|
|
724
|
+
b_mlx = mx.broadcast_to(a_mlx, (1, 10, 20))
|
|
725
|
+
self.assertListEqual(list(b_npy.shape), list(b_mlx.shape))
|
|
726
|
+
self.assertTrue(np.array_equal(b_npy, b_mlx))
|
|
727
|
+
|
|
728
|
+
b_npy = np.broadcast_to(1, (10, 20))
|
|
729
|
+
b_mlx = mx.broadcast_to(1, (10, 20))
|
|
730
|
+
self.assertListEqual(list(b_npy.shape), list(b_mlx.shape))
|
|
731
|
+
self.assertTrue(np.array_equal(b_npy, b_mlx))
|
|
732
|
+
|
|
733
|
+
def test_logsumexp(self):
|
|
734
|
+
def logsumexp(x, axes=None):
|
|
735
|
+
maxs = mx.max(x, axis=axes, keepdims=True)
|
|
736
|
+
return mx.log(mx.sum(mx.exp(x - maxs), axis=axes, keepdims=True)) + maxs
|
|
737
|
+
|
|
738
|
+
x = mx.array(
|
|
739
|
+
[
|
|
740
|
+
[1.0, 2.0],
|
|
741
|
+
[3.0, 4.0],
|
|
742
|
+
]
|
|
743
|
+
)
|
|
744
|
+
self.assertTrue(math.isclose(mx.logsumexp(x).item(), logsumexp(x).item()))
|
|
745
|
+
|
|
746
|
+
x = mx.random.uniform(shape=(1025,))
|
|
747
|
+
self.assertTrue(mx.allclose(mx.logsumexp(x), logsumexp(x)))
|
|
748
|
+
|
|
749
|
+
# Transposed
|
|
750
|
+
x = mx.random.uniform(shape=(2, 2, 8))
|
|
751
|
+
x = x.swapaxes(0, 1)
|
|
752
|
+
self.assertTrue(mx.allclose(mx.logsumexp(x), logsumexp(x)))
|
|
753
|
+
|
|
754
|
+
# Broadcast
|
|
755
|
+
x = mx.broadcast_to(mx.random.uniform(shape=(2, 1, 8)), (2, 2, 8))
|
|
756
|
+
self.assertTrue(mx.allclose(mx.logsumexp(x), logsumexp(x)))
|
|
757
|
+
|
|
758
|
+
# Large
|
|
759
|
+
x = mx.random.uniform(shape=(1025,))
|
|
760
|
+
x = mx.broadcast_to(mx.random.uniform(shape=(2, 1, 8)), (2, 2, 8))
|
|
761
|
+
self.assertTrue(mx.allclose(mx.logsumexp(x), logsumexp(x)))
|
|
762
|
+
|
|
763
|
+
def test_mean(self):
|
|
764
|
+
x = mx.array(
|
|
765
|
+
[
|
|
766
|
+
[1, 2],
|
|
767
|
+
[3, 4],
|
|
768
|
+
]
|
|
769
|
+
)
|
|
770
|
+
self.assertEqual(mx.mean(x).item(), 2.5)
|
|
771
|
+
y = mx.mean(x, keepdims=True)
|
|
772
|
+
self.assertEqual(y, mx.array(2.5))
|
|
773
|
+
self.assertEqual(y.shape, (1, 1))
|
|
774
|
+
|
|
775
|
+
self.assertEqual(mx.mean(x, axis=0).tolist(), [2, 3])
|
|
776
|
+
self.assertEqual(mx.mean(x, axis=1).tolist(), [1.5, 3.5])
|
|
777
|
+
|
|
778
|
+
def test_median(self):
|
|
779
|
+
x = mx.array([])
|
|
780
|
+
with self.assertRaises(ValueError):
|
|
781
|
+
mx.median(x, axis=0)
|
|
782
|
+
x = mx.array([0, 1, 2, 3, 4])
|
|
783
|
+
with self.assertRaises(ValueError):
|
|
784
|
+
mx.median(x, axis=(0, 1))
|
|
785
|
+
with self.assertRaises(ValueError):
|
|
786
|
+
mx.median(x, axis=(0, 0))
|
|
787
|
+
|
|
788
|
+
out = mx.median(x)
|
|
789
|
+
self.assertEqual(out.shape, ())
|
|
790
|
+
self.assertEqual(out.item(), 2)
|
|
791
|
+
out = mx.median(x, keepdims=True)
|
|
792
|
+
self.assertEqual(out.shape, (1,))
|
|
793
|
+
|
|
794
|
+
x = mx.array([0, 1, 2, 3, 4, 5])
|
|
795
|
+
out = mx.median(x)
|
|
796
|
+
self.assertEqual(out.item(), 2.5)
|
|
797
|
+
|
|
798
|
+
x = mx.random.normal((5, 5, 5, 5))
|
|
799
|
+
out = mx.median(x, axis=(0, 2), keepdims=True)
|
|
800
|
+
out_np = np.median(x, axis=(0, 2), keepdims=True)
|
|
801
|
+
self.assertTrue(np.allclose(out, out_np))
|
|
802
|
+
|
|
803
|
+
out = mx.median(x, axis=(1, 3), keepdims=True)
|
|
804
|
+
out_np = np.median(x, axis=(1, 3), keepdims=True)
|
|
805
|
+
self.assertTrue(np.allclose(out, out_np))
|
|
806
|
+
|
|
807
|
+
out = mx.median(x, axis=(0, 1, 3), keepdims=True)
|
|
808
|
+
out_np = np.median(x, axis=(0, 1, 3), keepdims=True)
|
|
809
|
+
self.assertTrue(np.allclose(out, out_np))
|
|
810
|
+
|
|
811
|
+
def test_var(self):
|
|
812
|
+
x = mx.array(
|
|
813
|
+
[
|
|
814
|
+
[1, 2],
|
|
815
|
+
[3, 4],
|
|
816
|
+
]
|
|
817
|
+
)
|
|
818
|
+
self.assertEqual(mx.var(x).item(), 1.25)
|
|
819
|
+
y = mx.var(x, keepdims=True)
|
|
820
|
+
self.assertEqual(y, mx.array(1.25))
|
|
821
|
+
self.assertEqual(y.shape, (1, 1))
|
|
822
|
+
|
|
823
|
+
self.assertEqual(mx.var(x, axis=0).tolist(), [1.0, 1.0])
|
|
824
|
+
self.assertEqual(mx.var(x, axis=1).tolist(), [0.25, 0.25])
|
|
825
|
+
|
|
826
|
+
x = mx.array([1.0, 2.0])
|
|
827
|
+
out = mx.var(x, ddof=2)
|
|
828
|
+
self.assertEqual(out.item(), float("inf"))
|
|
829
|
+
|
|
830
|
+
x = mx.array([1.0, 2.0])
|
|
831
|
+
out = mx.var(x, ddof=3)
|
|
832
|
+
self.assertEqual(out.item(), float("inf"))
|
|
833
|
+
|
|
834
|
+
def test_std(self):
|
|
835
|
+
x = mx.random.uniform(shape=(5, 5))
|
|
836
|
+
x_np = np.array(x)
|
|
837
|
+
self.assertAlmostEqual(mx.std(x).item(), x_np.std().item(), places=6)
|
|
838
|
+
|
|
839
|
+
def test_abs(self):
|
|
840
|
+
a = mx.array([-1.0, 1.0, -2.0, 3.0])
|
|
841
|
+
result = mx.abs(a)
|
|
842
|
+
expected = np.abs(a, dtype=np.float32)
|
|
843
|
+
self.assertTrue(np.allclose(result, expected))
|
|
844
|
+
|
|
845
|
+
self.assertTrue(np.allclose(a.abs(), abs(a)))
|
|
846
|
+
|
|
847
|
+
def test_negative(self):
|
|
848
|
+
a = mx.array([-1.0, 1.0, -2.0, 3.0])
|
|
849
|
+
result = mx.negative(a)
|
|
850
|
+
expected = np.negative(a, dtype=np.float32)
|
|
851
|
+
self.assertTrue(np.allclose(result, expected))
|
|
852
|
+
|
|
853
|
+
def test_sign(self):
|
|
854
|
+
a = mx.array([-1.0, 1.0, 0.0, -2.0, 3.0])
|
|
855
|
+
result = mx.sign(a)
|
|
856
|
+
expected = np.sign(a, dtype=np.float32)
|
|
857
|
+
self.assertTrue(np.allclose(result, expected))
|
|
858
|
+
|
|
859
|
+
a = mx.array([-1.0, 1.0, 0.0, -2.0, 3.0])
|
|
860
|
+
b = mx.array([-4.0, -3.0, 1.0, 0.0, 3.0])
|
|
861
|
+
c = a + b * 1j
|
|
862
|
+
result = mx.sign(c)
|
|
863
|
+
# np.sign differs in NumPy 1 and 2 so
|
|
864
|
+
# we manually implement the NumPy 2 version here.
|
|
865
|
+
expected = c / np.abs(c)
|
|
866
|
+
self.assertTrue(np.allclose(result, expected))
|
|
867
|
+
|
|
868
|
+
def test_logical_not(self):
|
|
869
|
+
a = mx.array([-1.0, 1.0, 0.0, 1.0, -2.0, 3.0])
|
|
870
|
+
result = mx.logical_not(a)
|
|
871
|
+
expected = np.logical_not(a)
|
|
872
|
+
self.assertTrue(np.array_equal(result, expected))
|
|
873
|
+
|
|
874
|
+
def test_logical_and(self):
|
|
875
|
+
a = mx.array([True, False, True, False])
|
|
876
|
+
b = mx.array([True, True, False, False])
|
|
877
|
+
result = mx.logical_and(a, b)
|
|
878
|
+
expected = np.logical_and(a, b)
|
|
879
|
+
self.assertTrue(np.array_equal(result, expected))
|
|
880
|
+
|
|
881
|
+
# test overloaded operator
|
|
882
|
+
result = a & b
|
|
883
|
+
self.assertTrue(np.array_equal(result, expected))
|
|
884
|
+
|
|
885
|
+
def test_logical_or(self):
|
|
886
|
+
a = mx.array([True, False, True, False])
|
|
887
|
+
b = mx.array([True, True, False, False])
|
|
888
|
+
result = mx.logical_or(a, b)
|
|
889
|
+
expected = np.logical_or(a, b)
|
|
890
|
+
self.assertTrue(np.array_equal(result, expected))
|
|
891
|
+
|
|
892
|
+
# test overloaded operator
|
|
893
|
+
result = a | b
|
|
894
|
+
self.assertTrue(np.array_equal(result, expected))
|
|
895
|
+
|
|
896
|
+
def test_square(self):
|
|
897
|
+
a = mx.array([0.1, 0.5, 1.0, 10.0])
|
|
898
|
+
result = mx.square(a)
|
|
899
|
+
expected = np.square(a, dtype=np.float32)
|
|
900
|
+
|
|
901
|
+
self.assertTrue(np.allclose(result, expected))
|
|
902
|
+
|
|
903
|
+
def test_sqrt(self):
|
|
904
|
+
a = mx.array([0.1, 0.5, 1.0, 10.0])
|
|
905
|
+
result = mx.sqrt(a)
|
|
906
|
+
expected = np.sqrt(a, dtype=np.float32)
|
|
907
|
+
self.assertTrue(np.allclose(result, expected))
|
|
908
|
+
|
|
909
|
+
def test_rsqrt(self):
|
|
910
|
+
a = mx.array([0.1, 0.5, 1.0, 10.0])
|
|
911
|
+
result = mx.rsqrt(a)
|
|
912
|
+
expected = 1.0 / np.sqrt(a, dtype=np.float32)
|
|
913
|
+
self.assertTrue(np.allclose(result, expected))
|
|
914
|
+
|
|
915
|
+
def test_reciprocal(self):
|
|
916
|
+
a = mx.array([0.1, 0.5, 1.0, 2.0])
|
|
917
|
+
result = mx.reciprocal(a)
|
|
918
|
+
expected = np.reciprocal(a, dtype=np.float32)
|
|
919
|
+
self.assertTrue(np.allclose(result, expected))
|
|
920
|
+
|
|
921
|
+
def test_logaddexp(self):
|
|
922
|
+
a = mx.array([0, 1, 2, 9.0])
|
|
923
|
+
b = mx.array([1, 0, 4, 2.5])
|
|
924
|
+
|
|
925
|
+
result = mx.logaddexp(a, b)
|
|
926
|
+
expected = np.logaddexp(a, b, dtype=np.float32)
|
|
927
|
+
|
|
928
|
+
self.assertTrue(np.allclose(result, expected))
|
|
929
|
+
|
|
930
|
+
# Complex test
|
|
931
|
+
|
|
932
|
+
a = mx.array([0, 1, 2, 9.0]) + 1j
|
|
933
|
+
b = mx.array([1, 0, 4, 2.5]) + 1j
|
|
934
|
+
|
|
935
|
+
result = mx.logaddexp(a, b)
|
|
936
|
+
expected = np_logaddexp(np.array(a), np.array(b))
|
|
937
|
+
|
|
938
|
+
self.assertTrue(np.allclose(result, expected))
|
|
939
|
+
|
|
940
|
+
a = mx.array([float("nan")])
|
|
941
|
+
b = mx.array([0.0])
|
|
942
|
+
self.assertTrue(math.isnan(mx.logaddexp(a, b).item()))
|
|
943
|
+
|
|
944
|
+
def test_log(self):
|
|
945
|
+
a = mx.array([1, 0.5, 10, 100])
|
|
946
|
+
result = mx.log(a)
|
|
947
|
+
expected = np.log(a, dtype=np.float32)
|
|
948
|
+
|
|
949
|
+
self.assertTrue(np.allclose(result, expected))
|
|
950
|
+
|
|
951
|
+
a = mx.array(1.0) + 1j * mx.array(2.0)
|
|
952
|
+
result = mx.log(a)
|
|
953
|
+
expected = np.log(np.array(a))
|
|
954
|
+
self.assertTrue(np.allclose(result, expected))
|
|
955
|
+
|
|
956
|
+
def test_log2(self):
|
|
957
|
+
a = mx.array([0.5, 1, 2, 10, 16])
|
|
958
|
+
result = mx.log2(a)
|
|
959
|
+
expected = np.log2(a, dtype=np.float32)
|
|
960
|
+
|
|
961
|
+
self.assertTrue(np.allclose(result, expected))
|
|
962
|
+
|
|
963
|
+
a = mx.array(1.0) + 1j * mx.array(2.0)
|
|
964
|
+
result = mx.log2(a)
|
|
965
|
+
expected = np.log2(np.array(a))
|
|
966
|
+
self.assertTrue(np.allclose(result, expected))
|
|
967
|
+
|
|
968
|
+
def test_log10(self):
|
|
969
|
+
a = mx.array([0.1, 1, 10, 20, 100])
|
|
970
|
+
result = mx.log10(a)
|
|
971
|
+
expected = np.log10(a, dtype=np.float32)
|
|
972
|
+
|
|
973
|
+
self.assertTrue(np.allclose(result, expected))
|
|
974
|
+
|
|
975
|
+
a = mx.array(1.0) + 1j * mx.array(2.0)
|
|
976
|
+
result = mx.log10(a)
|
|
977
|
+
expected = np.log10(np.array(a))
|
|
978
|
+
self.assertTrue(np.allclose(result, expected))
|
|
979
|
+
|
|
980
|
+
def test_exp(self):
|
|
981
|
+
a = mx.array([0, 0.5, -0.5, 5])
|
|
982
|
+
result = mx.exp(a)
|
|
983
|
+
expected = np.exp(a, dtype=np.float32)
|
|
984
|
+
|
|
985
|
+
self.assertTrue(np.allclose(result, expected))
|
|
986
|
+
|
|
987
|
+
def test_expm1(self):
|
|
988
|
+
a = mx.array([-88, -87, 0, 0.5, -0.5, 5, 87, 88, 89, 90])
|
|
989
|
+
result = mx.expm1(a)
|
|
990
|
+
errs = np.seterr(over="ignore")
|
|
991
|
+
expected = np.expm1(a)
|
|
992
|
+
np.seterr(over=errs["over"])
|
|
993
|
+
self.assertTrue(np.allclose(result, expected, rtol=1e-3, atol=1e-4))
|
|
994
|
+
|
|
995
|
+
def test_erf(self):
|
|
996
|
+
inputs = [-5, 0.0, 0.5, 1.0, 2.0, 10.0]
|
|
997
|
+
x = mx.array(inputs)
|
|
998
|
+
expected = np.array([math.erf(i) for i in inputs])
|
|
999
|
+
self.assertTrue(np.allclose(mx.erf(x), expected))
|
|
1000
|
+
|
|
1001
|
+
def test_erfinv(self):
|
|
1002
|
+
inputs = [-5.0, -1.0, 0.5, 0.0, 0.5, 1.0, 5.0]
|
|
1003
|
+
x = mx.array(inputs)
|
|
1004
|
+
# Output of:
|
|
1005
|
+
# scipy.special.erfinv([-5.0, -1.0, 0.5, 0.0, 0.5, 1.0, 5.0])
|
|
1006
|
+
expected = np.array(
|
|
1007
|
+
[
|
|
1008
|
+
float("nan"),
|
|
1009
|
+
-float("inf"),
|
|
1010
|
+
0.47693628,
|
|
1011
|
+
0.0,
|
|
1012
|
+
0.47693628,
|
|
1013
|
+
float("inf"),
|
|
1014
|
+
float("nan"),
|
|
1015
|
+
]
|
|
1016
|
+
).astype(np.float32)
|
|
1017
|
+
self.assertTrue(np.allclose(mx.erfinv(x), expected, equal_nan=True))
|
|
1018
|
+
|
|
1019
|
+
result = mx.erfinv(mx.array([0.9999999403953552] * 8))
|
|
1020
|
+
expected = mx.array([3.8325066566467285] * 8)
|
|
1021
|
+
self.assertTrue(mx.allclose(result, expected))
|
|
1022
|
+
|
|
1023
|
+
def test_sin(self):
|
|
1024
|
+
a = mx.array(
|
|
1025
|
+
[0, math.pi / 4, math.pi / 2, math.pi, 3 * math.pi / 4, 2 * math.pi]
|
|
1026
|
+
)
|
|
1027
|
+
result = mx.sin(a)
|
|
1028
|
+
expected = np.sin(a, dtype=np.float32)
|
|
1029
|
+
|
|
1030
|
+
self.assertTrue(np.allclose(result, expected))
|
|
1031
|
+
|
|
1032
|
+
def test_cos(self):
|
|
1033
|
+
a = mx.array(
|
|
1034
|
+
[0, math.pi / 4, math.pi / 2, math.pi, 3 * math.pi / 4, 2 * math.pi]
|
|
1035
|
+
)
|
|
1036
|
+
result = mx.cos(a)
|
|
1037
|
+
expected = np.cos(a, dtype=np.float32)
|
|
1038
|
+
|
|
1039
|
+
self.assertTrue(np.allclose(result, expected))
|
|
1040
|
+
|
|
1041
|
+
def test_degrees(self):
|
|
1042
|
+
a = mx.array(
|
|
1043
|
+
[0, math.pi / 4, math.pi / 2, math.pi, 3 * math.pi / 4, 2 * math.pi]
|
|
1044
|
+
)
|
|
1045
|
+
result = mx.degrees(a)
|
|
1046
|
+
expected = np.degrees(a, dtype=np.float32)
|
|
1047
|
+
|
|
1048
|
+
self.assertTrue(np.allclose(result, expected))
|
|
1049
|
+
|
|
1050
|
+
def test_radians(self):
|
|
1051
|
+
a = mx.array([0.0, 45.0, 90.0, 180.0, 270.0, 360.0])
|
|
1052
|
+
result = mx.radians(a)
|
|
1053
|
+
expected = np.radians(a, dtype=np.float32)
|
|
1054
|
+
|
|
1055
|
+
self.assertTrue(np.allclose(result, expected))
|
|
1056
|
+
|
|
1057
|
+
def test_log1p(self):
|
|
1058
|
+
a = mx.array([1, 0.5, 10, 100])
|
|
1059
|
+
result = mx.log1p(a)
|
|
1060
|
+
expected = np.log1p(a, dtype=np.float32)
|
|
1061
|
+
|
|
1062
|
+
self.assertTrue(np.allclose(result, expected))
|
|
1063
|
+
|
|
1064
|
+
# Complex test
|
|
1065
|
+
a = mx.array([1, 0.5, 10, 100]) + 1j
|
|
1066
|
+
result = mx.log1p(a)
|
|
1067
|
+
expected = np.log1p(a, dtype=np.complex64)
|
|
1068
|
+
|
|
1069
|
+
self.assertTrue(np.allclose(result, expected))
|
|
1070
|
+
|
|
1071
|
+
def test_sigmoid(self):
|
|
1072
|
+
a = mx.array([0.0, 1.0, -1.0, 5.0, -5.0])
|
|
1073
|
+
result = mx.sigmoid(a)
|
|
1074
|
+
expected = 1 / (1 + np.exp(-a, dtype=np.float32))
|
|
1075
|
+
self.assertTrue(np.allclose(result, expected))
|
|
1076
|
+
|
|
1077
|
+
# Low precision
|
|
1078
|
+
a = mx.array(-8.0).astype(mx.float16)
|
|
1079
|
+
self.assertNotEqual(mx.sigmoid(a).item(), 0.0)
|
|
1080
|
+
a = mx.array(8.0).astype(mx.float16)
|
|
1081
|
+
self.assertNotEqual(mx.sigmoid(a).item(), 1.0)
|
|
1082
|
+
|
|
1083
|
+
def test_allclose(self):
|
|
1084
|
+
a = mx.array(1.0)
|
|
1085
|
+
b = mx.array(1.0)
|
|
1086
|
+
|
|
1087
|
+
self.assertTrue(mx.allclose(a, b).item())
|
|
1088
|
+
|
|
1089
|
+
b = mx.array(1.1)
|
|
1090
|
+
self.assertFalse(mx.allclose(a, b).item())
|
|
1091
|
+
self.assertTrue(mx.allclose(a, b, 0.1).item())
|
|
1092
|
+
self.assertFalse(mx.allclose(a, b, 0.01).item())
|
|
1093
|
+
self.assertTrue(mx.allclose(a, b, 0.01, 0.1).item())
|
|
1094
|
+
|
|
1095
|
+
c = mx.array(float("inf"))
|
|
1096
|
+
self.assertTrue(mx.allclose(c, c).item())
|
|
1097
|
+
|
|
1098
|
+
def test_isclose(self):
|
|
1099
|
+
a = mx.array([float("inf"), float("inf"), float("-inf")])
|
|
1100
|
+
b = mx.array([float("inf"), float("-inf"), float("-inf")])
|
|
1101
|
+
|
|
1102
|
+
self.assertListEqual(mx.isclose(a, b).tolist(), [True, False, True])
|
|
1103
|
+
|
|
1104
|
+
a = mx.array([np.nan])
|
|
1105
|
+
self.assertListEqual(mx.isclose(a, a).tolist(), [False])
|
|
1106
|
+
|
|
1107
|
+
a = mx.array([np.nan])
|
|
1108
|
+
self.assertListEqual(mx.isclose(a, a, equal_nan=True).tolist(), [True])
|
|
1109
|
+
|
|
1110
|
+
def test_all(self):
|
|
1111
|
+
a = mx.array([[True, False], [True, True]])
|
|
1112
|
+
|
|
1113
|
+
self.assertFalse(mx.all(a).item())
|
|
1114
|
+
self.assertEqual(mx.all(a, keepdims=True).shape, (1, 1))
|
|
1115
|
+
self.assertFalse(mx.all(a, axis=[0, 1]).item())
|
|
1116
|
+
self.assertEqual(mx.all(a, axis=[0]).tolist(), [True, False])
|
|
1117
|
+
self.assertEqual(mx.all(a, axis=[1]).tolist(), [False, True])
|
|
1118
|
+
self.assertEqual(mx.all(a, axis=0).tolist(), [True, False])
|
|
1119
|
+
self.assertEqual(mx.all(a, axis=1).tolist(), [False, True])
|
|
1120
|
+
|
|
1121
|
+
def test_any(self):
|
|
1122
|
+
a = mx.array([[True, False], [False, False]])
|
|
1123
|
+
|
|
1124
|
+
self.assertTrue(mx.any(a).item())
|
|
1125
|
+
self.assertEqual(mx.any(a, keepdims=True).shape, (1, 1))
|
|
1126
|
+
self.assertTrue(mx.any(a, axis=[0, 1]).item())
|
|
1127
|
+
self.assertEqual(mx.any(a, axis=[0]).tolist(), [True, False])
|
|
1128
|
+
self.assertEqual(mx.any(a, axis=[1]).tolist(), [True, False])
|
|
1129
|
+
self.assertEqual(mx.any(a, axis=0).tolist(), [True, False])
|
|
1130
|
+
self.assertEqual(mx.any(a, axis=1).tolist(), [True, False])
|
|
1131
|
+
|
|
1132
|
+
def test_stop_gradient(self):
|
|
1133
|
+
def func(x):
|
|
1134
|
+
return mx.sum(2 * x + mx.stop_gradient(3 * x))
|
|
1135
|
+
|
|
1136
|
+
x = mx.array([0.0, 0.1, -3])
|
|
1137
|
+
expected = [2, 2, 2]
|
|
1138
|
+
|
|
1139
|
+
self.assertListEqual(mx.grad(func)(x).tolist(), expected)
|
|
1140
|
+
|
|
1141
|
+
def test_kron(self):
|
|
1142
|
+
# Basic vector test
|
|
1143
|
+
x = mx.array([1, 2])
|
|
1144
|
+
y = mx.array([3, 4])
|
|
1145
|
+
z = mx.kron(x, y)
|
|
1146
|
+
self.assertEqual(z.tolist(), [3, 4, 6, 8])
|
|
1147
|
+
|
|
1148
|
+
# Basic matrix test
|
|
1149
|
+
x = mx.array([[1, 2], [3, 4]])
|
|
1150
|
+
y = mx.array([[0, 5], [6, 7]])
|
|
1151
|
+
z = mx.kron(x, y)
|
|
1152
|
+
self.assertEqual(
|
|
1153
|
+
z.tolist(),
|
|
1154
|
+
[[0, 5, 0, 10], [6, 7, 12, 14], [0, 15, 0, 20], [18, 21, 24, 28]],
|
|
1155
|
+
)
|
|
1156
|
+
|
|
1157
|
+
# Test with different dimensions
|
|
1158
|
+
x = mx.array([1, 2]) # (2,)
|
|
1159
|
+
y = mx.array([[3, 4], [5, 6]]) # (2, 2)
|
|
1160
|
+
z = mx.kron(x, y)
|
|
1161
|
+
self.assertEqual(z.tolist(), [[3, 4, 6, 8], [5, 6, 10, 12]])
|
|
1162
|
+
|
|
1163
|
+
# Test with empty array
|
|
1164
|
+
x = mx.array([])
|
|
1165
|
+
y = mx.array([1, 2])
|
|
1166
|
+
with self.assertRaises(ValueError):
|
|
1167
|
+
mx.kron(x, y)
|
|
1168
|
+
|
|
1169
|
+
def test_take(self):
|
|
1170
|
+
# Shape: 4 x 3 x 2
|
|
1171
|
+
l = [
|
|
1172
|
+
[[1, 3], [-2, -2], [-3, -2]],
|
|
1173
|
+
[[2, 4], [-3, 2], [-4, -2]],
|
|
1174
|
+
[[2, 3], [2, 4], [2, 1]],
|
|
1175
|
+
[[1, -5], [3, -1], [2, 3]],
|
|
1176
|
+
]
|
|
1177
|
+
|
|
1178
|
+
a = mx.array(l)
|
|
1179
|
+
a_npy = np.array(l)
|
|
1180
|
+
|
|
1181
|
+
indices = [0, -1]
|
|
1182
|
+
flatten_take = mx.take(a, mx.array(indices)).tolist()
|
|
1183
|
+
flatten_take_expected = np.take(a_npy, np.array(indices)).tolist()
|
|
1184
|
+
self.assertListEqual(flatten_take, flatten_take_expected)
|
|
1185
|
+
|
|
1186
|
+
indices = [-1, 2, 0]
|
|
1187
|
+
axis_take = mx.take(a, mx.array(indices), axis=0).tolist()
|
|
1188
|
+
axis_take_expected = np.take(a_npy, np.array(indices), axis=0).tolist()
|
|
1189
|
+
self.assertListEqual(axis_take, axis_take_expected)
|
|
1190
|
+
|
|
1191
|
+
indices = [0, 0, -2]
|
|
1192
|
+
axis_take = mx.take(a, mx.array(indices), axis=1).tolist()
|
|
1193
|
+
axis_take_expected = np.take(a_npy, np.array(indices), axis=1).tolist()
|
|
1194
|
+
self.assertListEqual(axis_take, axis_take_expected)
|
|
1195
|
+
|
|
1196
|
+
indices = [0, -1, -1]
|
|
1197
|
+
axis_take = mx.take(a, mx.array(indices), axis=-1).tolist()
|
|
1198
|
+
axis_take_expected = np.take(a_npy, np.array(indices), axis=-1).tolist()
|
|
1199
|
+
self.assertListEqual(axis_take, axis_take_expected)
|
|
1200
|
+
|
|
1201
|
+
a_npy = np.arange(8 * 8 * 8, dtype=np.int32)
|
|
1202
|
+
a_npy = a_npy.reshape((8, 8, 8))
|
|
1203
|
+
idx_npy = np.arange(6, dtype=np.uint32)
|
|
1204
|
+
idx_npy = idx_npy.reshape((2, 3))
|
|
1205
|
+
a_mlx = mx.array(a_npy)
|
|
1206
|
+
idx_mlx = mx.array(idx_npy)
|
|
1207
|
+
|
|
1208
|
+
a_npy_taken = np.take(a_npy, idx_npy)
|
|
1209
|
+
a_mlx_taken = mx.take(a_mlx, idx_mlx)
|
|
1210
|
+
self.assertEqual(a_npy_taken.shape, a_mlx_taken.shape)
|
|
1211
|
+
self.assertListEqual(a_npy_taken.tolist(), a_mlx_taken.tolist())
|
|
1212
|
+
|
|
1213
|
+
a_npy_taken = np.take(a_npy, idx_npy, axis=0)
|
|
1214
|
+
a_mlx_taken = mx.take(a_mlx, idx_mlx, axis=0)
|
|
1215
|
+
self.assertEqual(a_npy_taken.shape, a_mlx_taken.shape)
|
|
1216
|
+
self.assertListEqual(a_npy_taken.tolist(), a_mlx_taken.tolist())
|
|
1217
|
+
|
|
1218
|
+
a_npy_taken = np.take(a_npy, idx_npy, axis=1)
|
|
1219
|
+
a_mlx_taken = mx.take(a_mlx, idx_mlx, axis=1)
|
|
1220
|
+
self.assertEqual(a_npy_taken.shape, a_mlx_taken.shape)
|
|
1221
|
+
self.assertListEqual(a_npy_taken.tolist(), a_mlx_taken.tolist())
|
|
1222
|
+
|
|
1223
|
+
a_npy_taken = np.take(a_npy, idx_npy, axis=2)
|
|
1224
|
+
a_mlx_taken = mx.take(a_mlx, idx_mlx, axis=2)
|
|
1225
|
+
self.assertEqual(a_npy_taken.shape, a_mlx_taken.shape)
|
|
1226
|
+
self.assertListEqual(a_npy_taken.tolist(), a_mlx_taken.tolist())
|
|
1227
|
+
|
|
1228
|
+
# Take with integer index
|
|
1229
|
+
a = mx.arange(8).reshape(2, 4)
|
|
1230
|
+
out = mx.take(a, 1, axis=0)
|
|
1231
|
+
self.assertTrue(mx.array_equal(out, mx.array([4, 5, 6, 7])))
|
|
1232
|
+
out = mx.take(a, 1, axis=1)
|
|
1233
|
+
self.assertTrue(mx.array_equal(out, mx.array([1, 5])))
|
|
1234
|
+
|
|
1235
|
+
# Take with multi-dim scalar preserves dims
|
|
1236
|
+
out = mx.take(a, mx.array(1), axis=0)
|
|
1237
|
+
self.assertEqual(out.shape, (4,))
|
|
1238
|
+
|
|
1239
|
+
out = mx.take(a, mx.array([1]), axis=0)
|
|
1240
|
+
self.assertEqual(out.shape, (1, 4))
|
|
1241
|
+
|
|
1242
|
+
out = mx.take(a, mx.array([[1]]), axis=0)
|
|
1243
|
+
self.assertEqual(out.shape, (1, 1, 4))
|
|
1244
|
+
|
|
1245
|
+
# Take from empty array works in some cases
|
|
1246
|
+
a = mx.zeros((4, 0))
|
|
1247
|
+
out = mx.take(a, mx.array([1, 2]), axis=0)
|
|
1248
|
+
self.assertEqual(out.shape, (2, 0))
|
|
1249
|
+
self.assertEqual(out.dtype, a.dtype)
|
|
1250
|
+
with self.assertRaises(ValueError):
|
|
1251
|
+
mx.take(a, mx.array([[1]]), axis=1)
|
|
1252
|
+
|
|
1253
|
+
def test_take_along_axis(self):
|
|
1254
|
+
a_np = np.arange(8).reshape(2, 2, 2)
|
|
1255
|
+
a_mlx = mx.array(a_np)
|
|
1256
|
+
idx_np = np.array([1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0])
|
|
1257
|
+
idx_mlx = mx.array(idx_np)
|
|
1258
|
+
|
|
1259
|
+
for ax in [None, 0, 1, 2]:
|
|
1260
|
+
if ax == None:
|
|
1261
|
+
shape = [-1]
|
|
1262
|
+
else:
|
|
1263
|
+
shape = [2] * 3
|
|
1264
|
+
shape[ax] = 3
|
|
1265
|
+
out_np = np.take_along_axis(a_np, idx_np.reshape(shape), axis=ax)
|
|
1266
|
+
out_mlx = mx.take_along_axis(a_mlx, mx.reshape(idx_mlx, shape), axis=ax)
|
|
1267
|
+
self.assertTrue(np.array_equal(out_np, np.array(out_mlx)))
|
|
1268
|
+
|
|
1269
|
+
def test_put_along_axis(self):
    """Compare mx.put_along_axis against np.put_along_axis.

    Covers axis=None (flattened permutation of indices) and each axis of a
    (2, 2, 4) int32 array, then a broadcast-scalar update, then the empty
    array edge case.
    """
    for ax in [None, 0, 1, 2]:
        a_np = np.arange(16).reshape(2, 2, 4).astype(np.int32)
        a_mlx = mx.array(a_np)

        # Fix: identity comparison for None (PEP 8), not `== None`.
        if ax is None:
            idx_np = np.random.permutation(a_np.size)
            values_np = np.random.randint(low=0, high=100, size=(16,))
        else:
            shape = list(a_np.shape)
            shape[ax] = 2
            # Unique indices along `ax`, expanded and broadcast so that
            # indices/values share the target shape.
            idx_np = np.random.choice(a_np.shape[ax], replace=False, size=(2,))
            idx_np = np.expand_dims(idx_np, list(range(1, 2 - ax + 1)))
            idx_np = np.broadcast_to(idx_np, shape)
            values_np = np.random.randint(low=0, high=100, size=shape)

        # Fix: np.ndarray.astype returns a new array; the original code
        # discarded the result, making both casts no-ops.
        idx_np = idx_np.astype(np.int32)
        values_np = values_np.astype(a_np.dtype)

        idx_mlx = mx.array(idx_np)
        values_mlx = mx.array(values_np)

        # NumPy mutates a_np in place; MLX returns a new array.
        np.put_along_axis(a_np, idx_np, values_np, axis=ax)
        out_mlx = mx.put_along_axis(a_mlx, idx_mlx, values_mlx, axis=ax)
        self.assertTrue(np.array_equal(a_np, out_mlx))

    # Scalar update broadcast against a (1, 1, 4, 1) index along axis -2.
    source = mx.zeros((1, 1, 8, 32))
    indices = mx.array([0, 2, 4, 5]).reshape((1, 1, 4, 1))
    update = mx.array(1.0)

    out_mlx = mx.put_along_axis(source, indices, update, axis=-2)
    out_np = np.array(source)
    np.put_along_axis(out_np, np.array(indices), np.array(update), axis=-2)
    self.assertTrue(np.array_equal(out_np, np.array(out_mlx)))

    # Empty input: result must evaluate cleanly and stay empty.
    a = mx.array([], mx.float32)
    b = mx.put_along_axis(a, a, a, axis=None)
    mx.eval(b)
    self.assertEqual(b.size, 0)
    self.assertEqual(b.shape, a.shape)
def test_split(self):
    """mx.split: equal splits, axis selection, explicit split points, errors."""
    vec = mx.array([1, 2, 3])
    for i, piece in enumerate(mx.split(vec, 3)):
        self.assertEqual(piece.item(), i + 1)

    mat = mx.array([[1, 2], [3, 4], [5, 6]])
    for piece, want in zip(mx.split(mat, 3, axis=0), ([[1, 2]], [[3, 4]], [[5, 6]])):
        self.assertEqual(piece.tolist(), want)

    # Splitting along a nonexistent axis must raise.
    with self.assertRaises(ValueError):
        mx.split(mat, 3, axis=2)

    seq = mx.arange(8)
    for piece, want in zip(
        mx.split(seq, [1, 5]), ([0], [1, 2, 3, 4], [5, 6, 7])
    ):
        self.assertEqual(piece.tolist(), want)
def test_arange_overload_dispatch(self):
    """Exercise mx.arange argument validation and keyword dispatch.

    NaN/inf bounds or steps raise ValueError; a range that would overflow
    int32 raises TypeError; the positional/keyword overloads (stop-only,
    start+stop, stop+step) produce the expected sequences.
    """
    with self.assertRaises(ValueError):
        a = mx.arange(float("nan"), 1, 5)
    with self.assertRaises(ValueError):
        a = mx.arange(0, float("nan"), 5)
    with self.assertRaises(ValueError):
        a = mx.arange(0, 2, float("nan"))
    with self.assertRaises(ValueError):
        a = mx.arange(0, float("inf"), float("inf"))
    with self.assertRaises(ValueError):
        a = mx.arange(float("inf"), 1, float("inf"))
    with self.assertRaises(ValueError):
        a = mx.arange(float("inf"), 1, 5)
    with self.assertRaises(TypeError):
        # One past INT_MAX elements cannot be represented as int32.
        INT_MAX = 2147483647
        a = mx.arange(0, INT_MAX + 1, 1)

    a = mx.arange(5)
    expected = [0, 1, 2, 3, 4]
    self.assertListEqual(a.tolist(), expected)

    a = mx.arange(1, 5)
    expected = [1, 2, 3, 4]
    self.assertListEqual(a.tolist(), expected)

    # A single positional argument is `stop`, even with a negative step.
    a = mx.arange(-3, step=-1)
    expected = [0, -1, -2]
    self.assertListEqual(a.tolist(), expected)

    a = mx.arange(stop=2, step=0.5)
    expected = [0, 0.5, 1.0, 1.5]
    self.assertListEqual(a.tolist(), expected)

    # `start` without `stop` is not a valid overload.
    with self.assertRaises(TypeError):
        mx.arange(start=1, step=2)

    a = mx.arange(stop=3)
    expected = [0, 1, 2]
    self.assertListEqual(a.tolist(), expected)
def test_arange_inferred_dtype(self):
    """Check dtype inference for mx.arange.

    Any float among start/stop/step promotes the result to float32;
    all-int arguments give int32; an explicit dtype= always wins.
    """
    a = mx.arange(5)
    self.assertEqual(a.dtype, mx.int32)

    a = mx.arange(5.0)
    self.assertEqual(a.dtype, mx.float32)

    a = mx.arange(1, 3.0)
    self.assertEqual(a.dtype, mx.float32)

    a = mx.arange(1, 3, dtype=mx.float32)
    self.assertEqual(a.dtype, mx.float32)

    a = mx.arange(1, 5, 1)
    self.assertEqual(a.dtype, mx.int32)

    a = mx.arange(1.0, 5, 1)
    self.assertEqual(a.dtype, mx.float32)

    a = mx.arange(1, 5.0, 1)
    self.assertEqual(a.dtype, mx.float32)

    a = mx.arange(1, 5, 1.0)
    self.assertEqual(a.dtype, mx.float32)

    # Explicit dtype overrides the float arguments.
    a = mx.arange(1.0, 3.0, 0.2, dtype=mx.int32)
    self.assertEqual(a.dtype, mx.int32)
def test_arange_corner_cases_cast(self):
    """Corner cases of mx.arange with int casting and degenerate ranges.

    With dtype=mx.int32 and a fractional step, the element count comes from
    the float range but each value is truncated toward zero after the fact
    — hence runs of repeated integers.  Empty and single-element ranges
    (step overshooting, inverted bounds, infinite step) are also checked.
    """
    # 15 steps of 0.2 in [0, 3); each 0.0..2.8 truncates to 0? No — values
    # are computed then cast, and all land below 1 only for the first few;
    # the reference expectation here is the library's actual behavior:
    # every element truncates to 0 per the expected list below.
    a = mx.arange(0, 3, 0.2, dtype=mx.int32)
    expected = [0] * 15
    self.assertListEqual(a.tolist(), expected)
    self.assertEqual(a.dtype, mx.int32)

    a = mx.arange(-1, -4, -0.9, dtype=mx.int32)
    expected = [-1] * 4
    self.assertListEqual(a.tolist(), expected)
    self.assertEqual(a.dtype, mx.int32)

    a = mx.arange(-1, -20, -1.2, dtype=mx.int32)
    expected = [
        -1,
        -2,
        -3,
        -4,
        -5,
        -6,
        -7,
        -8,
        -9,
        -10,
        -11,
        -12,
        -13,
        -14,
        -15,
        -16,
    ]
    self.assertListEqual(a.tolist(), expected)
    self.assertEqual(a.dtype, mx.int32)

    # Step larger than the range yields a single element.
    a = mx.arange(0, 10, 100)
    expected = [0]
    self.assertListEqual(a.tolist(), expected)
    self.assertEqual(a.dtype, mx.int32)

    # Inverted bounds with a positive step yield an empty array.
    a = mx.arange(10, 0, 1)
    expected = []
    self.assertListEqual(a.tolist(), expected)

    a = mx.arange(10, 0, float("inf"))
    expected = []
    self.assertListEqual(a.tolist(), expected)

    a = mx.arange(0, 10, float("inf"))
    expected = [0]
    self.assertListEqual(a.tolist(), expected)

    a = mx.arange(0, -10, float("-inf"))
    expected = [0]
    self.assertListEqual(a.tolist(), expected)
def test_unary_ops(self):
    """Compare elementwise unary ops against NumPy for float16/float32.

    The input is positive uniform noise so log/sqrt are well defined.
    Tolerances are looser for float16.
    """

    def test_ops(npop, mlxop, x, y, atol, rtol):
        # Run the NumPy reference and the MLX op, force evaluation, compare.
        r_np = npop(x)
        r_mlx = mlxop(y)
        mx.eval(r_mlx)
        self.assertTrue(np.allclose(r_np, r_mlx, atol=atol, rtol=rtol))

    x = np.random.rand(18, 28, 38)
    for op in ["abs", "exp", "log", "square", "sqrt"]:
        with self.subTest(op=op):
            # (dtype name, atol, rtol) pairs per precision.
            float_dtypes = [("float16", 1e-3, 1e-3), ("float32", 1e-6, 1e-5)]

            for dtype, atol, rtol in float_dtypes:
                with self.subTest(dtype=dtype):
                    x_ = x.astype(getattr(np, dtype))
                    y_ = mx.array(x_)
                    test_ops(getattr(np, op), getattr(mx, op), x_, y_, atol, rtol)
def test_unary_ops_from_non_array(self):
    """Unary ops must accept non-mx inputs (Python scalars, NumPy arrays).

    Each op is applied to a plain float and to a NumPy float32 vector and
    compared against the NumPy result; equal_nan covers ops that are NaN
    at 0.5 domain edges (none here, but kept for safety).
    """
    unary_ops = [
        "abs",
        "exp",
        "log",
        "square",
        "sqrt",
        "sin",
        "cos",
        "tan",
        "sinh",
        "cosh",
        "tanh",
        "sign",
        "negative",
        "expm1",
        "arcsin",
        "arccos",
        "arctan",
        "arcsinh",
        "arctanh",
        "degrees",
        "radians",
        "log2",
        "log10",
        "log1p",
        "floor",
        "ceil",
        "conjugate",
    ]

    x = 0.5
    x_np = np.random.rand(10).astype(np.float32)
    for op in unary_ops:
        with self.subTest(op=op):
            # Test from scalar
            expected = getattr(np, op)(x)
            out = getattr(mx, op)(x)

            # Check close
            self.assertTrue(np.allclose(expected, out, equal_nan=True))

            # Test from NumPy
            expected = getattr(np, op)(x_np)
            out = getattr(mx, op)(x_np)

            # Check close
            self.assertTrue(np.allclose(expected, np.array(out), equal_nan=True))
def test_trig_ops(self):
    """Forward and VJP checks for trig and hyperbolic ops vs NumPy.

    Forward: float16/float32 real inputs, plus complex64, for sin/cos/tan
    and sinh/cosh/tanh; inverse (arc*) ops are fed through the forward op
    first so the input lies in the principal domain.  Gradients: MLX VJPs
    are compared against hand-written NumPy derivative formulas.
    """

    def test_ops(npop, mlxop, x, y, atol, rtol):
        r_np = npop(x)
        r_mlx = mlxop(y)
        mx.eval(r_mlx)

        self.assertTrue(
            np.allclose(r_np, r_mlx, atol=atol, rtol=rtol, equal_nan=True)
        )

    x = np.random.rand(9, 12, 18)
    xi = np.random.rand(9, 12, 18)
    base_ops = ["sin", "cos", "tan"]
    hyperbolic_ops = ["sinh", "cosh", "tanh"]
    all_fwd_ops = base_ops + hyperbolic_ops

    for op in all_fwd_ops:
        with self.subTest(op=op):
            float_dtypes = [("float16", 1e-3, 1e-3), ("float32", 1e-6, 1e-5)]

            for dtype, atol, rtol in float_dtypes:
                with self.subTest(dtype=dtype):
                    x_ = x.astype(getattr(np, dtype))
                    y_ = mx.array(x_)
                    test_ops(getattr(np, op), getattr(mx, op), x_, y_, atol, rtol)

        with self.subTest(op=op):
            dtype = "complex64"
            with self.subTest(dtype=dtype):
                x_ = x + 1.0j * xi
                x_ = x_.astype(getattr(np, dtype))
                y_ = mx.array(x_)
                test_ops(getattr(np, op), getattr(mx, op), x_, y_, 1e-5, 1e-5)

        with self.subTest(op="arc" + op):
            float_dtypes = [("float16", 1e-3, 1e-3), ("float32", 1e-6, 1e-5)]
            op_inv = "arc" + op

            for dtype, atol, rtol in float_dtypes:
                with self.subTest(dtype=dtype):
                    # Map the input through the forward op so the inverse
                    # op sees values in its valid domain.
                    np_op_fwd = getattr(np, op)
                    x_ = np_op_fwd(x).astype(getattr(np, dtype))
                    y_ = mx.array(x_)
                    test_ops(
                        getattr(np, op_inv), getattr(mx, op_inv), x_, y_, atol, rtol
                    )

    # Test grads
    np_vjp_funcs = {
        "sin": lambda primal, cotan: cotan * np.cos(primal),
        "cos": lambda primal, cotan: -cotan * np.sin(primal),
        "tan": lambda primal, cotan: cotan / (np.cos(primal) ** 2),
        "sinh": lambda primal, cotan: cotan * np.cosh(primal),
        "cosh": lambda primal, cotan: cotan * np.sinh(primal),
        "tanh": lambda primal, cotan: cotan / (np.cosh(primal) ** 2),
        "arcsin": lambda primal, cotan: cotan / np.sqrt(1.0 - primal**2),
        "arccos": lambda primal, cotan: -cotan / np.sqrt(1.0 - primal**2),
        "arctan": lambda primal, cotan: cotan / (1.0 + primal**2),
        "arctan2": lambda primal, cotan: cotan / (1.0 + primal**2),
        "arcsinh": lambda primal, cotan: cotan / np.sqrt(primal**2 + 1),
        "arccosh": lambda primal, cotan: cotan / np.sqrt(primal**2 - 1),
        "arctanh": lambda primal, cotan: cotan / (1.0 - primal**2),
    }
    with self.subTest(name="grads"):
        for op in all_fwd_ops:
            with self.subTest(op=op):
                primal_np = xi.astype(np.float32)
                primal_mx = mx.array(primal_np)
                x_ = x.astype(np.float32)
                y_ = mx.array(x_)
                # Rebind `op` so the lambdas below do not capture the
                # loop variable late (late-binding closure pitfall).
                op_ = op

                np_vjp = lambda x: np_vjp_funcs[op_](primal_np, x)
                mx_vjp = lambda x: mx.vjp(getattr(mx, op_), [primal_mx], [x])[1][0]
                test_ops(np_vjp, mx_vjp, x_, y_, 1e-5, 1e-5)

            with self.subTest(op="arc" + op):
                np_op_fwd = getattr(np, op)
                primal_np = np_op_fwd(xi).astype(np.float32)

                # To avoid divide by zero error
                if op == "cosh":
                    primal_np[np.isclose(primal_np, 1.0)] += 1e-3
                elif op == "cos":
                    primal_np[np.isclose(primal_np, 1.0)] -= 1e-3

                primal_mx = mx.array(primal_np)
                x_ = x.astype(np.float32)
                y_ = mx.array(x_)
                op_ = "arc" + op

                np_vjp = lambda x: np_vjp_funcs[op_](primal_np, x)
                mx_vjp = lambda x: mx.vjp(getattr(mx, op_), [primal_mx], [x])[1][0]
                test_ops(np_vjp, mx_vjp, x_, y_, 1e-5, 1e-5)
def test_binary_ops(self):
    """Compare elementwise binary ops against NumPy across dtypes.

    Each op is also exercised with broadcast shapes by slicing the first
    operand down to size 1 along each axis in turn.  Inputs are clamped
    away from zero so divide/power are well behaved; integer inputs are
    scaled up to avoid everything truncating to the same value.
    """

    def test_ops(npop, mlxop, x1, x2, y1, y2, atol):
        # Full-shape comparison.
        r_np = npop(x1, x2)
        r_mlx = mlxop(y1, y2)
        mx.eval(r_mlx)
        self.assertTrue(np.allclose(r_np, r_mlx, atol=atol))

        # Broadcast along axis 0.
        r_np = npop(x1[:1], x2)
        r_mlx = mlxop(y1[:1], y2)
        mx.eval(r_mlx)
        self.assertTrue(np.allclose(r_np, r_mlx, atol=atol))

        # Broadcast along axis 1.
        r_np = npop(x1[:, :1], x2)
        r_mlx = mlxop(y1[:, :1], y2)
        mx.eval(r_mlx)
        self.assertTrue(np.allclose(r_np, r_mlx, atol=atol))

        # Broadcast along axis 2.
        r_np = npop(x1[:, :, :1], x2)
        r_mlx = mlxop(y1[:, :, :1], y2)
        mx.eval(r_mlx)
        self.assertTrue(np.allclose(r_np, r_mlx, atol=atol))

    x1 = np.maximum(np.random.rand(18, 28, 38), 0.1)
    x2 = np.maximum(np.random.rand(18, 28, 38), 0.1)
    y1 = mx.array(x1)
    y2 = mx.array(x2)
    mx.eval(y1, y2)
    for op in [
        "add",
        "subtract",
        "multiply",
        "divide",
        "floor_divide",
        "maximum",
        "minimum",
        "power",
    ]:
        with self.subTest(op=op):
            int_dtypes = [
                "int8",
                "int16",
                "int32",
                "int64",
                "uint8",
                "uint16",
                "uint32",
                "uint64",
            ]
            float_dtypes = ["float16", "float32"]

            # Some ops are restricted: divide/power are float-only here,
            # floor_divide skips float16.
            dtypes = {
                "divide": float_dtypes,
                "power": float_dtypes,
                "floor_divide": ["float32"] + int_dtypes,
            }
            dtypes = dtypes.get(op, int_dtypes + float_dtypes)

            for dtype in dtypes:
                atol = 1e-3 if dtype == "float16" else 1e-6
                with self.subTest(dtype=dtype):
                    # Scale integer inputs so values span [1, 10].
                    m = 10 if dtype in int_dtypes else 1
                    x1_ = (x1 * m).astype(getattr(np, dtype))
                    x2_ = (x2 * m).astype(getattr(np, dtype))
                    y1_ = mx.array(x1_)
                    y2_ = mx.array(x2_)
                    test_ops(
                        getattr(np, op), getattr(mx, op), x1_, x2_, y1_, y2_, atol
                    )
def test_irregular_binary_ops(self):
    """Binary ops on non-contiguous operands vs NumPy.

    Covers randomly transposed pairs, randomly shaped broadcasts, and
    strided (step-2 sliced) operands for 2–5 dimensional arrays.
    """
    # Check transposed binary ops
    dims = [2, 3, 4, 5]
    size = 3
    trial_mul = 2
    np.random.seed(0)
    for d in dims:
        anp = np.random.randint(-20, 20, (size**d,)).reshape([size] * d)
        bnp = np.random.randint(-20, 20, (size**d,)).reshape([size] * d)
        for _ in range(trial_mul * d):
            amlx = mx.array(anp)
            bmlx = mx.array(bnp)
            a_t = np.random.permutation(d).tolist()
            b_t = np.random.permutation(d).tolist()
            outnp = np.add(anp.transpose(a_t), bnp.transpose(b_t))
            outmlx = mx.add(mx.transpose(amlx, a_t), mx.transpose(bmlx, b_t))
            self.assertTrue(np.array_equal(outnp, outmlx))

    # Check broadcast binary ops
    for d in dims:
        anp = np.random.randint(-20, 20, (size**d,)).reshape([size] * d)
        for n_bsx in range(d):
            bnp = np.random.randint(-20, 20, (size**n_bsx,)).reshape([size] * n_bsx)
            for _ in range(trial_mul * d):
                amlx = mx.array(anp)
                bmlx = mx.array(bnp)
                # Scatter the broadcast (size-1) axes to random positions.
                b_shape = [1] * (d - n_bsx) + [size] * n_bsx
                np.random.shuffle(b_shape)
                outnp = np.add(anp, bnp.reshape(b_shape))
                outmlx = mx.add(amlx, mx.reshape(bmlx, b_shape))
                self.assertTrue(np.array_equal(outnp, outmlx))

    # Check strided binary ops
    for d in dims:
        a = np.random.randint(-20, 20, (10,) * d)
        b = np.random.randint(-20, 20, (10,) * d)
        a_ = mx.array(a)
        b_ = mx.array(b)
        for t in permutations(range(d)):
            for s in range(d):
                # Step-2 slice along axis `s`, full slice elsewhere.
                idx = tuple(
                    [slice(None)] * s
                    + [slice(None, None, 2)]
                    + [slice(None)] * (d - s - 1)
                )
                c = a.transpose(t)[idx] + b[idx]
                c_ = mx.transpose(a_, t)[idx] + b_[idx]
                self.assertTrue(np.array_equal(c, c_))
def test_softmax(self):
    """mx.softmax vs a NumPy reference, plus numerical edge cases.

    Checks all axis combinations for float32/float16, -inf padding
    stability at several sizes, sliced and transposed inputs, the
    precise=True path for half precision, all -inf -> NaN behavior, and
    the scalar-input error.
    """
    cases = [(np.float32, 1e-6), (np.float16, 1e-3)]

    for dtype, atol in cases:
        a_npy = np.random.randn(16, 8, 32).astype(dtype)
        a_mlx = mx.array(a_npy)

        def np_softmax(x, axis):
            # Numerically stable reference: subtract the max first.
            ex = np.exp(x - np.max(x, axis=axis, keepdims=True))
            return ex / np.sum(ex, axis=axis, keepdims=True)

        for axes in (None, 0, 1, 2, (0, 1), (1, 2), (0, 2), (0, 1, 2)):
            b_npy = np_softmax(a_npy, axes)
            b_mlx = mx.softmax(a_mlx, axes)
            self.assertTrue(np.allclose(b_npy, b_mlx, atol=atol))

    # One finite entry among -infs must give a clean one-hot result,
    # across sizes that straddle internal kernel thresholds.
    for s in [100, 2049, 4097, 8193]:
        a = np.full(s, -np.inf)
        a[-1] = 0.0
        a = mx.softmax(mx.array(a))
        self.assertFalse(np.any(np.isnan(a)))
        self.assertTrue((a[:-1] < 1e-9).all())
        self.assertEqual(a[-1], 1)

    # Sliced inputs
    y = mx.random.uniform(shape=(8, 4))
    out = mx.softmax(y[:, 0:2], axis=-1)
    self.assertAlmostEqual(out.sum().item(), 8.0, 5)

    # Precise
    for t in [mx.float16, mx.bfloat16]:
        a = (10 * mx.random.normal(shape=(1024,))).astype(t)
        out_expect = mx.softmax(a.astype(mx.float32)).astype(t)
        out = mx.softmax(a, axis=-1, precise=True)
        self.assertTrue(mx.allclose(out_expect, out))

    # All Infs give NaNs
    for n in [127, 128, 129]:
        x = mx.full((n,), vals=-float("inf"))
        self.assertTrue(mx.all(mx.isnan(mx.softmax(x))))

    # Transposed inputs
    a = mx.random.uniform(shape=(32, 32, 32))
    b = mx.softmax(a, axis=-1)
    c = mx.softmax(a.swapaxes(0, 1), axis=-1).swapaxes(0, 1)
    self.assertEqual((b - c).abs().max().item(), 0.0)

    with self.assertRaises(ValueError):
        mx.softmax(mx.array(1.0), axis=-1)
def test_concatenate(self):
    """mx.concatenate vs np.concatenate, including transposed and
    zero-sized inputs and the shape-mismatch error."""
    a_npy = np.random.randn(32, 32, 32)
    b_npy = np.random.randn(32, 32, 32)
    a_mlx = mx.array(a_npy)
    b_mlx = mx.array(b_npy)

    # Cubic shapes stay valid under every permutation of the second input.
    for axis in (None, 0, 1, 2):
        for p in permutations([0, 1, 2]):
            c_npy = np.concatenate([a_npy, np.transpose(b_npy, p)], axis=axis)
            c_mlx = mx.concatenate([a_mlx, mx.transpose(b_mlx, p)], axis=axis)
            self.assertEqual(list(c_npy.shape), list(c_mlx.shape))
            self.assertTrue(np.allclose(c_npy, c_mlx, atol=1e-6))

    # Incompatible ranks must raise.
    with self.assertRaises(ValueError):
        a = mx.array([[1, 2], [1, 2], [1, 2]])
        b = mx.array([1, 2])
        mx.concatenate([a, b], axis=0)

    # Concatenate with 0-sized array
    a = mx.zeros((2, 0, 2))
    b = mx.zeros((2, 2, 2))
    out = mx.concatenate([a, b], axis=1)
    self.assertTrue(mx.array_equal(out, b))
def test_meshgrid(self):
    """mx.meshgrid vs np.meshgrid across input counts, dtypes, sparse
    output, mixed lengths, empty input, and 'ij' indexing."""
    x = mx.array([1, 2, 3], dtype=mx.int32)
    y = np.array([1, 2, 3], dtype=np.int32)

    # Test single input
    a_mlx = mx.meshgrid(x)
    a_np = np.meshgrid(y)
    self.assertEqualArray(a_mlx[0], mx.array(a_np[0]))

    # Test sparse
    a_mlx, b_mlx, c_mlx = mx.meshgrid(x, x, x, sparse=True)
    a_np, b_np, c_np = np.meshgrid(y, y, y, sparse=True)
    self.assertEqualArray(a_mlx, mx.array(a_np))
    self.assertEqualArray(b_mlx, mx.array(b_np))
    self.assertEqualArray(c_mlx, mx.array(c_np))

    # Test different lengths
    x = mx.array([1, 2], dtype=mx.int32)
    y = mx.array([1, 2, 3], dtype=mx.int32)
    z = np.array([1, 2], dtype=np.int32)
    w = np.array([1, 2, 3], dtype=np.int32)
    a_mlx, b_mlx = mx.meshgrid(x, y)
    a_np, b_np = np.meshgrid(z, w)
    self.assertEqualArray(a_mlx, mx.array(a_np))
    self.assertEqualArray(b_mlx, mx.array(b_np))

    # Test empty input
    x = mx.array([], dtype=mx.int32)
    y = np.array([], dtype=np.int32)
    a_mlx = mx.meshgrid(x)
    a_np = np.meshgrid(y)
    self.assertEqualArray(a_mlx[0], mx.array(a_np[0]))

    # Test float32 input
    x = mx.array([1.1, 2.2, 3.3], dtype=mx.float32)
    y = np.array([1.1, 2.2, 3.3], dtype=np.float32)
    a_mlx = mx.meshgrid(x, x, x)
    a_np = np.meshgrid(y, y, y)
    self.assertEqualArray(a_mlx[0], mx.array(a_np[0]))
    self.assertEqualArray(a_mlx[1], mx.array(a_np[1]))
    self.assertEqualArray(a_mlx[2], mx.array(a_np[2]))

    # Test ij indexing
    x = mx.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=mx.float32)
    y = np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32)
    a_mlx = mx.meshgrid(x, x, indexing="ij")
    a_np = np.meshgrid(y, y, indexing="ij")
    self.assertEqualArray(a_mlx[0], mx.array(a_np[0]))
    self.assertEqualArray(a_mlx[1], mx.array(a_np[1]))

    # Test different lengths, sparse, and ij indexing
    a = mx.array([1, 2], dtype=mx.int64)
    b = mx.array([1, 2, 3], dtype=mx.int64)
    c = mx.array([1, 2, 3, 4], dtype=mx.int64)
    x = np.array([1, 2], dtype=np.int64)
    y = np.array([1, 2, 3], dtype=np.int64)
    z = np.array([1, 2, 3, 4], dtype=np.int64)
    a_mlx, b_mlx, c_mlx = mx.meshgrid(a, b, c, sparse=True, indexing="ij")
    a_np, b_np, c_np = np.meshgrid(x, y, z, sparse=True, indexing="ij")
    self.assertEqualArray(a_mlx, mx.array(a_np))
    self.assertEqualArray(b_mlx, mx.array(b_np))
    self.assertEqualArray(c_mlx, mx.array(c_np))
def test_pad(self):
    """mx.pad vs np.pad for constant and edge modes, scalar/tuple pad
    width shorthand forms, and the VJP of a constant pad."""
    pad_width_and_values = [
        ([(1, 1), (1, 1), (1, 1)], 0),
        ([(1, 1), (1, 1), (1, 1)], 5),
        ([(3, 0), (0, 2), (5, 7)], 0),
        ([(3, 0), (0, 2), (5, 7)], -7),
        ([(0, 0), (0, 0), (0, 0)], 0),
    ]

    for pw, v in pad_width_and_values:
        with self.subTest(pad_width=pw, value=v):
            a_npy = np.random.randn(16, 16, 16).astype(np.float32)
            a_mlx = mx.array(a_npy)

            b_npy = np.pad(a_npy, pw, constant_values=v)
            b_mlx = mx.pad(a_mlx, pw, constant_values=v)

            self.assertEqual(list(b_npy.shape), list(b_mlx.shape))
            self.assertTrue(np.allclose(b_npy, b_mlx, atol=1e-6))

            b_npy = np.pad(a_npy, pw, mode="edge")
            b_mlx = mx.pad(a_mlx, pw, mode="edge")

            self.assertEqual(list(b_npy.shape), list(b_mlx.shape))
            self.assertTrue(np.allclose(b_npy, b_mlx, atol=1e-6))

    # Shorthand pad-width forms: scalar, 1-tuple, list, pair, nested pair.
    a = mx.zeros((1, 1, 1))
    self.assertEqual(mx.pad(a, 1).shape, (3, 3, 3))
    self.assertEqual(mx.pad(a, (1,)).shape, (3, 3, 3))
    self.assertEqual(mx.pad(a, [1]).shape, (3, 3, 3))
    self.assertEqual(mx.pad(a, (1, 2)).shape, (4, 4, 4))
    self.assertEqual(mx.pad(a, [(1, 2)]).shape, (4, 4, 4))
    self.assertEqual(mx.pad(a, ((1, 2),)).shape, (4, 4, 4))
    self.assertEqual(mx.pad(a, ((1, 2), (2, 1), (2, 2))).shape, (4, 4, 5))

    # Test grads
    a_fwd = mx.array(np.random.rand(16, 16).astype(np.float32))
    a_bwd = mx.ones((22, 22))
    f = lambda x: mx.pad(x, ((4, 2), (2, 4)))

    # The VJP of a pad is the cotangent cropped back to the input region.
    _, df = mx.vjp(f, [a_fwd], [a_bwd])
    self.assertTrue(mx.allclose(a_bwd[4:-2, 2:-4], df[0]).item())
def test_where(self):
    """mx.where vs np.where with scalar/array mixes, broadcasting, and a
    non-contiguous strided condition input."""
    self.assertCmpNumpy([True, mx.array([[1, 2], [3, 4]]), 1], mx.where, np.where)
    self.assertCmpNumpy([True, 1, mx.array([[1, 2], [3, 4]])], mx.where, np.where)
    self.assertCmpNumpy(
        [
            mx.array([[True, False], [False, True]]),
            mx.array([[1, 2], [3, 4]]),
            mx.array([5, 6]),
        ],
        mx.where,
        np.where,
    )

    # Check non-contiguous input with several dimensions
    shape = [1, 2, 2, 3, 3, 1]
    strides = [16, 4, 1, 4, 1, 1]
    x = mx.ones(shape=(1, 4, 4, 1))
    x = mx.as_strided(x, shape, strides)
    # No NaNs in the input, so where() must pass everything through.
    out = mx.where(mx.isnan(x), mx.nan, x)
    self.assertTrue(mx.allclose(out, mx.ones_like(out)))
def test_nan_to_num(self):
    """mx.nan_to_num vs np.nan_to_num with default and explicit
    nan/posinf/neginf replacements, for float32 and float16."""
    a = mx.array([6, float("inf"), 2, 0])
    out_mx = mx.nan_to_num(a)
    out_np = np.nan_to_num(a)
    self.assertTrue(np.allclose(out_mx, out_np))

    for t in [mx.float32, mx.float16]:
        # NOTE(review): `t` is not used by this first pair of calls — only
        # the astype(t) case below actually varies per dtype; confirm
        # whether the first block was meant to cast as well.
        a = mx.array([float("inf"), 6.9, float("nan"), float("-inf")])
        out_mx = mx.nan_to_num(a)
        out_np = np.nan_to_num(a)
        self.assertTrue(np.allclose(out_mx, out_np))

        a = mx.array([float("inf"), 6.9, float("nan"), float("-inf")]).astype(t)
        out_np = np.nan_to_num(a, nan=0.0, posinf=1000, neginf=-1000)
        out_mx = mx.nan_to_num(a, nan=0.0, posinf=1000, neginf=-1000)
        self.assertTrue(np.allclose(out_mx, out_np))
def test_as_strided(self):
    """mx.as_strided vs np.lib.stride_tricks.as_strided.

    MLX strides are in elements while NumPy strides are in bytes, hence
    the multiply-by-4 (float32) on the NumPy side.  Also checks a
    negative-stride view that reverses the array.
    """
    x_npy = np.random.randn(128).astype(np.float32)
    x_mlx = mx.array(x_npy)

    shapes = [(10, 10), (5, 5), (2, 20), (10,)]
    strides = [(3, 3), (7, 1), (1, 5), (4,)]
    for shape, stride in zip(shapes, strides):
        for offset in [0, 1, 3]:
            y_npy = np.lib.stride_tricks.as_strided(
                x_npy[offset:], shape, np.multiply(stride, 4)
            )
            y_mlx = mx.as_strided(x_mlx, shape, stride, offset)
            self.assertTrue(np.array_equal(y_npy, y_mlx))

    # Stride of -1 from the last element yields the reversed array.
    x = mx.random.uniform(shape=(32,))
    y = mx.as_strided(x, (x.size,), (-1,), x.size - 1)
    self.assertTrue(mx.array_equal(y, x[::-1]))
def test_logcumsumexp(self):
    """mx.logcumsumexp vs np.logaddexp.accumulate, including -inf/inf
    edge cases and complex inputs (via the module-level np_cumlogaddexp
    reference helper)."""
    npop = np.logaddexp.accumulate
    mxop = mx.logcumsumexp

    a_npy = np.random.randn(32, 32, 32).astype(np.float32)
    a_mlx = mx.array(a_npy)

    for axis in (0, 1, 2):
        c_npy = npop(a_npy, axis=axis)
        c_mlx = mxop(a_mlx, axis=axis)
        self.assertTrue(np.allclose(c_npy, c_mlx, rtol=1e-3, atol=1e-3))

    # Degenerate inputs: all -inf, a lone finite value, a lone +inf.
    edge_cases_npy = [
        np.float32([-float("inf")] * 8),
        np.float32([-float("inf"), 0, -float("inf")]),
        np.float32([-float("inf"), float("inf"), -float("inf")]),
    ]
    edge_cases_mlx = [mx.array(a) for a in edge_cases_npy]

    for a_npy, a_mlx in zip(edge_cases_npy, edge_cases_mlx):
        c_npy = npop(a_npy, axis=0)
        c_mlx = mxop(a_mlx, axis=0)
        self.assertTrue(np.allclose(c_npy, c_mlx, rtol=1e-3, atol=1e-3))

    # Complex tests

    a_npy = np.array([1, 2, 3]).astype(np.float32) + 1j
    a_mlx = mx.array(a_npy)
    # np_cumlogaddexp is a helper defined elsewhere in this file
    # (np.logaddexp.accumulate does not support complex input).
    c_npy = np_cumlogaddexp(a_npy, axis=-1)
    c_mlx = mxop(a_mlx, axis=-1)
    self.assertTrue(np.allclose(c_npy, c_mlx, rtol=1e-3, atol=1e-3))
def test_scans(self):
    """Scan ops (cumsum/cumprod/cummax/cummin) vs NumPy and self-consistency.

    Sections: float32 and complex comparisons against NumPy; integer
    dtypes; inclusive/exclusive and reverse variants cross-checked by
    shifting/reversing the inclusive result; half-precision cumsum against
    a lower-triangular matmul reference; sizes straddling kernel
    thresholds; non-contiguous scans; and a buffer-donation peak-memory
    check.
    """
    a_npy = np.random.randn(32, 32, 32).astype(np.float32)
    a_mlx = mx.array(a_npy)

    for op in ["cumsum", "cumprod"]:
        npop = getattr(np, op)
        mxop = getattr(mx, op)
        for axis in (None, 0, 1, 2):
            c_npy = npop(a_npy, axis=axis)
            c_mlx = mxop(a_mlx, axis=axis)
            self.assertTrue(np.allclose(c_npy, c_mlx, rtol=1e-3, atol=1e-3))

    # Complex test

    a_npy = np.random.randn(32, 32, 32).astype(np.float32) + 0.5j
    a_mlx = mx.array(a_npy)

    for op in ["cumsum", "cumprod"]:
        npop = getattr(np, op)
        mxop = getattr(mx, op)
        for axis in (None, 0, 1, 2):
            c_npy = npop(a_npy, axis=axis)
            c_mlx = mxop(a_mlx, axis=axis)
            self.assertTrue(np.allclose(c_npy, c_mlx, rtol=1e-3, atol=1e-3))

    a_mlx = mx.random.randint(shape=(32, 32, 32), low=-100, high=100)
    for dt in [mx.int32, mx.int64]:
        mxx = a_mlx.astype(dt)
        npx = np.array(mxx)
        for op in ["cumsum", "cumprod"]:
            npop = getattr(np, op)
            mxop = getattr(mx, op)
            for axis in (None, 0, 1, 2):
                # Pin the NumPy accumulator dtype to match MLX's.
                c_npy = npop(npx, axis=axis, dtype=npx.dtype)
                c_mlx = mxop(mxx, axis=axis)
                self.assertTrue(np.array_equal(c_npy, c_mlx))

    a_mlx = mx.random.randint(shape=(32, 32, 32), low=-100, high=100)
    for op in ["cumsum", "cumprod", "cummax", "cummin"]:
        mxop = getattr(mx, op)
        # Exclusive scan equals the inclusive scan shifted by one.
        c1 = mxop(a_mlx, axis=2)
        c2 = mxop(a_mlx, axis=2, inclusive=False, reverse=False)
        self.assertTrue(mx.array_equal(c1[:, :, :-1], c2[:, :, 1:]))
        c1 = mxop(a_mlx, axis=1)
        c2 = mxop(a_mlx, axis=1, inclusive=False, reverse=False)
        self.assertTrue(mx.array_equal(c1[:, :-1, :], c2[:, 1:, :]))
        c1 = mxop(a_mlx, axis=0)
        c2 = mxop(a_mlx, axis=0, inclusive=False, reverse=False)
        self.assertTrue(mx.array_equal(c1[:-1, :, :], c2[1:, :, :]))

        # Reverse scan equals scanning the flipped array, flipped back.
        rev_idx = mx.arange(31, -1, -1)
        c1 = mxop(a_mlx[:, :, rev_idx], axis=2)[:, :, rev_idx]
        c2 = mxop(a_mlx, axis=2, inclusive=True, reverse=True)
        self.assertTrue(mx.array_equal(c1, c2))
        c1 = mxop(a_mlx[:, rev_idx, :], axis=1)[:, rev_idx, :]
        c2 = mxop(a_mlx, axis=1, inclusive=True, reverse=True)
        self.assertTrue(mx.array_equal(c1, c2))
        c1 = mxop(a_mlx[rev_idx, :, :], axis=0)[rev_idx, :, :]
        c2 = mxop(a_mlx, axis=0, inclusive=True, reverse=True)
        self.assertTrue(mx.array_equal(c1, c2))

        # Reverse + exclusive: shifted flipped inclusive scan.
        rev_idx = mx.arange(31, -1, -1)
        c1 = mxop(a_mlx[:, :, rev_idx], axis=2)[:, :, rev_idx][:, :, 1:]
        c2 = mxop(a_mlx, axis=2, inclusive=False, reverse=True)[:, :, :-1]
        self.assertTrue(mx.array_equal(c1, c2))
        c1 = mxop(a_mlx[:, rev_idx, :], axis=1)[:, rev_idx, :][:, 1:, :]
        c2 = mxop(a_mlx, axis=1, inclusive=False, reverse=True)[:, :-1, :]
        self.assertTrue(mx.array_equal(c1, c2))
        c1 = mxop(a_mlx[rev_idx, :, :], axis=0)[rev_idx, :, :][1:, :, :]
        c2 = mxop(a_mlx, axis=0, inclusive=False, reverse=True)[:-1, :, :]
        self.assertTrue(mx.array_equal(c1, c2))

    # Half precision: cumsum equals a lower-triangular weighted sum.
    a = mx.random.uniform(shape=(8, 32))
    mat = mx.tri(32)
    for t in [mx.float16, mx.bfloat16]:
        a_t = a.astype(t)
        mat_t = mat.astype(t)
        out = mx.cumsum(a_t, axis=-1)
        expected = (mat_t * a_t[:, None, :]).sum(axis=-1)
        self.assertTrue(mx.allclose(out, expected, rtol=0.02, atol=1e-3))
    # Sizes around powers of two exercise different kernel paths.
    sizes = [1023, 1024, 1025, 2047, 2048, 2049]
    for s in sizes:
        a = mx.ones((s,), mx.int32)
        out = mx.cumsum(a)
        expected = mx.arange(1, s + 1, dtype=mx.int32)
        self.assertTrue(mx.array_equal(expected, out))

        # non-contiguous scan
        a = mx.ones((s, 2), mx.int32)
        out = mx.cumsum(a, axis=0)
        expected = mx.repeat(expected[:, None], 2, axis=1)
        self.assertTrue(mx.array_equal(expected, out))

    # Test donation
    def fn(its):
        # Chained cumsums should donate buffers, so peak memory must not
        # grow with the number of iterations.
        x = mx.ones((32,))
        for _ in range(its):
            x = mx.cumsum(x)
        return x

    mx.synchronize()
    mx.eval(fn(2))
    mx.synchronize()
    mem2 = mx.get_peak_memory()
    mx.eval(fn(4))
    mx.synchronize()
    mem4 = mx.get_peak_memory()
    self.assertEqual(mem2, mem4)
def test_squeeze_expand(self):
|
|
2111
|
+
a = mx.zeros((2, 1, 2, 1))
|
|
2112
|
+
self.assertEqual(mx.squeeze(a).shape, (2, 2))
|
|
2113
|
+
self.assertEqual(mx.squeeze(a, 1).shape, (2, 2, 1))
|
|
2114
|
+
self.assertEqual(mx.squeeze(a, [1, 3]).shape, (2, 2))
|
|
2115
|
+
self.assertEqual(a.squeeze().shape, (2, 2))
|
|
2116
|
+
self.assertEqual(a.squeeze(1).shape, (2, 2, 1))
|
|
2117
|
+
self.assertEqual(a.squeeze([1, 3]).shape, (2, 2))
|
|
2118
|
+
|
|
2119
|
+
a = mx.zeros((2, 2))
|
|
2120
|
+
self.assertEqual(mx.squeeze(a).shape, (2, 2))
|
|
2121
|
+
|
|
2122
|
+
self.assertEqual(mx.expand_dims(a, 0).shape, (1, 2, 2))
|
|
2123
|
+
self.assertEqual(mx.expand_dims(a, (0, 1)).shape, (1, 1, 2, 2))
|
|
2124
|
+
self.assertEqual(mx.expand_dims(a, [0, -1]).shape, (1, 2, 2, 1))
|
|
2125
|
+
|
|
2126
|
+
def test_sort(self):
|
|
2127
|
+
shape = (6, 4, 10)
|
|
2128
|
+
tests = product(
|
|
2129
|
+
("int32", "float32"), # type
|
|
2130
|
+
(None, 0, 1, 2), # axis
|
|
2131
|
+
(True, False), # strided
|
|
2132
|
+
)
|
|
2133
|
+
for dtype, axis, strided in tests:
|
|
2134
|
+
with self.subTest(dtype=dtype, axis=axis, strided=strided):
|
|
2135
|
+
np.random.seed(0)
|
|
2136
|
+
np_dtype = getattr(np, dtype)
|
|
2137
|
+
a_np = np.random.uniform(0, 100, size=shape).astype(np_dtype)
|
|
2138
|
+
a_mx = mx.array(a_np)
|
|
2139
|
+
if strided:
|
|
2140
|
+
a_mx = a_mx[::2, :, ::2]
|
|
2141
|
+
a_np = a_np[::2, :, ::2]
|
|
2142
|
+
|
|
2143
|
+
b_np = np.sort(a_np, axis=axis)
|
|
2144
|
+
b_mx = mx.sort(a_mx, axis=axis)
|
|
2145
|
+
|
|
2146
|
+
self.assertTrue(np.array_equal(b_np, b_mx))
|
|
2147
|
+
self.assertEqual(b_mx.dtype, a_mx.dtype)
|
|
2148
|
+
|
|
2149
|
+
c_np = np.argsort(a_np, axis=axis)
|
|
2150
|
+
c_mx = mx.argsort(a_mx, axis=axis)
|
|
2151
|
+
d_np = np.take_along_axis(a_np, c_np, axis=axis)
|
|
2152
|
+
d_mx = mx.take_along_axis(a_mx, c_mx, axis=axis)
|
|
2153
|
+
|
|
2154
|
+
self.assertTrue(np.array_equal(d_np, d_mx))
|
|
2155
|
+
self.assertEqual(c_mx.dtype, mx.uint32)
|
|
2156
|
+
|
|
2157
|
+
# Set random seed
|
|
2158
|
+
np.random.seed(0)
|
|
2159
|
+
|
|
2160
|
+
# Test multi-block sort
|
|
2161
|
+
for strided in (False, True):
|
|
2162
|
+
with self.subTest(strided=strided):
|
|
2163
|
+
a_np = np.random.normal(size=(32769,)).astype(np.float32)
|
|
2164
|
+
a_mx = mx.array(a_np)
|
|
2165
|
+
|
|
2166
|
+
if strided:
|
|
2167
|
+
a_mx = a_mx[::3]
|
|
2168
|
+
a_np = a_np[::3]
|
|
2169
|
+
|
|
2170
|
+
b_np = np.sort(a_np)
|
|
2171
|
+
b_mx = mx.sort(a_mx)
|
|
2172
|
+
|
|
2173
|
+
self.assertTrue(np.array_equal(b_np, b_mx))
|
|
2174
|
+
self.assertEqual(b_mx.dtype, a_mx.dtype)
|
|
2175
|
+
|
|
2176
|
+
# Test multi-dum multi-block sort
|
|
2177
|
+
a_np = np.random.normal(size=(2, 4, 32769)).astype(np.float32)
|
|
2178
|
+
a_mx = mx.array(a_np)
|
|
2179
|
+
|
|
2180
|
+
if strided:
|
|
2181
|
+
a_mx = a_mx[..., ::3]
|
|
2182
|
+
a_np = a_np[..., ::3]
|
|
2183
|
+
|
|
2184
|
+
b_np = np.sort(a_np, axis=-1)
|
|
2185
|
+
b_mx = mx.sort(a_mx, axis=-1)
|
|
2186
|
+
|
|
2187
|
+
self.assertTrue(np.array_equal(b_np, b_mx))
|
|
2188
|
+
self.assertEqual(b_mx.dtype, a_mx.dtype)
|
|
2189
|
+
|
|
2190
|
+
a_np = np.random.normal(size=(2, 32769, 4)).astype(np.float32)
|
|
2191
|
+
a_mx = mx.array(a_np)
|
|
2192
|
+
|
|
2193
|
+
if strided:
|
|
2194
|
+
a_mx = a_mx[:, ::3]
|
|
2195
|
+
a_np = a_np[:, ::3]
|
|
2196
|
+
|
|
2197
|
+
b_np = np.sort(a_np, axis=1)
|
|
2198
|
+
b_mx = mx.sort(a_mx, axis=1)
|
|
2199
|
+
|
|
2200
|
+
self.assertTrue(np.array_equal(b_np, b_mx))
|
|
2201
|
+
self.assertEqual(b_mx.dtype, a_mx.dtype)
|
|
2202
|
+
|
|
2203
|
+
# test 0 strides
|
|
2204
|
+
a_np = np.array([1, 0, 2, 1, 3, 0, 4, 0])
|
|
2205
|
+
a_mx = mx.array(a_np)
|
|
2206
|
+
b_np = np.broadcast_to(a_np, (16, 8))
|
|
2207
|
+
b_mx = mx.broadcast_to(a_mx, (16, 8))
|
|
2208
|
+
mx.eval(b_mx)
|
|
2209
|
+
for axis in (0, 1):
|
|
2210
|
+
c_np = np.sort(b_np, axis=axis)
|
|
2211
|
+
c_mx = mx.sort(b_mx, axis=axis)
|
|
2212
|
+
self.assertTrue(np.array_equal(c_np, c_mx))
|
|
2213
|
+
self.assertEqual(b_mx.dtype, c_mx.dtype)
|
|
2214
|
+
|
|
2215
|
+
# Test very large array
|
|
2216
|
+
if mx.default_device() == mx.gpu:
|
|
2217
|
+
a_np = np.random.normal(20, 20, size=(2**22)).astype(np.float32)
|
|
2218
|
+
a_mx = mx.array(a_np)
|
|
2219
|
+
|
|
2220
|
+
b_np = np.sort(a_np)
|
|
2221
|
+
b_mx = mx.sort(a_mx)
|
|
2222
|
+
self.assertTrue(np.array_equal(b_np, b_mx))
|
|
2223
|
+
|
|
2224
|
+
# 1D strided sort
|
|
2225
|
+
a = mx.array([[4, 3], [2, 1], [5, 4], [3, 2]])
|
|
2226
|
+
out = mx.argsort(a[:, 1])
|
|
2227
|
+
expected = mx.array([1, 3, 0, 2], dtype=mx.uint32)
|
|
2228
|
+
self.assertTrue(mx.array_equal(out, expected))
|
|
2229
|
+
|
|
2230
|
+
# Test array with singleton dim
|
|
2231
|
+
out = mx.sort(mx.array([1, 2, 3]), axis=0)
|
|
2232
|
+
self.assertTrue(mx.array_equal(out, mx.array([1, 2, 3])))
|
|
2233
|
+
|
|
2234
|
+
x = np.random.uniform(size=(1, 4, 8, 1)).astype(np.float32)
|
|
2235
|
+
y_np = np.sort(x, axis=-2)
|
|
2236
|
+
y_mx = mx.sort(mx.array(x), axis=-2)
|
|
2237
|
+
self.assertTrue(np.array_equal(y_np, y_mx))
|
|
2238
|
+
|
|
2239
|
+
# Test many segments
|
|
2240
|
+
a = mx.random.uniform(shape=(512, 128))
|
|
2241
|
+
y_mx = mx.sort(a, axis=-1)
|
|
2242
|
+
y_np = np.sort(np.array(a), axis=-1)
|
|
2243
|
+
self.assertTrue(np.array_equal(y_np, y_mx))
|
|
2244
|
+
|
|
2245
|
+
def test_partition(self):
|
|
2246
|
+
shape = (3, 4, 5)
|
|
2247
|
+
for dtype in ("int32", "float32"):
|
|
2248
|
+
for axis in (None, 0, 1, 2):
|
|
2249
|
+
for kth in (-2, 0, 2):
|
|
2250
|
+
with self.subTest(dtype=dtype, axis=axis, kth=kth):
|
|
2251
|
+
np.random.seed(0)
|
|
2252
|
+
np_dtype = getattr(np, dtype)
|
|
2253
|
+
a_np = np.random.uniform(0, 100, size=shape).astype(np_dtype)
|
|
2254
|
+
a_mx = mx.array(a_np)
|
|
2255
|
+
|
|
2256
|
+
b_np = np.partition(a_np, kth, axis=axis)
|
|
2257
|
+
b_mx = mx.partition(a_mx, kth, axis=axis)
|
|
2258
|
+
|
|
2259
|
+
c_np = np.take(b_np, (kth,), axis=axis)
|
|
2260
|
+
c_mx = np.take(np.array(b_mx), (kth,), axis=axis)
|
|
2261
|
+
|
|
2262
|
+
self.assertTrue(np.array_equal(c_np, c_mx))
|
|
2263
|
+
self.assertEqual(b_mx.dtype, a_mx.dtype)
|
|
2264
|
+
|
|
2265
|
+
if kth >= 0:
|
|
2266
|
+
top_k_mx = mx.topk(a_mx, kth, axis=axis)
|
|
2267
|
+
top_k_np = np.take(
|
|
2268
|
+
np.partition(a_np, -kth, axis=axis), (-kth,), axis=axis
|
|
2269
|
+
)
|
|
2270
|
+
self.assertTrue(np.all(top_k_np <= top_k_mx))
|
|
2271
|
+
self.assertEqual(top_k_mx.dtype, a_mx.dtype)
|
|
2272
|
+
N = a_mx.shape[axis] if axis is not None else a_mx.size
|
|
2273
|
+
M = top_k_mx.shape[axis or 0]
|
|
2274
|
+
self.assertEqual(M, (kth + N) % N)
|
|
2275
|
+
|
|
2276
|
+
def test_argpartition(self):
|
|
2277
|
+
x = mx.broadcast_to(mx.array([1, 2, 3]), (2, 3))
|
|
2278
|
+
out = mx.argpartition(x, kth=1, axis=0)
|
|
2279
|
+
expected = mx.array([[0, 0, 0], [1, 1, 1]])
|
|
2280
|
+
self.assertTrue(mx.array_equal(out, expected))
|
|
2281
|
+
|
|
2282
|
+
x = mx.array([[1, 2], [3, 4]]).T
|
|
2283
|
+
out = mx.argpartition(x, kth=1, axis=0)
|
|
2284
|
+
expected = mx.array([[0, 0], [1, 1]])
|
|
2285
|
+
self.assertTrue(mx.array_equal(out, expected))
|
|
2286
|
+
|
|
2287
|
+
@unittest.skipIf(
|
|
2288
|
+
os.getenv("LOW_MEMORY", None) is not None,
|
|
2289
|
+
"This test requires a lot of memory",
|
|
2290
|
+
)
|
|
2291
|
+
def test_large_binary(self):
|
|
2292
|
+
a = mx.ones([1000, 2147484], mx.int8)
|
|
2293
|
+
b = mx.ones([2147484], mx.int8)
|
|
2294
|
+
self.assertEqual((a + b)[0, 0].item(), 2)
|
|
2295
|
+
|
|
2296
|
+
def test_eye(self):
|
|
2297
|
+
self.assertCmpNumpy([3], mx.eye, np.eye)
|
|
2298
|
+
# Test for non-square matrix
|
|
2299
|
+
self.assertCmpNumpy([3, 4], mx.eye, np.eye)
|
|
2300
|
+
# Test with positive k parameter
|
|
2301
|
+
self.assertCmpNumpy([3, 4], mx.eye, np.eye, k=1)
|
|
2302
|
+
# Test with negative k parameter
|
|
2303
|
+
self.assertCmpNumpy([5, 6], mx.eye, np.eye, k=-2)
|
|
2304
|
+
|
|
2305
|
+
def test_stack(self):
|
|
2306
|
+
a = mx.ones((2,))
|
|
2307
|
+
np_a = np.ones((2,))
|
|
2308
|
+
b = mx.ones((2,))
|
|
2309
|
+
np_b = np.ones((2,))
|
|
2310
|
+
|
|
2311
|
+
# One dimensional stack axis=0
|
|
2312
|
+
c = mx.stack([a, b])
|
|
2313
|
+
np_c = np.stack([np_a, np_b])
|
|
2314
|
+
self.assertTrue(np.array_equal(c, np_c))
|
|
2315
|
+
|
|
2316
|
+
# One dimensional stack axis=1
|
|
2317
|
+
c = mx.stack([a, b], axis=1)
|
|
2318
|
+
np_c = np.stack([np_a, np_b], axis=1)
|
|
2319
|
+
self.assertTrue(np.array_equal(c, np_c))
|
|
2320
|
+
|
|
2321
|
+
a = mx.ones((1, 2))
|
|
2322
|
+
np_a = np.ones((1, 2))
|
|
2323
|
+
b = mx.ones((1, 2))
|
|
2324
|
+
np_b = np.ones((1, 2))
|
|
2325
|
+
|
|
2326
|
+
# Two dimensional stack axis=0
|
|
2327
|
+
c = mx.stack([a, b])
|
|
2328
|
+
np_c = np.stack([np_a, np_b])
|
|
2329
|
+
self.assertTrue(np.array_equal(c, np_c))
|
|
2330
|
+
|
|
2331
|
+
# Two dimensional stack axis=1
|
|
2332
|
+
c = mx.stack([a, b], axis=1)
|
|
2333
|
+
np_c = np.stack([np_a, np_b], axis=1)
|
|
2334
|
+
self.assertTrue(np.array_equal(c, np_c))
|
|
2335
|
+
|
|
2336
|
+
def test_flatten(self):
|
|
2337
|
+
x = mx.zeros([2, 3, 4])
|
|
2338
|
+
self.assertEqual(mx.flatten(x).shape, (2 * 3 * 4,))
|
|
2339
|
+
self.assertEqual(mx.flatten(x, start_axis=1).shape, (2, 3 * 4))
|
|
2340
|
+
self.assertEqual(mx.flatten(x, end_axis=1).shape, (2 * 3, 4))
|
|
2341
|
+
self.assertEqual(x.flatten().shape, (2 * 3 * 4,))
|
|
2342
|
+
self.assertEqual(x.flatten(start_axis=1).shape, (2, 3 * 4))
|
|
2343
|
+
self.assertEqual(x.flatten(end_axis=1).shape, (2 * 3, 4))
|
|
2344
|
+
|
|
2345
|
+
def test_clip(self):
|
|
2346
|
+
a = np.array([1, 4, 3, 8, 5], np.int32)
|
|
2347
|
+
expected = np.clip(a, 2, 6)
|
|
2348
|
+
clipped = mx.clip(mx.array(a), 2, 6)
|
|
2349
|
+
self.assertTrue(np.array_equal(clipped, expected))
|
|
2350
|
+
|
|
2351
|
+
a = np.array([-1, 1, 0, 5], np.int32)
|
|
2352
|
+
expected = np.clip(a, 0, None)
|
|
2353
|
+
clipped = mx.clip(mx.array(a), 0, None)
|
|
2354
|
+
self.assertTrue(np.array_equal(clipped, expected))
|
|
2355
|
+
|
|
2356
|
+
a = np.array([2, 3, 4, 5], np.int32)
|
|
2357
|
+
expected = np.clip(a, None, 4)
|
|
2358
|
+
clipped = mx.clip(mx.array(a), None, 4)
|
|
2359
|
+
self.assertTrue(np.array_equal(clipped, expected))
|
|
2360
|
+
|
|
2361
|
+
mins = np.array([3, 1, 5, 5])
|
|
2362
|
+
a = np.array([2, 3, 4, 5], np.int32)
|
|
2363
|
+
expected = np.clip(a, mins, 4)
|
|
2364
|
+
clipped = mx.clip(mx.array(a), mx.array(mins), 4)
|
|
2365
|
+
self.assertTrue(np.array_equal(clipped, expected))
|
|
2366
|
+
|
|
2367
|
+
maxs = np.array([5, -1, 2, 9])
|
|
2368
|
+
a = np.array([2, 3, 4, 5], np.int32)
|
|
2369
|
+
expected = np.clip(a, mins, maxs)
|
|
2370
|
+
clipped = mx.clip(mx.array(a), mx.array(mins), mx.array(maxs))
|
|
2371
|
+
self.assertTrue(np.array_equal(clipped, expected))
|
|
2372
|
+
|
|
2373
|
+
# Check clip output types
|
|
2374
|
+
a = mx.array([1, 2, 3], mx.int16)
|
|
2375
|
+
out_t = mx.clip(a, a_min=0, a_max=5).dtype
|
|
2376
|
+
self.assertEqual(out_t, mx.int16)
|
|
2377
|
+
|
|
2378
|
+
out_t = mx.clip(a, a_min=0.0, a_max=5).dtype
|
|
2379
|
+
self.assertEqual(out_t, mx.float32)
|
|
2380
|
+
|
|
2381
|
+
a = mx.array([1, 2, 3], mx.float16)
|
|
2382
|
+
out_t = mx.clip(a, a_min=0.0, a_max=5).dtype
|
|
2383
|
+
self.assertEqual(out_t, mx.float16)
|
|
2384
|
+
|
|
2385
|
+
a = mx.array([1, 2, 3], mx.float16)
|
|
2386
|
+
out_t = mx.clip(a, a_min=0.0, a_max=mx.array(1.0)).dtype
|
|
2387
|
+
self.assertEqual(out_t, mx.float32)
|
|
2388
|
+
|
|
2389
|
+
def test_linspace(self):
|
|
2390
|
+
# Test default num = 50
|
|
2391
|
+
a = mx.linspace(0, 1)
|
|
2392
|
+
expected = mx.array(np.linspace(0, 1))
|
|
2393
|
+
self.assertEqualArray(a, expected)
|
|
2394
|
+
|
|
2395
|
+
# Test int64 dtype
|
|
2396
|
+
b = mx.linspace(0, 10, 5, mx.int64)
|
|
2397
|
+
expected = mx.array(np.linspace(0, 10, 5, dtype=int))
|
|
2398
|
+
self.assertEqualArray(b, expected)
|
|
2399
|
+
|
|
2400
|
+
# Test negative sequence with float start and stop
|
|
2401
|
+
c = mx.linspace(-2.7, -0.7, 7)
|
|
2402
|
+
expected = mx.array(np.linspace(-2.7, -0.7, 7))
|
|
2403
|
+
self.assertEqualArray(c, expected)
|
|
2404
|
+
|
|
2405
|
+
# Test irrational step size of 1/9
|
|
2406
|
+
d = mx.linspace(0, 1, 10)
|
|
2407
|
+
expected = mx.array(np.linspace(0, 1, 10))
|
|
2408
|
+
self.assertEqualArray(d, expected)
|
|
2409
|
+
|
|
2410
|
+
# Test num equal to 1
|
|
2411
|
+
d = mx.linspace(1, 10, 1)
|
|
2412
|
+
expected = mx.array(np.linspace(1, 10, 1))
|
|
2413
|
+
self.assertEqualArray(d, expected)
|
|
2414
|
+
|
|
2415
|
+
# Ensure that the start and stop are always the ones provided
|
|
2416
|
+
ranges = mx.random.normal((16, 2)).tolist()
|
|
2417
|
+
nums = (2 + mx.random.uniform(shape=(16,)) * 10).astype(mx.uint32).tolist()
|
|
2418
|
+
for (a, b), n in zip(ranges, nums):
|
|
2419
|
+
d = mx.linspace(a, b, n).tolist()
|
|
2420
|
+
self.assertEqual(d[0], a)
|
|
2421
|
+
self.assertEqual(d[-1], b)
|
|
2422
|
+
|
|
2423
|
+
def test_repeat(self):
|
|
2424
|
+
# Setup data for the tests
|
|
2425
|
+
data = mx.array([[[13, 3], [16, 6]], [[14, 4], [15, 5]], [[11, 1], [12, 2]]])
|
|
2426
|
+
# Test repeat 0 times
|
|
2427
|
+
self.assertCmpNumpy([data, 0], mx.repeat, np.repeat)
|
|
2428
|
+
# Test repeat along axis 0
|
|
2429
|
+
self.assertCmpNumpy([data, 2], mx.repeat, np.repeat, axis=0)
|
|
2430
|
+
# Test repeat along axis 1
|
|
2431
|
+
self.assertCmpNumpy([data, 2], mx.repeat, np.repeat, axis=1)
|
|
2432
|
+
# Test repeat along the last axis (default)
|
|
2433
|
+
self.assertCmpNumpy([data, 2], mx.repeat, np.repeat)
|
|
2434
|
+
# Test repeat with a 1D array along axis 0
|
|
2435
|
+
self.assertCmpNumpy([mx.array([1, 3, 2]), 3], mx.repeat, np.repeat, axis=0)
|
|
2436
|
+
# Test repeat with a 2D array along axis 0
|
|
2437
|
+
self.assertCmpNumpy(
|
|
2438
|
+
[mx.array([[1, 2, 3], [4, 5, 4], [0, 1, 2]]), 2],
|
|
2439
|
+
mx.repeat,
|
|
2440
|
+
np.repeat,
|
|
2441
|
+
axis=0,
|
|
2442
|
+
)
|
|
2443
|
+
|
|
2444
|
+
def test_tensordot(self):
|
|
2445
|
+
# No fp16 matmuls on common cpu backend
|
|
2446
|
+
if not self.is_apple_silicon:
|
|
2447
|
+
dtypes = [mx.float32]
|
|
2448
|
+
else:
|
|
2449
|
+
dtypes = [mx.float16, mx.float32]
|
|
2450
|
+
for dtype in dtypes:
|
|
2451
|
+
with self.subTest(dtype=dtype):
|
|
2452
|
+
self.assertCmpNumpy(
|
|
2453
|
+
[(3, 4, 5), (4, 3, 2)],
|
|
2454
|
+
mx.tensordot,
|
|
2455
|
+
np.tensordot,
|
|
2456
|
+
dtype=dtype,
|
|
2457
|
+
axes=([1, 0], [0, 1]),
|
|
2458
|
+
)
|
|
2459
|
+
self.assertCmpNumpy(
|
|
2460
|
+
[(3, 4, 5), (4, 5, 6)],
|
|
2461
|
+
mx.tensordot,
|
|
2462
|
+
np.tensordot,
|
|
2463
|
+
dtype=dtype,
|
|
2464
|
+
axes=2,
|
|
2465
|
+
)
|
|
2466
|
+
self.assertCmpNumpy(
|
|
2467
|
+
[(3, 5, 4, 6), (6, 4, 5, 3)],
|
|
2468
|
+
mx.tensordot,
|
|
2469
|
+
np.tensordot,
|
|
2470
|
+
dtype=dtype,
|
|
2471
|
+
axes=([2, 1, 3], [1, 2, 0]),
|
|
2472
|
+
)
|
|
2473
|
+
|
|
2474
|
+
def test_inner(self):
|
|
2475
|
+
self.assertCmpNumpy([(3,), (3,)], mx.inner, np.inner)
|
|
2476
|
+
self.assertCmpNumpy([(1, 1, 2), (3, 2)], mx.inner, np.inner)
|
|
2477
|
+
self.assertCmpNumpy([(2, 3, 4), (4,)], mx.inner, np.inner)
|
|
2478
|
+
|
|
2479
|
+
def test_outer(self):
|
|
2480
|
+
self.assertCmpNumpy([(3,), (3,)], mx.outer, np.outer)
|
|
2481
|
+
self.assertCmpNumpy(
|
|
2482
|
+
[
|
|
2483
|
+
mx.ones(
|
|
2484
|
+
5,
|
|
2485
|
+
),
|
|
2486
|
+
mx.linspace(-2, 2, 5),
|
|
2487
|
+
],
|
|
2488
|
+
mx.outer,
|
|
2489
|
+
np.outer,
|
|
2490
|
+
)
|
|
2491
|
+
self.assertCmpNumpy(
|
|
2492
|
+
[
|
|
2493
|
+
1j * mx.linspace(2, -2, 5),
|
|
2494
|
+
mx.ones(
|
|
2495
|
+
5,
|
|
2496
|
+
),
|
|
2497
|
+
],
|
|
2498
|
+
mx.outer,
|
|
2499
|
+
np.outer,
|
|
2500
|
+
)
|
|
2501
|
+
|
|
2502
|
+
def test_divmod(self):
|
|
2503
|
+
# A few sizes for the inputs with and without broadcasting
|
|
2504
|
+
sizes = [
|
|
2505
|
+
((1,), (1,)),
|
|
2506
|
+
((1,), (10,)),
|
|
2507
|
+
((10,), (1,)),
|
|
2508
|
+
((3,), (3,)),
|
|
2509
|
+
((2, 2, 2), (1, 2, 1)),
|
|
2510
|
+
((2, 1, 2), (1, 2, 1)),
|
|
2511
|
+
((2, 2, 2, 2), (2, 2, 2, 2)),
|
|
2512
|
+
]
|
|
2513
|
+
types = [np.uint16, np.uint32, np.int32, np.float16, np.float32]
|
|
2514
|
+
for s1, s2 in sizes:
|
|
2515
|
+
for t in types:
|
|
2516
|
+
a_np = np.random.uniform(1, 100, size=s1).astype(t)
|
|
2517
|
+
b_np = np.random.uniform(1, 100, size=s2).astype(t)
|
|
2518
|
+
np_out = np.divmod(a_np, b_np)
|
|
2519
|
+
mx_out = mx.divmod(mx.array(a_np), mx.array(b_np))
|
|
2520
|
+
self.assertTrue(
|
|
2521
|
+
np.allclose(np_out[0], mx_out[0]), msg=f"Shapes {s1} {s2}, Type {t}"
|
|
2522
|
+
)
|
|
2523
|
+
|
|
2524
|
+
def test_tile(self):
|
|
2525
|
+
self.assertCmpNumpy([(2,), [2]], mx.tile, np.tile)
|
|
2526
|
+
self.assertCmpNumpy([(2, 3, 4), [2]], mx.tile, np.tile)
|
|
2527
|
+
self.assertCmpNumpy([(2, 3, 4), [2, 1]], mx.tile, np.tile)
|
|
2528
|
+
self.assertCmpNumpy(
|
|
2529
|
+
[
|
|
2530
|
+
(2, 3, 4),
|
|
2531
|
+
[
|
|
2532
|
+
2,
|
|
2533
|
+
2,
|
|
2534
|
+
],
|
|
2535
|
+
],
|
|
2536
|
+
mx.tile,
|
|
2537
|
+
np.tile,
|
|
2538
|
+
)
|
|
2539
|
+
self.assertCmpNumpy([(3,), [2, 2, 2]], mx.tile, np.tile)
|
|
2540
|
+
|
|
2541
|
+
def test_empty_matmuls(self):
|
|
2542
|
+
a = mx.array([])
|
|
2543
|
+
b = mx.array([])
|
|
2544
|
+
self.assertEqual(mx.inner(a, b).item(), 0.0)
|
|
2545
|
+
|
|
2546
|
+
a = mx.zeros((10, 0))
|
|
2547
|
+
b = mx.zeros((0, 10))
|
|
2548
|
+
out = a @ b
|
|
2549
|
+
self.assertTrue(mx.array_equal(out, mx.zeros((10, 10))))
|
|
2550
|
+
|
|
2551
|
+
def test_diagonal(self):
|
|
2552
|
+
x = mx.array(
|
|
2553
|
+
[
|
|
2554
|
+
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]],
|
|
2555
|
+
[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]],
|
|
2556
|
+
]
|
|
2557
|
+
)
|
|
2558
|
+
expected = [[0, 13], [4, 17], [8, 21]]
|
|
2559
|
+
|
|
2560
|
+
self.assertListEqual(mx.diagonal(x, 0, -1, 0).tolist(), expected)
|
|
2561
|
+
|
|
2562
|
+
expected = [[1, 14], [5, 18], [9, 22]]
|
|
2563
|
+
self.assertListEqual(mx.diagonal(x, -1, 2, 0).tolist(), expected)
|
|
2564
|
+
|
|
2565
|
+
def test_diag(self):
|
|
2566
|
+
# Test 1D input
|
|
2567
|
+
x = mx.array([1, 2, 3, 4])
|
|
2568
|
+
expected = mx.array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]])
|
|
2569
|
+
result = mx.diag(x)
|
|
2570
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2571
|
+
|
|
2572
|
+
# Test 1D with offset
|
|
2573
|
+
x = mx.array([2, 6])
|
|
2574
|
+
result = mx.diag(x, k=5)
|
|
2575
|
+
expected = mx.array(np.diag(x, k=5))
|
|
2576
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2577
|
+
|
|
2578
|
+
# Test 2D input
|
|
2579
|
+
x = mx.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
|
|
2580
|
+
expected = mx.array([1, 5, 9])
|
|
2581
|
+
result = mx.diag(x)
|
|
2582
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2583
|
+
|
|
2584
|
+
# Test with offset
|
|
2585
|
+
expected = mx.array([2, 6])
|
|
2586
|
+
result = mx.diag(x, 1)
|
|
2587
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2588
|
+
|
|
2589
|
+
# Test non-square
|
|
2590
|
+
x = mx.array([[1, 2, 3], [4, 5, 6]])
|
|
2591
|
+
result = mx.diag(x)
|
|
2592
|
+
expected = mx.array(np.diag(x))
|
|
2593
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2594
|
+
|
|
2595
|
+
result = mx.diag(x, k=10)
|
|
2596
|
+
expected = mx.array(np.diag(x, k=10))
|
|
2597
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2598
|
+
|
|
2599
|
+
result = mx.diag(x, k=-10)
|
|
2600
|
+
expected = mx.array(np.diag(x, k=-10))
|
|
2601
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2602
|
+
|
|
2603
|
+
result = mx.diag(x, k=-1)
|
|
2604
|
+
expected = mx.array(np.diag(x, k=-1))
|
|
2605
|
+
self.assertTrue(mx.array_equal(result, expected))
|
|
2606
|
+
|
|
2607
|
+
def test_trace(self):
|
|
2608
|
+
a_mx = mx.arange(9, dtype=mx.int64).reshape((3, 3))
|
|
2609
|
+
a_np = np.arange(9, dtype=np.int64).reshape((3, 3))
|
|
2610
|
+
|
|
2611
|
+
# Test 2D array
|
|
2612
|
+
result = mx.trace(a_mx)
|
|
2613
|
+
expected = np.trace(a_np)
|
|
2614
|
+
self.assertEqualArray(result, mx.array(expected))
|
|
2615
|
+
|
|
2616
|
+
# Test dtype
|
|
2617
|
+
result = mx.trace(a_mx, dtype=mx.float16)
|
|
2618
|
+
expected = np.trace(a_np, dtype=np.float16)
|
|
2619
|
+
self.assertEqualArray(result, mx.array(expected))
|
|
2620
|
+
|
|
2621
|
+
# Test offset
|
|
2622
|
+
result = mx.trace(a_mx, offset=1)
|
|
2623
|
+
expected = np.trace(a_np, offset=1)
|
|
2624
|
+
self.assertEqualArray(result, mx.array(expected))
|
|
2625
|
+
|
|
2626
|
+
# Test axis1 and axis2
|
|
2627
|
+
b_mx = mx.arange(27, dtype=mx.int64).reshape(3, 3, 3)
|
|
2628
|
+
b_np = np.arange(27, dtype=np.int64).reshape(3, 3, 3)
|
|
2629
|
+
|
|
2630
|
+
result = mx.trace(b_mx, axis1=1, axis2=2)
|
|
2631
|
+
expected = np.trace(b_np, axis1=1, axis2=2)
|
|
2632
|
+
self.assertEqualArray(result, mx.array(expected))
|
|
2633
|
+
|
|
2634
|
+
# Test offset, axis1, axis2, and dtype
|
|
2635
|
+
result = mx.trace(b_mx, offset=1, axis1=1, axis2=2, dtype=mx.float32)
|
|
2636
|
+
expected = np.trace(b_np, offset=1, axis1=1, axis2=2, dtype=np.float32)
|
|
2637
|
+
self.assertEqualArray(result, mx.array(expected))
|
|
2638
|
+
|
|
2639
|
+
def test_atleast_1d(self):
|
|
2640
|
+
# Test 1D input
|
|
2641
|
+
arrays = [
|
|
2642
|
+
[1],
|
|
2643
|
+
[1, 2, 3],
|
|
2644
|
+
[1, 2, 3, 4],
|
|
2645
|
+
[[1], [2], [3]],
|
|
2646
|
+
[[1, 2], [3, 4]],
|
|
2647
|
+
[[1, 2, 3], [4, 5, 6]],
|
|
2648
|
+
[[[[1]], [[2]], [[3]]]],
|
|
2649
|
+
]
|
|
2650
|
+
|
|
2651
|
+
mx_arrays = [mx.atleast_1d(mx.array(x)) for x in arrays]
|
|
2652
|
+
atleast_arrays = mx.atleast_1d(*mx_arrays)
|
|
2653
|
+
|
|
2654
|
+
for i, array in enumerate(arrays):
|
|
2655
|
+
mx_res = mx.atleast_1d(mx.array(array))
|
|
2656
|
+
np_res = np.atleast_1d(np.array(array))
|
|
2657
|
+
self.assertEqual(mx_res.shape, np_res.shape)
|
|
2658
|
+
self.assertEqual(mx_res.ndim, np_res.ndim)
|
|
2659
|
+
self.assertTrue(mx.array_equal(mx_res, atleast_arrays[i]))
|
|
2660
|
+
|
|
2661
|
+
def test_atleast_2d(self):
|
|
2662
|
+
# Test 1D input
|
|
2663
|
+
arrays = [
|
|
2664
|
+
[1],
|
|
2665
|
+
[1, 2, 3],
|
|
2666
|
+
[1, 2, 3, 4],
|
|
2667
|
+
[[1], [2], [3]],
|
|
2668
|
+
[[1, 2], [3, 4]],
|
|
2669
|
+
[[1, 2, 3], [4, 5, 6]],
|
|
2670
|
+
[[[[1]], [[2]], [[3]]]],
|
|
2671
|
+
]
|
|
2672
|
+
|
|
2673
|
+
mx_arrays = [mx.atleast_2d(mx.array(x)) for x in arrays]
|
|
2674
|
+
atleast_arrays = mx.atleast_2d(*mx_arrays)
|
|
2675
|
+
|
|
2676
|
+
for i, array in enumerate(arrays):
|
|
2677
|
+
mx_res = mx.atleast_2d(mx.array(array))
|
|
2678
|
+
np_res = np.atleast_2d(np.array(array))
|
|
2679
|
+
self.assertEqual(mx_res.shape, np_res.shape)
|
|
2680
|
+
self.assertEqual(mx_res.ndim, np_res.ndim)
|
|
2681
|
+
self.assertTrue(mx.array_equal(mx_res, atleast_arrays[i]))
|
|
2682
|
+
|
|
2683
|
+
def test_atleast_3d(self):
|
|
2684
|
+
# Test 1D input
|
|
2685
|
+
arrays = [
|
|
2686
|
+
[1],
|
|
2687
|
+
[1, 2, 3],
|
|
2688
|
+
[1, 2, 3, 4],
|
|
2689
|
+
[[1], [2], [3]],
|
|
2690
|
+
[[1, 2], [3, 4]],
|
|
2691
|
+
[[1, 2, 3], [4, 5, 6]],
|
|
2692
|
+
[[[[1]], [[2]], [[3]]]],
|
|
2693
|
+
]
|
|
2694
|
+
|
|
2695
|
+
mx_arrays = [mx.atleast_3d(mx.array(x)) for x in arrays]
|
|
2696
|
+
atleast_arrays = mx.atleast_3d(*mx_arrays)
|
|
2697
|
+
|
|
2698
|
+
for i, array in enumerate(arrays):
|
|
2699
|
+
mx_res = mx.atleast_3d(mx.array(array))
|
|
2700
|
+
np_res = np.atleast_3d(np.array(array))
|
|
2701
|
+
self.assertEqual(mx_res.shape, np_res.shape)
|
|
2702
|
+
self.assertEqual(mx_res.ndim, np_res.ndim)
|
|
2703
|
+
self.assertTrue(mx.array_equal(mx_res, atleast_arrays[i]))
|
|
2704
|
+
|
|
2705
|
+
def test_issubdtype(self):
|
|
2706
|
+
self.assertTrue(mx.issubdtype(mx.bfloat16, mx.inexact))
|
|
2707
|
+
|
|
2708
|
+
cats = [
|
|
2709
|
+
"complexfloating",
|
|
2710
|
+
"floating",
|
|
2711
|
+
"inexact",
|
|
2712
|
+
"signedinteger",
|
|
2713
|
+
"unsignedinteger",
|
|
2714
|
+
"integer",
|
|
2715
|
+
"number",
|
|
2716
|
+
"generic",
|
|
2717
|
+
"bool_",
|
|
2718
|
+
"uint8",
|
|
2719
|
+
"uint16",
|
|
2720
|
+
"uint32",
|
|
2721
|
+
"uint64",
|
|
2722
|
+
"int8",
|
|
2723
|
+
"int16",
|
|
2724
|
+
"int32",
|
|
2725
|
+
"int64",
|
|
2726
|
+
"float16",
|
|
2727
|
+
"float32",
|
|
2728
|
+
"complex64",
|
|
2729
|
+
]
|
|
2730
|
+
|
|
2731
|
+
for a in cats:
|
|
2732
|
+
for b in cats:
|
|
2733
|
+
self.assertEqual(
|
|
2734
|
+
mx.issubdtype(getattr(mx, a), getattr(mx, b)),
|
|
2735
|
+
np.issubdtype(getattr(np, a), getattr(np, b)),
|
|
2736
|
+
f"mx and np don't aggree on {a}, {b}",
|
|
2737
|
+
)
|
|
2738
|
+
|
|
2739
|
+
def test_bitwise_ops(self):
|
|
2740
|
+
types = [
|
|
2741
|
+
mx.uint8,
|
|
2742
|
+
mx.uint16,
|
|
2743
|
+
mx.uint32,
|
|
2744
|
+
mx.uint64,
|
|
2745
|
+
mx.int8,
|
|
2746
|
+
mx.int16,
|
|
2747
|
+
mx.int32,
|
|
2748
|
+
mx.int64,
|
|
2749
|
+
]
|
|
2750
|
+
a = mx.random.randint(0, 4096, (1000,))
|
|
2751
|
+
b = mx.random.randint(0, 4096, (1000,))
|
|
2752
|
+
for op in ["bitwise_and", "bitwise_or", "bitwise_xor"]:
|
|
2753
|
+
for t in types:
|
|
2754
|
+
a_mlx = a.astype(t)
|
|
2755
|
+
b_mlx = b.astype(t)
|
|
2756
|
+
a_np = np.array(a_mlx)
|
|
2757
|
+
b_np = np.array(b_mlx)
|
|
2758
|
+
out_mlx = getattr(mx, op)(a_mlx, b_mlx)
|
|
2759
|
+
out_np = getattr(np, op)(a_np, b_np)
|
|
2760
|
+
self.assertTrue(np.array_equal(np.array(out_mlx), out_np))
|
|
2761
|
+
for op in ["left_shift", "right_shift"]:
|
|
2762
|
+
for t in types:
|
|
2763
|
+
a_mlx = a.astype(t)
|
|
2764
|
+
b_mlx = mx.random.randint(0, t.size, (1000,)).astype(t)
|
|
2765
|
+
a_np = np.array(a_mlx)
|
|
2766
|
+
b_np = np.array(b_mlx)
|
|
2767
|
+
out_mlx = getattr(mx, op)(a_mlx, b_mlx)
|
|
2768
|
+
out_np = getattr(np, op)(a_np, b_np)
|
|
2769
|
+
self.assertTrue(np.array_equal(np.array(out_mlx), out_np))
|
|
2770
|
+
|
|
2771
|
+
for t in types:
|
|
2772
|
+
a_mlx = a.astype(t)
|
|
2773
|
+
a_np = np.array(a_mlx)
|
|
2774
|
+
|
|
2775
|
+
out_mlx = ~a_mlx
|
|
2776
|
+
out_np = ~a_np
|
|
2777
|
+
self.assertTrue(np.array_equal(np.array(out_mlx), out_np))
|
|
2778
|
+
|
|
2779
|
+
out_mlx = mx.bitwise_invert(a_mlx)
|
|
2780
|
+
out_np = mx.bitwise_invert(a_np)
|
|
2781
|
+
self.assertTrue(np.array_equal(np.array(out_mlx), out_np))
|
|
2782
|
+
|
|
2783
|
+
# Check broadcasting
|
|
2784
|
+
a = mx.ones((3, 1, 5), dtype=mx.bool_)
|
|
2785
|
+
b = mx.zeros((1, 2, 5), dtype=mx.bool_)
|
|
2786
|
+
c = a | b
|
|
2787
|
+
self.assertEqual(c.shape, (3, 2, 5))
|
|
2788
|
+
self.assertTrue(mx.array_equal(c, mx.ones((3, 2, 5), dtype=mx.bool_)))
|
|
2789
|
+
|
|
2790
|
+
def test_bitwise_grad(self):
|
|
2791
|
+
a = np.random.randint(0, 10, size=(4, 3))
|
|
2792
|
+
b = np.random.randint(0, 10, size=(4, 3))
|
|
2793
|
+
cotangent = np.random.randint(0, 10, size=(4, 3))
|
|
2794
|
+
a = mx.array(a)
|
|
2795
|
+
b = mx.array(b)
|
|
2796
|
+
cotangent = mx.array(cotangent)
|
|
2797
|
+
|
|
2798
|
+
def bitwise(a, b):
|
|
2799
|
+
return a.astype(mx.int32) & b.astype(mx.int32)
|
|
2800
|
+
|
|
2801
|
+
_, vjps = mx.vjp(bitwise, [a, b], [cotangent])
|
|
2802
|
+
for vjp in vjps:
|
|
2803
|
+
self.assertFalse(np.any(np.array(vjp)))
|
|
2804
|
+
|
|
2805
|
+
def test_conjugate(self):
|
|
2806
|
+
shape = (3, 5, 7)
|
|
2807
|
+
a = np.random.normal(size=shape) + 1j * np.random.normal(size=shape)
|
|
2808
|
+
a = a.astype(np.complex64)
|
|
2809
|
+
ops = ["conjugate", "conj"]
|
|
2810
|
+
for op in ops:
|
|
2811
|
+
out_mlx = getattr(mx, op)(mx.array(a))
|
|
2812
|
+
out_np = getattr(np, op)(a)
|
|
2813
|
+
self.assertTrue(np.array_equal(np.array(out_mlx), out_np))
|
|
2814
|
+
out_mlx = mx.array(a).conj()
|
|
2815
|
+
out_np = a.conj()
|
|
2816
|
+
self.assertTrue(np.array_equal(np.array(out_mlx), out_np))
|
|
2817
|
+
|
|
2818
|
+
def test_view(self):
|
|
2819
|
+
# Check scalar
|
|
2820
|
+
out = mx.array(1, mx.int8).view(mx.uint8).item()
|
|
2821
|
+
self.assertEqual(out, 1)
|
|
2822
|
+
|
|
2823
|
+
a = mx.random.randint(shape=(4, 2, 4), low=-100, high=100)
|
|
2824
|
+
a_np = np.array(a)
|
|
2825
|
+
|
|
2826
|
+
for t in ["bool_", "int16", "float32", "int64"]:
|
|
2827
|
+
out = a.view(getattr(mx, t))
|
|
2828
|
+
expected = a_np.view(getattr(np, t))
|
|
2829
|
+
self.assertTrue(np.array_equal(out, expected, equal_nan=True))
|
|
2830
|
+
|
|
2831
|
+
# Irregular strides
|
|
2832
|
+
a = mx.random.randint(shape=(2, 4), low=-100, high=100)
|
|
2833
|
+
a = mx.broadcast_to(a, shape=(4, 2, 4))
|
|
2834
|
+
|
|
2835
|
+
for t in ["bool_", "int16", "float32", "int64"]:
|
|
2836
|
+
out = a.view(getattr(mx, t))
|
|
2837
|
+
a_out = out.view(mx.int32)
|
|
2838
|
+
self.assertTrue(mx.array_equal(a_out, a, equal_nan=True))
|
|
2839
|
+
|
|
2840
|
+
a = mx.random.randint(shape=(4, 4), low=-100, high=100).T
|
|
2841
|
+
for t in ["bool_", "int16", "float32", "int64"]:
|
|
2842
|
+
out = a.view(getattr(mx, t))
|
|
2843
|
+
a_out = out.view(mx.int32)
|
|
2844
|
+
self.assertTrue(mx.array_equal(a_out, a, equal_nan=True))
|
|
2845
|
+
|
|
2846
|
+
def _hadamard(self, N):
|
|
2847
|
+
# Matches scipy.linalg.hadamard
|
|
2848
|
+
H = np.array([[1]], dtype=np.int64)
|
|
2849
|
+
for i in range(0, np.log2(N).astype(np.int64)):
|
|
2850
|
+
H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
|
|
2851
|
+
return H
|
|
2852
|
+
|
|
2853
|
+
def test_hadamard(self):
    """Exercise mx.hadamard_transform: error cases, scalars, identity-size
    inputs, CPU/GPU agreement, and a NumPy reference for sizes m * 2**k
    with m in {1, 28} (28 via a hard-coded H28 matrix)."""
    # An empty input is rejected.
    with self.assertRaises(ValueError):
        mx.hadamard_transform(mx.array([]))

    # Hard-coded 28x28 Hadamard matrix; '+' is +1, '-' is -1.
    # parse_h_string splits on whitespace, so layout inside the literal
    # is irrelevant to the parsed values.
    h28_str = """
+------++----++-+--+-+--++--
-+-----+++-----+-+--+-+--++-
--+-----+++---+-+-+----+--++
---+-----+++---+-+-+-+--+--+
----+-----+++---+-+-+++--+--
-----+-----++++--+-+--++--+-
------++----++-+--+-+--++--+
--++++-+-------++--+++-+--+-
---++++-+-----+-++--+-+-+--+
+---+++--+----++-++--+-+-+--
++---++---+----++-++--+-+-+-
+++---+----+----++-++--+-+-+
++++--------+-+--++-++--+-+-
-++++--------+++--++--+--+-+
-+-++-++--++--+--------++++-
+-+-++--+--++--+--------++++
-+-+-++--+--++--+----+---+++
+-+-+-++--+--+---+---++---++
++-+-+-++--+------+--+++---+
-++-+-+-++--+------+-++++---
+-++-+---++--+------+-++++--
-++--++-+-++-+++----++------
+-++--++-+-++-+++-----+-----
++-++---+-+-++-+++-----+----
-++-++-+-+-+-+--+++-----+---
--++-++++-+-+----+++-----+--
+--++-+-++-+-+----+++-----+-
++--++-+-++-+-+----++------+
"""

    def parse_h_string(h_str):
        # Each whitespace-separated row becomes a row of +/-1 entries.
        return np.array(
            [[1 if s == "+" else -1 for s in row] for row in h_str.split()]
        )

    h28 = parse_h_string(h28_str)

    # A scalar passes through unchanged (size-1 transform).
    x = mx.array(5)
    y = mx.hadamard_transform(x)
    self.assertEqual(y.item(), 5)

    # An explicit scale multiplies the (trivial) transform.
    x = mx.array(5)
    y = mx.hadamard_transform(x, scale=0.2)
    self.assertEqual(y.item(), 1)

    # Last-axis size 1: the transform is the identity.
    x = mx.random.normal((8, 8, 1))
    y = mx.hadamard_transform(x)
    self.assertTrue(mx.all(y == x).item())

    # Too slow to compare to numpy so let's compare CPU to GPU
    if mx.default_device() == mx.gpu:
        rk = mx.random.key(42)
        for k in range(14, 17):
            for m in [1, 3, 5, 7]:
                x = mx.random.normal((4, m * 2**k), key=rk)
                y1 = mx.hadamard_transform(x, stream=mx.cpu)
                y2 = mx.hadamard_transform(x, stream=mx.gpu)
                self.assertLess(mx.abs(y1 - y2).max().item(), 5e-6)

    np.random.seed(7)
    # Cross product of dtypes, multiplier m, and power-of-two exponent k.
    tests = product([np.float32, np.float16, np.int32], [1, 28], range(1, 14))
    for dtype, m, k in tests:
        # skip large m=28 cases because they're very slow in NumPy
        if m > 1 and k > 8:
            continue
        with self.subTest(dtype=dtype, m=m, k=k):
            n = m * 2**k
            b = 4
            scale = 0.34
            x = np.random.normal(size=(b, n)).astype(dtype)
            # contiguity check: slicing makes the input non-contiguous
            x = mx.array(x)[::2]
            y = mx.hadamard_transform(x, scale=scale)
            mx.eval(y)
            # Reference matrix: pure power-of-two, or Kronecker with H28.
            h = (
                self._hadamard(2**k)
                if m == 1
                else np.kron(h28, self._hadamard(2**k))
            )
            y_np = np.einsum("ij,bj->bi", h, x) * scale
            # Looser tolerance for low-precision dtypes, growing with k.
            atol = 2e-4 if dtype == np.float32 else 5e-2 * k
            np.testing.assert_allclose(y, y_np, atol=atol)

            # bfloat16 emulation on M1 means 2**14 doesn't fit in threadgroup memory
            if dtype == np.float16 and k < 14:
                y_bf16 = mx.hadamard_transform(x.astype(mx.bfloat16), scale=scale)
                np.testing.assert_allclose(
                    y_bf16.astype(mx.float16), y, atol=atol * 2
                )
|
|
2947
|
+
|
|
2948
|
+
def test_hadamard_grad_vmap(self):
    """Check hadamard_transform's vjp and vmap against an explicit matmul
    reference implementation for sizes 2**2 .. 2**7."""
    np.random.seed(4)

    for k in range(2, 8):
        n = 2**k
        # Random input and cotangent, plus the reference matrix, as float32.
        x = mx.array(np.random.normal(size=(n,))).astype(mx.float32)
        h = mx.array(self._hadamard(n)).astype(mx.float32)
        c = mx.array(np.random.normal(size=(n,))).astype(mx.float32)

        def hadamard_transform(x):
            # Dense reference; scaled so it matches mx.hadamard_transform.
            return h @ x / mx.sqrt(x.shape[-1])

        # The vjp of the fast transform must match the dense reference.
        ref_vjp = mx.vjp(hadamard_transform, [x], [c])
        fast_vjp = mx.vjp(mx.hadamard_transform, [x], [c])
        np.testing.assert_allclose(ref_vjp, fast_vjp, atol=1e-4)

        # Doubly-vmapped versions over each outer axis must also agree.
        for axis in (0, 1, 2):
            ref_fn = mx.vmap(mx.vmap(hadamard_transform, 0, 0), axis, axis)
            fast_fn = mx.vmap(mx.vmap(mx.hadamard_transform, 0, 0), axis, axis)

            batch = mx.array(np.random.normal(size=(n, n, n)))
            ref_out = ref_fn(batch)
            fast_out = fast_fn(batch)
            np.testing.assert_allclose(ref_out, fast_out, atol=1e-4)
|
|
2975
|
+
|
|
2976
|
+
def test_roll(self):
    """mx.roll matches np.roll for scalar and tuple shifts and axes."""
    x = mx.arange(10).reshape(2, 5)

    # Flattened rolls: scalar shift, then the same shift as a 3-tuple.
    for s in [-2, -1, 0, 1, 2]:
        self.assertTrue(mx.array_equal(np.roll(x, s), mx.roll(x, s)).item())

        expected = np.roll(x, (s, s, s))
        actual = mx.roll(x, (s, s, s))
        self.assertTrue(mx.array_equal(expected, actual).item())

    # Axis-specific rolls, including shifts larger than the axis length
    # and repeated axes in the axis tuple.
    shifts = [1, 2, -1, -2, (1, 1), (-1, 2), (33, 33)]
    axes = [0, 1, (1, 0), (0, 1), (0, 0), (1, 1)]
    for shift, axis in product(shifts, axes):
        expected = np.roll(x, shift, axis)
        actual = mx.roll(x, shift, axis)
        self.assertTrue(mx.array_equal(expected, actual).item())
|
|
3009
|
+
|
|
3010
|
+
def test_roll_errors(self):
    """Rolling an empty array is a no-op rather than an error."""
    empty = mx.array([])
    rolled = mx.roll(empty, [0], [0])
    self.assertTrue(mx.array_equal(rolled, empty))
|
|
3014
|
+
|
|
3015
|
+
def test_real_imag(self):
    """mx.real / mx.imag on real and complex arrays."""
    re_part = mx.random.uniform(shape=(4, 4))

    # On a real array, real() is the identity ...
    self.assertTrue(mx.array_equal(re_part, mx.real(re_part)))

    # ... and imag() is all zeros.
    self.assertTrue(mx.array_equal(mx.zeros_like(re_part), mx.imag(re_part)))

    # On a complex array, both components round-trip with float32 dtype.
    im_part = mx.random.uniform(shape=(4, 4))
    z = re_part + 1j * im_part
    self.assertEqual(mx.real(z).dtype, mx.float32)
    self.assertTrue(mx.array_equal(mx.real(z), re_part))
    self.assertEqual(mx.imag(z).dtype, mx.float32)
    self.assertTrue(mx.array_equal(mx.imag(z), im_part))
|
|
3029
|
+
|
|
3030
|
+
def test_dynamic_slicing(self):
    """mx.slice / mx.slice_update with runtime (array) start indices."""
    # Slicing from start (1, 2, 3) along axes (0, 1, 2) with sizes
    # (3, 2, 1) must equal the equivalent static slice.
    src = mx.random.randint(0, 100, shape=(4, 4, 4))
    sliced = mx.slice(src, mx.array([1, 2, 3]), (0, 1, 2), (3, 2, 1))
    self.assertTrue(mx.array_equal(src[1:, 2:, 3:], sliced))

    # slice_update writes the block at the same dynamic offset.
    base = mx.zeros(shape=(4, 4, 4))
    update = mx.random.randint(0, 100, shape=(3, 2, 1))
    updated = mx.slice_update(base, update, mx.array([1, 2, 3]), (0, 1, 2))
    expected = mx.zeros_like(base)
    expected[1:, 2:, 3:] = update
    self.assertTrue(mx.array_equal(expected, updated))
|
|
3042
|
+
|
|
3043
|
+
def test_broadcast_arrays(self):
    """broadcast_arrays broadcasts shapes while keeping each input's dtype."""
    # Two scalars: shapes stay (), dtypes are untouched.
    int_scalar, float_scalar = mx.broadcast_arrays(mx.array(1), mx.array(1.0))
    self.assertEqual(int_scalar.shape, ())
    self.assertEqual(int_scalar.dtype, mx.int32)
    self.assertEqual(float_scalar.shape, ())
    self.assertEqual(float_scalar.dtype, mx.float32)

    # (3, 1, 2) with (4, 1) broadcasts both outputs to (3, 4, 2).
    lhs, rhs = mx.broadcast_arrays(mx.zeros((3, 1, 2)), mx.zeros((4, 1)))
    self.assertEqual(lhs.shape, (3, 4, 2))
    self.assertEqual(rhs.shape, (3, 4, 2))
|
|
3055
|
+
|
|
3056
|
+
def test_slice_update_reversed(self):
    """Strided assignment into a view of a reversed array."""
    base = mx.array([1, 2, 3, 4])
    flipped = base[::-1]
    # Zero every other element of the reversed view.
    flipped[::2] = 0
    self.assertTrue(mx.array_equal(flipped, mx.array([0, 3, 0, 1])))
|
|
3061
|
+
|
|
3062
|
+
def test_slice_with_negative_stride(self):
    """Negative-stride slicing reverses rows and composes to identity."""
    mat = mx.random.uniform(shape=(128, 4))
    flipped = mat[::-1]
    # The last row of the flipped array is the first row of the original.
    self.assertTrue(mx.array_equal(flipped[-1, :], mat[0, :]))

    # Reversing an even number of times gives back the original array.
    vec = mx.arange(8)
    for _ in range(4):
        vec = vec[::-1]
    self.assertTrue(mx.array_equal(vec, mx.arange(8)))
|
|
3071
|
+
|
|
3072
|
+
def test_complex_ops(self):
    """Elementwise complex ops agree with their NumPy counterparts."""
    x = mx.array(
        [
            3.0 + 4.0j,
            -5.0 + 12.0j,
            -8.0 + 0.0j,
            0.0 + 9.0j,
            0.0 + 0.0j,
        ]
    )

    # Ops that exist under the same name in both mx and np.
    for op in ("arccos", "arcsin", "arctan", "square", "sqrt"):
        with self.subTest(op=op):
            mx_result = getattr(mx, op)(x)
            np_result = getattr(np, op)(x)
            self.assertTrue(np.allclose(mx_result, np_result))

    # rsqrt has no NumPy equivalent; compare against 1/sqrt on inputs
    # that avoid a zero entry.
    x = mx.array(
        [
            3.0 + 4.0j,
            -5.0 + 12.0j,
            -8.0 + 0.0j,
            0.0 + 9.0j,
            9.0 + 1.0j,
        ]
    )
    self.assertTrue(np.allclose(mx.rsqrt(x), 1.0 / np.sqrt(x)))
|
|
3100
|
+
|
|
3101
|
+
def test_complex_power(self):
    """Complex zero base: finite exponent yields 0, nan exponent yields nan."""
    squared = mx.power(mx.array(0j), 2)
    self.assertEqual(squared.item(), 0j)

    nan_pow = mx.power(mx.array(0j), float("nan"))
    self.assertTrue(mx.isnan(nan_pow))
|
|
3107
|
+
|
|
3108
|
+
def test_irregular_alignments(self):
    """Ops on views whose start offset breaks the usual alignment."""
    # Unary op on an offset view.
    ones = mx.ones((64, 1))
    negated = -ones[1:]
    self.assertTrue(mx.all(negated == -1.0))

    # Binary op where both operands are the same offset view.
    ones = mx.ones((64, 1))
    view = ones[1:]
    doubled = view + view
    self.assertTrue(mx.all(doubled == 2.0))

    # Ternary op (where) mixing an offset view with regular operands.
    ones = mx.ones((64, 1))
    zeros = mx.zeros((63, 1))
    cond = mx.ones((63, 1)).astype(mx.bool_)
    selected = mx.where(cond, ones[1:], zeros)
    self.assertTrue(mx.all(selected == 1.0))
|
|
3126
|
+
|
|
3127
|
+
def test_integer_power(self):
    """Integer exponentiation, including the negative-exponent edge case."""
    result = mx.power(2, mx.array([8, 8, 8, 8, 8, 8, 8, 8]))
    self.assertTrue(mx.all(result == 256))

    # Doesn't hang on a negative integer exponent.
    mx.power(2, -1)
|
|
3133
|
+
|
|
3134
|
+
def test_depends(self):
    """mx.depends returns its primary outputs unchanged (list and bare forms)."""
    # List form: first output equals the primary array it wraps.
    a = mx.array([1.0, 2.0, 3.0])
    primary = mx.exp(a)
    dependency = mx.log(a)
    out = mx.depends([primary], [dependency])[0]
    self.assertTrue(mx.array_equal(out, primary))

    # Bare-array form behaves the same way.
    a = mx.array([1.0, 2.0, 3.0])
    primary = mx.exp(a)
    dependency = mx.log(a)
    out = mx.depends(primary, dependency)
    self.assertTrue(mx.array_equal(out, primary))
|
|
3146
|
+
|
|
3147
|
+
def test_masked_scatter(self):
    """Boolean-mask assignment (a[mask] = src) semantics: element selection,
    slice selection, scalar broadcast, empty/no-op masks, strided views,
    and rejection of non-boolean masks."""
    # boolean mask updates matching numpy semantics: src values fill the
    # True positions in order.
    a = mx.array([1.0, 2.0, 3.0])
    mask = mx.array([True, False, True])
    src = mx.array([5.0, 6.0])
    expected = mx.array([5.0, 2.0, 6.0])
    a[mask] = src
    self.assertTrue(mx.array_equal(a, expected))

    # non-boolean mask raises (int mask is rejected rather than treated
    # as truthy)
    b = mx.array([1.0, 2.0, 3.0])
    bad_mask = mx.array([1, 0, 1])
    src = mx.array([4.0, 5.0])
    with self.assertRaises((TypeError, ValueError)):
        b[bad_mask] = src

    # mask matching leading dimension selects entire trailing slices
    c = mx.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    mask = mx.array([True, False])
    src = mx.array([2.0, 3.0, 4.0])
    expected = mx.array([[2.0, 3.0, 4.0], [1.0, 1.0, 1.0]])
    c[mask] = src
    self.assertTrue(mx.array_equal(c, expected))

    # scalar source applies to all selected entries
    c = mx.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    mask = mx.array([True, False])
    src = 2.0
    expected = mx.array([[2.0, 2.0, 2.0], [1.0, 1.0, 1.0]])
    c[mask] = src
    self.assertTrue(mx.array_equal(c, expected))

    # mask with no updates leaves values unchanged (src is ignored)
    d = mx.array([[7.0, 8.0], [9.0, 10.0]])
    mask = mx.zeros_like(d).astype(mx.bool_)
    src = mx.array([1.0])
    d[mask] = src
    self.assertTrue(mx.array_equal(d, mx.array([[7.0, 8.0], [9.0, 10.0]])))

    # empty mask leaves array unchanged (all three operands zero-length)
    e = mx.zeros((0,), dtype=mx.float32)
    mask = mx.zeros((0,), dtype=mx.bool_)
    src = mx.zeros((0,), dtype=mx.float32)
    e[mask] = src
    self.assertTrue(mx.array_equal(e, mx.zeros((0,), dtype=mx.float32)))

    # strided target, mask, and source derived from slices:
    # target = [1, 3, 5, 7, 9], mask = [T, F, F, T, F], src = [-4, -2]
    target = mx.arange(10.0, dtype=mx.float32)[1::2]
    mask = mx.array(
        [False, True, False, False, True, False, False, True, False, False],
        dtype=mx.bool_,
    )[1::2]
    src = mx.arange(-4.0, 0.0, dtype=mx.float32)[::2]

    target[mask] = src
    self.assertTrue(
        mx.array_equal(
            target, mx.array([-4.0, 3.0, 5.0, -2.0, 9.0], dtype=mx.float32)
        )
    )
|
|
3207
|
+
|
|
3208
|
+
def test_broadcast_shapes(self):
    """mx.broadcast_shapes across valid, degenerate, and invalid inputs."""
    # (input shapes, expected result) covering basic broadcasting,
    # multiple arguments, identical shapes, a single argument, empty
    # shapes, and zero-sized dimensions.
    cases = [
        (((1, 2, 3), (3,)), (1, 2, 3)),
        (((4, 1, 6), (5, 6)), (4, 5, 6)),
        (((5, 1, 4), (1, 3, 4)), (5, 3, 4)),
        (((1, 1), (1, 8), (7, 1)), (7, 8)),
        (((6, 1, 5), (1, 7, 1), (6, 7, 5)), (6, 7, 5)),
        (((3, 4, 5), (3, 4, 5)), (3, 4, 5)),
        (((2, 3),), (2, 3)),
        (((), ()), ()),
        (((), (1,)), (1,)),
        (((1,), ()), (1,)),
        (((0,), (0,)), (0,)),
        (((1, 0, 5), (3, 1, 5)), (3, 0, 5)),
        (((5, 0), (0, 5, 0)), (0, 5, 0)),
    ]
    for shapes, expected in cases:
        self.assertEqual(mx.broadcast_shapes(*shapes), expected)

    # Incompatible dimensions — or no arguments at all — raise ValueError.
    for bad_shapes in [((3, 4), (4, 3)), ((2, 3, 4), (2, 5, 4)), ()]:
        with self.assertRaises(ValueError):
            mx.broadcast_shapes(*bad_shapes)
|
|
3245
|
+
|
|
3246
|
+
def test_sort_nan(self):
    """Sorting with NaN entries: NaNs order to the end of the array."""
    x = mx.array([3.0, mx.nan, 2.0, 0.0])
    expected = mx.array([0.0, 2.0, 3.0, mx.nan])
    # equal_nan=True so the trailing NaN compares equal positionally.
    self.assertTrue(mx.array_equal(mx.sort(x), expected, equal_nan=True))
    # NOTE(review): this complex array is built but never sorted or
    # asserted on — presumably it only checks that construction does not
    # raise; confirm whether a complex-sort assertion was intended here.
    x = mx.array([3.0, mx.nan, 2.0, 0.0]) + 1j * mx.array([1.0] * 4)
|
|
3251
|
+
|
|
3252
|
+
def test_to_from_fp8(self):
    """fp8 conversion round-trips exactly for representable magnitudes,
    with either sign."""
    vals = mx.array(
        [448, 256, 192, 128, 96, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 0.015625]
    )
    for signed in (vals, -vals):
        roundtrip = mx.from_fp8(mx.to_fp8(signed))
        self.assertTrue(mx.array_equal(roundtrip, signed))
|
|
3258
|
+
|
|
3259
|
+
|
|
3260
|
+
if __name__ == "__main__":
    # Dispatch to the project's test runner when executed as a script.
    mlx_tests.MLXTestRunner()
|