mlx 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mlx might be problematic.
- checksums.yaml +7 -0
- data/ext/mlx/CMakeLists.txt +7 -0
- data/ext/mlx/Makefile +273 -0
- data/ext/mlx/extconf.rb +94 -0
- data/ext/mlx/mkmf.log +44 -0
- data/ext/mlx/native.bundle +0 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Info.plist +20 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Resources/DWARF/native.bundle +0 -0
- data/ext/mlx/native.bundle.dSYM/Contents/Resources/Relocations/aarch64/native.bundle.yml +5 -0
- data/ext/mlx/native.cpp +8027 -0
- data/ext/mlx/native.o +0 -0
- data/lib/mlx/core.rb +1678 -0
- data/lib/mlx/distributed_utils/common.rb +116 -0
- data/lib/mlx/distributed_utils/config.rb +600 -0
- data/lib/mlx/distributed_utils/launch.rb +490 -0
- data/lib/mlx/extension.rb +24 -0
- data/lib/mlx/nn/base.rb +388 -0
- data/lib/mlx/nn/init.rb +140 -0
- data/lib/mlx/nn/layers/activations.rb +336 -0
- data/lib/mlx/nn/layers/base.rb +6 -0
- data/lib/mlx/nn/layers/containers.rb +20 -0
- data/lib/mlx/nn/layers/convolution.rb +120 -0
- data/lib/mlx/nn/layers/convolution_transpose.rb +114 -0
- data/lib/mlx/nn/layers/distributed.rb +309 -0
- data/lib/mlx/nn/layers/dropout.rb +75 -0
- data/lib/mlx/nn/layers/embedding.rb +28 -0
- data/lib/mlx/nn/layers/linear.rb +79 -0
- data/lib/mlx/nn/layers/normalization.rb +216 -0
- data/lib/mlx/nn/layers/pooling.rb +167 -0
- data/lib/mlx/nn/layers/positional_encoding.rb +126 -0
- data/lib/mlx/nn/layers/quantized.rb +215 -0
- data/lib/mlx/nn/layers/recurrent.rb +135 -0
- data/lib/mlx/nn/layers/transformer.rb +330 -0
- data/lib/mlx/nn/layers/upsample.rb +97 -0
- data/lib/mlx/nn/layers.rb +18 -0
- data/lib/mlx/nn/losses.rb +251 -0
- data/lib/mlx/nn/utils.rb +167 -0
- data/lib/mlx/nn.rb +12 -0
- data/lib/mlx/optimizers/optimizers.rb +808 -0
- data/lib/mlx/optimizers/schedulers.rb +62 -0
- data/lib/mlx/optimizers.rb +9 -0
- data/lib/mlx/utils.rb +171 -0
- data/lib/mlx/version +1 -0
- data/lib/mlx/version.rb +5 -0
- data/lib/mlx.rb +64 -0
- data/mlx/.clang-format +87 -0
- data/mlx/.git +1 -0
- data/mlx/.github/ISSUE_TEMPLATE/bug_report.md +28 -0
- data/mlx/.github/actions/build-cuda-release/action.yml +31 -0
- data/mlx/.github/actions/build-docs/action.yml +38 -0
- data/mlx/.github/actions/build-linux/action.yml +38 -0
- data/mlx/.github/actions/build-linux-release/action.yml +42 -0
- data/mlx/.github/actions/build-macos/action.yml +80 -0
- data/mlx/.github/actions/build-macos-release/action.yml +36 -0
- data/mlx/.github/actions/build-windows/action.yml +26 -0
- data/mlx/.github/actions/setup-linux/action.yml +93 -0
- data/mlx/.github/actions/setup-macos/action.yml +24 -0
- data/mlx/.github/actions/setup-windows/action.yml +42 -0
- data/mlx/.github/actions/test-linux/action.yml +69 -0
- data/mlx/.github/actions/test-windows/action.yml +20 -0
- data/mlx/.github/dependabot.yml +6 -0
- data/mlx/.github/pull_request_template.md +12 -0
- data/mlx/.github/scripts/build-sanitizer-tests.sh +48 -0
- data/mlx/.github/scripts/setup+build-cpp-linux-fedora-container.sh +27 -0
- data/mlx/.github/workflows/build_and_test.yml +152 -0
- data/mlx/.github/workflows/documentation.yml +28 -0
- data/mlx/.github/workflows/nightly.yml +104 -0
- data/mlx/.github/workflows/release.yml +256 -0
- data/mlx/.gitignore +81 -0
- data/mlx/.pre-commit-config.yaml +27 -0
- data/mlx/ACKNOWLEDGMENTS.md +268 -0
- data/mlx/CITATION.cff +24 -0
- data/mlx/CMakeLists.txt +437 -0
- data/mlx/CODE_OF_CONDUCT.md +132 -0
- data/mlx/CONTRIBUTING.md +38 -0
- data/mlx/LICENSE +21 -0
- data/mlx/MANIFEST.in +6 -0
- data/mlx/README.md +121 -0
- data/mlx/benchmarks/cpp/CMakeLists.txt +11 -0
- data/mlx/benchmarks/cpp/autograd.cpp +39 -0
- data/mlx/benchmarks/cpp/compare_devices.cpp +27 -0
- data/mlx/benchmarks/cpp/irregular_strides.cpp +201 -0
- data/mlx/benchmarks/cpp/single_ops.cpp +288 -0
- data/mlx/benchmarks/cpp/time_utils.h +39 -0
- data/mlx/benchmarks/numpy/single_ops.py +39 -0
- data/mlx/benchmarks/numpy/time_utils.py +20 -0
- data/mlx/benchmarks/python/batch_matmul_bench.py +62 -0
- data/mlx/benchmarks/python/blas/bench_gemm.py +191 -0
- data/mlx/benchmarks/python/blas/bench_gemv.py +220 -0
- data/mlx/benchmarks/python/comparative/README.md +15 -0
- data/mlx/benchmarks/python/comparative/bench_mlx.py +519 -0
- data/mlx/benchmarks/python/comparative/bench_torch.py +482 -0
- data/mlx/benchmarks/python/comparative/compare.py +284 -0
- data/mlx/benchmarks/python/compile_bench.py +107 -0
- data/mlx/benchmarks/python/conv1d_bench.py +123 -0
- data/mlx/benchmarks/python/conv2d_bench_cpu.py +127 -0
- data/mlx/benchmarks/python/conv2d_train_bench_cpu.py +143 -0
- data/mlx/benchmarks/python/conv2d_transpose_bench_cpu.py +129 -0
- data/mlx/benchmarks/python/conv3d_bench_cpu.py +110 -0
- data/mlx/benchmarks/python/conv3d_train_bench_cpu.py +143 -0
- data/mlx/benchmarks/python/conv3d_transpose_bench_cpu.py +116 -0
- data/mlx/benchmarks/python/conv_bench.py +135 -0
- data/mlx/benchmarks/python/conv_transpose_bench.py +135 -0
- data/mlx/benchmarks/python/conv_unaligned_bench.py +107 -0
- data/mlx/benchmarks/python/distributed_bench.py +66 -0
- data/mlx/benchmarks/python/einsum_bench.py +84 -0
- data/mlx/benchmarks/python/fft_bench.py +118 -0
- data/mlx/benchmarks/python/gather_bench.py +52 -0
- data/mlx/benchmarks/python/gather_mm_bench.py +74 -0
- data/mlx/benchmarks/python/gather_qmm_bench.py +84 -0
- data/mlx/benchmarks/python/hadamard_bench.py +70 -0
- data/mlx/benchmarks/python/large_gemm_bench.py +119 -0
- data/mlx/benchmarks/python/layer_norm_bench.py +82 -0
- data/mlx/benchmarks/python/masked_scatter.py +212 -0
- data/mlx/benchmarks/python/rms_norm_bench.py +63 -0
- data/mlx/benchmarks/python/rope_bench.py +35 -0
- data/mlx/benchmarks/python/scatter_bench.py +96 -0
- data/mlx/benchmarks/python/sdpa_bench.py +223 -0
- data/mlx/benchmarks/python/sdpa_vector_bench.py +95 -0
- data/mlx/benchmarks/python/single_ops.py +132 -0
- data/mlx/benchmarks/python/synchronize_bench.py +55 -0
- data/mlx/benchmarks/python/time_utils.py +38 -0
- data/mlx/cmake/FindCUDNN.cmake +177 -0
- data/mlx/cmake/FindNCCL.cmake +54 -0
- data/mlx/cmake/Findnvpl.cmake +3 -0
- data/mlx/cmake/extension.cmake +50 -0
- data/mlx/docs/.clang-format +2 -0
- data/mlx/docs/.gitignore +3 -0
- data/mlx/docs/.nojekyll +0 -0
- data/mlx/docs/Doxyfile +51 -0
- data/mlx/docs/Makefile +18 -0
- data/mlx/docs/README.md +54 -0
- data/mlx/docs/index.html +1 -0
- data/mlx/docs/requirements.txt +5 -0
- data/mlx/docs/src/_static/distributed/m3-ultra-mesh-broken.png +0 -0
- data/mlx/docs/src/_static/distributed/m3-ultra-mesh.png +0 -0
- data/mlx/docs/src/_static/metal_debugger/capture.png +0 -0
- data/mlx/docs/src/_static/metal_debugger/schema.png +0 -0
- data/mlx/docs/src/_static/mlx_logo.png +0 -0
- data/mlx/docs/src/_static/mlx_logo_dark.png +0 -0
- data/mlx/docs/src/_static/tp_inference/all-to-sharded-linear.png +0 -0
- data/mlx/docs/src/_static/tp_inference/column-row-tp.png +0 -0
- data/mlx/docs/src/_static/tp_inference/llama-transformer.png +0 -0
- data/mlx/docs/src/_static/tp_inference/sharded-to-all-linear.png +0 -0
- data/mlx/docs/src/_templates/module-base-class.rst +33 -0
- data/mlx/docs/src/_templates/nn-module-template.rst +20 -0
- data/mlx/docs/src/_templates/optimizers-template.rst +20 -0
- data/mlx/docs/src/conf.py +99 -0
- data/mlx/docs/src/cpp/ops.rst +7 -0
- data/mlx/docs/src/dev/custom_metal_kernels.rst +445 -0
- data/mlx/docs/src/dev/extensions.rst +811 -0
- data/mlx/docs/src/dev/metal_debugger.rst +68 -0
- data/mlx/docs/src/dev/metal_logging.rst +40 -0
- data/mlx/docs/src/dev/mlx_in_cpp.rst +121 -0
- data/mlx/docs/src/examples/data_parallelism.rst +91 -0
- data/mlx/docs/src/examples/linear_regression.rst +77 -0
- data/mlx/docs/src/examples/llama-inference.rst +382 -0
- data/mlx/docs/src/examples/mlp.rst +134 -0
- data/mlx/docs/src/examples/tensor_parallelism.rst +239 -0
- data/mlx/docs/src/index.rst +96 -0
- data/mlx/docs/src/install.rst +340 -0
- data/mlx/docs/src/python/array.rst +65 -0
- data/mlx/docs/src/python/cuda.rst +9 -0
- data/mlx/docs/src/python/data_types.rst +78 -0
- data/mlx/docs/src/python/devices_and_streams.rst +21 -0
- data/mlx/docs/src/python/distributed.rst +22 -0
- data/mlx/docs/src/python/export.rst +14 -0
- data/mlx/docs/src/python/fast.rst +16 -0
- data/mlx/docs/src/python/fft.rst +24 -0
- data/mlx/docs/src/python/linalg.rst +27 -0
- data/mlx/docs/src/python/memory_management.rst +16 -0
- data/mlx/docs/src/python/metal.rst +12 -0
- data/mlx/docs/src/python/nn/distributed.rst +30 -0
- data/mlx/docs/src/python/nn/functions.rst +40 -0
- data/mlx/docs/src/python/nn/init.rst +45 -0
- data/mlx/docs/src/python/nn/layers.rst +74 -0
- data/mlx/docs/src/python/nn/losses.rst +25 -0
- data/mlx/docs/src/python/nn/module.rst +38 -0
- data/mlx/docs/src/python/nn.rst +186 -0
- data/mlx/docs/src/python/ops.rst +184 -0
- data/mlx/docs/src/python/optimizers/common_optimizers.rst +22 -0
- data/mlx/docs/src/python/optimizers/optimizer.rst +23 -0
- data/mlx/docs/src/python/optimizers/schedulers.rst +15 -0
- data/mlx/docs/src/python/optimizers.rst +78 -0
- data/mlx/docs/src/python/random.rst +48 -0
- data/mlx/docs/src/python/transforms.rst +22 -0
- data/mlx/docs/src/python/tree_utils.rst +23 -0
- data/mlx/docs/src/usage/compile.rst +516 -0
- data/mlx/docs/src/usage/distributed.rst +572 -0
- data/mlx/docs/src/usage/export.rst +288 -0
- data/mlx/docs/src/usage/function_transforms.rst +191 -0
- data/mlx/docs/src/usage/indexing.rst +194 -0
- data/mlx/docs/src/usage/launching_distributed.rst +234 -0
- data/mlx/docs/src/usage/lazy_evaluation.rst +144 -0
- data/mlx/docs/src/usage/numpy.rst +124 -0
- data/mlx/docs/src/usage/quick_start.rst +67 -0
- data/mlx/docs/src/usage/saving_and_loading.rst +81 -0
- data/mlx/docs/src/usage/unified_memory.rst +78 -0
- data/mlx/docs/src/usage/using_streams.rst +18 -0
- data/mlx/examples/cmake_project/CMakeLists.txt +22 -0
- data/mlx/examples/cmake_project/README.md +26 -0
- data/mlx/examples/cmake_project/example.cpp +14 -0
- data/mlx/examples/cpp/CMakeLists.txt +12 -0
- data/mlx/examples/cpp/distributed.cpp +22 -0
- data/mlx/examples/cpp/linear_regression.cpp +54 -0
- data/mlx/examples/cpp/logistic_regression.cpp +54 -0
- data/mlx/examples/cpp/metal_capture.cpp +31 -0
- data/mlx/examples/cpp/timer.h +20 -0
- data/mlx/examples/cpp/tutorial.cpp +99 -0
- data/mlx/examples/export/CMakeLists.txt +22 -0
- data/mlx/examples/export/README.md +49 -0
- data/mlx/examples/export/eval_mlp.cpp +25 -0
- data/mlx/examples/export/eval_mlp.py +52 -0
- data/mlx/examples/export/train_mlp.cpp +35 -0
- data/mlx/examples/export/train_mlp.py +76 -0
- data/mlx/examples/extensions/CMakeLists.txt +78 -0
- data/mlx/examples/extensions/README.md +24 -0
- data/mlx/examples/extensions/axpby/axpby.cpp +306 -0
- data/mlx/examples/extensions/axpby/axpby.h +90 -0
- data/mlx/examples/extensions/axpby/axpby.metal +47 -0
- data/mlx/examples/extensions/bindings.cpp +39 -0
- data/mlx/examples/extensions/mlx_sample_extensions/__init__.py +5 -0
- data/mlx/examples/extensions/pyproject.toml +8 -0
- data/mlx/examples/extensions/requirements.txt +4 -0
- data/mlx/examples/extensions/setup.py +18 -0
- data/mlx/examples/extensions/test.py +12 -0
- data/mlx/examples/python/linear_regression.py +46 -0
- data/mlx/examples/python/logistic_regression.py +49 -0
- data/mlx/examples/python/qqmm.py +117 -0
- data/mlx/mlx/3rdparty/.clang-format +2 -0
- data/mlx/mlx/3rdparty/pocketfft.h +3581 -0
- data/mlx/mlx/CMakeLists.txt +107 -0
- data/mlx/mlx/allocator.h +75 -0
- data/mlx/mlx/api.h +29 -0
- data/mlx/mlx/array.cpp +354 -0
- data/mlx/mlx/array.h +647 -0
- data/mlx/mlx/backend/common/CMakeLists.txt +9 -0
- data/mlx/mlx/backend/common/binary.h +97 -0
- data/mlx/mlx/backend/common/broadcasting.cpp +24 -0
- data/mlx/mlx/backend/common/broadcasting.h +11 -0
- data/mlx/mlx/backend/common/buffer_cache.h +158 -0
- data/mlx/mlx/backend/common/common.cpp +305 -0
- data/mlx/mlx/backend/common/compiled.cpp +243 -0
- data/mlx/mlx/backend/common/compiled.h +77 -0
- data/mlx/mlx/backend/common/copy.h +50 -0
- data/mlx/mlx/backend/common/hadamard.h +109 -0
- data/mlx/mlx/backend/common/load.cpp +57 -0
- data/mlx/mlx/backend/common/matmul.h +67 -0
- data/mlx/mlx/backend/common/reduce.cpp +154 -0
- data/mlx/mlx/backend/common/reduce.h +59 -0
- data/mlx/mlx/backend/common/slicing.cpp +71 -0
- data/mlx/mlx/backend/common/slicing.h +20 -0
- data/mlx/mlx/backend/common/ternary.h +85 -0
- data/mlx/mlx/backend/common/unary.h +29 -0
- data/mlx/mlx/backend/common/utils.cpp +231 -0
- data/mlx/mlx/backend/common/utils.h +205 -0
- data/mlx/mlx/backend/cpu/CMakeLists.txt +88 -0
- data/mlx/mlx/backend/cpu/arange.h +28 -0
- data/mlx/mlx/backend/cpu/arg_reduce.cpp +124 -0
- data/mlx/mlx/backend/cpu/binary.cpp +269 -0
- data/mlx/mlx/backend/cpu/binary.h +517 -0
- data/mlx/mlx/backend/cpu/binary_ops.h +98 -0
- data/mlx/mlx/backend/cpu/binary_two.h +166 -0
- data/mlx/mlx/backend/cpu/cholesky.cpp +85 -0
- data/mlx/mlx/backend/cpu/compiled.cpp +357 -0
- data/mlx/mlx/backend/cpu/compiled_preamble.h +12 -0
- data/mlx/mlx/backend/cpu/conv.cpp +1351 -0
- data/mlx/mlx/backend/cpu/copy.cpp +386 -0
- data/mlx/mlx/backend/cpu/copy.h +36 -0
- data/mlx/mlx/backend/cpu/device_info.cpp +113 -0
- data/mlx/mlx/backend/cpu/device_info.h +28 -0
- data/mlx/mlx/backend/cpu/distributed.cpp +103 -0
- data/mlx/mlx/backend/cpu/eig.cpp +281 -0
- data/mlx/mlx/backend/cpu/eigh.cpp +241 -0
- data/mlx/mlx/backend/cpu/encoder.cpp +16 -0
- data/mlx/mlx/backend/cpu/encoder.h +67 -0
- data/mlx/mlx/backend/cpu/eval.cpp +40 -0
- data/mlx/mlx/backend/cpu/eval.h +12 -0
- data/mlx/mlx/backend/cpu/fft.cpp +120 -0
- data/mlx/mlx/backend/cpu/gemm.h +26 -0
- data/mlx/mlx/backend/cpu/gemms/bnns.cpp +214 -0
- data/mlx/mlx/backend/cpu/gemms/cblas.cpp +134 -0
- data/mlx/mlx/backend/cpu/gemms/simd_bf16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_fp16.cpp +45 -0
- data/mlx/mlx/backend/cpu/gemms/simd_gemm.h +139 -0
- data/mlx/mlx/backend/cpu/hadamard.cpp +121 -0
- data/mlx/mlx/backend/cpu/indexing.cpp +854 -0
- data/mlx/mlx/backend/cpu/inverse.cpp +160 -0
- data/mlx/mlx/backend/cpu/jit_compiler.cpp +166 -0
- data/mlx/mlx/backend/cpu/jit_compiler.h +20 -0
- data/mlx/mlx/backend/cpu/lapack.h +80 -0
- data/mlx/mlx/backend/cpu/logsumexp.cpp +139 -0
- data/mlx/mlx/backend/cpu/luf.cpp +120 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.ps1 +38 -0
- data/mlx/mlx/backend/cpu/make_compiled_preamble.sh +41 -0
- data/mlx/mlx/backend/cpu/masked_mm.cpp +608 -0
- data/mlx/mlx/backend/cpu/matmul.cpp +166 -0
- data/mlx/mlx/backend/cpu/primitives.cpp +478 -0
- data/mlx/mlx/backend/cpu/qrf.cpp +147 -0
- data/mlx/mlx/backend/cpu/quantized.cpp +1370 -0
- data/mlx/mlx/backend/cpu/reduce.cpp +587 -0
- data/mlx/mlx/backend/cpu/scan.cpp +338 -0
- data/mlx/mlx/backend/cpu/select.cpp +95 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_fp16_simd.h +56 -0
- data/mlx/mlx/backend/cpu/simd/accelerate_simd.h +329 -0
- data/mlx/mlx/backend/cpu/simd/base_simd.h +319 -0
- data/mlx/mlx/backend/cpu/simd/math.h +193 -0
- data/mlx/mlx/backend/cpu/simd/neon_fp16_simd.h +212 -0
- data/mlx/mlx/backend/cpu/simd/simd.h +4 -0
- data/mlx/mlx/backend/cpu/simd/type.h +11 -0
- data/mlx/mlx/backend/cpu/slicing.h +21 -0
- data/mlx/mlx/backend/cpu/softmax.cpp +170 -0
- data/mlx/mlx/backend/cpu/sort.cpp +481 -0
- data/mlx/mlx/backend/cpu/svd.cpp +289 -0
- data/mlx/mlx/backend/cpu/ternary.h +154 -0
- data/mlx/mlx/backend/cpu/threefry.cpp +31 -0
- data/mlx/mlx/backend/cpu/threefry.h +21 -0
- data/mlx/mlx/backend/cpu/unary.cpp +238 -0
- data/mlx/mlx/backend/cpu/unary.h +281 -0
- data/mlx/mlx/backend/cpu/unary_ops.h +175 -0
- data/mlx/mlx/backend/cuda/CMakeLists.txt +265 -0
- data/mlx/mlx/backend/cuda/allocator.cpp +451 -0
- data/mlx/mlx/backend/cuda/allocator.h +94 -0
- data/mlx/mlx/backend/cuda/arange.cu +68 -0
- data/mlx/mlx/backend/cuda/arg_reduce.cu +189 -0
- data/mlx/mlx/backend/cuda/bin2h.cmake +150 -0
- data/mlx/mlx/backend/cuda/binary/CMakeLists.txt +21 -0
- data/mlx/mlx/backend/cuda/binary/add.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/arctan2.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/binary.cuh +383 -0
- data/mlx/mlx/backend/cuda/binary/bitwise_binary.cu +27 -0
- data/mlx/mlx/backend/cuda/binary/divide.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/equal.cu +15 -0
- data/mlx/mlx/backend/cuda/binary/greater.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/greater_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/less_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/log_add_exp.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_and.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/logical_or.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/maximum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/minimum.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/multiply.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/not_equal.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/power.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/remainder.cu +7 -0
- data/mlx/mlx/backend/cuda/binary/subtract.cu +7 -0
- data/mlx/mlx/backend/cuda/binary_two.cu +412 -0
- data/mlx/mlx/backend/cuda/compiled.cpp +357 -0
- data/mlx/mlx/backend/cuda/conv/conv.h +126 -0
- data/mlx/mlx/backend/cuda/conv/gemm_conv.cu +217 -0
- data/mlx/mlx/backend/cuda/conv/gemm_grouped_conv.cu +231 -0
- data/mlx/mlx/backend/cuda/conv.cpp +403 -0
- data/mlx/mlx/backend/cuda/copy/copy.cuh +55 -0
- data/mlx/mlx/backend/cuda/copy/copy_contiguous.cu +88 -0
- data/mlx/mlx/backend/cuda/copy/copy_general.cu +171 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_dynamic.cu +118 -0
- data/mlx/mlx/backend/cuda/copy/copy_general_input.cu +229 -0
- data/mlx/mlx/backend/cuda/copy.cu +132 -0
- data/mlx/mlx/backend/cuda/cublas_utils.cpp +222 -0
- data/mlx/mlx/backend/cuda/cublas_utils.h +95 -0
- data/mlx/mlx/backend/cuda/cuda.h +21 -0
- data/mlx/mlx/backend/cuda/cuda_utils.h +90 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.cpp +133 -0
- data/mlx/mlx/backend/cuda/cudnn_utils.h +187 -0
- data/mlx/mlx/backend/cuda/custom_kernel.cpp +379 -0
- data/mlx/mlx/backend/cuda/cutlass_utils.cuh +46 -0
- data/mlx/mlx/backend/cuda/delayload.cpp +80 -0
- data/mlx/mlx/backend/cuda/device/atomic_ops.cuh +63 -0
- data/mlx/mlx/backend/cuda/device/binary_ops.cuh +300 -0
- data/mlx/mlx/backend/cuda/device/cast_op.cuh +118 -0
- data/mlx/mlx/backend/cuda/device/complex.cuh +60 -0
- data/mlx/mlx/backend/cuda/device/config.h +12 -0
- data/mlx/mlx/backend/cuda/device/fp16_math.cuh +96 -0
- data/mlx/mlx/backend/cuda/device/gather.cuh +53 -0
- data/mlx/mlx/backend/cuda/device/gather_axis.cuh +65 -0
- data/mlx/mlx/backend/cuda/device/indexing.cuh +30 -0
- data/mlx/mlx/backend/cuda/device/scatter.cuh +68 -0
- data/mlx/mlx/backend/cuda/device/scatter_axis.cuh +67 -0
- data/mlx/mlx/backend/cuda/device/scatter_ops.cuh +44 -0
- data/mlx/mlx/backend/cuda/device/ternary_ops.cuh +13 -0
- data/mlx/mlx/backend/cuda/device/unary_ops.cuh +350 -0
- data/mlx/mlx/backend/cuda/device/utils.cuh +464 -0
- data/mlx/mlx/backend/cuda/device.cpp +522 -0
- data/mlx/mlx/backend/cuda/device.h +195 -0
- data/mlx/mlx/backend/cuda/device_info.cpp +232 -0
- data/mlx/mlx/backend/cuda/distributed.cu +121 -0
- data/mlx/mlx/backend/cuda/eval.cpp +66 -0
- data/mlx/mlx/backend/cuda/event.cu +415 -0
- data/mlx/mlx/backend/cuda/event.h +79 -0
- data/mlx/mlx/backend/cuda/fence.cpp +42 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.cpp +233 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm.h +114 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_0.cpp +77 -0
- data/mlx/mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu +329 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.cu +327 -0
- data/mlx/mlx/backend/cuda/gemms/gemv.h +34 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm.h +25 -0
- data/mlx/mlx/backend/cuda/gemms/grouped_gemm_unaligned.cu +358 -0
- data/mlx/mlx/backend/cuda/indexing.cpp +434 -0
- data/mlx/mlx/backend/cuda/jit_module.cpp +443 -0
- data/mlx/mlx/backend/cuda/jit_module.h +120 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cu +52 -0
- data/mlx/mlx/backend/cuda/kernel_utils.cuh +148 -0
- data/mlx/mlx/backend/cuda/layer_norm.cu +417 -0
- data/mlx/mlx/backend/cuda/load.cpp +60 -0
- data/mlx/mlx/backend/cuda/logsumexp.cu +161 -0
- data/mlx/mlx/backend/cuda/lru_cache.h +190 -0
- data/mlx/mlx/backend/cuda/matmul.cpp +373 -0
- data/mlx/mlx/backend/cuda/no_cuda.cpp +47 -0
- data/mlx/mlx/backend/cuda/primitives.cpp +46 -0
- data/mlx/mlx/backend/cuda/quantized/affine_quantize.cu +329 -0
- data/mlx/mlx/backend/cuda/quantized/convert_fp8.cu +19 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.cpp +206 -0
- data/mlx/mlx/backend/cuda/quantized/cublas_qqmm.h +88 -0
- data/mlx/mlx/backend/cuda/quantized/cuda_fp4.h +100 -0
- data/mlx/mlx/backend/cuda/quantized/fp_quantize.cu +496 -0
- data/mlx/mlx/backend/cuda/quantized/mxfp8_quantize.cuh +32 -0
- data/mlx/mlx/backend/cuda/quantized/no_qqmm_impl.cpp +26 -0
- data/mlx/mlx/backend/cuda/quantized/nvfp4_quantize.cuh +334 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.cu +304 -0
- data/mlx/mlx/backend/cuda/quantized/qmv.h +21 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm.cpp +158 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.cpp +50 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_impl.h +26 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.cu +227 -0
- data/mlx/mlx/backend/cuda/quantized/qqmm_utils.h +30 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.cpp +85 -0
- data/mlx/mlx/backend/cuda/quantized/quantized.h +53 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.cuh +88 -0
- data/mlx/mlx/backend/cuda/quantized/quantized_utils.h +50 -0
- data/mlx/mlx/backend/cuda/random.cu +202 -0
- data/mlx/mlx/backend/cuda/reduce/all_reduce.cu +159 -0
- data/mlx/mlx/backend/cuda/reduce/col_reduce.cu +510 -0
- data/mlx/mlx/backend/cuda/reduce/init_reduce.cu +50 -0
- data/mlx/mlx/backend/cuda/reduce/reduce.cuh +71 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_ops.cuh +211 -0
- data/mlx/mlx/backend/cuda/reduce/reduce_utils.cuh +145 -0
- data/mlx/mlx/backend/cuda/reduce/row_reduce.cu +361 -0
- data/mlx/mlx/backend/cuda/reduce.cu +73 -0
- data/mlx/mlx/backend/cuda/rms_norm.cu +536 -0
- data/mlx/mlx/backend/cuda/rope.cu +429 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cpp +681 -0
- data/mlx/mlx/backend/cuda/scaled_dot_product_attention.cu +796 -0
- data/mlx/mlx/backend/cuda/scan.cu +468 -0
- data/mlx/mlx/backend/cuda/slicing.cpp +111 -0
- data/mlx/mlx/backend/cuda/softmax.cu +162 -0
- data/mlx/mlx/backend/cuda/sort.cu +1076 -0
- data/mlx/mlx/backend/cuda/steel/defines.cuh +9 -0
- data/mlx/mlx/backend/cuda/steel/gemm.cuh +101 -0
- data/mlx/mlx/backend/cuda/steel/mma.cuh +117 -0
- data/mlx/mlx/backend/cuda/steel/tiles.cuh +450 -0
- data/mlx/mlx/backend/cuda/steel/utils.cuh +89 -0
- data/mlx/mlx/backend/cuda/ternary.cu +271 -0
- data/mlx/mlx/backend/cuda/unary/CMakeLists.txt +34 -0
- data/mlx/mlx/backend/cuda/unary/abs.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arccosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arcsinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/arctanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/bitwise_invert.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/ceil.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/conjugate.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cos.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/cosh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/erf_inv.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/exp.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/expm1.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/floor.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/imag.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/log.cu +21 -0
- data/mlx/mlx/backend/cuda/unary/log1p.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/logical_not.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/negative.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/real.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/round.cu +18 -0
- data/mlx/mlx/backend/cuda/unary/sigmoid.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sign.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sin.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sinh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/sqrt.cu +15 -0
- data/mlx/mlx/backend/cuda/unary/square.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tan.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/tanh.cu +7 -0
- data/mlx/mlx/backend/cuda/unary/unary.cuh +224 -0
- data/mlx/mlx/backend/cuda/utils.cpp +116 -0
- data/mlx/mlx/backend/cuda/utils.h +49 -0
- data/mlx/mlx/backend/cuda/vector_types.cuh +48 -0
- data/mlx/mlx/backend/cuda/worker.cpp +79 -0
- data/mlx/mlx/backend/cuda/worker.h +55 -0
- data/mlx/mlx/backend/gpu/CMakeLists.txt +5 -0
- data/mlx/mlx/backend/gpu/copy.cpp +89 -0
- data/mlx/mlx/backend/gpu/copy.h +57 -0
- data/mlx/mlx/backend/gpu/device_info.h +36 -0
- data/mlx/mlx/backend/gpu/eval.h +18 -0
- data/mlx/mlx/backend/gpu/primitives.cpp +307 -0
- data/mlx/mlx/backend/gpu/slicing.cpp +44 -0
- data/mlx/mlx/backend/gpu/slicing.h +36 -0
- data/mlx/mlx/backend/metal/CMakeLists.txt +144 -0
- data/mlx/mlx/backend/metal/allocator.cpp +279 -0
- data/mlx/mlx/backend/metal/allocator.h +79 -0
- data/mlx/mlx/backend/metal/binary.cpp +257 -0
- data/mlx/mlx/backend/metal/binary.h +33 -0
- data/mlx/mlx/backend/metal/compiled.cpp +471 -0
- data/mlx/mlx/backend/metal/conv.cpp +1118 -0
- data/mlx/mlx/backend/metal/copy.cpp +235 -0
- data/mlx/mlx/backend/metal/custom_kernel.cpp +430 -0
- data/mlx/mlx/backend/metal/device.cpp +816 -0
- data/mlx/mlx/backend/metal/device.h +289 -0
- data/mlx/mlx/backend/metal/device_info.cpp +58 -0
- data/mlx/mlx/backend/metal/distributed.cpp +38 -0
- data/mlx/mlx/backend/metal/eval.cpp +97 -0
- data/mlx/mlx/backend/metal/event.cpp +62 -0
- data/mlx/mlx/backend/metal/fence.cpp +162 -0
- data/mlx/mlx/backend/metal/fft.cpp +807 -0
- data/mlx/mlx/backend/metal/hadamard.cpp +198 -0
- data/mlx/mlx/backend/metal/indexing.cpp +727 -0
- data/mlx/mlx/backend/metal/jit/includes.h +58 -0
- data/mlx/mlx/backend/metal/jit/indexing.h +76 -0
- data/mlx/mlx/backend/metal/jit_kernels.cpp +1118 -0
- data/mlx/mlx/backend/metal/kernels/CMakeLists.txt +193 -0
- data/mlx/mlx/backend/metal/kernels/arange.h +9 -0
- data/mlx/mlx/backend/metal/kernels/arange.metal +20 -0
- data/mlx/mlx/backend/metal/kernels/arg_reduce.metal +182 -0
- data/mlx/mlx/backend/metal/kernels/atomic.h +345 -0
- data/mlx/mlx/backend/metal/kernels/bf16.h +16 -0
- data/mlx/mlx/backend/metal/kernels/bf16_math.h +380 -0
- data/mlx/mlx/backend/metal/kernels/binary.h +199 -0
- data/mlx/mlx/backend/metal/kernels/binary.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/binary_ops.h +330 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.h +244 -0
- data/mlx/mlx/backend/metal/kernels/binary_two.metal +54 -0
- data/mlx/mlx/backend/metal/kernels/cexpf.h +134 -0
- data/mlx/mlx/backend/metal/kernels/complex.h +173 -0
- data/mlx/mlx/backend/metal/kernels/conv.metal +701 -0
- data/mlx/mlx/backend/metal/kernels/copy.h +276 -0
- data/mlx/mlx/backend/metal/kernels/copy.metal +75 -0
- data/mlx/mlx/backend/metal/kernels/defines.h +24 -0
- data/mlx/mlx/backend/metal/kernels/erf.h +69 -0
- data/mlx/mlx/backend/metal/kernels/expm1f.h +90 -0
- data/mlx/mlx/backend/metal/kernels/fence.metal +52 -0
- data/mlx/mlx/backend/metal/kernels/fft/radix.h +328 -0
- data/mlx/mlx/backend/metal/kernels/fft/readwrite.h +624 -0
- data/mlx/mlx/backend/metal/kernels/fft.h +486 -0
- data/mlx/mlx/backend/metal/kernels/fft.metal +67 -0
- data/mlx/mlx/backend/metal/kernels/fp4.h +48 -0
- data/mlx/mlx/backend/metal/kernels/fp8.h +80 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.h +1850 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized.metal +153 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.h +1044 -0
- data/mlx/mlx/backend/metal/kernels/fp_quantized_nax.metal +79 -0
- data/mlx/mlx/backend/metal/kernels/gemv.metal +868 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.h +827 -0
- data/mlx/mlx/backend/metal/kernels/gemv_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/hadamard.h +182 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather.h +51 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_axis.h +44 -0
- data/mlx/mlx/backend/metal/kernels/indexing/gather_front.h +24 -0
- data/mlx/mlx/backend/metal/kernels/indexing/indexing.h +23 -0
- data/mlx/mlx/backend/metal/kernels/indexing/masked_scatter.h +41 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter.h +59 -0
- data/mlx/mlx/backend/metal/kernels/indexing/scatter_axis.h +52 -0
- data/mlx/mlx/backend/metal/kernels/layer_norm.metal +433 -0
- data/mlx/mlx/backend/metal/kernels/logging.h +26 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.h +140 -0
- data/mlx/mlx/backend/metal/kernels/logsumexp.metal +18 -0
- data/mlx/mlx/backend/metal/kernels/quantized.h +2508 -0
- data/mlx/mlx/backend/metal/kernels/quantized.metal +144 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.h +1705 -0
- data/mlx/mlx/backend/metal/kernels/quantized_nax.metal +106 -0
- data/mlx/mlx/backend/metal/kernels/quantized_utils.h +90 -0
- data/mlx/mlx/backend/metal/kernels/random.metal +103 -0
- data/mlx/mlx/backend/metal/kernels/reduce.h +5 -0
- data/mlx/mlx/backend/metal/kernels/reduce.metal +169 -0
- data/mlx/mlx/backend/metal/kernels/reduce_utils.h +6 -0
- data/mlx/mlx/backend/metal/kernels/reduction/ops.h +275 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_all.h +66 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_col.h +398 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_init.h +8 -0
- data/mlx/mlx/backend/metal/kernels/reduction/reduce_row.h +369 -0
- data/mlx/mlx/backend/metal/kernels/rms_norm.metal +391 -0
- data/mlx/mlx/backend/metal/kernels/rope.metal +229 -0
- data/mlx/mlx/backend/metal/kernels/scaled_dot_product_attention.metal +44 -0
- data/mlx/mlx/backend/metal/kernels/scan.h +514 -0
- data/mlx/mlx/backend/metal/kernels/scan.metal +109 -0
- data/mlx/mlx/backend/metal/kernels/sdpa_vector.h +394 -0
- data/mlx/mlx/backend/metal/kernels/softmax.h +190 -0
- data/mlx/mlx/backend/metal/kernels/softmax.metal +24 -0
- data/mlx/mlx/backend/metal/kernels/sort.h +719 -0
- data/mlx/mlx/backend/metal/kernels/sort.metal +80 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/attn.h +296 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.h +471 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention.metal +27 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.h +481 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/kernels/steel_attention_nax.metal +28 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/loader.h +264 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/mma.h +750 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/nax.h +1076 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/params.h +44 -0
- data/mlx/mlx/backend/metal/kernels/steel/attn/transforms.h +71 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/conv.h +13 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.h +176 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv.metal +56 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.h +225 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/kernels/steel_conv_general.metal +47 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loader.h +6 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_l.h +451 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_channel_n.h +319 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/loaders/loader_general.h +381 -0
- data/mlx/mlx/backend/metal/kernels/steel/conv/params.h +62 -0
- data/mlx/mlx/backend/metal/kernels/steel/defines.h +7 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm.h +295 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/gemm_nax.h +157 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.h +346 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused.metal +34 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.h +219 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_fused_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.h +459 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather.metal +59 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.h +143 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_gather_nax.metal +37 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.h +719 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_masked.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.h +266 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_segmented.metal +43 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.h +227 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk.metal +76 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.h +152 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/kernels/steel_gemm_splitk_nax.metal +30 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/loader.h +137 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/mma.h +1146 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/nax.h +1084 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/params.h +65 -0
- data/mlx/mlx/backend/metal/kernels/steel/gemm/transforms.h +72 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/integral_constant.h +134 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils/type_traits.h +55 -0
- data/mlx/mlx/backend/metal/kernels/steel/utils.h +42 -0
- data/mlx/mlx/backend/metal/kernels/ternary.h +145 -0
- data/mlx/mlx/backend/metal/kernels/ternary.metal +48 -0
- data/mlx/mlx/backend/metal/kernels/ternary_ops.h +10 -0
- data/mlx/mlx/backend/metal/kernels/unary.h +63 -0
- data/mlx/mlx/backend/metal/kernels/unary.metal +115 -0
- data/mlx/mlx/backend/metal/kernels/unary_ops.h +454 -0
- data/mlx/mlx/backend/metal/kernels/utils.h +445 -0
- data/mlx/mlx/backend/metal/kernels.h +375 -0
- data/mlx/mlx/backend/metal/logsumexp.cpp +95 -0
- data/mlx/mlx/backend/metal/make_compiled_preamble.sh +120 -0
- data/mlx/mlx/backend/metal/matmul.cpp +2572 -0
- data/mlx/mlx/backend/metal/matmul.h +144 -0
- data/mlx/mlx/backend/metal/metal.cpp +50 -0
- data/mlx/mlx/backend/metal/metal.h +25 -0
- data/mlx/mlx/backend/metal/no_metal.cpp +42 -0
- data/mlx/mlx/backend/metal/nojit_kernels.cpp +414 -0
- data/mlx/mlx/backend/metal/normalization.cpp +433 -0
- data/mlx/mlx/backend/metal/primitives.cpp +242 -0
- data/mlx/mlx/backend/metal/quantized.cpp +1651 -0
- data/mlx/mlx/backend/metal/reduce.cpp +1038 -0
- data/mlx/mlx/backend/metal/reduce.h +41 -0
- data/mlx/mlx/backend/metal/resident.cpp +100 -0
- data/mlx/mlx/backend/metal/resident.h +32 -0
- data/mlx/mlx/backend/metal/rope.cpp +165 -0
- data/mlx/mlx/backend/metal/scaled_dot_product_attention.cpp +798 -0
- data/mlx/mlx/backend/metal/scan.cpp +145 -0
- data/mlx/mlx/backend/metal/scan.h +17 -0
- data/mlx/mlx/backend/metal/slicing.cpp +99 -0
- data/mlx/mlx/backend/metal/softmax.cpp +87 -0
- data/mlx/mlx/backend/metal/sort.cpp +368 -0
- data/mlx/mlx/backend/metal/ternary.cpp +160 -0
- data/mlx/mlx/backend/metal/ternary.h +21 -0
- data/mlx/mlx/backend/metal/unary.cpp +161 -0
- data/mlx/mlx/backend/metal/unary.h +21 -0
- data/mlx/mlx/backend/metal/utils.cpp +77 -0
- data/mlx/mlx/backend/metal/utils.h +99 -0
- data/mlx/mlx/backend/no_cpu/CMakeLists.txt +7 -0
- data/mlx/mlx/backend/no_cpu/compiled.cpp +24 -0
- data/mlx/mlx/backend/no_cpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_cpu/primitives.cpp +146 -0
- data/mlx/mlx/backend/no_gpu/CMakeLists.txt +8 -0
- data/mlx/mlx/backend/no_gpu/allocator.cpp +134 -0
- data/mlx/mlx/backend/no_gpu/apple_memory.h +16 -0
- data/mlx/mlx/backend/no_gpu/device_info.cpp +22 -0
- data/mlx/mlx/backend/no_gpu/eval.cpp +24 -0
- data/mlx/mlx/backend/no_gpu/event.cpp +53 -0
- data/mlx/mlx/backend/no_gpu/fence.cpp +54 -0
- data/mlx/mlx/backend/no_gpu/linux_memory.h +22 -0
- data/mlx/mlx/backend/no_gpu/primitives.cpp +185 -0
- data/mlx/mlx/compile.cpp +1243 -0
- data/mlx/mlx/compile.h +45 -0
- data/mlx/mlx/compile_impl.h +70 -0
- data/mlx/mlx/device.cpp +72 -0
- data/mlx/mlx/device.h +56 -0
- data/mlx/mlx/distributed/CMakeLists.txt +14 -0
- data/mlx/mlx/distributed/distributed.cpp +197 -0
- data/mlx/mlx/distributed/distributed.h +61 -0
- data/mlx/mlx/distributed/distributed_impl.h +59 -0
- data/mlx/mlx/distributed/jaccl/CMakeLists.txt +12 -0
- data/mlx/mlx/distributed/jaccl/jaccl.cpp +178 -0
- data/mlx/mlx/distributed/jaccl/jaccl.h +12 -0
- data/mlx/mlx/distributed/jaccl/mesh.cpp +451 -0
- data/mlx/mlx/distributed/jaccl/mesh.h +122 -0
- data/mlx/mlx/distributed/jaccl/no_jaccl.cpp +20 -0
- data/mlx/mlx/distributed/jaccl/ring.cpp +692 -0
- data/mlx/mlx/distributed/jaccl/ring.h +178 -0
- data/mlx/mlx/distributed/jaccl/utils.cpp +329 -0
- data/mlx/mlx/distributed/jaccl/utils.h +342 -0
- data/mlx/mlx/distributed/mpi/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/mpi/mpi.cpp +501 -0
- data/mlx/mlx/distributed/mpi/mpi.h +12 -0
- data/mlx/mlx/distributed/mpi/mpi_declarations.h +28 -0
- data/mlx/mlx/distributed/mpi/no_mpi.cpp +20 -0
- data/mlx/mlx/distributed/nccl/CMakeLists.txt +26 -0
- data/mlx/mlx/distributed/nccl/nccl.cpp +443 -0
- data/mlx/mlx/distributed/nccl/nccl.h +12 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/CMakeLists.txt +1 -0
- data/mlx/mlx/distributed/nccl/nccl_stub/nccl_stubs.cpp +54 -0
- data/mlx/mlx/distributed/nccl/no_nccl.cpp +20 -0
- data/mlx/mlx/distributed/ops.cpp +186 -0
- data/mlx/mlx/distributed/ops.h +57 -0
- data/mlx/mlx/distributed/primitives.cpp +95 -0
- data/mlx/mlx/distributed/primitives.h +156 -0
- data/mlx/mlx/distributed/reduction_ops.h +38 -0
- data/mlx/mlx/distributed/ring/CMakeLists.txt +5 -0
- data/mlx/mlx/distributed/ring/no_ring.cpp +20 -0
- data/mlx/mlx/distributed/ring/ring.cpp +870 -0
- data/mlx/mlx/distributed/ring/ring.h +12 -0
- data/mlx/mlx/distributed/utils.cpp +206 -0
- data/mlx/mlx/distributed/utils.h +67 -0
- data/mlx/mlx/dtype.cpp +197 -0
- data/mlx/mlx/dtype.h +116 -0
- data/mlx/mlx/dtype_utils.cpp +42 -0
- data/mlx/mlx/dtype_utils.h +119 -0
- data/mlx/mlx/einsum.cpp +941 -0
- data/mlx/mlx/einsum.h +23 -0
- data/mlx/mlx/event.h +58 -0
- data/mlx/mlx/export.cpp +1130 -0
- data/mlx/mlx/export.h +137 -0
- data/mlx/mlx/export_impl.h +99 -0
- data/mlx/mlx/fast.cpp +941 -0
- data/mlx/mlx/fast.h +103 -0
- data/mlx/mlx/fast_primitives.h +427 -0
- data/mlx/mlx/fence.h +39 -0
- data/mlx/mlx/fft.cpp +262 -0
- data/mlx/mlx/fft.h +159 -0
- data/mlx/mlx/graph_utils.cpp +175 -0
- data/mlx/mlx/graph_utils.h +67 -0
- data/mlx/mlx/io/CMakeLists.txt +25 -0
- data/mlx/mlx/io/gguf.cpp +470 -0
- data/mlx/mlx/io/gguf.h +20 -0
- data/mlx/mlx/io/gguf_quants.cpp +164 -0
- data/mlx/mlx/io/load.cpp +397 -0
- data/mlx/mlx/io/load.h +175 -0
- data/mlx/mlx/io/no_gguf.cpp +20 -0
- data/mlx/mlx/io/no_safetensors.cpp +37 -0
- data/mlx/mlx/io/safetensors.cpp +234 -0
- data/mlx/mlx/io.h +61 -0
- data/mlx/mlx/linalg.cpp +708 -0
- data/mlx/mlx/linalg.h +115 -0
- data/mlx/mlx/memory.h +80 -0
- data/mlx/mlx/mlx.h +25 -0
- data/mlx/mlx/ops.cpp +6094 -0
- data/mlx/mlx/ops.h +1610 -0
- data/mlx/mlx/primitives.cpp +5850 -0
- data/mlx/mlx/primitives.h +2525 -0
- data/mlx/mlx/random.cpp +492 -0
- data/mlx/mlx/random.h +283 -0
- data/mlx/mlx/scheduler.cpp +73 -0
- data/mlx/mlx/scheduler.h +189 -0
- data/mlx/mlx/small_vector.h +540 -0
- data/mlx/mlx/stream.h +42 -0
- data/mlx/mlx/threadpool.h +133 -0
- data/mlx/mlx/transforms.cpp +1065 -0
- data/mlx/mlx/transforms.h +231 -0
- data/mlx/mlx/transforms_impl.h +88 -0
- data/mlx/mlx/types/bf16.h +187 -0
- data/mlx/mlx/types/complex.h +113 -0
- data/mlx/mlx/types/fp16.h +234 -0
- data/mlx/mlx/types/half_types.h +58 -0
- data/mlx/mlx/types/limits.h +70 -0
- data/mlx/mlx/utils.cpp +302 -0
- data/mlx/mlx/utils.h +174 -0
- data/mlx/mlx/version.cpp +11 -0
- data/mlx/mlx/version.h +22 -0
- data/mlx/mlx.pc.in +52 -0
- data/mlx/pyproject.toml +7 -0
- data/mlx/python/mlx/__main__.py +27 -0
- data/mlx/python/mlx/_distributed_utils/common.py +135 -0
- data/mlx/python/mlx/_distributed_utils/config.py +631 -0
- data/mlx/python/mlx/_distributed_utils/launch.py +570 -0
- data/mlx/python/mlx/_reprlib_fix.py +16 -0
- data/mlx/python/mlx/_stub_patterns.txt +36 -0
- data/mlx/python/mlx/extension.py +88 -0
- data/mlx/python/mlx/nn/__init__.py +5 -0
- data/mlx/python/mlx/nn/init.py +441 -0
- data/mlx/python/mlx/nn/layers/__init__.py +105 -0
- data/mlx/python/mlx/nn/layers/activations.py +661 -0
- data/mlx/python/mlx/nn/layers/base.py +675 -0
- data/mlx/python/mlx/nn/layers/containers.py +24 -0
- data/mlx/python/mlx/nn/layers/convolution.py +232 -0
- data/mlx/python/mlx/nn/layers/convolution_transpose.py +242 -0
- data/mlx/python/mlx/nn/layers/distributed.py +601 -0
- data/mlx/python/mlx/nn/layers/dropout.py +137 -0
- data/mlx/python/mlx/nn/layers/embedding.py +53 -0
- data/mlx/python/mlx/nn/layers/linear.py +180 -0
- data/mlx/python/mlx/nn/layers/normalization.py +363 -0
- data/mlx/python/mlx/nn/layers/pooling.py +398 -0
- data/mlx/python/mlx/nn/layers/positional_encoding.py +162 -0
- data/mlx/python/mlx/nn/layers/quantized.py +426 -0
- data/mlx/python/mlx/nn/layers/recurrent.py +289 -0
- data/mlx/python/mlx/nn/layers/transformer.py +354 -0
- data/mlx/python/mlx/nn/layers/upsample.py +277 -0
- data/mlx/python/mlx/nn/losses.py +610 -0
- data/mlx/python/mlx/nn/utils.py +165 -0
- data/mlx/python/mlx/optimizers/__init__.py +4 -0
- data/mlx/python/mlx/optimizers/optimizers.py +976 -0
- data/mlx/python/mlx/optimizers/schedulers.py +158 -0
- data/mlx/python/mlx/py.typed +1 -0
- data/mlx/python/mlx/utils.py +325 -0
- data/mlx/python/src/CMakeLists.txt +96 -0
- data/mlx/python/src/array.cpp +1525 -0
- data/mlx/python/src/buffer.h +124 -0
- data/mlx/python/src/constants.cpp +15 -0
- data/mlx/python/src/convert.cpp +504 -0
- data/mlx/python/src/convert.h +50 -0
- data/mlx/python/src/cuda.cpp +19 -0
- data/mlx/python/src/device.cpp +98 -0
- data/mlx/python/src/distributed.cpp +352 -0
- data/mlx/python/src/export.cpp +356 -0
- data/mlx/python/src/fast.cpp +627 -0
- data/mlx/python/src/fft.cpp +514 -0
- data/mlx/python/src/indexing.cpp +1016 -0
- data/mlx/python/src/indexing.h +41 -0
- data/mlx/python/src/linalg.cpp +663 -0
- data/mlx/python/src/load.cpp +531 -0
- data/mlx/python/src/load.h +51 -0
- data/mlx/python/src/memory.cpp +125 -0
- data/mlx/python/src/metal.cpp +98 -0
- data/mlx/python/src/mlx.cpp +51 -0
- data/mlx/python/src/mlx_func.cpp +116 -0
- data/mlx/python/src/mlx_func.h +31 -0
- data/mlx/python/src/ops.cpp +5545 -0
- data/mlx/python/src/random.cpp +516 -0
- data/mlx/python/src/small_vector.h +76 -0
- data/mlx/python/src/stream.cpp +147 -0
- data/mlx/python/src/transforms.cpp +1542 -0
- data/mlx/python/src/trees.cpp +311 -0
- data/mlx/python/src/trees.h +62 -0
- data/mlx/python/src/utils.cpp +98 -0
- data/mlx/python/src/utils.h +78 -0
- data/mlx/python/tests/__main__.py +5 -0
- data/mlx/python/tests/cuda_skip.py +62 -0
- data/mlx/python/tests/mlx_distributed_tests.py +314 -0
- data/mlx/python/tests/mlx_tests.py +116 -0
- data/mlx/python/tests/mpi_test_distributed.py +142 -0
- data/mlx/python/tests/nccl_test_distributed.py +52 -0
- data/mlx/python/tests/ring_test_distributed.py +131 -0
- data/mlx/python/tests/test_array.py +2139 -0
- data/mlx/python/tests/test_autograd.py +880 -0
- data/mlx/python/tests/test_bf16.py +196 -0
- data/mlx/python/tests/test_blas.py +1429 -0
- data/mlx/python/tests/test_compile.py +1277 -0
- data/mlx/python/tests/test_constants.py +41 -0
- data/mlx/python/tests/test_conv.py +1198 -0
- data/mlx/python/tests/test_conv_transpose.py +810 -0
- data/mlx/python/tests/test_device.py +150 -0
- data/mlx/python/tests/test_double.py +306 -0
- data/mlx/python/tests/test_einsum.py +363 -0
- data/mlx/python/tests/test_eval.py +200 -0
- data/mlx/python/tests/test_export_import.py +614 -0
- data/mlx/python/tests/test_fast.py +923 -0
- data/mlx/python/tests/test_fast_sdpa.py +647 -0
- data/mlx/python/tests/test_fft.py +323 -0
- data/mlx/python/tests/test_graph.py +37 -0
- data/mlx/python/tests/test_init.py +139 -0
- data/mlx/python/tests/test_linalg.py +621 -0
- data/mlx/python/tests/test_load.py +447 -0
- data/mlx/python/tests/test_losses.py +427 -0
- data/mlx/python/tests/test_memory.py +77 -0
- data/mlx/python/tests/test_nn.py +1986 -0
- data/mlx/python/tests/test_ops.py +3261 -0
- data/mlx/python/tests/test_optimizers.py +584 -0
- data/mlx/python/tests/test_quantized.py +1160 -0
- data/mlx/python/tests/test_random.py +392 -0
- data/mlx/python/tests/test_reduce.py +223 -0
- data/mlx/python/tests/test_tree.py +96 -0
- data/mlx/python/tests/test_upsample.py +100 -0
- data/mlx/python/tests/test_vmap.py +860 -0
- data/mlx/setup.py +315 -0
- data/mlx/tests/CMakeLists.txt +44 -0
- data/mlx/tests/allocator_tests.cpp +41 -0
- data/mlx/tests/arg_reduce_tests.cpp +204 -0
- data/mlx/tests/array_tests.cpp +663 -0
- data/mlx/tests/autograd_tests.cpp +1399 -0
- data/mlx/tests/blas_tests.cpp +110 -0
- data/mlx/tests/compile_tests.cpp +818 -0
- data/mlx/tests/creations_tests.cpp +239 -0
- data/mlx/tests/custom_vjp_tests.cpp +55 -0
- data/mlx/tests/device_tests.cpp +35 -0
- data/mlx/tests/einsum_tests.cpp +85 -0
- data/mlx/tests/eval_tests.cpp +93 -0
- data/mlx/tests/export_import_tests.cpp +164 -0
- data/mlx/tests/fft_tests.cpp +366 -0
- data/mlx/tests/gpu_tests.cpp +523 -0
- data/mlx/tests/linalg_tests.cpp +639 -0
- data/mlx/tests/load_tests.cpp +270 -0
- data/mlx/tests/ops_tests.cpp +4159 -0
- data/mlx/tests/random_tests.cpp +716 -0
- data/mlx/tests/scheduler_tests.cpp +121 -0
- data/mlx/tests/tests.cpp +26 -0
- data/mlx/tests/utils_tests.cpp +67 -0
- data/mlx/tests/vmap_tests.cpp +547 -0
- metadata +958 -0
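The largest file in this release, data/mlx/python/tests/test_conv.py, is shown below. Its tests repeatedly transpose arrays before comparing against PyTorch because MLX convolutions are channels-last (conv1d takes input (N, L, C_in) and weight (C_out, K, C_in)) while torch.conv1d is channels-first. A minimal sketch of that layout convention (shapes here are illustrative, assuming mlx is installed; this is not part of the packaged test file):

    import mlx.core as mx

    # MLX conv1d: input (N, L, C_in), weight (C_out, K, C_in), channels-last.
    # PyTorch's conv1d instead uses (N, C_in, L) / (C_out, C_in, K).
    x = mx.ones((4, 31, 32))  # batch 4, length 31, 32 input channels
    w = mx.ones((64, 5, 32))  # 64 output channels, kernel size 5
    y = mx.conv1d(x, w, stride=5, padding=2)
    print(y.shape)  # (4, 7, 64): out length = 1 + (31 + 2*2 - (5 - 1) - 1) // 5

This matches the output-size formula the gradient tests below use to shape their cotangents.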
data/mlx/python/tests/test_conv.py
@@ -0,0 +1,1198 @@
|
|
|
1
|
+
# Copyright © 2023-2024 Apple Inc.
|
|
2
|
+
|
|
3
|
+
import math
|
|
4
|
+
import unittest
|
|
5
|
+
from itertools import permutations
|
|
6
|
+
|
|
7
|
+
import mlx.core as mx
|
|
8
|
+
import mlx_tests
|
|
9
|
+
import numpy as np
|
|
10
|
+
|
|
11
|
+
try:
|
|
12
|
+
import torch
|
|
13
|
+
import torch.nn.functional as F
|
|
14
|
+
|
|
15
|
+
has_torch = True
|
|
16
|
+
except ImportError as e:
|
|
17
|
+
has_torch = False
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class TestConv(mlx_tests.MLXTestCase):
|
|
21
|
+
def test_numpy_conv(self):
|
|
22
|
+
for dtype in (
|
|
23
|
+
"float16",
|
|
24
|
+
"float32",
|
|
25
|
+
):
|
|
26
|
+
np_dtype = getattr(np, dtype)
|
|
27
|
+
for M, N, mode in (
|
|
28
|
+
(1, 1, "full"),
|
|
29
|
+
(25, 5, "full"),
|
|
30
|
+
(24, 5, "same"),
|
|
31
|
+
(24, 4, "same"),
|
|
32
|
+
(24, 4, "valid"),
|
|
33
|
+
(4, 24, "full"),
|
|
34
|
+
(5, 25, "same"),
|
|
35
|
+
(4, 25, "valid"),
|
|
36
|
+
):
|
|
37
|
+
with self.subTest(dtype=dtype, M=M, N=N, mode=mode):
|
|
38
|
+
atol = 1e-6 if dtype == "float32" else 1e-5
|
|
39
|
+
a_np = np.random.rand(M).astype(np_dtype)
|
|
40
|
+
v_np = np.random.rand(N).astype(np_dtype)
|
|
41
|
+
a_mx = mx.array(a_np)
|
|
42
|
+
v_mx = mx.array(v_np)
|
|
43
|
+
|
|
44
|
+
c_np = np.convolve(a_np, v_np, mode=mode)
|
|
45
|
+
c_mx = mx.convolve(a_mx, v_mx, mode=mode)
|
|
46
|
+
|
|
47
|
+
self.assertEqual(c_mx.shape, c_np.shape)
|
|
48
|
+
self.assertTrue(np.allclose(c_mx, c_np, atol=atol))
|
|
49
|
+
|
|
50
|
+
def test_conv_1d_groups_flipped(self):
|
|
51
|
+
x = mx.broadcast_to(mx.arange(5).astype(mx.float32), (2, 5)).T
|
|
52
|
+
w = mx.broadcast_to(mx.arange(4).astype(mx.float32), (2, 4))
|
|
53
|
+
out = mx.conv_general(x[None], w[..., None], flip=True, groups=2)
|
|
54
|
+
expected = mx.array([4.0, 4.0, 10.0, 10.0]).reshape(1, 2, 2)
|
|
55
|
+
self.assertTrue(mx.allclose(out, expected))
|
|
56
|
+
|
|
57
|
+
@unittest.skipIf(not has_torch, "requires Torch")
|
|
58
|
+
def test_torch_conv_1D(self):
|
|
59
|
+
def run_conv1D(
|
|
60
|
+
N,
|
|
61
|
+
C,
|
|
62
|
+
O,
|
|
63
|
+
iH,
|
|
64
|
+
kH,
|
|
65
|
+
stride,
|
|
66
|
+
padding,
|
|
67
|
+
dilation=1,
|
|
68
|
+
groups=1,
|
|
69
|
+
dtype="float32",
|
|
70
|
+
atol=1e-5,
|
|
71
|
+
):
|
|
72
|
+
with self.subTest(
|
|
73
|
+
dtype=dtype,
|
|
74
|
+
N=N,
|
|
75
|
+
C=C,
|
|
76
|
+
O=O,
|
|
77
|
+
iH=iH,
|
|
78
|
+
kH=kH,
|
|
79
|
+
stride=stride,
|
|
80
|
+
padding=padding,
|
|
81
|
+
dilation=dilation,
|
|
82
|
+
groups=groups,
|
|
83
|
+
):
|
|
84
|
+
np_dtype = getattr(np, dtype)
|
|
85
|
+
np.random.seed(0)
|
|
86
|
+
in_np = np.random.normal(0, 1.0 / C, (N, iH, C)).astype(np_dtype)
|
|
87
|
+
wt_np = np.random.normal(0, 1.0 / C, (O, kH, int(C / groups))).astype(
|
|
88
|
+
np_dtype
|
|
89
|
+
)
|
|
90
|
+
|
|
91
|
+
in_mx, wt_mx = map(mx.array, (in_np, wt_np))
|
|
92
|
+
in_pt, wt_pt = map(
|
|
93
|
+
lambda x: torch.from_numpy(x.transpose(0, 2, 1)), (in_np, wt_np)
|
|
94
|
+
)
|
|
95
|
+
|
|
96
|
+
out_mx = mx.conv1d(
|
|
97
|
+
in_mx,
|
|
98
|
+
wt_mx,
|
|
99
|
+
stride=stride,
|
|
100
|
+
padding=padding,
|
|
101
|
+
dilation=dilation,
|
|
102
|
+
groups=groups,
|
|
103
|
+
)
|
|
104
|
+
out_pt = torch.conv1d(
|
|
105
|
+
in_pt,
|
|
106
|
+
wt_pt,
|
|
107
|
+
stride=stride,
|
|
108
|
+
padding=padding,
|
|
109
|
+
dilation=dilation,
|
|
110
|
+
groups=groups,
|
|
111
|
+
)
|
|
112
|
+
out_pt = torch.transpose(out_pt, 2, 1)
|
|
113
|
+
|
|
114
|
+
self.assertEqual(out_pt.shape, out_mx.shape)
|
|
115
|
+
self.assertTrue(np.allclose(out_pt.numpy(), out_mx, atol=atol))
|
|
116
|
+
|
|
117
|
+
for dtype in ("float32",):
|
|
118
|
+
for N, C, O in (
|
|
119
|
+
(1, 1, 1),
|
|
120
|
+
(1, 6, 1),
|
|
121
|
+
(1, 1, 6),
|
|
122
|
+
(4, 32, 64),
|
|
123
|
+
):
|
|
124
|
+
for iH, kH, stride, padding in (
|
|
125
|
+
(1, 1, 1, 0),
|
|
126
|
+
(3, 3, 1, 0),
|
|
127
|
+
(31, 5, 5, 2),
|
|
128
|
+
):
|
|
129
|
+
run_conv1D(N, C, O, iH, kH, stride, padding, dtype=dtype)
|
|
130
|
+
|
|
131
|
+
# Groups tests
|
|
132
|
+
N, C, O = (4, 32, 64)
|
|
133
|
+
for iH, kH, stride, padding in (
|
|
134
|
+
(1, 1, 1, 0),
|
|
135
|
+
(3, 3, 1, 0),
|
|
136
|
+
(31, 5, 5, 2),
|
|
137
|
+
):
|
|
138
|
+
for group in (1, 2, 4, 8, 16, 32):
|
|
139
|
+
run_conv1D(N, C, O, iH, kH, stride, padding, groups=group, dtype=dtype)
|
|
140
|
+
|
|
141
|
+
# Strided inputs tests
|
|
142
|
+
for tpose_in, tpose_wt in (
|
|
143
|
+
((0, 2, 1), (0, 1, 2)),
|
|
144
|
+
((0, 2, 1), (0, 2, 1)),
|
|
145
|
+
):
|
|
146
|
+
with self.subTest(name="strided", tpose_in=tpose_in, tpose_wt=tpose_wt):
|
|
147
|
+
in_np = np.random.normal(0, 1.0 / 16, (16, 16, 16)).astype(np.float32)
|
|
148
|
+
wt_np = np.random.normal(0, 1.0 / 16, (16, 16, 16)).astype(np.float32)
|
|
149
|
+
|
|
150
|
+
in_mx, wt_mx = map(mx.array, (in_np, wt_np))
|
|
151
|
+
in_mx_t = mx.transpose(in_mx, tpose_in)
|
|
152
|
+
wt_mx_t = mx.transpose(wt_mx, tpose_wt)
|
|
153
|
+
out_mx = mx.conv1d(in_mx_t, wt_mx_t)
|
|
154
|
+
|
|
155
|
+
in_pt, wt_pt = map(
|
|
156
|
+
lambda x: torch.from_numpy(x.transpose(0, 2, 1)),
|
|
157
|
+
(in_np.transpose(tpose_in), wt_np.transpose(tpose_wt)),
|
|
158
|
+
)
|
|
159
|
+
|
|
160
|
+
out_pt = torch.conv1d(in_pt, wt_pt)
|
|
161
|
+
out_pt = torch.transpose(out_pt, 2, 1)
|
|
162
|
+
|
|
163
|
+
self.assertEqual(out_pt.shape, out_mx.shape)
|
|
164
|
+
self.assertTrue(np.allclose(out_pt.numpy(), out_mx, atol=1e-5))
|
|
165
|
+
|
|
166
|
+
@unittest.skipIf(not has_torch, "requires Torch")
|
|
167
|
+
def test_torch_conv_1D_grad(self):
|
|
168
|
+
def run_conv1D_grad(
|
|
169
|
+
N,
|
|
170
|
+
C,
|
|
171
|
+
O,
|
|
172
|
+
iH,
|
|
173
|
+
kH,
|
|
174
|
+
stride,
|
|
175
|
+
padding,
|
|
176
|
+
dilation=1,
|
|
177
|
+
groups=1,
|
|
178
|
+
dtype="float32",
|
|
179
|
+
atol=1e-5,
|
|
180
|
+
):
|
|
181
|
+
with self.subTest(
|
|
182
|
+
dtype=dtype,
|
|
183
|
+
N=N,
|
|
184
|
+
C=C,
|
|
185
|
+
O=O,
|
|
186
|
+
iH=iH,
|
|
187
|
+
kH=kH,
|
|
188
|
+
stride=stride,
|
|
189
|
+
padding=padding,
|
|
190
|
+
dilation=dilation,
|
|
191
|
+
groups=groups,
|
|
192
|
+
):
|
|
193
|
+
np_dtype = getattr(np, dtype)
|
|
194
|
+
np.random.seed(0)
|
|
195
|
+
oH = 1 + ((iH + 2 * padding - dilation * (kH - 1) - 1) // stride)
|
|
196
|
+
|
|
197
|
+
in_np = np.random.normal(0, 1.0 / C, (N, iH, C)).astype(np_dtype)
|
|
198
|
+
wt_np = np.random.normal(0, 1.0 / C, (O, kH, C)).astype(np_dtype)
|
|
199
|
+
ct_np = np.random.normal(0, 1.0 / C, (N, oH, O)).astype(np_dtype)
|
|
200
|
+
|
|
201
|
+
in_mx, wt_mx, ct_mx = map(mx.array, (in_np, wt_np, ct_np))
|
|
202
|
+
in_pt, wt_pt, ct_pt = map(
|
|
203
|
+
lambda x: torch.from_numpy(x.transpose(0, 2, 1)),
|
|
204
|
+
(in_np, wt_np, ct_np),
|
|
205
|
+
)
|
|
206
|
+
|
|
207
|
+
def f(a, b):
|
|
208
|
+
return mx.conv1d(
|
|
209
|
+
a,
|
|
210
|
+
b,
|
|
211
|
+
stride=stride,
|
|
212
|
+
padding=padding,
|
|
213
|
+
dilation=dilation,
|
|
214
|
+
groups=groups,
|
|
215
|
+
)
|
|
216
|
+
|
|
217
|
+
_, outs_mx = mx.vjp(
|
|
218
|
+
f,
|
|
219
|
+
[
|
|
220
|
+
in_mx,
|
|
221
|
+
wt_mx,
|
|
222
|
+
],
|
|
223
|
+
[
|
|
224
|
+
ct_mx,
|
|
225
|
+
],
|
|
226
|
+
)
|
|
227
|
+
pt_grad_in = F.grad.conv1d_input(
|
|
228
|
+
in_pt.shape,
|
|
229
|
+
wt_pt,
|
|
230
|
+
ct_pt,
|
|
231
|
+
stride=stride,
|
|
232
|
+
padding=padding,
|
|
233
|
+
dilation=dilation,
|
|
234
|
+
groups=groups,
|
|
235
|
+
)
|
|
236
|
+
pt_grad_wt = F.grad.conv1d_weight(
|
|
237
|
+
in_pt,
|
|
238
|
+
wt_pt.shape,
|
|
239
|
+
ct_pt,
|
|
240
|
+
stride=stride,
|
|
241
|
+
padding=padding,
|
|
242
|
+
dilation=dilation,
|
|
243
|
+
groups=groups,
|
|
244
|
+
)
|
|
245
|
+
pt_grad_in = torch.transpose(pt_grad_in, 2, 1).numpy()
|
|
246
|
+
pt_grad_wt = torch.transpose(pt_grad_wt, 2, 1).numpy()
|
|
247
|
+
|
|
248
|
+
mx_grad_in, mx_grad_wt = outs_mx
|
|
249
|
+
|
|
250
|
+
self.assertEqual(pt_grad_in.shape, mx_grad_in.shape)
|
|
251
|
+
self.assertEqual(in_mx.shape, mx_grad_in.shape)
|
|
252
|
+
self.assertTrue(np.allclose(pt_grad_in, mx_grad_in, atol=atol))
|
|
253
|
+
|
|
254
|
+
self.assertEqual(pt_grad_wt.shape, mx_grad_wt.shape)
|
|
255
|
+
self.assertEqual(wt_mx.shape, mx_grad_wt.shape)
|
|
256
|
+
self.assertTrue(np.allclose(pt_grad_wt, mx_grad_wt, atol=atol))
|
|
257
|
+
|
|
258
|
+
for dtype in ("float32",):
|
|
259
|
+
for N, C, O in (
|
|
260
|
+
(1, 1, 1),
|
|
261
|
+
(1, 6, 1),
|
|
262
|
+
(1, 1, 6),
|
|
263
|
+
(4, 32, 64),
|
|
264
|
+
):
|
|
265
|
+
for iH, kH, stride, padding in (
|
|
266
|
+
(1, 1, 1, 0),
|
|
267
|
+
(3, 3, 1, 0),
|
|
268
|
+
(31, 5, 5, 2),
|
|
269
|
+
):
|
|
270
|
+
run_conv1D_grad(N, C, O, iH, kH, stride, padding, dtype=dtype)
|
|
271
|
+
|
    @unittest.skipIf(not has_torch, "requires Torch")
    def test_torch_conv_2D(self):
        def run_conv2D(
            N,
            C,
            O,
            idim,
            kdim,
            stride,
            padding,
            dilation=(1, 1),
            groups=1,
            dtype="float32",
        ):
            with self.subTest(
                dtype=dtype,
                N=N,
                C=C,
                O=O,
                idim=idim,
                kdim=kdim,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
            ):
                np.random.seed(0)
                iH, iW = idim
                kH, kW = kdim
                scale = 1.0 / math.sqrt(kH * kW * C)
                in_np = np.random.normal(0.0, scale, (N, iH, iW, C))
                wt_np = np.random.normal(0.0, 1.0, (O, kH, kW, int(C / groups)))

                mx_dtype = getattr(mx, dtype)
                torch_dtype = getattr(torch, dtype)
                in_mx, wt_mx = map(
                    lambda x: mx.array(x).astype(mx_dtype), (in_np, wt_np)
                )
                in_pt, wt_pt = map(
                    lambda x: torch.from_numpy(x.transpose(0, 3, 1, 2))
                    .to("cpu")
                    .to(torch_dtype),
                    (in_np, wt_np),
                )

                out_mx = mx.conv2d(
                    in_mx,
                    wt_mx,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                ).astype(mx.float32)
                out_pt = torch.conv2d(
                    in_pt,
                    wt_pt,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                )
                out_pt = (
                    torch.permute(out_pt, (0, 2, 3, 1))
                    .to(torch.float32)
                    .numpy(force=True)
                )

                self.assertEqual(out_pt.shape, out_mx.shape)
                if dtype == "bfloat16":
                    atol, rtol = 1e-1, 1e-3
                else:
                    atol, rtol = 1e-5, 1e-6
                self.assertTrue(np.allclose(out_pt, out_mx, atol=atol))

        for dtype in ("float32", "bfloat16"):
            for N, C, O in (
                (1, 1, 1),
                (1, 6, 1),
                (1, 1, 6),
                (4, 32, 64),
            ):
                for idim, kdim, stride, padding in (
                    ((1, 1), (1, 1), (1, 1), (0, 0)),
                    ((3, 3), (3, 1), (1, 1), (0, 0)),
                    ((31, 31), (5, 5), (5, 5), (2, 2)),
                ):
                    run_conv2D(N, C, O, idim, kdim, stride, padding, dtype=dtype)

            # Groups tests
            N, C, O = (4, 32, 64)
            for idim, kdim, stride, padding in (
                ((1, 1), (1, 1), (1, 1), (0, 0)),
                ((3, 3), (3, 1), (1, 1), (0, 0)),
                ((31, 31), (5, 5), (5, 5), (2, 2)),
            ):
                for group in (1, 2, 4, 8, 16, 32):
                    run_conv2D(
                        N, C, O, idim, kdim, stride, padding, groups=group, dtype=dtype
                    )

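    # Gradient cross-check for 2D convolution: mx.vjp with a random cotangent
    # is compared against torch.nn.grad's conv2d_input / conv2d_weight. The
    # cotangent shape must match the forward output, whose spatial size is
    # o = (i + 2 * p - d * (k - 1) - 1) // s + 1 per dimension.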
    @unittest.skipIf(not has_torch, "requires Torch")
    def test_torch_conv_2D_grad(self):
        def run_conv2D_grad(
            N,
            C,
            O,
            idim,
            kdim,
            stride,
            padding,
            dilation=(1, 1),
            groups=1,
            dtype="float32",
            atol=1e-5,
        ):
            with self.subTest(
                dtype=dtype,
                N=N,
                C=C,
                O=O,
                idim=idim,
                kdim=kdim,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
            ):
                np_dtype = getattr(np, dtype)
                np.random.seed(0)
                iH, iW = idim
                kH, kW = kdim
                scale = 1.0 / math.sqrt(kH * kW * C)

                oH = 1 + (
                    (iH + 2 * padding[0] - dilation[0] * (kH - 1) - 1) // stride[0]
                )
                oW = 1 + (
                    (iW + 2 * padding[1] - dilation[1] * (kW - 1) - 1) // stride[1]
                )

                in_np = np.random.normal(0.0, scale, (N, iH, iW, C)).astype(np_dtype)
                wt_np = np.random.normal(0.0, scale, (O, kH, kW, C)).astype(np_dtype)
                ct_np = np.random.normal(0.0, scale, (N, oH, oW, O)).astype(np_dtype)

                in_mx, wt_mx, ct_mx = map(mx.array, (in_np, wt_np, ct_np))
                in_pt, wt_pt, ct_pt = map(
                    lambda x: torch.from_numpy(x.transpose(0, 3, 1, 2)).to("cpu"),
                    (in_np, wt_np, ct_np),
                )

                def f(a, b):
                    return mx.conv2d(
                        a,
                        b,
                        stride=stride,
                        padding=padding,
                        dilation=dilation,
                        groups=groups,
                    )

                _, outs_mx = mx.vjp(
                    f,
                    [in_mx, wt_mx],
                    [ct_mx],
                )
                pt_grad_in = F.grad.conv2d_input(
                    in_pt.shape,
                    wt_pt,
                    ct_pt,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                )
                pt_grad_wt = F.grad.conv2d_weight(
                    in_pt,
                    wt_pt.shape,
                    ct_pt,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                )
                pt_grad_in = torch.permute(pt_grad_in, (0, 2, 3, 1)).numpy()
                pt_grad_wt = torch.permute(pt_grad_wt, (0, 2, 3, 1)).numpy()

                mx_grad_in, mx_grad_wt = outs_mx

                self.assertEqual(pt_grad_in.shape, mx_grad_in.shape)
                self.assertEqual(in_mx.shape, mx_grad_in.shape)
                self.assertTrue(np.allclose(pt_grad_in, mx_grad_in, atol=atol))

                self.assertEqual(pt_grad_wt.shape, mx_grad_wt.shape)
                self.assertEqual(wt_mx.shape, mx_grad_wt.shape)
                self.assertTrue(np.allclose(pt_grad_wt, mx_grad_wt, atol=atol))

        for dtype in ("float32",):
            for N, C, O in ((1, 1, 1), (1, 6, 1), (1, 1, 6), (4, 32, 64), (4, 16, 32)):
                for idim, kdim, stride, padding, dilation in (
                    ((1, 1), (1, 1), (1, 1), (0, 0), (1, 1)),
                    ((3, 3), (3, 1), (1, 1), (0, 0), (1, 1)),
                    ((31, 31), (5, 5), (5, 5), (2, 2), (1, 1)),
                    ((32, 32), (3, 3), (2, 2), (1, 1), (1, 1)),
                    ((31, 31), (5, 5), (5, 5), (2, 2), (3, 2)),
                    ((32, 32), (3, 3), (2, 2), (1, 1), (3, 2)),
                ):
                    run_conv2D_grad(
                        N, C, O, idim, kdim, stride, padding, dilation, dtype=dtype
                    )

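    # Forward 3D convolution cross-check against torch.conv3d, including one
    # dilated case at the end; data moves between NDHWC (MLX) and NCDHW
    # (PyTorch) via transposes.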
    @unittest.skipIf(not has_torch, "requires Torch")
    def test_torch_conv_3D(self):
        def run_conv3D(
            N,
            C,
            O,
            idim,
            kdim,
            stride,
            padding,
            dilation=(1, 1, 1),
            groups=1,
            dtype="float32",
            atol=1e-5,
        ):
            with self.subTest(
                dtype=dtype,
                N=N,
                C=C,
                O=O,
                idim=idim,
                kdim=kdim,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
            ):
                np_dtype = getattr(np, dtype)
                np.random.seed(0)
                iD, iH, iW = idim
                kD, kH, kW = kdim
                scale = 1.0 / math.sqrt(kD * kH * kW * C)
                in_np = np.random.normal(0.0, scale, (N, iD, iH, iW, C)).astype(
                    np_dtype
                )
                wt_np = np.random.normal(0.0, 1.0, (O, kD, kH, kW, C)).astype(np_dtype)

                in_mx, wt_mx = map(mx.array, (in_np, wt_np))
                in_pt, wt_pt = map(
                    lambda x: torch.from_numpy(x.transpose(0, 4, 1, 2, 3)).to("cpu"),
                    (in_np, wt_np),
                )

                out_mx = mx.conv3d(
                    in_mx,
                    wt_mx,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                )
                out_pt = torch.conv3d(
                    in_pt,
                    wt_pt,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                )
                out_pt = torch.permute(out_pt, (0, 2, 3, 4, 1)).numpy(force=True)

                self.assertEqual(out_pt.shape, out_mx.shape)
                self.assertTrue(np.allclose(out_pt, out_mx, atol=atol))

        for dtype in ("float32",):
            for N, C, O in (
                (1, 1, 1),
                (1, 6, 1),
                (1, 1, 6),
                (4, 16, 32),
            ):
                for idim, kdim, stride, padding in (
                    ((1, 1, 1), (1, 1, 1), (1, 1, 1), (0, 0, 0)),
                    ((3, 3, 3), (3, 1, 1), (1, 1, 1), (0, 0, 0)),
                    ((31, 31, 31), (5, 5, 5), (5, 5, 5), (2, 2, 2)),
                ):
                    run_conv3D(N, C, O, idim, kdim, stride, padding, dtype=dtype)

            N, C, O = (2, 4, 4)
            idim, kdim, stride, padding = (6, 6, 6), (3, 1, 1), (1, 1, 1), (0, 0, 0)
            run_conv3D(
                N, C, O, idim, kdim, stride, padding, dilation=(2, 2, 2), dtype=dtype
            )

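    # Gradient cross-check for 3D convolution, mirroring the 2D version:
    # mx.vjp gradients vs. torch.nn.grad's conv3d_input / conv3d_weight over
    # a sweep of shapes, strides, paddings and dilations.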
    @unittest.skipIf(not has_torch, "requires Torch")
    def test_torch_conv_3D_grad(self):
        def run_conv3D_grad(
            N,
            C,
            O,
            idim,
            kdim,
            stride,
            padding,
            dilation=(1, 1, 1),
            groups=1,
            dtype="float32",
            atol=1e-5,
        ):
            with self.subTest(
                dtype=dtype,
                N=N,
                C=C,
                O=O,
                idim=idim,
                kdim=kdim,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
            ):
                np_dtype = getattr(np, dtype)
                np.random.seed(0)
                iD, iH, iW = idim
                kD, kH, kW = kdim
                scale = 1.0 / math.sqrt(kD * kH * kW * C)

                oD = 1 + (
                    (iD + 2 * padding[0] - dilation[0] * (kD - 1) - 1) // stride[0]
                )
                oH = 1 + (
                    (iH + 2 * padding[1] - dilation[1] * (kH - 1) - 1) // stride[1]
                )
                oW = 1 + (
                    (iW + 2 * padding[2] - dilation[2] * (kW - 1) - 1) // stride[2]
                )

                in_np = np.random.normal(0.0, scale, (N, iD, iH, iW, C)).astype(
                    np_dtype
                )
                wt_np = np.random.normal(0.0, scale, (O, kD, kH, kW, C)).astype(
                    np_dtype
                )
                ct_np = np.random.normal(0.0, scale, (N, oD, oH, oW, O)).astype(
                    np_dtype
                )

                in_mx, wt_mx, ct_mx = map(mx.array, (in_np, wt_np, ct_np))
                in_pt, wt_pt, ct_pt = map(
                    lambda x: torch.from_numpy(x.transpose(0, 4, 1, 2, 3)).to("cpu"),
                    (in_np, wt_np, ct_np),
                )

                def f(a, b):
                    return mx.conv3d(
                        a,
                        b,
                        stride=stride,
                        padding=padding,
                        dilation=dilation,
                        groups=groups,
                    )

                _, outs_mx = mx.vjp(
                    f,
                    [in_mx, wt_mx],
                    [ct_mx],
                )
                pt_grad_in = F.grad.conv3d_input(
                    in_pt.shape,
                    wt_pt,
                    ct_pt,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                )
                pt_grad_wt = F.grad.conv3d_weight(
                    in_pt,
                    wt_pt.shape,
                    ct_pt,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    groups=groups,
                )
                pt_grad_in = torch.permute(pt_grad_in, (0, 2, 3, 4, 1)).numpy()
                pt_grad_wt = torch.permute(pt_grad_wt, (0, 2, 3, 4, 1)).numpy()

                mx_grad_in, mx_grad_wt = outs_mx

                self.assertEqual(pt_grad_in.shape, mx_grad_in.shape)
                self.assertEqual(in_mx.shape, mx_grad_in.shape)
                self.assertTrue(np.allclose(pt_grad_in, mx_grad_in, atol=atol))

                self.assertEqual(pt_grad_wt.shape, mx_grad_wt.shape)
                self.assertEqual(wt_mx.shape, mx_grad_wt.shape)
                self.assertTrue(np.allclose(pt_grad_wt, mx_grad_wt, atol=atol))

        for dtype in ("float32",):
            for N, C, O in ((1, 1, 1), (1, 6, 1), (1, 1, 6), (4, 16, 32), (4, 8, 16)):
                for idim, kdim, stride, padding, dilation in (
                    ((1, 1, 1), (1, 1, 1), (1, 1, 1), (0, 0, 0), (1, 1, 1)),
                    ((3, 3, 3), (3, 1, 1), (1, 1, 1), (0, 0, 0), (1, 1, 1)),
                    ((15, 15, 15), (5, 5, 5), (5, 5, 5), (2, 2, 2), (1, 1, 1)),
                    ((16, 16, 16), (3, 3, 3), (2, 2, 2), (1, 1, 1), (1, 1, 1)),
                    ((15, 15, 15), (5, 5, 5), (5, 5, 5), (2, 2, 2), (3, 2, 2)),
                    ((16, 16, 16), (3, 3, 3), (2, 2, 2), (1, 1, 1), (3, 2, 2)),
                ):
                    run_conv3D_grad(
                        N, C, O, idim, kdim, stride, padding, dilation, dtype=dtype
                    )

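    # Shared helper: compares mx.conv_general against an equivalent built from
    # PyTorch primitives. Input dilation has no direct torch argument, so it
    # is emulated with a depthwise transposed convolution whose kernel is all
    # ones of size 1 (which just inserts zeros between input elements), and
    # flip is emulated with torch.flip over the spatial axes.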
    def __conv_general_test(
        self,
        in_shape,
        wt_shape,
        stride=1,
        padding=0,
        kernel_dilation=1,
        input_dilation=1,
        groups=1,
        flip=False,
        np_dtype=np.float32,
        atol=1e-5,
    ):
        with self.subTest(
            in_shape=in_shape,
            wt_shape=wt_shape,
            stride=stride,
            padding=padding,
            kernel_dilation=kernel_dilation,
            input_dilation=input_dilation,
            groups=groups,
            flip=flip,
            np_dtype=np_dtype,
        ):
            np.random.seed(0)
            scale = 1.0 / math.sqrt(np.prod(wt_shape[1:]))
            scale = min(0.3, scale)
            in_np = np.random.normal(0, scale, in_shape).astype(np_dtype)
            wt_np = np.random.normal(0, scale, wt_shape).astype(np_dtype)

            in_mx, wt_mx = map(mx.array, (in_np, wt_np))

            in_pt, wt_pt = map(
                lambda x: torch.from_numpy(np.moveaxis(x, -1, 1)).to("cpu"),
                (in_np, wt_np),
            )

            out_mx = mx.conv_general(
                in_mx,
                wt_mx,
                stride=stride,
                padding=padding,
                kernel_dilation=kernel_dilation,
                input_dilation=input_dilation,
                groups=groups,
                flip=flip,
            )

            def conv_general_pt(
                inp, wt, stride, padding, kernel_dilation, input_dilation, groups, flip
            ):
                C = inp.size()[1]
                ndim = inp.ndim - 2
                map_ints = lambda x: [x] * ndim if isinstance(x, int) else x

                stride, padding, kernel_dilation, input_dilation = map(
                    map_ints, (stride, padding, kernel_dilation, input_dilation)
                )

                torch_convt_list = (
                    F.conv_transpose1d,
                    F.conv_transpose2d,
                    F.conv_transpose3d,
                )
                torch_conv_list = (F.conv1d, F.conv2d, F.conv3d)

                conv_f = torch_conv_list[ndim - 1]
                convt_f = torch_convt_list[ndim - 1]

                if flip:
                    wt = torch.flip(wt, tuple(np.arange(2, wt.ndim)))

                if not np.all(input_dilation == 1):
                    ones = torch.ones(
                        [C]
                        + [
                            1,
                        ]
                        * (ndim + 1)
                    ).to(inp.dtype)
                    inp = convt_f(inp, ones, stride=input_dilation, groups=C)

                return conv_f(
                    inp,
                    wt,
                    stride=stride,
                    padding=padding,
                    dilation=kernel_dilation,
                    groups=groups,
                )

            out_pt = conv_general_pt(
                in_pt,
                wt_pt,
                stride=stride,
                padding=padding,
                kernel_dilation=kernel_dilation,
                input_dilation=input_dilation,
                groups=groups,
                flip=flip,
            )

            out_pt = np.moveaxis(out_pt.numpy(), 1, -1)

            self.assertEqual(out_mx.shape, out_pt.shape)
            self.assertTrue(np.allclose(out_mx, out_pt, atol=atol))

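    # Spot-checks mx.conv_general against the helper above for several
    # combinations of stride, padding, kernel dilation, input dilation and
    # flip.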
    @unittest.skipIf(not has_torch, "requires Torch")
    def test_torch_conv_general(self):
        in_shape = (2, 32, 32, 16)
        wt_shape = (32, 5, 5, 16)
        stride = (1, 1)
        padding = (2, 2)
        kernel_dilation = (2, 3)
        input_dilation = (1, 1)
        flip = False

        self.__conv_general_test(
            in_shape,
            wt_shape,
            stride,
            padding,
            kernel_dilation,
            input_dilation,
            flip=flip,
        )

        in_shape = (2, 32, 32, 16)
        wt_shape = (32, 5, 10, 16)
        stride = (2, 3)
        padding = (0, 0)
        kernel_dilation = (3, 2)
        input_dilation = (2, 4)
        flip = False

        self.__conv_general_test(
            in_shape,
            wt_shape,
            stride,
            padding,
            kernel_dilation,
            input_dilation,
            flip=flip,
        )

        in_shape = (2, 32, 32, 16)
        wt_shape = (32, 5, 10, 16)
        stride = (2, 2)
        padding = (3, 2)
        kernel_dilation = (3, 2)
        input_dilation = (2, 4)
        flip = False

        self.__conv_general_test(
            in_shape,
            wt_shape,
            stride,
            padding,
            kernel_dilation,
            input_dilation,
            flip=flip,
        )

        in_shape = (2, 32, 32, 16)
        wt_shape = (32, 5, 10, 16)
        stride = (2, 3)
        padding = (3, 2)
        kernel_dilation = (3, 2)
        input_dilation = (2, 5)
        flip = False

        self.__conv_general_test(
            in_shape,
            wt_shape,
            stride,
            padding,
            kernel_dilation,
            input_dilation,
            flip=flip,
        )

        in_shape = (2, 32, 32, 16)
        wt_shape = (32, 5, 5, 16)
        stride = (2, 3)
        padding = (0, 0)
        kernel_dilation = (3, 1)
        input_dilation = (2, 5)
        flip = True

        self.__conv_general_test(
            in_shape,
            wt_shape,
            stride,
            padding,
            kernel_dilation,
            input_dilation,
            flip=flip,
        )

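    # Weight gradient of a flipped convolution: for a 2x2 kernel the VJP can
    # be written out by hand as strided cotangent/input dot products, which is
    # what the dw00..dw11 terms below check. A second case verifies that the
    # gradient with flip=True matches the gradient of a manually reversed
    # kernel with flip=False.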
    def test_conv_general_flip_grad(self):
        for s in (1, 2):
            w = mx.random.normal(shape=(1, 2, 2, 1))
            x = mx.random.normal(shape=(1, 2, 2, 1))

            def conv_t(w):
                return mx.conv_general(
                    x,
                    w,
                    stride=1,
                    padding=(1, 1),
                    kernel_dilation=1,
                    input_dilation=s,
                    flip=True,
                )

            cotan = mx.random.normal(shape=(1, 2 + s, 2 + s, 1))

            dw = mx.vjp(conv_t, (w,), (cotan,))[1][0]

            x = x.squeeze()
            cotan = cotan.squeeze()
            dw = dw.squeeze()

            dw00 = (cotan[:-1:s, :-1:s] * x).sum()
            dw01 = (cotan[:-1:s, 1::s] * x).sum()
            dw10 = (cotan[1::s, :-1:s] * x).sum()
            dw11 = (cotan[1::s, 1::s] * x).sum()
            expected = mx.array([[dw00, dw01], [dw10, dw11]])
            self.assertTrue(mx.allclose(dw, expected, rtol=1e-5, atol=1e-5))

        # Test with input dilation
        inputs = mx.random.normal((1, 14, 14, 2))
        kernel = mx.random.normal((2, 7, 7, 2))

        def conv_flip(kernel):
            return mx.conv_general(
                inputs,
                kernel,
                stride=1,
                padding=([6, 6], [15, 15]),
                kernel_dilation=(1, 1),
                input_dilation=(16, 16),
                groups=1,
                flip=True,
            ).sum()

        def reverse_sequence(xs, axis=0):
            indices = mx.arange(xs.shape[axis] - 1, -1, -1)
            return mx.take(xs, indices, axis=axis)

        def conv_manual_flip(kernel):
            for ax in range(1, kernel.ndim - 1):
                kernel = reverse_sequence(kernel, axis=ax)
            return mx.conv_general(
                inputs,
                kernel,
                stride=1,
                padding=([6, 6], [15, 15]),
                kernel_dilation=(1, 1),
                input_dilation=(16, 16),
                groups=1,
                flip=False,
            ).sum()

        grad = mx.grad(conv_flip)(kernel)
        expected_grad = mx.grad(conv_manual_flip)(kernel)
        self.assertTrue(mx.allclose(grad, expected_grad))

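    # Grouped convolution gradients: fn_gt recomputes the grouped conv as a
    # concatenation of per-group mx.conv_general calls, and its VJP is used as
    # the ground truth, with and without flip.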
    def test_conv_groups_grad(self):
        def fn(x, w):
            num_groups = x.shape[-1] // w.shape[-1]
            return mx.conv1d(x, w, groups=num_groups)

        def fn_gt(x, w):
            num_groups = x.shape[-1] // w.shape[-1]
            group_size = w.shape[-1]
            ws = w.reshape(num_groups, -1, *w.shape[1:]).split(num_groups)
            xs = x.reshape(*x.shape[:-1], num_groups, -1).split(num_groups, axis=-2)
            return mx.concatenate(
                [mx.conv_general(x.squeeze(-2), w.squeeze(0)) for x, w in zip(xs, ws)],
                axis=-1,
            )

        mx.random.seed(3)

        w = mx.random.normal(shape=(2, 3, 1))
        x = mx.random.normal(shape=(1, 5, 2))
        cotans = (mx.ones(shape=(1, 3, 2)),)
        grads = mx.vjp(fn, (x, w), cotans)[1]
        expected = mx.vjp(fn_gt, (x, w), cotans)[1]
        self.assertTrue(mx.allclose(expected[0], grads[0]))
        self.assertTrue(mx.allclose(expected[1], grads[1]))

        w = mx.random.normal(shape=(2, 3, 2))
        x = mx.random.normal(shape=(1, 5, 4))
        cotans = (mx.ones(shape=(1, 3, 2)),)
        grads = mx.vjp(fn, (x, w), cotans)[1]
        expected = mx.vjp(fn_gt, (x, w), cotans)[1]
        self.assertTrue(mx.allclose(expected[0], grads[0]))
        self.assertTrue(mx.allclose(expected[1], grads[1]))

        w = mx.random.normal(shape=(6, 3, 2))
        x = mx.random.normal(shape=(1, 5, 4))
        cotans = (mx.ones(shape=(1, 3, 6)),)
        grads = mx.vjp(fn, (x, w), cotans)[1]
        expected = mx.vjp(fn_gt, (x, w), cotans)[1]
        self.assertTrue(mx.allclose(expected[0], grads[0]))
        self.assertTrue(mx.allclose(expected[1], grads[1]))

        # Test 2D
        w = mx.random.normal(shape=(2, 3, 3, 1))
        x = mx.random.normal(shape=(1, 5, 5, 2))
        cotans = (mx.ones(shape=(1, 3, 3, 2)),)
        grads = mx.vjp(fn, (x, w), cotans)[1]
        expected = mx.vjp(fn_gt, (x, w), cotans)[1]
        self.assertTrue(mx.allclose(expected[0], grads[0]))
        self.assertTrue(mx.allclose(expected[1], grads[1]))

        # Test with flip
        def fn(x, w):
            num_groups = x.shape[-1] // w.shape[-1]
            return mx.conv_general(x, w, groups=num_groups, flip=True)

        def fn_gt(x, w):
            num_groups = x.shape[-1] // w.shape[-1]
            group_size = w.shape[-1]
            ws = w.reshape(num_groups, -1, *w.shape[1:]).split(num_groups)
            xs = x.reshape(*x.shape[:-1], num_groups, -1).split(num_groups, axis=-2)
            return mx.concatenate(
                [
                    mx.conv_general(x.squeeze(-2), w.squeeze(0), flip=True)
                    for x, w in zip(xs, ws)
                ],
                axis=-1,
            )

        w = mx.random.normal(shape=(2, 3, 1))
        x = mx.random.normal(shape=(1, 5, 2))
        cotans = (mx.ones(shape=(1, 3, 2)),)
        grads = mx.vjp(fn, (x, w), cotans)[1]
        expected = mx.vjp(fn_gt, (x, w), cotans)[1]
        self.assertTrue(mx.allclose(expected[0], grads[0]))
        self.assertTrue(mx.allclose(expected[1], grads[1]))

        w = mx.random.normal(shape=(2, 3, 2))
        x = mx.random.normal(shape=(1, 5, 4))
        cotans = (mx.ones(shape=(1, 3, 2)),)
        grads = mx.vjp(fn, (x, w), cotans)[1]
        expected = mx.vjp(fn_gt, (x, w), cotans)[1]
        self.assertTrue(mx.allclose(expected[0], grads[0]))
        self.assertTrue(mx.allclose(expected[1], grads[1]))

        # Test 2D
        w = mx.random.normal(shape=(2, 3, 3, 1))
        x = mx.random.normal(shape=(1, 5, 5, 2))
        cotans = (mx.ones(shape=(1, 3, 3, 2)),)
        grads = mx.vjp(fn, (x, w), cotans)[1]
        expected = mx.vjp(fn_gt, (x, w), cotans)[1]
        self.assertTrue(mx.allclose(expected[0], grads[0]))
        self.assertTrue(mx.allclose(expected[1], grads[1]))

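    # Determinism check: the same conv2d call repeated back to back must give
    # matching results every time.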
    def test_repeated_conv(self):
        x = mx.random.normal((1, 3, 3, 320))
        w = mx.random.normal((320, 3, 3, 320))
        for i in range(8):
            y1 = mx.conv2d(x, w, (1, 1), (1, 1), (1, 1), 1)
            y2 = mx.conv2d(x, w, (1, 1), (1, 1), (1, 1), 1)
            self.assertTrue(mx.allclose(y1, y2))

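    # Depthwise convolutions (groups == C) over a table of shapes; float16 is
    # also exercised when the default device is the GPU.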
    @unittest.skipIf(not has_torch, "requires Torch")
    def test_torch_conv_depthwise(self):

        # fmt: off
        shapes = (
            # N,  H,  W,  C, kH, kW,  O, strides, padding, groups
            ( 2, 16, 16, 32,  1,  1, 32,  (2, 2),  (1, 1),     32),
            ( 1, 16, 16, 32,  3,  3, 32,  (2, 2),  (1, 1),     32),
            ( 1, 32, 32, 32,  7,  7, 32,  (1, 1),  (3, 3),     32),
            ( 3, 32, 32, 32,  5,  5, 32,  (1, 2),  (0, 0),     32),
            ( 1, 32, 32, 32,  7,  7, 32,  (2, 1),  (1, 3),     32),
        )
        # fmt: on

        dtypes = [np.float32]
        if mx.default_device() == mx.gpu:
            dtypes += [np.float16]

        for N, H, W, C, kH, kW, O, strides, padding, groups in shapes:
            for dtype in dtypes:
                for flip in [False, True]:
                    Cw = C // groups

                    self.__conv_general_test(
                        (N, H, W, C),
                        (O, kH, kW, Cw),
                        strides,
                        padding,
                        kernel_dilation=1,
                        input_dilation=1,
                        groups=groups,
                        flip=flip,
                        np_dtype=dtype,
                        atol=2e-5 if dtype == np.float32 else 5e-4,
                    )

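    # Asymmetric padding: torch.conv2d/conv3d only take symmetric padding, so
    # the reference pads symmetrically and drops the extra leading outputs,
    # while mx.conv_general takes separate low/high padding lists directly.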
    @unittest.skipIf(not has_torch, "requires Torch")
    def test_asymmetric_padding(self):
        inputs = np.random.normal(size=(2, 8, 8, 8, 3)).astype(np.float32)
        kernel = np.random.normal(size=(2, 3, 3, 3, 3)).astype(np.float32)
        strides = (2, 2, 2)

        pt_out = torch.conv3d(
            torch.permute(torch.tensor(inputs), (0, 4, 1, 2, 3)),
            torch.permute(torch.tensor(kernel), (0, 4, 1, 2, 3)),
            stride=strides,
            padding=2,
        )
        pt_out = torch.permute(pt_out, (0, 2, 3, 4, 1))[:, 1:, 1:, 1:, :].numpy()

        mx_out = mx.conv_general(
            mx.array(inputs),
            mx.array(kernel),
            stride=strides,
            padding=([0, 0, 0], [1, 1, 1]),
        )

        self.assertTrue(mx.allclose(mx_out, mx.array(pt_out), atol=1e-3, rtol=1e-3))

        inputs = np.random.normal(size=(2, 10, 10, 3)).astype(np.float32)
        kernel = np.random.normal(size=(2, 2, 2, 3)).astype(np.float32)

        pt_out = torch.conv2d(
            torch.permute(torch.tensor(inputs), (0, 3, 1, 2)),
            torch.permute(torch.tensor(kernel), (0, 3, 1, 2)),
            stride=1,
            padding=(1, 0),
        )
        pt_out = torch.permute(pt_out, (0, 2, 3, 1))[:, 1:].numpy()

        mx_out = mx.conv_general(
            mx.array(inputs),
            mx.array(kernel),
            stride=1,
            padding=([0, 0], [1, 0]),
        )
        self.assertTrue(mx.allclose(mx_out, mx.array(pt_out), atol=1e-3, rtol=1e-3))

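    # The kernel gradient must always have the kernel's shape, including for
    # grouped 1D and 2D convolutions.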
    def test_basic_grad_shapes(self):
        def loss_fn(kernel, inputs, strides, groups):
            return mx.sum(
                mx.conv_general(
                    inputs,
                    kernel,
                    stride=strides,
                    groups=groups,
                )
            )

        for in_shape, k_shape, strides, groups in [
            ((3, 5, 4), (6, 2, 2), (2,), 2),
            ((3, 5, 4), (24, 2, 1), (2,), 4),
            ((3, 5, 5, 4), (6, 2, 2, 2), (2, 1), 2),
            ((3, 5, 5, 4), (24, 2, 2, 1), (2, 2), 4),
        ]:
            grads = mx.grad(loss_fn)(
                mx.zeros(k_shape), mx.zeros(in_shape), strides, groups
            )
            self.assertEqual(grads.shape, k_shape)

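    # A 1D convolution is equivalent to a 2D convolution over a singleton
    # spatial axis, which is what expand_dims sets up here.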
    def test_1d_conv_with_2d(self):
        x = mx.random.uniform(shape=(2, 10, 16))
        y = mx.random.normal(shape=(16, 3, 16))

        out = mx.conv1d(x, y, padding=1)
        out_2d = mx.conv2d(
            mx.expand_dims(x, axis=2), mx.expand_dims(y, axis=2), padding=(1, 0)
        )

        self.assertTrue(mx.allclose(out, out_2d.squeeze(2)))

        x = mx.random.uniform(shape=(2, 10, 4))
        y = mx.random.normal(shape=(4, 3, 4))

        out = mx.conv1d(x, y, padding=1)
        out_2d = mx.conv2d(
            mx.expand_dims(x, axis=2), mx.expand_dims(y, axis=2), padding=(1, 0)
        )

        self.assertTrue(mx.allclose(out, out_2d.squeeze(2)))

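    # Channel counts that don't hit the usual alignment (21 here) should
    # still give the same result on the CPU stream and the default device.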
    def test_conv2d_unaligned_channels(self):
        x = mx.random.uniform(shape=(2, 16, 16, 21))
        w = mx.random.uniform(shape=(32, 3, 3, 21))
        y = mx.conv2d(x, w, stream=mx.cpu)
        y_hat = mx.conv2d(x, w)
        self.assertTrue(mx.allclose(y, y_hat))

        x = mx.random.uniform(shape=(2, 16, 16, 21))
        w = mx.random.uniform(shape=(21, 3, 3, 21))
        y = mx.conv2d(x, w, stream=mx.cpu)
        y_hat = mx.conv2d(x, w)
        self.assertTrue(mx.allclose(y, y_hat))

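    # A filter nearly as large as the padded input with a single channel,
    # compared across the CPU stream and the default device at a loose
    # tolerance.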
    def test_conv2d_large_filter_small_channels(self):
        x = mx.random.normal(shape=(1, 181, 181, 1))
        w = mx.random.normal(shape=(1, 182, 182, 1))
        y = mx.conv2d(x, w, (1, 1), (1, 1), stream=mx.cpu)
        y_hat = mx.conv2d(x, w, (1, 1), (1, 1))
        self.assertTrue(mx.allclose(y, y_hat, rtol=1e-3, atol=1e-3))


if __name__ == "__main__":
    mlx_tests.MLXTestRunner()