numba-cuda 0.22.0__cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between versions as they appear in the public registry.
Potentially problematic release.
This version of numba-cuda might be problematic; see the registry page for details.
- _numba_cuda_redirector.pth +4 -0
- _numba_cuda_redirector.py +89 -0
- numba_cuda/VERSION +1 -0
- numba_cuda/__init__.py +6 -0
- numba_cuda/_version.py +11 -0
- numba_cuda/numba/cuda/__init__.py +70 -0
- numba_cuda/numba/cuda/_internal/cuda_bf16.py +16394 -0
- numba_cuda/numba/cuda/_internal/cuda_fp16.py +8112 -0
- numba_cuda/numba/cuda/api.py +580 -0
- numba_cuda/numba/cuda/api_util.py +76 -0
- numba_cuda/numba/cuda/args.py +72 -0
- numba_cuda/numba/cuda/bf16.py +397 -0
- numba_cuda/numba/cuda/cache_hints.py +287 -0
- numba_cuda/numba/cuda/cext/__init__.py +2 -0
- numba_cuda/numba/cuda/cext/_devicearray.cpp +159 -0
- numba_cuda/numba/cuda/cext/_devicearray.cpython-312-aarch64-linux-gnu.so +0 -0
- numba_cuda/numba/cuda/cext/_devicearray.h +29 -0
- numba_cuda/numba/cuda/cext/_dispatcher.cpp +1098 -0
- numba_cuda/numba/cuda/cext/_dispatcher.cpython-312-aarch64-linux-gnu.so +0 -0
- numba_cuda/numba/cuda/cext/_hashtable.cpp +532 -0
- numba_cuda/numba/cuda/cext/_hashtable.h +135 -0
- numba_cuda/numba/cuda/cext/_helperlib.c +71 -0
- numba_cuda/numba/cuda/cext/_helperlib.cpython-312-aarch64-linux-gnu.so +0 -0
- numba_cuda/numba/cuda/cext/_helpermod.c +82 -0
- numba_cuda/numba/cuda/cext/_pymodule.h +38 -0
- numba_cuda/numba/cuda/cext/_typeconv.cpp +206 -0
- numba_cuda/numba/cuda/cext/_typeconv.cpython-312-aarch64-linux-gnu.so +0 -0
- numba_cuda/numba/cuda/cext/_typeof.cpp +1159 -0
- numba_cuda/numba/cuda/cext/_typeof.h +19 -0
- numba_cuda/numba/cuda/cext/capsulethunk.h +111 -0
- numba_cuda/numba/cuda/cext/mviewbuf.c +385 -0
- numba_cuda/numba/cuda/cext/mviewbuf.cpython-312-aarch64-linux-gnu.so +0 -0
- numba_cuda/numba/cuda/cext/typeconv.cpp +212 -0
- numba_cuda/numba/cuda/cext/typeconv.hpp +101 -0
- numba_cuda/numba/cuda/cg.py +67 -0
- numba_cuda/numba/cuda/cgutils.py +1294 -0
- numba_cuda/numba/cuda/cloudpickle/__init__.py +21 -0
- numba_cuda/numba/cuda/cloudpickle/cloudpickle.py +1598 -0
- numba_cuda/numba/cuda/cloudpickle/cloudpickle_fast.py +17 -0
- numba_cuda/numba/cuda/codegen.py +541 -0
- numba_cuda/numba/cuda/compiler.py +1396 -0
- numba_cuda/numba/cuda/core/analysis.py +758 -0
- numba_cuda/numba/cuda/core/annotations/__init__.py +0 -0
- numba_cuda/numba/cuda/core/annotations/pretty_annotate.py +288 -0
- numba_cuda/numba/cuda/core/annotations/type_annotations.py +305 -0
- numba_cuda/numba/cuda/core/base.py +1332 -0
- numba_cuda/numba/cuda/core/boxing.py +1411 -0
- numba_cuda/numba/cuda/core/bytecode.py +728 -0
- numba_cuda/numba/cuda/core/byteflow.py +2346 -0
- numba_cuda/numba/cuda/core/caching.py +744 -0
- numba_cuda/numba/cuda/core/callconv.py +392 -0
- numba_cuda/numba/cuda/core/codegen.py +171 -0
- numba_cuda/numba/cuda/core/compiler.py +199 -0
- numba_cuda/numba/cuda/core/compiler_lock.py +85 -0
- numba_cuda/numba/cuda/core/compiler_machinery.py +497 -0
- numba_cuda/numba/cuda/core/config.py +650 -0
- numba_cuda/numba/cuda/core/consts.py +124 -0
- numba_cuda/numba/cuda/core/controlflow.py +989 -0
- numba_cuda/numba/cuda/core/entrypoints.py +57 -0
- numba_cuda/numba/cuda/core/environment.py +66 -0
- numba_cuda/numba/cuda/core/errors.py +917 -0
- numba_cuda/numba/cuda/core/event.py +511 -0
- numba_cuda/numba/cuda/core/funcdesc.py +330 -0
- numba_cuda/numba/cuda/core/generators.py +387 -0
- numba_cuda/numba/cuda/core/imputils.py +509 -0
- numba_cuda/numba/cuda/core/inline_closurecall.py +1787 -0
- numba_cuda/numba/cuda/core/interpreter.py +3617 -0
- numba_cuda/numba/cuda/core/ir.py +1812 -0
- numba_cuda/numba/cuda/core/ir_utils.py +2638 -0
- numba_cuda/numba/cuda/core/optional.py +129 -0
- numba_cuda/numba/cuda/core/options.py +262 -0
- numba_cuda/numba/cuda/core/postproc.py +249 -0
- numba_cuda/numba/cuda/core/pythonapi.py +1859 -0
- numba_cuda/numba/cuda/core/registry.py +46 -0
- numba_cuda/numba/cuda/core/removerefctpass.py +123 -0
- numba_cuda/numba/cuda/core/rewrites/__init__.py +26 -0
- numba_cuda/numba/cuda/core/rewrites/ir_print.py +91 -0
- numba_cuda/numba/cuda/core/rewrites/registry.py +104 -0
- numba_cuda/numba/cuda/core/rewrites/static_binop.py +41 -0
- numba_cuda/numba/cuda/core/rewrites/static_getitem.py +189 -0
- numba_cuda/numba/cuda/core/rewrites/static_raise.py +100 -0
- numba_cuda/numba/cuda/core/sigutils.py +68 -0
- numba_cuda/numba/cuda/core/ssa.py +498 -0
- numba_cuda/numba/cuda/core/targetconfig.py +330 -0
- numba_cuda/numba/cuda/core/tracing.py +231 -0
- numba_cuda/numba/cuda/core/transforms.py +956 -0
- numba_cuda/numba/cuda/core/typed_passes.py +867 -0
- numba_cuda/numba/cuda/core/typeinfer.py +1950 -0
- numba_cuda/numba/cuda/core/unsafe/__init__.py +0 -0
- numba_cuda/numba/cuda/core/unsafe/bytes.py +67 -0
- numba_cuda/numba/cuda/core/unsafe/eh.py +67 -0
- numba_cuda/numba/cuda/core/unsafe/refcount.py +98 -0
- numba_cuda/numba/cuda/core/untyped_passes.py +1979 -0
- numba_cuda/numba/cuda/cpython/builtins.py +1153 -0
- numba_cuda/numba/cuda/cpython/charseq.py +1218 -0
- numba_cuda/numba/cuda/cpython/cmathimpl.py +560 -0
- numba_cuda/numba/cuda/cpython/enumimpl.py +103 -0
- numba_cuda/numba/cuda/cpython/iterators.py +167 -0
- numba_cuda/numba/cuda/cpython/listobj.py +1326 -0
- numba_cuda/numba/cuda/cpython/mathimpl.py +499 -0
- numba_cuda/numba/cuda/cpython/numbers.py +1475 -0
- numba_cuda/numba/cuda/cpython/rangeobj.py +289 -0
- numba_cuda/numba/cuda/cpython/slicing.py +322 -0
- numba_cuda/numba/cuda/cpython/tupleobj.py +456 -0
- numba_cuda/numba/cuda/cpython/unicode.py +2865 -0
- numba_cuda/numba/cuda/cpython/unicode_support.py +1597 -0
- numba_cuda/numba/cuda/cpython/unsafe/__init__.py +0 -0
- numba_cuda/numba/cuda/cpython/unsafe/numbers.py +64 -0
- numba_cuda/numba/cuda/cpython/unsafe/tuple.py +92 -0
- numba_cuda/numba/cuda/cuda_paths.py +691 -0
- numba_cuda/numba/cuda/cudadecl.py +543 -0
- numba_cuda/numba/cuda/cudadrv/__init__.py +14 -0
- numba_cuda/numba/cuda/cudadrv/devicearray.py +954 -0
- numba_cuda/numba/cuda/cudadrv/devices.py +249 -0
- numba_cuda/numba/cuda/cudadrv/driver.py +3238 -0
- numba_cuda/numba/cuda/cudadrv/drvapi.py +435 -0
- numba_cuda/numba/cuda/cudadrv/dummyarray.py +562 -0
- numba_cuda/numba/cuda/cudadrv/enums.py +613 -0
- numba_cuda/numba/cuda/cudadrv/error.py +48 -0
- numba_cuda/numba/cuda/cudadrv/libs.py +220 -0
- numba_cuda/numba/cuda/cudadrv/linkable_code.py +184 -0
- numba_cuda/numba/cuda/cudadrv/mappings.py +14 -0
- numba_cuda/numba/cuda/cudadrv/ndarray.py +26 -0
- numba_cuda/numba/cuda/cudadrv/nvrtc.py +193 -0
- numba_cuda/numba/cuda/cudadrv/nvvm.py +756 -0
- numba_cuda/numba/cuda/cudadrv/rtapi.py +13 -0
- numba_cuda/numba/cuda/cudadrv/runtime.py +34 -0
- numba_cuda/numba/cuda/cudaimpl.py +983 -0
- numba_cuda/numba/cuda/cudamath.py +149 -0
- numba_cuda/numba/cuda/datamodel/__init__.py +7 -0
- numba_cuda/numba/cuda/datamodel/cuda_manager.py +66 -0
- numba_cuda/numba/cuda/datamodel/cuda_models.py +1446 -0
- numba_cuda/numba/cuda/datamodel/cuda_packer.py +224 -0
- numba_cuda/numba/cuda/datamodel/cuda_registry.py +22 -0
- numba_cuda/numba/cuda/datamodel/cuda_testing.py +153 -0
- numba_cuda/numba/cuda/datamodel/manager.py +11 -0
- numba_cuda/numba/cuda/datamodel/models.py +9 -0
- numba_cuda/numba/cuda/datamodel/packer.py +9 -0
- numba_cuda/numba/cuda/datamodel/registry.py +11 -0
- numba_cuda/numba/cuda/datamodel/testing.py +11 -0
- numba_cuda/numba/cuda/debuginfo.py +997 -0
- numba_cuda/numba/cuda/decorators.py +294 -0
- numba_cuda/numba/cuda/descriptor.py +35 -0
- numba_cuda/numba/cuda/device_init.py +155 -0
- numba_cuda/numba/cuda/deviceufunc.py +1021 -0
- numba_cuda/numba/cuda/dispatcher.py +2463 -0
- numba_cuda/numba/cuda/errors.py +72 -0
- numba_cuda/numba/cuda/extending.py +697 -0
- numba_cuda/numba/cuda/flags.py +178 -0
- numba_cuda/numba/cuda/fp16.py +357 -0
- numba_cuda/numba/cuda/include/12/cuda_bf16.h +5118 -0
- numba_cuda/numba/cuda/include/12/cuda_bf16.hpp +3865 -0
- numba_cuda/numba/cuda/include/12/cuda_fp16.h +5363 -0
- numba_cuda/numba/cuda/include/12/cuda_fp16.hpp +3483 -0
- numba_cuda/numba/cuda/include/13/cuda_bf16.h +5118 -0
- numba_cuda/numba/cuda/include/13/cuda_bf16.hpp +3865 -0
- numba_cuda/numba/cuda/include/13/cuda_fp16.h +5363 -0
- numba_cuda/numba/cuda/include/13/cuda_fp16.hpp +3483 -0
- numba_cuda/numba/cuda/initialize.py +24 -0
- numba_cuda/numba/cuda/intrinsics.py +531 -0
- numba_cuda/numba/cuda/itanium_mangler.py +214 -0
- numba_cuda/numba/cuda/kernels/__init__.py +2 -0
- numba_cuda/numba/cuda/kernels/reduction.py +265 -0
- numba_cuda/numba/cuda/kernels/transpose.py +65 -0
- numba_cuda/numba/cuda/libdevice.py +3386 -0
- numba_cuda/numba/cuda/libdevicedecl.py +20 -0
- numba_cuda/numba/cuda/libdevicefuncs.py +1060 -0
- numba_cuda/numba/cuda/libdeviceimpl.py +88 -0
- numba_cuda/numba/cuda/locks.py +19 -0
- numba_cuda/numba/cuda/lowering.py +1980 -0
- numba_cuda/numba/cuda/mathimpl.py +374 -0
- numba_cuda/numba/cuda/memory_management/__init__.py +4 -0
- numba_cuda/numba/cuda/memory_management/memsys.cu +99 -0
- numba_cuda/numba/cuda/memory_management/memsys.cuh +22 -0
- numba_cuda/numba/cuda/memory_management/nrt.cu +212 -0
- numba_cuda/numba/cuda/memory_management/nrt.cuh +48 -0
- numba_cuda/numba/cuda/memory_management/nrt.py +390 -0
- numba_cuda/numba/cuda/memory_management/nrt_context.py +438 -0
- numba_cuda/numba/cuda/misc/appdirs.py +594 -0
- numba_cuda/numba/cuda/misc/cffiimpl.py +24 -0
- numba_cuda/numba/cuda/misc/coverage_support.py +43 -0
- numba_cuda/numba/cuda/misc/dump_style.py +41 -0
- numba_cuda/numba/cuda/misc/findlib.py +75 -0
- numba_cuda/numba/cuda/misc/firstlinefinder.py +96 -0
- numba_cuda/numba/cuda/misc/gdb_hook.py +240 -0
- numba_cuda/numba/cuda/misc/literal.py +28 -0
- numba_cuda/numba/cuda/misc/llvm_pass_timings.py +412 -0
- numba_cuda/numba/cuda/misc/special.py +94 -0
- numba_cuda/numba/cuda/models.py +56 -0
- numba_cuda/numba/cuda/np/arraymath.py +5130 -0
- numba_cuda/numba/cuda/np/arrayobj.py +7635 -0
- numba_cuda/numba/cuda/np/extensions.py +11 -0
- numba_cuda/numba/cuda/np/linalg.py +3087 -0
- numba_cuda/numba/cuda/np/math/__init__.py +0 -0
- numba_cuda/numba/cuda/np/math/cmathimpl.py +558 -0
- numba_cuda/numba/cuda/np/math/mathimpl.py +487 -0
- numba_cuda/numba/cuda/np/math/numbers.py +1461 -0
- numba_cuda/numba/cuda/np/npdatetime.py +969 -0
- numba_cuda/numba/cuda/np/npdatetime_helpers.py +217 -0
- numba_cuda/numba/cuda/np/npyfuncs.py +1808 -0
- numba_cuda/numba/cuda/np/npyimpl.py +1027 -0
- numba_cuda/numba/cuda/np/numpy_support.py +798 -0
- numba_cuda/numba/cuda/np/polynomial/__init__.py +4 -0
- numba_cuda/numba/cuda/np/polynomial/polynomial_core.py +242 -0
- numba_cuda/numba/cuda/np/polynomial/polynomial_functions.py +380 -0
- numba_cuda/numba/cuda/np/ufunc/__init__.py +4 -0
- numba_cuda/numba/cuda/np/ufunc/decorators.py +203 -0
- numba_cuda/numba/cuda/np/ufunc/sigparse.py +68 -0
- numba_cuda/numba/cuda/np/ufunc/ufuncbuilder.py +65 -0
- numba_cuda/numba/cuda/np/ufunc_db.py +1282 -0
- numba_cuda/numba/cuda/np/unsafe/__init__.py +0 -0
- numba_cuda/numba/cuda/np/unsafe/ndarray.py +84 -0
- numba_cuda/numba/cuda/nvvmutils.py +254 -0
- numba_cuda/numba/cuda/printimpl.py +126 -0
- numba_cuda/numba/cuda/random.py +308 -0
- numba_cuda/numba/cuda/reshape_funcs.cu +156 -0
- numba_cuda/numba/cuda/serialize.py +267 -0
- numba_cuda/numba/cuda/simulator/__init__.py +63 -0
- numba_cuda/numba/cuda/simulator/_internal/__init__.py +4 -0
- numba_cuda/numba/cuda/simulator/_internal/cuda_bf16.py +2 -0
- numba_cuda/numba/cuda/simulator/api.py +179 -0
- numba_cuda/numba/cuda/simulator/bf16.py +4 -0
- numba_cuda/numba/cuda/simulator/compiler.py +38 -0
- numba_cuda/numba/cuda/simulator/cudadrv/__init__.py +11 -0
- numba_cuda/numba/cuda/simulator/cudadrv/devicearray.py +462 -0
- numba_cuda/numba/cuda/simulator/cudadrv/devices.py +122 -0
- numba_cuda/numba/cuda/simulator/cudadrv/driver.py +66 -0
- numba_cuda/numba/cuda/simulator/cudadrv/drvapi.py +7 -0
- numba_cuda/numba/cuda/simulator/cudadrv/dummyarray.py +7 -0
- numba_cuda/numba/cuda/simulator/cudadrv/error.py +10 -0
- numba_cuda/numba/cuda/simulator/cudadrv/libs.py +10 -0
- numba_cuda/numba/cuda/simulator/cudadrv/linkable_code.py +61 -0
- numba_cuda/numba/cuda/simulator/cudadrv/nvrtc.py +11 -0
- numba_cuda/numba/cuda/simulator/cudadrv/nvvm.py +32 -0
- numba_cuda/numba/cuda/simulator/cudadrv/runtime.py +22 -0
- numba_cuda/numba/cuda/simulator/dispatcher.py +11 -0
- numba_cuda/numba/cuda/simulator/kernel.py +320 -0
- numba_cuda/numba/cuda/simulator/kernelapi.py +509 -0
- numba_cuda/numba/cuda/simulator/memory_management/__init__.py +4 -0
- numba_cuda/numba/cuda/simulator/memory_management/nrt.py +21 -0
- numba_cuda/numba/cuda/simulator/reduction.py +19 -0
- numba_cuda/numba/cuda/simulator/tests/support.py +4 -0
- numba_cuda/numba/cuda/simulator/vector_types.py +65 -0
- numba_cuda/numba/cuda/simulator_init.py +18 -0
- numba_cuda/numba/cuda/stubs.py +624 -0
- numba_cuda/numba/cuda/target.py +505 -0
- numba_cuda/numba/cuda/testing.py +347 -0
- numba_cuda/numba/cuda/tests/__init__.py +62 -0
- numba_cuda/numba/cuda/tests/benchmarks/__init__.py +0 -0
- numba_cuda/numba/cuda/tests/benchmarks/test_kernel_launch.py +119 -0
- numba_cuda/numba/cuda/tests/cloudpickle_main_class.py +9 -0
- numba_cuda/numba/cuda/tests/core/serialize_usecases.py +113 -0
- numba_cuda/numba/cuda/tests/core/test_itanium_mangler.py +83 -0
- numba_cuda/numba/cuda/tests/core/test_serialize.py +371 -0
- numba_cuda/numba/cuda/tests/cudadrv/__init__.py +9 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_array_attr.py +147 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py +161 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py +397 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_auto_context.py +24 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py +180 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py +313 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py +191 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +621 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_deallocations.py +247 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_detect.py +100 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_emm_plugins.py +200 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_events.py +53 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_host_alloc.py +72 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_init.py +138 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_inline_ptx.py +43 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_is_fp16.py +15 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_linkable_code.py +58 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_linker.py +348 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_managed_alloc.py +128 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_module_callbacks.py +301 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_nvjitlink.py +174 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_nvrtc.py +28 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_nvvm_driver.py +185 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_pinned.py +39 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_profiler.py +23 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_reset_device.py +38 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_runtime.py +48 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_select_device.py +44 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_streams.py +127 -0
- numba_cuda/numba/cuda/tests/cudapy/__init__.py +9 -0
- numba_cuda/numba/cuda/tests/cudapy/cache_usecases.py +231 -0
- numba_cuda/numba/cuda/tests/cudapy/cache_with_cpu_usecases.py +50 -0
- numba_cuda/numba/cuda/tests/cudapy/cg_cache_usecases.py +36 -0
- numba_cuda/numba/cuda/tests/cudapy/complex_usecases.py +116 -0
- numba_cuda/numba/cuda/tests/cudapy/enum_usecases.py +59 -0
- numba_cuda/numba/cuda/tests/cudapy/extensions_usecases.py +62 -0
- numba_cuda/numba/cuda/tests/cudapy/jitlink.ptx +28 -0
- numba_cuda/numba/cuda/tests/cudapy/overload_usecases.py +33 -0
- numba_cuda/numba/cuda/tests/cudapy/recursion_usecases.py +104 -0
- numba_cuda/numba/cuda/tests/cudapy/test_alignment.py +47 -0
- numba_cuda/numba/cuda/tests/cudapy/test_analysis.py +1122 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array.py +344 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_alignment.py +268 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_args.py +203 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_methods.py +63 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_reductions.py +360 -0
- numba_cuda/numba/cuda/tests/cudapy/test_atomics.py +1815 -0
- numba_cuda/numba/cuda/tests/cudapy/test_bfloat16.py +599 -0
- numba_cuda/numba/cuda/tests/cudapy/test_bfloat16_bindings.py +377 -0
- numba_cuda/numba/cuda/tests/cudapy/test_blackscholes.py +160 -0
- numba_cuda/numba/cuda/tests/cudapy/test_boolean.py +27 -0
- numba_cuda/numba/cuda/tests/cudapy/test_byteflow.py +98 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cache_hints.py +210 -0
- numba_cuda/numba/cuda/tests/cudapy/test_caching.py +683 -0
- numba_cuda/numba/cuda/tests/cudapy/test_casting.py +265 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cffi.py +42 -0
- numba_cuda/numba/cuda/tests/cudapy/test_compiler.py +718 -0
- numba_cuda/numba/cuda/tests/cudapy/test_complex.py +370 -0
- numba_cuda/numba/cuda/tests/cudapy/test_complex_kernel.py +23 -0
- numba_cuda/numba/cuda/tests/cudapy/test_const_string.py +142 -0
- numba_cuda/numba/cuda/tests/cudapy/test_constmem.py +178 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cooperative_groups.py +193 -0
- numba_cuda/numba/cuda/tests/cudapy/test_copy_propagate.py +131 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py +438 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cuda_jit_no_types.py +94 -0
- numba_cuda/numba/cuda/tests/cudapy/test_datetime.py +101 -0
- numba_cuda/numba/cuda/tests/cudapy/test_debug.py +105 -0
- numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +978 -0
- numba_cuda/numba/cuda/tests/cudapy/test_debuginfo_types.py +476 -0
- numba_cuda/numba/cuda/tests/cudapy/test_device_func.py +500 -0
- numba_cuda/numba/cuda/tests/cudapy/test_dispatcher.py +820 -0
- numba_cuda/numba/cuda/tests/cudapy/test_enums.py +152 -0
- numba_cuda/numba/cuda/tests/cudapy/test_errors.py +111 -0
- numba_cuda/numba/cuda/tests/cudapy/test_exception.py +170 -0
- numba_cuda/numba/cuda/tests/cudapy/test_extending.py +1088 -0
- numba_cuda/numba/cuda/tests/cudapy/test_extending_types.py +71 -0
- numba_cuda/numba/cuda/tests/cudapy/test_fastmath.py +265 -0
- numba_cuda/numba/cuda/tests/cudapy/test_flow_control.py +1433 -0
- numba_cuda/numba/cuda/tests/cudapy/test_forall.py +57 -0
- numba_cuda/numba/cuda/tests/cudapy/test_freevar.py +34 -0
- numba_cuda/numba/cuda/tests/cudapy/test_frexp_ldexp.py +69 -0
- numba_cuda/numba/cuda/tests/cudapy/test_globals.py +62 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc.py +474 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scalar.py +167 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scheduling.py +92 -0
- numba_cuda/numba/cuda/tests/cudapy/test_idiv.py +39 -0
- numba_cuda/numba/cuda/tests/cudapy/test_inline.py +170 -0
- numba_cuda/numba/cuda/tests/cudapy/test_inspect.py +255 -0
- numba_cuda/numba/cuda/tests/cudapy/test_intrinsics.py +1219 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ipc.py +263 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ir.py +598 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ir_utils.py +276 -0
- numba_cuda/numba/cuda/tests/cudapy/test_iterators.py +101 -0
- numba_cuda/numba/cuda/tests/cudapy/test_lang.py +68 -0
- numba_cuda/numba/cuda/tests/cudapy/test_laplace.py +123 -0
- numba_cuda/numba/cuda/tests/cudapy/test_libdevice.py +194 -0
- numba_cuda/numba/cuda/tests/cudapy/test_lineinfo.py +220 -0
- numba_cuda/numba/cuda/tests/cudapy/test_localmem.py +173 -0
- numba_cuda/numba/cuda/tests/cudapy/test_make_function_to_jit_function.py +364 -0
- numba_cuda/numba/cuda/tests/cudapy/test_mandel.py +47 -0
- numba_cuda/numba/cuda/tests/cudapy/test_math.py +842 -0
- numba_cuda/numba/cuda/tests/cudapy/test_matmul.py +76 -0
- numba_cuda/numba/cuda/tests/cudapy/test_minmax.py +78 -0
- numba_cuda/numba/cuda/tests/cudapy/test_montecarlo.py +25 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multigpu.py +145 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multiprocessing.py +39 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multithreads.py +82 -0
- numba_cuda/numba/cuda/tests/cudapy/test_nondet.py +53 -0
- numba_cuda/numba/cuda/tests/cudapy/test_operator.py +504 -0
- numba_cuda/numba/cuda/tests/cudapy/test_optimization.py +93 -0
- numba_cuda/numba/cuda/tests/cudapy/test_overload.py +402 -0
- numba_cuda/numba/cuda/tests/cudapy/test_powi.py +128 -0
- numba_cuda/numba/cuda/tests/cudapy/test_print.py +193 -0
- numba_cuda/numba/cuda/tests/cudapy/test_py2_div_issue.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_random.py +117 -0
- numba_cuda/numba/cuda/tests/cudapy/test_record_dtype.py +614 -0
- numba_cuda/numba/cuda/tests/cudapy/test_recursion.py +130 -0
- numba_cuda/numba/cuda/tests/cudapy/test_reduction.py +94 -0
- numba_cuda/numba/cuda/tests/cudapy/test_retrieve_autoconverted_arrays.py +83 -0
- numba_cuda/numba/cuda/tests/cudapy/test_serialize.py +86 -0
- numba_cuda/numba/cuda/tests/cudapy/test_slicing.py +40 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sm.py +457 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sm_creation.py +233 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ssa.py +454 -0
- numba_cuda/numba/cuda/tests/cudapy/test_stream_api.py +56 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sync.py +277 -0
- numba_cuda/numba/cuda/tests/cudapy/test_tracing.py +200 -0
- numba_cuda/numba/cuda/tests/cudapy/test_transpose.py +90 -0
- numba_cuda/numba/cuda/tests/cudapy/test_typeconv.py +333 -0
- numba_cuda/numba/cuda/tests/cudapy/test_typeinfer.py +538 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ufuncs.py +585 -0
- numba_cuda/numba/cuda/tests/cudapy/test_userexc.py +42 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vector_type.py +485 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize.py +312 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_complex.py +23 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_decor.py +183 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_device.py +40 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_scalar_arg.py +40 -0
- numba_cuda/numba/cuda/tests/cudapy/test_warning.py +206 -0
- numba_cuda/numba/cuda/tests/cudapy/test_warp_ops.py +446 -0
- numba_cuda/numba/cuda/tests/cudasim/__init__.py +9 -0
- numba_cuda/numba/cuda/tests/cudasim/support.py +9 -0
- numba_cuda/numba/cuda/tests/cudasim/test_cudasim_issues.py +111 -0
- numba_cuda/numba/cuda/tests/data/__init__.py +2 -0
- numba_cuda/numba/cuda/tests/data/cta_barrier.cu +28 -0
- numba_cuda/numba/cuda/tests/data/cuda_include.cu +10 -0
- numba_cuda/numba/cuda/tests/data/error.cu +12 -0
- numba_cuda/numba/cuda/tests/data/include/add.cuh +8 -0
- numba_cuda/numba/cuda/tests/data/jitlink.cu +28 -0
- numba_cuda/numba/cuda/tests/data/jitlink.ptx +49 -0
- numba_cuda/numba/cuda/tests/data/warn.cu +12 -0
- numba_cuda/numba/cuda/tests/doc_examples/__init__.py +9 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/__init__.py +2 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/functions.cu +54 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/include/mul.cuh +8 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/saxpy.cu +14 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_cg.py +86 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_cpointer.py +68 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py +81 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_ffi.py +141 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_laplace.py +160 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_matmul.py +180 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_montecarlo.py +119 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_random.py +66 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_reduction.py +80 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_sessionize.py +206 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_ufunc.py +53 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_vecadd.py +76 -0
- numba_cuda/numba/cuda/tests/nocuda/__init__.py +9 -0
- numba_cuda/numba/cuda/tests/nocuda/test_dummyarray.py +452 -0
- numba_cuda/numba/cuda/tests/nocuda/test_function_resolution.py +48 -0
- numba_cuda/numba/cuda/tests/nocuda/test_import.py +63 -0
- numba_cuda/numba/cuda/tests/nocuda/test_library_lookup.py +252 -0
- numba_cuda/numba/cuda/tests/nocuda/test_nvvm.py +59 -0
- numba_cuda/numba/cuda/tests/nrt/__init__.py +9 -0
- numba_cuda/numba/cuda/tests/nrt/test_nrt.py +387 -0
- numba_cuda/numba/cuda/tests/nrt/test_nrt_refct.py +124 -0
- numba_cuda/numba/cuda/tests/support.py +900 -0
- numba_cuda/numba/cuda/typeconv/__init__.py +4 -0
- numba_cuda/numba/cuda/typeconv/castgraph.py +137 -0
- numba_cuda/numba/cuda/typeconv/rules.py +63 -0
- numba_cuda/numba/cuda/typeconv/typeconv.py +121 -0
- numba_cuda/numba/cuda/types/__init__.py +233 -0
- numba_cuda/numba/cuda/types/__init__.pyi +167 -0
- numba_cuda/numba/cuda/types/abstract.py +9 -0
- numba_cuda/numba/cuda/types/common.py +9 -0
- numba_cuda/numba/cuda/types/containers.py +9 -0
- numba_cuda/numba/cuda/types/cuda_abstract.py +533 -0
- numba_cuda/numba/cuda/types/cuda_common.py +110 -0
- numba_cuda/numba/cuda/types/cuda_containers.py +971 -0
- numba_cuda/numba/cuda/types/cuda_function_type.py +230 -0
- numba_cuda/numba/cuda/types/cuda_functions.py +798 -0
- numba_cuda/numba/cuda/types/cuda_iterators.py +120 -0
- numba_cuda/numba/cuda/types/cuda_misc.py +569 -0
- numba_cuda/numba/cuda/types/cuda_npytypes.py +690 -0
- numba_cuda/numba/cuda/types/cuda_scalars.py +280 -0
- numba_cuda/numba/cuda/types/ext_types.py +101 -0
- numba_cuda/numba/cuda/types/function_type.py +11 -0
- numba_cuda/numba/cuda/types/functions.py +9 -0
- numba_cuda/numba/cuda/types/iterators.py +9 -0
- numba_cuda/numba/cuda/types/misc.py +9 -0
- numba_cuda/numba/cuda/types/npytypes.py +9 -0
- numba_cuda/numba/cuda/types/scalars.py +9 -0
- numba_cuda/numba/cuda/typing/__init__.py +19 -0
- numba_cuda/numba/cuda/typing/arraydecl.py +939 -0
- numba_cuda/numba/cuda/typing/asnumbatype.py +130 -0
- numba_cuda/numba/cuda/typing/bufproto.py +70 -0
- numba_cuda/numba/cuda/typing/builtins.py +1209 -0
- numba_cuda/numba/cuda/typing/cffi_utils.py +219 -0
- numba_cuda/numba/cuda/typing/cmathdecl.py +47 -0
- numba_cuda/numba/cuda/typing/collections.py +138 -0
- numba_cuda/numba/cuda/typing/context.py +782 -0
- numba_cuda/numba/cuda/typing/ctypes_utils.py +125 -0
- numba_cuda/numba/cuda/typing/dictdecl.py +63 -0
- numba_cuda/numba/cuda/typing/enumdecl.py +74 -0
- numba_cuda/numba/cuda/typing/listdecl.py +147 -0
- numba_cuda/numba/cuda/typing/mathdecl.py +158 -0
- numba_cuda/numba/cuda/typing/npdatetime.py +322 -0
- numba_cuda/numba/cuda/typing/npydecl.py +749 -0
- numba_cuda/numba/cuda/typing/setdecl.py +115 -0
- numba_cuda/numba/cuda/typing/templates.py +1446 -0
- numba_cuda/numba/cuda/typing/typeof.py +301 -0
- numba_cuda/numba/cuda/ufuncs.py +746 -0
- numba_cuda/numba/cuda/utils.py +724 -0
- numba_cuda/numba/cuda/vector_types.py +214 -0
- numba_cuda/numba/cuda/vectorizers.py +260 -0
- numba_cuda-0.22.0.dist-info/METADATA +109 -0
- numba_cuda-0.22.0.dist-info/RECORD +487 -0
- numba_cuda-0.22.0.dist-info/WHEEL +6 -0
- numba_cuda-0.22.0.dist-info/licenses/LICENSE +26 -0
- numba_cuda-0.22.0.dist-info/licenses/LICENSE.numba +24 -0
- numba_cuda-0.22.0.dist-info/top_level.txt +1 -0

numba_cuda/numba/cuda/np/linalg.py (new file)

@@ -0,0 +1,3087 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: BSD-2-Clause
+"""
+Implementation of linear algebra operations.
+"""
+
+import contextlib
+import warnings
+
+from llvmlite import ir
+
+import numpy as np
+import operator
+
+from numba.cuda.core.imputils import impl_ret_borrowed, impl_ret_new_ref
+from numba.cuda.typing import signature
+from numba.cuda.extending import intrinsic, overload, register_jitable
+from numba.cuda import types
+from numba.cuda import cgutils
+from numba.cuda.core.errors import (
+    TypingError,
+    NumbaTypeError,
+    NumbaPerformanceWarning,
+)
+from .arrayobj import make_array, array_copy
+from numba.cuda.np import numpy_support as np_support
+
+ll_char = ir.IntType(8)
+ll_char_p = ll_char.as_pointer()
+ll_void_p = ll_char_p
+ll_intc = ir.IntType(32)
+ll_intc_p = ll_intc.as_pointer()
+intp_t = cgutils.intp_t
+ll_intp_p = intp_t.as_pointer()
+
+
+# fortran int type, this needs to match the F_INT C declaration in
+# _lapack.c and is present to accommodate potential future 64bit int
+# based LAPACK use.
+F_INT_nptype = np.int32
+F_INT_nbtype = types.int32
+
+# BLAS kinds as letters
+_blas_kinds = {
+    types.float32: "s",
+    types.float64: "d",
+    types.complex64: "c",
+    types.complex128: "z",
+}
+
+
+def get_blas_kind(dtype, func_name="<BLAS function>"):
+    kind = _blas_kinds.get(dtype)
+    if kind is None:
+        raise NumbaTypeError("unsupported dtype for %s()" % (func_name,))
+    return kind
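
[Aside, not part of the diff] The _blas_kinds table above maps Numba scalar types to the single-letter precision prefixes used in BLAS/LAPACK routine names (s/d for single/double real, c/z for single/double complex), and get_blas_kind is the lookup with a typed error for anything else. A minimal illustration, assuming the module is importable as numba.cuda.np.linalg (consistent with the package layout listed above):

    from numba.cuda import types
    from numba.cuda.np.linalg import get_blas_kind

    # The kind letter selects the routine family, e.g. "d" -> ddot/dgemm,
    # "z" -> zdotu/zgemm.
    assert get_blas_kind(types.float64) == "d"
    assert get_blas_kind(types.complex128) == "z"
    # Integer or other unsupported dtypes raise NumbaTypeError instead.
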
+
+
+def ensure_blas():
+    try:
+        import scipy.linalg.cython_blas  # noqa: F401
+    except ImportError:
+        raise ImportError("scipy 0.16+ is required for linear algebra")
+
+
+def ensure_lapack():
+    try:
+        import scipy.linalg.cython_lapack  # noqa: F401
+    except ImportError:
+        raise ImportError("scipy 0.16+ is required for linear algebra")
+
+
+def make_constant_slot(context, builder, ty, val):
+    const = context.get_constant_generic(builder, ty, val)
+    return cgutils.alloca_once_value(builder, const)
+
+
+class _BLAS:
+    """
+    Functions to return type signatures for wrapped
+    BLAS functions.
+    """
+
+    def __init__(self):
+        ensure_blas()
+
+    @classmethod
+    def numba_xxnrm2(cls, dtype):
+        rtype = getattr(dtype, "underlying_float", dtype)
+        sig = types.intc(
+            types.char,  # kind
+            types.intp,  # n
+            types.CPointer(dtype),  # x
+            types.intp,  # incx
+            types.CPointer(rtype),
+        )  # returned
+
+        return types.ExternalFunction("numba_xxnrm2", sig)
+
+    @classmethod
+    def numba_xxgemm(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.char,  # transa
+            types.char,  # transb
+            types.intp,  # m
+            types.intp,  # n
+            types.intp,  # k
+            types.CPointer(dtype),  # alpha
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(dtype),  # b
+            types.intp,  # ldb
+            types.CPointer(dtype),  # beta
+            types.CPointer(dtype),  # c
+            types.intp,  # ldc
+        )
+        return types.ExternalFunction("numba_xxgemm", sig)
+
+
+class _LAPACK:
+    """
+    Functions to return type signatures for wrapped
+    LAPACK functions.
+    """
+
+    def __init__(self):
+        ensure_lapack()
+
+    @classmethod
+    def numba_xxgetrf(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.intp,  # m
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(F_INT_nbtype),  # ipiv
+        )
+        return types.ExternalFunction("numba_xxgetrf", sig)
+
+    @classmethod
+    def numba_ez_xxgetri(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(F_INT_nbtype),  # ipiv
+        )
+        return types.ExternalFunction("numba_ez_xxgetri", sig)
+
+    @classmethod
+    def numba_ez_rgeev(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.char,  # jobvl
+            types.char,  # jobvr
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(dtype),  # wr
+            types.CPointer(dtype),  # wi
+            types.CPointer(dtype),  # vl
+            types.intp,  # ldvl
+            types.CPointer(dtype),  # vr
+            types.intp,  # ldvr
+        )
+        return types.ExternalFunction("numba_ez_rgeev", sig)
+
+    @classmethod
+    def numba_ez_cgeev(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.char,  # jobvl
+            types.char,  # jobvr
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(dtype),  # w
+            types.CPointer(dtype),  # vl
+            types.intp,  # ldvl
+            types.CPointer(dtype),  # vr
+            types.intp,  # ldvr
+        )
+        return types.ExternalFunction("numba_ez_cgeev", sig)
+
+    @classmethod
+    def numba_ez_xxxevd(cls, dtype):
+        wtype = getattr(dtype, "underlying_float", dtype)
+        sig = types.intc(
+            types.char,  # kind
+            types.char,  # jobz
+            types.char,  # uplo
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(wtype),  # w
+        )
+        return types.ExternalFunction("numba_ez_xxxevd", sig)
+
+    @classmethod
+    def numba_xxpotrf(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.char,  # uplo
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+        )
+        return types.ExternalFunction("numba_xxpotrf", sig)
+
+    @classmethod
+    def numba_ez_gesdd(cls, dtype):
+        stype = getattr(dtype, "underlying_float", dtype)
+        sig = types.intc(
+            types.char,  # kind
+            types.char,  # jobz
+            types.intp,  # m
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(stype),  # s
+            types.CPointer(dtype),  # u
+            types.intp,  # ldu
+            types.CPointer(dtype),  # vt
+            types.intp,  # ldvt
+        )
+
+        return types.ExternalFunction("numba_ez_gesdd", sig)
+
+    @classmethod
+    def numba_ez_geqrf(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.intp,  # m
+            types.intp,  # n
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(dtype),  # tau
+        )
+        return types.ExternalFunction("numba_ez_geqrf", sig)
+
+    @classmethod
+    def numba_ez_xxgqr(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.intp,  # m
+            types.intp,  # n
+            types.intp,  # k
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(dtype),  # tau
+        )
+        return types.ExternalFunction("numba_ez_xxgqr", sig)
+
+    @classmethod
+    def numba_ez_gelsd(cls, dtype):
+        rtype = getattr(dtype, "underlying_float", dtype)
+        sig = types.intc(
+            types.char,  # kind
+            types.intp,  # m
+            types.intp,  # n
+            types.intp,  # nrhs
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(dtype),  # b
+            types.intp,  # ldb
+            types.CPointer(rtype),  # S
+            types.float64,  # rcond
+            types.CPointer(types.intc),  # rank
+        )
+        return types.ExternalFunction("numba_ez_gelsd", sig)
+
+    @classmethod
+    def numba_xgesv(cls, dtype):
+        sig = types.intc(
+            types.char,  # kind
+            types.intp,  # n
+            types.intp,  # nhrs
+            types.CPointer(dtype),  # a
+            types.intp,  # lda
+            types.CPointer(F_INT_nbtype),  # ipiv
+            types.CPointer(dtype),  # b
+            types.intp,  # ldb
+        )
+        return types.ExternalFunction("numba_xgesv", sig)
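
[Aside, not part of the diff] Each classmethod on _BLAS and _LAPACK only builds a types.ExternalFunction signature for one of the numba_* C wrappers (the wrappers that the _helperlib.c docstrings below refer to); concrete linear-algebra implementations bind one of them for a fixed dtype and call it from jitted code with raw pointers. The sketch below shows that pattern as it is used by Numba's CPU target; the names _dgetrf, _d_kind and _lu_inplace are illustrative only, and the sketch can run only where the numba_xxgetrf wrapper symbol is actually available to the compiled code:

    import numpy as np
    from numba.cuda import types
    from numba.cuda.extending import register_jitable
    from numba.cuda.np.linalg import _LAPACK, get_blas_kind

    # Hypothetical binding of the LU-factorization wrapper for float64 inputs.
    _dgetrf = _LAPACK().numba_xxgetrf(types.float64)
    _d_kind = ord(get_blas_kind(types.float64))  # 'd' as a char code

    @register_jitable
    def _lu_inplace(a):
        # a: float64, contiguous, shape (n, n); overwritten with its LU factors.
        n = a.shape[-1]
        ipiv = np.empty(n, dtype=np.int32)
        r = _dgetrf(_d_kind, n, n, a.ctypes, n, ipiv.ctypes)
        return r, ipiv
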
+
+
+@contextlib.contextmanager
+def make_contiguous(context, builder, sig, args):
+    """
+    Ensure that all array arguments are contiguous, if necessary by
+    copying them.
+    A new (sig, args) tuple is yielded.
+    """
+    newtys = []
+    newargs = []
+    copies = []
+    for ty, val in zip(sig.args, args):
+        if not isinstance(ty, types.Array) or ty.layout in "CF":
+            newty, newval = ty, val
+        else:
+            newty = ty.copy(layout="C")
+            copysig = signature(newty, ty)
+            newval = array_copy(context, builder, copysig, (val,))
+            copies.append((newty, newval))
+        newtys.append(newty)
+        newargs.append(newval)
+    yield signature(sig.return_type, *newtys), tuple(newargs)
+    for ty, val in copies:
+        context.nrt.decref(builder, ty, val)
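
[Aside, not part of the diff] make_contiguous only copies array arguments whose layout is neither C nor F (layout "A", typically a strided view), producing a temporary C-contiguous array for the BLAS call and decref'ing it afterwards. A plain-NumPy picture of the case it handles:

    import numpy as np

    a = np.arange(16.0).reshape(4, 4)[:, ::2]   # strided view: neither C- nor F-contiguous
    assert not a.flags.c_contiguous and not a.flags.f_contiguous

    b = np.ascontiguousarray(a)                 # the kind of copy make_contiguous inserts
    assert b.flags.c_contiguous and np.array_equal(a, b)
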
+
+
+def check_c_int(context, builder, n):
+    """
+    Check whether *n* fits in a C `int`.
+    """
+    _maxint = 2**31 - 1
+
+    def impl(n):
+        if n > _maxint:
+            raise OverflowError("array size too large to fit in C int")
+
+    context.compile_internal(
+        builder, impl, signature(types.none, types.intp), (n,)
+    )
+
+
+def check_blas_return(context, builder, res):
+    """
+    Check the integer error return from one of the BLAS wrappers in
+    _helperlib.c.
+    """
+    with builder.if_then(cgutils.is_not_null(builder, res), likely=False):
+        # Those errors shouldn't happen, it's easier to just abort the process
+        pyapi = context.get_python_api(builder)
+        pyapi.gil_ensure()
+        pyapi.fatal_error("BLAS wrapper returned with an error")
+
+
+def check_lapack_return(context, builder, res):
+    """
+    Check the integer error return from one of the LAPACK wrappers in
+    _helperlib.c.
+    """
+    with builder.if_then(cgutils.is_not_null(builder, res), likely=False):
+        # Those errors shouldn't happen, it's easier to just abort the process
+        pyapi = context.get_python_api(builder)
+        pyapi.gil_ensure()
+        pyapi.fatal_error("LAPACK wrapper returned with an error")
+
+
+def call_xxdot(context, builder, conjugate, dtype, n, a_data, b_data, out_data):
+    """
+    Call the BLAS vector * vector product function for the given arguments.
+    """
+    fnty = ir.FunctionType(
+        ir.IntType(32),
+        [
+            ll_char,
+            ll_char,
+            intp_t,  # kind, conjugate, n
+            ll_void_p,
+            ll_void_p,
+            ll_void_p,  # a, b, out
+        ],
+    )
+    fn = cgutils.get_or_insert_function(builder.module, fnty, "numba_xxdot")
+
+    kind = get_blas_kind(dtype)
+    kind_val = ir.Constant(ll_char, ord(kind))
+    conjugate = ir.Constant(ll_char, int(conjugate))
+
+    res = builder.call(
+        fn,
+        (
+            kind_val,
+            conjugate,
+            n,
+            builder.bitcast(a_data, ll_void_p),
+            builder.bitcast(b_data, ll_void_p),
+            builder.bitcast(out_data, ll_void_p),
+        ),
+    )
+    check_blas_return(context, builder, res)
+
+
+def call_xxgemv(
+    context, builder, do_trans, m_type, m_shapes, m_data, v_data, out_data
+):
+    """
+    Call the BLAS matrix * vector product function for the given arguments.
+    """
+    fnty = ir.FunctionType(
+        ir.IntType(32),
+        [
+            ll_char,
+            ll_char,  # kind, trans
+            intp_t,
+            intp_t,  # m, n
+            ll_void_p,
+            ll_void_p,
+            intp_t,  # alpha, a, lda
+            ll_void_p,
+            ll_void_p,
+            ll_void_p,  # x, beta, y
+        ],
+    )
+    fn = cgutils.get_or_insert_function(builder.module, fnty, "numba_xxgemv")
+
+    dtype = m_type.dtype
+    alpha = make_constant_slot(context, builder, dtype, 1.0)
+    beta = make_constant_slot(context, builder, dtype, 0.0)
+
+    if m_type.layout == "F":
+        m, n = m_shapes
+        lda = m_shapes[0]
+    else:
+        n, m = m_shapes
+        lda = m_shapes[1]
+
+    kind = get_blas_kind(dtype)
+    kind_val = ir.Constant(ll_char, ord(kind))
+    trans = ir.Constant(ll_char, ord("t") if do_trans else ord("n"))
+
+    res = builder.call(
+        fn,
+        (
+            kind_val,
+            trans,
+            m,
+            n,
+            builder.bitcast(alpha, ll_void_p),
+            builder.bitcast(m_data, ll_void_p),
+            lda,
+            builder.bitcast(v_data, ll_void_p),
+            builder.bitcast(beta, ll_void_p),
+            builder.bitcast(out_data, ll_void_p),
+        ),
+    )
+    check_blas_return(context, builder, res)
+
+
+def call_xxgemm(
+    context,
+    builder,
+    x_type,
+    x_shapes,
+    x_data,
+    y_type,
+    y_shapes,
+    y_data,
+    out_type,
+    out_shapes,
+    out_data,
+):
+    """
+    Call the BLAS matrix * matrix product function for the given arguments.
+    """
+    fnty = ir.FunctionType(
+        ir.IntType(32),
+        [
+            ll_char,  # kind
+            ll_char,
+            ll_char,  # transa, transb
+            intp_t,
+            intp_t,
+            intp_t,  # m, n, k
+            ll_void_p,
+            ll_void_p,
+            intp_t,  # alpha, a, lda
+            ll_void_p,
+            intp_t,
+            ll_void_p,  # b, ldb, beta
+            ll_void_p,
+            intp_t,  # c, ldc
+        ],
+    )
+    fn = cgutils.get_or_insert_function(builder.module, fnty, "numba_xxgemm")
+
+    m, k = x_shapes
+    _k, n = y_shapes
+    dtype = x_type.dtype
+    alpha = make_constant_slot(context, builder, dtype, 1.0)
+    beta = make_constant_slot(context, builder, dtype, 0.0)
+
+    trans = ir.Constant(ll_char, ord("t"))
+    notrans = ir.Constant(ll_char, ord("n"))
+
+    def get_array_param(ty, shapes, data):
+        return (
+            # Transpose if layout different from result's
+            notrans if ty.layout == out_type.layout else trans,
+            # Size of the inner dimension in physical array order
+            shapes[1] if ty.layout == "C" else shapes[0],
+            # The data pointer, unit-less
+            builder.bitcast(data, ll_void_p),
+        )
+
+    transa, lda, data_a = get_array_param(y_type, y_shapes, y_data)
+    transb, ldb, data_b = get_array_param(x_type, x_shapes, x_data)
+    _, ldc, data_c = get_array_param(out_type, out_shapes, out_data)
+
+    kind = get_blas_kind(dtype)
+    kind_val = ir.Constant(ll_char, ord(kind))
+
+    res = builder.call(
+        fn,
+        (
+            kind_val,
+            transa,
+            transb,
+            n,
+            m,
+            k,
+            builder.bitcast(alpha, ll_void_p),
+            data_a,
+            lda,
+            data_b,
+            ldb,
+            builder.bitcast(beta, ll_void_p),
+            data_c,
+            ldc,
+        ),
+    )
+    check_blas_return(context, builder, res)
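
[Aside, not part of the diff] call_xxgemm feeds a Fortran-convention (column-major) GEMM, so for a C-ordered result it swaps the operands and the m/n extents: a row-major A @ B occupies memory exactly like the column-major product B.T @ A.T, i.e. the transposed result. The identity it relies on, checked in plain NumPy:

    import numpy as np

    rng = np.random.default_rng(0)
    A = rng.standard_normal((3, 4))   # C-ordered
    B = rng.standard_normal((4, 5))   # C-ordered

    # (A @ B) read column-major is (A @ B).T == B.T @ A.T, which is what the
    # wrapper asks xGEMM to compute when the operands share the output layout.
    assert np.allclose(A @ B, (B.T @ A.T).T)
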
+
+
+def dot_2_mm(context, builder, sig, args):
+    """
+    np.dot(matrix, matrix)
+    """
+
+    def dot_impl(a, b):
+        m, k = a.shape
+        _k, n = b.shape
+        if k == 0:
+            return np.zeros((m, n), a.dtype)
+        out = np.empty((m, n), a.dtype)
+        return np.dot(a, b, out)
+
+    res = context.compile_internal(builder, dot_impl, sig, args)
+    return impl_ret_new_ref(context, builder, sig.return_type, res)
+
+
+def dot_2_vm(context, builder, sig, args):
+    """
+    np.dot(vector, matrix)
+    """
+
+    def dot_impl(a, b):
+        (m,) = a.shape
+        _m, n = b.shape
+        if m == 0:
+            return np.zeros((n,), a.dtype)
+        out = np.empty((n,), a.dtype)
+        return np.dot(a, b, out)
+
+    res = context.compile_internal(builder, dot_impl, sig, args)
+    return impl_ret_new_ref(context, builder, sig.return_type, res)
+
+
+def dot_2_mv(context, builder, sig, args):
+    """
+    np.dot(matrix, vector)
+    """
+
+    def dot_impl(a, b):
+        m, n = a.shape
+        (_n,) = b.shape
+        if n == 0:
+            return np.zeros((m,), a.dtype)
+        out = np.empty((m,), a.dtype)
+        return np.dot(a, b, out)
+
+    res = context.compile_internal(builder, dot_impl, sig, args)
+    return impl_ret_new_ref(context, builder, sig.return_type, res)
+
+
+def dot_2_vv(context, builder, sig, args, conjugate=False):
+    """
+    np.dot(vector, vector)
+    np.vdot(vector, vector)
+    """
+    aty, bty = sig.args
+    dtype = sig.return_type
+    a = make_array(aty)(context, builder, args[0])
+    b = make_array(bty)(context, builder, args[1])
+    (n,) = cgutils.unpack_tuple(builder, a.shape)
+
+    def check_args(a, b):
+        (m,) = a.shape
+        (n,) = b.shape
+        if m != n:
+            raise ValueError(
+                "incompatible array sizes for np.dot(a, b) (vector * vector)"
+            )
+
+    context.compile_internal(
+        builder, check_args, signature(types.none, *sig.args), args
+    )
+    check_c_int(context, builder, n)
+
+    out = cgutils.alloca_once(builder, context.get_value_type(dtype))
+    call_xxdot(context, builder, conjugate, dtype, n, a.data, b.data, out)
+    return builder.load(out)
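
[Aside, not part of the diff] dot_2_vv above backs both np.dot and np.vdot on 1-D operands; the only difference is the conjugate flag forwarded to the xDOT wrapper (set by the np.vdot overload further down), which makes vdot conjugate its first argument for complex inputs. The NumPy behaviour being mirrored:

    import numpy as np

    a = np.array([1 + 2j, 3 - 1j])
    b = np.array([2 - 1j, 1 + 4j])

    assert np.isclose(np.vdot(a, b), np.dot(np.conj(a), b))   # vdot conjugates a
    assert not np.isclose(np.vdot(a, b), np.dot(a, b))        # plain dot does not
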
+
+
+@overload(np.dot)
+def dot_2(left, right):
+    """
+    np.dot(a, b)
+    """
+    return dot_2_impl("np.dot()", left, right)
+
+
+@overload(operator.matmul)
+def matmul_2(left, right):
+    """
+    a @ b
+    """
+    return dot_2_impl("'@'", left, right)
+
+
+def dot_2_impl(name, left, right):
+    if isinstance(left, types.Array) and isinstance(right, types.Array):
+
+        @intrinsic
+        def _impl(typingcontext, left, right):
+            ndims = (left.ndim, right.ndim)
+
+            def _dot2_codegen(context, builder, sig, args):
+                ensure_blas()
+
+                with make_contiguous(context, builder, sig, args) as (
+                    sig,
+                    args,
+                ):
+                    if ndims == (2, 2):
+                        return dot_2_mm(context, builder, sig, args)
+                    elif ndims == (2, 1):
+                        return dot_2_mv(context, builder, sig, args)
+                    elif ndims == (1, 2):
+                        return dot_2_vm(context, builder, sig, args)
+                    elif ndims == (1, 1):
+                        return dot_2_vv(context, builder, sig, args)
+                    else:
+                        raise AssertionError("unreachable")
+
+            if left.dtype != right.dtype:
+                raise TypingError(
+                    "%s arguments must all have the same dtype" % name
+                )
+
+            if ndims == (2, 2):
+                return_type = types.Array(left.dtype, 2, "C")
+            elif ndims == (2, 1) or ndims == (1, 2):
+                return_type = types.Array(left.dtype, 1, "C")
+            elif ndims == (1, 1):
+                return_type = left.dtype
+            else:
+                raise TypingError(
+                    ("%s: inputs must have compatible dimensions") % name
+                )
+            return signature(return_type, left, right), _dot2_codegen
+
+        if left.layout not in "CF" or right.layout not in "CF":
+            warnings.warn(
+                "%s is faster on contiguous arrays, called on %s"
+                % (
+                    name,
+                    (left, right),
+                ),
+                NumbaPerformanceWarning,
+            )
+
+        return lambda left, right: _impl(left, right)
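
[Aside, not part of the diff] dot_2_impl only admits array operands with identical dtypes and derives the result type from the operand ranks: (2, 2) yields a C-ordered 2-D array, (2, 1) and (1, 2) yield a 1-D array, (1, 1) yields a scalar, and anything else is a typing error; non-contiguous inputs are accepted but only raise a NumbaPerformanceWarning. The same rank-to-shape rules stated in plain NumPy:

    import numpy as np

    M = np.ones((3, 4))
    N = np.ones((4, 2))
    v = np.ones(4)

    assert np.dot(M, N).shape == (3, 2)   # ndims (2, 2) -> 2-D result
    assert np.dot(M, v).shape == (3,)     # ndims (2, 1) -> 1-D result
    assert np.dot(v, N).shape == (2,)     # ndims (1, 2) -> 1-D result
    assert np.ndim(np.dot(v, v)) == 0     # ndims (1, 1) -> scalar
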
+
+
+@overload(np.vdot)
+def vdot(left, right):
+    """
+    np.vdot(a, b)
+    """
+    if isinstance(left, types.Array) and isinstance(right, types.Array):
+
+        @intrinsic
+        def _impl(typingcontext, left, right):
+            def codegen(context, builder, sig, args):
+                ensure_blas()
+
+                with make_contiguous(context, builder, sig, args) as (
+                    sig,
+                    args,
+                ):
+                    return dot_2_vv(context, builder, sig, args, conjugate=True)
+
+            if left.ndim != 1 or right.ndim != 1:
+                raise TypingError("np.vdot() only supported on 1-D arrays")
+
+            if left.dtype != right.dtype:
+                raise TypingError(
+                    "np.vdot() arguments must all have the same dtype"
+                )
+            return signature(left.dtype, left, right), codegen
+
+        if left.layout not in "CF" or right.layout not in "CF":
+            warnings.warn(
+                "np.vdot() is faster on contiguous arrays, called on %s"
+                % ((left, right),),
+                NumbaPerformanceWarning,
+            )
+
+        return lambda left, right: _impl(left, right)
+
+
+def dot_3_vm_check_args(a, b, out):
+    (m,) = a.shape
+    _m, n = b.shape
+    if m != _m:
+        raise ValueError(
+            "incompatible array sizes for np.dot(a, b) (vector * matrix)"
+        )
+    if out.shape != (n,):
+        raise ValueError(
+            "incompatible output array size for "
+            "np.dot(a, b, out) (vector * matrix)"
+        )
+
+
+def dot_3_mv_check_args(a, b, out):
+    m, _n = a.shape
+    (n,) = b.shape
+    if n != _n:
+        raise ValueError(
+            "incompatible array sizes for np.dot(a, b) (matrix * vector)"
+        )
+    if out.shape != (m,):
+        raise ValueError(
+            "incompatible output array size for "
+            "np.dot(a, b, out) (matrix * vector)"
+        )
+
+
+def dot_3_vm(context, builder, sig, args):
+    """
+    np.dot(vector, matrix, out)
+    np.dot(matrix, vector, out)
+    """
+    xty, yty, outty = sig.args
+    assert outty == sig.return_type
+
+    x = make_array(xty)(context, builder, args[0])
+    y = make_array(yty)(context, builder, args[1])
+    out = make_array(outty)(context, builder, args[2])
+    x_shapes = cgutils.unpack_tuple(builder, x.shape)
+    y_shapes = cgutils.unpack_tuple(builder, y.shape)
+    out_shapes = cgutils.unpack_tuple(builder, out.shape)  # noqa: F841
+    if xty.ndim < yty.ndim:
+        # Vector * matrix
+        # Asked for x * y, we will compute y.T * x
+        mty = yty
+        m_shapes = y_shapes
+        v_shape = x_shapes[0]
+        lda = m_shapes[1]
+        do_trans = yty.layout == "F"
+        m_data, v_data = y.data, x.data
+        check_args = dot_3_vm_check_args
+    else:
+        # Matrix * vector
+        # We will compute x * y
+        mty = xty
+        m_shapes = x_shapes
+        v_shape = y_shapes[0]
+        lda = m_shapes[0]
+        do_trans = xty.layout == "C"
+        m_data, v_data = x.data, y.data
+        check_args = dot_3_mv_check_args
+
+    context.compile_internal(
+        builder, check_args, signature(types.none, *sig.args), args
+    )
+    for val in m_shapes:
+        check_c_int(context, builder, val)
+
+    zero = context.get_constant(types.intp, 0)
+    both_empty = builder.icmp_signed("==", v_shape, zero)
+    matrix_empty = builder.icmp_signed("==", lda, zero)
+    is_empty = builder.or_(both_empty, matrix_empty)
+    with builder.if_else(is_empty, likely=False) as (empty, nonempty):
+        with empty:
+            cgutils.memset(
+                builder, out.data, builder.mul(out.itemsize, out.nitems), 0
+            )
+        with nonempty:
+            call_xxgemv(
+                context,
+                builder,
+                do_trans,
+                mty,
+                m_shapes,
+                m_data,
+                v_data,
+                out.data,
+            )
+
+    return impl_ret_borrowed(context, builder, sig.return_type, out._getvalue())
+
+
+def dot_3_mm(context, builder, sig, args):
+    """
+    np.dot(matrix, matrix, out)
+    """
+    xty, yty, outty = sig.args
+    assert outty == sig.return_type
+    dtype = xty.dtype
+
+    x = make_array(xty)(context, builder, args[0])
+    y = make_array(yty)(context, builder, args[1])
+    out = make_array(outty)(context, builder, args[2])
+    x_shapes = cgutils.unpack_tuple(builder, x.shape)
+    y_shapes = cgutils.unpack_tuple(builder, y.shape)
+    out_shapes = cgutils.unpack_tuple(builder, out.shape)
+    m, k = x_shapes
+    _k, n = y_shapes
+
+    # The only case Numpy supports
+    assert outty.layout == "C"
+
+    def check_args(a, b, out):
+        m, k = a.shape
+        _k, n = b.shape
+        if k != _k:
+            raise ValueError(
+                "incompatible array sizes for np.dot(a, b) (matrix * matrix)"
+            )
+        if out.shape != (m, n):
+            raise ValueError(
+                "incompatible output array size for "
+                "np.dot(a, b, out) (matrix * matrix)"
+            )
+
+    context.compile_internal(
+        builder, check_args, signature(types.none, *sig.args), args
+    )
+
+    check_c_int(context, builder, m)
+    check_c_int(context, builder, k)
+    check_c_int(context, builder, n)
+
+    x_data = x.data
+    y_data = y.data
+    out_data = out.data
+
+    # If eliminated dimension is zero, set all entries to zero and return
+    zero = context.get_constant(types.intp, 0)
+    both_empty = builder.icmp_signed("==", k, zero)
+    x_empty = builder.icmp_signed("==", m, zero)
+    y_empty = builder.icmp_signed("==", n, zero)
+    is_empty = builder.or_(both_empty, builder.or_(x_empty, y_empty))
+    with builder.if_else(is_empty, likely=False) as (empty, nonempty):
+        with empty:
+            cgutils.memset(
+                builder, out.data, builder.mul(out.itemsize, out.nitems), 0
+            )
+        with nonempty:
+            # Check if any of the operands is really a 1-d vector represented
+            # as a (1, k) or (k, 1) 2-d array. In those cases, it is pessimal
+            # to call the generic matrix * matrix product BLAS function.
+            one = context.get_constant(types.intp, 1)
+            is_left_vec = builder.icmp_signed("==", m, one)
+            is_right_vec = builder.icmp_signed("==", n, one)
+
+            with builder.if_else(is_right_vec) as (r_vec, r_mat):
+                with r_vec:
+                    with builder.if_else(is_left_vec) as (v_v, m_v):
+                        with v_v:
+                            # V * V
+                            call_xxdot(
+                                context,
+                                builder,
+                                False,
+                                dtype,
+                                k,
+                                x_data,
+                                y_data,
+                                out_data,
+                            )
+                        with m_v:
+                            # M * V
+                            do_trans = xty.layout == outty.layout
+                            call_xxgemv(
+                                context,
+                                builder,
+                                do_trans,
+                                xty,
+                                x_shapes,
+                                x_data,
+                                y_data,
+                                out_data,
+                            )
+                with r_mat:
+                    with builder.if_else(is_left_vec) as (v_m, m_m):
+                        with v_m:
+                            # V * M
+                            do_trans = yty.layout != outty.layout
+                            call_xxgemv(
+                                context,
+                                builder,
+                                do_trans,
+                                yty,
+                                y_shapes,
+                                y_data,
+                                x_data,
+                                out_data,
+                            )
+                        with m_m:
+                            # M * M
+                            call_xxgemm(
+                                context,
+                                builder,
+                                xty,
+                                x_shapes,
+                                x_data,
+                                yty,
+                                y_shapes,
+                                y_data,
+                                outty,
+                                out_shapes,
+                                out_data,
+                            )
+
+    return impl_ret_borrowed(context, builder, sig.return_type, out._getvalue())
935
|
+
|
|
936
|
+
|
|
937
|
+
@overload(np.dot)
|
|
938
|
+
def dot_3(left, right, out):
|
|
939
|
+
"""
|
|
940
|
+
np.dot(a, b, out)
|
|
941
|
+
"""
|
|
942
|
+
if (
|
|
943
|
+
isinstance(left, types.Array)
|
|
944
|
+
and isinstance(right, types.Array)
|
|
945
|
+
and isinstance(out, types.Array)
|
|
946
|
+
):
|
|
947
|
+
|
|
948
|
+
@intrinsic
|
|
949
|
+
def _impl(typingcontext, left, right, out):
|
|
950
|
+
def codegen(context, builder, sig, args):
|
|
951
|
+
ensure_blas()
|
|
952
|
+
|
|
953
|
+
with make_contiguous(context, builder, sig, args) as (
|
|
954
|
+
sig,
|
|
955
|
+
args,
|
|
956
|
+
):
|
|
957
|
+
ndims = set(x.ndim for x in sig.args[:2])
|
|
958
|
+
if ndims == {2}:
|
|
959
|
+
return dot_3_mm(context, builder, sig, args)
|
|
960
|
+
elif ndims == {1, 2}:
|
|
961
|
+
return dot_3_vm(context, builder, sig, args)
|
|
962
|
+
else:
|
|
963
|
+
raise AssertionError("unreachable")
|
|
964
|
+
|
|
965
|
+
if left.dtype != right.dtype or left.dtype != out.dtype:
|
|
966
|
+
raise TypingError(
|
|
967
|
+
"np.dot() arguments must all have the same dtype"
|
|
968
|
+
)
|
|
969
|
+
|
|
970
|
+
return signature(out, left, right, out), codegen
|
|
971
|
+
|
|
972
|
+
if (
|
|
973
|
+
left.layout not in "CF"
|
|
974
|
+
or right.layout not in "CF"
|
|
975
|
+
or out.layout not in "CF"
|
|
976
|
+
):
|
|
977
|
+
warnings.warn(
|
|
978
|
+
"np.vdot() is faster on contiguous arrays, called on %s"
|
|
979
|
+
% ((left, right),),
|
|
980
|
+
NumbaPerformanceWarning,
|
|
981
|
+
)
|
|
982
|
+
|
|
983
|
+
return lambda left, right, out: _impl(left, right, out)
|
|
984
|
+
|
|
985
|
+
|
|
986
|
+
fatal_error_func = types.ExternalFunction("numba_fatal_error", types.intc())
|
|
987
|
+
|
|
988
|
+
|
|
989
|
+
@register_jitable
|
|
990
|
+
def _check_finite_matrix(a):
|
|
991
|
+
for v in np.nditer(a):
|
|
992
|
+
if not np.isfinite(v.item()):
|
|
993
|
+
raise np.linalg.LinAlgError("Array must not contain infs or NaNs.")
|
|
994
|
+
|
|
995
|
+
|
|
996
|
+
def _check_linalg_matrix(a, func_name, la_prefix=True):
|
|
997
|
+
# la_prefix is present as some functions, e.g. np.trace()
|
|
998
|
+
# are documented under "linear algebra" but aren't in the
|
|
999
|
+
# module
|
|
1000
|
+
prefix = "np.linalg" if la_prefix else "np"
|
|
1001
|
+
interp = (prefix, func_name)
|
|
1002
|
+
# Unpack optional type
|
|
1003
|
+
if isinstance(a, types.Optional):
|
|
1004
|
+
a = a.type
|
|
1005
|
+
if not isinstance(a, types.Array):
|
|
1006
|
+
msg = "%s.%s() only supported for array types" % interp
|
|
1007
|
+
raise TypingError(msg, highlighting=False)
|
|
1008
|
+
if not a.ndim == 2:
|
|
1009
|
+
msg = "%s.%s() only supported on 2-D arrays." % interp
|
|
1010
|
+
raise TypingError(msg, highlighting=False)
|
|
1011
|
+
if not isinstance(a.dtype, (types.Float, types.Complex)):
|
|
1012
|
+
msg = "%s.%s() only supported on float and complex arrays." % interp
|
|
1013
|
+
raise TypingError(msg, highlighting=False)
|
|
1014
|
+
|
|
1015
|
+
|
|
1016
|
+
def _check_homogeneous_types(func_name, *types):
|
|
1017
|
+
t0 = types[0].dtype
|
|
1018
|
+
for t in types[1:]:
|
|
1019
|
+
if t.dtype != t0:
|
|
1020
|
+
msg = (
|
|
1021
|
+
"np.linalg.%s() only supports inputs that have homogeneous dtypes."
|
|
1022
|
+
% func_name
|
|
1023
|
+
)
|
|
1024
|
+
raise TypingError(msg, highlighting=False)
|
|
1025
|
+
|
|
1026
|
+
|
|
1027
|
+
def _copy_to_fortran_order():
|
|
1028
|
+
pass
|
|
1029
|
+
|
|
1030
|
+
|
|
1031
|
+
@overload(_copy_to_fortran_order)
|
|
1032
|
+
def ol_copy_to_fortran_order(a):
|
|
1033
|
+
# This function copies the array 'a' into a new array with fortran order.
|
|
1034
|
+
# This exists because the copy routines don't take order flags yet.
|
|
1035
|
+
F_layout = a.layout == "F"
|
|
1036
|
+
A_layout = a.layout == "A"
|
|
1037
|
+
|
|
1038
|
+
def impl(a):
|
|
1039
|
+
if F_layout:
|
|
1040
|
+
# it's F ordered at compile time, just copy
|
|
1041
|
+
acpy = np.copy(a)
|
|
1042
|
+
elif A_layout:
|
|
1043
|
+
# decide based on runtime value
|
|
1044
|
+
flag_f = a.flags.f_contiguous
|
|
1045
|
+
if flag_f:
|
|
1046
|
+
# it's already F ordered, so copy but in a round about way to
|
|
1047
|
+
# ensure that the copy is also F ordered
|
|
1048
|
+
acpy = np.copy(a.T).T
|
|
1049
|
+
else:
|
|
1050
|
+
# it's something else ordered, so let asfortranarray deal with
|
|
1051
|
+
# copying and making it fortran ordered
|
|
1052
|
+
acpy = np.asfortranarray(a)
|
|
1053
|
+
else:
|
|
1054
|
+
# it's C ordered at compile time, asfortranarray it.
|
|
1055
|
+
acpy = np.asfortranarray(a)
|
|
1056
|
+
return acpy
|
|
1057
|
+
|
|
1058
|
+
return impl
|
|
1059
|
+
|
|
1060
|
+
|
|
1061
|
+
@register_jitable
|
|
1062
|
+
def _inv_err_handler(r):
|
|
1063
|
+
if r != 0:
|
|
1064
|
+
if r < 0:
|
|
1065
|
+
fatal_error_func()
|
|
1066
|
+
assert 0 # unreachable
|
|
1067
|
+
if r > 0:
|
|
1068
|
+
raise np.linalg.LinAlgError(
|
|
1069
|
+
"Matrix is singular to machine precision."
|
|
1070
|
+
)
|
|
1071
|
+
|
|
1072
|
+
|
|
1073
|
+
@register_jitable
|
|
1074
|
+
def _dummy_liveness_func(a):
|
|
1075
|
+
"""pass a list of variables to be preserved through dead code elimination"""
|
|
1076
|
+
return a[0]
|
|
1077
|
+
|
|
1078
|
+
|
|
1079
|
+
@overload(np.linalg.inv)
|
|
1080
|
+
def inv_impl(a):
|
|
1081
|
+
ensure_lapack()
|
|
1082
|
+
|
|
1083
|
+
_check_linalg_matrix(a, "inv")
|
|
1084
|
+
|
|
1085
|
+
numba_xxgetrf = _LAPACK().numba_xxgetrf(a.dtype)
|
|
1086
|
+
|
|
1087
|
+
numba_xxgetri = _LAPACK().numba_ez_xxgetri(a.dtype)
|
|
1088
|
+
|
|
1089
|
+
kind = ord(get_blas_kind(a.dtype, "inv"))
|
|
1090
|
+
|
|
1091
|
+
def inv_impl(a):
|
|
1092
|
+
n = a.shape[-1]
|
|
1093
|
+
if a.shape[-2] != n:
|
|
1094
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1095
|
+
raise np.linalg.LinAlgError(msg)
|
|
1096
|
+
|
|
1097
|
+
_check_finite_matrix(a)
|
|
1098
|
+
|
|
1099
|
+
acpy = _copy_to_fortran_order(a)
|
|
1100
|
+
|
|
1101
|
+
if n == 0:
|
|
1102
|
+
return acpy
|
|
1103
|
+
|
|
1104
|
+
ipiv = np.empty(n, dtype=F_INT_nptype)
|
|
1105
|
+
|
|
1106
|
+
r = numba_xxgetrf(kind, n, n, acpy.ctypes, n, ipiv.ctypes)
|
|
1107
|
+
_inv_err_handler(r)
|
|
1108
|
+
|
|
1109
|
+
r = numba_xxgetri(kind, n, acpy.ctypes, n, ipiv.ctypes)
|
|
1110
|
+
_inv_err_handler(r)
|
|
1111
|
+
|
|
1112
|
+
# help liveness analysis
|
|
1113
|
+
_dummy_liveness_func([acpy.size, ipiv.size])
|
|
1114
|
+
return acpy
|
|
1115
|
+
|
|
1116
|
+
return inv_impl
|
|
1117
|
+
|
|
1118
|
+
|
|
1119
|
+
@register_jitable
|
|
1120
|
+
def _handle_err_maybe_convergence_problem(r):
|
|
1121
|
+
if r != 0:
|
|
1122
|
+
if r < 0:
|
|
1123
|
+
fatal_error_func()
|
|
1124
|
+
assert 0 # unreachable
|
|
1125
|
+
if r > 0:
|
|
1126
|
+
raise ValueError("Internal algorithm failed to converge.")
|
|
1127
|
+
|
|
1128
|
+
|
|
1129
|
+
def _check_linalg_1_or_2d_matrix(a, func_name, la_prefix=True):
|
|
1130
|
+
# la_prefix is present as some functions, e.g. np.trace()
|
|
1131
|
+
# are documented under "linear algebra" but aren't in the
|
|
1132
|
+
# module
|
|
1133
|
+
prefix = "np.linalg" if la_prefix else "np"
|
|
1134
|
+
interp = (prefix, func_name)
|
|
1135
|
+
# checks that a matrix is 1 or 2D
|
|
1136
|
+
if not isinstance(a, types.Array):
|
|
1137
|
+
raise TypingError("%s.%s() only supported for array types " % interp)
|
|
1138
|
+
if not a.ndim <= 2:
|
|
1139
|
+
raise TypingError(
|
|
1140
|
+
"%s.%s() only supported on 1 and 2-D arrays " % interp
|
|
1141
|
+
)
|
|
1142
|
+
if not isinstance(a.dtype, (types.Float, types.Complex)):
|
|
1143
|
+
raise TypingError(
|
|
1144
|
+
"%s.%s() only supported on float and complex arrays." % interp
|
|
1145
|
+
)
|
|
1146
|
+
|
|
1147
|
+
|
|
1148
|
+
@overload(np.linalg.cholesky)
|
|
1149
|
+
def cho_impl(a):
|
|
1150
|
+
ensure_lapack()
|
|
1151
|
+
|
|
1152
|
+
_check_linalg_matrix(a, "cholesky")
|
|
1153
|
+
|
|
1154
|
+
numba_xxpotrf = _LAPACK().numba_xxpotrf(a.dtype)
|
|
1155
|
+
|
|
1156
|
+
kind = ord(get_blas_kind(a.dtype, "cholesky"))
|
|
1157
|
+
UP = ord("U")
|
|
1158
|
+
LO = ord("L") # noqa: F841
|
|
1159
|
+
|
|
1160
|
+
def cho_impl(a):
|
|
1161
|
+
n = a.shape[-1]
|
|
1162
|
+
if a.shape[-2] != n:
|
|
1163
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1164
|
+
raise np.linalg.LinAlgError(msg)
|
|
1165
|
+
|
|
1166
|
+
# The output is allocated in C order
|
|
1167
|
+
out = a.copy()
|
|
1168
|
+
|
|
1169
|
+
if n == 0:
|
|
1170
|
+
return out
|
|
1171
|
+
|
|
1172
|
+
# Pass UP since xxpotrf() operates in F order
|
|
1173
|
+
# The semantics ensure this works fine
|
|
1174
|
+
# (out is really its Hermitian in F order, but UP instructs
|
|
1175
|
+
# xxpotrf to compute the Hermitian of the upper triangle
|
|
1176
|
+
# => they cancel each other)
|
|
1177
|
+
r = numba_xxpotrf(kind, UP, n, out.ctypes, n)
|
|
1178
|
+
if r != 0:
|
|
1179
|
+
if r < 0:
|
|
1180
|
+
fatal_error_func()
|
|
1181
|
+
assert 0 # unreachable
|
|
1182
|
+
if r > 0:
|
|
1183
|
+
raise np.linalg.LinAlgError("Matrix is not positive definite.")
|
|
1184
|
+
# Zero out upper triangle, in F order
|
|
1185
|
+
for col in range(n):
|
|
1186
|
+
out[:col, col] = 0
|
|
1187
|
+
return out
|
|
1188
|
+
|
|
1189
|
+
return cho_impl
|
|
1190
|
+
|
|
1191
|
+
|
|
1192
|
+
@overload(np.linalg.eig)
|
|
1193
|
+
def eig_impl(a):
|
|
1194
|
+
ensure_lapack()
|
|
1195
|
+
|
|
1196
|
+
_check_linalg_matrix(a, "eig")
|
|
1197
|
+
|
|
1198
|
+
numba_ez_rgeev = _LAPACK().numba_ez_rgeev(a.dtype)
|
|
1199
|
+
numba_ez_cgeev = _LAPACK().numba_ez_cgeev(a.dtype)
|
|
1200
|
+
|
|
1201
|
+
kind = ord(get_blas_kind(a.dtype, "eig"))
|
|
1202
|
+
|
|
1203
|
+
JOBVL = ord("N")
|
|
1204
|
+
JOBVR = ord("V")
|
|
1205
|
+
|
|
1206
|
+
def real_eig_impl(a):
|
|
1207
|
+
"""
|
|
1208
|
+
eig() implementation for real arrays.
|
|
1209
|
+
"""
|
|
1210
|
+
n = a.shape[-1]
|
|
1211
|
+
if a.shape[-2] != n:
|
|
1212
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1213
|
+
raise np.linalg.LinAlgError(msg)
|
|
1214
|
+
|
|
1215
|
+
_check_finite_matrix(a)
|
|
1216
|
+
|
|
1217
|
+
acpy = _copy_to_fortran_order(a)
|
|
1218
|
+
|
|
1219
|
+
ldvl = 1
|
|
1220
|
+
ldvr = n
|
|
1221
|
+
wr = np.empty(n, dtype=a.dtype)
|
|
1222
|
+
wi = np.empty(n, dtype=a.dtype)
|
|
1223
|
+
vl = np.empty((n, ldvl), dtype=a.dtype)
|
|
1224
|
+
vr = np.empty((n, ldvr), dtype=a.dtype)
|
|
1225
|
+
|
|
1226
|
+
if n == 0:
|
|
1227
|
+
return (wr, vr.T)
|
|
1228
|
+
|
|
1229
|
+
r = numba_ez_rgeev(
|
|
1230
|
+
kind,
|
|
1231
|
+
JOBVL,
|
|
1232
|
+
JOBVR,
|
|
1233
|
+
n,
|
|
1234
|
+
acpy.ctypes,
|
|
1235
|
+
n,
|
|
1236
|
+
wr.ctypes,
|
|
1237
|
+
wi.ctypes,
|
|
1238
|
+
vl.ctypes,
|
|
1239
|
+
ldvl,
|
|
1240
|
+
vr.ctypes,
|
|
1241
|
+
ldvr,
|
|
1242
|
+
)
|
|
1243
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1244
|
+
|
|
1245
|
+
# By design numba does not support dynamic return types, however,
|
|
1246
|
+
# Numpy does. Numpy uses this ability in the case of returning
|
|
1247
|
+
# eigenvalues/vectors of a real matrix. The return type of
|
|
1248
|
+
# np.linalg.eig(), when operating on a matrix in real space
|
|
1249
|
+
# depends on the values present in the matrix itself (recalling
|
|
1250
|
+
# that eigenvalues are the roots of the characteristic polynomial
|
|
1251
|
+
# of the system matrix, which will by construction depend on the
|
|
1252
|
+
# values present in the system matrix). As numba cannot handle
|
|
1253
|
+
# the case of a runtime decision based domain change relative to
|
|
1254
|
+
# the input type, if it is required numba raises as below.
|
|
1255
|
+
if np.any(wi):
|
|
1256
|
+
raise ValueError("eig() argument must not cause a domain change.")
|
|
1257
|
+
|
|
1258
|
+
# put these in to help with liveness analysis,
|
|
1259
|
+
# `.ctypes` doesn't keep the vars alive
|
|
1260
|
+
_dummy_liveness_func([acpy.size, vl.size, vr.size, wr.size, wi.size])
|
|
1261
|
+
return (wr, vr.T)
|
|
1262
|
+
|
|
1263
|
+
def cmplx_eig_impl(a):
|
|
1264
|
+
"""
|
|
1265
|
+
eig() implementation for complex arrays.
|
|
1266
|
+
"""
|
|
1267
|
+
n = a.shape[-1]
|
|
1268
|
+
if a.shape[-2] != n:
|
|
1269
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1270
|
+
raise np.linalg.LinAlgError(msg)
|
|
1271
|
+
|
|
1272
|
+
_check_finite_matrix(a)
|
|
1273
|
+
|
|
1274
|
+
acpy = _copy_to_fortran_order(a)
|
|
1275
|
+
|
|
1276
|
+
ldvl = 1
|
|
1277
|
+
ldvr = n
|
|
1278
|
+
w = np.empty(n, dtype=a.dtype)
|
|
1279
|
+
vl = np.empty((n, ldvl), dtype=a.dtype)
|
|
1280
|
+
vr = np.empty((n, ldvr), dtype=a.dtype)
|
|
1281
|
+
|
|
1282
|
+
if n == 0:
|
|
1283
|
+
return (w, vr.T)
|
|
1284
|
+
|
|
1285
|
+
r = numba_ez_cgeev(
|
|
1286
|
+
kind,
|
|
1287
|
+
JOBVL,
|
|
1288
|
+
JOBVR,
|
|
1289
|
+
n,
|
|
1290
|
+
acpy.ctypes,
|
|
1291
|
+
n,
|
|
1292
|
+
w.ctypes,
|
|
1293
|
+
vl.ctypes,
|
|
1294
|
+
ldvl,
|
|
1295
|
+
vr.ctypes,
|
|
1296
|
+
ldvr,
|
|
1297
|
+
)
|
|
1298
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1299
|
+
|
|
1300
|
+
# put these in to help with liveness analysis,
|
|
1301
|
+
# `.ctypes` doesn't keep the vars alive
|
|
1302
|
+
_dummy_liveness_func([acpy.size, vl.size, vr.size, w.size])
|
|
1303
|
+
return (w, vr.T)
|
|
1304
|
+
|
|
1305
|
+
if isinstance(a.dtype, types.scalars.Complex):
|
|
1306
|
+
return cmplx_eig_impl
|
|
1307
|
+
else:
|
|
1308
|
+
return real_eig_impl
|
|
1309
|
+
|
|
1310
|
+
|
|
1311
|
+
@overload(np.linalg.eigvals)
|
|
1312
|
+
def eigvals_impl(a):
|
|
1313
|
+
ensure_lapack()
|
|
1314
|
+
|
|
1315
|
+
_check_linalg_matrix(a, "eigvals")
|
|
1316
|
+
|
|
1317
|
+
numba_ez_rgeev = _LAPACK().numba_ez_rgeev(a.dtype)
|
|
1318
|
+
numba_ez_cgeev = _LAPACK().numba_ez_cgeev(a.dtype)
|
|
1319
|
+
|
|
1320
|
+
kind = ord(get_blas_kind(a.dtype, "eigvals"))
|
|
1321
|
+
|
|
1322
|
+
JOBVL = ord("N")
|
|
1323
|
+
JOBVR = ord("N")
|
|
1324
|
+
|
|
1325
|
+
def real_eigvals_impl(a):
|
|
1326
|
+
"""
|
|
1327
|
+
eigvals() implementation for real arrays.
|
|
1328
|
+
"""
|
|
1329
|
+
n = a.shape[-1]
|
|
1330
|
+
if a.shape[-2] != n:
|
|
1331
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1332
|
+
raise np.linalg.LinAlgError(msg)
|
|
1333
|
+
|
|
1334
|
+
_check_finite_matrix(a)
|
|
1335
|
+
|
|
1336
|
+
acpy = _copy_to_fortran_order(a)
|
|
1337
|
+
|
|
1338
|
+
ldvl = 1
|
|
1339
|
+
ldvr = 1
|
|
1340
|
+
wr = np.empty(n, dtype=a.dtype)
|
|
1341
|
+
|
|
1342
|
+
if n == 0:
|
|
1343
|
+
return wr
|
|
1344
|
+
|
|
1345
|
+
wi = np.empty(n, dtype=a.dtype)
|
|
1346
|
+
|
|
1347
|
+
# not referenced but need setting for MKL null check
|
|
1348
|
+
vl = np.empty((1), dtype=a.dtype)
|
|
1349
|
+
vr = np.empty((1), dtype=a.dtype)
|
|
1350
|
+
|
|
1351
|
+
r = numba_ez_rgeev(
|
|
1352
|
+
kind,
|
|
1353
|
+
JOBVL,
|
|
1354
|
+
JOBVR,
|
|
1355
|
+
n,
|
|
1356
|
+
acpy.ctypes,
|
|
1357
|
+
n,
|
|
1358
|
+
wr.ctypes,
|
|
1359
|
+
wi.ctypes,
|
|
1360
|
+
vl.ctypes,
|
|
1361
|
+
ldvl,
|
|
1362
|
+
vr.ctypes,
|
|
1363
|
+
ldvr,
|
|
1364
|
+
)
|
|
1365
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1366
|
+
|
|
1367
|
+
# By design numba does not support dynamic return types, however,
|
|
1368
|
+
# Numpy does. Numpy uses this ability in the case of returning
|
|
1369
|
+
# eigenvalues/vectors of a real matrix. The return type of
|
|
1370
|
+
# np.linalg.eigvals(), when operating on a matrix in real space
|
|
1371
|
+
# depends on the values present in the matrix itself (recalling
|
|
1372
|
+
# that eigenvalues are the roots of the characteristic polynomial
|
|
1373
|
+
# of the system matrix, which will by construction depend on the
|
|
1374
|
+
# values present in the system matrix). As numba cannot handle
|
|
1375
|
+
# the case of a runtime decision based domain change relative to
|
|
1376
|
+
# the input type, if it is required numba raises as below.
|
|
1377
|
+
if np.any(wi):
|
|
1378
|
+
raise ValueError(
|
|
1379
|
+
"eigvals() argument must not cause a domain change."
|
|
1380
|
+
)
|
|
1381
|
+
|
|
1382
|
+
# put these in to help with liveness analysis,
|
|
1383
|
+
# `.ctypes` doesn't keep the vars alive
|
|
1384
|
+
_dummy_liveness_func([acpy.size, vl.size, vr.size, wr.size, wi.size])
|
|
1385
|
+
return wr
|
|
1386
|
+
|
|
1387
|
+
def cmplx_eigvals_impl(a):
|
|
1388
|
+
"""
|
|
1389
|
+
eigvals() implementation for complex arrays.
|
|
1390
|
+
"""
|
|
1391
|
+
n = a.shape[-1]
|
|
1392
|
+
if a.shape[-2] != n:
|
|
1393
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1394
|
+
raise np.linalg.LinAlgError(msg)
|
|
1395
|
+
|
|
1396
|
+
_check_finite_matrix(a)
|
|
1397
|
+
|
|
1398
|
+
acpy = _copy_to_fortran_order(a)
|
|
1399
|
+
|
|
1400
|
+
ldvl = 1
|
|
1401
|
+
ldvr = 1
|
|
1402
|
+
w = np.empty(n, dtype=a.dtype)
|
|
1403
|
+
|
|
1404
|
+
if n == 0:
|
|
1405
|
+
return w
|
|
1406
|
+
|
|
1407
|
+
vl = np.empty((1), dtype=a.dtype)
|
|
1408
|
+
vr = np.empty((1), dtype=a.dtype)
|
|
1409
|
+
|
|
1410
|
+
r = numba_ez_cgeev(
|
|
1411
|
+
kind,
|
|
1412
|
+
JOBVL,
|
|
1413
|
+
JOBVR,
|
|
1414
|
+
n,
|
|
1415
|
+
acpy.ctypes,
|
|
1416
|
+
n,
|
|
1417
|
+
w.ctypes,
|
|
1418
|
+
vl.ctypes,
|
|
1419
|
+
ldvl,
|
|
1420
|
+
vr.ctypes,
|
|
1421
|
+
ldvr,
|
|
1422
|
+
)
|
|
1423
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1424
|
+
|
|
1425
|
+
# put these in to help with liveness analysis,
|
|
1426
|
+
# `.ctypes` doesn't keep the vars alive
|
|
1427
|
+
_dummy_liveness_func([acpy.size, vl.size, vr.size, w.size])
|
|
1428
|
+
return w
|
|
1429
|
+
|
|
1430
|
+
if isinstance(a.dtype, types.scalars.Complex):
|
|
1431
|
+
return cmplx_eigvals_impl
|
|
1432
|
+
else:
|
|
1433
|
+
return real_eigvals_impl
|
|
1434
|
+
|
|
1435
|
+
|
|
1436
|
+
@overload(np.linalg.eigh)
|
|
1437
|
+
def eigh_impl(a):
|
|
1438
|
+
ensure_lapack()
|
|
1439
|
+
|
|
1440
|
+
_check_linalg_matrix(a, "eigh")
|
|
1441
|
+
|
|
1442
|
+
# convert typing floats to numpy floats for use in the impl
|
|
1443
|
+
w_type = getattr(a.dtype, "underlying_float", a.dtype)
|
|
1444
|
+
w_dtype = np_support.as_dtype(w_type)
|
|
1445
|
+
|
|
1446
|
+
numba_ez_xxxevd = _LAPACK().numba_ez_xxxevd(a.dtype)
|
|
1447
|
+
|
|
1448
|
+
kind = ord(get_blas_kind(a.dtype, "eigh"))
|
|
1449
|
+
|
|
1450
|
+
JOBZ = ord("V")
|
|
1451
|
+
UPLO = ord("L")
|
|
1452
|
+
|
|
1453
|
+
def eigh_impl(a):
|
|
1454
|
+
n = a.shape[-1]
|
|
1455
|
+
|
|
1456
|
+
if a.shape[-2] != n:
|
|
1457
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1458
|
+
raise np.linalg.LinAlgError(msg)
|
|
1459
|
+
|
|
1460
|
+
_check_finite_matrix(a)
|
|
1461
|
+
|
|
1462
|
+
acpy = _copy_to_fortran_order(a)
|
|
1463
|
+
|
|
1464
|
+
w = np.empty(n, dtype=w_dtype)
|
|
1465
|
+
|
|
1466
|
+
if n == 0:
|
|
1467
|
+
return (w, acpy)
|
|
1468
|
+
|
|
1469
|
+
r = numba_ez_xxxevd(
|
|
1470
|
+
kind, # kind
|
|
1471
|
+
JOBZ, # jobz
|
|
1472
|
+
UPLO, # uplo
|
|
1473
|
+
n, # n
|
|
1474
|
+
acpy.ctypes, # a
|
|
1475
|
+
n, # lda
|
|
1476
|
+
w.ctypes, # w
|
|
1477
|
+
)
|
|
1478
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1479
|
+
|
|
1480
|
+
# help liveness analysis
|
|
1481
|
+
_dummy_liveness_func([acpy.size, w.size])
|
|
1482
|
+
return (w, acpy)
|
|
1483
|
+
|
|
1484
|
+
return eigh_impl
|
|
1485
|
+
|
|
1486
|
+
|
|
1487
|
+
@overload(np.linalg.eigvalsh)
|
|
1488
|
+
def eigvalsh_impl(a):
|
|
1489
|
+
ensure_lapack()
|
|
1490
|
+
|
|
1491
|
+
_check_linalg_matrix(a, "eigvalsh")
|
|
1492
|
+
|
|
1493
|
+
# convert typing floats to numpy floats for use in the impl
|
|
1494
|
+
w_type = getattr(a.dtype, "underlying_float", a.dtype)
|
|
1495
|
+
w_dtype = np_support.as_dtype(w_type)
|
|
1496
|
+
|
|
1497
|
+
numba_ez_xxxevd = _LAPACK().numba_ez_xxxevd(a.dtype)
|
|
1498
|
+
|
|
1499
|
+
kind = ord(get_blas_kind(a.dtype, "eigvalsh"))
|
|
1500
|
+
|
|
1501
|
+
JOBZ = ord("N")
|
|
1502
|
+
UPLO = ord("L")
|
|
1503
|
+
|
|
1504
|
+
def eigvalsh_impl(a):
|
|
1505
|
+
n = a.shape[-1]
|
|
1506
|
+
|
|
1507
|
+
if a.shape[-2] != n:
|
|
1508
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
1509
|
+
raise np.linalg.LinAlgError(msg)
|
|
1510
|
+
|
|
1511
|
+
_check_finite_matrix(a)
|
|
1512
|
+
|
|
1513
|
+
acpy = _copy_to_fortran_order(a)
|
|
1514
|
+
|
|
1515
|
+
w = np.empty(n, dtype=w_dtype)
|
|
1516
|
+
|
|
1517
|
+
if n == 0:
|
|
1518
|
+
return w
|
|
1519
|
+
|
|
1520
|
+
r = numba_ez_xxxevd(
|
|
1521
|
+
kind, # kind
|
|
1522
|
+
JOBZ, # jobz
|
|
1523
|
+
UPLO, # uplo
|
|
1524
|
+
n, # n
|
|
1525
|
+
acpy.ctypes, # a
|
|
1526
|
+
n, # lda
|
|
1527
|
+
w.ctypes, # w
|
|
1528
|
+
)
|
|
1529
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1530
|
+
|
|
1531
|
+
# help liveness analysis
|
|
1532
|
+
_dummy_liveness_func([acpy.size, w.size])
|
|
1533
|
+
return w
|
|
1534
|
+
|
|
1535
|
+
return eigvalsh_impl
|
|
1536
|
+
|
|
1537
|
+
|
|
1538
|
+
@overload(np.linalg.svd)
|
|
1539
|
+
def svd_impl(a, full_matrices=1):
|
|
1540
|
+
ensure_lapack()
|
|
1541
|
+
|
|
1542
|
+
_check_linalg_matrix(a, "svd")
|
|
1543
|
+
|
|
1544
|
+
# convert typing floats to numpy floats for use in the impl
|
|
1545
|
+
s_type = getattr(a.dtype, "underlying_float", a.dtype)
|
|
1546
|
+
s_dtype = np_support.as_dtype(s_type)
|
|
1547
|
+
|
|
1548
|
+
numba_ez_gesdd = _LAPACK().numba_ez_gesdd(a.dtype)
|
|
1549
|
+
|
|
1550
|
+
kind = ord(get_blas_kind(a.dtype, "svd"))
|
|
1551
|
+
|
|
1552
|
+
JOBZ_A = ord("A")
|
|
1553
|
+
JOBZ_S = ord("S")
|
|
1554
|
+
|
|
1555
|
+
def svd_impl(a, full_matrices=1):
|
|
1556
|
+
n = a.shape[-1]
|
|
1557
|
+
m = a.shape[-2]
|
|
1558
|
+
|
|
1559
|
+
if n == 0 or m == 0:
|
|
1560
|
+
raise np.linalg.LinAlgError("Arrays cannot be empty")
|
|
1561
|
+
|
|
1562
|
+
_check_finite_matrix(a)
|
|
1563
|
+
|
|
1564
|
+
acpy = _copy_to_fortran_order(a)
|
|
1565
|
+
|
|
1566
|
+
ldu = m
|
|
1567
|
+
minmn = min(m, n)
|
|
1568
|
+
|
|
1569
|
+
if full_matrices:
|
|
1570
|
+
JOBZ = JOBZ_A
|
|
1571
|
+
ucol = m
|
|
1572
|
+
ldvt = n
|
|
1573
|
+
else:
|
|
1574
|
+
JOBZ = JOBZ_S
|
|
1575
|
+
ucol = minmn
|
|
1576
|
+
ldvt = minmn
|
|
1577
|
+
|
|
1578
|
+
u = np.empty((ucol, ldu), dtype=a.dtype)
|
|
1579
|
+
s = np.empty(minmn, dtype=s_dtype)
|
|
1580
|
+
vt = np.empty((n, ldvt), dtype=a.dtype)
|
|
1581
|
+
|
|
1582
|
+
r = numba_ez_gesdd(
|
|
1583
|
+
kind, # kind
|
|
1584
|
+
JOBZ, # jobz
|
|
1585
|
+
m, # m
|
|
1586
|
+
n, # n
|
|
1587
|
+
acpy.ctypes, # a
|
|
1588
|
+
m, # lda
|
|
1589
|
+
s.ctypes, # s
|
|
1590
|
+
u.ctypes, # u
|
|
1591
|
+
ldu, # ldu
|
|
1592
|
+
vt.ctypes, # vt
|
|
1593
|
+
ldvt, # ldvt
|
|
1594
|
+
)
|
|
1595
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1596
|
+
|
|
1597
|
+
# help liveness analysis
|
|
1598
|
+
_dummy_liveness_func([acpy.size, vt.size, u.size, s.size])
|
|
1599
|
+
return (u.T, s, vt.T)
|
|
1600
|
+
|
|
1601
|
+
return svd_impl
|
|
1602
|
+
|
|
1603
|
+
|
|
1604
|
+
@overload(np.linalg.qr)
|
|
1605
|
+
def qr_impl(a):
|
|
1606
|
+
ensure_lapack()
|
|
1607
|
+
|
|
1608
|
+
_check_linalg_matrix(a, "qr")
|
|
1609
|
+
|
|
1610
|
+
# Need two functions, the first computes R, storing it in the upper
|
|
1611
|
+
# triangle of A with the below diagonal part of A containing elementary
|
|
1612
|
+
# reflectors needed to construct Q. The second turns the below diagonal
|
|
1613
|
+
# entries of A into Q, storing Q in A (creates orthonormal columns from
|
|
1614
|
+
# the elementary reflectors).
|
|
1615
|
+
|
|
1616
|
+
numba_ez_geqrf = _LAPACK().numba_ez_geqrf(a.dtype)
|
|
1617
|
+
numba_ez_xxgqr = _LAPACK().numba_ez_xxgqr(a.dtype)
|
|
1618
|
+
|
|
1619
|
+
kind = ord(get_blas_kind(a.dtype, "qr"))
|
|
1620
|
+
|
|
1621
|
+
def qr_impl(a):
|
|
1622
|
+
n = a.shape[-1]
|
|
1623
|
+
m = a.shape[-2]
|
|
1624
|
+
|
|
1625
|
+
if n == 0 or m == 0:
|
|
1626
|
+
raise np.linalg.LinAlgError("Arrays cannot be empty")
|
|
1627
|
+
|
|
1628
|
+
_check_finite_matrix(a)
|
|
1629
|
+
|
|
1630
|
+
# copy A as it will be destroyed
|
|
1631
|
+
q = _copy_to_fortran_order(a)
|
|
1632
|
+
|
|
1633
|
+
minmn = min(m, n)
|
|
1634
|
+
tau = np.empty((minmn), dtype=a.dtype)
|
|
1635
|
+
|
|
1636
|
+
ret = numba_ez_geqrf(
|
|
1637
|
+
kind, # kind
|
|
1638
|
+
m, # m
|
|
1639
|
+
n, # n
|
|
1640
|
+
q.ctypes, # a
|
|
1641
|
+
m, # lda
|
|
1642
|
+
tau.ctypes, # tau
|
|
1643
|
+
)
|
|
1644
|
+
if ret < 0:
|
|
1645
|
+
fatal_error_func()
|
|
1646
|
+
assert 0 # unreachable
|
|
1647
|
+
|
|
1648
|
+
# pull out R, this is transposed because of Fortran
|
|
1649
|
+
r = np.zeros((n, minmn), dtype=a.dtype).T
|
|
1650
|
+
|
|
1651
|
+
# the triangle in R
|
|
1652
|
+
for i in range(minmn):
|
|
1653
|
+
for j in range(i + 1):
|
|
1654
|
+
r[j, i] = q[j, i]
|
|
1655
|
+
|
|
1656
|
+
# and the possible square in R
|
|
1657
|
+
for i in range(minmn, n):
|
|
1658
|
+
for j in range(minmn):
|
|
1659
|
+
r[j, i] = q[j, i]
|
|
1660
|
+
|
|
1661
|
+
ret = numba_ez_xxgqr(
|
|
1662
|
+
kind, # kind
|
|
1663
|
+
m, # m
|
|
1664
|
+
minmn, # n
|
|
1665
|
+
minmn, # k
|
|
1666
|
+
q.ctypes, # a
|
|
1667
|
+
m, # lda
|
|
1668
|
+
tau.ctypes, # tau
|
|
1669
|
+
)
|
|
1670
|
+
_handle_err_maybe_convergence_problem(ret)
|
|
1671
|
+
|
|
1672
|
+
# help liveness analysis
|
|
1673
|
+
_dummy_liveness_func([tau.size, q.size])
|
|
1674
|
+
return (q[:, :minmn], r)
|
|
1675
|
+
|
|
1676
|
+
return qr_impl
|
|
1677
|
+
|
|
1678
|
+
|
|
1679
|
+
# helpers and jitted specialisations required for np.linalg.lstsq
|
|
1680
|
+
# and np.linalg.solve. These functions have "system" in their name
|
|
1681
|
+
# as a differentiator.
|
|
1682
|
+
|
|
1683
|
+
|
|
1684
|
+
def _system_copy_in_b(bcpy, b, nrhs):
|
|
1685
|
+
"""
|
|
1686
|
+
Correctly copy 'b' into the 'bcpy' scratch space.
|
|
1687
|
+
"""
|
|
1688
|
+
raise NotImplementedError
|
|
1689
|
+
|
|
1690
|
+
|
|
1691
|
+
@overload(_system_copy_in_b)
|
|
1692
|
+
def _system_copy_in_b_impl(bcpy, b, nrhs):
|
|
1693
|
+
if b.ndim == 1:
|
|
1694
|
+
|
|
1695
|
+
def oneD_impl(bcpy, b, nrhs):
|
|
1696
|
+
bcpy[: b.shape[-1], 0] = b
|
|
1697
|
+
|
|
1698
|
+
return oneD_impl
|
|
1699
|
+
else:
|
|
1700
|
+
|
|
1701
|
+
def twoD_impl(bcpy, b, nrhs):
|
|
1702
|
+
bcpy[: b.shape[-2], :nrhs] = b
|
|
1703
|
+
|
|
1704
|
+
return twoD_impl
|
|
1705
|
+
|
|
1706
|
+
|
|
1707
|
+
def _system_compute_nrhs(b):
|
|
1708
|
+
"""
|
|
1709
|
+
Compute the number of right hand sides in the system of equations
|
|
1710
|
+
"""
|
|
1711
|
+
raise NotImplementedError
|
|
1712
|
+
|
|
1713
|
+
|
|
1714
|
+
@overload(_system_compute_nrhs)
|
|
1715
|
+
def _system_compute_nrhs_impl(b):
|
|
1716
|
+
if b.ndim == 1:
|
|
1717
|
+
|
|
1718
|
+
def oneD_impl(b):
|
|
1719
|
+
return 1
|
|
1720
|
+
|
|
1721
|
+
return oneD_impl
|
|
1722
|
+
else:
|
|
1723
|
+
|
|
1724
|
+
def twoD_impl(b):
|
|
1725
|
+
return b.shape[-1]
|
|
1726
|
+
|
|
1727
|
+
return twoD_impl
|
|
1728
|
+
|
|
1729
|
+
|
|
1730
|
+
def _system_check_dimensionally_valid(a, b):
|
|
1731
|
+
"""
|
|
1732
|
+
Check that AX=B style system input is dimensionally valid.
|
|
1733
|
+
"""
|
|
1734
|
+
raise NotImplementedError
|
|
1735
|
+
|
|
1736
|
+
|
|
1737
|
+
@overload(_system_check_dimensionally_valid)
|
|
1738
|
+
def _system_check_dimensionally_valid_impl(a, b):
|
|
1739
|
+
ndim = b.ndim
|
|
1740
|
+
if ndim == 1:
|
|
1741
|
+
|
|
1742
|
+
def oneD_impl(a, b):
|
|
1743
|
+
am = a.shape[-2]
|
|
1744
|
+
bm = b.shape[-1]
|
|
1745
|
+
if am != bm:
|
|
1746
|
+
raise np.linalg.LinAlgError(
|
|
1747
|
+
"Incompatible array sizes, system is not dimensionally valid."
|
|
1748
|
+
)
|
|
1749
|
+
|
|
1750
|
+
return oneD_impl
|
|
1751
|
+
else:
|
|
1752
|
+
|
|
1753
|
+
def twoD_impl(a, b):
|
|
1754
|
+
am = a.shape[-2]
|
|
1755
|
+
bm = b.shape[-2]
|
|
1756
|
+
if am != bm:
|
|
1757
|
+
raise np.linalg.LinAlgError(
|
|
1758
|
+
"Incompatible array sizes, system is not dimensionally valid."
|
|
1759
|
+
)
|
|
1760
|
+
|
|
1761
|
+
return twoD_impl
|
|
1762
|
+
|
|
1763
|
+
|
|
1764
|
+
def _system_check_non_empty(a, b):
|
|
1765
|
+
"""
|
|
1766
|
+
Check that AX=B style system input is not empty.
|
|
1767
|
+
"""
|
|
1768
|
+
raise NotImplementedError
|
|
1769
|
+
|
|
1770
|
+
|
|
1771
|
+
@overload(_system_check_non_empty)
|
|
1772
|
+
def _system_check_non_empty_impl(a, b):
|
|
1773
|
+
ndim = b.ndim
|
|
1774
|
+
if ndim == 1:
|
|
1775
|
+
|
|
1776
|
+
def oneD_impl(a, b):
|
|
1777
|
+
am = a.shape[-2]
|
|
1778
|
+
an = a.shape[-1]
|
|
1779
|
+
bm = b.shape[-1]
|
|
1780
|
+
if am == 0 or bm == 0 or an == 0:
|
|
1781
|
+
raise np.linalg.LinAlgError("Arrays cannot be empty")
|
|
1782
|
+
|
|
1783
|
+
return oneD_impl
|
|
1784
|
+
else:
|
|
1785
|
+
|
|
1786
|
+
def twoD_impl(a, b):
|
|
1787
|
+
am = a.shape[-2]
|
|
1788
|
+
an = a.shape[-1]
|
|
1789
|
+
bm = b.shape[-2]
|
|
1790
|
+
bn = b.shape[-1]
|
|
1791
|
+
if am == 0 or bm == 0 or an == 0 or bn == 0:
|
|
1792
|
+
raise np.linalg.LinAlgError("Arrays cannot be empty")
|
|
1793
|
+
|
|
1794
|
+
return twoD_impl
|
|
1795
|
+
|
|
1796
|
+
|
|
1797
|
+
def _lstsq_residual(b, n, nrhs):
|
|
1798
|
+
"""
|
|
1799
|
+
Compute the residual from the 'b' scratch space.
|
|
1800
|
+
"""
|
|
1801
|
+
raise NotImplementedError
|
|
1802
|
+
|
|
1803
|
+
|
|
1804
|
+
@overload(_lstsq_residual)
|
|
1805
|
+
def _lstsq_residual_impl(b, n, nrhs):
|
|
1806
|
+
ndim = b.ndim
|
|
1807
|
+
dtype = b.dtype
|
|
1808
|
+
real_dtype = np_support.as_dtype(getattr(dtype, "underlying_float", dtype))
|
|
1809
|
+
|
|
1810
|
+
if ndim == 1:
|
|
1811
|
+
if isinstance(dtype, (types.Complex)):
|
|
1812
|
+
|
|
1813
|
+
def cmplx_impl(b, n, nrhs):
|
|
1814
|
+
res = np.empty((1,), dtype=real_dtype)
|
|
1815
|
+
res[0] = np.sum(np.abs(b[n:, 0]) ** 2)
|
|
1816
|
+
return res
|
|
1817
|
+
|
|
1818
|
+
return cmplx_impl
|
|
1819
|
+
else:
|
|
1820
|
+
|
|
1821
|
+
def real_impl(b, n, nrhs):
|
|
1822
|
+
res = np.empty((1,), dtype=real_dtype)
|
|
1823
|
+
res[0] = np.sum(b[n:, 0] ** 2)
|
|
1824
|
+
return res
|
|
1825
|
+
|
|
1826
|
+
return real_impl
|
|
1827
|
+
else:
|
|
1828
|
+
assert ndim == 2
|
|
1829
|
+
if isinstance(dtype, (types.Complex)):
|
|
1830
|
+
|
|
1831
|
+
def cmplx_impl(b, n, nrhs):
|
|
1832
|
+
res = np.empty((nrhs), dtype=real_dtype)
|
|
1833
|
+
for k in range(nrhs):
|
|
1834
|
+
res[k] = np.sum(np.abs(b[n:, k]) ** 2)
|
|
1835
|
+
return res
|
|
1836
|
+
|
|
1837
|
+
return cmplx_impl
|
|
1838
|
+
else:
|
|
1839
|
+
|
|
1840
|
+
def real_impl(b, n, nrhs):
|
|
1841
|
+
res = np.empty((nrhs), dtype=real_dtype)
|
|
1842
|
+
for k in range(nrhs):
|
|
1843
|
+
res[k] = np.sum(b[n:, k] ** 2)
|
|
1844
|
+
return res
|
|
1845
|
+
|
|
1846
|
+
return real_impl
|
|
1847
|
+
|
|
1848
|
+
|
|
1849
|
+
def _lstsq_solution(b, bcpy, n):
|
|
1850
|
+
"""
|
|
1851
|
+
Extract 'x' (the lstsq solution) from the 'bcpy' scratch space.
|
|
1852
|
+
Note 'b' is only used to check the system input dimension...
|
|
1853
|
+
"""
|
|
1854
|
+
raise NotImplementedError
|
|
1855
|
+
|
|
1856
|
+
|
|
1857
|
+
@overload(_lstsq_solution)
|
|
1858
|
+
def _lstsq_solution_impl(b, bcpy, n):
|
|
1859
|
+
if b.ndim == 1:
|
|
1860
|
+
|
|
1861
|
+
def oneD_impl(b, bcpy, n):
|
|
1862
|
+
return bcpy.T.ravel()[:n]
|
|
1863
|
+
|
|
1864
|
+
return oneD_impl
|
|
1865
|
+
else:
|
|
1866
|
+
|
|
1867
|
+
def twoD_impl(b, bcpy, n):
|
|
1868
|
+
return bcpy[:n, :].copy()
|
|
1869
|
+
|
|
1870
|
+
return twoD_impl
|
|
1871
|
+
|
|
1872
|
+
|
|
1873
|
+
@overload(np.linalg.lstsq)
|
|
1874
|
+
def lstsq_impl(a, b, rcond=-1.0):
|
|
1875
|
+
ensure_lapack()
|
|
1876
|
+
|
|
1877
|
+
_check_linalg_matrix(a, "lstsq")
|
|
1878
|
+
|
|
1879
|
+
# B can be 1D or 2D.
|
|
1880
|
+
_check_linalg_1_or_2d_matrix(b, "lstsq")
|
|
1881
|
+
|
|
1882
|
+
_check_homogeneous_types("lstsq", a, b)
|
|
1883
|
+
|
|
1884
|
+
np_dt = np_support.as_dtype(a.dtype)
|
|
1885
|
+
nb_dt = a.dtype
|
|
1886
|
+
|
|
1887
|
+
# convert typing floats to np floats for use in the impl
|
|
1888
|
+
r_type = getattr(nb_dt, "underlying_float", nb_dt)
|
|
1889
|
+
real_dtype = np_support.as_dtype(r_type)
|
|
1890
|
+
|
|
1891
|
+
# lapack solver
|
|
1892
|
+
numba_ez_gelsd = _LAPACK().numba_ez_gelsd(a.dtype)
|
|
1893
|
+
|
|
1894
|
+
kind = ord(get_blas_kind(nb_dt, "lstsq"))
|
|
1895
|
+
|
|
1896
|
+
# The following functions select specialisations based on
|
|
1897
|
+
# information around 'b', a lot of this effort is required
|
|
1898
|
+
# as 'b' can be either 1D or 2D, and then there are
|
|
1899
|
+
# some optimisations available depending on real or complex
|
|
1900
|
+
# space.
|
|
1901
|
+
|
|
1902
|
+
def lstsq_impl(a, b, rcond=-1.0):
|
|
1903
|
+
n = a.shape[-1]
|
|
1904
|
+
m = a.shape[-2]
|
|
1905
|
+
nrhs = _system_compute_nrhs(b)
|
|
1906
|
+
|
|
1907
|
+
# check the systems have no inf or NaN
|
|
1908
|
+
_check_finite_matrix(a)
|
|
1909
|
+
_check_finite_matrix(b)
|
|
1910
|
+
|
|
1911
|
+
# check the system is not empty
|
|
1912
|
+
_system_check_non_empty(a, b)
|
|
1913
|
+
|
|
1914
|
+
# check the systems are dimensionally valid
|
|
1915
|
+
_system_check_dimensionally_valid(a, b)
|
|
1916
|
+
|
|
1917
|
+
minmn = min(m, n)
|
|
1918
|
+
maxmn = max(m, n)
|
|
1919
|
+
|
|
1920
|
+
# a is destroyed on exit, copy it
|
|
1921
|
+
acpy = _copy_to_fortran_order(a)
|
|
1922
|
+
|
|
1923
|
+
# b is overwritten on exit with the solution, copy allocate
|
|
1924
|
+
bcpy = np.empty((nrhs, maxmn), dtype=np_dt).T
|
|
1925
|
+
# specialised copy in due to b being 1 or 2D
|
|
1926
|
+
_system_copy_in_b(bcpy, b, nrhs)
|
|
1927
|
+
|
|
1928
|
+
# Allocate returns
|
|
1929
|
+
s = np.empty(minmn, dtype=real_dtype)
|
|
1930
|
+
rank_ptr = np.empty(1, dtype=np.int32)
|
|
1931
|
+
|
|
1932
|
+
r = numba_ez_gelsd(
|
|
1933
|
+
kind, # kind
|
|
1934
|
+
m, # m
|
|
1935
|
+
n, # n
|
|
1936
|
+
nrhs, # nrhs
|
|
1937
|
+
acpy.ctypes, # a
|
|
1938
|
+
m, # lda
|
|
1939
|
+
bcpy.ctypes, # a
|
|
1940
|
+
maxmn, # ldb
|
|
1941
|
+
s.ctypes, # s
|
|
1942
|
+
rcond, # rcond
|
|
1943
|
+
rank_ptr.ctypes, # rank
|
|
1944
|
+
)
|
|
1945
|
+
_handle_err_maybe_convergence_problem(r)
|
|
1946
|
+
|
|
1947
|
+
# set rank to that which was computed
|
|
1948
|
+
rank = rank_ptr[0]
|
|
1949
|
+
|
|
1950
|
+
# compute residuals
|
|
1951
|
+
if rank < n or m <= n:
|
|
1952
|
+
res = np.empty((0), dtype=real_dtype)
|
|
1953
|
+
else:
|
|
1954
|
+
# this requires additional dispatch as there's a faster
|
|
1955
|
+
# impl if the result is in the real domain (no abs() required)
|
|
1956
|
+
res = _lstsq_residual(bcpy, n, nrhs)
|
|
1957
|
+
|
|
1958
|
+
# extract 'x', the solution
|
|
1959
|
+
x = _lstsq_solution(b, bcpy, n)
|
|
1960
|
+
|
|
1961
|
+
# help liveness analysis
|
|
1962
|
+
_dummy_liveness_func([acpy.size, bcpy.size, s.size, rank_ptr.size])
|
|
1963
|
+
return (x, res, rank, s[:minmn])
|
|
1964
|
+
|
|
1965
|
+
return lstsq_impl
|
|
1966
|
+
|
|
1967
|
+
|
|
1968
|
+
def _solve_compute_return(b, bcpy):
|
|
1969
|
+
"""
|
|
1970
|
+
Extract 'x' (the solution) from the 'bcpy' scratch space.
|
|
1971
|
+
Note 'b' is only used to check the system input dimension...
|
|
1972
|
+
"""
|
|
1973
|
+
raise NotImplementedError
|
|
1974
|
+
|
|
1975
|
+
|
|
1976
|
+
@overload(_solve_compute_return)
|
|
1977
|
+
def _solve_compute_return_impl(b, bcpy):
|
|
1978
|
+
if b.ndim == 1:
|
|
1979
|
+
|
|
1980
|
+
def oneD_impl(b, bcpy):
|
|
1981
|
+
return bcpy.T.ravel()
|
|
1982
|
+
|
|
1983
|
+
return oneD_impl
|
|
1984
|
+
else:
|
|
1985
|
+
|
|
1986
|
+
def twoD_impl(b, bcpy):
|
|
1987
|
+
return bcpy
|
|
1988
|
+
|
|
1989
|
+
return twoD_impl
|
|
1990
|
+
|
|
1991
|
+
|
|
1992
|
+
@overload(np.linalg.solve)
|
|
1993
|
+
def solve_impl(a, b):
|
|
1994
|
+
ensure_lapack()
|
|
1995
|
+
|
|
1996
|
+
_check_linalg_matrix(a, "solve")
|
|
1997
|
+
_check_linalg_1_or_2d_matrix(b, "solve")
|
|
1998
|
+
|
|
1999
|
+
_check_homogeneous_types("solve", a, b)
|
|
2000
|
+
|
|
2001
|
+
np_dt = np_support.as_dtype(a.dtype)
|
|
2002
|
+
nb_dt = a.dtype
|
|
2003
|
+
|
|
2004
|
+
# the lapack solver
|
|
2005
|
+
numba_xgesv = _LAPACK().numba_xgesv(a.dtype)
|
|
2006
|
+
|
|
2007
|
+
kind = ord(get_blas_kind(nb_dt, "solve"))
|
|
2008
|
+
|
|
2009
|
+
def solve_impl(a, b):
|
|
2010
|
+
n = a.shape[-1]
|
|
2011
|
+
nrhs = _system_compute_nrhs(b)
|
|
2012
|
+
|
|
2013
|
+
# check the systems have no inf or NaN
|
|
2014
|
+
_check_finite_matrix(a)
|
|
2015
|
+
_check_finite_matrix(b)
|
|
2016
|
+
|
|
2017
|
+
# check the systems are dimensionally valid
|
|
2018
|
+
_system_check_dimensionally_valid(a, b)
|
|
2019
|
+
|
|
2020
|
+
# a is destroyed on exit, copy it
|
|
2021
|
+
acpy = _copy_to_fortran_order(a)
|
|
2022
|
+
|
|
2023
|
+
# b is overwritten on exit with the solution, copy allocate
|
|
2024
|
+
bcpy = np.empty((nrhs, n), dtype=np_dt).T
|
|
2025
|
+
if n == 0:
|
|
2026
|
+
return _solve_compute_return(b, bcpy)
|
|
2027
|
+
|
|
2028
|
+
# specialised copy in due to b being 1 or 2D
|
|
2029
|
+
_system_copy_in_b(bcpy, b, nrhs)
|
|
2030
|
+
|
|
2031
|
+
# allocate pivot array (needs to be fortran int size)
|
|
2032
|
+
ipiv = np.empty(n, dtype=F_INT_nptype)
|
|
2033
|
+
|
|
2034
|
+
r = numba_xgesv(
|
|
2035
|
+
kind, # kind
|
|
2036
|
+
n, # n
|
|
2037
|
+
nrhs, # nhrs
|
|
2038
|
+
acpy.ctypes, # a
|
|
2039
|
+
n, # lda
|
|
2040
|
+
ipiv.ctypes, # ipiv
|
|
2041
|
+
bcpy.ctypes, # b
|
|
2042
|
+
n, # ldb
|
|
2043
|
+
)
|
|
2044
|
+
_inv_err_handler(r)
|
|
2045
|
+
|
|
2046
|
+
# help liveness analysis
|
|
2047
|
+
_dummy_liveness_func([acpy.size, bcpy.size, ipiv.size])
|
|
2048
|
+
return _solve_compute_return(b, bcpy)
|
|
2049
|
+
|
|
2050
|
+
return solve_impl
|
|
2051
|
+
|
|
2052
|
+
|
|
2053
|
+
@overload(np.linalg.pinv)
|
|
2054
|
+
def pinv_impl(a, rcond=1.0e-15):
|
|
2055
|
+
ensure_lapack()
|
|
2056
|
+
|
|
2057
|
+
_check_linalg_matrix(a, "pinv")
|
|
2058
|
+
|
|
2059
|
+
# convert typing floats to numpy floats for use in the impl
|
|
2060
|
+
s_type = getattr(a.dtype, "underlying_float", a.dtype)
|
|
2061
|
+
s_dtype = np_support.as_dtype(s_type)
|
|
2062
|
+
|
|
2063
|
+
numba_ez_gesdd = _LAPACK().numba_ez_gesdd(a.dtype)
|
|
2064
|
+
|
|
2065
|
+
numba_xxgemm = _BLAS().numba_xxgemm(a.dtype)
|
|
2066
|
+
|
|
2067
|
+
kind = ord(get_blas_kind(a.dtype, "pinv"))
|
|
2068
|
+
JOB = ord("S")
|
|
2069
|
+
|
|
2070
|
+
# need conjugate transposes
|
|
2071
|
+
TRANSA = ord("C")
|
|
2072
|
+
TRANSB = ord("C")
|
|
2073
|
+
|
|
2074
|
+
# scalar constants
|
|
2075
|
+
dt = np_support.as_dtype(a.dtype)
|
|
2076
|
+
zero = np.array([0.0], dtype=dt)
|
|
2077
|
+
one = np.array([1.0], dtype=dt)
|
|
2078
|
+
|
|
2079
|
+
def pinv_impl(a, rcond=1.0e-15):
|
|
2080
|
+
# The idea is to build the pseudo-inverse via inverting the singular
|
|
2081
|
+
# value decomposition of a matrix `A`. Mathematically, this is roughly
|
|
2082
|
+
# A = U*S*V^H [The SV decomposition of A]
|
|
2083
|
+
# A^+ = V*(S^+)*U^H [The inverted SV decomposition of A]
|
|
2084
|
+
# where ^+ is pseudo inversion and ^H is Hermitian transpose.
|
|
2085
|
+
# As V and U are unitary, their inverses are simply their Hermitian
|
|
2086
|
+
# transpose. S has singular values on its diagonal and zero elsewhere,
|
|
2087
|
+
# it is inverted trivially by reciprocal of the diagonal values with
|
|
2088
|
+
# the exception that zero singular values remain as zero.
|
|
2089
|
+
#
|
|
2090
|
+
# The practical implementation can take advantage of a few things to
|
|
2091
|
+
# gain a few % performance increase:
|
|
2092
|
+
# * A is destroyed by the SVD algorithm from LAPACK so a copy is
|
|
2093
|
+
# required, this memory is exactly the right size in which to return
|
|
2094
|
+
# the pseudo-inverse and so can be reused for this purpose.
|
|
2095
|
+
# * The pseudo-inverse of S can be applied to either V or U^H, this
|
|
2096
|
+
# then leaves a GEMM operation to compute the inverse via either:
|
|
2097
|
+
# A^+ = (V*(S^+))*U^H
|
|
2098
|
+
# or
|
|
2099
|
+
# A^+ = V*((S^+)*U^H)
|
|
2100
|
+
# however application of S^+ to V^H or U is more convenient as they
|
|
2101
|
+
# are the result of the SVD algorithm. The application of the
|
|
2102
|
+
# diagonal system is just a matrix multiplication which results in a
|
|
2103
|
+
# row/column scaling (direction depending). To save effort, this
|
|
2104
|
+
# "matrix multiplication" is applied to the smallest of U or V^H and
|
|
2105
|
+
# only up to the point of "cut-off" (see next note) just as a direct
|
|
2106
|
+
# scaling.
|
|
2107
|
+
# * The cut-off level for application of S^+ can be used to reduce
|
|
2108
|
+
# total effort, this cut-off can come via rcond or may just naturally
|
|
2109
|
+
# be present as a result of zeros in the singular values. Regardless
|
|
2110
|
+
# there's no need to multiply by zeros in the application of S^+ to
|
|
2111
|
+
# V^H or U as above. Further, the GEMM operation can be shrunk in
|
|
2112
|
+
# effort by noting that the possible zero block generated by the
|
|
2113
|
+
# presence of zeros in S^+ has no effect apart from wasting cycles as
|
|
2114
|
+
# it is all fmadd()s where one operand is zero. The inner dimension
|
|
2115
|
+
# of the GEMM operation can therefore be set as shrunk accordingly!
|
|
2116
|
+
|
|
2117
|
+
n = a.shape[-1]
|
|
2118
|
+
m = a.shape[-2]
|
|
2119
|
+
|
|
2120
|
+
_check_finite_matrix(a)
|
|
2121
|
+
|
|
2122
|
+
acpy = _copy_to_fortran_order(a)
|
|
2123
|
+
|
|
2124
|
+
if m == 0 or n == 0:
|
|
2125
|
+
return acpy.T.ravel().reshape(a.shape).T
|
|
2126
|
+
|
|
2127
|
+
minmn = min(m, n)
|
|
2128
|
+
|
|
2129
|
+
u = np.empty((minmn, m), dtype=a.dtype)
|
|
2130
|
+
s = np.empty(minmn, dtype=s_dtype)
|
|
2131
|
+
vt = np.empty((n, minmn), dtype=a.dtype)
|
|
2132
|
+
|
|
2133
|
+
r = numba_ez_gesdd(
|
|
2134
|
+
kind, # kind
|
|
2135
|
+
JOB, # job
|
|
2136
|
+
m, # m
|
|
2137
|
+
n, # n
|
|
2138
|
+
acpy.ctypes, # a
|
|
2139
|
+
m, # lda
|
|
2140
|
+
s.ctypes, # s
|
|
2141
|
+
u.ctypes, # u
|
|
2142
|
+
m, # ldu
|
|
2143
|
+
vt.ctypes, # vt
|
|
2144
|
+
minmn, # ldvt
|
|
2145
|
+
)
|
|
2146
|
+
_handle_err_maybe_convergence_problem(r)
|
|
2147
|
+
|
|
2148
|
+
# Invert singular values under threshold. Also find the index of
|
|
2149
|
+
# the threshold value as this is the upper limit for the application
|
|
2150
|
+
# of the inverted singular values. Finding this value saves
|
|
2151
|
+
# multiplication by a block of zeros that would be created by the
|
|
2152
|
+
# application of these values to either U or V^H ahead of multiplying
|
|
2153
|
+
# them together. This is done by simply in BLAS parlance via
|
|
2154
|
+
# restricting the `k` dimension to `cut_idx` in `xgemm` whilst keeping
|
|
2155
|
+
# the leading dimensions correct.
|
|
2156
|
+
|
|
2157
|
+
cut_at = s[0] * rcond
|
|
2158
|
+
cut_idx = 0
|
|
2159
|
+
for k in range(minmn):
|
|
2160
|
+
if s[k] > cut_at:
|
|
2161
|
+
s[k] = 1.0 / s[k]
|
|
2162
|
+
cut_idx = k
|
|
2163
|
+
cut_idx += 1
|
|
2164
|
+
|
|
2165
|
+
# Use cut_idx so there's no scaling by 0.
|
|
2166
|
+
if m >= n:
|
|
2167
|
+
# U is largest so apply S^+ to V^H.
|
|
2168
|
+
for i in range(n):
|
|
2169
|
+
for j in range(cut_idx):
|
|
2170
|
+
vt[i, j] = vt[i, j] * s[j]
|
|
2171
|
+
else:
|
|
2172
|
+
# V^H is largest so apply S^+ to U.
|
|
2173
|
+
for i in range(cut_idx):
|
|
2174
|
+
s_local = s[i]
|
|
2175
|
+
for j in range(minmn):
|
|
2176
|
+
u[i, j] = u[i, j] * s_local
|
|
2177
|
+
|
|
2178
|
+
# Do (v^H)^H*U^H (obviously one of the matrices includes the S^+
|
|
2179
|
+
# scaling) and write back to acpy. Note the innner dimension of cut_idx
|
|
2180
|
+
# taking account of the possible zero block.
|
|
2181
|
+
# We can store the result in acpy, given we had to create it
|
|
2182
|
+
# for use in the SVD, and it is now redundant and the right size
|
|
2183
|
+
# but wrong shape.
|
|
2184
|
+
|
|
2185
|
+
r = numba_xxgemm(
|
|
2186
|
+
kind,
|
|
2187
|
+
TRANSA, # TRANSA
|
|
2188
|
+
TRANSB, # TRANSB
|
|
2189
|
+
n, # M
|
|
2190
|
+
m, # N
|
|
2191
|
+
cut_idx, # K
|
|
2192
|
+
one.ctypes, # ALPHA
|
|
2193
|
+
vt.ctypes, # A
|
|
2194
|
+
minmn, # LDA
|
|
2195
|
+
u.ctypes, # B
|
|
2196
|
+
m, # LDB
|
|
2197
|
+
zero.ctypes, # BETA
|
|
2198
|
+
acpy.ctypes, # C
|
|
2199
|
+
n, # LDC
|
|
2200
|
+
)
|
|
2201
|
+
|
|
2202
|
+
# help liveness analysis
|
|
2203
|
+
# acpy.size
|
|
2204
|
+
# vt.size
|
|
2205
|
+
# u.size
|
|
2206
|
+
# s.size
|
|
2207
|
+
# one.size
|
|
2208
|
+
# zero.size
|
|
2209
|
+
_dummy_liveness_func(
|
|
2210
|
+
[acpy.size, vt.size, u.size, s.size, one.size, zero.size]
|
|
2211
|
+
)
|
|
2212
|
+
return acpy.T.ravel().reshape(a.shape).T
|
|
2213
|
+
|
|
2214
|
+
return pinv_impl
|
|
2215
|
+
|
|
2216
|
+
|
|
2217
|
+
def _get_slogdet_diag_walker(a):
|
|
2218
|
+
"""
|
|
2219
|
+
Walks the diag of a LUP decomposed matrix
|
|
2220
|
+
uses that det(A) = prod(diag(lup(A)))
|
|
2221
|
+
and also that log(a)+log(b) = log(a*b)
|
|
2222
|
+
The return sign is adjusted based on the values found
|
|
2223
|
+
such that the log(value) stays in the real domain.
|
|
2224
|
+
"""
|
|
2225
|
+
if isinstance(a.dtype, types.Complex):
|
|
2226
|
+
|
|
2227
|
+
@register_jitable
|
|
2228
|
+
def cmplx_diag_walker(n, a, sgn):
|
|
2229
|
+
# walk diagonal
|
|
2230
|
+
csgn = sgn + 0.0j
|
|
2231
|
+
acc = 0.0
|
|
2232
|
+
for k in range(n):
|
|
2233
|
+
absel = np.abs(a[k, k])
|
|
2234
|
+
csgn = csgn * (a[k, k] / absel)
|
|
2235
|
+
acc = acc + np.log(absel)
|
|
2236
|
+
return (csgn, acc)
|
|
2237
|
+
|
|
2238
|
+
return cmplx_diag_walker
|
|
2239
|
+
else:
|
|
2240
|
+
|
|
2241
|
+
@register_jitable
|
|
2242
|
+
def real_diag_walker(n, a, sgn):
|
|
2243
|
+
# walk diagonal
|
|
2244
|
+
acc = 0.0
|
|
2245
|
+
for k in range(n):
|
|
2246
|
+
v = a[k, k]
|
|
2247
|
+
if v < 0.0:
|
|
2248
|
+
sgn = -sgn
|
|
2249
|
+
v = -v
|
|
2250
|
+
acc = acc + np.log(v)
|
|
2251
|
+
# sgn is a float dtype
|
|
2252
|
+
return (sgn + 0.0, acc)
|
|
2253
|
+
|
|
2254
|
+
return real_diag_walker
|
|
2255
|
+
|
|
2256
|
+
|
|
2257
|
+
@overload(np.linalg.slogdet)
|
|
2258
|
+
def slogdet_impl(a):
|
|
2259
|
+
ensure_lapack()
|
|
2260
|
+
|
|
2261
|
+
_check_linalg_matrix(a, "slogdet")
|
|
2262
|
+
|
|
2263
|
+
numba_xxgetrf = _LAPACK().numba_xxgetrf(a.dtype)
|
|
2264
|
+
|
|
2265
|
+
kind = ord(get_blas_kind(a.dtype, "slogdet"))
|
|
2266
|
+
|
|
2267
|
+
diag_walker = _get_slogdet_diag_walker(a)
|
|
2268
|
+
|
|
2269
|
+
ONE = a.dtype(1)
|
|
2270
|
+
ZERO = getattr(a.dtype, "underlying_float", a.dtype)(0)
|
|
2271
|
+
|
|
2272
|
+
def slogdet_impl(a):
|
|
2273
|
+
n = a.shape[-1]
|
|
2274
|
+
if a.shape[-2] != n:
|
|
2275
|
+
msg = "Last 2 dimensions of the array must be square."
|
|
2276
|
+
raise np.linalg.LinAlgError(msg)
|
|
2277
|
+
|
|
2278
|
+
if n == 0:
|
|
2279
|
+
return (ONE, ZERO)
|
|
2280
|
+
|
|
2281
|
+
_check_finite_matrix(a)
|
|
2282
|
+
|
|
2283
|
+
acpy = _copy_to_fortran_order(a)
|
|
2284
|
+
|
|
2285
|
+
ipiv = np.empty(n, dtype=F_INT_nptype)
|
|
2286
|
+
|
|
2287
|
+
r = numba_xxgetrf(kind, n, n, acpy.ctypes, n, ipiv.ctypes)
|
|
2288
|
+
|
|
2289
|
+
if r > 0:
|
|
2290
|
+
# factorisation failed, return same defaults as np
|
|
2291
|
+
return (0.0, -np.inf)
|
|
2292
|
+
_inv_err_handler(r) # catch input-to-lapack problem
|
|
2293
|
+
|
|
2294
|
+
# The following, prior to the call to diag_walker, is present
|
|
2295
|
+
# to account for the effect of possible permutations to the
|
|
2296
|
+
# sign of the determinant.
|
|
2297
|
+
# This is the same idea as in numpy:
|
|
2298
|
+
# File name `umath_linalg.c.src` e.g.
|
|
2299
|
+
# https://github.com/numpy/numpy/blob/master/numpy/linalg/umath_linalg.c.src
|
|
2300
|
+
# in function `@TYPE@_slogdet_single_element`.
|
|
2301
|
+
sgn = 1
|
|
2302
|
+
for k in range(n):
|
|
2303
|
+
sgn = sgn + (ipiv[k] != (k + 1))
|
|
2304
|
+
|
|
2305
|
+
sgn = sgn & 1
|
|
2306
|
+
if sgn == 0:
|
|
2307
|
+
sgn = -1
|
|
2308
|
+
|
|
2309
|
+
# help liveness analysis
|
|
2310
|
+
_dummy_liveness_func([ipiv.size])
|
|
2311
|
+
return diag_walker(n, acpy, sgn)
|
|
2312
|
+
|
|
2313
|
+
return slogdet_impl
|
|
2314
|
+
|
|
2315
|
+
|
|
2316
|
+
@overload(np.linalg.det)
|
|
2317
|
+
def det_impl(a):
|
|
2318
|
+
ensure_lapack()
|
|
2319
|
+
|
|
2320
|
+
_check_linalg_matrix(a, "det")
|
|
2321
|
+
|
|
2322
|
+
def det_impl(a):
|
|
2323
|
+
(sgn, slogdet) = np.linalg.slogdet(a)
|
|
2324
|
+
return sgn * np.exp(slogdet)
|
|
2325
|
+
|
|
2326
|
+
return det_impl
|
|
2327
|
+
|
|
2328
|
+
|
|
2329
|
+
def _compute_singular_values(a):
|
|
2330
|
+
"""
|
|
2331
|
+
Compute singular values of *a*.
|
|
2332
|
+
"""
|
|
2333
|
+
raise NotImplementedError
|
|
2334
|
+
|
|
2335
|
+
|
|
2336
|
+
@overload(_compute_singular_values)
|
|
2337
|
+
def _compute_singular_values_impl(a):
|
|
2338
|
+
"""
|
|
2339
|
+
Returns a function to compute singular values of `a`
|
|
2340
|
+
"""
|
|
2341
|
+
numba_ez_gesdd = _LAPACK().numba_ez_gesdd(a.dtype)
|
|
2342
|
+
|
|
2343
|
+
kind = ord(get_blas_kind(a.dtype, "svd"))
|
|
2344
|
+
|
|
2345
|
+
# Flag for "only compute `S`" to give to xgesdd
|
|
2346
|
+
JOBZ_N = ord("N")
|
|
2347
|
+
|
|
2348
|
+
nb_ret_type = getattr(a.dtype, "underlying_float", a.dtype)
|
|
2349
|
+
np_ret_type = np_support.as_dtype(nb_ret_type)
|
|
2350
|
+
np_dtype = np_support.as_dtype(a.dtype)
|
|
2351
|
+
|
|
2352
|
+
# These are not referenced in the computation but must be set
|
|
2353
|
+
# for MKL.
|
|
2354
|
+
u = np.empty((1, 1), dtype=np_dtype)
|
|
2355
|
+
vt = np.empty((1, 1), dtype=np_dtype)
|
|
2356
|
+
|
|
2357
|
+
def sv_function(a):
|
|
2358
|
+
"""
|
|
2359
|
+
Computes singular values.
|
|
2360
|
+
"""
|
|
2361
|
+
# Don't use the np.linalg.svd impl instead
|
|
2362
|
+
# call LAPACK to shortcut doing the "reconstruct
|
|
2363
|
+
# singular vectors from reflectors" step and just
|
|
2364
|
+
# get back the singular values.
|
|
2365
|
+
n = a.shape[-1]
|
|
2366
|
+
m = a.shape[-2]
|
|
2367
|
+
if m == 0 or n == 0:
|
|
2368
|
+
raise np.linalg.LinAlgError("Arrays cannot be empty")
|
|
2369
|
+
_check_finite_matrix(a)
|
|
2370
|
+
|
|
2371
|
+
ldu = m
|
|
2372
|
+
minmn = min(m, n)
|
|
2373
|
+
|
|
2374
|
+
# need to be >=1 but aren't referenced
|
|
2375
|
+
ucol = 1 # noqa: F841
|
|
2376
|
+
ldvt = 1
|
|
2377
|
+
|
|
2378
|
+
acpy = _copy_to_fortran_order(a)
|
|
2379
|
+
|
|
2380
|
+
# u and vt are not referenced however need to be
|
|
2381
|
+
# allocated (as done above) for MKL as it
|
|
2382
|
+
# checks for ref is nullptr.
|
|
2383
|
+
s = np.empty(minmn, dtype=np_ret_type)
|
|
2384
|
+
|
|
2385
|
+
r = numba_ez_gesdd(
|
|
2386
|
+
kind, # kind
|
|
2387
|
+
JOBZ_N, # jobz
|
|
2388
|
+
m, # m
|
|
2389
|
+
n, # n
|
|
2390
|
+
acpy.ctypes, # a
|
|
2391
|
+
m, # lda
|
|
2392
|
+
s.ctypes, # s
|
|
2393
|
+
u.ctypes, # u
|
|
2394
|
+
ldu, # ldu
|
|
2395
|
+
vt.ctypes, # vt
|
|
2396
|
+
ldvt, # ldvt
|
|
2397
|
+
)
|
|
2398
|
+
_handle_err_maybe_convergence_problem(r)
|
|
2399
|
+
|
|
2400
|
+
# help liveness analysis
|
|
2401
|
+
_dummy_liveness_func([acpy.size, vt.size, u.size, s.size])
|
|
2402
|
+
return s
|
|
2403
|
+
|
|
2404
|
+
return sv_function
|
|
2405
|
+
|
|
2406
|
+
|
|
2407
|
+
def _oneD_norm_2(a):
|
|
2408
|
+
"""
|
|
2409
|
+
Compute the L2-norm of 1D-array *a*.
|
|
2410
|
+
"""
|
|
2411
|
+
raise NotImplementedError
|
|
2412
|
+
|
|
2413
|
+
|
|
2414
|
+
@overload(_oneD_norm_2)
|
|
2415
|
+
def _oneD_norm_2_impl(a):
|
|
2416
|
+
nb_ret_type = getattr(a.dtype, "underlying_float", a.dtype)
|
|
2417
|
+
np_ret_type = np_support.as_dtype(nb_ret_type)
|
|
2418
|
+
|
|
2419
|
+
xxnrm2 = _BLAS().numba_xxnrm2(a.dtype)
|
|
2420
|
+
|
|
2421
|
+
kind = ord(get_blas_kind(a.dtype, "norm"))
|
|
2422
|
+
|
|
2423
|
+
def impl(a):
|
|
2424
|
+
# Just ignore order, calls are guarded to only come
|
|
2425
|
+
# from cases where order=None or order=2.
|
|
2426
|
+
n = len(a)
|
|
2427
|
+
# Call L2-norm routine from BLAS
|
|
2428
|
+
ret = np.empty((1,), dtype=np_ret_type)
|
|
2429
|
+
jmp = int(a.strides[0] / a.itemsize)
|
|
2430
|
+
r = xxnrm2(
|
|
2431
|
+
kind, # kind
|
|
2432
|
+
n, # n
|
|
2433
|
+
a.ctypes, # x
|
|
2434
|
+
jmp, # incx
|
|
2435
|
+
ret.ctypes, # result
|
|
2436
|
+
)
|
|
2437
|
+
if r < 0:
|
|
2438
|
+
fatal_error_func()
|
|
2439
|
+
assert 0 # unreachable
|
|
2440
|
+
|
|
2441
|
+
# help liveness analysis
|
|
2442
|
+
# ret.size
|
|
2443
|
+
# a.size
|
|
2444
|
+
_dummy_liveness_func([ret.size, a.size])
|
|
2445
|
+
return ret[0]
|
|
2446
|
+
|
|
2447
|
+
return impl
|
|
2448
|
+
|
|
2449
|
+
|
|
def _get_norm_impl(x, ord_flag):
    # This function is quite involved as norm supports a large
    # range of values to select different norm types via kwarg `ord`.
    # The implementation below branches on the dimension of the input
    # (1D or 2D). The default for `ord` is `None`, which requires
    # special handling in Numba; this is dealt with first in each of
    # the dimension branches. Following this, the various norms are
    # computed via code that is in most cases simply a loop version
    # of a ufunc-based version as found in NumPy.

    # The following is common to both 1D and 2D cases.
    # Convert typing floats to NumPy floats for use in the impl.
    # The return type is always a float; Numba differs from NumPy in
    # that it returns an input-precision-specific value whereas NumPy
    # always returns np.float64.
    nb_ret_type = getattr(x.dtype, "underlying_float", x.dtype)
    np_ret_type = np_support.as_dtype(nb_ret_type)

    np_dtype = np_support.as_dtype(x.dtype)  # noqa: F841
    xxnrm2 = _BLAS().numba_xxnrm2(x.dtype)  # noqa: F841
    kind = ord(get_blas_kind(x.dtype, "norm"))  # noqa: F841

    if x.ndim == 1:
        # 1D cases

        # handle "ord" being "None", must be done separately
        if ord_flag in (None, types.none):

            def oneD_impl(x, ord=None):
                return _oneD_norm_2(x)
        else:

            def oneD_impl(x, ord=None):
                n = len(x)

                # Shortcut to handle zero-length arrays.
                # This differs slightly from NumPy in that
                # NumPy raises a ValueError for kwarg
                # ord=+/-np.inf, as reduction operations like
                # max() and min() don't accept zero-length
                # arrays.
                if n == 0:
                    return 0.0

                # Note: on ord == 2
                # This is the same as for ord=="None", but because
                # "None" has to be handled specially this condition
                # is separated.
                if ord == 2:
                    return _oneD_norm_2(x)
                elif ord == np.inf:
                    # max(abs(x))
                    ret = abs(x[0])
                    for k in range(1, n):
                        val = abs(x[k])
                        if val > ret:
                            ret = val
                    return ret

                elif ord == -np.inf:
                    # min(abs(x))
                    ret = abs(x[0])
                    for k in range(1, n):
                        val = abs(x[k])
                        if val < ret:
                            ret = val
                    return ret

                elif ord == 0:
                    # sum(x != 0)
                    ret = 0.0
                    for k in range(n):
                        if x[k] != 0.0:
                            ret += 1.0
                    return ret

                elif ord == 1:
                    # sum(abs(x))
                    ret = 0.0
                    for k in range(n):
                        ret += abs(x[k])
                    return ret

                else:
                    # sum(abs(x)**ord)**(1./ord)
                    ret = 0.0
                    for k in range(n):
                        ret += abs(x[k]) ** ord
                    return ret ** (1.0 / ord)

        return oneD_impl

    elif x.ndim == 2:
        # 2D cases

        # handle "ord" being "None"
        if ord_flag in (None, types.none):
            # Force `x` to be C-order, so that we can take a contiguous
            # 1D view.
            if x.layout == "C":

                @register_jitable
                def array_prepare(x):
                    return x
            elif x.layout == "F":

                @register_jitable
                def array_prepare(x):
                    # Legal since L2(x) == L2(x.T)
                    return x.T
            else:

                @register_jitable
                def array_prepare(x):
                    return x.copy()

            # Compute the Frobenius norm, this is the L2,2 induced norm of `x`
            # which is the L2-norm of x.ravel() and so can be computed via BLAS
            def twoD_impl(x, ord=None):
                n = x.size
                if n == 0:
                    # reshape() currently doesn't support zero-sized arrays
                    return 0.0
                x_c = array_prepare(x)
                return _oneD_norm_2(x_c.reshape(n))
        else:
            # max value for this dtype
            max_val = np.finfo(np_ret_type.type).max

            def twoD_impl(x, ord=None):
                n = x.shape[-1]
                m = x.shape[-2]

                # Shortcut to handle zero-size arrays.
                # This differs slightly from NumPy in that
                # NumPy raises errors for some ord values
                # and in other cases returns zero.
                if x.size == 0:
                    return 0.0

                if ord == np.inf:
                    # max of sum of abs across rows
                    # max(sum(abs(x)), axis=1)
                    global_max = 0.0
                    for ii in range(m):
                        tmp = 0.0
                        for jj in range(n):
                            tmp += abs(x[ii, jj])
                        if tmp > global_max:
                            global_max = tmp
                    return global_max

                elif ord == -np.inf:
                    # min of sum of abs across rows
                    # min(sum(abs(x)), axis=1)
                    global_min = max_val
                    for ii in range(m):
                        tmp = 0.0
                        for jj in range(n):
                            tmp += abs(x[ii, jj])
                        if tmp < global_min:
                            global_min = tmp
                    return global_min
                elif ord == 1:
                    # max of sum of abs across cols
                    # max(sum(abs(x)), axis=0)
                    global_max = 0.0
                    for ii in range(n):
                        tmp = 0.0
                        for jj in range(m):
                            tmp += abs(x[jj, ii])
                        if tmp > global_max:
                            global_max = tmp
                    return global_max

                elif ord == -1:
                    # min of sum of abs across cols
                    # min(sum(abs(x)), axis=0)
                    global_min = max_val
                    for ii in range(n):
                        tmp = 0.0
                        for jj in range(m):
                            tmp += abs(x[jj, ii])
                        if tmp < global_min:
                            global_min = tmp
                    return global_min

                # Results via SVD, singular values are sorted on return
                # by definition.
                elif ord == 2:
                    # max SV
                    return _compute_singular_values(x)[0]
                elif ord == -2:
                    # min SV
                    return _compute_singular_values(x)[-1]
                else:
                    # replicate numpy error
                    raise ValueError("Invalid norm order for matrices.")

        return twoD_impl
    else:
        assert 0  # unreachable


@overload(np.linalg.norm)
def norm_impl(x, ord=None):
    ensure_lapack()

    _check_linalg_1_or_2d_matrix(x, "norm")

    return _get_norm_impl(x, ord)


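# [Editor's sketch, not part of the packaged file] The loop bodies above mirror
# NumPy's ufunc-based reductions for each `ord`. A quick host-side cross-check
# of the 1-D cases against plain NumPy (pure illustration, no Numba involved):
import numpy as np

_v = np.array([1.0, -2.0, 0.0, 4.0])
assert np.isclose(np.linalg.norm(_v, np.inf), np.max(np.abs(_v)))   # 4.0
assert np.isclose(np.linalg.norm(_v, -np.inf), np.min(np.abs(_v)))  # 0.0
assert np.isclose(np.linalg.norm(_v, 0), np.sum(_v != 0))           # 3.0
assert np.isclose(np.linalg.norm(_v, 1), np.sum(np.abs(_v)))        # 7.0
assert np.isclose(np.linalg.norm(_v, 3), np.sum(np.abs(_v) ** 3) ** (1.0 / 3.0))
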
@overload(np.linalg.cond)
def cond_impl(x, p=None):
    ensure_lapack()

    _check_linalg_matrix(x, "cond")

    def impl(x, p=None):
        # This is extracted for performance; numpy does approximately:
        # `condition = norm(x) * norm(inv(x))`
        # In the cases of `p == 2` or `p == -2` singular values are used
        # for computing norms. This costs numpy an svd of `x`, then an
        # inversion of `x` and another svd of `x`.
        # Below is a different approach, which also gives a more
        # accurate answer as there is no inversion involved.
        # Recall that the singular values of an inverted matrix are the
        # reciprocals of the singular values of the original matrix.
        # Therefore calling `svd(x)` once yields all the information
        # needed about both `x` and `inv(x)` without the cost or
        # potential loss of accuracy incurred through inversion.
        # For the case of `p == 2`, the result is just the ratio of
        # `largest singular value/smallest singular value`, and for the
        # case of `p == -2` the result is simply the
        # `smallest singular value/largest singular value`.
        # As a result of this, numba accepts non-square matrices as
        # input when p==+/-2 as well as when p==None.
        if p == 2 or p == -2 or p is None:
            s = _compute_singular_values(x)
            if p == 2 or p is None:
                r = np.divide(s[0], s[-1])
            else:
                r = np.divide(s[-1], s[0])
        else:  # cases np.inf, -np.inf, 1, -1
            norm_x = np.linalg.norm(x, p)
            norm_inv_x = np.linalg.norm(np.linalg.inv(x), p)
            r = norm_x * norm_inv_x
        # NumPy uses a NaN mask: if the input has a NaN, it will return NaN.
        # Numba bans NaN inputs through the use of _check_finite_matrix, but
        # this catches cases where NaN occurs through floating point use.
        if np.isnan(r):
            return np.inf
        else:
            return r

    return impl


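# [Editor's sketch, not part of the packaged file] The comment block above
# relies on the identity that the singular values of inv(x) are the
# reciprocals of the singular values of x, so cond_2(x) = s_max / s_min.
# A plain-NumPy illustration of the equivalence it exploits:
import numpy as np

_m = np.array([[2.0, 0.0], [0.0, 0.5]])
_s = np.linalg.svd(_m, compute_uv=False)  # sorted big -> small
assert np.isclose(_s[0] / _s[-1], 4.0)    # 2.0 / 0.5
assert np.isclose(np.linalg.cond(_m, 2),
                  np.linalg.norm(_m, 2) * np.linalg.norm(np.linalg.inv(_m), 2))
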
@register_jitable
def _get_rank_from_singular_values(sv, t):
    """
    Gets rank from singular values with cut-off at a given tolerance
    """
    rank = 0
    for k in range(len(sv)):
        if sv[k] > t:
            rank = rank + 1
        else:  # sv is ordered big->small so break on condition not met
            break
    return rank


@overload(np.linalg.matrix_rank)
def matrix_rank_impl(A, tol=None):
    """
    Computes rank for matrices and vectors.
    The only issue that may arise is that because numpy uses double
    precision lapack calls whereas numba uses type specific lapack
    calls, some singular values may differ and therefore counting the
    number of them above a tolerance may lead to different counts,
    and therefore rank, in some cases.
    """
    ensure_lapack()

    _check_linalg_1_or_2d_matrix(A, "matrix_rank")

    def _2d_matrix_rank_impl(A, tol):
        # handle the tol==None case separately for type inference to work
        if tol in (None, types.none):
            nb_type = getattr(A.dtype, "underlying_float", A.dtype)
            np_type = np_support.as_dtype(nb_type)
            eps_val = np.finfo(np_type).eps

            def _2d_tol_none_impl(A, tol=None):
                s = _compute_singular_values(A)
                # replicate numpy default tolerance calculation
                r = A.shape[0]
                c = A.shape[1]
                l = max(r, c)
                t = s[0] * l * eps_val
                return _get_rank_from_singular_values(s, t)

            return _2d_tol_none_impl
        else:

            def _2d_tol_not_none_impl(A, tol=None):
                s = _compute_singular_values(A)
                return _get_rank_from_singular_values(s, tol)

            return _2d_tol_not_none_impl

    def _get_matrix_rank_impl(A, tol):
        ndim = A.ndim
        if ndim == 1:
            # NOTE: Technically, the numpy implementation could be argued as
            # incorrect for the case of a vector (1D matrix). If a tolerance
            # is provided and a vector with a singular value below tolerance is
            # encountered this should report a rank of zero, the numpy
            # implementation does not do this and instead elects to report that
            # if any value in the vector is nonzero then the rank is 1.
            # An example would be [0, 1e-15, 0, 2e-15] which numpy reports as
            # rank 1 invariant of `tol`. The singular value for this vector is
            # obviously sqrt(5)*1e-15 and so a tol of e.g. sqrt(6)*1e-15 should
            # lead to a reported rank of 0 whereas a tol of 1e-15 should lead
            # to a reported rank of 1, numpy reports 1 regardless.
            # The code below replicates the numpy behaviour.
            def _1d_matrix_rank_impl(A, tol=None):
                for k in range(len(A)):
                    if A[k] != 0.0:
                        return 1
                return 0

            return _1d_matrix_rank_impl
        elif ndim == 2:
            return _2d_matrix_rank_impl(A, tol)
        else:
            assert 0  # unreachable

    return _get_matrix_rank_impl(A, tol)


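# [Editor's sketch, not part of the packaged file] The default tolerance used
# above mirrors NumPy: t = s_max * max(rows, cols) * eps(dtype), and the rank
# is the count of singular values above t. A small NumPy-only check:
import numpy as np

_a = np.array([[1.0, 0.0], [0.0, 1e-20], [0.0, 0.0]])
_s = np.linalg.svd(_a, compute_uv=False)
_t = _s[0] * max(_a.shape) * np.finfo(_a.dtype).eps
assert int(np.sum(_s > _t)) == 1
assert np.linalg.matrix_rank(_a) == 1
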
@overload(np.linalg.matrix_power)
def matrix_power_impl(a, n):
    """
    Computes matrix power. Only integer powers are supported in numpy.
    """

    _check_linalg_matrix(a, "matrix_power")
    np_dtype = np_support.as_dtype(a.dtype)

    nt = getattr(n, "dtype", n)
    if not isinstance(nt, types.Integer):
        raise NumbaTypeError("Exponent must be an integer.")

    def matrix_power_impl(a, n):
        if n == 0:
            # this should be eye() but it doesn't support
            # the dtype kwarg yet so do it manually to save
            # the copy required by eye(a.shape[0]).asdtype()
            A = np.zeros(a.shape, dtype=np_dtype)
            for k in range(a.shape[0]):
                A[k, k] = 1.0
            return A

        am, an = a.shape[-1], a.shape[-2]
        if am != an:
            raise ValueError("input must be a square array")

        # empty, return a copy
        if am == 0:
            return a.copy()

        # note: to be consistent over contiguousness, C order is
        # returned as that is what dot() produces and the most common
        # paths through matrix_power will involve that. Therefore
        # copies are made here to ensure the data ordering is
        # correct for paths not going via dot().

        if n < 0:
            A = np.linalg.inv(a).copy()
            if n == -1:  # return now
                return A
            n = -n
        else:
            if n == 1:  # return a copy now
                return a.copy()
            A = a  # this is safe, `a` is only read

        if n < 4:
            if n == 2:
                return np.dot(A, A)
            if n == 3:
                return np.dot(np.dot(A, A), A)
        else:
            acc = A
            exp = n

            # Initialise ret, SSA cannot see the loop will execute, without this
            # it appears as uninitialised.
            ret = acc
            # tried a loop split and branchless using identity matrix as
            # input but it seems like having a "first entry" flag is quicker
            flag = True
            while exp != 0:
                if exp & 1:
                    if flag:
                        ret = acc
                        flag = False
                    else:
                        ret = np.dot(ret, acc)
                acc = np.dot(acc, acc)
                exp = exp >> 1

            return ret

    return matrix_power_impl


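# [Editor's sketch, not part of the packaged file] For n >= 4 the code above
# uses exponentiation by squaring: the exponent is consumed bit by bit, the
# accumulator is squared each step, and it is folded into the result whenever
# the current bit is set (O(log n) matrix products). The vendored code avoids
# an identity-matrix start via a "first entry" flag; below is the simpler
# identity-based form of the same loop, checked against np.linalg.matrix_power:
import numpy as np

def _power_by_squaring(M, n):
    # square 2-D array, n >= 1; illustrative only
    ret = np.eye(M.shape[0], dtype=M.dtype)
    acc = M
    while n != 0:
        if n & 1:
            ret = np.dot(ret, acc)
        acc = np.dot(acc, acc)
        n >>= 1
    return ret

_M = np.array([[1.0, 1.0], [0.0, 1.0]])
assert np.allclose(_power_by_squaring(_M, 5), np.linalg.matrix_power(_M, 5))
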
# This is documented under linalg despite not being in the module


@overload(np.trace)
def matrix_trace_impl(a, offset=0):
    """
    Computes the trace of an array.
    """

    _check_linalg_matrix(a, "trace", la_prefix=False)

    if not isinstance(offset, (int, types.Integer)):
        raise NumbaTypeError("integer argument expected, got %s" % offset)

    def matrix_trace_impl(a, offset=0):
        rows, cols = a.shape
        k = offset
        if k < 0:
            rows = rows + k
        if k > 0:
            cols = cols - k
        n = max(min(rows, cols), 0)
        ret = 0
        if k >= 0:
            for i in range(n):
                ret += a[i, k + i]
        else:
            for i in range(n):
                ret += a[i - k, i]
        return ret

    return matrix_trace_impl


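# [Editor's sketch, not part of the packaged file] The offset handling above
# walks the k-th diagonal: offset > 0 shifts right along columns, offset < 0
# shifts down along rows. A tiny NumPy cross-check of that behaviour:
import numpy as np

_t = np.arange(9.0).reshape(3, 3)
assert np.trace(_t, offset=0) == _t[0, 0] + _t[1, 1] + _t[2, 2]  # 12.0
assert np.trace(_t, offset=1) == _t[0, 1] + _t[1, 2]             # 6.0
assert np.trace(_t, offset=-1) == _t[1, 0] + _t[2, 1]            # 10.0
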
def _check_scalar_or_lt_2d_mat(a, func_name, la_prefix=True):
    prefix = "np.linalg" if la_prefix else "np"
    interp = (prefix, func_name)
    # checks that a matrix is 1 or 2D
    if isinstance(a, types.Array):
        if not a.ndim <= 2:
            raise TypingError(
                "%s.%s() only supported on 1 and 2-D arrays " % interp,
                highlighting=False,
            )


@register_jitable
def outer_impl_none(a, b, out):
    aa = np.asarray(a)
    bb = np.asarray(b)
    return np.multiply(
        aa.ravel().reshape((aa.size, 1)), bb.ravel().reshape((1, bb.size))
    )


@register_jitable
def outer_impl_arr(a, b, out):
    aa = np.asarray(a)
    bb = np.asarray(b)
    np.multiply(
        aa.ravel().reshape((aa.size, 1)), bb.ravel().reshape((1, bb.size)), out
    )
    return out


def _get_outer_impl(a, b, out):
    if out in (None, types.none):
        return outer_impl_none
    else:
        return outer_impl_arr


@overload(np.outer)
def outer_impl(a, b, out=None):
    _check_scalar_or_lt_2d_mat(a, "outer", la_prefix=False)
    _check_scalar_or_lt_2d_mat(b, "outer", la_prefix=False)

    impl = _get_outer_impl(a, b, out)

    def outer_impl(a, b, out=None):
        return impl(a, b, out)

    return outer_impl


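# [Editor's sketch, not part of the packaged file] Both outer helpers above
# reduce np.outer to an element-wise multiply of a column vector by a row
# vector. A plain-NumPy statement of that equivalence:
import numpy as np

_oa = np.array([1.0, 2.0, 3.0])
_ob = np.array([10.0, 20.0])
_expected = _oa.ravel().reshape((_oa.size, 1)) * _ob.ravel().reshape((1, _ob.size))
assert np.array_equal(np.outer(_oa, _ob), _expected)
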
def _kron_normaliser_impl(x):
    # makes x into a 2d array
    if isinstance(x, types.Array):
        if x.layout not in ("C", "F"):
            raise TypingError(
                "np.linalg.kron only supports 'C' or 'F' layout "
                "input arrays. Received an input of "
                "layout '{}'.".format(x.layout)
            )
        elif x.ndim == 2:

            @register_jitable
            def nrm_shape(x):
                xn = x.shape[-1]
                xm = x.shape[-2]
                return x.reshape(xm, xn)

            return nrm_shape
        else:

            @register_jitable
            def nrm_shape(x):
                xn = x.shape[-1]
                return x.reshape(1, xn)

            return nrm_shape
    else:  # assume it's a scalar

        @register_jitable
        def nrm_shape(x):
            a = np.empty((1, 1), type(x))
            a[0] = x
            return a

        return nrm_shape


def _kron_return(a, b):
    # transforms c into something that kron would return
    # based on the shapes of a and b
    a_is_arr = isinstance(a, types.Array)
    b_is_arr = isinstance(b, types.Array)
    if a_is_arr and b_is_arr:
        if a.ndim == 2 or b.ndim == 2:

            @register_jitable
            def ret(a, b, c):
                return c

            return ret
        else:

            @register_jitable
            def ret(a, b, c):
                return c.reshape(c.size)

            return ret
    else:  # at least one of (a, b) is a scalar
        if a_is_arr:

            @register_jitable
            def ret(a, b, c):
                return c.reshape(a.shape)

            return ret
        elif b_is_arr:

            @register_jitable
            def ret(a, b, c):
                return c.reshape(b.shape)

            return ret
        else:  # both scalars

            @register_jitable
            def ret(a, b, c):
                return c[0]

            return ret


@overload(np.kron)
def kron_impl(a, b):
    _check_scalar_or_lt_2d_mat(a, "kron", la_prefix=False)
    _check_scalar_or_lt_2d_mat(b, "kron", la_prefix=False)

    fix_a = _kron_normaliser_impl(a)
    fix_b = _kron_normaliser_impl(b)
    ret_c = _kron_return(a, b)

    # this is fine because the ufunc for the Hadamard product
    # will reject differing dtypes in a and b.
    dt = getattr(a, "dtype", a)

    def kron_impl(a, b):
        aa = fix_a(a)
        bb = fix_b(b)

        am = aa.shape[-2]
        an = aa.shape[-1]
        bm = bb.shape[-2]
        bn = bb.shape[-1]

        cm = am * bm
        cn = an * bn

        # allocate c
        C = np.empty((cm, cn), dtype=dt)

        # In practice this runs quicker than the more obvious
        # `each element of A multiplied by B and assigned to
        # a block in C`-style algorithm.

        # loop over rows of A
        for i in range(am):
            # compute the column offset into C
            rjmp = i * bm
            # loop over rows of B
            for k in range(bm):
                # compute the row offset into C
                irjmp = rjmp + k
                # slice a given row of B
                slc = bb[k, :]
                # loop over columns of A
                for j in range(an):
                    # vectorized assignment of an element of A
                    # multiplied by the current row of B into
                    # a slice of a row of C
                    cjmp = j * bn
                    C[irjmp, cjmp : cjmp + bn] = aa[i, j] * slc

        return ret_c(a, b, C)

    return kron_impl
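
# [Editor's sketch, not part of the packaged file] The kron loop above writes
# each row of B, scaled by a single element of A, into a contiguous slice of a
# row of C, rather than assigning whole (bm x bn) blocks. A NumPy-only check
# that this indexing produces the usual Kronecker product layout:
import numpy as np

_A = np.array([[1.0, 2.0], [3.0, 4.0]])
_B = np.array([[0.0, 5.0], [6.0, 7.0]])
_C = np.empty((_A.shape[0] * _B.shape[0], _A.shape[1] * _B.shape[1]))
for _i in range(_A.shape[0]):
    for _k in range(_B.shape[0]):
        for _j in range(_A.shape[1]):
            _C[_i * _B.shape[0] + _k,
               _j * _B.shape[1]:(_j + 1) * _B.shape[1]] = _A[_i, _j] * _B[_k, :]
assert np.array_equal(_C, np.kron(_A, _B))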