numba-cuda 0.0.1__py3-none-any.whl → 0.0.12__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- _numba_cuda_redirector.pth +1 -0
- _numba_cuda_redirector.py +74 -0
- numba_cuda/VERSION +1 -0
- numba_cuda/__init__.py +5 -0
- numba_cuda/_version.py +19 -0
- numba_cuda/numba/cuda/__init__.py +22 -0
- numba_cuda/numba/cuda/api.py +526 -0
- numba_cuda/numba/cuda/api_util.py +30 -0
- numba_cuda/numba/cuda/args.py +77 -0
- numba_cuda/numba/cuda/cg.py +62 -0
- numba_cuda/numba/cuda/codegen.py +378 -0
- numba_cuda/numba/cuda/compiler.py +422 -0
- numba_cuda/numba/cuda/cpp_function_wrappers.cu +47 -0
- numba_cuda/numba/cuda/cuda_fp16.h +3631 -0
- numba_cuda/numba/cuda/cuda_fp16.hpp +2465 -0
- numba_cuda/numba/cuda/cuda_paths.py +258 -0
- numba_cuda/numba/cuda/cudadecl.py +806 -0
- numba_cuda/numba/cuda/cudadrv/__init__.py +9 -0
- numba_cuda/numba/cuda/cudadrv/devicearray.py +904 -0
- numba_cuda/numba/cuda/cudadrv/devices.py +248 -0
- numba_cuda/numba/cuda/cudadrv/driver.py +3201 -0
- numba_cuda/numba/cuda/cudadrv/drvapi.py +398 -0
- numba_cuda/numba/cuda/cudadrv/dummyarray.py +452 -0
- numba_cuda/numba/cuda/cudadrv/enums.py +607 -0
- numba_cuda/numba/cuda/cudadrv/error.py +36 -0
- numba_cuda/numba/cuda/cudadrv/libs.py +176 -0
- numba_cuda/numba/cuda/cudadrv/ndarray.py +20 -0
- numba_cuda/numba/cuda/cudadrv/nvrtc.py +260 -0
- numba_cuda/numba/cuda/cudadrv/nvvm.py +707 -0
- numba_cuda/numba/cuda/cudadrv/rtapi.py +10 -0
- numba_cuda/numba/cuda/cudadrv/runtime.py +142 -0
- numba_cuda/numba/cuda/cudaimpl.py +1055 -0
- numba_cuda/numba/cuda/cudamath.py +140 -0
- numba_cuda/numba/cuda/decorators.py +189 -0
- numba_cuda/numba/cuda/descriptor.py +33 -0
- numba_cuda/numba/cuda/device_init.py +89 -0
- numba_cuda/numba/cuda/deviceufunc.py +908 -0
- numba_cuda/numba/cuda/dispatcher.py +1057 -0
- numba_cuda/numba/cuda/errors.py +59 -0
- numba_cuda/numba/cuda/extending.py +7 -0
- numba_cuda/numba/cuda/initialize.py +13 -0
- numba_cuda/numba/cuda/intrinsic_wrapper.py +77 -0
- numba_cuda/numba/cuda/intrinsics.py +198 -0
- numba_cuda/numba/cuda/kernels/__init__.py +0 -0
- numba_cuda/numba/cuda/kernels/reduction.py +262 -0
- numba_cuda/numba/cuda/kernels/transpose.py +65 -0
- numba_cuda/numba/cuda/libdevice.py +3382 -0
- numba_cuda/numba/cuda/libdevicedecl.py +17 -0
- numba_cuda/numba/cuda/libdevicefuncs.py +1057 -0
- numba_cuda/numba/cuda/libdeviceimpl.py +83 -0
- numba_cuda/numba/cuda/mathimpl.py +448 -0
- numba_cuda/numba/cuda/models.py +48 -0
- numba_cuda/numba/cuda/nvvmutils.py +235 -0
- numba_cuda/numba/cuda/printimpl.py +86 -0
- numba_cuda/numba/cuda/random.py +292 -0
- numba_cuda/numba/cuda/simulator/__init__.py +38 -0
- numba_cuda/numba/cuda/simulator/api.py +110 -0
- numba_cuda/numba/cuda/simulator/compiler.py +9 -0
- numba_cuda/numba/cuda/simulator/cudadrv/__init__.py +2 -0
- numba_cuda/numba/cuda/simulator/cudadrv/devicearray.py +432 -0
- numba_cuda/numba/cuda/simulator/cudadrv/devices.py +117 -0
- numba_cuda/numba/cuda/simulator/cudadrv/driver.py +62 -0
- numba_cuda/numba/cuda/simulator/cudadrv/drvapi.py +4 -0
- numba_cuda/numba/cuda/simulator/cudadrv/dummyarray.py +4 -0
- numba_cuda/numba/cuda/simulator/cudadrv/error.py +6 -0
- numba_cuda/numba/cuda/simulator/cudadrv/libs.py +2 -0
- numba_cuda/numba/cuda/simulator/cudadrv/nvvm.py +29 -0
- numba_cuda/numba/cuda/simulator/cudadrv/runtime.py +19 -0
- numba_cuda/numba/cuda/simulator/kernel.py +308 -0
- numba_cuda/numba/cuda/simulator/kernelapi.py +495 -0
- numba_cuda/numba/cuda/simulator/reduction.py +15 -0
- numba_cuda/numba/cuda/simulator/vector_types.py +58 -0
- numba_cuda/numba/cuda/simulator_init.py +17 -0
- numba_cuda/numba/cuda/stubs.py +902 -0
- numba_cuda/numba/cuda/target.py +440 -0
- numba_cuda/numba/cuda/testing.py +202 -0
- numba_cuda/numba/cuda/tests/__init__.py +58 -0
- numba_cuda/numba/cuda/tests/cudadrv/__init__.py +8 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_array_attr.py +145 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py +145 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py +375 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_auto_context.py +21 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py +179 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py +235 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_libraries.py +22 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py +193 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +547 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_deallocations.py +249 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_detect.py +81 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_emm_plugins.py +192 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_events.py +38 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_host_alloc.py +65 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_init.py +139 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_inline_ptx.py +37 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_is_fp16.py +12 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_linker.py +317 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_managed_alloc.py +127 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_mvc.py +54 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_nvvm_driver.py +199 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_pinned.py +37 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_profiler.py +20 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_ptds.py +149 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_reset_device.py +36 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_runtime.py +85 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_select_device.py +41 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_streams.py +122 -0
- numba_cuda/numba/cuda/tests/cudapy/__init__.py +8 -0
- numba_cuda/numba/cuda/tests/cudapy/cache_usecases.py +234 -0
- numba_cuda/numba/cuda/tests/cudapy/cache_with_cpu_usecases.py +41 -0
- numba_cuda/numba/cuda/tests/cudapy/extensions_usecases.py +58 -0
- numba_cuda/numba/cuda/tests/cudapy/jitlink.ptx +30 -0
- numba_cuda/numba/cuda/tests/cudapy/recursion_usecases.py +100 -0
- numba_cuda/numba/cuda/tests/cudapy/test_alignment.py +42 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array.py +260 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_args.py +201 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_methods.py +35 -0
- numba_cuda/numba/cuda/tests/cudapy/test_atomics.py +1620 -0
- numba_cuda/numba/cuda/tests/cudapy/test_blackscholes.py +120 -0
- numba_cuda/numba/cuda/tests/cudapy/test_boolean.py +24 -0
- numba_cuda/numba/cuda/tests/cudapy/test_caching.py +545 -0
- numba_cuda/numba/cuda/tests/cudapy/test_casting.py +257 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cffi.py +33 -0
- numba_cuda/numba/cuda/tests/cudapy/test_compiler.py +276 -0
- numba_cuda/numba/cuda/tests/cudapy/test_complex.py +296 -0
- numba_cuda/numba/cuda/tests/cudapy/test_complex_kernel.py +20 -0
- numba_cuda/numba/cuda/tests/cudapy/test_const_string.py +129 -0
- numba_cuda/numba/cuda/tests/cudapy/test_constmem.py +176 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cooperative_groups.py +147 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py +435 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cuda_jit_no_types.py +90 -0
- numba_cuda/numba/cuda/tests/cudapy/test_datetime.py +94 -0
- numba_cuda/numba/cuda/tests/cudapy/test_debug.py +101 -0
- numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +221 -0
- numba_cuda/numba/cuda/tests/cudapy/test_device_func.py +222 -0
- numba_cuda/numba/cuda/tests/cudapy/test_dispatcher.py +700 -0
- numba_cuda/numba/cuda/tests/cudapy/test_enums.py +121 -0
- numba_cuda/numba/cuda/tests/cudapy/test_errors.py +79 -0
- numba_cuda/numba/cuda/tests/cudapy/test_exception.py +174 -0
- numba_cuda/numba/cuda/tests/cudapy/test_extending.py +155 -0
- numba_cuda/numba/cuda/tests/cudapy/test_fastmath.py +244 -0
- numba_cuda/numba/cuda/tests/cudapy/test_forall.py +52 -0
- numba_cuda/numba/cuda/tests/cudapy/test_freevar.py +29 -0
- numba_cuda/numba/cuda/tests/cudapy/test_frexp_ldexp.py +66 -0
- numba_cuda/numba/cuda/tests/cudapy/test_globals.py +60 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc.py +456 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scalar.py +159 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scheduling.py +95 -0
- numba_cuda/numba/cuda/tests/cudapy/test_idiv.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_inspect.py +165 -0
- numba_cuda/numba/cuda/tests/cudapy/test_intrinsics.py +1106 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ipc.py +318 -0
- numba_cuda/numba/cuda/tests/cudapy/test_iterators.py +99 -0
- numba_cuda/numba/cuda/tests/cudapy/test_lang.py +64 -0
- numba_cuda/numba/cuda/tests/cudapy/test_laplace.py +119 -0
- numba_cuda/numba/cuda/tests/cudapy/test_libdevice.py +187 -0
- numba_cuda/numba/cuda/tests/cudapy/test_lineinfo.py +199 -0
- numba_cuda/numba/cuda/tests/cudapy/test_localmem.py +164 -0
- numba_cuda/numba/cuda/tests/cudapy/test_mandel.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_math.py +786 -0
- numba_cuda/numba/cuda/tests/cudapy/test_matmul.py +74 -0
- numba_cuda/numba/cuda/tests/cudapy/test_minmax.py +113 -0
- numba_cuda/numba/cuda/tests/cudapy/test_montecarlo.py +22 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multigpu.py +140 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multiprocessing.py +46 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multithreads.py +101 -0
- numba_cuda/numba/cuda/tests/cudapy/test_nondet.py +49 -0
- numba_cuda/numba/cuda/tests/cudapy/test_operator.py +401 -0
- numba_cuda/numba/cuda/tests/cudapy/test_optimization.py +86 -0
- numba_cuda/numba/cuda/tests/cudapy/test_overload.py +335 -0
- numba_cuda/numba/cuda/tests/cudapy/test_powi.py +124 -0
- numba_cuda/numba/cuda/tests/cudapy/test_print.py +128 -0
- numba_cuda/numba/cuda/tests/cudapy/test_py2_div_issue.py +33 -0
- numba_cuda/numba/cuda/tests/cudapy/test_random.py +104 -0
- numba_cuda/numba/cuda/tests/cudapy/test_record_dtype.py +610 -0
- numba_cuda/numba/cuda/tests/cudapy/test_recursion.py +125 -0
- numba_cuda/numba/cuda/tests/cudapy/test_reduction.py +76 -0
- numba_cuda/numba/cuda/tests/cudapy/test_retrieve_autoconverted_arrays.py +83 -0
- numba_cuda/numba/cuda/tests/cudapy/test_serialize.py +85 -0
- numba_cuda/numba/cuda/tests/cudapy/test_slicing.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sm.py +444 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sm_creation.py +205 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sync.py +271 -0
- numba_cuda/numba/cuda/tests/cudapy/test_transpose.py +80 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ufuncs.py +277 -0
- numba_cuda/numba/cuda/tests/cudapy/test_userexc.py +47 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vector_type.py +307 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize.py +283 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_complex.py +20 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_decor.py +69 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_device.py +36 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_scalar_arg.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_warning.py +139 -0
- numba_cuda/numba/cuda/tests/cudapy/test_warp_ops.py +276 -0
- numba_cuda/numba/cuda/tests/cudasim/__init__.py +6 -0
- numba_cuda/numba/cuda/tests/cudasim/support.py +6 -0
- numba_cuda/numba/cuda/tests/cudasim/test_cudasim_issues.py +102 -0
- numba_cuda/numba/cuda/tests/data/__init__.py +0 -0
- numba_cuda/numba/cuda/tests/data/cuda_include.cu +5 -0
- numba_cuda/numba/cuda/tests/data/error.cu +7 -0
- numba_cuda/numba/cuda/tests/data/jitlink.cu +23 -0
- numba_cuda/numba/cuda/tests/data/jitlink.ptx +51 -0
- numba_cuda/numba/cuda/tests/data/warn.cu +7 -0
- numba_cuda/numba/cuda/tests/doc_examples/__init__.py +6 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/__init__.py +0 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/functions.cu +49 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_cg.py +77 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py +76 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_ffi.py +82 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_laplace.py +155 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_matmul.py +173 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_montecarlo.py +109 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_random.py +59 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_reduction.py +76 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_sessionize.py +130 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_ufunc.py +50 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_vecadd.py +73 -0
- numba_cuda/numba/cuda/tests/nocuda/__init__.py +8 -0
- numba_cuda/numba/cuda/tests/nocuda/test_dummyarray.py +359 -0
- numba_cuda/numba/cuda/tests/nocuda/test_function_resolution.py +36 -0
- numba_cuda/numba/cuda/tests/nocuda/test_import.py +49 -0
- numba_cuda/numba/cuda/tests/nocuda/test_library_lookup.py +238 -0
- numba_cuda/numba/cuda/tests/nocuda/test_nvvm.py +54 -0
- numba_cuda/numba/cuda/types.py +37 -0
- numba_cuda/numba/cuda/ufuncs.py +662 -0
- numba_cuda/numba/cuda/vector_types.py +209 -0
- numba_cuda/numba/cuda/vectorizers.py +252 -0
- numba_cuda-0.0.12.dist-info/LICENSE +25 -0
- numba_cuda-0.0.12.dist-info/METADATA +68 -0
- numba_cuda-0.0.12.dist-info/RECORD +231 -0
- {numba_cuda-0.0.1.dist-info → numba_cuda-0.0.12.dist-info}/WHEEL +1 -1
- numba_cuda-0.0.1.dist-info/METADATA +0 -10
- numba_cuda-0.0.1.dist-info/RECORD +0 -5
- {numba_cuda-0.0.1.dist-info → numba_cuda-0.0.12.dist-info}/top_level.txt +0 -0
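Most of the new content in 0.0.12 is the CUDA ufunc/gufunc machinery (deviceufunc.py, vectorizers.py) together with its test suite; the three largest new test modules are reproduced below. As a quick orientation, here is a minimal sketch of the guvectorize(..., target='cuda') usage those tests exercise. It is illustrative only and assumes a CUDA-capable GPU with a working toolkit; the array shapes are arbitrary.

import numpy as np
from numba import cuda, guvectorize, void, float32

# Batched matrix multiply with core signature (m,n),(n,p)->(m,p), compiled
# for the CUDA target (mirrors _get_matmulcore_gufunc in test_gufunc.py below).
@guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
             '(m,n),(n,p)->(m,p)', target='cuda')
def matmulcore(A, B, C):
    m, n = A.shape
    n, p = B.shape
    for i in range(m):
        for j in range(p):
            C[i, j] = 0
            for k in range(n):
                C[i, j] += A[i, k] * B[k, j]

A = np.arange(2 * 2 * 4, dtype=np.float32).reshape(2, 2, 4)
B = np.arange(2 * 4 * 5, dtype=np.float32).reshape(2, 4, 5)

C = matmulcore(A, B)                  # host arrays: transfers handled automatically
np.testing.assert_allclose(C, np.matmul(A, B))

dC = matmulcore(cuda.to_device(A), cuda.to_device(B))  # device arrays: result stays on the GPU
np.testing.assert_allclose(dC.copy_to_host(), np.matmul(A, B))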
numba_cuda/numba/cuda/tests/cudapy/test_gufunc.py
@@ -0,0 +1,456 @@
+import numpy as np
+
+from collections import namedtuple
+from numba import void, int32, float32, float64
+from numba import guvectorize
+from numba import cuda
+from numba.cuda.testing import skip_on_cudasim, CUDATestCase
+import unittest
+import warnings
+from numba.core.errors import NumbaPerformanceWarning, TypingError
+from numba.tests.support import override_config
+
+
+def _get_matmulcore_gufunc(dtype=float32):
+    @guvectorize([void(dtype[:, :], dtype[:, :], dtype[:, :])],
+                 '(m,n),(n,p)->(m,p)',
+                 target='cuda')
+    def matmulcore(A, B, C):
+        m, n = A.shape
+        n, p = B.shape
+        for i in range(m):
+            for j in range(p):
+                C[i, j] = 0
+                for k in range(n):
+                    C[i, j] += A[i, k] * B[k, j]
+
+    return matmulcore
+
+
+@skip_on_cudasim('ufunc API unsupported in the simulator')
+class TestCUDAGufunc(CUDATestCase):
+
+    def test_gufunc_small(self):
+
+        gufunc = _get_matmulcore_gufunc()
+
+        matrix_ct = 2
+        A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
+                                                                    4)
+        B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
+                                                                    5)
+
+        C = gufunc(A, B)
+        Gold = np.matmul(A, B)
+        self.assertTrue(np.allclose(C, Gold))
+
+    def test_gufunc_auto_transfer(self):
+
+        gufunc = _get_matmulcore_gufunc()
+
+        matrix_ct = 2
+        A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
+                                                                    4)
+        B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
+                                                                    5)
+
+        dB = cuda.to_device(B)
+
+        C = gufunc(A, dB).copy_to_host()
+        Gold = np.matmul(A, B)
+        self.assertTrue(np.allclose(C, Gold))
+
+    def test_gufunc(self):
+
+        gufunc = _get_matmulcore_gufunc()
+
+        matrix_ct = 1001  # an odd number to test thread/block division in CUDA
+        A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
+                                                                    4)
+        B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
+                                                                    5)
+
+        C = gufunc(A, B)
+        Gold = np.matmul(A, B)
+        self.assertTrue(np.allclose(C, Gold))
+
+    def test_gufunc_hidim(self):
+
+        gufunc = _get_matmulcore_gufunc()
+
+        matrix_ct = 100  # an odd number to test thread/block division in CUDA
+        A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(4, 25, 2, 4)
+        B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(4, 25, 4, 5)
+
+        C = gufunc(A, B)
+        Gold = np.matmul(A, B)
+        self.assertTrue(np.allclose(C, Gold))
+
+    def test_gufunc_new_axis(self):
+
+        gufunc = _get_matmulcore_gufunc(dtype=float64)
+
+        X = np.random.randn(10, 3, 3)
+        Y = np.random.randn(3, 3)
+
+        gold = np.matmul(X, Y)
+
+        res1 = gufunc(X, Y)
+        np.testing.assert_allclose(gold, res1)
+
+        res2 = gufunc(X, np.tile(Y, (10, 1, 1)))
+        np.testing.assert_allclose(gold, res2)
+
+    def test_gufunc_stream(self):
+
+        gufunc = _get_matmulcore_gufunc()
+
+        #cuda.driver.flush_pending_free()
+        matrix_ct = 1001  # an odd number to test thread/block division in CUDA
+        A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
+                                                                    4)
+        B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
+                                                                    5)
+
+        stream = cuda.stream()
+        dA = cuda.to_device(A, stream)
+        dB = cuda.to_device(B, stream)
+
+        dC = cuda.device_array(shape=(1001, 2, 5), dtype=A.dtype, stream=stream)
+        dC = gufunc(dA, dB, out=dC, stream=stream)
+        C = dC.copy_to_host(stream=stream)
+        stream.synchronize()
+
+        Gold = np.matmul(A, B)
+
+        self.assertTrue(np.allclose(C, Gold))
+
+    def test_copy(self):
+
+        @guvectorize([void(float32[:], float32[:])],
+                     '(x)->(x)',
+                     target='cuda')
+        def copy(A, B):
+            for i in range(B.size):
+                B[i] = A[i]
+
+        A = np.arange(10, dtype=np.float32) + 1
+        B = np.zeros_like(A)
+        copy(A, out=B)
+        np.testing.assert_allclose(A, B)
+
+    def test_copy_unspecified_return(self):
+        # Ensure that behaviour is correct when the return type is not
+        # specified in the signature.
+        @guvectorize([(float32[:], float32[:])],
+                     '(x)->(x)',
+                     target='cuda')
+        def copy(A, B):
+            for i in range(B.size):
+                B[i] = A[i]
+
+        A = np.arange(10, dtype=np.float32) + 1
+        B = np.zeros_like(A)
+        copy(A, out=B)
+        self.assertTrue(np.allclose(A, B))
+
+    def test_copy_odd(self):
+
+        @guvectorize([void(float32[:], float32[:])],
+                     '(x)->(x)',
+                     target='cuda')
+        def copy(A, B):
+            for i in range(B.size):
+                B[i] = A[i]
+
+        A = np.arange(11, dtype=np.float32) + 1
+        B = np.zeros_like(A)
+        copy(A, out=B)
+        self.assertTrue(np.allclose(A, B))
+
+    def test_copy2d(self):
+
+        @guvectorize([void(float32[:, :], float32[:, :])],
+                     '(x, y)->(x, y)',
+                     target='cuda')
+        def copy2d(A, B):
+            for x in range(B.shape[0]):
+                for y in range(B.shape[1]):
+                    B[x, y] = A[x, y]
+
+        A = np.arange(30, dtype=np.float32).reshape(5, 6) + 1
+        B = np.zeros_like(A)
+        copy2d(A, out=B)
+        self.assertTrue(np.allclose(A, B))
+
+    def test_not_supported_call_from_jit(self):
+        # not supported
+        @guvectorize([void(int32[:], int32[:])],
+                     '(n)->(n)', target='cuda')
+        def gufunc_copy(A, b):
+            for i in range(A.shape[0]):
+                b[i] = A[i]
+
+        @cuda.jit
+        def cuda_jit(A, b):
+            return gufunc_copy(A, b)
+
+        A = np.arange(1024 * 32).astype('int32')
+        b = np.zeros_like(A)
+        msg = "Untyped global name 'gufunc_copy'.*"
+        with self.assertRaisesRegex(TypingError, msg):
+            cuda_jit[1, 1](A, b)
+
+    # Test inefficient use of the GPU where the inputs are all mapped onto a
+    # single thread in a single block.
+    def test_inefficient_launch_configuration(self):
+        @guvectorize(['void(float32[:], float32[:], float32[:])'],
+                     '(n),(n)->(n)', target='cuda')
+        def numba_dist_cuda(a, b, dist):
+            len = a.shape[0]
+            for i in range(len):
+                dist[i] = a[i] * b[i]
+
+        a = np.random.rand(1024 * 32).astype('float32')
+        b = np.random.rand(1024 * 32).astype('float32')
+        dist = np.zeros(a.shape[0]).astype('float32')
+
+        with override_config('CUDA_LOW_OCCUPANCY_WARNINGS', 1):
+            with warnings.catch_warnings(record=True) as w:
+                numba_dist_cuda(a, b, dist)
+                self.assertEqual(w[0].category, NumbaPerformanceWarning)
+                self.assertIn('Grid size', str(w[0].message))
+                self.assertIn('low occupancy', str(w[0].message))
+
+    def test_efficient_launch_configuration(self):
+        @guvectorize(['void(float32[:], float32[:], float32[:])'],
+                     '(n),(n)->(n)', nopython=True, target='cuda')
+        def numba_dist_cuda2(a, b, dist):
+            len = a.shape[0]
+            for i in range(len):
+                dist[i] = a[i] * b[i]
+
+        a = np.random.rand(524288 * 2).astype('float32').\
+            reshape((524288, 2))
+        b = np.random.rand(524288 * 2).astype('float32').\
+            reshape((524288, 2))
+        dist = np.zeros_like(a)
+
+        with override_config('CUDA_LOW_OCCUPANCY_WARNINGS', 1):
+            with warnings.catch_warnings(record=True) as w:
+                numba_dist_cuda2(a, b, dist)
+                self.assertEqual(len(w), 0)
+
+    def test_nopython_flag(self):
+
+        def foo(A, B):
+            pass
+
+        # nopython = True is fine
+        guvectorize([void(float32[:], float32[:])], '(x)->(x)', target='cuda',
+                    nopython=True)(foo)
+
+        # nopython = False is bad
+        with self.assertRaises(TypeError) as raises:
+            guvectorize([void(float32[:], float32[:])], '(x)->(x)',
+                        target='cuda', nopython=False)(foo)
+        self.assertEqual("nopython flag must be True", str(raises.exception))
+
+    def test_invalid_flags(self):
+        # Check invalid flags
+        def foo(A, B):
+            pass
+
+        with self.assertRaises(TypeError) as raises:
+            guvectorize([void(float32[:], float32[:])], '(x)->(x)',
+                        target='cuda', what1=True, ever2=False)(foo)
+        head = "The following target options are not supported:"
+        msg = str(raises.exception)
+        self.assertEqual(msg[:len(head)], head)
+        items = msg[len(head):].strip().split(',')
+        items = [i.strip("'\" ") for i in items]
+        self.assertEqual(set(['what1', 'ever2']), set(items))
+
+    def test_duplicated_output(self):
+        @guvectorize([void(float32[:], float32[:])], '(x)->(x)', target='cuda')
+        def foo(inp, out):
+            pass  # intentionally empty; never executed
+
+        inp = out = np.zeros(10, dtype=np.float32)
+        with self.assertRaises(ValueError) as raises:
+            foo(inp, out, out=out)
+
+        msg = "cannot specify argument 'out' as both positional and keyword"
+        self.assertEqual(str(raises.exception), msg)
+
+    def check_tuple_arg(self, a, b):
+        @guvectorize([(float64[:], float64[:], float64[:])], '(n),(n)->()',
+                     target='cuda')
+        def gu_reduce(x, y, r):
+            s = 0
+            for i in range(len(x)):
+                s += x[i] * y[i]
+            r[0] = s
+
+        r = gu_reduce(a, b)
+        expected = np.sum(np.asarray(a) * np.asarray(b), axis=1)
+        np.testing.assert_equal(expected, r)
+
+    def test_tuple_of_tuple_arg(self):
+        a = ((1.0, 2.0, 3.0),
+             (4.0, 5.0, 6.0))
+        b = ((1.5, 2.5, 3.5),
+             (4.5, 5.5, 6.5))
+        self.check_tuple_arg(a, b)
+
+    def test_tuple_of_namedtuple_arg(self):
+        Point = namedtuple('Point', ('x', 'y', 'z'))
+        a = (Point(x=1.0, y=2.0, z=3.0),
+             Point(x=4.0, y=5.0, z=6.0))
+        b = (Point(x=1.5, y=2.5, z=3.5),
+             Point(x=4.5, y=5.5, z=6.5))
+        self.check_tuple_arg(a, b)
+
+    def test_tuple_of_array_arg(self):
+        a = (np.asarray((1.0, 2.0, 3.0)),
+             np.asarray((4.0, 5.0, 6.0)))
+        b = (np.asarray((1.5, 2.5, 3.5)),
+             np.asarray((4.5, 5.5, 6.5)))
+        self.check_tuple_arg(a, b)
+
+    def test_gufunc_name(self):
+        gufunc = _get_matmulcore_gufunc()
+        self.assertEqual(gufunc.__name__, 'matmulcore')
+
+    def test_bad_return_type(self):
+        with self.assertRaises(TypeError) as te:
+            @guvectorize([int32(int32[:], int32[:])], '(m)->(m)', target='cuda')
+            def f(x, y):
+                pass
+
+        msg = str(te.exception)
+        self.assertIn('guvectorized functions cannot return values', msg)
+        self.assertIn('specifies int32 return type', msg)
+
+    def test_incorrect_number_of_pos_args(self):
+        @guvectorize([(int32[:], int32[:], int32[:])],
+                     '(m),(m)->(m)', target='cuda')
+        def f(x, y, z):
+            pass
+
+        arr = np.arange(5)
+
+        # Inputs only, too few
+        with self.assertRaises(TypeError) as te:
+            f(arr)
+
+        msg = str(te.exception)
+        self.assertIn('gufunc accepts 2 positional arguments', msg)
+        self.assertIn('or 3 positional arguments', msg)
+        self.assertIn('Got 1 positional argument.', msg)
+
+        # Inputs and outputs, too many
+        with self.assertRaises(TypeError) as te:
+            f(arr, arr, arr, arr)
+
+        msg = str(te.exception)
+        self.assertIn('gufunc accepts 2 positional arguments', msg)
+        self.assertIn('or 3 positional arguments', msg)
+        self.assertIn('Got 4 positional arguments.', msg)
+
+
+@skip_on_cudasim('ufunc API unsupported in the simulator')
+class TestMultipleOutputs(CUDATestCase):
+    def test_multiple_outputs_same_type_passed_in(self):
+        @guvectorize([void(float32[:], float32[:], float32[:])],
+                     '(x)->(x),(x)',
+                     target='cuda')
+        def copy(A, B, C):
+            for i in range(B.size):
+                B[i] = A[i]
+                C[i] = A[i]
+
+        A = np.arange(10, dtype=np.float32) + 1
+        B = np.zeros_like(A)
+        C = np.zeros_like(A)
+        copy(A, B, C)
+        np.testing.assert_allclose(A, B)
+        np.testing.assert_allclose(A, C)
+
+    def test_multiple_outputs_distinct_values(self):
+
+        @guvectorize([void(float32[:], float32[:], float32[:])],
+                     '(x)->(x),(x)',
+                     target='cuda')
+        def copy_and_double(A, B, C):
+            for i in range(B.size):
+                B[i] = A[i]
+                C[i] = A[i] * 2
+
+        A = np.arange(10, dtype=np.float32) + 1
+        B = np.zeros_like(A)
+        C = np.zeros_like(A)
+        copy_and_double(A, B, C)
+        np.testing.assert_allclose(A, B)
+        np.testing.assert_allclose(A * 2, C)
+
+    def test_multiple_output_allocation(self):
+        @guvectorize([void(float32[:], float32[:], float32[:])],
+                     '(x)->(x),(x)',
+                     target='cuda')
+        def copy_and_double(A, B, C):
+            for i in range(B.size):
+                B[i] = A[i]
+                C[i] = A[i] * 2
+
+        A = np.arange(10, dtype=np.float32) + 1
+        B, C = copy_and_double(A)
+        np.testing.assert_allclose(A, B)
+        np.testing.assert_allclose(A * 2, C)
+
+    def test_multiple_output_dtypes(self):
+
+        @guvectorize([void(int32[:], int32[:], float64[:])],
+                     '(x)->(x),(x)',
+                     target='cuda')
+        def copy_and_multiply(A, B, C):
+            for i in range(B.size):
+                B[i] = A[i]
+                C[i] = A[i] * 1.5
+
+        A = np.arange(10, dtype=np.int32) + 1
+        B = np.zeros_like(A)
+        C = np.zeros_like(A, dtype=np.float64)
+        copy_and_multiply(A, B, C)
+        np.testing.assert_allclose(A, B)
+        np.testing.assert_allclose(A * np.float64(1.5), C)
+
+    def test_incorrect_number_of_pos_args(self):
+        @guvectorize([(int32[:], int32[:], int32[:], int32[:])],
+                     '(m),(m)->(m),(m)', target='cuda')
+        def f(x, y, z, w):
+            pass
+
+        arr = np.arange(5)
+
+        # Inputs only, too few
+        with self.assertRaises(TypeError) as te:
+            f(arr)
+
+        msg = str(te.exception)
+        self.assertIn('gufunc accepts 2 positional arguments', msg)
+        self.assertIn('or 4 positional arguments', msg)
+        self.assertIn('Got 1 positional argument.', msg)
+
+        # Inputs and outputs, too many
+        with self.assertRaises(TypeError) as te:
+            f(arr, arr, arr, arr, arr)
+
+        msg = str(te.exception)
+        self.assertIn('gufunc accepts 2 positional arguments', msg)
+        self.assertIn('or 4 positional arguments', msg)
+        self.assertIn('Got 5 positional arguments.', msg)
+
+
+if __name__ == '__main__':
+    unittest.main()
numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scalar.py
@@ -0,0 +1,159 @@
+"""Example: sum each row using guvectorize
+
+See Numpy documentation for detail about gufunc:
+http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
+"""
+import numpy as np
+from numba import guvectorize, cuda
+from numba.cuda.testing import skip_on_cudasim, CUDATestCase
+import unittest
+
+
+@skip_on_cudasim('ufunc API unsupported in the simulator')
+class TestGUFuncScalar(CUDATestCase):
+    def test_gufunc_scalar_output(self):
+        # function type:
+        #     - has no void return type
+        #     - array argument is one dimension fewer than the source array
+        #     - scalar output is passed as a 1-element array.
+        #
+        # signature: (n)->()
+        #     - the function takes an array of n-element and output a scalar.
+
+        @guvectorize(['void(int32[:], int32[:])'], '(n)->()', target='cuda')
+        def sum_row(inp, out):
+            tmp = 0.
+            for i in range(inp.shape[0]):
+                tmp += inp[i]
+            out[0] = tmp
+
+        # inp is (10000, 3)
+        # out is (10000)
+        # The outer (leftmost) dimension must match or numpy broadcasting
+        # is performed. But, broadcasting on CUDA arrays is not supported.
+
+        inp = np.arange(300, dtype=np.int32).reshape(100, 3)
+
+        # invoke on CUDA with manually managed memory
+        out1 = np.empty(100, dtype=inp.dtype)
+        out2 = np.empty(100, dtype=inp.dtype)
+
+        dev_inp = cuda.to_device(
+            inp)                                     # alloc and copy input data
+        dev_out1 = cuda.to_device(out1, copy=False)  # alloc only
+
+        sum_row(dev_inp, out=dev_out1)               # invoke the gufunc
+        dev_out2 = sum_row(dev_inp)                  # invoke the gufunc
+
+        dev_out1.copy_to_host(out1)                  # retrieve the result
+        dev_out2.copy_to_host(out2)                  # retrieve the result
+
+        # verify result
+        for i in range(inp.shape[0]):
+            self.assertTrue(out1[i] == inp[i].sum())
+            self.assertTrue(out2[i] == inp[i].sum())
+
+    def test_gufunc_scalar_output_bug(self):
+        # Issue 2812: Error due to using input argument types as output argument
+        @guvectorize(['void(int32, int32[:])'], '()->()', target='cuda')
+        def twice(inp, out):
+            out[0] = inp * 2
+
+        self.assertEqual(twice(10), 20)
+        arg = np.arange(10).astype(np.int32)
+        self.assertPreciseEqual(twice(arg), arg * 2)
+
+    def test_gufunc_scalar_input_saxpy(self):
+        @guvectorize(['void(float32, float32[:], float32[:], float32[:])'],
+                     '(),(t),(t)->(t)', target='cuda')
+        def saxpy(a, x, y, out):
+            for i in range(out.shape[0]):
+                out[i] = a * x[i] + y[i]
+
+        A = np.float32(2)
+        X = np.arange(10, dtype=np.float32).reshape(5, 2)
+        Y = np.arange(10, dtype=np.float32).reshape(5, 2)
+        out = saxpy(A, X, Y)
+
+        for j in range(5):
+            for i in range(2):
+                exp = A * X[j, i] + Y[j, i]
+                self.assertTrue(exp == out[j, i])
+
+        X = np.arange(10, dtype=np.float32)
+        Y = np.arange(10, dtype=np.float32)
+        out = saxpy(A, X, Y)
+
+        for j in range(10):
+            exp = A * X[j] + Y[j]
+            self.assertTrue(exp == out[j], (exp, out[j]))
+
+        A = np.arange(5, dtype=np.float32)
+        X = np.arange(10, dtype=np.float32).reshape(5, 2)
+        Y = np.arange(10, dtype=np.float32).reshape(5, 2)
+        out = saxpy(A, X, Y)
+
+        for j in range(5):
+            for i in range(2):
+                exp = A[j] * X[j, i] + Y[j, i]
+                self.assertTrue(exp == out[j, i], (exp, out[j, i]))
+
+    def test_gufunc_scalar_cast(self):
+        @guvectorize(['void(int32, int32[:], int32[:])'], '(),(t)->(t)',
+                     target='cuda')
+        def foo(a, b, out):
+            for i in range(b.size):
+                out[i] = a * b[i]
+
+        a = np.int64(2)  # type does not match signature (int32)
+        b = np.arange(10).astype(np.int32)
+        out = foo(a, b)
+        np.testing.assert_equal(out, a * b)
+
+        # test error
+        a = np.array(a)
+        da = cuda.to_device(a)
+        self.assertEqual(da.dtype, np.int64)
+        with self.assertRaises(TypeError) as raises:
+            foo(da, b)
+
+        self.assertIn("does not support .astype()", str(raises.exception))
+
+    def test_gufunc_old_style_scalar_as_array(self):
+        # Example from issue #2579
+        @guvectorize(['void(int32[:],int32[:],int32[:])'], '(n),()->(n)',
+                     target='cuda')
+        def gufunc(x, y, res):
+            for i in range(x.shape[0]):
+                res[i] = x[i] + y[0]
+
+        # Case 1
+        a = np.array([1, 2, 3, 4], dtype=np.int32)
+        b = np.array([2], dtype=np.int32)
+
+        res = np.zeros(4, dtype=np.int32)
+
+        expected = res.copy()
+        expected = a + b
+
+        gufunc(a, b, out=res)
+
+        np.testing.assert_almost_equal(expected, res)
+
+        # Case 2
+        a = np.array([1, 2, 3, 4] * 2, dtype=np.int32).reshape(2, 4)
+        b = np.array([2, 10], dtype=np.int32)
+
+        res = np.zeros((2, 4), dtype=np.int32)
+
+        expected = res.copy()
+        expected[0] = a[0] + b[0]
+        expected[1] = a[1] + b[1]
+
+        gufunc(a, b, res)
+
+        np.testing.assert_almost_equal(expected, res)
+
+
+if __name__ == '__main__':
+    unittest.main()
numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scheduling.py
@@ -0,0 +1,95 @@
+from numba.cuda.deviceufunc import GUFuncEngine
+import unittest
+
+
+def template(signature, shapes, expects):
+    gufb = GUFuncEngine.from_signature(signature)
+    sch = gufb.schedule(shapes)
+    for k, v in expects.items():
+        got = getattr(sch, k)
+        if got != v:
+            fmt = 'error for %s: got=%s but expect=%s'
+            raise AssertionError(fmt % (k, got, v))
+
+
+class TestGUFuncScheduling(unittest.TestCase):
+    def test_signature_1(self):
+        signature = '(m, n), (n, p) -> (m, p)'
+        shapes = (100, 4, 5), (1, 5, 7)
+        expects = dict(
+            ishapes=[(4, 5), (5, 7)],
+            oshapes=[(4, 7)],
+            loopdims=(100,),
+            pinned=[False, True]
+        )
+        template(signature, shapes, expects)
+
+    def test_signature_2(self):
+        signature = '(m, n), (n, p) -> (m, p)'
+        shapes = (100, 4, 5), (100, 5, 7)
+        expects = dict(
+            ishapes=[(4, 5), (5, 7)],
+            oshapes=[(4, 7)],
+            loopdims=(100,),
+            pinned=[False, False]
+        )
+        template(signature, shapes, expects)
+
+    def test_signature_3(self):
+        signature = '(m, n), (n, p) -> (m, p)'
+        shapes = (12, 34, 4, 5), (12, 34, 5, 7)
+        expects = dict(
+            ishapes=[(4, 5), (5, 7)],
+            oshapes=[(4, 7)],
+            loopdims=(12, 34),
+            pinned=[False, False]
+        )
+        template(signature, shapes, expects)
+
+    def test_signature_4(self):
+        signature = '(m, n), (n, p) -> (m, p)'
+        shapes = (4, 5), (5, 7)
+        expects = dict(
+            ishapes=[(4, 5), (5, 7)],
+            oshapes=[(4, 7)],
+            loopdims=(),
+            pinned=[False, False]
+        )
+        template(signature, shapes, expects)
+
+    def test_signature_5(self):
+        signature = '(a), (a) -> (a)'
+        shapes = (5,), (5,)
+        expects = dict(
+            ishapes=[(5,), (5,)],
+            oshapes=[(5,)],
+            loopdims=(),
+            pinned=[False, False]
+        )
+        template(signature, shapes, expects)
+
+    def test_signature_6(self):
+        signature = '(), () -> ()'
+        shapes = (5,), (5,)
+        expects = dict(
+            ishapes=[(), ()],
+            oshapes=[()],
+            loopdims=(5,),
+            pinned=[False, False]
+        )
+        template(signature, shapes, expects)
+
+    def test_signature_7(self):
+        signature = '(), () -> ()'
+        shapes = (5,), ()
+        expects = dict(
+            ishapes=[(), ()],
+            oshapes=[()],
+            loopdims=(5,),
+            pinned=[False, True]
+        )
+        template(signature, shapes, expects)
+
+
+if __name__ == '__main__':
+    unittest.main()