numba-cuda 0.0.1__py3-none-any.whl → 0.0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _numba_cuda_redirector.pth +1 -0
- _numba_cuda_redirector.py +74 -0
- numba_cuda/VERSION +1 -0
- numba_cuda/__init__.py +5 -0
- numba_cuda/_version.py +19 -0
- numba_cuda/numba/cuda/__init__.py +22 -0
- numba_cuda/numba/cuda/api.py +526 -0
- numba_cuda/numba/cuda/api_util.py +30 -0
- numba_cuda/numba/cuda/args.py +77 -0
- numba_cuda/numba/cuda/cg.py +62 -0
- numba_cuda/numba/cuda/codegen.py +378 -0
- numba_cuda/numba/cuda/compiler.py +422 -0
- numba_cuda/numba/cuda/cpp_function_wrappers.cu +47 -0
- numba_cuda/numba/cuda/cuda_fp16.h +3631 -0
- numba_cuda/numba/cuda/cuda_fp16.hpp +2465 -0
- numba_cuda/numba/cuda/cuda_paths.py +258 -0
- numba_cuda/numba/cuda/cudadecl.py +806 -0
- numba_cuda/numba/cuda/cudadrv/__init__.py +9 -0
- numba_cuda/numba/cuda/cudadrv/devicearray.py +904 -0
- numba_cuda/numba/cuda/cudadrv/devices.py +248 -0
- numba_cuda/numba/cuda/cudadrv/driver.py +3201 -0
- numba_cuda/numba/cuda/cudadrv/drvapi.py +398 -0
- numba_cuda/numba/cuda/cudadrv/dummyarray.py +452 -0
- numba_cuda/numba/cuda/cudadrv/enums.py +607 -0
- numba_cuda/numba/cuda/cudadrv/error.py +36 -0
- numba_cuda/numba/cuda/cudadrv/libs.py +176 -0
- numba_cuda/numba/cuda/cudadrv/ndarray.py +20 -0
- numba_cuda/numba/cuda/cudadrv/nvrtc.py +260 -0
- numba_cuda/numba/cuda/cudadrv/nvvm.py +707 -0
- numba_cuda/numba/cuda/cudadrv/rtapi.py +10 -0
- numba_cuda/numba/cuda/cudadrv/runtime.py +142 -0
- numba_cuda/numba/cuda/cudaimpl.py +1055 -0
- numba_cuda/numba/cuda/cudamath.py +140 -0
- numba_cuda/numba/cuda/decorators.py +189 -0
- numba_cuda/numba/cuda/descriptor.py +33 -0
- numba_cuda/numba/cuda/device_init.py +89 -0
- numba_cuda/numba/cuda/deviceufunc.py +908 -0
- numba_cuda/numba/cuda/dispatcher.py +1057 -0
- numba_cuda/numba/cuda/errors.py +59 -0
- numba_cuda/numba/cuda/extending.py +7 -0
- numba_cuda/numba/cuda/initialize.py +13 -0
- numba_cuda/numba/cuda/intrinsic_wrapper.py +77 -0
- numba_cuda/numba/cuda/intrinsics.py +198 -0
- numba_cuda/numba/cuda/kernels/__init__.py +0 -0
- numba_cuda/numba/cuda/kernels/reduction.py +262 -0
- numba_cuda/numba/cuda/kernels/transpose.py +65 -0
- numba_cuda/numba/cuda/libdevice.py +3382 -0
- numba_cuda/numba/cuda/libdevicedecl.py +17 -0
- numba_cuda/numba/cuda/libdevicefuncs.py +1057 -0
- numba_cuda/numba/cuda/libdeviceimpl.py +83 -0
- numba_cuda/numba/cuda/mathimpl.py +448 -0
- numba_cuda/numba/cuda/models.py +48 -0
- numba_cuda/numba/cuda/nvvmutils.py +235 -0
- numba_cuda/numba/cuda/printimpl.py +86 -0
- numba_cuda/numba/cuda/random.py +292 -0
- numba_cuda/numba/cuda/simulator/__init__.py +38 -0
- numba_cuda/numba/cuda/simulator/api.py +110 -0
- numba_cuda/numba/cuda/simulator/compiler.py +9 -0
- numba_cuda/numba/cuda/simulator/cudadrv/__init__.py +2 -0
- numba_cuda/numba/cuda/simulator/cudadrv/devicearray.py +432 -0
- numba_cuda/numba/cuda/simulator/cudadrv/devices.py +117 -0
- numba_cuda/numba/cuda/simulator/cudadrv/driver.py +62 -0
- numba_cuda/numba/cuda/simulator/cudadrv/drvapi.py +4 -0
- numba_cuda/numba/cuda/simulator/cudadrv/dummyarray.py +4 -0
- numba_cuda/numba/cuda/simulator/cudadrv/error.py +6 -0
- numba_cuda/numba/cuda/simulator/cudadrv/libs.py +2 -0
- numba_cuda/numba/cuda/simulator/cudadrv/nvvm.py +29 -0
- numba_cuda/numba/cuda/simulator/cudadrv/runtime.py +19 -0
- numba_cuda/numba/cuda/simulator/kernel.py +308 -0
- numba_cuda/numba/cuda/simulator/kernelapi.py +495 -0
- numba_cuda/numba/cuda/simulator/reduction.py +15 -0
- numba_cuda/numba/cuda/simulator/vector_types.py +58 -0
- numba_cuda/numba/cuda/simulator_init.py +17 -0
- numba_cuda/numba/cuda/stubs.py +902 -0
- numba_cuda/numba/cuda/target.py +440 -0
- numba_cuda/numba/cuda/testing.py +202 -0
- numba_cuda/numba/cuda/tests/__init__.py +58 -0
- numba_cuda/numba/cuda/tests/cudadrv/__init__.py +8 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_array_attr.py +145 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py +145 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py +375 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_auto_context.py +21 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py +179 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py +235 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_libraries.py +22 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py +193 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +547 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_deallocations.py +249 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_detect.py +81 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_emm_plugins.py +192 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_events.py +38 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_host_alloc.py +65 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_init.py +139 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_inline_ptx.py +37 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_is_fp16.py +12 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_linker.py +317 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_managed_alloc.py +127 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_mvc.py +54 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_nvvm_driver.py +199 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_pinned.py +37 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_profiler.py +20 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_ptds.py +149 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_reset_device.py +36 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_runtime.py +85 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_select_device.py +41 -0
- numba_cuda/numba/cuda/tests/cudadrv/test_streams.py +122 -0
- numba_cuda/numba/cuda/tests/cudapy/__init__.py +8 -0
- numba_cuda/numba/cuda/tests/cudapy/cache_usecases.py +234 -0
- numba_cuda/numba/cuda/tests/cudapy/cache_with_cpu_usecases.py +41 -0
- numba_cuda/numba/cuda/tests/cudapy/extensions_usecases.py +58 -0
- numba_cuda/numba/cuda/tests/cudapy/jitlink.ptx +30 -0
- numba_cuda/numba/cuda/tests/cudapy/recursion_usecases.py +100 -0
- numba_cuda/numba/cuda/tests/cudapy/test_alignment.py +42 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array.py +260 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_args.py +201 -0
- numba_cuda/numba/cuda/tests/cudapy/test_array_methods.py +35 -0
- numba_cuda/numba/cuda/tests/cudapy/test_atomics.py +1620 -0
- numba_cuda/numba/cuda/tests/cudapy/test_blackscholes.py +120 -0
- numba_cuda/numba/cuda/tests/cudapy/test_boolean.py +24 -0
- numba_cuda/numba/cuda/tests/cudapy/test_caching.py +545 -0
- numba_cuda/numba/cuda/tests/cudapy/test_casting.py +257 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cffi.py +33 -0
- numba_cuda/numba/cuda/tests/cudapy/test_compiler.py +276 -0
- numba_cuda/numba/cuda/tests/cudapy/test_complex.py +296 -0
- numba_cuda/numba/cuda/tests/cudapy/test_complex_kernel.py +20 -0
- numba_cuda/numba/cuda/tests/cudapy/test_const_string.py +129 -0
- numba_cuda/numba/cuda/tests/cudapy/test_constmem.py +176 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cooperative_groups.py +147 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py +435 -0
- numba_cuda/numba/cuda/tests/cudapy/test_cuda_jit_no_types.py +90 -0
- numba_cuda/numba/cuda/tests/cudapy/test_datetime.py +94 -0
- numba_cuda/numba/cuda/tests/cudapy/test_debug.py +101 -0
- numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +221 -0
- numba_cuda/numba/cuda/tests/cudapy/test_device_func.py +222 -0
- numba_cuda/numba/cuda/tests/cudapy/test_dispatcher.py +700 -0
- numba_cuda/numba/cuda/tests/cudapy/test_enums.py +121 -0
- numba_cuda/numba/cuda/tests/cudapy/test_errors.py +79 -0
- numba_cuda/numba/cuda/tests/cudapy/test_exception.py +174 -0
- numba_cuda/numba/cuda/tests/cudapy/test_extending.py +155 -0
- numba_cuda/numba/cuda/tests/cudapy/test_fastmath.py +244 -0
- numba_cuda/numba/cuda/tests/cudapy/test_forall.py +52 -0
- numba_cuda/numba/cuda/tests/cudapy/test_freevar.py +29 -0
- numba_cuda/numba/cuda/tests/cudapy/test_frexp_ldexp.py +66 -0
- numba_cuda/numba/cuda/tests/cudapy/test_globals.py +60 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc.py +456 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scalar.py +159 -0
- numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scheduling.py +95 -0
- numba_cuda/numba/cuda/tests/cudapy/test_idiv.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_inspect.py +165 -0
- numba_cuda/numba/cuda/tests/cudapy/test_intrinsics.py +1106 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ipc.py +318 -0
- numba_cuda/numba/cuda/tests/cudapy/test_iterators.py +99 -0
- numba_cuda/numba/cuda/tests/cudapy/test_lang.py +64 -0
- numba_cuda/numba/cuda/tests/cudapy/test_laplace.py +119 -0
- numba_cuda/numba/cuda/tests/cudapy/test_libdevice.py +187 -0
- numba_cuda/numba/cuda/tests/cudapy/test_lineinfo.py +199 -0
- numba_cuda/numba/cuda/tests/cudapy/test_localmem.py +164 -0
- numba_cuda/numba/cuda/tests/cudapy/test_mandel.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_math.py +786 -0
- numba_cuda/numba/cuda/tests/cudapy/test_matmul.py +74 -0
- numba_cuda/numba/cuda/tests/cudapy/test_minmax.py +113 -0
- numba_cuda/numba/cuda/tests/cudapy/test_montecarlo.py +22 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multigpu.py +140 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multiprocessing.py +46 -0
- numba_cuda/numba/cuda/tests/cudapy/test_multithreads.py +101 -0
- numba_cuda/numba/cuda/tests/cudapy/test_nondet.py +49 -0
- numba_cuda/numba/cuda/tests/cudapy/test_operator.py +401 -0
- numba_cuda/numba/cuda/tests/cudapy/test_optimization.py +86 -0
- numba_cuda/numba/cuda/tests/cudapy/test_overload.py +335 -0
- numba_cuda/numba/cuda/tests/cudapy/test_powi.py +124 -0
- numba_cuda/numba/cuda/tests/cudapy/test_print.py +128 -0
- numba_cuda/numba/cuda/tests/cudapy/test_py2_div_issue.py +33 -0
- numba_cuda/numba/cuda/tests/cudapy/test_random.py +104 -0
- numba_cuda/numba/cuda/tests/cudapy/test_record_dtype.py +610 -0
- numba_cuda/numba/cuda/tests/cudapy/test_recursion.py +125 -0
- numba_cuda/numba/cuda/tests/cudapy/test_reduction.py +76 -0
- numba_cuda/numba/cuda/tests/cudapy/test_retrieve_autoconverted_arrays.py +83 -0
- numba_cuda/numba/cuda/tests/cudapy/test_serialize.py +85 -0
- numba_cuda/numba/cuda/tests/cudapy/test_slicing.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sm.py +444 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sm_creation.py +205 -0
- numba_cuda/numba/cuda/tests/cudapy/test_sync.py +271 -0
- numba_cuda/numba/cuda/tests/cudapy/test_transpose.py +80 -0
- numba_cuda/numba/cuda/tests/cudapy/test_ufuncs.py +277 -0
- numba_cuda/numba/cuda/tests/cudapy/test_userexc.py +47 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vector_type.py +307 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize.py +283 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_complex.py +20 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_decor.py +69 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_device.py +36 -0
- numba_cuda/numba/cuda/tests/cudapy/test_vectorize_scalar_arg.py +37 -0
- numba_cuda/numba/cuda/tests/cudapy/test_warning.py +139 -0
- numba_cuda/numba/cuda/tests/cudapy/test_warp_ops.py +276 -0
- numba_cuda/numba/cuda/tests/cudasim/__init__.py +6 -0
- numba_cuda/numba/cuda/tests/cudasim/support.py +6 -0
- numba_cuda/numba/cuda/tests/cudasim/test_cudasim_issues.py +102 -0
- numba_cuda/numba/cuda/tests/data/__init__.py +0 -0
- numba_cuda/numba/cuda/tests/data/cuda_include.cu +5 -0
- numba_cuda/numba/cuda/tests/data/error.cu +7 -0
- numba_cuda/numba/cuda/tests/data/jitlink.cu +23 -0
- numba_cuda/numba/cuda/tests/data/jitlink.ptx +51 -0
- numba_cuda/numba/cuda/tests/data/warn.cu +7 -0
- numba_cuda/numba/cuda/tests/doc_examples/__init__.py +6 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/__init__.py +0 -0
- numba_cuda/numba/cuda/tests/doc_examples/ffi/functions.cu +49 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_cg.py +77 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py +76 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_ffi.py +82 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_laplace.py +155 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_matmul.py +173 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_montecarlo.py +109 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_random.py +59 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_reduction.py +76 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_sessionize.py +130 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_ufunc.py +50 -0
- numba_cuda/numba/cuda/tests/doc_examples/test_vecadd.py +73 -0
- numba_cuda/numba/cuda/tests/nocuda/__init__.py +8 -0
- numba_cuda/numba/cuda/tests/nocuda/test_dummyarray.py +359 -0
- numba_cuda/numba/cuda/tests/nocuda/test_function_resolution.py +36 -0
- numba_cuda/numba/cuda/tests/nocuda/test_import.py +49 -0
- numba_cuda/numba/cuda/tests/nocuda/test_library_lookup.py +238 -0
- numba_cuda/numba/cuda/tests/nocuda/test_nvvm.py +54 -0
- numba_cuda/numba/cuda/types.py +37 -0
- numba_cuda/numba/cuda/ufuncs.py +662 -0
- numba_cuda/numba/cuda/vector_types.py +209 -0
- numba_cuda/numba/cuda/vectorizers.py +252 -0
- numba_cuda-0.0.12.dist-info/LICENSE +25 -0
- numba_cuda-0.0.12.dist-info/METADATA +68 -0
- numba_cuda-0.0.12.dist-info/RECORD +231 -0
- {numba_cuda-0.0.1.dist-info → numba_cuda-0.0.12.dist-info}/WHEEL +1 -1
- numba_cuda-0.0.1.dist-info/METADATA +0 -10
- numba_cuda-0.0.1.dist-info/RECORD +0 -5
- {numba_cuda-0.0.1.dist-info → numba_cuda-0.0.12.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,440 @@
|
|
1
|
+
import re
|
2
|
+
from functools import cached_property
|
3
|
+
import llvmlite.binding as ll
|
4
|
+
from llvmlite import ir
|
5
|
+
|
6
|
+
from numba.core import (cgutils, config, debuginfo, itanium_mangler, types,
|
7
|
+
typing, utils)
|
8
|
+
from numba.core.dispatcher import Dispatcher
|
9
|
+
from numba.core.base import BaseContext
|
10
|
+
from numba.core.callconv import BaseCallConv, MinimalCallConv
|
11
|
+
from numba.core.typing import cmathdecl
|
12
|
+
from numba.core import datamodel
|
13
|
+
|
14
|
+
from .cudadrv import nvvm
|
15
|
+
from numba.cuda import codegen, nvvmutils, ufuncs
|
16
|
+
from numba.cuda.models import cuda_data_manager
|
17
|
+
|
18
|
+
# -----------------------------------------------------------------------------
|
19
|
+
# Typing
|
20
|
+
|
21
|
+
|
22
|
+
class CUDATypingContext(typing.BaseContext):
    """Typing context for the CUDA target.

    Extends the base typing context with the registries for CUDA
    intrinsics, device math, libdevice functions, enums and vector types,
    and transparently wraps CPU dispatchers as device functions when they
    are referenced from device code.
    """

    def load_additional_registries(self):
        # Imported here (not at module level) to defer the cost of building
        # the registries until a typing context is actually constructed.
        from . import cudadecl, cudamath, libdevicedecl, vector_types
        from numba.core.typing import enumdecl, cffi_utils

        self.install_registry(cudadecl.registry)
        self.install_registry(cffi_utils.registry)
        self.install_registry(cudamath.registry)
        self.install_registry(cmathdecl.registry)
        self.install_registry(libdevicedecl.registry)
        self.install_registry(enumdecl.registry)
        self.install_registry(vector_types.typing_registry)

    def resolve_value_type(self, val):
        """Resolve the Numba type of the Python value *val*.

        A non-CUDA :class:`Dispatcher` (a CPU-jitted function) is replaced
        by a ``CUDADispatcher`` compiled as a device function, so that
        CPU-jitted functions can be called from kernels. The resulting
        device dispatcher is cached on the original dispatcher object so
        repeated references reuse the same compiled function.

        Raises ``ValueError`` if the CPU dispatcher has compilation
        disabled (``_can_compile`` is false).
        """
        # treat other dispatcher object as another device function
        from numba.cuda.dispatcher import CUDADispatcher
        if (isinstance(val, Dispatcher) and not
                isinstance(val, CUDADispatcher)):
            try:
                # use cached device function
                # NOTE: the double underscore name-mangles this attribute to
                # _CUDATypingContext__dispatcher, keeping it private here.
                val = val.__dispatcher
            except AttributeError:
                if not val._can_compile:
                    raise ValueError('using cpu function on device '
                                     'but its compilation is disabled')
                # Build device-function target options from the CPU
                # dispatcher's options, forcing device=True.
                targetoptions = val.targetoptions.copy()
                targetoptions['device'] = True
                targetoptions['debug'] = targetoptions.get('debug', False)
                targetoptions['opt'] = targetoptions.get('opt', True)
                disp = CUDADispatcher(val.py_func, targetoptions)
                # cache the device function for future use and to avoid
                # duplicated copy of the same function.
                val.__dispatcher = disp
                val = disp

        # continue with parent logic
        return super(CUDATypingContext, self).resolve_value_type(val)
|
59
|
+
|
60
|
+
# -----------------------------------------------------------------------------
|
61
|
+
# Implementation
|
62
|
+
|
63
|
+
|
64
|
+
VALID_CHARS = re.compile(r'[^a-z0-9]', re.I)
|
65
|
+
|
66
|
+
|
67
|
+
class CUDATargetContext(BaseContext):
    """Lowering (target) context for the CUDA backend.

    Generates NVVM-compatible LLVM IR: kernels get a wrapper entry point,
    constant data is placed in the CUDA constant address space, and the
    CUDA data model manager and call convention are installed.
    """
    # pow(x, i) lowers to a math-library call rather than inline expansion.
    implement_powi_as_math_call = True
    strict_alignment = True

    def __init__(self, typingctx, target='cuda'):
        super().__init__(typingctx, target)
        # Chain the CUDA-specific data models in front of the defaults so
        # CUDA overrides take precedence.
        self.data_model_manager = cuda_data_manager.chain(
            datamodel.default_manager
        )

    @property
    def DIBuilder(self):
        # Debug-info builder class used when compiling with debug/lineinfo.
        return debuginfo.DIBuilder

    @property
    def enable_boundscheck(self):
        # Unconditionally disabled
        return False

    # Overrides
    def create_module(self, name):
        """Create an empty LLVM module configured for NVVM."""
        return self._internal_codegen._create_empty_module(name)

    def init(self):
        # Lazily-created target data (see the target_data property).
        self._internal_codegen = codegen.JITCUDACodegen("numba.cuda.jit")
        self._target_data = None

    def load_additional_registries(self):
        """Install the lowering registries used by the CUDA target."""
        # side effect of import needed for numba.cpython.*, the builtins
        # registry is updated at import time.
        from numba.cpython import numbers, tupleobj, slicing # noqa: F401
        from numba.cpython import rangeobj, iterators, enumimpl # noqa: F401
        from numba.cpython import unicode, charseq # noqa: F401
        from numba.cpython import cmathimpl
        from numba.misc import cffiimpl
        from numba.np import arrayobj # noqa: F401
        from numba.np import npdatetime # noqa: F401
        from . import (
            cudaimpl, printimpl, libdeviceimpl, mathimpl, vector_types
        )
        # fix for #8940
        from numba.np.unsafe import ndarray # noqa F401

        self.install_registry(cudaimpl.registry)
        self.install_registry(cffiimpl.registry)
        self.install_registry(printimpl.registry)
        self.install_registry(libdeviceimpl.registry)
        self.install_registry(cmathimpl.registry)
        self.install_registry(mathimpl.registry)
        self.install_registry(vector_types.impl_registry)

    def codegen(self):
        return self._internal_codegen

    @property
    def target_data(self):
        # Created on first use from the NVVM data layout and cached.
        if self._target_data is None:
            self._target_data = ll.create_target_data(nvvm.NVVM().data_layout)
        return self._target_data

    @cached_property
    def nonconst_module_attrs(self):
        """
        Some CUDA intrinsics are at the module level, but cannot be treated as
        constants, because they are loaded from a special register in the PTX.
        These include threadIdx, blockDim, etc.
        """
        from numba import cuda
        nonconsts = ('threadIdx', 'blockDim', 'blockIdx', 'gridDim', 'laneid',
                     'warpsize')
        nonconsts_with_mod = tuple([(types.Module(cuda), nc)
                                    for nc in nonconsts])
        return nonconsts_with_mod

    @cached_property
    def call_conv(self):
        return CUDACallConv(self)

    def mangler(self, name, argtypes, *, abi_tags=(), uid=None):
        # Itanium C++ mangling so symbols are valid PTX identifiers.
        return itanium_mangler.mangle(name, argtypes, abi_tags=abi_tags,
                                      uid=uid)

    def prepare_cuda_kernel(self, codelib, fndesc, debug, lineinfo,
                            nvvm_options, filename, linenum,
                            max_registers=None):
        """
        Adapt a code library ``codelib`` with the numba compiled CUDA kernel
        with name ``fname`` and arguments ``argtypes`` for NVVM.
        A new library is created with a wrapper function that can be used as
        the kernel entry point for the given kernel.

        Returns the new code library and the wrapper function.

        Parameters:

        codelib: The CodeLibrary containing the device function to wrap
                 in a kernel call.
        fndesc: The FunctionDescriptor of the source function.
        debug: Whether to compile with debug.
        lineinfo: Whether to emit line info.
        nvvm_options: Dict of NVVM options used when compiling the new library.
        filename: The source filename that the function is contained in.
        linenum: The source line that the function is on.
        max_registers: The max_registers argument for the code library.
        """
        kernel_name = itanium_mangler.prepend_namespace(
            fndesc.llvm_func_name, ns='cudapy',
        )
        library = self.codegen().create_library(f'{codelib.name}_kernel_',
                                                entry_name=kernel_name,
                                                nvvm_options=nvvm_options,
                                                max_registers=max_registers)
        library.add_linking_library(codelib)
        wrapper = self.generate_kernel_wrapper(library, fndesc, kernel_name,
                                               debug, lineinfo, filename,
                                               linenum)
        return library, wrapper

    def generate_kernel_wrapper(self, library, fndesc, kernel_name, debug,
                                lineinfo, filename, linenum):
        """
        Generate the kernel wrapper in the given ``library``.
        The function being wrapped is described by ``fndesc``.
        The wrapper function is returned.
        """

        argtypes = fndesc.argtypes
        arginfo = self.get_arg_packer(argtypes)
        argtys = list(arginfo.argument_types)
        # The wrapper is a void kernel; the wrapped device function returns
        # a status code through the Numba calling convention.
        wrapfnty = ir.FunctionType(ir.VoidType(), argtys)
        wrapper_module = self.create_module("cuda.kernel.wrapper")
        fnty = ir.FunctionType(ir.IntType(32),
                               [self.call_conv.get_return_type(types.pyobject)]
                               + argtys)
        func = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name)

        prefixed = itanium_mangler.prepend_namespace(func.name, ns='cudapy')
        wrapfn = ir.Function(wrapper_module, wrapfnty, prefixed)
        builder = ir.IRBuilder(wrapfn.append_basic_block(''))

        if debug or lineinfo:
            # With lineinfo but no debug, only line-mapping directives are
            # emitted (no full variable debug info).
            directives_only = lineinfo and not debug
            debuginfo = self.DIBuilder(module=wrapper_module,
                                       filepath=filename,
                                       cgctx=self,
                                       directives_only=directives_only)
            debuginfo.mark_subprogram(
                wrapfn, kernel_name, fndesc.args, argtypes, linenum,
            )
            debuginfo.mark_location(builder, linenum)

        # Define error handling variable
        def define_error_gv(postfix):
            # One i32 global per error slot, named after the wrapper.
            name = wrapfn.name + postfix
            gv = cgutils.add_global_variable(wrapper_module, ir.IntType(32),
                                             name)
            gv.initializer = ir.Constant(gv.type.pointee, None)
            return gv

        gv_exc = define_error_gv("__errcode__")
        gv_tid = []
        gv_ctaid = []
        for i in 'xyz':
            gv_tid.append(define_error_gv("__tid%s__" % i))
            gv_ctaid.append(define_error_gv("__ctaid%s__" % i))

        callargs = arginfo.from_arguments(builder, wrapfn.args)
        status, _ = self.call_conv.call_function(
            builder, func, types.void, argtypes, callargs)

        if debug:
            # Check error status
            with cgutils.if_likely(builder, status.is_ok):
                builder.ret_void()

            with builder.if_then(builder.not_(status.is_python_exc)):
                # User exception raised
                old = ir.Constant(gv_exc.type.pointee, None)

                # Use atomic cmpxchg to prevent rewriting the error status
                # Only the first error is recorded

                xchg = builder.cmpxchg(gv_exc, old, status.code,
                                       'monotonic', 'monotonic')
                changed = builder.extract_value(xchg, 1)

                # If the exchange is successful, save the thread ID.
                sreg = nvvmutils.SRegBuilder(builder)
                with builder.if_then(changed):
                    for dim, ptr, in zip("xyz", gv_tid):
                        val = sreg.tid(dim)
                        builder.store(val, ptr)

                    for dim, ptr, in zip("xyz", gv_ctaid):
                        val = sreg.ctaid(dim)
                        builder.store(val, ptr)

        builder.ret_void()

        nvvm.set_cuda_kernel(wrapfn)
        library.add_ir_module(wrapper_module)
        if debug or lineinfo:
            debuginfo.finalize()
        library.finalize()

        if config.DUMP_LLVM:
            utils.dump_llvm(fndesc, wrapper_module)

        return library.get_function(wrapfn.name)

    def make_constant_array(self, builder, aryty, arr):
        """
        Unlike the parent version, this returns a pointer in the constant
        addrspace.
        """

        lmod = builder.module

        # Emit the raw bytes of the array ('A' preserves the existing
        # C/Fortran memory order) as an i8 array constant.
        constvals = [
            self.get_constant(types.byte, i)
            for i in iter(arr.tobytes(order='A'))
        ]
        constaryty = ir.ArrayType(ir.IntType(8), len(constvals))
        constary = ir.Constant(constaryty, constvals)

        addrspace = nvvm.ADDRSPACE_CONSTANT
        gv = cgutils.add_global_variable(lmod, constary.type, "_cudapy_cmem",
                                         addrspace=addrspace)
        gv.linkage = 'internal'
        gv.global_constant = True
        gv.initializer = constary

        # Preserve the underlying alignment: round the element ABI size up
        # to the next power of two.
        lldtype = self.get_data_type(aryty.dtype)
        align = self.get_abi_sizeof(lldtype)
        gv.align = 2 ** (align - 1).bit_length()

        # Convert to generic address-space
        ptrty = ir.PointerType(ir.IntType(8))
        genptr = builder.addrspacecast(gv, ptrty, 'generic')

        # Create array object
        ary = self.make_array(aryty)(self, builder)
        kshape = [self.get_constant(types.intp, s) for s in arr.shape]
        kstrides = [self.get_constant(types.intp, s) for s in arr.strides]
        self.populate_array(ary, data=builder.bitcast(genptr, ary.data.type),
                            shape=kshape,
                            strides=kstrides,
                            itemsize=ary.itemsize, parent=ary.parent,
                            meminfo=None)

        return ary._getvalue()

    def insert_const_string(self, mod, string):
        """
        Unlike the parent version, this returns a pointer in the constant
        addrspace.
        """
        text = cgutils.make_bytearray(string.encode("utf-8") + b"\x00")
        # Deduplicate by naming the global after the mangled string content.
        name = '$'.join(["__conststring__",
                         itanium_mangler.mangle_identifier(string)])
        # Try to reuse existing global
        gv = mod.globals.get(name)
        if gv is None:
            # Not defined yet
            gv = cgutils.add_global_variable(mod, text.type, name,
                                             addrspace=nvvm.ADDRSPACE_CONSTANT)
            gv.linkage = 'internal'
            gv.global_constant = True
            gv.initializer = text

        # Cast to a i8* pointer
        charty = gv.type.pointee.element
        return gv.bitcast(charty.as_pointer(nvvm.ADDRSPACE_CONSTANT))

    def insert_string_const_addrspace(self, builder, string):
        """
        Insert a constant string in the constant addresspace and return a
        generic i8 pointer to the data.

        This function attempts to deduplicate.
        """
        lmod = builder.module
        gv = self.insert_const_string(lmod, string)
        charptrty = ir.PointerType(ir.IntType(8))
        return builder.addrspacecast(gv, charptrty, 'generic')

    def optimize_function(self, func):
        """Run O1 function passes
        """
        pass
        ## XXX skipped for now
        # fpm = lp.FunctionPassManager.new(func.module)
        #
        # lp.PassManagerBuilder.new().populate(fpm)
        #
        # fpm.initialize()
        # fpm.run(func)
        # fpm.finalize()

    def get_ufunc_info(self, ufunc_key):
        # Delegates to the CUDA ufunc database.
        return ufuncs.get_ufunc_info(ufunc_key)
|
369
|
+
|
370
|
+
|
371
|
+
class CUDACallConv(MinimalCallConv):
    """The default CUDA call convention: the minimal Numba calling
    convention, returning a status code alongside the result so kernels
    can report exceptions."""
    pass
|
373
|
+
|
374
|
+
|
375
|
+
class CUDACABICallConv(BaseCallConv):
    """
    Calling convention aimed at matching the CUDA C/C++ ABI. The implemented
    function signature is:

        <Python return type> (<Python arguments>)

    Exceptions are unsupported in this convention.
    """

    def _make_call_helper(self, builder):
        # Call helpers are used to help report exceptions back to Python, so
        # none is required here.
        return None

    def return_value(self, builder, retval):
        # Plain C-style return: no status code is packed around the value.
        return builder.ret(retval)

    def return_user_exc(self, builder, exc, exc_args=None, loc=None,
                        func_name=None):
        # There is no status channel to carry an exception in the C ABI.
        msg = "Python exceptions are unsupported in the CUDA C/C++ ABI"
        raise NotImplementedError(msg)

    def return_status_propagate(self, builder, status):
        # See return_user_exc: no status channel exists in this convention.
        msg = "Return status is unsupported in the CUDA C/C++ ABI"
        raise NotImplementedError(msg)

    def get_function_type(self, restype, argtypes):
        """
        Get the LLVM IR Function type for *restype* and *argtypes*.
        """
        arginfo = self._get_arg_packer(argtypes)
        argtypes = list(arginfo.argument_types)
        fnty = ir.FunctionType(self.get_return_type(restype), argtypes)
        return fnty

    def decorate_function(self, fn, args, fe_argtypes, noalias=False):
        """
        Set names and attributes of function arguments.

        ``noalias`` is not supported by this convention and must be False.
        """
        assert not noalias
        arginfo = self._get_arg_packer(fe_argtypes)
        arginfo.assign_names(self.get_arguments(fn),
                             ['arg.' + a for a in args])

    def get_arguments(self, func):
        """
        Get the Python-level arguments of LLVM *func*.
        """
        # No environment or status arguments precede them in this ABI.
        return func.args

    def call_function(self, builder, callee, resty, argtys, args):
        """
        Call the Numba-compiled *callee*.

        Returns a ``(status, result)`` pair; *status* is always None.
        """
        arginfo = self._get_arg_packer(argtys)
        realargs = arginfo.as_arguments(builder, args)
        code = builder.call(callee, realargs)
        # No status required as we don't support exceptions or a distinct None
        # value in a C ABI.
        status = None
        out = self.context.get_returned_value(builder, resty, code)
        return status, out

    def get_return_type(self, ty):
        # The data model decides how *ty* is represented as a C return value.
        return self.context.data_model_manager[ty].get_return_type()
|
@@ -0,0 +1,202 @@
|
|
1
|
+
import os
|
2
|
+
import platform
|
3
|
+
import shutil
|
4
|
+
|
5
|
+
from numba.tests.support import SerialMixin
|
6
|
+
from numba.cuda.cuda_paths import get_conda_ctk
|
7
|
+
from numba.cuda.cudadrv import driver, devices, libs
|
8
|
+
from numba.core import config
|
9
|
+
from numba.tests.support import TestCase
|
10
|
+
from pathlib import Path
|
11
|
+
import unittest
|
12
|
+
|
13
|
+
numba_cuda_dir = Path(__file__).parent
|
14
|
+
test_data_dir = numba_cuda_dir / 'tests' / 'data'
|
15
|
+
|
16
|
+
|
17
|
+
class CUDATestCase(SerialMixin, TestCase):
    """
    Base class for tests that need a CUDA device.

    Test methods of a CUDATestCase must not be interleaved with those of a
    ContextResettingTestCase: resetting the context destroys resources
    (contexts, allocations) that an in-flight CUDATestCase may still rely
    on, so module test order must be preserved.
    """

    def setUp(self):
        # Capture the current warning configuration so tearDown can put it
        # back exactly as it was.
        self._low_occupancy_warnings = config.CUDA_LOW_OCCUPANCY_WARNINGS
        self._warn_on_implicit_copy = config.CUDA_WARN_ON_IMPLICIT_COPY
        # The test suite intentionally launches tiny kernels and passes host
        # arrays, so both warning categories are silenced while tests run.
        config.CUDA_LOW_OCCUPANCY_WARNINGS = 0
        config.CUDA_WARN_ON_IMPLICIT_COPY = 0

    def tearDown(self):
        # Restore the warning configuration captured in setUp.
        config.CUDA_LOW_OCCUPANCY_WARNINGS = self._low_occupancy_warnings
        config.CUDA_WARN_ON_IMPLICIT_COPY = self._warn_on_implicit_copy

    def skip_if_lto(self, reason):
        """Skip the running test when the active linker performs LTO."""
        # Some linkers need the compute capability to be specified, so it is
        # always passed explicitly here.
        current_cc = devices.get_context().device.compute_capability
        probe_linker = driver.Linker.new(cc=current_cc)
        if probe_linker.lto:
            self.skipTest(reason)
|
45
|
+
|
46
|
+
|
47
|
+
class ContextResettingTestCase(CUDATestCase):
    """
    Base class for tests that must reset the CUDA context after each test.
    These typically inspect or mutate state that is normally an internal
    implementation detail of the context (allocation/deallocation state,
    for example).
    """

    def tearDown(self):
        super().tearDown()
        # Imported lazily so that merely importing this module does not
        # pull in the device machinery.
        from numba.cuda.cudadrv.devices import reset as reset_context
        reset_context()
|
59
|
+
|
60
|
+
|
61
|
+
def ensure_supported_ccs_initialized():
    """Load cudart and populate NVVM's supported-CC list before any fork.

    Some compilation tests use NVVM without requiring a CUDA context, but
    libcudart.so must already be loaded before a fork (a requirement that
    is not explicitly documented). Querying the supported compute
    capabilities here satisfies both needs. No-op when CUDA is unavailable.
    """
    from numba.cuda import is_available as cuda_is_available
    from numba.cuda.cudadrv import nvvm

    if not cuda_is_available():
        return
    nvvm.get_supported_ccs()
|
73
|
+
|
74
|
+
|
75
|
+
def skip_on_cudasim(reason):
    """Decorator: skip the test when running on the CUDA simulator."""
    on_simulator = config.ENABLE_CUDASIM
    return unittest.skipIf(on_simulator, reason)
|
78
|
+
|
79
|
+
|
80
|
+
def skip_unless_cudasim(reason):
    """Decorator: skip the test when running on real CUDA hardware."""
    on_simulator = config.ENABLE_CUDASIM
    return unittest.skipUnless(on_simulator, reason)
|
83
|
+
|
84
|
+
|
85
|
+
def skip_unless_conda_cudatoolkit(reason):
    """Decorator: skip the test when the CUDA toolkit did not come from Conda."""
    have_conda_ctk = get_conda_ctk() is not None
    return unittest.skipUnless(have_conda_ctk, reason)
|
88
|
+
|
89
|
+
|
90
|
+
def skip_if_external_memmgr(reason):
    """Decorator: skip the test when an external EMM plugin is configured."""
    using_default_mm = config.CUDA_MEMORY_MANAGER == 'default'
    return unittest.skipUnless(using_default_mm, reason)
|
93
|
+
|
94
|
+
|
95
|
+
def skip_under_cuda_memcheck(reason):
    """Decorator: skip the test when running under CUDA memcheck."""
    under_memcheck = 'CUDA_MEMCHECK' in os.environ
    return unittest.skipIf(under_memcheck, reason)
|
97
|
+
|
98
|
+
|
99
|
+
def skip_without_nvdisasm(reason):
    """Decorator: skip the test when the nvdisasm tool is not on PATH."""
    return unittest.skipIf(shutil.which('nvdisasm') is None, reason)
|
102
|
+
|
103
|
+
|
104
|
+
def skip_with_nvdisasm(reason):
    """Decorator: skip the test when the nvdisasm tool is on PATH."""
    return unittest.skipIf(shutil.which('nvdisasm') is not None, reason)
|
107
|
+
|
108
|
+
|
109
|
+
def skip_on_arm(reason):
    """Decorator: skip the test on ARM (``arm*`` / ``aarch*``) processors."""
    # str.startswith accepts a tuple of candidate prefixes.
    is_arm = platform.processor().startswith(('arm', 'aarch'))
    return unittest.skipIf(is_arm, reason)
|
113
|
+
|
114
|
+
|
115
|
+
def skip_if_cuda_includes_missing(fn):
    """Skip *fn* when ``cuda.h`` is not present under config.CUDA_INCLUDE_PATH.

    The presence of cuda.h is used as a proxy for the availability of the
    CUDA include directory as a whole.
    """
    cuda_h = os.path.join(config.CUDA_INCLUDE_PATH, 'cuda.h')
    # os.path.isfile() already returns False for a nonexistent path, so a
    # separate os.path.exists() check is redundant.
    reason = 'CUDA include dir not available on this system'
    return unittest.skipUnless(os.path.isfile(cuda_h), reason)(fn)
|
122
|
+
|
123
|
+
|
124
|
+
def skip_if_mvc_enabled(reason):
    """Decorator: skip the test when Minor Version Compatibility is enabled."""
    mvc_on = config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY
    return unittest.skipIf(mvc_on, reason)
|
128
|
+
|
129
|
+
|
130
|
+
def skip_if_mvc_libraries_unavailable(fn):
    """Skip *fn* unless both cubinlinker and ptxcompiler can be imported."""
    try:
        import cubinlinker  # noqa: F401
        import ptxcompiler  # noqa: F401
    except ImportError:
        have_libs = False
    else:
        have_libs = True

    return unittest.skipUnless(have_libs,
                               "Requires cubinlinker and ptxcompiler")(fn)
|
141
|
+
|
142
|
+
|
143
|
+
def cc_X_or_above(major, minor):
    """Return True if the current device's compute capability is at least
    ``(major, minor)``.

    Always True on the simulator, which has no real compute capability.
    """
    if config.ENABLE_CUDASIM:
        return True
    return devices.get_context().device.compute_capability >= (major, minor)
|
149
|
+
|
150
|
+
|
151
|
+
def skip_unless_cc_50(fn):
    """Skip *fn* on devices with compute capability below 5.0."""
    supported = cc_X_or_above(5, 0)
    return unittest.skipUnless(supported, "requires cc >= 5.0")(fn)
|
153
|
+
|
154
|
+
|
155
|
+
def skip_unless_cc_53(fn):
    """Skip *fn* on devices with compute capability below 5.3."""
    supported = cc_X_or_above(5, 3)
    return unittest.skipUnless(supported, "requires cc >= 5.3")(fn)
|
157
|
+
|
158
|
+
|
159
|
+
def skip_unless_cc_60(fn):
    """Skip *fn* on devices with compute capability below 6.0."""
    supported = cc_X_or_above(6, 0)
    return unittest.skipUnless(supported, "requires cc >= 6.0")(fn)
|
161
|
+
|
162
|
+
|
163
|
+
def skip_unless_cc_75(fn):
    """Skip *fn* on devices with compute capability below 7.5."""
    supported = cc_X_or_above(7, 5)
    return unittest.skipUnless(supported, "requires cc >= 7.5")(fn)
|
165
|
+
|
166
|
+
|
167
|
+
def xfail_unless_cudasim(fn):
    """Mark *fn* as an expected failure, except on the simulator where it
    is left untouched."""
    if not config.ENABLE_CUDASIM:
        return unittest.expectedFailure(fn)
    return fn
|
172
|
+
|
173
|
+
|
174
|
+
def skip_with_cuda_python(reason):
    """Decorator: skip the test when the NVIDIA CUDA Python binding is in use."""
    using_nv_binding = driver.USE_NV_BINDING
    return unittest.skipIf(using_nv_binding, reason)
|
176
|
+
|
177
|
+
|
178
|
+
def cudadevrt_missing():
    """Return True when the static cudadevrt library cannot be found.

    Always False on the simulator, which does not link against cudadevrt.
    """
    if config.ENABLE_CUDASIM:
        return False
    try:
        path = libs.get_cudalib('cudadevrt', static=True)
        libs.check_static_lib(path)
    except FileNotFoundError:
        return True
    else:
        return False
|
187
|
+
|
188
|
+
|
189
|
+
def skip_if_cudadevrt_missing(fn):
    """Skip *fn* when the static cudadevrt library is unavailable."""
    missing = cudadevrt_missing()
    return unittest.skipIf(missing, 'cudadevrt missing')(fn)
|
191
|
+
|
192
|
+
|
193
|
+
class ForeignArray(object):
    """
    Emulate an array coming from another library via the CUDA Array
    Interface: wrap a DeviceNDArray so that it no longer looks like one,
    while still publishing its ``__cuda_array_interface__``.
    """

    def __init__(self, arr):
        # Keep the wrapped array alive; only its interface dict is exposed.
        self._arr = arr
        self.__cuda_array_interface__ = arr.__cuda_array_interface__
|
@@ -0,0 +1,58 @@
|
|
1
|
+
from fnmatch import fnmatch
|
2
|
+
from numba.cuda.testing import ensure_supported_ccs_initialized
|
3
|
+
from numba.testing import unittest
|
4
|
+
from numba import cuda
|
5
|
+
from os.path import dirname, isfile, join, normpath, relpath, splitext
|
6
|
+
|
7
|
+
import os
|
8
|
+
import sys
|
9
|
+
import traceback
|
10
|
+
|
11
|
+
|
12
|
+
# Copied and modified from numba/testing/__init__.py, to handle the difference
|
13
|
+
# between the top dirs for Numba and the CUDA target
|
14
|
+
def load_testsuite(loader, dir):
    """Discover and return a TestSuite for the tests under *dir*.

    Unlike the implementation in numba/testing/__init__.py this one uses
    the numba_cuda module location (not numba's) as the top-level dir, so
    test module names are computed relative to it.
    """
    top_level_dir = dirname(dirname(dirname(dirname(__file__))))
    try:
        suite = unittest.TestSuite()
        module_files = []
        for entry in os.listdir(dir):
            full = join(dir, entry)
            if isfile(full) and fnmatch(entry, 'test_*.py'):
                # Plain test module: loaded by name in the second pass.
                module_files.append(entry)
            elif isfile(join(full, '__init__.py')):
                # Sub-package: recurse via unittest discovery.
                suite.addTests(loader.discover(full,
                                               top_level_dir=top_level_dir))
        for entry in module_files:
            # Turn the filename into a path relative to the top-level dir,
            # then into a dotted module name.
            rel = relpath(join(dir, entry), top_level_dir)
            modname = splitext(normpath(rel.replace(os.path.sep, '.')))[0]
            suite.addTests(loader.loadTestsFromName(modname))
        return suite
    except Exception:
        # Mirror numba.testing behaviour: report the error and abort the
        # whole test run.
        traceback.print_exc(file=sys.stderr)
        sys.exit(-1)
|
39
|
+
|
40
|
+
|
41
|
+
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` protocol entry point for the CUDA test tree.

    Always includes the 'nocuda' tests; device-dependent suites are added
    only when CUDA is available and the first GPU has CC >= 2.0.
    """
    suite = unittest.TestSuite()
    here = dirname(__file__)
    ensure_supported_ccs_initialized()
    suite.addTests(load_testsuite(loader, join(here, 'nocuda')))
    if not cuda.is_available():
        print("skipped CUDA tests")
        return suite
    suite.addTests(load_testsuite(loader, join(here, 'cudasim')))
    gpus = cuda.list_devices()
    if gpus and gpus[0].compute_capability >= (2, 0):
        for subdir in ('cudadrv', 'cudapy', 'doc_examples'):
            suite.addTests(load_testsuite(loader, join(here, subdir)))
    else:
        print("skipped CUDA tests because GPU CC < 2.0")
    return suite
|