numba-cuda 0.14.0__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
numba_cuda/VERSION CHANGED
@@ -1 +1 @@
- 0.14.0
+ 0.15.1

numba_cuda/numba/cuda/__init__.py CHANGED
@@ -1,5 +1,47 @@
+ import importlib
  from numba import runtests
  from numba.core import config
+ from .utils import _readenv
+
+ # Enable pynvjitlink if the environment variables NUMBA_CUDA_ENABLE_PYNVJITLINK
+ # or CUDA_ENABLE_PYNVJITLINK are set, or if the pynvjitlink module is found. If
+ # explicitly disabled, do not use pynvjitlink, even if present in the env.
+ _pynvjitlink_enabled_in_env = _readenv(
+     "NUMBA_CUDA_ENABLE_PYNVJITLINK", bool, None
+ )
+ _pynvjitlink_enabled_in_cfg = getattr(config, "CUDA_ENABLE_PYNVJITLINK", None)
+
+ if _pynvjitlink_enabled_in_env is not None:
+     ENABLE_PYNVJITLINK = _pynvjitlink_enabled_in_env
+ elif _pynvjitlink_enabled_in_cfg is not None:
+     ENABLE_PYNVJITLINK = _pynvjitlink_enabled_in_cfg
+ else:
+     ENABLE_PYNVJITLINK = importlib.util.find_spec("pynvjitlink") is not None
+
+ if not hasattr(config, "CUDA_ENABLE_PYNVJITLINK"):
+     config.CUDA_ENABLE_PYNVJITLINK = ENABLE_PYNVJITLINK
+
+ # Upstream numba sets CUDA_USE_NVIDIA_BINDING to 0 by default, so it always
+ # exists. Override, but not if explicitly set to 0 in the environment.
+ _nvidia_binding_enabled_in_env = _readenv(
+     "NUMBA_CUDA_USE_NVIDIA_BINDING", bool, None
+ )
+ if _nvidia_binding_enabled_in_env is False:
+     USE_NV_BINDING = False
+ else:
+     USE_NV_BINDING = True
+ config.CUDA_USE_NVIDIA_BINDING = USE_NV_BINDING
+ if config.CUDA_USE_NVIDIA_BINDING:
+     if not importlib.util.find_spec("cuda.bindings"):
+         raise ImportError(
+             "CUDA bindings not found. Please pip install the "
+             "cuda-bindings package. Alternatively, install "
+             "numba-cuda[cuXY], where XY is the required CUDA "
+             "version, to install the binding automatically. "
+             "If no CUDA bindings are desired, set the env var "
+             "NUMBA_CUDA_USE_NVIDIA_BINDING=0 to enable ctypes "
+             "bindings."
+         )

  if config.ENABLE_CUDASIM:
      from .simulator_init import *
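
The block added above makes binding and linker selection an import-time decision driven by environment variables. A minimal illustrative sketch of how a user might exercise it, assuming numba-cuda 0.15.1 is installed (the variable names come from the hunk; the surrounding script is not part of the package):

import os

# These must be set before numba.cuda is imported; the checks above run at import time.
os.environ["NUMBA_CUDA_USE_NVIDIA_BINDING"] = "0"  # fall back to the ctypes binding
os.environ["NUMBA_CUDA_ENABLE_PYNVJITLINK"] = "1"  # opt in to pynvjitlink explicitly

from numba import cuda

print(cuda.is_available())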

numba_cuda/numba/cuda/cudadrv/devicearray.py CHANGED
@@ -121,16 +121,10 @@ class DeviceNDArrayBase(_devicearray.DeviceArray):

      @property
      def __cuda_array_interface__(self):
-         if _driver.USE_NV_BINDING:
-             if self.device_ctypes_pointer is not None:
-                 ptr = int(self.device_ctypes_pointer)
-             else:
-                 ptr = 0
+         if self.device_ctypes_pointer.value is not None:
+             ptr = self.device_ctypes_pointer.value
          else:
-             if self.device_ctypes_pointer.value is not None:
-                 ptr = self.device_ctypes_pointer.value
-             else:
-                 ptr = 0
+             ptr = 0

          return {
              "shape": tuple(self.shape),
@@ -204,10 +198,7 @@ class DeviceNDArrayBase(_devicearray.DeviceArray):
      def device_ctypes_pointer(self):
          """Returns the ctypes pointer to the GPU data buffer"""
          if self.gpu_data is None:
-             if _driver.USE_NV_BINDING:
-                 return _driver.binding.CUdeviceptr(0)
-             else:
-                 return c_void_p(0)
+             return c_void_p(0)
          else:
              return self.gpu_data.device_ctypes_pointer

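
With the branches above removed, device_ctypes_pointer is always a ctypes value, so the array-interface pointer can be read the same way under either binding. A hedged sketch (requires a working CUDA context; not part of the package):

import numpy as np
from numba import cuda

d_arr = cuda.to_device(np.arange(4, dtype=np.float32))
ptr, _readonly = d_arr.__cuda_array_interface__["data"]

# Both expressions refer to the same integer address, whichever binding is active.
assert ptr == d_arr.device_ctypes_pointer.value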

numba_cuda/numba/cuda/cudadrv/driver.py CHANGED
@@ -49,7 +49,7 @@ from .drvapi import API_PROTOTYPES
  from .drvapi import cu_occupancy_b2d_size, cu_stream_callback_pyobj, cu_uuid
  from .mappings import FILE_EXTENSION_MAP
  from .linkable_code import LinkableCode, LTOIR, Fatbin, Object
- from numba.cuda.utils import _readenv, cached_file_read
+ from numba.cuda.utils import cached_file_read
  from numba.cuda.cudadrv import enums, drvapi, nvrtc

  try:
@@ -57,15 +57,6 @@ try:
  except ImportError:
      NvJitLinker, NvJitLinkError = None, None

- USE_NV_BINDING = config.CUDA_USE_NVIDIA_BINDING
-
- if USE_NV_BINDING:
-     from cuda import cuda as binding
-
-     # There is no definition of the default stream in the Nvidia bindings (nor
-     # is there at the C/C++ level), so we define it here so we don't need to
-     # use a magic number 0 in places where we want the default stream.
-     CU_STREAM_DEFAULT = 0

  MIN_REQUIRED_CC = (3, 5)
  SUPPORTS_IPC = sys.platform.startswith("linux")
@@ -82,23 +73,15 @@ _MVC_ERROR_MESSAGE = (
      "to be available"
  )

- # Enable pynvjitlink if the environment variables NUMBA_CUDA_ENABLE_PYNVJITLINK
- # or CUDA_ENABLE_PYNVJITLINK are set, or if the pynvjitlink module is found. If
- # explicitly disabled, do not use pynvjitlink, even if present in the env.
- _pynvjitlink_enabled_in_env = _readenv(
-     "NUMBA_CUDA_ENABLE_PYNVJITLINK", bool, None
- )
- _pynvjitlink_enabled_in_cfg = getattr(config, "CUDA_ENABLE_PYNVJITLINK", None)
+ USE_NV_BINDING = config.CUDA_USE_NVIDIA_BINDING

- if _pynvjitlink_enabled_in_env is not None:
-     ENABLE_PYNVJITLINK = _pynvjitlink_enabled_in_env
- elif _pynvjitlink_enabled_in_cfg is not None:
-     ENABLE_PYNVJITLINK = _pynvjitlink_enabled_in_cfg
- else:
-     ENABLE_PYNVJITLINK = importlib.util.find_spec("pynvjitlink") is not None
+ if USE_NV_BINDING:
+     from cuda.bindings import driver as binding

- if not hasattr(config, "CUDA_ENABLE_PYNVJITLINK"):
-     config.CUDA_ENABLE_PYNVJITLINK = ENABLE_PYNVJITLINK
+     # There is no definition of the default stream in the Nvidia bindings (nor
+     # is there at the C/C++ level), so we define it here so we don't need to
+     # use a magic number 0 in places where we want the default stream.
+     CU_STREAM_DEFAULT = 0


  def make_logger():
@@ -2107,6 +2090,8 @@ class MemoryPointer(object):

      @property
      def device_ctypes_pointer(self):
+         if USE_NV_BINDING:
+             return drvapi.cu_device_ptr(int(self.device_pointer))
          return self.device_pointer

      @property
@@ -3192,7 +3177,6 @@ class CudaPythonLinker(Linker):

          raw_keys = list(options.keys())
          raw_values = list(options.values())
-
          self.handle = driver.cuLinkCreate(len(raw_keys), raw_keys, raw_values)

          weakref.finalize(self, driver.cuLinkDestroy, self.handle)
@@ -3449,8 +3433,8 @@ def device_extents(devmem):
      """
      devptr = device_ctypes_pointer(devmem)
      if USE_NV_BINDING:
-         s, n = driver.cuMemGetAddressRange(devptr)
-         return s, binding.CUdeviceptr(int(s) + n)
+         s, n = driver.cuMemGetAddressRange(devptr.value)
+         return int(s), int(binding.CUdeviceptr(int(s) + n))
      else:
          s = drvapi.cu_device_ptr()
          n = c_size_t()
@@ -3467,10 +3451,7 @@ def device_memory_size(devmem):
      sz = getattr(devmem, "_cuda_memsize_", None)
      if sz is None:
          s, e = device_extents(devmem)
-         if USE_NV_BINDING:
-             sz = int(e) - int(s)
-         else:
-             sz = e - s
+         sz = e - s
      devmem._cuda_memsize_ = sz
      assert sz >= 0, "{} length array".format(sz)
      return sz
@@ -3536,10 +3517,7 @@ def host_memory_size(obj):

  def device_pointer(obj):
      "Get the device pointer as an integer"
-     if USE_NV_BINDING:
-         return obj.device_ctypes_pointer
-     else:
-         return device_ctypes_pointer(obj).value
+     return device_ctypes_pointer(obj).value


  def device_ctypes_pointer(obj):
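
Taken together, the driver.py hunks above normalise pointer handling: the NVIDIA binding is now imported from cuda.bindings.driver, MemoryPointer.device_ctypes_pointer returns a ctypes drvapi.cu_device_ptr in both modes, and helpers such as device_pointer() always go through .value. A rough sketch of the resulting behaviour (requires a CUDA device; illustrative, not a verbatim excerpt):

import numpy as np
from numba import cuda
from numba.cuda.cudadrv import driver, drvapi

d = cuda.device_array(8, dtype=np.uint8)

ptr = d.device_ctypes_pointer
assert isinstance(ptr, drvapi.cu_device_ptr)  # ctypes type under either binding
assert driver.device_pointer(d) == ptr.value  # integer address via .value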

numba_cuda/numba/cuda/cudadrv/mappings.py CHANGED
@@ -2,9 +2,9 @@ from numba import config
  from . import enums

  if config.CUDA_USE_NVIDIA_BINDING:
-     from cuda import cuda
+     from cuda.bindings import driver

-     jitty = cuda.CUjitInputType
+     jitty = driver.CUjitInputType
      FILE_EXTENSION_MAP = {
          "o": jitty.CU_JIT_INPUT_OBJECT,
          "ptx": jitty.CU_JIT_INPUT_PTX,

numba_cuda/numba/cuda/dispatcher.py CHANGED
@@ -588,8 +588,6 @@ class _Kernel(serialize.ReduceMixin):
          elif isinstance(ty, types.Record):
              devrec = wrap_arg(val).to_device(retr, stream)
              ptr = devrec.device_ctypes_pointer
-             if driver.USE_NV_BINDING:
-                 ptr = ctypes.c_void_p(int(ptr))
              kernelargs.append(ptr)

          elif isinstance(ty, types.BaseTuple):
@@ -1009,7 +1007,7 @@ class CUDADispatcher(Dispatcher, serialize.ReduceMixin):
          A (template, pysig, args, kws) tuple is returned.
          """
          # Fold keyword arguments and resolve default values
-         pysig, args = self._compiler.fold_argument_types(args, kws)
+         pysig, args = self.fold_argument_types(args, kws)
          kws = {}

          # Ensure an exactly-matching overload is available if we can

numba_cuda/numba/cuda/memory_management/nrt.py CHANGED
@@ -113,10 +113,13 @@ class _Runtime:
          self._compile_memsys_module()

          # Allocate space for NRT_MemSys
-         ptr, nbytes = self._memsys_module.get_global_symbol("memsys_size")
          memsys_size = ctypes.c_uint64()
+         ptr, nbytes = self._memsys_module.get_global_symbol("memsys_size")
+         device_memsys_size = ptr.device_ctypes_pointer
+         if USE_NV_BINDING:
+             device_memsys_size = device_memsys_size.value
          driver.cuMemcpyDtoH(
-             ctypes.addressof(memsys_size), ptr.device_ctypes_pointer, nbytes
+             ctypes.addressof(memsys_size), device_memsys_size, nbytes
          )
          self._memsys = device_array(
              (memsys_size.value,), dtype="i1", stream=stream
@@ -145,18 +148,6 @@ class _Runtime:
              cooperative=False,
          )

-     def _ctypes_pointer(self, array):
-         """
-         Given an array, return a ctypes pointer to the data suitable for
-         passing to ``launch_kernel``.
-         """
-         ptr = array.device_ctypes_pointer
-
-         if USE_NV_BINDING:
-             ptr = ctypes.c_void_p(int(ptr))
-
-         return ptr
-
      def ensure_initialized(self, stream=None):
          """
          If memsys is not initialized, initialize memsys
@@ -206,7 +197,7 @@ class _Runtime:
          context
          """
          enabled_ar = cuda.managed_array(1, np.uint8)
-         enabled_ptr = self._ctypes_pointer(enabled_ar)
+         enabled_ptr = enabled_ar.device_ctypes_pointer

          self._single_thread_launch(
              self._memsys_module,
@@ -233,7 +224,7 @@ class _Runtime:
          )

          stats_for_read = cuda.managed_array(1, dt)
-         stats_ptr = self._ctypes_pointer(stats_for_read)
+         stats_ptr = stats_for_read.device_ctypes_pointer

          self._single_thread_launch(
              self._memsys_module, stream, "NRT_MemSys_read", [stats_ptr]
@@ -264,7 +255,7 @@ class _Runtime:
          Get a single stat from the memsys
          """
          got = cuda.managed_array(1, np.uint64)
-         got_ptr = self._ctypes_pointer(got)
+         got_ptr = got.device_ctypes_pointer

          self._single_thread_launch(
              self._memsys_module, stream, f"NRT_MemSys_read_{stat}", [got_ptr]
@@ -327,7 +318,7 @@ class _Runtime:
                  "Please allocate NRT Memsys first before setting to module."
              )

-         memsys_ptr = self._ctypes_pointer(self._memsys)
+         memsys_ptr = self._memsys.device_ctypes_pointer

          self._single_thread_launch(
              module, stream, "NRT_MemSys_set", [memsys_ptr]
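
With the _ctypes_pointer helper removed, the NRT runtime now passes a managed array's device_ctypes_pointer straight to its launch helper. The same pattern outside the runtime, as a hedged sketch (requires a CUDA device with managed-memory support; not part of the package):

import numpy as np
from numba import cuda

flag = cuda.managed_array(1, np.uint8)
flag_ptr = flag.device_ctypes_pointer  # already a ctypes pointer; no extra wrapping needed

print(flag_ptr.value)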

numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py CHANGED
@@ -1,4 +1,4 @@
- from ctypes import byref, c_int, c_void_p, sizeof
+ from ctypes import byref, c_int, sizeof

  from numba.cuda.cudadrv.driver import (
      host_to_device,
@@ -94,7 +94,6 @@ class TestCudaDriver(CUDATestCase):
          stream = 0

          if _driver.USE_NV_BINDING:
-             ptr = c_void_p(int(ptr))
              stream = _driver.binding.CUstream(stream)

          launch_kernel(
@@ -129,8 +128,6 @@ class TestCudaDriver(CUDATestCase):
          host_to_device(memory, array, sizeof(array), stream=stream)

          ptr = memory.device_ctypes_pointer
-         if _driver.USE_NV_BINDING:
-             ptr = c_void_p(int(ptr))

          launch_kernel(
              function.handle,  # Kernel

numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py CHANGED
@@ -20,10 +20,7 @@ class TestCudaMemory(ContextResettingTestCase):
      def _template(self, obj):
          self.assertTrue(driver.is_device_memory(obj))
          driver.require_device_memory(obj)
-         if driver.USE_NV_BINDING:
-             expected_class = driver.binding.CUdeviceptr
-         else:
-             expected_class = drvapi.cu_device_ptr
+         expected_class = drvapi.cu_device_ptr
          self.assertTrue(isinstance(obj.device_ctypes_pointer, expected_class))

      def test_device_memory(self):

numba_cuda/numba/cuda/tests/cudadrv/test_linker.py CHANGED
@@ -104,8 +104,6 @@ def simple_lmem(A, B, dty):

  @skip_on_cudasim("Linking unsupported in the simulator")
  class TestLinker(CUDATestCase):
-     _NUMBA_NVIDIA_BINDING_0_ENV = {"NUMBA_CUDA_USE_NVIDIA_BINDING": "0"}
-
      @require_context
      def test_linker_basic(self):
          """Simply go through the constructor and destructor"""

numba_cuda/numba/cuda/tests/cudadrv/test_module_callbacks.py CHANGED
@@ -15,7 +15,7 @@ if not config.ENABLE_CUDASIM:
      from cuda.bindings.driver import cuModuleGetGlobal, cuMemcpyHtoD

      if config.CUDA_USE_NVIDIA_BINDING:
-         from cuda.cuda import CUmodule as cu_module_type
+         from cuda.bindings.driver import CUmodule as cu_module_type
      else:
          from numba.cuda.cudadrv.drvapi import cu_module as cu_module_type


numba_cuda/numba/cuda/tests/cudadrv/test_nvjitlink.py CHANGED
@@ -57,8 +57,6 @@ if TEST_BIN_DIR:
  )
  @skip_on_cudasim("Linking unsupported in the simulator")
  class TestLinker(CUDATestCase):
-     _NUMBA_NVIDIA_BINDING_0_ENV = {"NUMBA_CUDA_USE_NVIDIA_BINDING": "0"}
-
      def test_nvjitlink_create(self):
          patched_linker = PyNvJitLinker(cc=(7, 5))
          assert "-arch=sm_75" in patched_linker.options

numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py CHANGED
@@ -12,10 +12,9 @@ from unittest.mock import call, patch
  @skip_on_cudasim("CUDA Array Interface is not supported in the simulator")
  class TestCudaArrayInterface(ContextResettingTestCase):
      def assertPointersEqual(self, a, b):
-         if driver.USE_NV_BINDING:
-             self.assertEqual(
-                 int(a.device_ctypes_pointer), int(b.device_ctypes_pointer)
-             )
+         self.assertEqual(
+             a.device_ctypes_pointer.value, b.device_ctypes_pointer.value
+         )

      def test_as_cuda_array(self):
          h_arr = np.arange(10)

numba_cuda-0.14.0.dist-info/METADATA → numba_cuda-0.15.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: numba-cuda
- Version: 0.14.0
+ Version: 0.15.1
  Summary: CUDA target for Numba
  Author: Anaconda Inc., NVIDIA Corporation
  License: BSD 2-clause
@@ -13,11 +13,13 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: numba>=0.59.1
  Provides-Extra: cu11
+ Requires-Dist: cuda-bindings==11.8.*; extra == "cu11"
  Requires-Dist: cuda-python==11.8.*; extra == "cu11"
  Requires-Dist: nvidia-cuda-nvcc-cu11; extra == "cu11"
  Requires-Dist: nvidia-cuda-runtime-cu11; extra == "cu11"
  Requires-Dist: nvidia-cuda-nvrtc-cu11; extra == "cu11"
  Provides-Extra: cu12
+ Requires-Dist: cuda-bindings==12.9.*; extra == "cu12"
  Requires-Dist: cuda-python==12.9.*; extra == "cu12"
  Requires-Dist: nvidia-cuda-nvcc-cu12; extra == "cu12"
  Requires-Dist: nvidia-cuda-runtime-cu12; extra == "cu12"
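
The cu11 and cu12 extras now pull in the cuda-bindings package alongside cuda-python, matching the import-time check added in __init__.py. A hedged sketch of inspecting the resolved requirements from an environment where numba-cuda is installed (standard-library API; the filtering is illustrative only):

from importlib.metadata import requires

# List the requirements attached to the cu12 extra, e.g. cuda-bindings==12.9.*
for req in requires("numba-cuda") or []:
    if 'extra == "cu12"' in req:
        print(req)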

numba_cuda-0.14.0.dist-info/RECORD → numba_cuda-0.15.1.dist-info/RECORD CHANGED
@@ -1,9 +1,9 @@
  _numba_cuda_redirector.pth,sha256=cmfMMmV0JPh3yEpl4bGeM9AuXiVVMSo6Z_b7RaQL3XE,30
  _numba_cuda_redirector.py,sha256=n_r8MYbu5-vcXMnLJW147k8DnFXXvgb7nPIXnlXwTyQ,2659
- numba_cuda/VERSION,sha256=BlWCZVqs1vyD_3QqVxXAS7Slc5W_PuRVl5j6QsLORYk,7
+ numba_cuda/VERSION,sha256=RKv2awImAXmzpQmw8QtfJvQx8xbvTKUPGHZMnkuM_Uo,7
  numba_cuda/__init__.py,sha256=atXeUvJKR3JHcAiCFbXCVOJQUHgB1TulmsqSL_9RT3Q,114
  numba_cuda/_version.py,sha256=nzrrJXi85d18m6SPdsPsetJNClDETkmF1MrEhGLYDBs,734
- numba_cuda/numba/cuda/__init__.py,sha256=3siqMXEKqa9ezQ8RxPC3KMdebUjgJt-EKxxV4CX9818,607
+ numba_cuda/numba/cuda/__init__.py,sha256=C4GaDax1WYQgj6JaPkHAHXSxI9grkEigbrTW03foqKU,2369
  numba_cuda/numba/cuda/api.py,sha256=mkbZBcBfm819kCywQbH8jAvUex2m4pYTcFD-LE-tXsQ,17638
  numba_cuda/numba/cuda/api_util.py,sha256=jK8oUD3zf_D5IX7vbjc3uY_5kmOxwgEqO2m_lDHdWfM,861
  numba_cuda/numba/cuda/args.py,sha256=UlTHTJpwPeCtnW0Bb-Wetm5UO9TPR-PCgIt5ys8b8tQ,1894
@@ -21,7 +21,7 @@ numba_cuda/numba/cuda/decorators.py,sha256=NeSHxaiUZyAVJf79UFTctU-7AKLm8dDPERIHb
  numba_cuda/numba/cuda/descriptor.py,sha256=t1rSVJSCAlVACC5_Un3FQ7iubdTTBe-euqz88cvs2tI,985
  numba_cuda/numba/cuda/device_init.py,sha256=Rtwd6hQMHMLMkj6MXtndbWYFJfkIaRe0MwOIJF2nzhU,3449
  numba_cuda/numba/cuda/deviceufunc.py,sha256=zj9BbLiZD-dPttHew4olw8ANgR2nXnXEE9qjCeGLrQI,30731
- numba_cuda/numba/cuda/dispatcher.py,sha256=_uaS7jxpquTiG4En2u5eNbOBXYvOIrJebVS-vk9voVU,43467
+ numba_cuda/numba/cuda/dispatcher.py,sha256=wkzSs-kZGRRqgaHMffO9xtkoZZSAzdZmPjd__c5mb_c,43371
  numba_cuda/numba/cuda/errors.py,sha256=WRso1Q_jCoWP5yrDBMhihRhhVtVo1-7KdN8QVE9j46o,1712
  numba_cuda/numba/cuda/extending.py,sha256=VwuU5F0AQFlJsqaiwoWk-6Itihew1FsjVT_BVjhY8Us,2278
  numba_cuda/numba/cuda/initialize.py,sha256=0SnpjccQEYiWITIyfAJx833H1yhYFFDY42EpnwYyMn8,487
@@ -50,16 +50,16 @@ numba_cuda/numba/cuda/vector_types.py,sha256=FlzOKufhvBnZ-VC-liA7y9is8BV-uj0fD-E
  numba_cuda/numba/cuda/vectorizers.py,sha256=nEfQxjSA4oCX8ZzvoqjDRygDfwzxFVDXtnjx-K1aPqA,8387
  numba_cuda/numba/cuda/_internal/cuda_bf16.py,sha256=QYck6s_D85HBEsc__SAl_UZxf7SptqAk31mLv_1gzuE,152212
  numba_cuda/numba/cuda/cudadrv/__init__.py,sha256=inat2K8K1OVrgDe64FK7CyRmyFyNKcNO4p2_L79yRZ0,201
- numba_cuda/numba/cuda/cudadrv/devicearray.py,sha256=6tF2TYnmjMbKk2fho1ONoD_QsRD9QVTT2kHP7x1u1J0,31556
+ numba_cuda/numba/cuda/cudadrv/devicearray.py,sha256=xJFZwbfi7o9bzPDLxSPDTLdH6iFYi8W1AbOMmikbpgY,31225
  numba_cuda/numba/cuda/cudadrv/devices.py,sha256=k87EDIRhj1ncM9PxJCjZGPFfEks99vzmHlTc55GK5X0,8062
- numba_cuda/numba/cuda/cudadrv/driver.py,sha256=ypF1plUmtHo7pFVI_JsIAJkOAYerj_1eW3rsXmawXJM,119641
+ numba_cuda/numba/cuda/cudadrv/driver.py,sha256=DrnrTjK857vvOTO_kj5KRo59VjwyVRXAhMU6TAOwRi4,118802
  numba_cuda/numba/cuda/cudadrv/drvapi.py,sha256=OnjYWnmy8ZlSfYouhzyYIpW-AJ3x1YHj32YcBY2xet4,16790
  numba_cuda/numba/cuda/cudadrv/dummyarray.py,sha256=2jycZhniMy3ncoVWQG9D8dBehTEeocBZTW43gKHL5Tc,14291
  numba_cuda/numba/cuda/cudadrv/enums.py,sha256=raWKryxamWQZ5A8ivMpyYVhhwbSpaD9lu7l1_wl2W9M,23742
  numba_cuda/numba/cuda/cudadrv/error.py,sha256=C2tTPT5h3BGgzjaFTCqbY7hOk2PgkVh0iuM1EiRp1eI,583
  numba_cuda/numba/cuda/cudadrv/libs.py,sha256=qjknQxYXd2ucwDLQqzhWC_srNg6FnwvcVHIpKyPxJ9A,7287
  numba_cuda/numba/cuda/cudadrv/linkable_code.py,sha256=IZ13laEG_altDQyi9HkdMcwW-YYEIn2erqz6AnYsqHg,2808
- numba_cuda/numba/cuda/cudadrv/mappings.py,sha256=9uEs1KepeVGRbEpVhLjtxSsvZpZsbrHnPywmx--y88A,804
+ numba_cuda/numba/cuda/cudadrv/mappings.py,sha256=M10CEqzEBzMRjSm8aiwkrvUy06zF3NQfv64c9QEF_Ek,817
  numba_cuda/numba/cuda/cudadrv/ndarray.py,sha256=HtULWWFyDlgqvrH5459yyPTvU4UbUo2DSdtcNfvbH00,473
  numba_cuda/numba/cuda/cudadrv/nvrtc.py,sha256=UD8kASyGUU896tNWAtVxmbzDTP5jDbiOAZjCsELOg6U,14986
  numba_cuda/numba/cuda/cudadrv/nvvm.py,sha256=2vq00bifcNvQQGbp0IUaStlFLM5faU9weQ2poWSB0a4,29637
@@ -81,7 +81,7 @@ numba_cuda/numba/cuda/memory_management/memsys.cu,sha256=gMBM9_Hnv3EO3Gw_GKvII8y
  numba_cuda/numba/cuda/memory_management/memsys.cuh,sha256=hPGBQgKyOfYY25ntoBXlhYyeXzxJyz0ByeTszkaKJUM,504
  numba_cuda/numba/cuda/memory_management/nrt.cu,sha256=1hzbAKyqh9783UVdVT67ZxfvJyl_Ojt8e0AbHUC86ss,4818
  numba_cuda/numba/cuda/memory_management/nrt.cuh,sha256=p2GQ-l-EfCoO0sBTyKXhIY3hxGWbPhEJcR-mLLT_V3M,2173
- numba_cuda/numba/cuda/memory_management/nrt.py,sha256=6yXKBUvjIw_9BJ48iDIuckREaQVskzQAXm7uIRGFVuc,10039
+ numba_cuda/numba/cuda/memory_management/nrt.py,sha256=D5Dh72YYRm22qzINCpQ86CS6hK6U7VcgBW-JSbDMzuM,9864
  numba_cuda/numba/cuda/simulator/__init__.py,sha256=ONoWJ3SwE53di0p-lFRH7NOZea2jEUWyn9sDpkOVjCw,2040
  numba_cuda/numba/cuda/simulator/api.py,sha256=hFSFPIrg-aUd-MHg2GCSosFJiL8x2XRiQaqolfTGA3A,3551
  numba_cuda/numba/cuda/simulator/bf16.py,sha256=1ZWkY4Adv8dY46YyorGKGQj3KEBqeet6rsyM8jwfAb4,16
@@ -114,9 +114,9 @@ numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py,sha256=m7q1bEsH3rJD2gn
  numba_cuda/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py,sha256=QhBQr3ZzrBMT-r132RR99UCKwrR-RwZk98RxVv5os0w,13933
  numba_cuda/numba/cuda/tests/cudadrv/test_cuda_auto_context.py,sha256=xprxASXl0g6QrOujoj07YDw3lIwu0SQbk1lGQPJHlRc,564
  numba_cuda/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py,sha256=JkMbKFa3CBSMSQaSWzOqJU7DE5YlwJLux6OLAmvnSJo,5654
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py,sha256=bn9OBNmNq5WTgv5LXQTyi-3V3auKbIBNoC-vNfzeX9I,7536
+ numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py,sha256=Q8hfUK8xhmt1bUYpzVy5Knm_14QAWdt3zxZkXJ6RT-0,7409
  numba_cuda/numba/cuda/tests/cudadrv/test_cuda_libraries.py,sha256=KWGON5OSb5Vp74QFDIiupK6ytMwwwDfbYqpENAB4lGE,801
- numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py,sha256=nN1pk7CEm4j8A6XYlDpIWkpFOSO7IGz-7rwa0fFnerY,6485
+ numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py,sha256=_ysNhgEnvr18miDhzFWbFoQdrPRl6wHPjXAH4sGTOPo,6377
  numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py,sha256=MzKXO2RLCHA_0XU29JfjaLrmKTwwu7PA3cspTfQgCPM,21699
  numba_cuda/numba/cuda/tests/cudadrv/test_deallocations.py,sha256=VKYRuIOPdEWkI-6E6-pRCNC1U4-Qxi0d-jX_q_7x1dI,8420
  numba_cuda/numba/cuda/tests/cudadrv/test_detect.py,sha256=DUYZeNlDgL1mQN1xHDYzTRfc-zetuikcULyULcRaC1A,2657
@@ -126,11 +126,11 @@ numba_cuda/numba/cuda/tests/cudadrv/test_host_alloc.py,sha256=ciy4dAK6-qrf1f8X_x
  numba_cuda/numba/cuda/tests/cudadrv/test_init.py,sha256=mRcGOJWTUpZ533EWq4Tbp3D_aHFFcVS6c_iZqhId7I0,4494
  numba_cuda/numba/cuda/tests/cudadrv/test_inline_ptx.py,sha256=B_fYsBUpd9SxYSOmuWuSFbb6JAiA90HhiVeTSuYVb8c,1280
  numba_cuda/numba/cuda/tests/cudadrv/test_is_fp16.py,sha256=0KPe4E9wOZsSV_0QI0LmjUeMTjWpYT8BXExUUsmUCDI,394
- numba_cuda/numba/cuda/tests/cudadrv/test_linker.py,sha256=ymv2ujRLLIIURikNEdC0SshJFwXhIx9j462va_QvPTw,10133
+ numba_cuda/numba/cuda/tests/cudadrv/test_linker.py,sha256=0jU-kakHDIWEmwkyorwqt89-E9Mq8Um1DIxc32_jePE,10059
  numba_cuda/numba/cuda/tests/cudadrv/test_managed_alloc.py,sha256=2tkf766GjIta_wL5NGlMIqmrDMFN2rZmnP_c9A8cWA8,5084
- numba_cuda/numba/cuda/tests/cudadrv/test_module_callbacks.py,sha256=176Ma2ZVLnc4w4bfYwbF1eeRq3x3rbOvDieRJLSuNpI,8413
+ numba_cuda/numba/cuda/tests/cudadrv/test_module_callbacks.py,sha256=qZj2KfiCJ9mPMwJ5Yhvdyx1gfoi8qnp8dERKTSylsrM,8424
  numba_cuda/numba/cuda/tests/cudadrv/test_mvc.py,sha256=9MLFEXn7DnLkuuXK_qjilA1jxQwC-AeSBOcRYzZogRY,1513
- numba_cuda/numba/cuda/tests/cudadrv/test_nvjitlink.py,sha256=2BpJ-m3Ue9ZN-NNVkVgPyPyWsffADj_eCtYdiLVJ528,11551
+ numba_cuda/numba/cuda/tests/cudadrv/test_nvjitlink.py,sha256=HF1rWokC5FKZtnnq_1uUHon4hEWK0BCfYYKSC2pdoHw,11477
  numba_cuda/numba/cuda/tests/cudadrv/test_nvvm_driver.py,sha256=71-Hlng6-HyhfK3i3ITUzHQIHyL3hCv1ubkkJOGt0R4,7400
  numba_cuda/numba/cuda/tests/cudadrv/test_pinned.py,sha256=PGuv4bt9qiIGlkLhyQCOXFIf1SK5Nj-RjcpWqeO1TMM,943
  numba_cuda/numba/cuda/tests/cudadrv/test_profiler.py,sha256=xbSFmvqOIcWY-TI9p1MDcGwE-24iaK4j-_UenMvTnR4,508
@@ -165,7 +165,7 @@ numba_cuda/numba/cuda/tests/cudapy/test_complex_kernel.py,sha256=KIuXQ0ihgQQXM-e
  numba_cuda/numba/cuda/tests/cudapy/test_const_string.py,sha256=li1UsV5vc2M01cJ7k6_526VPtuAOAKr8e7kb1CDUXi4,4323
  numba_cuda/numba/cuda/tests/cudapy/test_constmem.py,sha256=ZWmyKvFokRMjqyXjVpZVOnR6LR694GWcbUn2jVEQV14,5170
  numba_cuda/numba/cuda/tests/cudapy/test_cooperative_groups.py,sha256=3OkjhcjPp_P3Pnc1zbteGpAGpoN07cG8Xtdnunx5yWA,5973
- numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py,sha256=RXCNHAZM35sbUf3Gi-x2E8-a6BmhFb2rhQkBOeiS_fo,15757
+ numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py,sha256=k6pkt63aipYZd2ekYOuXw_GnyGWImcKwIHOkh5PEsr4,15713
  numba_cuda/numba/cuda/tests/cudapy/test_cuda_jit_no_types.py,sha256=8prL2FTiaajW-UHSL9al-nBniygOfpdAOT_Dkej4PWI,2138
  numba_cuda/numba/cuda/tests/cudapy/test_datetime.py,sha256=MnOeDWMz-rL3-07FsswM06Laxmm0KjTmTwhrP3rmchQ,3526
  numba_cuda/numba/cuda/tests/cudapy/test_debug.py,sha256=1P369s02AvGu7fSIEe_YxSgh3c6S72Aw1gRgmepDbQY,3383
@@ -274,8 +274,8 @@ numba_cuda/numba/cuda/tests/test_binary_generation/generate_raw_ltoir.py,sha256=
  numba_cuda/numba/cuda/tests/test_binary_generation/nrt_extern.cu,sha256=T9ubst3fFUK7EXyXXMi73wAban3VFFQ986cY5OcKfvI,157
  numba_cuda/numba/cuda/tests/test_binary_generation/test_device_functions.cu,sha256=IB5t-dVhrKVoue3AbUx3yVMxPG0hBF_yZbzb4642sf0,538
  numba_cuda/numba/cuda/tests/test_binary_generation/undefined_extern.cu,sha256=q3oxZziT8KDodeNcEBiWULH6vMrHCWucmJmtrg8C0d0,128
- numba_cuda-0.14.0.dist-info/licenses/LICENSE,sha256=eHeYE-XjASmwbxfsP5AImgfzRwZurZGqH1f6OFwJ4io,1326
- numba_cuda-0.14.0.dist-info/METADATA,sha256=eq4qxmqY97oT9f9_0tBT4EFxrMBsD1Bvj5Ix3he40HM,2799
- numba_cuda-0.14.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- numba_cuda-0.14.0.dist-info/top_level.txt,sha256=C50SsH-8tXDmt7I0Y3nlJYhS5s6pqWflCPdobe9vx2M,11
- numba_cuda-0.14.0.dist-info/RECORD,,
+ numba_cuda-0.15.1.dist-info/licenses/LICENSE,sha256=eHeYE-XjASmwbxfsP5AImgfzRwZurZGqH1f6OFwJ4io,1326
+ numba_cuda-0.15.1.dist-info/METADATA,sha256=tIHuoF1LHBDRmxYFwmcx6Lgv3Te5Pl539NwfRn21yLk,2907
+ numba_cuda-0.15.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ numba_cuda-0.15.1.dist-info/top_level.txt,sha256=C50SsH-8tXDmt7I0Y3nlJYhS5s6pqWflCPdobe9vx2M,11
+ numba_cuda-0.15.1.dist-info/RECORD,,