numba-cuda 0.0.0__py3-none-any.whl → 0.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (233)
  1. _numba_cuda_redirector.pth +1 -0
  2. _numba_cuda_redirector.py +74 -0
  3. numba_cuda/VERSION +1 -0
  4. numba_cuda/__init__.py +5 -0
  5. numba_cuda/_version.py +19 -0
  6. numba_cuda/numba/cuda/__init__.py +22 -0
  7. numba_cuda/numba/cuda/api.py +526 -0
  8. numba_cuda/numba/cuda/api_util.py +30 -0
  9. numba_cuda/numba/cuda/args.py +77 -0
  10. numba_cuda/numba/cuda/cg.py +62 -0
  11. numba_cuda/numba/cuda/codegen.py +378 -0
  12. numba_cuda/numba/cuda/compiler.py +422 -0
  13. numba_cuda/numba/cuda/cpp_function_wrappers.cu +47 -0
  14. numba_cuda/numba/cuda/cuda_fp16.h +3631 -0
  15. numba_cuda/numba/cuda/cuda_fp16.hpp +2465 -0
  16. numba_cuda/numba/cuda/cuda_paths.py +258 -0
  17. numba_cuda/numba/cuda/cudadecl.py +806 -0
  18. numba_cuda/numba/cuda/cudadrv/__init__.py +9 -0
  19. numba_cuda/numba/cuda/cudadrv/devicearray.py +904 -0
  20. numba_cuda/numba/cuda/cudadrv/devices.py +248 -0
  21. numba_cuda/numba/cuda/cudadrv/driver.py +3201 -0
  22. numba_cuda/numba/cuda/cudadrv/drvapi.py +398 -0
  23. numba_cuda/numba/cuda/cudadrv/dummyarray.py +452 -0
  24. numba_cuda/numba/cuda/cudadrv/enums.py +607 -0
  25. numba_cuda/numba/cuda/cudadrv/error.py +36 -0
  26. numba_cuda/numba/cuda/cudadrv/libs.py +176 -0
  27. numba_cuda/numba/cuda/cudadrv/ndarray.py +20 -0
  28. numba_cuda/numba/cuda/cudadrv/nvrtc.py +260 -0
  29. numba_cuda/numba/cuda/cudadrv/nvvm.py +707 -0
  30. numba_cuda/numba/cuda/cudadrv/rtapi.py +10 -0
  31. numba_cuda/numba/cuda/cudadrv/runtime.py +142 -0
  32. numba_cuda/numba/cuda/cudaimpl.py +1055 -0
  33. numba_cuda/numba/cuda/cudamath.py +140 -0
  34. numba_cuda/numba/cuda/decorators.py +189 -0
  35. numba_cuda/numba/cuda/descriptor.py +33 -0
  36. numba_cuda/numba/cuda/device_init.py +89 -0
  37. numba_cuda/numba/cuda/deviceufunc.py +908 -0
  38. numba_cuda/numba/cuda/dispatcher.py +1057 -0
  39. numba_cuda/numba/cuda/errors.py +59 -0
  40. numba_cuda/numba/cuda/extending.py +7 -0
  41. numba_cuda/numba/cuda/initialize.py +13 -0
  42. numba_cuda/numba/cuda/intrinsic_wrapper.py +77 -0
  43. numba_cuda/numba/cuda/intrinsics.py +198 -0
  44. numba_cuda/numba/cuda/kernels/__init__.py +0 -0
  45. numba_cuda/numba/cuda/kernels/reduction.py +262 -0
  46. numba_cuda/numba/cuda/kernels/transpose.py +65 -0
  47. numba_cuda/numba/cuda/libdevice.py +3382 -0
  48. numba_cuda/numba/cuda/libdevicedecl.py +17 -0
  49. numba_cuda/numba/cuda/libdevicefuncs.py +1057 -0
  50. numba_cuda/numba/cuda/libdeviceimpl.py +83 -0
  51. numba_cuda/numba/cuda/mathimpl.py +448 -0
  52. numba_cuda/numba/cuda/models.py +48 -0
  53. numba_cuda/numba/cuda/nvvmutils.py +235 -0
  54. numba_cuda/numba/cuda/printimpl.py +86 -0
  55. numba_cuda/numba/cuda/random.py +292 -0
  56. numba_cuda/numba/cuda/simulator/__init__.py +38 -0
  57. numba_cuda/numba/cuda/simulator/api.py +110 -0
  58. numba_cuda/numba/cuda/simulator/compiler.py +9 -0
  59. numba_cuda/numba/cuda/simulator/cudadrv/__init__.py +2 -0
  60. numba_cuda/numba/cuda/simulator/cudadrv/devicearray.py +432 -0
  61. numba_cuda/numba/cuda/simulator/cudadrv/devices.py +117 -0
  62. numba_cuda/numba/cuda/simulator/cudadrv/driver.py +62 -0
  63. numba_cuda/numba/cuda/simulator/cudadrv/drvapi.py +4 -0
  64. numba_cuda/numba/cuda/simulator/cudadrv/dummyarray.py +4 -0
  65. numba_cuda/numba/cuda/simulator/cudadrv/error.py +6 -0
  66. numba_cuda/numba/cuda/simulator/cudadrv/libs.py +2 -0
  67. numba_cuda/numba/cuda/simulator/cudadrv/nvvm.py +29 -0
  68. numba_cuda/numba/cuda/simulator/cudadrv/runtime.py +19 -0
  69. numba_cuda/numba/cuda/simulator/kernel.py +308 -0
  70. numba_cuda/numba/cuda/simulator/kernelapi.py +495 -0
  71. numba_cuda/numba/cuda/simulator/reduction.py +15 -0
  72. numba_cuda/numba/cuda/simulator/vector_types.py +58 -0
  73. numba_cuda/numba/cuda/simulator_init.py +17 -0
  74. numba_cuda/numba/cuda/stubs.py +902 -0
  75. numba_cuda/numba/cuda/target.py +440 -0
  76. numba_cuda/numba/cuda/testing.py +202 -0
  77. numba_cuda/numba/cuda/tests/__init__.py +58 -0
  78. numba_cuda/numba/cuda/tests/cudadrv/__init__.py +8 -0
  79. numba_cuda/numba/cuda/tests/cudadrv/test_array_attr.py +145 -0
  80. numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py +145 -0
  81. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py +375 -0
  82. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_auto_context.py +21 -0
  83. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py +179 -0
  84. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py +235 -0
  85. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_libraries.py +22 -0
  86. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py +193 -0
  87. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +547 -0
  88. numba_cuda/numba/cuda/tests/cudadrv/test_deallocations.py +249 -0
  89. numba_cuda/numba/cuda/tests/cudadrv/test_detect.py +81 -0
  90. numba_cuda/numba/cuda/tests/cudadrv/test_emm_plugins.py +192 -0
  91. numba_cuda/numba/cuda/tests/cudadrv/test_events.py +38 -0
  92. numba_cuda/numba/cuda/tests/cudadrv/test_host_alloc.py +65 -0
  93. numba_cuda/numba/cuda/tests/cudadrv/test_init.py +139 -0
  94. numba_cuda/numba/cuda/tests/cudadrv/test_inline_ptx.py +37 -0
  95. numba_cuda/numba/cuda/tests/cudadrv/test_is_fp16.py +12 -0
  96. numba_cuda/numba/cuda/tests/cudadrv/test_linker.py +317 -0
  97. numba_cuda/numba/cuda/tests/cudadrv/test_managed_alloc.py +127 -0
  98. numba_cuda/numba/cuda/tests/cudadrv/test_mvc.py +54 -0
  99. numba_cuda/numba/cuda/tests/cudadrv/test_nvvm_driver.py +199 -0
  100. numba_cuda/numba/cuda/tests/cudadrv/test_pinned.py +37 -0
  101. numba_cuda/numba/cuda/tests/cudadrv/test_profiler.py +20 -0
  102. numba_cuda/numba/cuda/tests/cudadrv/test_ptds.py +149 -0
  103. numba_cuda/numba/cuda/tests/cudadrv/test_reset_device.py +36 -0
  104. numba_cuda/numba/cuda/tests/cudadrv/test_runtime.py +85 -0
  105. numba_cuda/numba/cuda/tests/cudadrv/test_select_device.py +41 -0
  106. numba_cuda/numba/cuda/tests/cudadrv/test_streams.py +122 -0
  107. numba_cuda/numba/cuda/tests/cudapy/__init__.py +8 -0
  108. numba_cuda/numba/cuda/tests/cudapy/cache_usecases.py +234 -0
  109. numba_cuda/numba/cuda/tests/cudapy/cache_with_cpu_usecases.py +41 -0
  110. numba_cuda/numba/cuda/tests/cudapy/extensions_usecases.py +58 -0
  111. numba_cuda/numba/cuda/tests/cudapy/jitlink.ptx +30 -0
  112. numba_cuda/numba/cuda/tests/cudapy/recursion_usecases.py +100 -0
  113. numba_cuda/numba/cuda/tests/cudapy/test_alignment.py +42 -0
  114. numba_cuda/numba/cuda/tests/cudapy/test_array.py +260 -0
  115. numba_cuda/numba/cuda/tests/cudapy/test_array_args.py +201 -0
  116. numba_cuda/numba/cuda/tests/cudapy/test_array_methods.py +35 -0
  117. numba_cuda/numba/cuda/tests/cudapy/test_atomics.py +1620 -0
  118. numba_cuda/numba/cuda/tests/cudapy/test_blackscholes.py +120 -0
  119. numba_cuda/numba/cuda/tests/cudapy/test_boolean.py +24 -0
  120. numba_cuda/numba/cuda/tests/cudapy/test_caching.py +545 -0
  121. numba_cuda/numba/cuda/tests/cudapy/test_casting.py +257 -0
  122. numba_cuda/numba/cuda/tests/cudapy/test_cffi.py +33 -0
  123. numba_cuda/numba/cuda/tests/cudapy/test_compiler.py +276 -0
  124. numba_cuda/numba/cuda/tests/cudapy/test_complex.py +296 -0
  125. numba_cuda/numba/cuda/tests/cudapy/test_complex_kernel.py +20 -0
  126. numba_cuda/numba/cuda/tests/cudapy/test_const_string.py +129 -0
  127. numba_cuda/numba/cuda/tests/cudapy/test_constmem.py +176 -0
  128. numba_cuda/numba/cuda/tests/cudapy/test_cooperative_groups.py +147 -0
  129. numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py +435 -0
  130. numba_cuda/numba/cuda/tests/cudapy/test_cuda_jit_no_types.py +90 -0
  131. numba_cuda/numba/cuda/tests/cudapy/test_datetime.py +94 -0
  132. numba_cuda/numba/cuda/tests/cudapy/test_debug.py +101 -0
  133. numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +221 -0
  134. numba_cuda/numba/cuda/tests/cudapy/test_device_func.py +222 -0
  135. numba_cuda/numba/cuda/tests/cudapy/test_dispatcher.py +700 -0
  136. numba_cuda/numba/cuda/tests/cudapy/test_enums.py +121 -0
  137. numba_cuda/numba/cuda/tests/cudapy/test_errors.py +79 -0
  138. numba_cuda/numba/cuda/tests/cudapy/test_exception.py +174 -0
  139. numba_cuda/numba/cuda/tests/cudapy/test_extending.py +155 -0
  140. numba_cuda/numba/cuda/tests/cudapy/test_fastmath.py +244 -0
  141. numba_cuda/numba/cuda/tests/cudapy/test_forall.py +52 -0
  142. numba_cuda/numba/cuda/tests/cudapy/test_freevar.py +29 -0
  143. numba_cuda/numba/cuda/tests/cudapy/test_frexp_ldexp.py +66 -0
  144. numba_cuda/numba/cuda/tests/cudapy/test_globals.py +60 -0
  145. numba_cuda/numba/cuda/tests/cudapy/test_gufunc.py +456 -0
  146. numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scalar.py +159 -0
  147. numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scheduling.py +95 -0
  148. numba_cuda/numba/cuda/tests/cudapy/test_idiv.py +37 -0
  149. numba_cuda/numba/cuda/tests/cudapy/test_inspect.py +165 -0
  150. numba_cuda/numba/cuda/tests/cudapy/test_intrinsics.py +1106 -0
  151. numba_cuda/numba/cuda/tests/cudapy/test_ipc.py +318 -0
  152. numba_cuda/numba/cuda/tests/cudapy/test_iterators.py +99 -0
  153. numba_cuda/numba/cuda/tests/cudapy/test_lang.py +64 -0
  154. numba_cuda/numba/cuda/tests/cudapy/test_laplace.py +119 -0
  155. numba_cuda/numba/cuda/tests/cudapy/test_libdevice.py +187 -0
  156. numba_cuda/numba/cuda/tests/cudapy/test_lineinfo.py +199 -0
  157. numba_cuda/numba/cuda/tests/cudapy/test_localmem.py +164 -0
  158. numba_cuda/numba/cuda/tests/cudapy/test_mandel.py +37 -0
  159. numba_cuda/numba/cuda/tests/cudapy/test_math.py +786 -0
  160. numba_cuda/numba/cuda/tests/cudapy/test_matmul.py +74 -0
  161. numba_cuda/numba/cuda/tests/cudapy/test_minmax.py +113 -0
  162. numba_cuda/numba/cuda/tests/cudapy/test_montecarlo.py +22 -0
  163. numba_cuda/numba/cuda/tests/cudapy/test_multigpu.py +140 -0
  164. numba_cuda/numba/cuda/tests/cudapy/test_multiprocessing.py +46 -0
  165. numba_cuda/numba/cuda/tests/cudapy/test_multithreads.py +101 -0
  166. numba_cuda/numba/cuda/tests/cudapy/test_nondet.py +49 -0
  167. numba_cuda/numba/cuda/tests/cudapy/test_operator.py +401 -0
  168. numba_cuda/numba/cuda/tests/cudapy/test_optimization.py +86 -0
  169. numba_cuda/numba/cuda/tests/cudapy/test_overload.py +335 -0
  170. numba_cuda/numba/cuda/tests/cudapy/test_powi.py +124 -0
  171. numba_cuda/numba/cuda/tests/cudapy/test_print.py +128 -0
  172. numba_cuda/numba/cuda/tests/cudapy/test_py2_div_issue.py +33 -0
  173. numba_cuda/numba/cuda/tests/cudapy/test_random.py +104 -0
  174. numba_cuda/numba/cuda/tests/cudapy/test_record_dtype.py +610 -0
  175. numba_cuda/numba/cuda/tests/cudapy/test_recursion.py +125 -0
  176. numba_cuda/numba/cuda/tests/cudapy/test_reduction.py +76 -0
  177. numba_cuda/numba/cuda/tests/cudapy/test_retrieve_autoconverted_arrays.py +83 -0
  178. numba_cuda/numba/cuda/tests/cudapy/test_serialize.py +85 -0
  179. numba_cuda/numba/cuda/tests/cudapy/test_slicing.py +37 -0
  180. numba_cuda/numba/cuda/tests/cudapy/test_sm.py +444 -0
  181. numba_cuda/numba/cuda/tests/cudapy/test_sm_creation.py +205 -0
  182. numba_cuda/numba/cuda/tests/cudapy/test_sync.py +271 -0
  183. numba_cuda/numba/cuda/tests/cudapy/test_transpose.py +80 -0
  184. numba_cuda/numba/cuda/tests/cudapy/test_ufuncs.py +277 -0
  185. numba_cuda/numba/cuda/tests/cudapy/test_userexc.py +47 -0
  186. numba_cuda/numba/cuda/tests/cudapy/test_vector_type.py +307 -0
  187. numba_cuda/numba/cuda/tests/cudapy/test_vectorize.py +283 -0
  188. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_complex.py +20 -0
  189. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_decor.py +69 -0
  190. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_device.py +36 -0
  191. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_scalar_arg.py +37 -0
  192. numba_cuda/numba/cuda/tests/cudapy/test_warning.py +139 -0
  193. numba_cuda/numba/cuda/tests/cudapy/test_warp_ops.py +276 -0
  194. numba_cuda/numba/cuda/tests/cudasim/__init__.py +6 -0
  195. numba_cuda/numba/cuda/tests/cudasim/support.py +6 -0
  196. numba_cuda/numba/cuda/tests/cudasim/test_cudasim_issues.py +102 -0
  197. numba_cuda/numba/cuda/tests/data/__init__.py +0 -0
  198. numba_cuda/numba/cuda/tests/data/cuda_include.cu +5 -0
  199. numba_cuda/numba/cuda/tests/data/error.cu +7 -0
  200. numba_cuda/numba/cuda/tests/data/jitlink.cu +23 -0
  201. numba_cuda/numba/cuda/tests/data/jitlink.ptx +51 -0
  202. numba_cuda/numba/cuda/tests/data/warn.cu +7 -0
  203. numba_cuda/numba/cuda/tests/doc_examples/__init__.py +6 -0
  204. numba_cuda/numba/cuda/tests/doc_examples/ffi/__init__.py +0 -0
  205. numba_cuda/numba/cuda/tests/doc_examples/ffi/functions.cu +49 -0
  206. numba_cuda/numba/cuda/tests/doc_examples/test_cg.py +77 -0
  207. numba_cuda/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py +76 -0
  208. numba_cuda/numba/cuda/tests/doc_examples/test_ffi.py +82 -0
  209. numba_cuda/numba/cuda/tests/doc_examples/test_laplace.py +155 -0
  210. numba_cuda/numba/cuda/tests/doc_examples/test_matmul.py +173 -0
  211. numba_cuda/numba/cuda/tests/doc_examples/test_montecarlo.py +109 -0
  212. numba_cuda/numba/cuda/tests/doc_examples/test_random.py +59 -0
  213. numba_cuda/numba/cuda/tests/doc_examples/test_reduction.py +76 -0
  214. numba_cuda/numba/cuda/tests/doc_examples/test_sessionize.py +130 -0
  215. numba_cuda/numba/cuda/tests/doc_examples/test_ufunc.py +50 -0
  216. numba_cuda/numba/cuda/tests/doc_examples/test_vecadd.py +73 -0
  217. numba_cuda/numba/cuda/tests/nocuda/__init__.py +8 -0
  218. numba_cuda/numba/cuda/tests/nocuda/test_dummyarray.py +359 -0
  219. numba_cuda/numba/cuda/tests/nocuda/test_function_resolution.py +36 -0
  220. numba_cuda/numba/cuda/tests/nocuda/test_import.py +49 -0
  221. numba_cuda/numba/cuda/tests/nocuda/test_library_lookup.py +238 -0
  222. numba_cuda/numba/cuda/tests/nocuda/test_nvvm.py +54 -0
  223. numba_cuda/numba/cuda/types.py +37 -0
  224. numba_cuda/numba/cuda/ufuncs.py +662 -0
  225. numba_cuda/numba/cuda/vector_types.py +209 -0
  226. numba_cuda/numba/cuda/vectorizers.py +252 -0
  227. numba_cuda-0.0.12.dist-info/LICENSE +25 -0
  228. numba_cuda-0.0.12.dist-info/METADATA +68 -0
  229. numba_cuda-0.0.12.dist-info/RECORD +231 -0
  230. {numba_cuda-0.0.0.dist-info → numba_cuda-0.0.12.dist-info}/WHEEL +1 -1
  231. numba_cuda-0.0.0.dist-info/METADATA +0 -6
  232. numba_cuda-0.0.0.dist-info/RECORD +0 -5
  233. {numba_cuda-0.0.0.dist-info → numba_cuda-0.0.12.dist-info}/top_level.txt +0 -0
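
The hunks below show the new CUDA simulator modules from the list above (items 57, 58, 59, 60 and 61, matched to the hunks by their +110, +9, +2, +432 and +117 line counts). For orientation only, and not part of the diff itself: the simulator is selected by setting NUMBA_ENABLE_CUDASIM=1 before Numba is imported, so a minimal usage sketch looks like this (a sketch, not package code):

    import os
    os.environ["NUMBA_ENABLE_CUDASIM"] = "1"   # must be set before importing numba

    import numpy as np
    from numba import cuda

    @cuda.jit
    def add_one(x):
        i = cuda.grid(1)
        if i < x.size:
            x[i] += 1

    a = np.zeros(8)
    add_one[1, 8](a)    # executed synchronously by the simulator's FakeCUDAKernel
    print(a)            # [1. 1. 1. 1. 1. 1. 1. 1.]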
numba_cuda/numba/cuda/simulator/api.py
@@ -0,0 +1,110 @@
+'''
+Contains CUDA API functions
+'''
+
+# Imports here bring together parts of the API from other modules, so some of
+# them appear unused.
+from contextlib import contextmanager
+
+from .cudadrv.devices import require_context, reset, gpus  # noqa: F401
+from .kernel import FakeCUDAKernel
+from numba.core.sigutils import is_signature
+from warnings import warn
+from ..args import In, Out, InOut  # noqa: F401
+
+
+def select_device(dev=0):
+    assert dev == 0, 'Only a single device supported by the simulator'
+
+
+def is_float16_supported():
+    return True
+
+
+class stream(object):
+    '''
+    The stream API is supported in the simulator - however, all execution
+    occurs synchronously, so synchronization requires no operation.
+    '''
+    @contextmanager
+    def auto_synchronize(self):
+        yield
+
+    def synchronize(self):
+        pass
+
+
+def synchronize():
+    pass
+
+
+def close():
+    gpus.closed = True
+
+
+def declare_device(*args, **kwargs):
+    pass
+
+
+def detect():
+    print('Found 1 CUDA devices')
+    print('id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]'))
+    print('%40s: 5.0' % 'compute capability')
+
+
+def list_devices():
+    return gpus
+
+
+# Events
+
+class Event(object):
+    '''
+    The simulator supports the event API, but they do not record timing info,
+    and all simulation is synchronous. Execution time is not recorded.
+    '''
+    def record(self, stream=0):
+        pass
+
+    def wait(self, stream=0):
+        pass
+
+    def synchronize(self):
+        pass
+
+    def elapsed_time(self, event):
+        warn('Simulator timings are bogus')
+        return 0.0
+
+
+event = Event
+
+
+def jit(func_or_sig=None, device=False, debug=False, argtypes=None,
+        inline=False, restype=None, fastmath=False, link=None,
+        boundscheck=None, opt=True, cache=None
+        ):
+    # Here for API compatibility
+    if boundscheck:
+        raise NotImplementedError("bounds checking is not supported for CUDA")
+
+    if link is not None:
+        raise NotImplementedError('Cannot link PTX in the simulator')
+
+    # Check for first argument specifying types - in that case the
+    # decorator is not being passed a function
+    if (func_or_sig is None or is_signature(func_or_sig)
+            or isinstance(func_or_sig, list)):
+        def jitwrapper(fn):
+            return FakeCUDAKernel(fn,
+                                  device=device,
+                                  fastmath=fastmath,
+                                  debug=debug)
+        return jitwrapper
+    return FakeCUDAKernel(func_or_sig, device=device, debug=debug)
+
+
+@contextmanager
+def defer_cleanup():
+    # No effect for simulator
+    yield
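
A hedged sketch of how the stub API above behaves, assuming the simulator is enabled as shown earlier and that these names are re-exported at the numba.cuda level as in upstream Numba: streams and events are accepted for API compatibility, but everything runs synchronously and timings are meaningless.

    from numba import cuda   # with NUMBA_ENABLE_CUDASIM=1 set

    s = cuda.stream()
    with s.auto_synchronize():       # the context manager simply yields
        pass
    s.synchronize()                  # no-op

    start, end = cuda.event(), cuda.event()   # event = Event in the stub above
    start.record()
    end.record()
    print(start.elapsed_time(end))   # 0.0, after a "Simulator timings are bogus" warning

    cuda.detect()                    # prints the single SIMULATOR device
    print(len(cuda.list_devices()))  # 1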
numba_cuda/numba/cuda/simulator/compiler.py
@@ -0,0 +1,9 @@
+'''
+The compiler is not implemented in the simulator. This module provides a stub
+to allow tests to import successfully.
+'''
+
+compile = None
+compile_for_current_device = None
+compile_ptx = None
+compile_ptx_for_current_device = None
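
A hedged note on how such stubs are typically consumed: callers must branch on the None placeholders rather than call them, e.g.

    from numba.cuda.simulator import compiler

    if compiler.compile_ptx is None:
        print("PTX compilation unavailable under the simulator")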
numba_cuda/numba/cuda/simulator/cudadrv/__init__.py
@@ -0,0 +1,2 @@
+from numba.cuda.simulator.cudadrv import (devicearray, devices, driver, drvapi,
+                                          error, nvvm)
numba_cuda/numba/cuda/simulator/cudadrv/devicearray.py
@@ -0,0 +1,432 @@
+'''
+The Device Array API is not implemented in the simulator. This module provides
+stubs to allow tests to import correctly.
+'''
+from contextlib import contextmanager
+from numba.np.numpy_support import numpy_version
+
+import numpy as np
+
+
+DeviceRecord = None
+from_record_like = None
+
+
+errmsg_contiguous_buffer = ("Array contains non-contiguous buffer and cannot "
+                            "be transferred as a single memory region. Please "
+                            "ensure contiguous buffer with numpy "
+                            ".ascontiguousarray()")
+
+
+class FakeShape(tuple):
+    '''
+    The FakeShape class is used to provide a shape which does not allow negative
+    indexing, similar to the shape in CUDA Python. (Numpy shape arrays allow
+    negative indexing)
+    '''
+
+    def __getitem__(self, k):
+        if isinstance(k, int) and k < 0:
+            raise IndexError('tuple index out of range')
+        return super(FakeShape, self).__getitem__(k)
+
+
+class FakeWithinKernelCUDAArray(object):
+    '''
+    Created to emulate the behavior of arrays within kernels, where either
+    array.item or array['item'] is valid (that is, give all structured
+    arrays `numpy.recarray`-like semantics). This behaviour does not follow
+    the semantics of Python and NumPy with non-jitted code, and will be
+    deprecated and removed.
+    '''
+
+    def __init__(self, item):
+        assert isinstance(item, FakeCUDAArray)
+        self.__dict__['_item'] = item
+
+    def __wrap_if_fake(self, item):
+        if isinstance(item, FakeCUDAArray):
+            return FakeWithinKernelCUDAArray(item)
+        else:
+            return item
+
+    def __getattr__(self, attrname):
+        if attrname in dir(self._item._ary):  # For e.g. array size.
+            return self.__wrap_if_fake(getattr(self._item._ary, attrname))
+        else:
+            return self.__wrap_if_fake(self._item.__getitem__(attrname))
+
+    def __setattr__(self, nm, val):
+        self._item.__setitem__(nm, val)
+
+    def __getitem__(self, idx):
+        return self.__wrap_if_fake(self._item.__getitem__(idx))
+
+    def __setitem__(self, idx, val):
+        self._item.__setitem__(idx, val)
+
+    def __len__(self):
+        return len(self._item)
+
+    def __array_ufunc__(self, ufunc, method, *args, **kwargs):
+        # ufuncs can only be called directly on instances of numpy.ndarray (not
+        # things that implement its interfaces, like the FakeCUDAArray or
+        # FakeWithinKernelCUDAArray). For other objects, __array_ufunc__ is
+        # called when they are arguments to ufuncs, to provide an opportunity
+        # to somehow implement the ufunc. Since the FakeWithinKernelCUDAArray
+        # is just a thin wrapper over an ndarray, we can implement all ufuncs
+        # by passing the underlying ndarrays to a call to the intended ufunc.
+        call = getattr(ufunc, method)
+
+        def convert_fakes(obj):
+            if isinstance(obj, FakeWithinKernelCUDAArray):
+                obj = obj._item._ary
+
+            return obj
+
+        out = kwargs.get('out')
+        if out:
+            kwargs['out'] = tuple(convert_fakes(o) for o in out)
+        args = tuple(convert_fakes(a) for a in args)
+        return call(*args, **kwargs)
+
+
+class FakeCUDAArray(object):
+    '''
+    Implements the interface of a DeviceArray/DeviceRecord, but mostly just
+    wraps a NumPy array.
+    '''
+
+    __cuda_ndarray__ = True  # There must be gpu_data attribute
+
+    def __init__(self, ary, stream=0):
+        self._ary = ary
+        self.stream = stream
+
+    @property
+    def alloc_size(self):
+        return self._ary.nbytes
+
+    @property
+    def nbytes(self):
+        # return nbytes -- FakeCUDAArray is a wrapper around NumPy
+        return self._ary.nbytes
+
+    def __getattr__(self, attrname):
+        try:
+            attr = getattr(self._ary, attrname)
+            return attr
+        except AttributeError as e:
+            msg = "Wrapped array has no attribute '%s'" % attrname
+            raise AttributeError(msg) from e
+
+    def bind(self, stream=0):
+        return FakeCUDAArray(self._ary, stream)
+
+    @property
+    def T(self):
+        return self.transpose()
+
+    def transpose(self, axes=None):
+        return FakeCUDAArray(np.transpose(self._ary, axes=axes))
+
+    def __getitem__(self, idx):
+        ret = self._ary.__getitem__(idx)
+        if type(ret) not in [np.ndarray, np.void]:
+            return ret
+        else:
+            return FakeCUDAArray(ret, stream=self.stream)
+
+    def __setitem__(self, idx, val):
+        return self._ary.__setitem__(idx, val)
+
+    def copy_to_host(self, ary=None, stream=0):
+        if ary is None:
+            ary = np.empty_like(self._ary)
+        else:
+            check_array_compatibility(self, ary)
+        np.copyto(ary, self._ary)
+        return ary
+
+    def copy_to_device(self, ary, stream=0):
+        '''
+        Copy from the provided array into this array.
+
+        This may be less forgiving than the CUDA Python implementation, which
+        will copy data up to the length of the smallest of the two arrays,
+        whereas this expects the size of the arrays to be equal.
+        '''
+        sentry_contiguous(self)
+        self_core, ary_core = array_core(self), array_core(ary)
+        if isinstance(ary, FakeCUDAArray):
+            sentry_contiguous(ary)
+            check_array_compatibility(self_core, ary_core)
+        else:
+            ary_core = np.array(
+                ary_core,
+                order='C' if self_core.flags['C_CONTIGUOUS'] else 'F',
+                subok=True,
+                copy=False if numpy_version < (2, 0) else None)
+            check_array_compatibility(self_core, ary_core)
+        np.copyto(self_core._ary, ary_core)
+
+    @property
+    def shape(self):
+        return FakeShape(self._ary.shape)
+
+    def ravel(self, *args, **kwargs):
+        return FakeCUDAArray(self._ary.ravel(*args, **kwargs))
+
+    def reshape(self, *args, **kwargs):
+        return FakeCUDAArray(self._ary.reshape(*args, **kwargs))
+
+    def view(self, *args, **kwargs):
+        return FakeCUDAArray(self._ary.view(*args, **kwargs))
+
+    def is_c_contiguous(self):
+        return self._ary.flags.c_contiguous
+
+    def is_f_contiguous(self):
+        return self._ary.flags.f_contiguous
+
+    def __str__(self):
+        return str(self._ary)
+
+    def __repr__(self):
+        return repr(self._ary)
+
+    def __len__(self):
+        return len(self._ary)
+
+    # TODO: Add inplace, bitwise, unary magic methods
+    # (or maybe inherit this class from numpy)?
+    def __eq__(self, other):
+        return FakeCUDAArray(self._ary == other)
+
+    def __ne__(self, other):
+        return FakeCUDAArray(self._ary != other)
+
+    def __lt__(self, other):
+        return FakeCUDAArray(self._ary < other)
+
+    def __le__(self, other):
+        return FakeCUDAArray(self._ary <= other)
+
+    def __gt__(self, other):
+        return FakeCUDAArray(self._ary > other)
+
+    def __ge__(self, other):
+        return FakeCUDAArray(self._ary >= other)
+
+    def __add__(self, other):
+        return FakeCUDAArray(self._ary + other)
+
+    def __sub__(self, other):
+        return FakeCUDAArray(self._ary - other)
+
+    def __mul__(self, other):
+        return FakeCUDAArray(self._ary * other)
+
+    def __floordiv__(self, other):
+        return FakeCUDAArray(self._ary // other)
+
+    def __truediv__(self, other):
+        return FakeCUDAArray(self._ary / other)
+
+    def __mod__(self, other):
+        return FakeCUDAArray(self._ary % other)
+
+    def __pow__(self, other):
+        return FakeCUDAArray(self._ary ** other)
+
+    def split(self, section, stream=0):
+        return [
+            FakeCUDAArray(a)
+            for a in np.split(self._ary, range(section, len(self), section))
+        ]
+
+
+def array_core(ary):
+    """
+    Extract the repeated core of a broadcast array.
+
+    Broadcast arrays are by definition non-contiguous due to repeated
+    dimensions, i.e., dimensions with stride 0. In order to ascertain memory
+    contiguity and copy the underlying data from such arrays, we must create
+    a view without the repeated dimensions.
+
+    """
+    if not ary.strides or not ary.size:
+        return ary
+    core_index = []
+    for stride in ary.strides:
+        core_index.append(0 if stride == 0 else slice(None))
+    return ary[tuple(core_index)]
+
+
+def is_contiguous(ary):
+    """
+    Returns True iff `ary` is C-style contiguous while ignoring
+    broadcasted and 1-sized dimensions.
+    As opposed to array_core(), it does not call require_context(),
+    which can be quite expensive.
+    """
+    size = ary.dtype.itemsize
+    for shape, stride in zip(reversed(ary.shape), reversed(ary.strides)):
+        if shape > 1 and stride != 0:
+            if size != stride:
+                return False
+            size *= shape
+    return True
+
+
+def sentry_contiguous(ary):
+    core = array_core(ary)
+    if not core.flags['C_CONTIGUOUS'] and not core.flags['F_CONTIGUOUS']:
+        raise ValueError(errmsg_contiguous_buffer)
+
+
+def check_array_compatibility(ary1, ary2):
+    ary1sq, ary2sq = ary1.squeeze(), ary2.squeeze()
+    if ary1.dtype != ary2.dtype:
+        raise TypeError('incompatible dtype: %s vs. %s' %
+                        (ary1.dtype, ary2.dtype))
+    if ary1sq.shape != ary2sq.shape:
+        raise ValueError('incompatible shape: %s vs. %s' %
+                         (ary1.shape, ary2.shape))
+    if ary1sq.strides != ary2sq.strides:
+        raise ValueError('incompatible strides: %s vs. %s' %
+                         (ary1.strides, ary2.strides))
+
+
+def to_device(ary, stream=0, copy=True, to=None):
+    ary = np.array(ary,
+                   copy=False if numpy_version < (2, 0) else None,
+                   subok=True)
+    sentry_contiguous(ary)
+    if to is None:
+        buffer_dtype = np.int64 if ary.dtype.char in 'Mm' else ary.dtype
+        return FakeCUDAArray(
+            np.ndarray(
+                buffer=np.copy(array_core(ary)).view(buffer_dtype),
+                dtype=ary.dtype,
+                shape=ary.shape,
+                strides=ary.strides,
+            ).view(type=type(ary)),
+        )
+    else:
+        to.copy_to_device(ary, stream=stream)
+
+
+@contextmanager
+def pinned(arg):
+    yield
+
+
+def mapped_array(*args, **kwargs):
+    for unused_arg in ('portable', 'wc'):
+        if unused_arg in kwargs:
+            kwargs.pop(unused_arg)
+    return device_array(*args, **kwargs)
+
+
+def pinned_array(shape, dtype=np.float64, strides=None, order='C'):
+    return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order)
+
+
+def managed_array(shape, dtype=np.float64, strides=None, order='C'):
+    return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order)
+
+
+def device_array(*args, **kwargs):
+    stream = kwargs.pop('stream') if 'stream' in kwargs else 0
+    return FakeCUDAArray(np.ndarray(*args, **kwargs), stream=stream)
+
+
+def _contiguous_strides_like_array(ary):
+    """
+    Given an array, compute strides for a new contiguous array of the same
+    shape.
+    """
+    # Don't recompute strides if the default strides will be sufficient to
+    # create a contiguous array.
+    if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:
+        return None
+
+    # Otherwise, we need to compute new strides using an algorithm adapted from
+    # NumPy v1.17.4's PyArray_NewLikeArrayWithShape in
+    # core/src/multiarray/ctors.c. We permute the strides in ascending order
+    # then compute the stride for the dimensions with the same permutation.
+
+    # Stride permutation. E.g. a stride array (4, -2, 12) becomes
+    # [(1, -2), (0, 4), (2, 12)]
+    strideperm = [x for x in enumerate(ary.strides)]
+    strideperm.sort(key=lambda x: x[1])
+
+    # Compute new strides using permutation
+    strides = [0] * len(ary.strides)
+    stride = ary.dtype.itemsize
+    for i_perm, _ in strideperm:
+        strides[i_perm] = stride
+        stride *= ary.shape[i_perm]
+    return tuple(strides)
+
+
+def _order_like_array(ary):
+    if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:
+        return 'F'
+    else:
+        return 'C'
+
+
+def device_array_like(ary, stream=0):
+    strides = _contiguous_strides_like_array(ary)
+    order = _order_like_array(ary)
+    return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
+                        order=order)
+
+
+def pinned_array_like(ary):
+    strides = _contiguous_strides_like_array(ary)
+    order = _order_like_array(ary)
+    return pinned_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
+                        order=order)
+
+
+def auto_device(ary, stream=0, copy=True):
+    if isinstance(ary, FakeCUDAArray):
+        return ary, False
+
+    if not isinstance(ary, np.void):
+        ary = np.array(
+            ary,
+            copy=False if numpy_version < (2, 0) else None,
+            subok=True)
+    return to_device(ary, stream, copy), True
+
+
+def is_cuda_ndarray(obj):
+    "Check if an object is a CUDA ndarray"
+    return getattr(obj, '__cuda_ndarray__', False)
+
+
+def verify_cuda_ndarray_interface(obj):
+    "Verify the CUDA ndarray interface for an obj"
+    require_cuda_ndarray(obj)
+
+    def requires_attr(attr, typ):
+        if not hasattr(obj, attr):
+            raise AttributeError(attr)
+        if not isinstance(getattr(obj, attr), typ):
+            raise AttributeError('%s must be of type %s' % (attr, typ))
+
+    requires_attr('shape', tuple)
+    requires_attr('strides', tuple)
+    requires_attr('dtype', np.dtype)
+    requires_attr('size', int)
+
+
+def require_cuda_ndarray(obj):
+    "Raises ValueError if is_cuda_ndarray(obj) evaluates False"
+    if not is_cuda_ndarray(obj):
+        raise ValueError('require a cuda ndarray object')
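
A hedged round-trip sketch of the stubs above; the import path references the module added in this diff and needs no GPU:

    import numpy as np
    from numba.cuda.simulator.cudadrv.devicearray import (
        to_device, is_cuda_ndarray)

    host = np.arange(6, dtype=np.float32).reshape(2, 3)
    dev = to_device(host)            # copies into a wrapped ndarray
    assert is_cuda_ndarray(dev)      # __cuda_ndarray__ marker attribute

    dev[0, 0] = 42.0                 # __setitem__ forwards to the ndarray
    out = dev.copy_to_host()         # np.copyto into an empty_like array
    assert out[0, 0] == 42.0 and host[0, 0] == 0.0   # original unchanged

    try:
        dev.shape[-1]                # FakeShape forbids negative indexing,
    except IndexError:               # mirroring real device arrays
        print("negative shape index rejected")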
numba_cuda/numba/cuda/simulator/cudadrv/devices.py
@@ -0,0 +1,117 @@
+import numpy as np
+from collections import namedtuple
+
+_MemoryInfo = namedtuple("_MemoryInfo", "free,total")
+
+_SIMULATOR_CC = (5, 2)
+
+
+class FakeCUDADevice:
+    def __init__(self):
+        self.uuid = 'GPU-00000000-0000-0000-0000-000000000000'
+
+    @property
+    def compute_capability(self):
+        return _SIMULATOR_CC
+
+
+class FakeCUDAContext:
+    '''
+    This stub implements functionality only for simulating a single GPU
+    at the moment.
+    '''
+    def __init__(self, device_id):
+        self._device_id = device_id
+        self._device = FakeCUDADevice()
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+    def __str__(self):
+        return "<Managed Device {self.id}>".format(self=self)
+
+    @property
+    def id(self):
+        return self._device_id
+
+    @property
+    def device(self):
+        return self._device
+
+    @property
+    def compute_capability(self):
+        return _SIMULATOR_CC
+
+    def reset(self):
+        pass
+
+    def get_memory_info(self):
+        """
+        Cross-platform free / total host memory is hard without external
+        dependencies, e.g. `psutil` - so return infinite memory to maintain API
+        type compatibility
+        """
+        return _MemoryInfo(float('inf'), float('inf'))
+
+    def memalloc(self, sz):
+        """
+        Allocates memory on the simulated device
+        At present, there is no division between simulated
+        host memory and simulated device memory.
+        """
+        return np.ndarray(sz, dtype='u1')
+
+    def memhostalloc(self, sz, mapped=False, portable=False, wc=False):
+        '''Allocates memory on the host'''
+        return self.memalloc(sz)
+
+
+class FakeDeviceList:
+    '''
+    This stub implements a device list containing a single GPU. It also
+    keeps track of the GPU status, i.e. whether the context is closed or not,
+    which may have been set by the user calling reset()
+    '''
+    def __init__(self):
+        self.lst = (FakeCUDAContext(0),)
+        self.closed = False
+
+    def __getitem__(self, devnum):
+        self.closed = False
+        return self.lst[devnum]
+
+    def __str__(self):
+        return ', '.join([str(d) for d in self.lst])
+
+    def __iter__(self):
+        return iter(self.lst)
+
+    def __len__(self):
+        return len(self.lst)
+
+    @property
+    def current(self):
+        if self.closed:
+            return None
+        return self.lst[0]
+
+
+gpus = FakeDeviceList()
+
+
+def reset():
+    gpus[0].closed = True
+
+
+def get_context(devnum=0):
+    return FakeCUDAContext(devnum)
+
+
+def require_context(func):
+    '''
+    In the simulator, a context is always "available", so this is a no-op.
+    '''
+    return func
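
A hedged sketch of the device stubs above, again importable without a GPU:

    from numba.cuda.simulator.cudadrv.devices import (
        gpus, get_context, require_context)

    ctx = gpus.current                  # the single FakeCUDAContext
    print(ctx.id)                       # 0
    print(ctx.compute_capability)       # (5, 2), i.e. _SIMULATOR_CC
    print(ctx.get_memory_info().free)   # inf -- no real accounting is done

    buf = ctx.memalloc(16)              # "device" memory is a NumPy byte array
    print(buf.dtype, buf.shape)         # uint8 (16,)

    @require_context                    # no-op passthrough in the simulator
    def current_device_id():
        return get_context().id

    print(current_device_id())          # 0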