numba-cuda 0.19.0__py3-none-any.whl → 0.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of numba-cuda has been flagged as potentially problematic; consult the registry's advisory page for details.

Files changed (353):
  1. _numba_cuda_redirector.pth +3 -0
  2. _numba_cuda_redirector.py +3 -0
  3. numba_cuda/VERSION +1 -1
  4. numba_cuda/__init__.py +2 -1
  5. numba_cuda/_version.py +2 -13
  6. numba_cuda/numba/cuda/__init__.py +4 -1
  7. numba_cuda/numba/cuda/_internal/cuda_bf16.py +12708 -1469
  8. numba_cuda/numba/cuda/_internal/cuda_fp16.py +2656 -8769
  9. numba_cuda/numba/cuda/api.py +9 -1
  10. numba_cuda/numba/cuda/api_util.py +3 -0
  11. numba_cuda/numba/cuda/args.py +3 -0
  12. numba_cuda/numba/cuda/bf16.py +288 -2
  13. numba_cuda/numba/cuda/cg.py +3 -0
  14. numba_cuda/numba/cuda/cgutils.py +5 -2
  15. numba_cuda/numba/cuda/cloudpickle/__init__.py +21 -0
  16. numba_cuda/numba/cuda/cloudpickle/cloudpickle.py +1598 -0
  17. numba_cuda/numba/cuda/cloudpickle/cloudpickle_fast.py +17 -0
  18. numba_cuda/numba/cuda/codegen.py +4 -1
  19. numba_cuda/numba/cuda/compiler.py +376 -30
  20. numba_cuda/numba/cuda/core/analysis.py +319 -0
  21. numba_cuda/numba/cuda/core/annotations/__init__.py +0 -0
  22. numba_cuda/numba/cuda/core/annotations/type_annotations.py +304 -0
  23. numba_cuda/numba/cuda/core/base.py +1289 -0
  24. numba_cuda/numba/cuda/core/bytecode.py +727 -0
  25. numba_cuda/numba/cuda/core/caching.py +5 -2
  26. numba_cuda/numba/cuda/core/callconv.py +3 -0
  27. numba_cuda/numba/cuda/core/codegen.py +3 -0
  28. numba_cuda/numba/cuda/core/compiler.py +9 -14
  29. numba_cuda/numba/cuda/core/compiler_machinery.py +497 -0
  30. numba_cuda/numba/cuda/core/config.py +747 -0
  31. numba_cuda/numba/cuda/core/consts.py +124 -0
  32. numba_cuda/numba/cuda/core/cpu.py +370 -0
  33. numba_cuda/numba/cuda/core/environment.py +68 -0
  34. numba_cuda/numba/cuda/core/event.py +511 -0
  35. numba_cuda/numba/cuda/core/funcdesc.py +330 -0
  36. numba_cuda/numba/cuda/core/inline_closurecall.py +1889 -0
  37. numba_cuda/numba/cuda/core/interpreter.py +52 -27
  38. numba_cuda/numba/cuda/core/ir_utils.py +17 -29
  39. numba_cuda/numba/cuda/core/options.py +262 -0
  40. numba_cuda/numba/cuda/core/postproc.py +249 -0
  41. numba_cuda/numba/cuda/core/pythonapi.py +1868 -0
  42. numba_cuda/numba/cuda/core/rewrites/__init__.py +26 -0
  43. numba_cuda/numba/cuda/core/rewrites/ir_print.py +90 -0
  44. numba_cuda/numba/cuda/core/rewrites/registry.py +104 -0
  45. numba_cuda/numba/cuda/core/rewrites/static_binop.py +40 -0
  46. numba_cuda/numba/cuda/core/rewrites/static_getitem.py +187 -0
  47. numba_cuda/numba/cuda/core/rewrites/static_raise.py +98 -0
  48. numba_cuda/numba/cuda/core/sigutils.py +3 -0
  49. numba_cuda/numba/cuda/core/ssa.py +496 -0
  50. numba_cuda/numba/cuda/core/targetconfig.py +329 -0
  51. numba_cuda/numba/cuda/core/tracing.py +231 -0
  52. numba_cuda/numba/cuda/core/transforms.py +952 -0
  53. numba_cuda/numba/cuda/core/typed_passes.py +741 -7
  54. numba_cuda/numba/cuda/core/typeinfer.py +1948 -0
  55. numba_cuda/numba/cuda/core/unsafe/__init__.py +0 -0
  56. numba_cuda/numba/cuda/core/unsafe/bytes.py +67 -0
  57. numba_cuda/numba/cuda/core/unsafe/eh.py +66 -0
  58. numba_cuda/numba/cuda/core/unsafe/refcount.py +98 -0
  59. numba_cuda/numba/cuda/core/untyped_passes.py +1983 -0
  60. numba_cuda/numba/cuda/cpython/cmathimpl.py +560 -0
  61. numba_cuda/numba/cuda/cpython/mathimpl.py +499 -0
  62. numba_cuda/numba/cuda/cpython/numbers.py +1474 -0
  63. numba_cuda/numba/cuda/cuda_paths.py +425 -246
  64. numba_cuda/numba/cuda/cudadecl.py +4 -1
  65. numba_cuda/numba/cuda/cudadrv/__init__.py +4 -1
  66. numba_cuda/numba/cuda/cudadrv/devicearray.py +5 -1
  67. numba_cuda/numba/cuda/cudadrv/devices.py +3 -0
  68. numba_cuda/numba/cuda/cudadrv/driver.py +14 -140
  69. numba_cuda/numba/cuda/cudadrv/drvapi.py +3 -0
  70. numba_cuda/numba/cuda/cudadrv/dummyarray.py +114 -24
  71. numba_cuda/numba/cuda/cudadrv/enums.py +3 -0
  72. numba_cuda/numba/cuda/cudadrv/error.py +4 -0
  73. numba_cuda/numba/cuda/cudadrv/libs.py +8 -5
  74. numba_cuda/numba/cuda/cudadrv/linkable_code.py +3 -0
  75. numba_cuda/numba/cuda/cudadrv/mappings.py +4 -1
  76. numba_cuda/numba/cuda/cudadrv/ndarray.py +3 -0
  77. numba_cuda/numba/cuda/cudadrv/nvrtc.py +22 -8
  78. numba_cuda/numba/cuda/cudadrv/nvvm.py +4 -4
  79. numba_cuda/numba/cuda/cudadrv/rtapi.py +3 -0
  80. numba_cuda/numba/cuda/cudadrv/runtime.py +4 -1
  81. numba_cuda/numba/cuda/cudaimpl.py +8 -1
  82. numba_cuda/numba/cuda/cudamath.py +3 -0
  83. numba_cuda/numba/cuda/debuginfo.py +88 -2
  84. numba_cuda/numba/cuda/decorators.py +6 -3
  85. numba_cuda/numba/cuda/descriptor.py +6 -4
  86. numba_cuda/numba/cuda/device_init.py +3 -0
  87. numba_cuda/numba/cuda/deviceufunc.py +69 -2
  88. numba_cuda/numba/cuda/dispatcher.py +21 -39
  89. numba_cuda/numba/cuda/errors.py +10 -0
  90. numba_cuda/numba/cuda/extending.py +3 -0
  91. numba_cuda/numba/cuda/flags.py +143 -1
  92. numba_cuda/numba/cuda/fp16.py +3 -2
  93. numba_cuda/numba/cuda/include/13/cuda_bf16.h +5118 -0
  94. numba_cuda/numba/cuda/include/13/cuda_bf16.hpp +3865 -0
  95. numba_cuda/numba/cuda/include/13/cuda_fp16.h +5363 -0
  96. numba_cuda/numba/cuda/include/13/cuda_fp16.hpp +3483 -0
  97. numba_cuda/numba/cuda/initialize.py +4 -0
  98. numba_cuda/numba/cuda/intrinsic_wrapper.py +3 -0
  99. numba_cuda/numba/cuda/intrinsics.py +3 -0
  100. numba_cuda/numba/cuda/itanium_mangler.py +3 -0
  101. numba_cuda/numba/cuda/kernels/__init__.py +2 -0
  102. numba_cuda/numba/cuda/kernels/reduction.py +3 -0
  103. numba_cuda/numba/cuda/kernels/transpose.py +3 -0
  104. numba_cuda/numba/cuda/libdevice.py +4 -0
  105. numba_cuda/numba/cuda/libdevicedecl.py +3 -0
  106. numba_cuda/numba/cuda/libdevicefuncs.py +3 -0
  107. numba_cuda/numba/cuda/libdeviceimpl.py +3 -0
  108. numba_cuda/numba/cuda/locks.py +3 -0
  109. numba_cuda/numba/cuda/lowering.py +59 -159
  110. numba_cuda/numba/cuda/mathimpl.py +5 -1
  111. numba_cuda/numba/cuda/memory_management/__init__.py +3 -0
  112. numba_cuda/numba/cuda/memory_management/memsys.cu +5 -0
  113. numba_cuda/numba/cuda/memory_management/memsys.cuh +5 -0
  114. numba_cuda/numba/cuda/memory_management/nrt.cu +5 -0
  115. numba_cuda/numba/cuda/memory_management/nrt.cuh +5 -0
  116. numba_cuda/numba/cuda/memory_management/nrt.py +48 -18
  117. numba_cuda/numba/cuda/misc/findlib.py +75 -0
  118. numba_cuda/numba/cuda/models.py +12 -1
  119. numba_cuda/numba/cuda/np/npdatetime_helpers.py +217 -0
  120. numba_cuda/numba/cuda/np/npyfuncs.py +1807 -0
  121. numba_cuda/numba/cuda/np/numpy_support.py +553 -0
  122. numba_cuda/numba/cuda/np/ufunc/ufuncbuilder.py +59 -0
  123. numba_cuda/numba/cuda/nvvmutils.py +4 -1
  124. numba_cuda/numba/cuda/printimpl.py +15 -1
  125. numba_cuda/numba/cuda/random.py +4 -1
  126. numba_cuda/numba/cuda/reshape_funcs.cu +5 -0
  127. numba_cuda/numba/cuda/serialize.py +4 -1
  128. numba_cuda/numba/cuda/simulator/__init__.py +4 -1
  129. numba_cuda/numba/cuda/simulator/_internal/__init__.py +3 -0
  130. numba_cuda/numba/cuda/simulator/_internal/cuda_bf16.py +2 -0
  131. numba_cuda/numba/cuda/simulator/api.py +4 -1
  132. numba_cuda/numba/cuda/simulator/bf16.py +3 -0
  133. numba_cuda/numba/cuda/simulator/compiler.py +7 -0
  134. numba_cuda/numba/cuda/simulator/cudadrv/__init__.py +3 -0
  135. numba_cuda/numba/cuda/simulator/cudadrv/devicearray.py +4 -1
  136. numba_cuda/numba/cuda/simulator/cudadrv/devices.py +3 -0
  137. numba_cuda/numba/cuda/simulator/cudadrv/driver.py +3 -0
  138. numba_cuda/numba/cuda/simulator/cudadrv/drvapi.py +3 -0
  139. numba_cuda/numba/cuda/simulator/cudadrv/dummyarray.py +3 -0
  140. numba_cuda/numba/cuda/simulator/cudadrv/error.py +4 -0
  141. numba_cuda/numba/cuda/simulator/cudadrv/libs.py +4 -0
  142. numba_cuda/numba/cuda/simulator/cudadrv/linkable_code.py +4 -0
  143. numba_cuda/numba/cuda/simulator/cudadrv/nvrtc.py +3 -0
  144. numba_cuda/numba/cuda/simulator/cudadrv/nvvm.py +3 -0
  145. numba_cuda/numba/cuda/simulator/cudadrv/runtime.py +3 -0
  146. numba_cuda/numba/cuda/simulator/dispatcher.py +4 -0
  147. numba_cuda/numba/cuda/simulator/kernel.py +3 -0
  148. numba_cuda/numba/cuda/simulator/kernelapi.py +4 -1
  149. numba_cuda/numba/cuda/simulator/memory_management/__init__.py +3 -0
  150. numba_cuda/numba/cuda/simulator/memory_management/nrt.py +17 -2
  151. numba_cuda/numba/cuda/simulator/reduction.py +3 -0
  152. numba_cuda/numba/cuda/simulator/vector_types.py +3 -0
  153. numba_cuda/numba/cuda/simulator_init.py +3 -0
  154. numba_cuda/numba/cuda/stubs.py +3 -0
  155. numba_cuda/numba/cuda/target.py +38 -17
  156. numba_cuda/numba/cuda/testing.py +7 -19
  157. numba_cuda/numba/cuda/tests/__init__.py +4 -1
  158. numba_cuda/numba/cuda/tests/cloudpickle_main_class.py +9 -0
  159. numba_cuda/numba/cuda/tests/complex_usecases.py +3 -0
  160. numba_cuda/numba/cuda/tests/core/serialize_usecases.py +3 -0
  161. numba_cuda/numba/cuda/tests/core/test_itanium_mangler.py +3 -0
  162. numba_cuda/numba/cuda/tests/core/test_serialize.py +7 -4
  163. numba_cuda/numba/cuda/tests/cudadrv/__init__.py +3 -0
  164. numba_cuda/numba/cuda/tests/cudadrv/test_array_attr.py +3 -0
  165. numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py +3 -0
  166. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_array_slicing.py +3 -0
  167. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_auto_context.py +3 -0
  168. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_devicerecord.py +4 -1
  169. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_driver.py +3 -0
  170. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_libraries.py +4 -1
  171. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_memory.py +3 -0
  172. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +3 -0
  173. numba_cuda/numba/cuda/tests/cudadrv/test_deallocations.py +4 -1
  174. numba_cuda/numba/cuda/tests/cudadrv/test_detect.py +9 -3
  175. numba_cuda/numba/cuda/tests/cudadrv/test_emm_plugins.py +4 -1
  176. numba_cuda/numba/cuda/tests/cudadrv/test_events.py +3 -0
  177. numba_cuda/numba/cuda/tests/cudadrv/test_host_alloc.py +3 -0
  178. numba_cuda/numba/cuda/tests/cudadrv/test_init.py +3 -0
  179. numba_cuda/numba/cuda/tests/cudadrv/test_inline_ptx.py +3 -0
  180. numba_cuda/numba/cuda/tests/cudadrv/test_is_fp16.py +3 -0
  181. numba_cuda/numba/cuda/tests/cudadrv/test_linker.py +21 -2
  182. numba_cuda/numba/cuda/tests/cudadrv/test_managed_alloc.py +3 -0
  183. numba_cuda/numba/cuda/tests/cudadrv/test_module_callbacks.py +5 -1
  184. numba_cuda/numba/cuda/tests/cudadrv/test_nvjitlink.py +4 -1
  185. numba_cuda/numba/cuda/tests/cudadrv/test_nvrtc.py +3 -0
  186. numba_cuda/numba/cuda/tests/cudadrv/test_nvvm_driver.py +3 -0
  187. numba_cuda/numba/cuda/tests/cudadrv/test_pinned.py +3 -0
  188. numba_cuda/numba/cuda/tests/cudadrv/test_profiler.py +3 -0
  189. numba_cuda/numba/cuda/tests/cudadrv/test_ptds.py +4 -1
  190. numba_cuda/numba/cuda/tests/cudadrv/test_reset_device.py +3 -0
  191. numba_cuda/numba/cuda/tests/cudadrv/test_runtime.py +3 -0
  192. numba_cuda/numba/cuda/tests/cudadrv/test_select_device.py +3 -0
  193. numba_cuda/numba/cuda/tests/cudadrv/test_streams.py +3 -0
  194. numba_cuda/numba/cuda/tests/cudapy/__init__.py +3 -0
  195. numba_cuda/numba/cuda/tests/cudapy/cache_usecases.py +3 -0
  196. numba_cuda/numba/cuda/tests/cudapy/cache_with_cpu_usecases.py +3 -0
  197. numba_cuda/numba/cuda/tests/cudapy/cg_cache_usecases.py +3 -0
  198. numba_cuda/numba/cuda/tests/cudapy/extensions_usecases.py +4 -1
  199. numba_cuda/numba/cuda/tests/cudapy/recursion_usecases.py +3 -0
  200. numba_cuda/numba/cuda/tests/cudapy/test_alignment.py +3 -0
  201. numba_cuda/numba/cuda/tests/cudapy/test_array.py +5 -1
  202. numba_cuda/numba/cuda/tests/cudapy/test_array_alignment.py +3 -0
  203. numba_cuda/numba/cuda/tests/cudapy/test_array_args.py +3 -0
  204. numba_cuda/numba/cuda/tests/cudapy/test_array_methods.py +3 -0
  205. numba_cuda/numba/cuda/tests/cudapy/test_atomics.py +4 -1
  206. numba_cuda/numba/cuda/tests/cudapy/test_bfloat16.py +542 -2
  207. numba_cuda/numba/cuda/tests/cudapy/test_bfloat16_bindings.py +84 -1
  208. numba_cuda/numba/cuda/tests/cudapy/test_blackscholes.py +3 -0
  209. numba_cuda/numba/cuda/tests/cudapy/test_boolean.py +3 -0
  210. numba_cuda/numba/cuda/tests/cudapy/test_caching.py +4 -3
  211. numba_cuda/numba/cuda/tests/cudapy/test_casting.py +3 -0
  212. numba_cuda/numba/cuda/tests/cudapy/test_cffi.py +3 -0
  213. numba_cuda/numba/cuda/tests/cudapy/test_compiler.py +3 -0
  214. numba_cuda/numba/cuda/tests/cudapy/test_complex.py +4 -1
  215. numba_cuda/numba/cuda/tests/cudapy/test_complex_kernel.py +3 -0
  216. numba_cuda/numba/cuda/tests/cudapy/test_const_string.py +3 -0
  217. numba_cuda/numba/cuda/tests/cudapy/test_constmem.py +4 -1
  218. numba_cuda/numba/cuda/tests/cudapy/test_cooperative_groups.py +5 -3
  219. numba_cuda/numba/cuda/tests/cudapy/test_copy_propagate.py +130 -0
  220. numba_cuda/numba/cuda/tests/cudapy/test_cuda_array_interface.py +3 -0
  221. numba_cuda/numba/cuda/tests/cudapy/test_cuda_jit_no_types.py +3 -0
  222. numba_cuda/numba/cuda/tests/cudapy/test_datetime.py +4 -1
  223. numba_cuda/numba/cuda/tests/cudapy/test_debug.py +4 -1
  224. numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +314 -3
  225. numba_cuda/numba/cuda/tests/cudapy/test_debuginfo_types.py +4 -1
  226. numba_cuda/numba/cuda/tests/cudapy/test_device_func.py +3 -0
  227. numba_cuda/numba/cuda/tests/cudapy/test_dispatcher.py +4 -1
  228. numba_cuda/numba/cuda/tests/cudapy/test_enums.py +3 -0
  229. numba_cuda/numba/cuda/tests/cudapy/test_errors.py +4 -1
  230. numba_cuda/numba/cuda/tests/cudapy/test_exception.py +4 -1
  231. numba_cuda/numba/cuda/tests/cudapy/test_extending.py +5 -1
  232. numba_cuda/numba/cuda/tests/cudapy/test_fastmath.py +3 -0
  233. numba_cuda/numba/cuda/tests/cudapy/test_forall.py +3 -0
  234. numba_cuda/numba/cuda/tests/cudapy/test_freevar.py +3 -0
  235. numba_cuda/numba/cuda/tests/cudapy/test_frexp_ldexp.py +3 -0
  236. numba_cuda/numba/cuda/tests/cudapy/test_globals.py +3 -0
  237. numba_cuda/numba/cuda/tests/cudapy/test_gufunc.py +3 -0
  238. numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scalar.py +3 -0
  239. numba_cuda/numba/cuda/tests/cudapy/test_gufunc_scheduling.py +3 -0
  240. numba_cuda/numba/cuda/tests/cudapy/test_idiv.py +3 -0
  241. numba_cuda/numba/cuda/tests/cudapy/test_inline.py +21 -8
  242. numba_cuda/numba/cuda/tests/cudapy/test_inspect.py +3 -0
  243. numba_cuda/numba/cuda/tests/cudapy/test_intrinsics.py +3 -0
  244. numba_cuda/numba/cuda/tests/cudapy/test_ipc.py +3 -0
  245. numba_cuda/numba/cuda/tests/cudapy/test_ir_utils.py +13 -37
  246. numba_cuda/numba/cuda/tests/cudapy/test_iterators.py +3 -0
  247. numba_cuda/numba/cuda/tests/cudapy/test_lang.py +3 -0
  248. numba_cuda/numba/cuda/tests/cudapy/test_laplace.py +4 -1
  249. numba_cuda/numba/cuda/tests/cudapy/test_libdevice.py +3 -0
  250. numba_cuda/numba/cuda/tests/cudapy/test_lineinfo.py +3 -0
  251. numba_cuda/numba/cuda/tests/cudapy/test_localmem.py +3 -0
  252. numba_cuda/numba/cuda/tests/cudapy/test_mandel.py +3 -0
  253. numba_cuda/numba/cuda/tests/cudapy/test_math.py +4 -1
  254. numba_cuda/numba/cuda/tests/cudapy/test_matmul.py +4 -1
  255. numba_cuda/numba/cuda/tests/cudapy/test_minmax.py +3 -0
  256. numba_cuda/numba/cuda/tests/cudapy/test_montecarlo.py +3 -0
  257. numba_cuda/numba/cuda/tests/cudapy/test_multigpu.py +3 -0
  258. numba_cuda/numba/cuda/tests/cudapy/test_multiprocessing.py +3 -0
  259. numba_cuda/numba/cuda/tests/cudapy/test_multithreads.py +3 -0
  260. numba_cuda/numba/cuda/tests/cudapy/test_nondet.py +3 -0
  261. numba_cuda/numba/cuda/tests/cudapy/test_operator.py +4 -1
  262. numba_cuda/numba/cuda/tests/cudapy/test_optimization.py +3 -0
  263. numba_cuda/numba/cuda/tests/cudapy/test_overload.py +3 -0
  264. numba_cuda/numba/cuda/tests/cudapy/test_powi.py +3 -0
  265. numba_cuda/numba/cuda/tests/cudapy/test_print.py +23 -0
  266. numba_cuda/numba/cuda/tests/cudapy/test_py2_div_issue.py +3 -0
  267. numba_cuda/numba/cuda/tests/cudapy/test_random.py +3 -0
  268. numba_cuda/numba/cuda/tests/cudapy/test_record_dtype.py +4 -1
  269. numba_cuda/numba/cuda/tests/cudapy/test_recursion.py +3 -0
  270. numba_cuda/numba/cuda/tests/cudapy/test_reduction.py +4 -1
  271. numba_cuda/numba/cuda/tests/cudapy/test_retrieve_autoconverted_arrays.py +3 -0
  272. numba_cuda/numba/cuda/tests/cudapy/test_serialize.py +4 -1
  273. numba_cuda/numba/cuda/tests/cudapy/test_slicing.py +3 -0
  274. numba_cuda/numba/cuda/tests/cudapy/test_sm.py +4 -1
  275. numba_cuda/numba/cuda/tests/cudapy/test_sm_creation.py +3 -0
  276. numba_cuda/numba/cuda/tests/cudapy/test_ssa.py +453 -0
  277. numba_cuda/numba/cuda/tests/cudapy/test_stream_api.py +3 -0
  278. numba_cuda/numba/cuda/tests/cudapy/test_sync.py +4 -1
  279. numba_cuda/numba/cuda/tests/cudapy/test_transpose.py +3 -0
  280. numba_cuda/numba/cuda/tests/cudapy/test_typeinfer.py +538 -0
  281. numba_cuda/numba/cuda/tests/cudapy/test_ufuncs.py +266 -2
  282. numba_cuda/numba/cuda/tests/cudapy/test_userexc.py +4 -1
  283. numba_cuda/numba/cuda/tests/cudapy/test_vector_type.py +4 -1
  284. numba_cuda/numba/cuda/tests/cudapy/test_vectorize.py +3 -0
  285. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_complex.py +3 -0
  286. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_decor.py +115 -6
  287. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_device.py +3 -0
  288. numba_cuda/numba/cuda/tests/cudapy/test_vectorize_scalar_arg.py +3 -0
  289. numba_cuda/numba/cuda/tests/cudapy/test_warning.py +4 -1
  290. numba_cuda/numba/cuda/tests/cudapy/test_warp_ops.py +4 -1
  291. numba_cuda/numba/cuda/tests/cudasim/__init__.py +3 -0
  292. numba_cuda/numba/cuda/tests/cudasim/support.py +3 -0
  293. numba_cuda/numba/cuda/tests/cudasim/test_cudasim_issues.py +3 -0
  294. numba_cuda/numba/cuda/tests/data/__init__.py +2 -0
  295. numba_cuda/numba/cuda/tests/data/cta_barrier.cu +5 -0
  296. numba_cuda/numba/cuda/tests/data/cuda_include.cu +5 -0
  297. numba_cuda/numba/cuda/tests/data/error.cu +5 -0
  298. numba_cuda/numba/cuda/tests/data/include/add.cuh +5 -0
  299. numba_cuda/numba/cuda/tests/data/jitlink.cu +5 -0
  300. numba_cuda/numba/cuda/tests/data/warn.cu +5 -0
  301. numba_cuda/numba/cuda/tests/doc_examples/__init__.py +3 -0
  302. numba_cuda/numba/cuda/tests/doc_examples/ffi/__init__.py +2 -0
  303. numba_cuda/numba/cuda/tests/doc_examples/ffi/functions.cu +5 -0
  304. numba_cuda/numba/cuda/tests/doc_examples/ffi/include/mul.cuh +5 -0
  305. numba_cuda/numba/cuda/tests/doc_examples/ffi/saxpy.cu +5 -0
  306. numba_cuda/numba/cuda/tests/doc_examples/test_cg.py +3 -2
  307. numba_cuda/numba/cuda/tests/doc_examples/test_cpointer.py +3 -0
  308. numba_cuda/numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py +3 -0
  309. numba_cuda/numba/cuda/tests/doc_examples/test_ffi.py +6 -2
  310. numba_cuda/numba/cuda/tests/doc_examples/test_laplace.py +3 -2
  311. numba_cuda/numba/cuda/tests/doc_examples/test_matmul.py +3 -0
  312. numba_cuda/numba/cuda/tests/doc_examples/test_montecarlo.py +3 -0
  313. numba_cuda/numba/cuda/tests/doc_examples/test_random.py +3 -0
  314. numba_cuda/numba/cuda/tests/doc_examples/test_reduction.py +3 -0
  315. numba_cuda/numba/cuda/tests/doc_examples/test_sessionize.py +3 -2
  316. numba_cuda/numba/cuda/tests/doc_examples/test_ufunc.py +3 -0
  317. numba_cuda/numba/cuda/tests/doc_examples/test_vecadd.py +3 -0
  318. numba_cuda/numba/cuda/tests/enum_usecases.py +3 -0
  319. numba_cuda/numba/cuda/tests/nocuda/__init__.py +3 -0
  320. numba_cuda/numba/cuda/tests/nocuda/test_dummyarray.py +3 -0
  321. numba_cuda/numba/cuda/tests/nocuda/test_function_resolution.py +3 -0
  322. numba_cuda/numba/cuda/tests/nocuda/test_import.py +6 -1
  323. numba_cuda/numba/cuda/tests/nocuda/test_library_lookup.py +27 -12
  324. numba_cuda/numba/cuda/tests/nocuda/test_nvvm.py +3 -0
  325. numba_cuda/numba/cuda/tests/nrt/__init__.py +3 -0
  326. numba_cuda/numba/cuda/tests/nrt/test_nrt.py +5 -1
  327. numba_cuda/numba/cuda/tests/nrt/test_nrt_refct.py +3 -0
  328. numba_cuda/numba/cuda/tests/support.py +58 -15
  329. numba_cuda/numba/cuda/tests/test_binary_generation/Makefile +3 -0
  330. numba_cuda/numba/cuda/tests/test_binary_generation/generate_raw_ltoir.py +2 -1
  331. numba_cuda/numba/cuda/tests/test_binary_generation/nrt_extern.cu +5 -0
  332. numba_cuda/numba/cuda/tests/test_binary_generation/test_device_functions.cu +5 -0
  333. numba_cuda/numba/cuda/tests/test_binary_generation/undefined_extern.cu +5 -0
  334. numba_cuda/numba/cuda/tests/test_tracing.py +200 -0
  335. numba_cuda/numba/cuda/types.py +59 -0
  336. numba_cuda/numba/cuda/typing/__init__.py +12 -1
  337. numba_cuda/numba/cuda/typing/cffi_utils.py +55 -0
  338. numba_cuda/numba/cuda/typing/context.py +751 -0
  339. numba_cuda/numba/cuda/typing/enumdecl.py +74 -0
  340. numba_cuda/numba/cuda/typing/npydecl.py +658 -0
  341. numba_cuda/numba/cuda/typing/templates.py +10 -14
  342. numba_cuda/numba/cuda/ufuncs.py +6 -3
  343. numba_cuda/numba/cuda/utils.py +9 -112
  344. numba_cuda/numba/cuda/vector_types.py +3 -0
  345. numba_cuda/numba/cuda/vectorizers.py +3 -0
  346. {numba_cuda-0.19.0.dist-info → numba_cuda-0.20.0.dist-info}/METADATA +6 -2
  347. numba_cuda-0.20.0.dist-info/RECORD +357 -0
  348. {numba_cuda-0.19.0.dist-info → numba_cuda-0.20.0.dist-info}/licenses/LICENSE +1 -0
  349. numba_cuda-0.20.0.dist-info/licenses/LICENSE.numba +24 -0
  350. numba_cuda/numba/cuda/tests/cudadrv/test_mvc.py +0 -57
  351. numba_cuda-0.19.0.dist-info/RECORD +0 -301
  352. {numba_cuda-0.19.0.dist-info → numba_cuda-0.20.0.dist-info}/WHEEL +0 -0
  353. {numba_cuda-0.19.0.dist-info → numba_cuda-0.20.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1889 @@
1
+ # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: BSD-2-Clause
3
+
4
+ import types as pytypes # avoid confusion with numba.types
5
+ import copy
6
+ import ctypes
7
+ import numba.core.analysis
8
+ from numba.core import types, typing, config, cgutils, ir, errors
9
+ from numba.cuda import utils
10
+ from numba.cuda.core.ir_utils import (
11
+ next_label,
12
+ add_offset_to_labels,
13
+ replace_vars,
14
+ remove_dels,
15
+ rename_labels,
16
+ find_topo_order,
17
+ merge_adjacent_blocks,
18
+ GuardException,
19
+ require,
20
+ guard,
21
+ get_definition,
22
+ find_callname,
23
+ find_build_sequence,
24
+ get_np_ufunc_typ,
25
+ get_ir_of_code,
26
+ simplify_CFG,
27
+ canonicalize_array_math,
28
+ dead_code_elimination,
29
+ )
30
+
31
+ from numba.core.analysis import (
32
+ compute_cfg_from_blocks,
33
+ compute_use_defs,
34
+ compute_live_variables,
35
+ )
36
+ from numba.core.imputils import impl_ret_untracked
37
+ from numba.core.extending import intrinsic
38
+ from numba.core.typing import signature
39
+
40
+ from numba.cuda.core import postproc, rewrites
41
+ from numba.np.unsafe.ndarray import empty_inferred as unsafe_empty_inferred
42
+ import numpy as np
43
+ import operator
44
+ import numba.misc.special
45
+
46
+ """
47
+ Variable enable_inline_arraycall is only used for testing purpose.
48
+ """
49
+ enable_inline_arraycall = True
50
+
51
+
52
def callee_ir_validator(func_ir):
    """Validate that the IR of a callee is supported for inlining.

    Raises ``UnsupportedError`` if any assignment in the callee's blocks
    has a ``yield`` expression as its value.
    """
    for block in func_ir.blocks.values():
        for assign in block.find_insts(ir.Assign):
            if not isinstance(assign.value, ir.Yield):
                continue
            raise errors.UnsupportedError(
                "The use of yield in a closure is unsupported.",
                loc=assign.loc,
            )
59
+
60
+
61
+ def _created_inlined_var_name(function_name, var_name):
62
+ """Creates a name for an inlined variable based on the function name and the
63
+ variable name. It does this "safely" to avoid the use of characters that are
64
+ illegal in python variable names as there are occasions when function
65
+ generation needs valid python name tokens."""
66
+ inlined_name = f"{function_name}.{var_name}"
67
+ # Replace angle brackets, e.g. "<locals>" is replaced with "_locals_"
68
+ new_name = inlined_name.replace("<", "_").replace(">", "_")
69
+ # The version "version" of the closure function e.g. foo$2 (id 2) is
70
+ # rewritten as "foo_v2". Further "." is also replaced with "_".
71
+ new_name = new_name.replace(".", "_").replace("$", "_v")
72
+ return new_name
73
+
74
+
75
class InlineClosureCallPass(object):
    """InlineClosureCallPass class looks for direct calls to locally defined
    closures, and inlines the body of the closure function to the call site.
    """

    def __init__(self, func_ir, parallel_options, swapped=None, typed=False):
        # func_ir: IR of the function being transformed (mutated in place).
        # parallel_options: its .reduction / .comprehension flags decide what
        #   is handled here vs. deferred (see _inline_reduction and run()).
        # swapped: shared mutable mapping forwarded to _inline_arraycall;
        #   presumably records swapped call sites -- confirm against caller.
        # typed: whether the IR is typed; forwarded to _inline_arraycall.
        if swapped is None:
            swapped = {}
        self.func_ir = func_ir
        self.parallel_options = parallel_options
        self.swapped = swapped
        # Stencil call expressions already rewritten, so they are not
        # processed twice (see _inline_stencil).
        self._processed_stencils = []
        self.typed = typed

    def run(self):
        """Run inline closure call pass."""
        # Analysis relies on ir.Del presence, strip out later
        pp = postproc.PostProcessor(self.func_ir)
        pp.run(True)

        modified = False
        work_list = list(self.func_ir.blocks.items())
        debug_print = _make_debug_print("InlineClosureCallPass")
        debug_print(f"START {self.func_ir.func_id.func_qualname}")
        while work_list:
            _label, block = work_list.pop()
            for i, instr in enumerate(block.body):
                if isinstance(instr, ir.Assign):
                    expr = instr.value
                    if isinstance(expr, ir.Expr) and expr.op == "call":
                        call_name = guard(find_callname, self.func_ir, expr)
                        func_def = guard(
                            get_definition, self.func_ir, expr.func
                        )

                        # Each _inline_* helper returns True on success;
                        # guard() turns its internal require() failures
                        # (GuardException) into a None result.
                        if guard(
                            self._inline_reduction,
                            work_list,
                            block,
                            i,
                            expr,
                            call_name,
                        ):
                            modified = True
                            break  # because block structure changed

                        if guard(
                            self._inline_closure, work_list, block, i, func_def
                        ):
                            modified = True
                            break  # because block structure changed

                        # Stencil rewriting replaces the instruction in
                        # place, so iteration over this block can continue.
                        if guard(
                            self._inline_stencil, instr, call_name, func_def
                        ):
                            modified = True

        if enable_inline_arraycall:
            # Identify loop structure
            if modified:
                # Need to do some cleanups if closure inlining kicked in
                merge_adjacent_blocks(self.func_ir.blocks)
            cfg = compute_cfg_from_blocks(self.func_ir.blocks)
            debug_print("start inline arraycall")
            _debug_dump(cfg)
            loops = cfg.loops()
            sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
            visited = []
            # We go over all loops, bigger loops first (outer first)
            for k, s in sorted(
                sized_loops, key=lambda tup: tup[1], reverse=True
            ):
                visited.append(k)
                if guard(
                    _inline_arraycall,
                    self.func_ir,
                    cfg,
                    visited,
                    loops[k],
                    self.swapped,
                    self.parallel_options.comprehension,
                    self.typed,
                ):
                    modified = True
            if modified:
                _fix_nested_array(self.func_ir)

        if modified:
            # clean up now dead/unreachable blocks, e.g. unconditionally raising
            # an exception in an inlined function would render some parts of the
            # inliner unreachable
            cfg = compute_cfg_from_blocks(self.func_ir.blocks)
            for dead in cfg.dead_nodes():
                del self.func_ir.blocks[dead]

            # run dead code elimination
            dead_code_elimination(self.func_ir)
            # do label renaming
            self.func_ir.blocks = rename_labels(self.func_ir.blocks)

        # inlining done, strip dels
        remove_dels(self.func_ir.blocks)

        debug_print("END")

    def _inline_reduction(self, work_list, block, i, expr, call_name):
        """Inline a ``reduce(f, A[, v])`` call at ``block.body[i]`` by
        substituting a plain-Python reduce implementation.

        Returns True on success; raises GuardException (via require) when
        the call is not an applicable reduce call.
        """
        # only inline reduction in sequential execution, parallel handling
        # is done in ParforPass.
        require(not self.parallel_options.reduction)
        require(
            call_name == ("reduce", "builtins")
            or call_name == ("reduce", "_functools")
        )
        if len(expr.args) not in (2, 3):
            raise TypeError(
                "invalid reduce call, "
                "two arguments are required (optional initial "
                "value can also be specified)"
            )
        check_reduce_func(self.func_ir, expr.args[0])

        # Reference implementation of reduce(); its IR is inlined at the
        # call site in place of the original call.
        def reduce_func(f, A, v=None):
            it = iter(A)
            if v is not None:
                s = v
            else:
                s = next(it)
            for a in it:
                s = f(s, a)
            return s

        inline_closure_call(
            self.func_ir,
            self.func_ir.func_id.func.__globals__,
            block,
            i,
            reduce_func,
            work_list=work_list,
            callee_validator=callee_ir_validator,
        )
        return True

    def _inline_stencil(self, instr, call_name, func_def):
        """Rewrite a call to ``numba.stencil`` into a StencilFunc global.

        Returns True when the instruction was rewritten; raises
        GuardException (via require) when it is not a stencil call.
        """
        from numba.stencils.stencil import StencilFunc

        lhs = instr.target
        expr = instr.value
        # We keep the escaping variables of the stencil kernel
        # alive by adding them to the actual kernel call as extra
        # keyword arguments, which is ignored anyway.
        if (
            isinstance(func_def, ir.Global)
            and func_def.name == "stencil"
            and isinstance(func_def.value, StencilFunc)
        ):
            if expr.kws:
                expr.kws += func_def.value.kws
            else:
                expr.kws = func_def.value.kws
            return True
        # Otherwise we proceed to check if it is a call to numba.stencil
        require(
            call_name == ("stencil", "numba.stencils.stencil")
            or call_name == ("stencil", "numba")
        )
        require(expr not in self._processed_stencils)
        self._processed_stencils.append(expr)
        if not len(expr.args) == 1:
            raise ValueError(
                "As a minimum Stencil requires a kernel as an argument"
            )
        stencil_def = guard(get_definition, self.func_ir, expr.args[0])
        require(
            isinstance(stencil_def, ir.Expr)
            and stencil_def.op == "make_function"
        )
        kernel_ir = get_ir_of_code(
            self.func_ir.func_id.func.__globals__, stencil_def.code
        )
        options = dict(expr.kws)
        # Both option fixups pull literal tuple structure out of the IR;
        # guard() returns None (not True) when that structure is absent.
        if "neighborhood" in options:
            fixed = guard(self._fix_stencil_neighborhood, options)
            if not fixed:
                raise ValueError(
                    "stencil neighborhood option should be a tuple"
                    " with constant structure such as ((-w, w),)"
                )
        if "index_offsets" in options:
            fixed = guard(self._fix_stencil_index_offsets, options)
            if not fixed:
                raise ValueError(
                    "stencil index_offsets option should be a tuple"
                    " with constant structure such as (offset, )"
                )
        sf = StencilFunc(kernel_ir, "constant", options)
        sf.kws = expr.kws  # hack to keep variables live
        sf_global = ir.Global("stencil", sf, expr.loc)
        self.func_ir._definitions[lhs.name] = [sf_global]
        instr.value = sf_global
        return True

    def _fix_stencil_neighborhood(self, options):
        """
        Extract the two-level tuple representing the stencil neighborhood
        from the program IR to provide a tuple to StencilFunc.
        """
        # build_tuple node with neighborhood for each dimension
        dims_build_tuple = get_definition(self.func_ir, options["neighborhood"])
        require(hasattr(dims_build_tuple, "items"))
        res = []
        for window_var in dims_build_tuple.items:
            win_build_tuple = get_definition(self.func_ir, window_var)
            require(hasattr(win_build_tuple, "items"))
            res.append(tuple(win_build_tuple.items))
        options["neighborhood"] = tuple(res)
        return True

    def _fix_stencil_index_offsets(self, options):
        """
        Extract the tuple representing the stencil index offsets
        from the program IR to provide to StencilFunc.
        """
        offset_tuple = get_definition(self.func_ir, options["index_offsets"])
        require(hasattr(offset_tuple, "items"))
        options["index_offsets"] = tuple(offset_tuple.items)
        return True

    def _inline_closure(self, work_list, block, i, func_def):
        """Inline a direct call to a locally defined closure
        (``make_function`` expression) at ``block.body[i]``.

        Returns True on success; raises GuardException (via require)
        when ``func_def`` is not a make_function expression.
        """
        require(
            isinstance(func_def, ir.Expr) and func_def.op == "make_function"
        )
        inline_closure_call(
            self.func_ir,
            self.func_ir.func_id.func.__globals__,
            block,
            i,
            func_def,
            work_list=work_list,
            callee_validator=callee_ir_validator,
        )
        return True
316
+
317
+
318
def check_reduce_func(func_ir, func_var):
    """Check that the function bound to ``func_var`` in ``func_ir`` is
    amenable for inlining, and return the Python function itself.

    Accepted forms are a dispatcher referenced through a FreeVar/Global
    (its ``py_func`` is extracted) or an object carrying a code object
    directly (``code`` attribute or ``__code__``).

    Raises
    ------
    ValueError
        If no definition can be found, or the value is not a valid
        reduction function.
    TypeError
        If the function does not take exactly two arguments.
    """
    reduce_func = guard(get_definition, func_ir, func_var)
    if reduce_func is None:
        # Fix: the original message used a literal backslash line
        # continuation inside the string, embedding a run of spaces into
        # the user-visible error text.
        raise ValueError("Reduce function cannot be found for njit analysis")
    if isinstance(reduce_func, (ir.FreeVar, ir.Global)):
        if not isinstance(reduce_func.value, numba.core.registry.CPUDispatcher):
            raise ValueError("Invalid reduction function")
        # pull out the python function for inlining
        reduce_func = reduce_func.value.py_func
    elif not (hasattr(reduce_func, "code") or hasattr(reduce_func, "__code__")):
        raise ValueError("Invalid reduction function")
    # support both make_function expressions (``code``) and plain functions
    f_code = (
        reduce_func.code
        if hasattr(reduce_func, "code")
        else reduce_func.__code__
    )
    if f_code.co_argcount != 2:
        raise TypeError("Reduction function should take 2 arguments")
    return reduce_func
+
343
+
344
class InlineWorker(object):
    """A worker class for inlining, this is a more advanced version of
    `inline_closure_call` in that it permits inlining from function type, Numba
    IR and code object. It also, runs the entire untyped compiler pipeline on
    the inlinee to ensure that it is transformed as though it were compiled
    directly.
    """

    def __init__(
        self,
        typingctx=None,
        targetctx=None,
        locals=None,
        pipeline=None,
        flags=None,
        validator=callee_ir_validator,
        typemap=None,
        calltypes=None,
    ):
        """
        Instantiate a new InlineWorker, all arguments are optional though some
        must be supplied together for certain use cases. The methods will refuse
        to run if the object isn't configured in the manner needed. Args are the
        same as those in a numba.core.Compiler.state, except the validator which
        is a function taking Numba IR and validating it for use when inlining
        (this is optional and really to just provide better error messages about
        things which the inliner cannot handle like yield in closure).
        """

        # Small local helper so the group checks below can name the exact
        # missing argument in the error message.
        def check(arg, name):
            if arg is None:
                raise TypeError("{} must not be None".format(name))

        from numba.cuda.compiler import DefaultPassBuilder

        # check the stuff needed to run the more advanced compilation pipeline
        # is valid if any of it is provided; all-or-nothing per group.
        compiler_args = (targetctx, locals, pipeline, flags)
        compiler_group = [x is not None for x in compiler_args]
        if any(compiler_group) and not all(compiler_group):
            check(targetctx, "targetctx")
            check(locals, "locals")
            check(pipeline, "pipeline")
            check(flags, "flags")
        elif all(compiler_group):
            # A full compiler group additionally needs a typing context.
            check(typingctx, "typingctx")

        self._compiler_pipeline = DefaultPassBuilder.define_untyped_pipeline

        self.typingctx = typingctx
        self.targetctx = targetctx
        self.locals = locals
        self.pipeline = pipeline
        self.flags = flags
        self.validator = validator
        self.debug_print = _make_debug_print("InlineWorker")

        # check whether this inliner can also support typemap and calltypes
        # update and if what's provided is valid (both or neither).
        pair = (typemap, calltypes)
        pair_is_none = [x is None for x in pair]
        if any(pair_is_none) and not all(pair_is_none):
            msg = (
                "typemap and calltypes must both be either None or have a "
                "value, got: %s, %s"
            )
            raise TypeError(msg % pair)
        self._permit_update_type_and_call_maps = not all(pair_is_none)
        self.typemap = typemap
        self.calltypes = calltypes

    def inline_ir(
        self, caller_ir, block, i, callee_ir, callee_freevars, arg_typs=None
    ):
        """Inlines the callee_ir in the caller_ir at statement index i of block
        `block`, callee_freevars are the free variables for the callee_ir. If
        the callee_ir is derived from a function `func` then this is
        `func.__code__.co_freevars`. If `arg_typs` is given and the InlineWorker
        instance was initialized with a typemap and calltypes then they will be
        appropriately updated based on the arg_typs.

        Returns a 4-tuple: (unmutated copy of the callee IR, the inlined
        callee blocks, the local-variable rename map, the list of
        (label, block) pairs added to the caller).
        """

        # Always copy the callee IR, it gets mutated
        def copy_ir(the_ir):
            kernel_copy = the_ir.copy()
            kernel_copy.blocks = {}
            for block_label, block in the_ir.blocks.items():
                new_block = copy.deepcopy(the_ir.blocks[block_label])
                kernel_copy.blocks[block_label] = new_block
            return kernel_copy

        callee_ir = copy_ir(callee_ir)

        # check that the contents of the callee IR is something that can be
        # inlined if a validator is present
        if self.validator is not None:
            self.validator(callee_ir)

        # save an unmutated copy of the callee_ir to return
        callee_ir_original = copy_ir(callee_ir)
        scope = block.scope
        instr = block.body[i]
        call_expr = instr.value
        callee_blocks = callee_ir.blocks
        from numba.cuda.core import ir_utils

        # 1. relabel callee_ir by adding an offset so callee labels cannot
        # collide with any label already in the caller.
        max_label = max(
            ir_utils._the_max_label.next(),
            max(caller_ir.blocks.keys()),
        )
        callee_blocks = add_offset_to_labels(callee_blocks, max_label + 1)
        callee_blocks = simplify_CFG(callee_blocks)
        callee_ir.blocks = callee_blocks
        min_label = min(callee_blocks.keys())
        max_label = max(callee_blocks.keys())
        # reset globals in ir_utils before we use it
        ir_utils._the_max_label.update(max_label)
        self.debug_print("After relabel")
        _debug_dump(callee_ir)

        # 2. rename all local variables in callee_ir with new locals created in
        # caller_ir
        callee_scopes = _get_all_scopes(callee_blocks)
        self.debug_print("callee_scopes = ", callee_scopes)
        # one function should only have one local scope
        assert len(callee_scopes) == 1
        callee_scope = callee_scopes[0]
        var_dict = {}
        # Iterate over a snapshot (tuple) since redefine() below mutates the
        # scope's variable container.
        for var in tuple(callee_scope.localvars._con.values()):
            if var.name not in callee_freevars:
                inlined_name = _created_inlined_var_name(
                    callee_ir.func_id.unique_name, var.name
                )
                # Update the caller scope with the new names
                new_var = scope.redefine(inlined_name, loc=var.loc)
                # Also update the callee scope with the new names. Should the
                # type and call maps need updating (which requires SSA form) the
                # transformation to SSA is valid as the IR object is internally
                # consistent.
                callee_scope.redefine(inlined_name, loc=var.loc)
                var_dict[var.name] = new_var
        self.debug_print("var_dict = ", var_dict)
        replace_vars(callee_blocks, var_dict)
        self.debug_print("After local var rename")
        _debug_dump(callee_ir)

        # 3. replace formal parameters with actual arguments
        callee_func = callee_ir.func_id.func
        args = _get_callee_args(
            call_expr, callee_func, block.body[i].loc, caller_ir
        )

        # 4. Update typemap
        if self._permit_update_type_and_call_maps:
            if arg_typs is None:
                raise TypeError("arg_typs should have a value not None")
            self.update_type_and_call_maps(callee_ir, arg_typs)
            # update_type_and_call_maps replaces blocks
            callee_blocks = callee_ir.blocks

        self.debug_print("After arguments rename: ")
        _debug_dump(callee_ir)

        _replace_args_with(callee_blocks, args)
        # 5. split caller blocks into two: everything after the call site
        # moves into a fresh block jumped to from the inlined body.
        new_blocks = []
        new_block = ir.Block(scope, block.loc)
        new_block.body = block.body[i + 1 :]
        new_label = next_label()
        caller_ir.blocks[new_label] = new_block
        new_blocks.append((new_label, new_block))
        block.body = block.body[:i]
        block.body.append(ir.Jump(min_label, instr.loc))

        # 6. replace Return with assignment to LHS
        topo_order = find_topo_order(callee_blocks)
        _replace_returns(callee_blocks, instr.target, new_label)

        # remove the old definition of instr.target too
        if (
            instr.target.name in caller_ir._definitions
            and call_expr in caller_ir._definitions[instr.target.name]
        ):
            # NOTE: target can have multiple definitions due to control flow
            caller_ir._definitions[instr.target.name].remove(call_expr)

        # 7. insert all new blocks, and add back definitions
        for label in topo_order:
            # block scope must point to parent's
            block = callee_blocks[label]
            block.scope = scope
            _add_definitions(caller_ir, block)
            caller_ir.blocks[label] = block
            new_blocks.append((label, block))
        self.debug_print("After merge in")
        _debug_dump(caller_ir)

        return callee_ir_original, callee_blocks, var_dict, new_blocks

    def inline_function(self, caller_ir, block, i, function, arg_typs=None):
        """Inlines the function in the caller_ir at statement index i of block
        `block`. If `arg_typs` is given and the InlineWorker instance was
        initialized with a typemap and calltypes then they will be appropriately
        updated based on the arg_typs.
        """
        # Compile the Python function to canonical (non-SSA) Numba IR first.
        callee_ir = self.run_untyped_passes(function)
        freevars = function.__code__.co_freevars
        return self.inline_ir(
            caller_ir, block, i, callee_ir, freevars, arg_typs=arg_typs
        )

    def run_untyped_passes(self, func, enable_ssa=False):
        """
        Run the compiler frontend's untyped passes over the given Python
        function, and return the function's canonical Numba IR.

        Disable SSA transformation by default, since the call site won't be in
        SSA form and self.inline_ir depends on this being the case.
        """
        from numba.cuda.core.compiler import StateDict, _CompileStatus
        from numba.cuda.core.untyped_passes import ExtractByteCode
        from numba.cuda.core import bytecode

        # Build a minimal compiler state for the untyped pipeline.
        state = StateDict()
        state.func_ir = None
        state.typingctx = self.typingctx
        state.targetctx = self.targetctx
        state.locals = self.locals
        state.pipeline = self.pipeline
        state.flags = self.flags
        state.flags.enable_ssa = enable_ssa

        state.func_id = bytecode.FunctionIdentity.from_function(func)

        state.typemap = None
        state.calltypes = None
        state.type_annotation = None
        state.status = _CompileStatus(False)
        state.return_type = None
        state.metadata = {}

        ExtractByteCode().run_pass(state)
        # This is a lie, just need *some* args for the case where an obj mode
        # with lift is needed
        state.args = len(state.bc.func_id.pysig.parameters) * (types.pyobject,)

        pm = self._compiler_pipeline(state)

        pm.finalize()
        pm.run(state)
        return state.func_ir

    def update_type_and_call_maps(self, callee_ir, arg_typs):
        """Updates the type and call maps based on calling callee_ir with
        arguments from arg_typs.

        Raises ValueError if this worker was constructed without a
        typemap/calltypes pair.
        """
        from numba.cuda.core.ssa import reconstruct_ssa
        from numba.cuda.core.typed_passes import PreLowerStripPhis

        if not self._permit_update_type_and_call_maps:
            msg = (
                "InlineWorker instance not configured correctly, typemap or "
                "calltypes missing in initialization."
            )
            raise ValueError(msg)
        from numba.cuda.core import typed_passes, ir_utils

        # call branch pruning to simplify IR and avoid inference errors
        callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
        # NOTE(review): this uses upstream numba.core.analysis while
        # inline_closure_call in this module uses numba.cuda.core.analysis —
        # confirm which vendored module is intended here.
        numba.core.analysis.dead_branch_prune(callee_ir, arg_typs)
        # callee's typing may require SSA
        callee_ir = reconstruct_ssa(callee_ir)
        callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
        [f_typemap, _f_return_type, f_calltypes, _] = (
            typed_passes.type_inference_stage(
                self.typingctx,
                self.targetctx,
                callee_ir,
                arg_typs,
                None,
            )
        )
        # Strip the phi nodes introduced by SSA so the IR matches the
        # non-SSA caller again before merging.
        callee_ir = PreLowerStripPhis()._strip_phi_nodes(callee_ir)
        callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
        canonicalize_array_math(
            callee_ir, f_typemap, f_calltypes, self.typingctx
        )
        # remove argument entries like arg.a from typemap
        arg_names = [vname for vname in f_typemap if vname.startswith("arg.")]
        for a in arg_names:
            f_typemap.pop(a)
        self.typemap.update(f_typemap)
        self.calltypes.update(f_calltypes)
+
638
+
639
def inline_closure_call(
    func_ir,
    glbls,
    block,
    i,
    callee,
    typingctx=None,
    targetctx=None,
    arg_typs=None,
    typemap=None,
    calltypes=None,
    work_list=None,
    callee_validator=None,
    replace_freevars=True,
):
    """Inline the body of `callee` at its callsite (`i`-th instruction of
    `block`)

    `func_ir` is the func_ir object of the caller function and `glbls` is its
    global variable environment (func_ir.func_id.func.__globals__).
    `block` is the IR block of the callsite and `i` is the index of the
    callsite's node. `callee` is either the called function or a
    make_function node. `typingctx`, `typemap` and `calltypes` are typing
    data structures of the caller, available if we are in a typed pass.
    `arg_typs` includes the types of the arguments at the callsite.
    `callee_validator` is an optional callable which can be used to validate the
    IR of the callee to ensure that it contains IR supported for inlining, it
    takes one argument, the func_ir of the callee

    Returns IR blocks of the callee and the variable renaming dictionary used
    for them to facilitate further processing of new blocks.
    """
    scope = block.scope
    instr = block.body[i]
    call_expr = instr.value
    debug_print = _make_debug_print("inline_closure_call")
    debug_print("Found closure call: ", instr, " with callee = ", callee)
    # support both function object and make_function Expr
    callee_code = callee.code if hasattr(callee, "code") else callee.__code__
    callee_closure = (
        callee.closure if hasattr(callee, "closure") else callee.__closure__
    )
    from numba.cuda.core import ir_utils

    # first, get the IR of the callee
    if isinstance(callee, pytypes.FunctionType):
        from numba.cuda.core import compiler

        callee_ir = compiler.run_frontend(callee, inline_closures=True)
    else:
        callee_ir = get_ir_of_code(glbls, callee_code)

    # check that the contents of the callee IR is something that can be inlined
    # if a validator is supplied
    if callee_validator is not None:
        callee_validator(callee_ir)

    callee_blocks = callee_ir.blocks

    # 1. relabel callee_ir by adding an offset so the callee's labels cannot
    # collide with labels already present in the caller.
    max_label = max(ir_utils._the_max_label.next(), max(func_ir.blocks.keys()))
    callee_blocks = add_offset_to_labels(callee_blocks, max_label + 1)
    callee_blocks = simplify_CFG(callee_blocks)
    callee_ir.blocks = callee_blocks
    min_label = min(callee_blocks.keys())
    max_label = max(callee_blocks.keys())
    # reset globals in ir_utils before we use it
    ir_utils._the_max_label.update(max_label)
    debug_print("After relabel")
    _debug_dump(callee_ir)

    # 2. rename all local variables in callee_ir with new locals created in
    # func_ir
    callee_scopes = _get_all_scopes(callee_blocks)
    debug_print("callee_scopes = ", callee_scopes)
    # one function should only have one local scope
    assert len(callee_scopes) == 1
    callee_scope = callee_scopes[0]
    var_dict = {}
    for var in callee_scope.localvars._con.values():
        # Free variables keep their names; they are substituted in step 4.
        if var.name not in callee_code.co_freevars:
            inlined_name = _created_inlined_var_name(
                callee_ir.func_id.unique_name, var.name
            )
            new_var = scope.redefine(inlined_name, loc=var.loc)
            var_dict[var.name] = new_var
    debug_print("var_dict = ", var_dict)
    replace_vars(callee_blocks, var_dict)
    debug_print("After local var rename")
    _debug_dump(callee_ir)

    # 3. replace formal parameters with actual arguments
    args = _get_callee_args(call_expr, callee, block.body[i].loc, func_ir)

    debug_print("After arguments rename: ")
    _debug_dump(callee_ir)

    # 4. replace freevar with actual closure var
    if callee_closure and replace_freevars:
        closure = func_ir.get_definition(callee_closure)
        debug_print("callee's closure = ", closure)
        if isinstance(closure, tuple):
            # A real closure tuple of cell objects: unwrap via the C API.
            cellget = ctypes.pythonapi.PyCell_Get
            cellget.restype = ctypes.py_object
            cellget.argtypes = (ctypes.py_object,)
            items = tuple(cellget(x) for x in closure)
        else:
            # Otherwise it must be a build_tuple expression in the IR.
            assert isinstance(closure, ir.Expr) and closure.op == "build_tuple"
            items = closure.items
        assert len(callee_code.co_freevars) == len(items)
        _replace_freevars(callee_blocks, items)
        debug_print("After closure rename")
        _debug_dump(callee_ir)

    if typingctx:
        from numba.cuda.core import typed_passes

        # call branch pruning to simplify IR and avoid inference errors
        callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
        numba.cuda.core.analysis.dead_branch_prune(callee_ir, arg_typs)
        # NOTE(review): the except branch below retries the byte-identical
        # call; a deterministic failure will simply raise again. Confirm
        # whether a different fallback (e.g. SSA reconstruction first) was
        # intended here.
        try:
            [f_typemap, f_return_type, f_calltypes, _] = (
                typed_passes.type_inference_stage(
                    typingctx, targetctx, callee_ir, arg_typs, None
                )
            )
        except Exception:
            [f_typemap, f_return_type, f_calltypes, _] = (
                typed_passes.type_inference_stage(
                    typingctx, targetctx, callee_ir, arg_typs, None
                )
            )
        canonicalize_array_math(callee_ir, f_typemap, f_calltypes, typingctx)
        # remove argument entries like arg.a from typemap
        arg_names = [vname for vname in f_typemap if vname.startswith("arg.")]
        for a in arg_names:
            f_typemap.pop(a)
        typemap.update(f_typemap)
        calltypes.update(f_calltypes)

    _replace_args_with(callee_blocks, args)
    # 5. split caller blocks into two: everything after the call site moves
    # into a fresh block that the inlined body will jump to.
    new_blocks = []
    new_block = ir.Block(scope, block.loc)
    new_block.body = block.body[i + 1 :]
    new_label = next_label()
    func_ir.blocks[new_label] = new_block
    new_blocks.append((new_label, new_block))
    block.body = block.body[:i]
    block.body.append(ir.Jump(min_label, instr.loc))

    # 6. replace Return with assignment to LHS
    topo_order = find_topo_order(callee_blocks)
    _replace_returns(callee_blocks, instr.target, new_label)

    # remove the old definition of instr.target too
    if (
        instr.target.name in func_ir._definitions
        and call_expr in func_ir._definitions[instr.target.name]
    ):
        # NOTE: target can have multiple definitions due to control flow
        func_ir._definitions[instr.target.name].remove(call_expr)

    # 7. insert all new blocks, and add back definitions
    for label in topo_order:
        # block scope must point to parent's
        block = callee_blocks[label]
        block.scope = scope
        _add_definitions(func_ir, block)
        func_ir.blocks[label] = block
        new_blocks.append((label, block))
    debug_print("After merge in")
    _debug_dump(func_ir)

    if work_list is not None:
        for block in new_blocks:
            work_list.append(block)
    return callee_blocks, var_dict
+
818
+
819
def _get_callee_args(call_expr, callee, loc, func_ir):
    """Get arguments for calling 'callee', including the default arguments.

    Keyword arguments are currently only handled when 'callee' is an actual
    Python function; for make_function expressions only defaults are folded.

    Parameters
    ----------
    call_expr : ir.Expr
        The call-site expression ("call", "getattr" or an operator/getitem).
    callee : function or ir.Expr
        The called function object or a "make_function" expression.
    loc : ir.Loc
        Source location used for synthesized constant arguments.
    func_ir : ir.FunctionIR
        The caller's IR, used to resolve default-value definitions.

    Returns
    -------
    list
        Positional argument list with defaults/keywords folded in.
    """
    from numba.cuda.core import ir_utils

    if call_expr.op == "call":
        args = list(call_expr.args)
        if call_expr.vararg:
            msg = "Calling a closure with *args is unsupported."
            raise errors.UnsupportedError(msg, call_expr.loc)
    elif call_expr.op == "getattr":
        # Bound call: the only argument is the receiver object.
        args = [call_expr.value]
    elif ir_utils.is_operator_or_getitem(call_expr):
        args = call_expr.list_vars()
    else:
        raise TypeError("Unsupported ir.Expr.{}".format(call_expr.op))

    debug_print = _make_debug_print("inline_closure_call default handling")

    # handle defaults and kw arguments using pysignature if callee is function
    if isinstance(callee, pytypes.FunctionType):
        pysig = utils.pysignature(callee)

        # Named functions instead of lambda assignments (PEP 8 / E731).
        def normal_handler(index, param, default):
            return default

        def default_handler(index, param, default):
            return ir.Const(default, loc)

        # Throw error for stararg
        # TODO: handle stararg
        def stararg_handler(index, param, default):
            raise NotImplementedError(
                "Stararg not supported in inliner for arg {} {}".format(
                    index, param
                )
            )

        if call_expr.op == "call":
            kws = dict(call_expr.kws)
        else:
            kws = {}
        return numba.core.typing.fold_arguments(
            pysig, args, kws, normal_handler, default_handler, stararg_handler
        )
    else:
        # TODO: handle arguments for make_function case similar to function
        # case above
        callee_defaults = (
            callee.defaults
            if hasattr(callee, "defaults")
            else callee.__defaults__
        )
        if callee_defaults:
            debug_print("defaults = ", callee_defaults)
            if isinstance(callee_defaults, tuple):  # Python 3.5
                defaults_list = []
                for x in callee_defaults:
                    if isinstance(x, ir.Var):
                        defaults_list.append(x)
                    else:
                        # this branch is predominantly for kwargs from
                        # inlinable functions
                        defaults_list.append(ir.Const(value=x, loc=loc))
                args = args + defaults_list
            elif isinstance(callee_defaults, ir.Var) or isinstance(
                callee_defaults, str
            ):
                # Defaults stored indirectly: resolve the build_tuple that
                # holds them and take each element's definition.
                default_tuple = func_ir.get_definition(callee_defaults)
                assert isinstance(default_tuple, ir.Expr)
                assert default_tuple.op == "build_tuple"
                const_vals = [
                    func_ir.get_definition(x) for x in default_tuple.items
                ]
                args = args + const_vals
            else:
                raise NotImplementedError(
                    "Unsupported defaults to make_function: {}".format(
                        callee_defaults
                    )
                )
        return args
+
899
+
900
+ def _make_debug_print(prefix):
901
+ def debug_print(*args):
902
+ if config.DEBUG_INLINE_CLOSURE:
903
+ print(prefix + ": " + "".join(str(x) for x in args))
904
+
905
+ return debug_print
906
+
907
+
908
def _debug_dump(func_ir):
    """Dump *func_ir* to stdout when closure-inlining debugging is on."""
    if not config.DEBUG_INLINE_CLOSURE:
        return
    func_ir.dump()
+
912
+
913
+ def _get_all_scopes(blocks):
914
+ """Get all block-local scopes from an IR."""
915
+ all_scopes = []
916
+ for label, block in blocks.items():
917
+ if block.scope not in all_scopes:
918
+ all_scopes.append(block.scope)
919
+ return all_scopes
920
+
921
+
922
def _replace_args_with(blocks, args):
    """
    Replace every ``ir.Arg(...)`` assignment in *blocks* with the actual
    argument value from the call site.

    Parameters
    ----------
    blocks : dict
        Mapping of label -> ir.Block for the inlined callee.
    args : sequence
        Call-site argument values, indexed by ``ir.Arg.index``.
    """
    # Labels are unused, so iterate block objects directly.
    for block in blocks.values():
        for stmt in block.find_insts(ir.Assign):
            if isinstance(stmt.value, ir.Arg):
                idx = stmt.value.index
                # Every formal parameter must have a matching actual value.
                assert idx < len(args)
                stmt.value = args[idx]
+
934
+
935
def _replace_freevars(blocks, args):
    """
    Replace every ``ir.FreeVar(...)`` assignment in *blocks* with the real
    closure value from the parent function.

    Values that are already IR variables are substituted directly; anything
    else is wrapped in an ``ir.Const`` at the statement's location.
    """
    # Labels are unused, so iterate block objects directly.
    for block in blocks.values():
        for stmt in block.find_insts(ir.Assign):
            if isinstance(stmt.value, ir.FreeVar):
                idx = stmt.value.index
                assert idx < len(args)
                if isinstance(args[idx], ir.Var):
                    stmt.value = args[idx]
                else:
                    stmt.value = ir.Const(args[idx], stmt.loc)
+
950
+
951
def _replace_returns(blocks, target, return_label):
    """
    Replace each return statement with an assignment to *target* followed
    by a jump to *return_label*, and strip the cast that wrapped the
    returned value.
    """
    for label, block in blocks.items():
        # Collect cast assignments seen so far in this block; if a return's
        # value came from one of them, the cast is unwrapped below.
        casts = []
        # Index-based loop is deliberate: the body is mutated in place and a
        # Jump is appended while iterating (range() is evaluated once, and
        # the Return is asserted to be the last original statement).
        for i in range(len(block.body)):
            stmt = block.body[i]
            if isinstance(stmt, ir.Return):
                # A return must be the block terminator.
                assert i + 1 == len(block.body)
                block.body[i] = ir.Assign(stmt.value, target, stmt.loc)
                block.body.append(ir.Jump(return_label, stmt.loc))
                # remove cast of the returned value
                for cast in casts:
                    if cast.target.name == stmt.value.name:
                        cast.value = cast.value.value
            elif (
                isinstance(stmt, ir.Assign)
                and isinstance(stmt.value, ir.Expr)
                and stmt.value.op == "cast"
            ):
                casts.append(stmt)
+
974
+
975
def _add_definitions(func_ir, block):
    """
    Record every assignment found in *block* in the parent func_ir's
    definition map, keyed by the target variable's name.
    """
    for stmt in block.find_insts(ir.Assign):
        func_ir._definitions[stmt.target.name].append(stmt.value)
+
984
+
985
def _find_arraycall(func_ir, block):
    """Look for statement like "x = numpy.array(y)" or "x[..] = y"
    immediately after the closure call that creates list y (the i-th
    statement in block). Return (list_var, statement index, array kwargs)
    if found, or raise GuardException (via require) otherwise.
    """
    array_var = None
    list_var_dead_after_array_call = False
    list_var = None

    # Scan forward through the block; any statement other than a Del, a
    # matching Assign or a nested-array SetItem aborts the search.
    i = 0
    while i < len(block.body):
        instr = block.body[i]
        if isinstance(instr, ir.Del):
            # Stop the process if list_var becomes dead
            if list_var and array_var and instr.value == list_var.name:
                list_var_dead_after_array_call = True
                break
            pass
        elif isinstance(instr, ir.Assign):
            # Found array_var = array(list_var)
            lhs = instr.target
            expr = instr.value
            if guard(find_callname, func_ir, expr) == (
                "array",
                "numpy",
            ) and isinstance(expr.args[0], ir.Var):
                list_var = expr.args[0]
                array_var = lhs
                array_stmt_index = i
                array_kws = dict(expr.kws)
        elif (
            isinstance(instr, ir.SetItem)
            and isinstance(instr.value, ir.Var)
            and not list_var
        ):
            list_var = instr.value
            # Found array_var[..] = list_var, the case for nested array
            array_var = instr.target
            array_def = get_definition(func_ir, array_var)
            # Only accept targets created by unsafe_empty_inferred.
            require(guard(_find_unsafe_empty_inferred, func_ir, array_def))
            array_stmt_index = i
            array_kws = {}
        else:
            # Bail out otherwise
            break
        i = i + 1
    # require array_var is found, and list_var is dead after array_call.
    # (This also guarantees array_stmt_index/array_kws were bound above.)
    require(array_var and list_var_dead_after_array_call)
    _make_debug_print("find_array_call")(block.body[array_stmt_index])
    return list_var, array_stmt_index, array_kws
+
1037
+
1038
def _find_iter_range(func_ir, range_iter_var, swapped):
    """Find the iterator's actual range if it is either range(n), or
    range(m, n), otherwise raise GuardException (via require).

    Returns a (start, stop, range-global-definition) triple; *swapped* is
    updated in place to record the range call eligible for prange swapping.
    """
    debug_print = _make_debug_print("find_iter_range")
    range_iter_def = get_definition(func_ir, range_iter_var)
    debug_print("range_iter_var = ", range_iter_var, " def = ", range_iter_def)
    # The iterator must come from getiter(range_call_result).
    require(
        isinstance(range_iter_def, ir.Expr) and range_iter_def.op == "getiter"
    )
    range_var = range_iter_def.value
    range_def = get_definition(func_ir, range_var)
    debug_print("range_var = ", range_var, " range_def = ", range_def)
    require(isinstance(range_def, ir.Expr) and range_def.op == "call")
    func_var = range_def.func
    func_def = get_definition(func_ir, func_var)
    debug_print("func_var = ", func_var, " func_def = ", func_def)
    # Only the builtin range or numba's prange qualify.
    require(
        isinstance(func_def, ir.Global)
        and (
            func_def.value is range
            or func_def.value == numba.misc.special.prange
        )
    )
    nargs = len(range_def.args)
    swapping = [('"array comprehension"', "closure of"), range_def.func.loc]
    if nargs == 1:
        swapped[range_def.func.name] = swapping
        # NOTE(review): ``stop`` is bound but unused in this branch — the
        # raw IR variable is returned instead. The lookup may still matter
        # for guard semantics, so it is left in place; confirm intent.
        stop = get_definition(func_ir, range_def.args[0], lhs_only=True)
        return (0, range_def.args[0], func_def)
    elif nargs == 2:
        swapped[range_def.func.name] = swapping
        start = get_definition(func_ir, range_def.args[0], lhs_only=True)
        stop = get_definition(func_ir, range_def.args[1], lhs_only=True)
        return (start, stop, func_def)
    else:
        # range() with three arguments (a step) is not supported.
        raise GuardException
+
1076
+
1077
@intrinsic
def length_of_iterator(typingctx, val):
    """
    An implementation of len(iter) for internal use.
    Primary use is for array comprehensions (see inline_closurecall).

    Supports range, reflected-list, array, UniTuple and typed-list
    iterators; any other iterator type raises TypingError.
    """
    if isinstance(val, types.RangeIteratorType):
        val_type = val.yield_type

        def codegen(context, builder, sig, args):
            (value,) = args
            from numba.cpython.rangeobj import range_impl_map

            # Range iterators carry a precomputed remaining count.
            iter_type = range_impl_map[val_type][1]
            iterobj = cgutils.create_struct_proxy(iter_type)(
                context, builder, value
            )
            int_type = iterobj.count.type
            return impl_ret_untracked(
                context, builder, int_type, builder.load(iterobj.count)
            )

        # NOTE: this branch returns the range's yield type rather than
        # intp, unlike the branches below.
        return signature(val_type, val), codegen
    elif isinstance(val, types.ListIter):

        def codegen(context, builder, sig, args):
            (value,) = args
            intp_t = context.get_value_type(types.intp)
            from numba.cpython.listobj import ListIterInstance

            iterobj = ListIterInstance(context, builder, sig.args[0], value)
            return impl_ret_untracked(context, builder, intp_t, iterobj.size)

        return signature(types.intp, val), codegen
    elif isinstance(val, types.ArrayIterator):

        def codegen(context, builder, sig, args):
            (iterty,) = sig.args
            (value,) = args
            intp_t = context.get_value_type(types.intp)
            iterobj = context.make_helper(builder, iterty, value=value)
            arrayty = iterty.array_type
            from numba.np.arrayobj import make_array

            ary = make_array(arrayty)(context, builder, value=iterobj.array)
            shape = cgutils.unpack_tuple(builder, ary.shape)
            # array iterates along the outer dimension
            return impl_ret_untracked(context, builder, intp_t, shape[0])

        return signature(types.intp, val), codegen
    elif isinstance(val, types.UniTupleIter):

        def codegen(context, builder, sig, args):
            (iterty,) = sig.args
            tuplety = iterty.container
            intp_t = context.get_value_type(types.intp)
            # Tuple length is a compile-time constant.
            count_const = intp_t(tuplety.count)
            return impl_ret_untracked(context, builder, intp_t, count_const)

        return signature(types.intp, val), codegen
    elif isinstance(val, types.ListTypeIteratorType):

        def codegen(context, builder, sig, args):
            (value,) = args
            intp_t = context.get_value_type(types.intp)
            from numba.typed.listobject import ListIterInstance

            iterobj = ListIterInstance(context, builder, sig.args[0], value)
            return impl_ret_untracked(context, builder, intp_t, iterobj.size)

        return signature(types.intp, val), codegen
    else:
        msg = (
            "Unsupported iterator found in array comprehension, try "
            "preallocating the array and filling manually."
        )
        raise errors.TypingError(msg)
+
1155
+
1156
def _inline_arraycall(
    func_ir, cfg, visited, loop, swapped, enable_prange=False, typed=False
):
    """Look for an ``array(list)`` call in the exit block of a given loop, and
    turn list operations into array operations in the loop if the following
    conditions are met:

    1. The exit block contains an array call on the list;
    2. The list variable is no longer live after array call;
    3. The list is created in the loop entry block;
    4. The loop is created from an range iterator whose length is known prior
       to the loop;
    5. There is only one list_append operation on the list variable in the
       loop body;
    6. The block that contains list_append dominates the loop head, which
       ensures list length is the same as loop length;

    If any condition check fails, no modification will be made to the incoming
    IR: the helper bails out by raising ``GuardException`` (via ``require``),
    which is expected to be caught by a surrounding ``guard``.  Returns True
    on success.

    Parameters
    ----------
    func_ir : the FunctionIR being transformed in place.
    cfg : control-flow graph of ``func_ir.blocks``.
    visited : set of loop-header labels already processed.
    loop : the loop info object (``entries``/``exits``/``header``/``body``).
    swapped : mapping used by ``_find_iter_range`` for swapped range calls.
    enable_prange : accepted for signature compatibility; not used in this
        body.
    typed : when True, size/allocation fall back to ``length_of_iterator`` /
        ``unsafe_empty_inferred`` globals, which only work in typed (nopython)
        compilation.
    """
    debug_print = _make_debug_print("inline_arraycall")
    # There should only be one loop exit
    require(len(loop.exits) == 1)
    exit_block = next(iter(loop.exits))
    list_var, array_call_index, array_kws = _find_arraycall(
        func_ir,
        func_ir.blocks[exit_block],
    )

    # check if dtype is present in array call
    dtype_def = None
    dtype_mod_def = None
    if "dtype" in array_kws:
        require(isinstance(array_kws["dtype"], ir.Var))
        # We require that dtype argument to be a constant of getattr Expr, and
        # we'll remember its definition for later use.
        dtype_def = get_definition(func_ir, array_kws["dtype"])
        require(isinstance(dtype_def, ir.Expr) and dtype_def.op == "getattr")
        dtype_mod_def = get_definition(func_ir, dtype_def.value)

    list_var_def = get_definition(func_ir, list_var)
    debug_print("list_var = ", list_var, " def = ", list_var_def)
    # See through a cast (e.g. list-to-reflected-list) to the real definition.
    if isinstance(list_var_def, ir.Expr) and list_var_def.op == "cast":
        list_var_def = get_definition(func_ir, list_var_def.value)
    # Check if the definition is a build_list
    require(
        isinstance(list_var_def, ir.Expr) and list_var_def.op == "build_list"
    )
    # The build_list must be empty
    require(len(list_var_def.items) == 0)

    # Look for list_append in "last" block in loop body, which should be a block
    # that is a post-dominator of the loop header.
    list_append_stmts = []
    for label in loop.body:
        # We have to consider blocks of this loop, but not sub-loops.
        # To achieve this, we require the set of "in_loops" of "label" to be
        # visited loops.
        in_visited_loops = [l.header in visited for l in cfg.in_loops(label)]
        if not all(in_visited_loops):
            continue
        block = func_ir.blocks[label]
        debug_print("check loop body block ", label)
        for stmt in block.find_insts(ir.Assign):
            expr = stmt.value
            if isinstance(expr, ir.Expr) and expr.op == "call":
                func_def = get_definition(func_ir, expr.func)
                if (
                    isinstance(func_def, ir.Expr)
                    and func_def.op == "getattr"
                    and func_def.attr == "append"
                ):
                    list_def = get_definition(func_ir, func_def.value)
                    debug_print(
                        "list_def = ", list_def, list_def is list_var_def
                    )
                    if list_def is list_var_def:
                        # found matching append call
                        list_append_stmts.append((label, block, stmt))

    # Require only one list_append, otherwise we won't know the indices
    require(len(list_append_stmts) == 1)
    append_block_label, append_block, append_stmt = list_append_stmts[0]

    # Check if append_block (besides loop entry) dominates loop header.
    # Since CFG doesn't give us this info without loop entry, we approximate
    # by checking if the predecessor set of the header block is the same
    # as loop_entries plus append_block, which is certainly more restrictive
    # than necessary, and can be relaxed if needed.
    preds = set(l for l, b in cfg.predecessors(loop.header))
    debug_print("preds = ", preds, (loop.entries | set([append_block_label])))
    require(preds == (loop.entries | set([append_block_label])))

    # Find iterator in loop header
    iter_vars = []
    iter_first_vars = []
    loop_header = func_ir.blocks[loop.header]
    for stmt in loop_header.find_insts(ir.Assign):
        expr = stmt.value
        if isinstance(expr, ir.Expr):
            if expr.op == "iternext":
                iter_def = get_definition(func_ir, expr.value)
                debug_print("iter_def = ", iter_def)
                iter_vars.append(expr.value)
            elif expr.op == "pair_first":
                iter_first_vars.append(stmt.target)

    # Require only one iterator in loop header
    require(len(iter_vars) == 1 and len(iter_first_vars) == 1)
    # variable that holds the iterator object
    iter_var = iter_vars[0]
    # variable that holds the value out of iterator
    iter_first_var = iter_first_vars[0]

    # Final requirement: only one loop entry, and we're going to modify it by:
    # 1. replacing the list definition with an array definition;
    # 2. adding a counter for the array iteration.
    require(len(loop.entries) == 1)
    loop_entry = func_ir.blocks[next(iter(loop.entries))]
    terminator = loop_entry.terminator
    scope = loop_entry.scope
    loc = loop_entry.loc
    stmts = []
    removed = []

    def is_removed(val, removed):
        # True if ``val`` is a Var whose name matches one already scheduled
        # for removal (a synonym of the list being replaced).
        if isinstance(val, ir.Var):
            for x in removed:
                if x.name == val.name:
                    return True
        return False

    # Skip list construction and skip terminator, add the rest to stmts.
    # NOTE(review): this compares against ``list_def``, the value left over
    # from the append-scan loop above, rather than ``list_var_def``.  At this
    # point they are identical for the single matched append, but that relies
    # on the ``require(len(list_append_stmts) == 1)`` above — confirm before
    # changing the scan.
    for i in range(len(loop_entry.body) - 1):
        stmt = loop_entry.body[i]
        if isinstance(stmt, ir.Assign) and (
            stmt.value is list_def or is_removed(stmt.value, removed)
        ):
            removed.append(stmt.target)
        else:
            stmts.append(stmt)
    debug_print("removed variables: ", removed)

    # Define an index_var to index the array.
    # If the range happens to be single step ranges like range(n), or
    # range(m, n), then the index_var correlates to iterator index; otherwise
    # we'll have to define a new counter.
    range_def = guard(_find_iter_range, func_ir, iter_var, swapped)
    index_var = scope.redefine("index", loc)
    if range_def and range_def[0] == 0:
        # iterator starts with 0, index_var can just be iter_first_var
        index_var = iter_first_var
    else:
        # index_var = -1 # starting the index with -1 since it will incremented
        # in loop header
        stmts.append(
            _new_definition(
                func_ir, index_var, ir.Const(value=-1, loc=loc), loc
            )
        )

    # Insert statement to get the size of the loop iterator
    size_var = scope.redefine("size", loc)
    if range_def:
        start, stop, range_func_def = range_def
        if start == 0:
            size_val = stop
        else:
            size_val = ir.Expr.binop(
                fn=operator.sub, lhs=stop, rhs=start, loc=loc
            )

    else:
        # this doesn't work in objmode as it's effectively untyped
        if typed:
            len_func_var = scope.redefine("len_func", loc)
            stmts.append(
                _new_definition(
                    func_ir,
                    len_func_var,
                    ir.Global(
                        "length_of_iterator", length_of_iterator, loc=loc
                    ),
                    loc,
                )
            )
            size_val = ir.Expr.call(len_func_var, (iter_var,), (), loc=loc)
        else:
            raise GuardException

    stmts.append(_new_definition(func_ir, size_var, size_val, loc))

    size_tuple_var = scope.redefine("size_tuple", loc)
    stmts.append(
        _new_definition(
            func_ir,
            size_tuple_var,
            ir.Expr.build_tuple(items=[size_var], loc=loc),
            loc,
        )
    )

    # Insert array allocation
    array_var = scope.redefine("array", loc)
    empty_func = scope.redefine("empty_func", loc)
    if dtype_def and dtype_mod_def:
        # when dtype is present, we'll call empty with dtype
        dtype_mod_var = scope.redefine("dtype_mod", loc)
        dtype_var = scope.redefine("dtype", loc)
        stmts.append(
            _new_definition(func_ir, dtype_mod_var, dtype_mod_def, loc)
        )
        stmts.append(
            _new_definition(
                func_ir,
                dtype_var,
                ir.Expr.getattr(dtype_mod_var, dtype_def.attr, loc),
                loc,
            )
        )
        stmts.append(
            _new_definition(
                func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc
            )
        )
        # Re-binds the name from the earlier _find_arraycall kwargs dict to
        # the kwargs list for the new empty() call.
        array_kws = [("dtype", dtype_var)]
    else:
        # this doesn't work in objmode as it's effectively untyped
        if typed:
            # otherwise we'll call unsafe_empty_inferred
            stmts.append(
                _new_definition(
                    func_ir,
                    empty_func,
                    ir.Global(
                        "unsafe_empty_inferred", unsafe_empty_inferred, loc=loc
                    ),
                    loc,
                )
            )
            array_kws = []
        else:
            raise GuardException

    # array_var = empty_func(size_tuple_var)
    stmts.append(
        _new_definition(
            func_ir,
            array_var,
            ir.Expr.call(
                empty_func, (size_tuple_var,), list(array_kws), loc=loc
            ),
            loc,
        )
    )

    # Add back removed just in case they are used by something else
    for var in removed:
        stmts.append(_new_definition(func_ir, var, array_var, loc))

    # Add back terminator
    stmts.append(terminator)
    # Modify loop_entry
    loop_entry.body = stmts

    if range_def:
        if range_def[0] != 0:
            # when range doesn't start from 0, index_var becomes loop index
            # (iter_first_var) minus an offset (range_def[0])
            terminator = loop_header.terminator
            assert isinstance(terminator, ir.Branch)
            # find the block in the loop body that header jumps to
            block_id = terminator.truebr
            blk = func_ir.blocks[block_id]
            loc = blk.loc
            blk.body.insert(
                0,
                _new_definition(
                    func_ir,
                    index_var,
                    ir.Expr.binop(
                        fn=operator.sub,
                        lhs=iter_first_var,
                        rhs=range_def[0],
                        loc=loc,
                    ),
                    loc,
                ),
            )
    else:
        # Insert index_var increment to the end of loop header
        loc = loop_header.loc
        terminator = loop_header.terminator
        stmts = loop_header.body[0:-1]
        next_index_var = scope.redefine("next_index", loc)
        one = scope.redefine("one", loc)
        # one = 1
        stmts.append(
            _new_definition(func_ir, one, ir.Const(value=1, loc=loc), loc)
        )
        # next_index_var = index_var + 1
        stmts.append(
            _new_definition(
                func_ir,
                next_index_var,
                ir.Expr.binop(fn=operator.add, lhs=index_var, rhs=one, loc=loc),
                loc,
            )
        )
        # index_var = next_index_var
        stmts.append(_new_definition(func_ir, index_var, next_index_var, loc))
        stmts.append(terminator)
        loop_header.body = stmts

    # In append_block, change list_append into array assign
    for i in range(len(append_block.body)):
        if append_block.body[i] is append_stmt:
            debug_print("Replace append with SetItem")
            append_block.body[i] = ir.SetItem(
                target=array_var,
                index=index_var,
                value=append_stmt.value.args[0],
                loc=append_stmt.loc,
            )

    # replace array call, by changing "a = array(b)" to "a = b"
    stmt = func_ir.blocks[exit_block].body[array_call_index]
    # stmt can be either array call or SetItem, we only replace array call
    if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
        stmt.value = array_var
        func_ir._definitions[stmt.target.name] = [stmt.value]

    return True
1487
+
1488
+
1489
def _find_unsafe_empty_inferred(func_ir, expr):
    """Return True if ``expr`` is a call whose callee resolves to the
    ``unsafe_empty_inferred`` global.

    Bails out by raising ``GuardException`` (via ``require``) when ``expr``
    is not a call expression or its callee definition is not an
    ``ir.Global``; intended to be used under ``guard``.
    """
    # Fix: removed a stray no-op expression statement (a bare
    # ``unsafe_empty_inferred`` name) that previously sat here and had no
    # effect beyond an attribute lookup.
    require(isinstance(expr, ir.Expr) and expr.op == "call")
    callee = expr.func
    callee_def = get_definition(func_ir, callee)
    require(isinstance(callee_def, ir.Global))
    _make_debug_print("_find_unsafe_empty_inferred")(callee_def.value)
    return callee_def.value == unsafe_empty_inferred
1497
+
1498
+
1499
def _fix_nested_array(func_ir):
    """Look for assignments like ``a[..] = b``, where both ``a`` and ``b``
    are numpy arrays, and try to eliminate array ``b`` by expanding ``a``
    with an extra dimension.

    Mutates ``func_ir`` in place; each candidate is attempted under
    ``guard`` so a failing precondition leaves the IR untouched.
    """
    blocks = func_ir.blocks
    cfg = compute_cfg_from_blocks(blocks)
    usedefs = compute_use_defs(blocks)
    # Liveness is computed with an empty dead-map: nothing pre-marked dead.
    empty_deadmap = dict([(label, set()) for label in blocks.keys()])
    livemap = compute_live_variables(cfg, blocks, usedefs.defmap, empty_deadmap)

    def find_array_def(arr):
        """Find numpy array definition such as
        arr = numba.unsafe.ndarray.empty_inferred(...).
        If it is arr = b[...], find array definition of b recursively.
        """
        arr_def = get_definition(func_ir, arr)
        _make_debug_print("find_array_def")(arr, arr_def)
        if isinstance(arr_def, ir.Expr):
            if guard(_find_unsafe_empty_inferred, func_ir, arr_def):
                return arr_def
            elif arr_def.op == "getitem":
                return find_array_def(arr_def.value)
        raise GuardException

    def fix_dependencies(expr, varlist):
        """Double check if all variables in varlist are defined before
        expr is used. Try to move constant definition when the check fails.
        Bails out by raising GuardException if it can't be moved.
        """
        debug_print = _make_debug_print("fix_dependencies")
        for label, block in blocks.items():
            scope = block.scope
            body = block.body
            defined = set()
            for i in range(len(body)):
                inst = body[i]
                if isinstance(inst, ir.Assign):
                    defined.add(inst.target.name)
                    if inst.value is expr:
                        new_varlist = []
                        for var in varlist:
                            # var must be defined before this inst, or live
                            # and not later defined.
                            if var.name in defined or (
                                var.name in livemap[label]
                                and var.name not in usedefs.defmap[label]
                            ):
                                debug_print(var.name, " already defined")
                                new_varlist.append(var)
                            else:
                                debug_print(var.name, " not yet defined")
                                var_def = get_definition(func_ir, var.name)
                                if isinstance(var_def, ir.Const):
                                    loc = var.loc
                                    new_var = scope.redefine("new_var", loc)
                                    new_const = ir.Const(var_def.value, loc)
                                    new_vardef = _new_definition(
                                        func_ir, new_var, new_const, loc
                                    )
                                    new_body = []
                                    new_body.extend(body[:i])
                                    new_body.append(new_vardef)
                                    new_body.extend(body[i:])
                                    block.body = new_body
                                    new_varlist.append(new_var)
                                else:
                                    raise GuardException
                        return new_varlist
        # when expr is not found in block
        raise GuardException

    def fix_array_assign(stmt):
        """For assignment like lhs[idx] = rhs, where both lhs and rhs are
        arrays, do the following:
        1. find the definition of rhs, which has to be a call to
           numba.unsafe.ndarray.empty_inferred
        2. find the source array creation for lhs, insert an extra dimension
           of size of b.
        3. replace the definition of
           rhs = numba.unsafe.ndarray.empty_inferred(...) with rhs = lhs[idx]
        """
        require(isinstance(stmt, ir.SetItem))
        require(isinstance(stmt.value, ir.Var))
        debug_print = _make_debug_print("fix_array_assign")
        debug_print("found SetItem: ", stmt)
        lhs = stmt.target
        # Find the source array creation of lhs
        lhs_def = find_array_def(lhs)
        debug_print("found lhs_def: ", lhs_def)
        rhs_def = get_definition(func_ir, stmt.value)
        debug_print("found rhs_def: ", rhs_def)
        require(isinstance(rhs_def, ir.Expr))
        if rhs_def.op == "cast":
            rhs_def = get_definition(func_ir, rhs_def.value)
            require(isinstance(rhs_def, ir.Expr))
        require(_find_unsafe_empty_inferred(func_ir, rhs_def))
        # Find the array dimension of rhs
        dim_def = get_definition(func_ir, rhs_def.args[0])
        require(isinstance(dim_def, ir.Expr) and dim_def.op == "build_tuple")
        debug_print("dim_def = ", dim_def)
        extra_dims = [
            get_definition(func_ir, x, lhs_only=True) for x in dim_def.items
        ]
        debug_print("extra_dims = ", extra_dims)
        # Expand size tuple when creating lhs_def with extra_dims
        size_tuple_def = get_definition(func_ir, lhs_def.args[0])
        require(
            isinstance(size_tuple_def, ir.Expr)
            and size_tuple_def.op == "build_tuple"
        )
        debug_print("size_tuple_def = ", size_tuple_def)
        extra_dims = fix_dependencies(size_tuple_def, extra_dims)
        size_tuple_def.items += extra_dims
        # In-place modify rhs_def to be getitem
        rhs_def.op = "getitem"
        rhs_def.fn = operator.getitem
        rhs_def.value = get_definition(func_ir, lhs, lhs_only=True)
        rhs_def.index = stmt.index
        del rhs_def._kws["func"]
        del rhs_def._kws["args"]
        del rhs_def._kws["vararg"]
        del rhs_def._kws["kws"]
        # success
        return True

    for label in find_topo_order(func_ir.blocks):
        block = func_ir.blocks[label]
        # Fix: iterate over a snapshot of the body.  The previous code
        # iterated ``block.body`` directly while calling
        # ``block.body.remove(stmt)`` inside the loop, which skips the
        # statement immediately following each removed one.
        for stmt in list(block.body):
            if guard(fix_array_assign, stmt):
                block.body.remove(stmt)
1629
+
1630
+
1631
def _new_definition(func_ir, var, value, loc):
    """Record ``value`` as the sole definition of ``var`` in the IR's
    definition map and return the corresponding ``ir.Assign`` statement."""
    assign = ir.Assign(value=value, target=var, loc=loc)
    func_ir._definitions[var.name] = [value]
    return assign
1634
+
1635
+
1636
@rewrites.register_rewrite("after-inference")
class RewriteArrayOfConsts(rewrites.Rewrite):
    """Find 1D array creations from a constant list and rewrite them into
    direct initialization of the array elements, without materializing the
    intermediate list.
    """

    def __init__(self, state, *args, **kws):
        self.typingctx = state.typingctx
        super(RewriteArrayOfConsts, self).__init__(*args, **kws)

    def match(self, func_ir, block, typemap, calltypes):
        # A block with no recorded call types cannot contain an array() call.
        if not calltypes:
            return False
        self.crnt_block = block
        # _inline_const_arraycall returns a replacement body on success,
        # or None (via guard) when any precondition fails.
        rewritten = guard(
            _inline_const_arraycall,
            block,
            func_ir,
            self.typingctx,
            typemap,
            calltypes,
        )
        self.new_body = rewritten
        return rewritten is not None

    def apply(self):
        # Install the rewritten statement list produced during match().
        self.crnt_block.body = self.new_body
        return self.crnt_block
1664
+
1665
+
1666
def _inline_const_arraycall(block, func_ir, context, typemap, calltypes):
    """Look for an ``array(list)`` call where list is a constant list created
    by build_list, and turn them into direct array creation and
    initialization, if the following conditions are met:

    1. The build_list call immediate precedes the array call;
    2. The list variable is no longer live after array call;

    If any condition check fails, no modification will be made.  Returns the
    rewritten statement list for ``block`` when at least one translation took
    place, or ``None`` otherwise (so callers can use it under ``guard``).
    """
    debug_print = _make_debug_print("inline_const_arraycall")
    scope = block.scope

    def inline_array(array_var, expr, stmts, list_vars, dels):
        """Check to see if the given "array_var" is created from a list
        of constants, and try to inline the list definition as array
        initialization.

        Extra statements produced will be appended to "stmts".
        """
        callname = guard(find_callname, func_ir, expr)
        require(callname and callname[1] == "numpy" and callname[0] == "array")
        require(expr.args[0].name in list_vars)
        ret_type = calltypes[expr].return_type
        # Only 1D array results are handled by this rewrite.
        require(
            isinstance(ret_type, types.ArrayCompatible) and ret_type.ndim == 1
        )
        loc = expr.loc
        list_var = expr.args[0]
        # Get the type of the array to be created.
        array_typ = typemap[array_var.name]
        debug_print("inline array_var = ", array_var, " list_var = ", list_var)
        # Get the element type of the array to be created.
        dtype = array_typ.dtype
        # Get the sequence of operations to provide values to the new array.
        seq, _ = find_build_sequence(func_ir, list_var)
        size = len(seq)
        # Create a tuple to pass to empty below to specify the new array size.
        size_var = scope.redefine("size", loc)
        size_tuple_var = scope.redefine("size_tuple", loc)
        size_typ = types.intp
        size_tuple_typ = types.UniTuple(size_typ, 1)
        typemap[size_var.name] = size_typ
        typemap[size_tuple_var.name] = size_tuple_typ
        stmts.append(
            _new_definition(func_ir, size_var, ir.Const(size, loc=loc), loc)
        )
        stmts.append(
            _new_definition(
                func_ir,
                size_tuple_var,
                ir.Expr.build_tuple(items=[size_var], loc=loc),
                loc,
            )
        )

        # The general approach is to create an empty array and then fill
        # the elements in one-by-one from their specification.

        # Get the numpy type to pass to empty.
        nptype = types.DType(dtype)

        # Create a variable to hold the numpy empty function.
        empty_func = scope.redefine("empty_func", loc)
        fnty = get_np_ufunc_typ(np.empty)
        context.resolve_function_type(fnty, (size_typ,), {"dtype": nptype})

        typemap[empty_func.name] = fnty

        stmts.append(
            _new_definition(
                func_ir, empty_func, ir.Global("empty", np.empty, loc=loc), loc
            )
        )

        # We pass two arguments to empty, first the size tuple and second
        # the dtype of the new array. Here, we created typ_var which is
        # the dtype argument of the new array. typ_var in turn is created
        # by getattr of the dtype string on the numpy module.

        # Create var for numpy module.
        g_np_var = scope.redefine("$np_g_var", loc)
        typemap[g_np_var.name] = types.misc.Module(np)
        g_np = ir.Global("np", np, loc)
        stmts.append(_new_definition(func_ir, g_np_var, g_np, loc))

        # Create var for result of numpy.<dtype>.
        typ_var = scope.redefine("$np_typ_var", loc)
        typemap[typ_var.name] = nptype
        dtype_str = str(dtype)
        if dtype_str == "bool":
            dtype_str = "bool_"
        # Get dtype attribute of numpy module.
        np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
        stmts.append(_new_definition(func_ir, typ_var, np_typ_getattr, loc))

        # Create the call to numpy.empty passing the size tuple and dtype var.
        # NOTE(review): the call below passes ``size_var`` (the bare int),
        # not ``size_tuple_var`` built above; the tuple variable appears to
        # be vestigial — confirm against typing of np.empty before removing.
        empty_call = ir.Expr.call(empty_func, [size_var, typ_var], {}, loc=loc)
        calltypes[empty_call] = typing.signature(array_typ, size_typ, nptype)
        stmts.append(_new_definition(func_ir, array_var, empty_call, loc))

        # Fill in the new empty array one-by-one.
        for i in range(size):
            index_var = scope.redefine("index", loc)
            index_typ = types.intp
            typemap[index_var.name] = index_typ
            stmts.append(
                _new_definition(func_ir, index_var, ir.Const(i, loc), loc)
            )
            setitem = ir.SetItem(array_var, index_var, seq[i], loc)
            calltypes[setitem] = typing.signature(
                types.none, array_typ, index_typ, dtype
            )
            stmts.append(setitem)

        # The Dels of the list items are replayed after the array is fully
        # initialized, since the items are still read by the SetItems above.
        stmts.extend(dels)
        return True

    class State(object):
        """
        This class is used to hold the state in the following loop so as to
        make it easy to reset the state of the variables tracking the various
        statement kinds
        """

        def __init__(self):
            # list_vars keep track of the variable created from the latest
            # build_list instruction, as well as its synonyms.
            self.list_vars = []
            # dead_vars keep track of those in list_vars that are considered
            # dead.
            self.dead_vars = []
            # list_items keep track of the elements used in build_list.
            self.list_items = []
            # stmts accumulates the (possibly rewritten) output body.
            self.stmts = []
            # dels keep track of the deletion of list_items, which will need
            # to be moved after array initialization.
            self.dels = []
            # tracks if a modification has taken place
            self.modified = False

        def reset(self):
            """
            Resets the internal state of the variables used for tracking
            """
            self.list_vars = []
            self.dead_vars = []
            self.list_items = []
            self.dels = []

        def list_var_used(self, inst):
            """
            Returns True if the list being analysed is used between the
            build_list and the array call.
            """
            return any([x.name in self.list_vars for x in inst.list_vars()])

    state = State()

    for inst in block.body:
        if isinstance(inst, ir.Assign):
            if isinstance(inst.value, ir.Var):
                # Aliasing assignment: target becomes a synonym of the list.
                if inst.value.name in state.list_vars:
                    state.list_vars.append(inst.target.name)
                    state.stmts.append(inst)
                    continue
            elif isinstance(inst.value, ir.Expr):
                expr = inst.value
                if expr.op == "build_list":
                    # new build_list encountered, reset state
                    state.reset()
                    state.list_items = [x.name for x in expr.items]
                    state.list_vars = [inst.target.name]
                    state.stmts.append(inst)
                    continue
                elif expr.op == "call" and expr in calltypes:
                    if guard(
                        inline_array,
                        inst.target,
                        expr,
                        state.stmts,
                        state.list_vars,
                        state.dels,
                    ):
                        state.modified = True
                        continue
        elif isinstance(inst, ir.Del):
            removed_var = inst.value
            if removed_var in state.list_items:
                # Defer the item Del: it must run after array init (see
                # the ``stmts.extend(dels)`` in inline_array).
                state.dels.append(inst)
                continue
            elif removed_var in state.list_vars:
                # one of the list_vars is considered dead.
                state.dead_vars.append(removed_var)
                state.list_vars.remove(removed_var)
                state.stmts.append(inst)
                if state.list_vars == []:
                    # if all list_vars are considered dead, we need to filter
                    # them out from existing stmts to completely remove
                    # build_list.
                    # Note that if a translation didn't take place, dead_vars
                    # will also be empty when we reach this point.
                    # NOTE: the loop variable below deliberately shadows the
                    # outer ``inst``; the branch ends with ``continue`` so the
                    # outer value is never read afterwards.
                    body = []
                    for inst in state.stmts:
                        if (
                            isinstance(inst, ir.Assign)
                            and inst.target.name in state.dead_vars
                        ) or (
                            isinstance(inst, ir.Del)
                            and inst.value in state.dead_vars
                        ):
                            continue
                        body.append(inst)
                    state.stmts = body
                    state.dead_vars = []
                    state.modified = True
                continue
        state.stmts.append(inst)

        # If the list is used in any capacity between build_list and array
        # call, then we must call off the translation for this list because
        # it could be mutated and list_items would no longer be applicable.
        if state.list_var_used(inst):
            state.reset()

    return state.stmts if state.modified else None