pyopencl 2024.2.2__cp312-cp312-win_amd64.whl → 2024.2.4__cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pyopencl might be problematic. Click here for more details.
- pyopencl/__init__.py +16 -4
- pyopencl/_cl.cp312-win_amd64.pyd +0 -0
- pyopencl/algorithm.py +3 -1
- pyopencl/bitonic_sort.py +2 -0
- pyopencl/characterize/__init__.py +23 -0
- pyopencl/compyte/.git +1 -0
- pyopencl/compyte/.github/workflows/autopush.yml +21 -0
- pyopencl/compyte/.github/workflows/ci.yml +30 -0
- pyopencl/compyte/.gitignore +21 -0
- pyopencl/compyte/ndarray/Makefile +31 -0
- pyopencl/compyte/ndarray/gpu_ndarray.h +35 -0
- pyopencl/compyte/ndarray/pygpu_language.h +207 -0
- pyopencl/compyte/ndarray/pygpu_language_cuda.cu +622 -0
- pyopencl/compyte/ndarray/pygpu_language_opencl.cpp +317 -0
- pyopencl/compyte/ndarray/pygpu_ndarray.cpp +1546 -0
- pyopencl/compyte/ndarray/pygpu_ndarray.h +71 -0
- pyopencl/compyte/ndarray/pygpu_ndarray_object.h +232 -0
- pyopencl/compyte/setup.cfg +9 -0
- pyopencl/tools.py +60 -56
- pyopencl/version.py +7 -3
- {pyopencl-2024.2.2.dist-info → pyopencl-2024.2.4.dist-info}/METADATA +105 -105
- pyopencl-2024.2.4.dist-info/RECORD +59 -0
- {pyopencl-2024.2.2.dist-info → pyopencl-2024.2.4.dist-info}/WHEEL +1 -1
- pyopencl-2024.2.2.data/data/CITATION.cff +0 -74
- pyopencl-2024.2.2.data/data/CMakeLists.txt +0 -83
- pyopencl-2024.2.2.data/data/Makefile.in +0 -21
- pyopencl-2024.2.2.data/data/README.rst +0 -70
- pyopencl-2024.2.2.data/data/README_SETUP.txt +0 -34
- pyopencl-2024.2.2.data/data/aksetup_helper.py +0 -1013
- pyopencl-2024.2.2.data/data/configure.py +0 -6
- pyopencl-2024.2.2.data/data/contrib/cldis.py +0 -91
- pyopencl-2024.2.2.data/data/contrib/fortran-to-opencl/README +0 -29
- pyopencl-2024.2.2.data/data/contrib/fortran-to-opencl/translate.py +0 -1441
- pyopencl-2024.2.2.data/data/contrib/pyopencl.vim +0 -84
- pyopencl-2024.2.2.data/data/doc/Makefile +0 -23
- pyopencl-2024.2.2.data/data/doc/algorithm.rst +0 -214
- pyopencl-2024.2.2.data/data/doc/array.rst +0 -305
- pyopencl-2024.2.2.data/data/doc/conf.py +0 -26
- pyopencl-2024.2.2.data/data/doc/howto.rst +0 -105
- pyopencl-2024.2.2.data/data/doc/index.rst +0 -137
- pyopencl-2024.2.2.data/data/doc/make_constants.py +0 -561
- pyopencl-2024.2.2.data/data/doc/misc.rst +0 -885
- pyopencl-2024.2.2.data/data/doc/runtime.rst +0 -51
- pyopencl-2024.2.2.data/data/doc/runtime_const.rst +0 -30
- pyopencl-2024.2.2.data/data/doc/runtime_gl.rst +0 -78
- pyopencl-2024.2.2.data/data/doc/runtime_memory.rst +0 -527
- pyopencl-2024.2.2.data/data/doc/runtime_platform.rst +0 -184
- pyopencl-2024.2.2.data/data/doc/runtime_program.rst +0 -364
- pyopencl-2024.2.2.data/data/doc/runtime_queue.rst +0 -182
- pyopencl-2024.2.2.data/data/doc/subst.rst +0 -36
- pyopencl-2024.2.2.data/data/doc/tools.rst +0 -4
- pyopencl-2024.2.2.data/data/doc/types.rst +0 -42
- pyopencl-2024.2.2.data/data/examples/black-hole-accretion.py +0 -2227
- pyopencl-2024.2.2.data/data/examples/demo-struct-reduce.py +0 -75
- pyopencl-2024.2.2.data/data/examples/demo.py +0 -39
- pyopencl-2024.2.2.data/data/examples/demo_array.py +0 -32
- pyopencl-2024.2.2.data/data/examples/demo_array_svm.py +0 -37
- pyopencl-2024.2.2.data/data/examples/demo_elementwise.py +0 -34
- pyopencl-2024.2.2.data/data/examples/demo_elementwise_complex.py +0 -53
- pyopencl-2024.2.2.data/data/examples/demo_mandelbrot.py +0 -183
- pyopencl-2024.2.2.data/data/examples/demo_meta_codepy.py +0 -56
- pyopencl-2024.2.2.data/data/examples/demo_meta_template.py +0 -55
- pyopencl-2024.2.2.data/data/examples/dump-performance.py +0 -38
- pyopencl-2024.2.2.data/data/examples/dump-properties.py +0 -86
- pyopencl-2024.2.2.data/data/examples/gl_interop_demo.py +0 -84
- pyopencl-2024.2.2.data/data/examples/gl_particle_animation.py +0 -218
- pyopencl-2024.2.2.data/data/examples/ipython-demo.ipynb +0 -203
- pyopencl-2024.2.2.data/data/examples/median-filter.py +0 -99
- pyopencl-2024.2.2.data/data/examples/n-body.py +0 -1070
- pyopencl-2024.2.2.data/data/examples/narray.py +0 -37
- pyopencl-2024.2.2.data/data/examples/noisyImage.jpg +0 -0
- pyopencl-2024.2.2.data/data/examples/pi-monte-carlo.py +0 -1166
- pyopencl-2024.2.2.data/data/examples/svm.py +0 -82
- pyopencl-2024.2.2.data/data/examples/transpose.py +0 -229
- pyopencl-2024.2.2.data/data/pytest.ini +0 -3
- pyopencl-2024.2.2.data/data/src/bitlog.cpp +0 -51
- pyopencl-2024.2.2.data/data/src/bitlog.hpp +0 -83
- pyopencl-2024.2.2.data/data/src/clinfo_ext.h +0 -134
- pyopencl-2024.2.2.data/data/src/mempool.hpp +0 -444
- pyopencl-2024.2.2.data/data/src/pyopencl_ext.h +0 -77
- pyopencl-2024.2.2.data/data/src/tools.hpp +0 -90
- pyopencl-2024.2.2.data/data/src/wrap_cl.cpp +0 -61
- pyopencl-2024.2.2.data/data/src/wrap_cl.hpp +0 -5853
- pyopencl-2024.2.2.data/data/src/wrap_cl_part_1.cpp +0 -369
- pyopencl-2024.2.2.data/data/src/wrap_cl_part_2.cpp +0 -702
- pyopencl-2024.2.2.data/data/src/wrap_constants.cpp +0 -1274
- pyopencl-2024.2.2.data/data/src/wrap_helpers.hpp +0 -213
- pyopencl-2024.2.2.data/data/src/wrap_mempool.cpp +0 -738
- pyopencl-2024.2.2.data/data/test/add-vectors-32.spv +0 -0
- pyopencl-2024.2.2.data/data/test/add-vectors-64.spv +0 -0
- pyopencl-2024.2.2.data/data/test/empty-header.h +0 -1
- pyopencl-2024.2.2.data/data/test/test_algorithm.py +0 -1180
- pyopencl-2024.2.2.data/data/test/test_array.py +0 -2392
- pyopencl-2024.2.2.data/data/test/test_arrays_in_structs.py +0 -100
- pyopencl-2024.2.2.data/data/test/test_clmath.py +0 -529
- pyopencl-2024.2.2.data/data/test/test_clrandom.py +0 -75
- pyopencl-2024.2.2.data/data/test/test_enqueue_copy.py +0 -271
- pyopencl-2024.2.2.data/data/test/test_wrapper.py +0 -1565
- pyopencl-2024.2.2.dist-info/LICENSE +0 -282
- pyopencl-2024.2.2.dist-info/RECORD +0 -123
- pyopencl-2024.2.2.dist-info/top_level.txt +0 -1
- {pyopencl-2024.2.2.data/data → pyopencl-2024.2.4.dist-info/licenses}/LICENSE +0 -0
|
@@ -1,2392 +0,0 @@
|
|
|
1
|
-
#! /usr/bin/env python
|
|
2
|
-
|
|
3
|
-
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
|
|
4
|
-
|
|
5
|
-
__license__ = """
|
|
6
|
-
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
7
|
-
of this software and associated documentation files (the "Software"), to deal
|
|
8
|
-
in the Software without restriction, including without limitation the rights
|
|
9
|
-
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
10
|
-
copies of the Software, and to permit persons to whom the Software is
|
|
11
|
-
furnished to do so, subject to the following conditions:
|
|
12
|
-
|
|
13
|
-
The above copyright notice and this permission notice shall be included in
|
|
14
|
-
all copies or substantial portions of the Software.
|
|
15
|
-
|
|
16
|
-
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
17
|
-
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
18
|
-
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
19
|
-
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
20
|
-
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
21
|
-
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
22
|
-
THE SOFTWARE.
|
|
23
|
-
"""
|
|
24
|
-
|
|
25
|
-
import operator
|
|
26
|
-
import platform
|
|
27
|
-
import sys
|
|
28
|
-
from itertools import product
|
|
29
|
-
|
|
30
|
-
import numpy as np
|
|
31
|
-
import numpy.linalg as la
|
|
32
|
-
import pytest
|
|
33
|
-
|
|
34
|
-
import pyopencl as cl
|
|
35
|
-
import pyopencl.array as cl_array
|
|
36
|
-
import pyopencl.cltypes as cltypes
|
|
37
|
-
import pyopencl.tools as cl_tools
|
|
38
|
-
from pyopencl.characterize import has_double_support, has_struct_arg_count_bug
|
|
39
|
-
from pyopencl.clrandom import PhiloxGenerator, ThreefryGenerator
|
|
40
|
-
from pyopencl.tools import \
|
|
41
|
-
pytest_generate_tests_for_pyopencl as pytest_generate_tests # noqa: F401
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
_PYPY = cl._PYPY
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
# {{{ helpers
|
|
48
|
-
|
|
49
|
-
TO_REAL = {
|
|
50
|
-
np.dtype(np.complex64): np.float32,
|
|
51
|
-
np.dtype(np.complex128): np.float64
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
def general_clrand(queue, shape, dtype):
|
|
56
|
-
from pyopencl.clrandom import rand as clrand
|
|
57
|
-
|
|
58
|
-
dtype = np.dtype(dtype)
|
|
59
|
-
if dtype.kind == "c":
|
|
60
|
-
real_dtype = dtype.type(0).real.dtype
|
|
61
|
-
return clrand(queue, shape, real_dtype) + 1j*clrand(queue, shape, real_dtype)
|
|
62
|
-
else:
|
|
63
|
-
return clrand(queue, shape, dtype)
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
def make_random_array(queue, dtype, size):
|
|
67
|
-
from pyopencl.clrandom import rand
|
|
68
|
-
|
|
69
|
-
dtype = np.dtype(dtype)
|
|
70
|
-
if dtype.kind == "c":
|
|
71
|
-
real_dtype = TO_REAL[dtype]
|
|
72
|
-
return (rand(queue, shape=(size,), dtype=real_dtype).astype(dtype)
|
|
73
|
-
+ rand(queue, shape=(size,), dtype=real_dtype).astype(dtype)
|
|
74
|
-
* dtype.type(1j))
|
|
75
|
-
else:
|
|
76
|
-
return rand(queue, shape=(size,), dtype=dtype)
|
|
77
|
-
|
|
78
|
-
# }}}
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
# {{{ dtype-related
|
|
82
|
-
|
|
83
|
-
# {{{ test_basic_complex
|
|
84
|
-
|
|
85
|
-
def test_basic_complex(ctx_factory):
|
|
86
|
-
context = ctx_factory()
|
|
87
|
-
queue = cl.CommandQueue(context)
|
|
88
|
-
|
|
89
|
-
from pyopencl.clrandom import rand
|
|
90
|
-
|
|
91
|
-
size = 500
|
|
92
|
-
|
|
93
|
-
ary = (rand(queue, shape=(size,), dtype=np.float32).astype(np.complex64)
|
|
94
|
-
+ rand(queue, shape=(size,), dtype=np.float32).astype(np.complex64) * 1j)
|
|
95
|
-
assert ary.dtype != np.dtype(np.complex128)
|
|
96
|
-
c = np.complex64(5+7j)
|
|
97
|
-
|
|
98
|
-
host_ary = ary.get()
|
|
99
|
-
assert la.norm((ary*c).get() - c*host_ary) < 1e-5 * la.norm(host_ary)
|
|
100
|
-
|
|
101
|
-
# }}}
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
# {{{ test_mix_complex
|
|
105
|
-
|
|
106
|
-
def test_mix_complex(ctx_factory):
|
|
107
|
-
context = ctx_factory()
|
|
108
|
-
queue = cl.CommandQueue(context)
|
|
109
|
-
|
|
110
|
-
size = 10
|
|
111
|
-
|
|
112
|
-
dtypes = [
|
|
113
|
-
(np.float32, np.complex64),
|
|
114
|
-
#(np.int32, np.complex64),
|
|
115
|
-
]
|
|
116
|
-
|
|
117
|
-
dev = context.devices[0]
|
|
118
|
-
if has_double_support(dev) and has_struct_arg_count_bug(dev) == "apple":
|
|
119
|
-
dtypes.extend([
|
|
120
|
-
(np.float32, np.float64),
|
|
121
|
-
])
|
|
122
|
-
elif has_double_support(dev):
|
|
123
|
-
dtypes.extend([
|
|
124
|
-
(np.float32, np.float64),
|
|
125
|
-
(np.float32, np.complex128),
|
|
126
|
-
(np.float64, np.complex64),
|
|
127
|
-
(np.float64, np.complex128),
|
|
128
|
-
])
|
|
129
|
-
|
|
130
|
-
from operator import add, mul, sub, truediv
|
|
131
|
-
for op in [add, sub, mul, truediv, pow]:
|
|
132
|
-
for dtype_a0, dtype_b0 in dtypes:
|
|
133
|
-
for dtype_a, dtype_b in [
|
|
134
|
-
(dtype_a0, dtype_b0),
|
|
135
|
-
(dtype_b0, dtype_a0),
|
|
136
|
-
]:
|
|
137
|
-
for is_scalar_a, is_scalar_b in [
|
|
138
|
-
(False, False),
|
|
139
|
-
(False, True),
|
|
140
|
-
(True, False),
|
|
141
|
-
]:
|
|
142
|
-
if is_scalar_a:
|
|
143
|
-
ary_a = make_random_array(queue, dtype_a, 1).get()[0]
|
|
144
|
-
host_ary_a = ary_a
|
|
145
|
-
else:
|
|
146
|
-
ary_a = make_random_array(queue, dtype_a, size)
|
|
147
|
-
host_ary_a = ary_a.get()
|
|
148
|
-
|
|
149
|
-
if is_scalar_b:
|
|
150
|
-
ary_b = make_random_array(queue, dtype_b, 1).get()[0]
|
|
151
|
-
host_ary_b = ary_b
|
|
152
|
-
else:
|
|
153
|
-
ary_b = make_random_array(queue, dtype_b, size)
|
|
154
|
-
host_ary_b = ary_b.get()
|
|
155
|
-
|
|
156
|
-
print(op, dtype_a, dtype_b, is_scalar_a, is_scalar_b)
|
|
157
|
-
dev_result = op(ary_a, ary_b).get()
|
|
158
|
-
host_result = op(host_ary_a, host_ary_b)
|
|
159
|
-
|
|
160
|
-
if host_result.dtype != dev_result.dtype:
|
|
161
|
-
# This appears to be a numpy bug, where we get
|
|
162
|
-
# served a Python complex that is really a
|
|
163
|
-
# smaller numpy complex.
|
|
164
|
-
|
|
165
|
-
print("HOST_DTYPE: {} DEV_DTYPE: {}".format(
|
|
166
|
-
host_result.dtype, dev_result.dtype))
|
|
167
|
-
|
|
168
|
-
dev_result = dev_result.astype(host_result.dtype)
|
|
169
|
-
|
|
170
|
-
err = la.norm(host_result-dev_result)/la.norm(host_result)
|
|
171
|
-
print(err)
|
|
172
|
-
correct = err < 1e-4
|
|
173
|
-
if not correct:
|
|
174
|
-
print(host_result)
|
|
175
|
-
print(dev_result)
|
|
176
|
-
print(host_result - dev_result)
|
|
177
|
-
|
|
178
|
-
assert correct
|
|
179
|
-
|
|
180
|
-
# }}}
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
# {{{ test_pow_neg1_vs_inv
|
|
184
|
-
|
|
185
|
-
def test_pow_neg1_vs_inv(ctx_factory):
|
|
186
|
-
ctx = ctx_factory()
|
|
187
|
-
queue = cl.CommandQueue(ctx)
|
|
188
|
-
|
|
189
|
-
device = ctx.devices[0]
|
|
190
|
-
if not has_double_support(device):
|
|
191
|
-
from pytest import skip
|
|
192
|
-
skip("double precision not supported on %s" % device)
|
|
193
|
-
if has_struct_arg_count_bug(device) == "apple":
|
|
194
|
-
from pytest import xfail
|
|
195
|
-
xfail("apple struct arg counting broken")
|
|
196
|
-
|
|
197
|
-
a_dev = make_random_array(queue, np.complex128, 20000)
|
|
198
|
-
|
|
199
|
-
res1 = (a_dev ** (-1)).get()
|
|
200
|
-
res2 = (1/a_dev).get()
|
|
201
|
-
ref = 1/a_dev.get()
|
|
202
|
-
|
|
203
|
-
assert la.norm(res1-ref, np.inf) / la.norm(ref) < 1e-13
|
|
204
|
-
assert la.norm(res2-ref, np.inf) / la.norm(ref) < 1e-13
|
|
205
|
-
|
|
206
|
-
# }}}
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
# {{{ test_vector_fill
|
|
210
|
-
|
|
211
|
-
def test_vector_fill(ctx_factory):
|
|
212
|
-
context = ctx_factory()
|
|
213
|
-
queue = cl.CommandQueue(context)
|
|
214
|
-
|
|
215
|
-
a_gpu = cl_array.Array(queue, 100, dtype=cltypes.float4)
|
|
216
|
-
a_gpu.fill(cltypes.make_float4(0.0, 0.0, 1.0, 0.0))
|
|
217
|
-
a = a_gpu.get()
|
|
218
|
-
assert a.dtype == cltypes.float4
|
|
219
|
-
|
|
220
|
-
a_gpu = cl_array.zeros(queue, 100, dtype=cltypes.float4)
|
|
221
|
-
|
|
222
|
-
# }}}
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
# {{{ test_zeros_large_array
|
|
226
|
-
|
|
227
|
-
def test_zeros_large_array(ctx_factory):
|
|
228
|
-
context = ctx_factory()
|
|
229
|
-
queue = cl.CommandQueue(context)
|
|
230
|
-
dev = queue.device
|
|
231
|
-
|
|
232
|
-
if dev.platform.vendor == "Intel(R) Corporation" \
|
|
233
|
-
and platform.system() == "Windows":
|
|
234
|
-
pytest.xfail("large array fail with out-of-host memory with"
|
|
235
|
-
"Intel CPU runtime as of 2022-10-05")
|
|
236
|
-
|
|
237
|
-
size = 2**28 + 1
|
|
238
|
-
if dev.address_bits == 64 and dev.max_mem_alloc_size >= 8 * size:
|
|
239
|
-
# this shouldn't hang/cause errors
|
|
240
|
-
# see https://github.com/inducer/pyopencl/issues/395
|
|
241
|
-
a_gpu = cl_array.zeros(queue, (size,), dtype="float64")
|
|
242
|
-
# run a couple kernels to ensure no propagated runtime errors
|
|
243
|
-
a_gpu[...] = 1.
|
|
244
|
-
a_gpu = 2 * a_gpu - 3
|
|
245
|
-
else:
|
|
246
|
-
pass
|
|
247
|
-
|
|
248
|
-
# }}}
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
# {{{ test_absrealimag
|
|
252
|
-
|
|
253
|
-
def test_absrealimag(ctx_factory):
|
|
254
|
-
context = ctx_factory()
|
|
255
|
-
queue = cl.CommandQueue(context)
|
|
256
|
-
|
|
257
|
-
def real(x):
|
|
258
|
-
return x.real
|
|
259
|
-
|
|
260
|
-
def imag(x):
|
|
261
|
-
return x.imag
|
|
262
|
-
|
|
263
|
-
def conj(x):
|
|
264
|
-
return x.conj()
|
|
265
|
-
|
|
266
|
-
n = 111
|
|
267
|
-
for func in [abs, real, imag, conj]:
|
|
268
|
-
for dtype in [np.int32, np.float32, np.complex64]:
|
|
269
|
-
print(func, dtype)
|
|
270
|
-
a = -make_random_array(queue, dtype, n)
|
|
271
|
-
|
|
272
|
-
host_res = func(a.get())
|
|
273
|
-
dev_res = func(a).get()
|
|
274
|
-
|
|
275
|
-
correct = np.allclose(dev_res, host_res)
|
|
276
|
-
if not correct:
|
|
277
|
-
print(dev_res)
|
|
278
|
-
print(host_res)
|
|
279
|
-
print(dev_res-host_res)
|
|
280
|
-
assert correct
|
|
281
|
-
|
|
282
|
-
# }}}
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
# {{{ test_custom_type_zeros
|
|
286
|
-
|
|
287
|
-
def test_custom_type_zeros(ctx_factory):
|
|
288
|
-
context = ctx_factory()
|
|
289
|
-
queue = cl.CommandQueue(context)
|
|
290
|
-
|
|
291
|
-
if not (
|
|
292
|
-
queue._get_cl_version() >= (1, 2)
|
|
293
|
-
and cl.get_cl_header_version() >= (1, 2)):
|
|
294
|
-
pytest.skip("CL1.2 not available")
|
|
295
|
-
|
|
296
|
-
dtype = np.dtype([
|
|
297
|
-
("cur_min", np.int32),
|
|
298
|
-
("cur_max", np.int32),
|
|
299
|
-
("pad", np.int32),
|
|
300
|
-
])
|
|
301
|
-
|
|
302
|
-
from pyopencl.tools import get_or_register_dtype, match_dtype_to_c_struct
|
|
303
|
-
|
|
304
|
-
name = "mmc_type"
|
|
305
|
-
dtype, c_decl = match_dtype_to_c_struct(queue.device, name, dtype)
|
|
306
|
-
dtype = get_or_register_dtype(name, dtype)
|
|
307
|
-
|
|
308
|
-
n = 1000
|
|
309
|
-
z_dev = cl.array.zeros(queue, n, dtype=dtype)
|
|
310
|
-
|
|
311
|
-
z = z_dev.get()
|
|
312
|
-
|
|
313
|
-
assert np.array_equal(np.zeros(n, dtype), z)
|
|
314
|
-
|
|
315
|
-
# }}}
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
# {{{ test_custom_type_fill
|
|
319
|
-
|
|
320
|
-
def test_custom_type_fill(ctx_factory):
|
|
321
|
-
context = ctx_factory()
|
|
322
|
-
queue = cl.CommandQueue(context)
|
|
323
|
-
|
|
324
|
-
from pyopencl.characterize import has_struct_arg_count_bug
|
|
325
|
-
if has_struct_arg_count_bug(queue.device):
|
|
326
|
-
pytest.skip("device has LLVM arg counting bug")
|
|
327
|
-
|
|
328
|
-
dtype = np.dtype([
|
|
329
|
-
("cur_min", np.int32),
|
|
330
|
-
("cur_max", np.int32),
|
|
331
|
-
("pad", np.int32),
|
|
332
|
-
])
|
|
333
|
-
|
|
334
|
-
from pyopencl.tools import get_or_register_dtype, match_dtype_to_c_struct
|
|
335
|
-
|
|
336
|
-
name = "mmc_type"
|
|
337
|
-
dtype, c_decl = match_dtype_to_c_struct(queue.device, name, dtype)
|
|
338
|
-
dtype = get_or_register_dtype(name, dtype)
|
|
339
|
-
|
|
340
|
-
n = 1000
|
|
341
|
-
z_dev = cl.array.empty(queue, n, dtype=dtype)
|
|
342
|
-
z_dev.fill(np.zeros((), dtype))
|
|
343
|
-
|
|
344
|
-
z = z_dev.get()
|
|
345
|
-
|
|
346
|
-
assert np.array_equal(np.zeros(n, dtype), z)
|
|
347
|
-
|
|
348
|
-
# }}}
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
# {{{ test_custom_type_take_put
|
|
352
|
-
|
|
353
|
-
def test_custom_type_take_put(ctx_factory):
|
|
354
|
-
context = ctx_factory()
|
|
355
|
-
queue = cl.CommandQueue(context)
|
|
356
|
-
|
|
357
|
-
dtype = np.dtype([
|
|
358
|
-
("cur_min", np.int32),
|
|
359
|
-
("cur_max", np.int32),
|
|
360
|
-
])
|
|
361
|
-
|
|
362
|
-
from pyopencl.tools import get_or_register_dtype, match_dtype_to_c_struct
|
|
363
|
-
|
|
364
|
-
name = "tp_type"
|
|
365
|
-
dtype, c_decl = match_dtype_to_c_struct(queue.device, name, dtype)
|
|
366
|
-
dtype = get_or_register_dtype(name, dtype)
|
|
367
|
-
|
|
368
|
-
n = 100
|
|
369
|
-
z = np.empty(100, dtype)
|
|
370
|
-
z["cur_min"] = np.arange(n)
|
|
371
|
-
z["cur_max"] = np.arange(n)**2
|
|
372
|
-
|
|
373
|
-
z_dev = cl.array.to_device(queue, z)
|
|
374
|
-
ind = cl.array.arange(queue, n, step=3, dtype=np.int32)
|
|
375
|
-
|
|
376
|
-
z_ind_ref = z[ind.get()]
|
|
377
|
-
z_ind = z_dev[ind]
|
|
378
|
-
|
|
379
|
-
assert np.array_equal(z_ind.get(), z_ind_ref)
|
|
380
|
-
|
|
381
|
-
# }}}
|
|
382
|
-
|
|
383
|
-
# }}}
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
# {{{ operators
|
|
387
|
-
|
|
388
|
-
# {{{ test_div_type_matches_numpy
|
|
389
|
-
|
|
390
|
-
@pytest.mark.parametrize("dtype", [np.int8, np.int32, np.int64, np.float32])
|
|
391
|
-
# FIXME Implement florodiv
|
|
392
|
-
#@pytest.mark.parametrize("op", [operator.truediv, operator.floordiv])
|
|
393
|
-
@pytest.mark.parametrize("op", [operator.truediv])
|
|
394
|
-
def test_div_type_matches_numpy(ctx_factory, dtype, op):
|
|
395
|
-
context = ctx_factory()
|
|
396
|
-
queue = cl.CommandQueue(context)
|
|
397
|
-
|
|
398
|
-
a = cl_array.arange(queue, 10, dtype=dtype) + 1
|
|
399
|
-
res = op(4*a, 3*a)
|
|
400
|
-
a_np = a.get()
|
|
401
|
-
res_np = op(4*a_np, 3*a_np)
|
|
402
|
-
assert res_np.dtype == res.dtype
|
|
403
|
-
assert np.allclose(res_np, res.get())
|
|
404
|
-
|
|
405
|
-
# }}}
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
# {{{ test_rmul_yields_right_type
|
|
409
|
-
|
|
410
|
-
def test_rmul_yields_right_type(ctx_factory):
|
|
411
|
-
context = ctx_factory()
|
|
412
|
-
queue = cl.CommandQueue(context)
|
|
413
|
-
|
|
414
|
-
a = np.array([1, 2, 3, 4, 5]).astype(np.float32)
|
|
415
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
416
|
-
|
|
417
|
-
two_a = 2*a_gpu
|
|
418
|
-
assert isinstance(two_a, cl_array.Array)
|
|
419
|
-
|
|
420
|
-
two_a = np.float32(2)*a_gpu
|
|
421
|
-
assert isinstance(two_a, cl_array.Array)
|
|
422
|
-
|
|
423
|
-
# }}}
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
# {{{ test_pow_array
|
|
427
|
-
|
|
428
|
-
def test_pow_array(ctx_factory):
|
|
429
|
-
context = ctx_factory()
|
|
430
|
-
queue = cl.CommandQueue(context)
|
|
431
|
-
|
|
432
|
-
a = np.array([1, 2, 3, 4, 5]).astype(np.float32)
|
|
433
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
434
|
-
|
|
435
|
-
result = pow(a_gpu, a_gpu).get()
|
|
436
|
-
assert (np.abs(a ** a - result) < 3e-3).all()
|
|
437
|
-
|
|
438
|
-
result = (a_gpu ** a_gpu).get()
|
|
439
|
-
assert (np.abs(pow(a, a) - result) < 3e-3).all()
|
|
440
|
-
|
|
441
|
-
# }}}
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
# {{{ test_pow_number
|
|
445
|
-
|
|
446
|
-
def test_pow_number(ctx_factory):
|
|
447
|
-
context = ctx_factory()
|
|
448
|
-
queue = cl.CommandQueue(context)
|
|
449
|
-
|
|
450
|
-
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
|
|
451
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
452
|
-
|
|
453
|
-
result = pow(a_gpu, 2).get()
|
|
454
|
-
assert (np.abs(a ** 2 - result) < 1e-3).all()
|
|
455
|
-
|
|
456
|
-
# }}}
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
# {{{ test_multiply
|
|
460
|
-
|
|
461
|
-
def test_multiply(ctx_factory):
|
|
462
|
-
"""Test the muliplication of an array with a scalar. """
|
|
463
|
-
|
|
464
|
-
context = ctx_factory()
|
|
465
|
-
queue = cl.CommandQueue(context)
|
|
466
|
-
|
|
467
|
-
for sz in [10, 50000]:
|
|
468
|
-
for dtype, scalars in [
|
|
469
|
-
(np.float32, [2]),
|
|
470
|
-
(np.complex64, [2j]),
|
|
471
|
-
]:
|
|
472
|
-
for scalar in scalars:
|
|
473
|
-
a_gpu = make_random_array(queue, dtype, sz)
|
|
474
|
-
a = a_gpu.get()
|
|
475
|
-
a_mult = (scalar * a_gpu).get()
|
|
476
|
-
|
|
477
|
-
assert (a * scalar == a_mult).all()
|
|
478
|
-
|
|
479
|
-
# }}}
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
# {{{ test_multiply_array
|
|
483
|
-
|
|
484
|
-
def test_multiply_array(ctx_factory):
|
|
485
|
-
"""Test the multiplication of two arrays."""
|
|
486
|
-
|
|
487
|
-
context = ctx_factory()
|
|
488
|
-
queue = cl.CommandQueue(context)
|
|
489
|
-
|
|
490
|
-
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
|
|
491
|
-
|
|
492
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
493
|
-
b_gpu = cl_array.to_device(queue, a)
|
|
494
|
-
|
|
495
|
-
a_squared = (b_gpu * a_gpu).get()
|
|
496
|
-
|
|
497
|
-
assert (a * a == a_squared).all()
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
# }}}
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
# {{{ test_addition_array
|
|
504
|
-
|
|
505
|
-
def test_addition_array(ctx_factory):
|
|
506
|
-
"""Test the addition of two arrays."""
|
|
507
|
-
|
|
508
|
-
context = ctx_factory()
|
|
509
|
-
queue = cl.CommandQueue(context)
|
|
510
|
-
|
|
511
|
-
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
|
|
512
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
513
|
-
a_added = (a_gpu + a_gpu).get()
|
|
514
|
-
|
|
515
|
-
assert (a + a == a_added).all()
|
|
516
|
-
|
|
517
|
-
# }}}
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
# {{{ test_addition_scalar
|
|
521
|
-
|
|
522
|
-
def test_addition_scalar(ctx_factory):
|
|
523
|
-
"""Test the addition of an array and a scalar."""
|
|
524
|
-
|
|
525
|
-
context = ctx_factory()
|
|
526
|
-
queue = cl.CommandQueue(context)
|
|
527
|
-
|
|
528
|
-
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
|
|
529
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
530
|
-
a_added = (7 + a_gpu).get()
|
|
531
|
-
|
|
532
|
-
assert (7 + a == a_added).all()
|
|
533
|
-
|
|
534
|
-
# }}}
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
# {{{ test_subtract_array
|
|
538
|
-
|
|
539
|
-
@pytest.mark.parametrize(("dtype_a", "dtype_b"),
|
|
540
|
-
[
|
|
541
|
-
(np.float32, np.float32),
|
|
542
|
-
(np.float32, np.int32),
|
|
543
|
-
(np.int32, np.int32),
|
|
544
|
-
(np.int64, np.int32),
|
|
545
|
-
(np.int64, np.uint32),
|
|
546
|
-
])
|
|
547
|
-
def test_subtract_array(ctx_factory, dtype_a, dtype_b):
|
|
548
|
-
"""Test the subtraction of two arrays."""
|
|
549
|
-
#test data
|
|
550
|
-
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(dtype_a)
|
|
551
|
-
b = np.array([10, 20, 30, 40, 50,
|
|
552
|
-
60, 70, 80, 90, 100]).astype(dtype_b)
|
|
553
|
-
|
|
554
|
-
context = ctx_factory()
|
|
555
|
-
queue = cl.CommandQueue(context)
|
|
556
|
-
|
|
557
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
558
|
-
b_gpu = cl_array.to_device(queue, b)
|
|
559
|
-
|
|
560
|
-
result = (a_gpu - b_gpu).get()
|
|
561
|
-
assert (a - b == result).all()
|
|
562
|
-
|
|
563
|
-
result = (b_gpu - a_gpu).get()
|
|
564
|
-
assert (b - a == result).all()
|
|
565
|
-
|
|
566
|
-
# }}}
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
# {{{ test_subtract_scalar
|
|
570
|
-
|
|
571
|
-
def test_subtract_scalar(ctx_factory):
|
|
572
|
-
"""Test the subtraction of an array and a scalar."""
|
|
573
|
-
|
|
574
|
-
context = ctx_factory()
|
|
575
|
-
queue = cl.CommandQueue(context)
|
|
576
|
-
|
|
577
|
-
#test data
|
|
578
|
-
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
|
|
579
|
-
|
|
580
|
-
#convert a to a gpu object
|
|
581
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
582
|
-
|
|
583
|
-
result = (a_gpu - 7).get()
|
|
584
|
-
assert (a - 7 == result).all()
|
|
585
|
-
|
|
586
|
-
result = (7 - a_gpu).get()
|
|
587
|
-
assert (7 - a == result).all()
|
|
588
|
-
|
|
589
|
-
# }}}
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
# {{{ test_divide_scalar
|
|
593
|
-
|
|
594
|
-
def test_divide_scalar(ctx_factory):
|
|
595
|
-
"""Test the division of an array and a scalar."""
|
|
596
|
-
|
|
597
|
-
context = ctx_factory()
|
|
598
|
-
queue = cl.CommandQueue(context)
|
|
599
|
-
|
|
600
|
-
if queue.device.platform.name == "Apple":
|
|
601
|
-
pytest.xfail("Apple CL compiler crashes on this.")
|
|
602
|
-
|
|
603
|
-
dtypes = (np.uint8, np.uint16, np.uint32,
|
|
604
|
-
np.int8, np.int16, np.int32,
|
|
605
|
-
np.float32, np.complex64)
|
|
606
|
-
from pyopencl.characterize import has_double_support
|
|
607
|
-
if has_double_support(queue.device):
|
|
608
|
-
dtypes = dtypes + (np.float64, np.complex128)
|
|
609
|
-
|
|
610
|
-
from itertools import product
|
|
611
|
-
|
|
612
|
-
for dtype_a, dtype_s in product(dtypes, repeat=2):
|
|
613
|
-
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
|
|
614
|
-
s = dtype_s(40)
|
|
615
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
616
|
-
|
|
617
|
-
b = a / s
|
|
618
|
-
b_gpu = a_gpu / s
|
|
619
|
-
assert (np.abs(b_gpu.get() - b) < 1e-3).all()
|
|
620
|
-
assert b_gpu.dtype is b.dtype
|
|
621
|
-
|
|
622
|
-
c = s / a
|
|
623
|
-
c_gpu = s / a_gpu
|
|
624
|
-
assert (np.abs(c_gpu.get() - c) < 1e-3).all()
|
|
625
|
-
assert c_gpu.dtype is c.dtype
|
|
626
|
-
|
|
627
|
-
# }}}
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
# {{{ test_divide_array
|
|
631
|
-
|
|
632
|
-
def test_divide_array(ctx_factory):
|
|
633
|
-
"""Test the division of an array and a scalar. """
|
|
634
|
-
|
|
635
|
-
context = ctx_factory()
|
|
636
|
-
queue = cl.CommandQueue(context)
|
|
637
|
-
|
|
638
|
-
dtypes = (np.float32, np.complex64)
|
|
639
|
-
from pyopencl.characterize import has_double_support
|
|
640
|
-
if has_double_support(queue.device):
|
|
641
|
-
dtypes = dtypes + (np.float64, np.complex128)
|
|
642
|
-
|
|
643
|
-
from itertools import product
|
|
644
|
-
|
|
645
|
-
for dtype_a, dtype_b in product(dtypes, repeat=2):
|
|
646
|
-
|
|
647
|
-
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
|
|
648
|
-
b = np.array([10, 10, 10, 10, 10, 10, 10, 10, 10, 10]).astype(dtype_b)
|
|
649
|
-
|
|
650
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
651
|
-
b_gpu = cl_array.to_device(queue, b)
|
|
652
|
-
c = a / b
|
|
653
|
-
c_gpu = (a_gpu / b_gpu)
|
|
654
|
-
assert (np.abs(c_gpu.get() - c) < 1e-3).all()
|
|
655
|
-
assert c_gpu.dtype is c.dtype
|
|
656
|
-
|
|
657
|
-
d = b / a
|
|
658
|
-
d_gpu = (b_gpu / a_gpu)
|
|
659
|
-
assert (np.abs(d_gpu.get() - d) < 1e-3).all()
|
|
660
|
-
assert d_gpu.dtype is d.dtype
|
|
661
|
-
|
|
662
|
-
# }}}
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
# {{{ test_divide_inplace_scalar
|
|
666
|
-
|
|
667
|
-
def test_divide_inplace_scalar(ctx_factory):
|
|
668
|
-
"""Test inplace division of arrays and a scalar."""
|
|
669
|
-
|
|
670
|
-
context = ctx_factory()
|
|
671
|
-
queue = cl.CommandQueue(context)
|
|
672
|
-
|
|
673
|
-
if queue.device.platform.name == "Apple":
|
|
674
|
-
pytest.xfail("Apple CL compiler crashes on this.")
|
|
675
|
-
|
|
676
|
-
dtypes = (np.uint8, np.uint16, np.uint32,
|
|
677
|
-
np.int8, np.int16, np.int32,
|
|
678
|
-
np.float32, np.complex64)
|
|
679
|
-
from pyopencl.characterize import has_double_support
|
|
680
|
-
if has_double_support(queue.device):
|
|
681
|
-
dtypes = dtypes + (np.float64, np.complex128)
|
|
682
|
-
|
|
683
|
-
from itertools import product
|
|
684
|
-
|
|
685
|
-
for dtype_a, dtype_s in product(dtypes, repeat=2):
|
|
686
|
-
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
|
|
687
|
-
s = dtype_s(40)
|
|
688
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
689
|
-
|
|
690
|
-
# ensure the same behavior as inplace numpy.ndarray division
|
|
691
|
-
try:
|
|
692
|
-
a /= s
|
|
693
|
-
except TypeError:
|
|
694
|
-
with np.testing.assert_raises(TypeError):
|
|
695
|
-
a_gpu /= s
|
|
696
|
-
else:
|
|
697
|
-
a_gpu /= s
|
|
698
|
-
assert (np.abs(a_gpu.get() - a) < 1e-3).all()
|
|
699
|
-
assert a_gpu.dtype is a.dtype
|
|
700
|
-
|
|
701
|
-
# }}}
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
# {{{ test_divide_inplace_array
|
|
705
|
-
|
|
706
|
-
def test_divide_inplace_array(ctx_factory):
|
|
707
|
-
"""Test inplace division of arrays."""
|
|
708
|
-
|
|
709
|
-
context = ctx_factory()
|
|
710
|
-
queue = cl.CommandQueue(context)
|
|
711
|
-
|
|
712
|
-
dtypes = (np.uint8, np.uint16, np.uint32,
|
|
713
|
-
np.int8, np.int16, np.int32,
|
|
714
|
-
np.float32, np.complex64)
|
|
715
|
-
from pyopencl.characterize import has_double_support
|
|
716
|
-
if has_double_support(queue.device):
|
|
717
|
-
dtypes = dtypes + (np.float64, np.complex128)
|
|
718
|
-
|
|
719
|
-
from itertools import product
|
|
720
|
-
|
|
721
|
-
for dtype_a, dtype_b in product(dtypes, repeat=2):
|
|
722
|
-
print(dtype_a, dtype_b)
|
|
723
|
-
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(dtype_a)
|
|
724
|
-
b = np.array([10, 10, 10, 10, 10, 10, 10, 10, 10, 10]).astype(dtype_b)
|
|
725
|
-
|
|
726
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
727
|
-
b_gpu = cl_array.to_device(queue, b)
|
|
728
|
-
|
|
729
|
-
# ensure the same behavior as inplace numpy.ndarray division
|
|
730
|
-
try:
|
|
731
|
-
a_gpu /= b_gpu
|
|
732
|
-
except TypeError:
|
|
733
|
-
# pass for now, as numpy casts differently for in-place and out-place
|
|
734
|
-
# true_divide
|
|
735
|
-
pass
|
|
736
|
-
# with np.testing.assert_raises(TypeError):
|
|
737
|
-
# a /= b
|
|
738
|
-
else:
|
|
739
|
-
a /= b
|
|
740
|
-
assert (np.abs(a_gpu.get() - a) < 1e-3).all()
|
|
741
|
-
assert a_gpu.dtype is a.dtype
|
|
742
|
-
|
|
743
|
-
# }}}
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
# {{{ test_bitwise
|
|
747
|
-
|
|
748
|
-
def test_bitwise(ctx_factory):
    """Test bitwise and/or/xor (binary, in-place, and with a host scalar)
    plus unary ``~`` against numpy, across signed integer dtype pairs.
    """
    if _PYPY:
        pytest.xfail("numpypy: missing bitwise ops")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from itertools import product

    dtypes = [np.dtype(t) for t in (np.int64, np.int32, np.int16, np.int8)]

    from pyopencl.clrandom import rand as clrand

    for a_dtype, b_dtype in product(dtypes, dtypes):
        ary_len = 16

        int32_min = np.iinfo(np.int32).min
        int32_max = np.iinfo(np.int32).max

        # random operands generated as int64, then narrowed to the dtype
        # under test
        a_dev = clrand(
            queue, (ary_len,), a=int32_min, b=1+int32_max, dtype=np.int64
        ).astype(a_dtype)
        b_dev = clrand(
            queue, (ary_len,), a=int32_min, b=1+int32_max, dtype=np.int64
        ).astype(b_dtype)

        a = a_dev.get()
        b = b_dev.get()
        # a host-side Python int scalar for array-op-scalar cases
        s = int(clrand(queue, (), a=int32_min, b=1+int32_max, dtype=np.int64)
                .astype(b_dtype).get())

        import operator as o

        for op in [o.and_, o.or_, o.xor]:
            res_dev = op(a_dev, b_dev)
            res = op(a, b)

            assert (res_dev.get() == res).all()

            # numpy may refuse the scalar (OverflowError) for narrow dtypes;
            # only compare when the host op succeeds
            try:
                res = op(a, s)
            except OverflowError:
                pass
            else:
                res_dev = op(a_dev, s)

                assert (res_dev.get() == res).all()

            try:
                res = op(s, b)
            except OverflowError:
                pass
            else:
                res_dev = op(s, b_dev)

                assert (res_dev.get() == res).all()

        for op in [o.iand, o.ior, o.ixor]:
            # in-place ops must mutate and return the same device array
            res_dev = a_dev.copy()
            op_res = op(res_dev, b_dev)
            assert op_res is res_dev

            res = a.copy()
            try:
                op(res, b)
            except OverflowError:
                pass
            else:

                assert (res_dev.get() == res).all()

            res = a.copy()
            try:
                op(res, s)
            except OverflowError:
                pass
            else:
                res_dev = a_dev.copy()
                op_res = op(res_dev, s)
                assert op_res is res_dev

                assert (res_dev.get() == res).all()

        # Test unary ~
        res_dev = ~a_dev
        res = ~a  # pylint:disable=invalid-unary-operand-type
        assert (res_dev.get() == res).all()
|
|
835
|
-
|
|
836
|
-
# }}}
|
|
837
|
-
|
|
838
|
-
# }}}
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
# {{{ RNG
|
|
842
|
-
|
|
843
|
-
# {{{ test_random_float_in_range
|
|
844
|
-
|
|
845
|
-
@pytest.mark.parametrize("rng_class",
        [PhiloxGenerator, ThreefryGenerator])
@pytest.mark.parametrize("ary_size", [300, 301, 302, 303, 10007, 1000000])
def test_random_float_in_range(ctx_factory, rng_class, ary_size, plot_hist=False):
    """Check that uniform floats land in [0, 1] (and in a custom [a, b]
    range), and that normal sampling runs, for both RNG generators.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    if has_double_support(context.devices[0]):
        dtypes = [np.float32, np.float64]
    else:
        dtypes = [np.float32]

    gen = rng_class(context)

    for dtype in dtypes:
        print(dtype)
        ran = cl_array.zeros(queue, ary_size, dtype)
        gen.fill_uniform(ran)

        if plot_hist:
            import matplotlib.pyplot as pt
            pt.hist(ran.get(), 30)
            pt.show()

        # default fill_uniform range is the unit interval
        assert (0 <= ran.get()).all()
        assert (ran.get() <= 1).all()

        ran = cl_array.zeros(queue, ary_size, dtype)
        gen.fill_uniform(ran, a=4, b=7)
        ran_host = ran.get()

        # report any out-of-range samples before failing
        for cond in [4 <= ran_host, ran_host <= 7]:
            good = cond.all()
            if not good:
                print(np.where(~cond))
                print(ran_host[~cond])
            assert good

        # only checks that normal sampling executes; no distribution test
        ran = gen.normal(queue, ary_size, dtype, mu=10, sigma=3)

        if plot_hist:
            import matplotlib.pyplot as pt
            pt.hist(ran.get(), 30)
            pt.show()
|
|
889
|
-
|
|
890
|
-
# }}}
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
# {{{ test_random_int_in_range
|
|
894
|
-
|
|
895
|
-
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("rng_class",
        [PhiloxGenerator, ThreefryGenerator])
def test_random_int_in_range(ctx_factory, rng_class, dtype, plot_hist=False):
    """Check that integer uniform samples respect the half-open range
    [a, b) and that the upper end of the range is actually reached.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    gen = rng_class(context)

    # if (dtype == np.int64
    #         and context.devices[0].platform.vendor.startswith("Advanced Micro")):
    #     pytest.xfail("AMD miscompiles 64-bit RNG math")

    ran = gen.uniform(queue, (10000007,), dtype, a=200, b=300).get()
    assert (200 <= ran).all()
    assert (ran < 300).all()

    print(np.min(ran), np.max(ran))
    # with ~1e7 samples the maximum should get close to the exclusive bound
    assert np.max(ran) > 295

    if plot_hist:
        from matplotlib import pyplot as pt
        pt.hist(ran)
        pt.show()
|
|
919
|
-
|
|
920
|
-
# }}}
|
|
921
|
-
|
|
922
|
-
# }}}
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
# {{{ misc
|
|
926
|
-
|
|
927
|
-
# {{{ test_numpy_integer_shape
|
|
928
|
-
|
|
929
|
-
def test_numpy_integer_shape(ctx_factory):
    """Check that numpy integer scalars are accepted as shapes
    (both bare and inside a tuple) by :func:`cl_array.empty`.
    """
    # skip when list(np.int32(...)) does not raise: such numpy builds
    # treat scalars as iterable, which defeats the point of the test
    try:
        list(np.int32(17))
    except Exception:
        pass
    else:
        from pytest import skip
        skip("numpy implementation does not handle scalar correctly.")
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    cl_array.empty(queue, np.int32(17), np.float32)
    cl_array.empty(queue, (np.int32(17), np.int32(17)), np.float32)
|
|
942
|
-
|
|
943
|
-
# }}}
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
# {{{ test_allocation_with_various_shape_scalar_types
|
|
947
|
-
|
|
948
|
-
def test_allocation_with_various_shape_scalar_types(ctx_factory):
    """Check that valid shape entries (Python/numpy integers) are
    accepted and invalid ones (negative, float) raise ValueError.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    dims_ok = (2, np.int32(7), np.uint64(1))
    dims_not_ok = (-1, 5.70, np.float32(7))

    # all 1-/2-/3-d shape tuples built from the valid dimension values
    shapes_ok_1d = list(product(dims_ok))
    shapes_ok_2d = list(product(dims_ok, dims_ok))
    shapes_ok_3d = list(product(dims_ok, dims_ok, dims_ok))

    # shapes containing at least one invalid dimension value
    shapes_not_ok_1d = list(product(dims_not_ok))
    shapes_not_ok_2d = list(product(dims_ok, dims_not_ok))
    shapes_not_ok_3d = list(product(dims_not_ok, dims_not_ok, dims_not_ok))

    for shape in shapes_ok_1d + shapes_ok_2d + shapes_ok_3d:
        cl_array.empty(queue, shape, np.float32)

    for shape in shapes_not_ok_1d + shapes_not_ok_2d + shapes_not_ok_3d:
        with pytest.raises(ValueError):
            cl_array.empty(queue, shape, np.float32)
|
|
969
|
-
|
|
970
|
-
# }}}
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
# {{{ test_len
|
|
974
|
-
|
|
975
|
-
def test_len(ctx_factory):
    """Check that ``len()`` of a 1-d device array equals its length."""
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    a_cpu = cl_array.to_device(queue, a)
    assert len(a_cpu) == 10
|
|
982
|
-
|
|
983
|
-
# }}}
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
# {{{ test_stride_preservation
|
|
987
|
-
|
|
988
|
-
def test_stride_preservation(ctx_factory):
    """Check that transferring a transposed (non-C-contiguous) array to
    the device and back preserves the data.
    """
    if _PYPY:
        pytest.xfail("numpypy: no array creation from __array_interface__")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    rng = np.random.default_rng(seed=42)
    a = rng.random(size=(3, 3))

    # a.T is F-contiguous; print both host and device contiguity flags
    at = a.T
    print(at.flags.f_contiguous, at.flags.c_contiguous)
    at_gpu = cl_array.to_device(queue, at)
    print(at_gpu.flags.f_contiguous, at_gpu.flags.c_contiguous)
    assert np.allclose(at_gpu.get(), at)
|
|
1003
|
-
|
|
1004
|
-
# }}}
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
# {{{ test_nan_arithmetic
|
|
1008
|
-
|
|
1009
|
-
def test_nan_arithmetic(ctx_factory):
    """Check that NaNs propagate through device multiplication exactly
    where numpy propagates them.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)
    rng = np.random.default_rng(seed=42)

    def make_nan_contaminated_vector(size):
        # normal samples with roughly 10% of entries overwritten by NaN
        a = rng.standard_normal(size=(size,), dtype=np.float32)

        from random import randrange
        for _i in range(size // 10):
            a[randrange(0, size)] = float("nan")
        return a

    size = 1 << 20

    a = make_nan_contaminated_vector(size)
    a_gpu = cl_array.to_device(queue, a)
    b = make_nan_contaminated_vector(size)
    b_gpu = cl_array.to_device(queue, b)

    ab = a * b
    ab_gpu = (a_gpu * b_gpu).get()

    # NaN positions must agree element-wise between host and device
    assert (np.isnan(ab) == np.isnan(ab_gpu)).all()
|
|
1033
|
-
|
|
1034
|
-
# }}}
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
# {{{ test_mem_pool_with_arrays
|
|
1038
|
-
|
|
1039
|
-
def test_mem_pool_with_arrays(ctx_factory):
    """Check that arrays created with a MemoryPool allocator keep that
    allocator, including through an arithmetic result.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)
    mem_pool = cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))

    a_dev = cl_array.arange(queue, 2000, dtype=np.float32, allocator=mem_pool)
    # the + 4000 result of a pool-allocated array should also use the pool
    b_dev = cl_array.to_device(queue, np.arange(2000), allocator=mem_pool) + 4000

    assert a_dev.allocator is mem_pool
    assert b_dev.allocator is mem_pool
|
|
1049
|
-
|
|
1050
|
-
# }}}
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
# {{{ test_view
|
|
1054
|
-
|
|
1055
|
-
def test_view(ctx_factory):
    """Check :meth:`Array.view` reinterpretation for the same, a larger,
    and a smaller dtype, verifying the resulting shape and dtype.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    a = np.arange(128).reshape(8, 16).astype(np.float32)
    a_dev = cl_array.to_device(queue, a)

    # same dtype
    view = a_dev.view()
    assert view.shape == a_dev.shape and view.dtype == a_dev.dtype

    # larger dtype: two float32 per complex64 halves the last axis
    view = a_dev.view(np.complex64)
    assert view.shape == (8, 8) and view.dtype == np.complex64

    # smaller dtype: each float32 splits into two int16, doubling it
    view = a_dev.view(np.int16)
    assert view.shape == (8, 32) and view.dtype == np.int16
|
|
1073
|
-
|
|
1074
|
-
# }}}
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
# {{{ test_diff
|
|
1078
|
-
|
|
1079
|
-
def test_diff(ctx_factory):
    """Check :func:`cl.array.diff` against :func:`numpy.diff`."""
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    ary_len = 20000
    a_dev = clrand(queue, (ary_len,), dtype=np.float32)
    a = a_dev.get()

    err = la.norm(
            cl.array.diff(a_dev).get() - np.diff(a))
    assert err < 1e-4
|
|
1092
|
-
|
|
1093
|
-
# }}}
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
# {{{ test_copy
|
|
1097
|
-
|
|
1098
|
-
def test_copy(ctx_factory):
    """Check :meth:`Array.copy`: a real data copy is made, and the copy's
    queue association follows the ``queue=`` argument.
    """
    context = ctx_factory()
    queue1 = cl.CommandQueue(context)
    queue2 = cl.CommandQueue(context)

    # Test copy

    arr = cl.array.zeros(queue1, 100, np.int32)
    arr_copy = arr.copy()

    # same contents, distinct buffers, queue inherited from the source
    assert (arr == arr_copy).all().get()
    assert arr.data != arr_copy.data
    assert arr_copy.queue is queue1

    # Test queue association

    arr_copy = arr.copy(queue=queue2)
    assert arr_copy.queue is queue2

    arr_copy = arr.copy(queue=None)
    assert arr_copy.queue is None

    # copying a queue-less array with an explicit queue binds it
    arr_copy = arr.with_queue(None).copy(queue=queue1)
    assert arr_copy.queue is queue1
|
|
1122
|
-
|
|
1123
|
-
# }}}
|
|
1124
|
-
|
|
1125
|
-
# }}}
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
# {{{ slices, concatenation
|
|
1129
|
-
|
|
1130
|
-
# {{{ test_slice
|
|
1131
|
-
|
|
1132
|
-
def test_slice(ctx_factory):
    """Check slicing of 1-d device arrays: scaled slice reads, empty-slice
    assignment, and slice-to-slice assignment, all against numpy.
    """
    if _PYPY:
        pytest.xfail("numpypy: spurious as_strided failure")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    tp = np.float32

    ary_len = 20000
    a_gpu = clrand(queue, (ary_len,), dtype=tp)
    b_gpu = clrand(queue, (ary_len,), dtype=tp)
    a = a_gpu.get()
    b = b_gpu.get()

    if queue.device.platform.name == "Intel(R) OpenCL":
        pytest.skip("Intel CL regularly crashes on this test case "
                "-- https://github.com/conda-forge/"
                "intel-compiler-repack-feedstock/issues/7")
    else:
        start_offset = 0

        from random import randrange
        # random-range slice reads, scaled by 2
        for _i in range(20):
            start = randrange(ary_len - start_offset)
            end = randrange(start+start_offset, ary_len)

            a_gpu_slice = tp(2)*a_gpu[start:end]
            a_slice = tp(2)*a[start:end]

            assert la.norm(a_gpu_slice.get() - a_slice) == 0

        # empty-slice (end == start) assignment must be a no-op
        for _i in range(20):
            start = randrange(ary_len-start_offset)
            #end = randrange(start+start_offset, ary_len)
            end = start

            a_gpu[start:end] = tp(2)*b[start:end]
            a[start:end] = tp(2)*b[start:end]

            assert la.norm(a_gpu.get() - a) == 0

        # slice assignment from a device-array right-hand side
        for _i in range(20):
            start = randrange(ary_len-start_offset)
            end = randrange(start+start_offset, ary_len)

            a_gpu[start:end] = tp(2)*b_gpu[start:end]
            a[start:end] = tp(2)*b[start:end]

            assert la.norm(a_gpu.get() - a) == 0
|
|
1184
|
-
|
|
1185
|
-
# }}}
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
# {{{ test_concatenate
|
|
1189
|
-
|
|
1190
|
-
def test_concatenate(ctx_factory):
    """Check :func:`cl.array.concatenate` against numpy along axis 0."""
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    # three 3-d arrays differing only in the leading axis
    a_dev = clrand(queue, (5, 15, 20), dtype=np.float32)
    b_dev = clrand(queue, (4, 15, 20), dtype=np.float32)
    c_dev = clrand(queue, (3, 15, 20), dtype=np.float32)
    a = a_dev.get()
    b = b_dev.get()
    c = c_dev.get()

    cat_dev = cl.array.concatenate((a_dev, b_dev, c_dev))
    cat = np.concatenate((a, b, c))

    assert la.norm(cat - cat_dev.get()) == 0
|
|
1207
|
-
|
|
1208
|
-
# }}}
|
|
1209
|
-
|
|
1210
|
-
# }}}
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
# {{{ conditionals, any, all
|
|
1214
|
-
|
|
1215
|
-
# {{{ test_comparisons
|
|
1216
|
-
|
|
1217
|
-
def test_comparisons(ctx_factory):
    """Check all six rich comparisons against numpy, for array-array,
    array-scalar, and scalar-array operands, plus comparing against a
    boolean comparison result.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    ary_len = 20000
    a_dev = clrand(queue, (ary_len,), dtype=np.float32)
    b_dev = clrand(queue, (ary_len,), dtype=np.float32)

    a = a_dev.get()
    b = b_dev.get()

    import operator as o
    for op in [o.eq, o.ne, o.le, o.lt, o.ge, o.gt]:
        res_dev = op(a_dev, b_dev)
        res = op(a, b)

        assert (res_dev.get() == res).all()

        res_dev = op(a_dev, 0)
        res = op(a, 0)

        assert (res_dev.get() == res).all()

        res_dev = op(0, b_dev)
        res = op(0, b)

        assert (res_dev.get() == res).all()

        # comparison of a scalar with a boolean result array
        res2_dev = op(0, res_dev)
        res2 = op(0, res)
        assert (res2_dev.get() == res2).all()
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
# }}}
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
# {{{ test_any_all
|
|
1256
|
-
|
|
1257
|
-
def test_any_all(ctx_factory):
    """Check :meth:`Array.any` and :meth:`Array.all` for all-zero,
    single-nonzero, and all-one arrays.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    ary_len = 20000
    a_dev = cl_array.zeros(queue, (ary_len,), dtype=np.int8)

    # all zeros: neither any() nor all()
    assert not a_dev.all().get()
    assert not a_dev.any().get()

    a_dev[15213] = 1

    # one nonzero element: any() but not all()
    assert not a_dev.all().get()
    assert a_dev.any().get()

    a_dev.fill(1)

    # all ones: both
    assert a_dev.all().get()
    assert a_dev.any().get()
|
|
1276
|
-
|
|
1277
|
-
# }}}
|
|
1278
|
-
|
|
1279
|
-
# }}}
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
# {{{ test_map_to_host
|
|
1283
|
-
|
|
1284
|
-
def test_map_to_host(ctx_factory):
    """Check that a host mapping of a device array sees device-side
    writes and that host-side writes reach the device after unmapping.
    """
    if _PYPY:
        pytest.skip("numpypy: no array creation from __array_interface__")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    # on GPUs, request host-mappable memory explicitly
    if context.devices[0].type & cl.device_type.GPU:
        mf = cl.mem_flags
        allocator = cl_tools.DeferredAllocator(
                context, mf.READ_WRITE | mf.ALLOC_HOST_PTR)
    else:
        allocator = None

    a_dev = cl_array.zeros(queue, (5, 6, 7,), dtype=np.float32, allocator=allocator)
    a_dev[3, 2, 1] = 10            # device-side write
    a_host = a_dev.map_to_host()
    a_host[1, 2, 3] = 10           # host-side write through the mapping

    a_host_saved = a_host.copy()
    # release the mapping so the host write becomes visible on the device
    a_host.base.release(queue)

    a_dev.finish()

    print("DEV[HOST_WRITE]", a_dev.get()[1, 2, 3])
    print("HOST[DEV_WRITE]", a_host_saved[3, 2, 1])

    assert (a_host_saved == a_dev.get()).all()
|
|
1312
|
-
|
|
1313
|
-
# }}}
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
# {{{ test_view_and_strides
|
|
1317
|
-
|
|
1318
|
-
def test_view_and_strides(ctx_factory):
    """Check that viewing a strided sub-slice keeps shape and strides —
    and that .get() on such a view is (still) known-broken, documented
    here via an expected AssertionError.
    """
    if _PYPY:
        pytest.xfail("numpypy: no array creation from __array_interface__")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    x = clrand(queue, (5, 10), dtype=np.float32)
    y = x[:3, :5]          # non-contiguous sub-slice
    yv = y.view()

    assert yv.shape == y.shape
    assert yv.strides == y.strides

    # the data comparison is expected to FAIL — the inner assert raising
    # AssertionError is the asserted behavior
    with pytest.raises(AssertionError):
        assert (yv.get() == x.get()[:3, :5]).all()
|
|
1336
|
-
|
|
1337
|
-
# }}}
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
# {{{ test_meshmode_view
|
|
1341
|
-
|
|
1342
|
-
def test_meshmode_view(ctx_factory):
    """Check that writing through a reshaped sliced view (fill on view
    rows) lands in the right region of the underlying array.
    """
    if _PYPY:
        # https://bitbucket.org/pypy/numpy/issue/28/indexerror-on-ellipsis-slice
        pytest.xfail("numpypy bug #28")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    n = 2
    result = cl.array.empty(queue, (2, n*6), np.float32)

    def view(z):
        # second half of the last axis, reshaped to (..., n, 3);
        # works on both device and host arrays
        return z[..., n*3:n*6].reshape(z.shape[:-1] + (n, 3))

    result = result.with_queue(queue)
    result.fill(0)
    view(result)[0].fill(1)
    view(result)[1].fill(1)
    x = result.get()
    assert (view(x) == 1).all()
|
|
1362
|
-
|
|
1363
|
-
# }}}
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
# {{{ test_event_management
|
|
1367
|
-
|
|
1368
|
-
def test_event_management(ctx_factory):
    """Check per-array event bookkeeping: operations append one event,
    finish() clears them, and the event list is bounded under repeated
    enqueues.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    x = clrand(queue, (5, 10), dtype=np.float32)
    assert len(x.events) == 1, len(x.events)

    x.finish()

    # finishing waits on and drops all pending events
    assert len(x.events) == 0

    # each arithmetic result carries exactly one event
    y = x+x
    assert len(y.events) == 1
    y = x*x
    assert len(y.events) == 1
    y = 2*x
    assert len(y.events) == 1
    y = 2/x
    assert len(y.events) == 1
    y = x/2
    assert len(y.events) == 1
    y = x**2
    assert len(y.events) == 1
    y = 2**x
    assert len(y.events) == 1

    for _i in range(10):
        x.fill(0)

    assert len(x.events) == 10

    # many enqueues: the event list must be pruned, not grow unboundedly
    for _i in range(1000):
        x.fill(0)

    assert len(x.events) < 100
|
|
1405
|
-
|
|
1406
|
-
# }}}
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
# {{{ test_reshape
|
|
1410
|
-
|
|
1411
|
-
def test_reshape(ctx_factory):
    """Check :meth:`Array.reshape`: varargs/tuple/list shape arguments,
    -1 dimension inference, and the error for multiple -1 entries.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    a = np.arange(128).reshape(8, 16).astype(np.float32)
    a_dev = cl_array.to_device(queue, a)

    # different ways to specify the shape
    a_dev.reshape(4, 32)
    a_dev.reshape((4, 32))
    a_dev.reshape([4, 32])

    # using -1 as unknown dimension
    assert a_dev.reshape(-1, 32).shape == (4, 32)
    assert a_dev.reshape((32, -1)).shape == (32, 4)
    assert a_dev.reshape((8, -1, 4)).shape == (8, 4, 4)

    import pytest
    # more than one -1 is ambiguous and must be rejected
    with pytest.raises(ValueError):
        a_dev.reshape(-1, -1, 4)
|
|
1431
|
-
|
|
1432
|
-
# }}}
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
# {{{ test_skip_slicing
|
|
1436
|
-
|
|
1437
|
-
def test_skip_slicing(ctx_factory):
    """Check stepped slicing (``[::3]``) of a 2-d device array against
    numpy, including indexing into the resulting slice.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    a_host = np.arange(16).reshape((4, 4))
    b_host = a_host[::3]

    a = cl_array.to_device(queue, a_host)
    b = a[::3]
    assert b.shape == b_host.shape
    # pylint:disable=unsubscriptable-object
    assert np.array_equal(b[1].get(), b_host[1])
|
|
1449
|
-
|
|
1450
|
-
# }}}
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
# {{{ test_transpose
|
|
1454
|
-
|
|
1455
|
-
def test_transpose(ctx_factory):
    """Check :attr:`Array.T` of a 3-d device array against numpy."""
    if _PYPY:
        pytest.xfail("numpypy: no array creation from __array_interface__")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    a_gpu = clrand(queue, (10, 20, 30), dtype=np.float32)
    a = a_gpu.get()

    # FIXME: not contiguous
    #assert np.allclose(a_gpu.transpose((1,2,0)).get(), a.transpose((1,2,0)))
    assert np.array_equal(a_gpu.T.get(), a.T)
|
|
1470
|
-
|
|
1471
|
-
# }}}
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
# {{{ test_newaxis
|
|
1475
|
-
|
|
1476
|
-
def test_newaxis(ctx_factory):
    """Check ``np.newaxis`` indexing of a device array: shape matches
    numpy, and strides match on every axis of extent > 1.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand

    a_gpu = clrand(queue, (10, 20, 30), dtype=np.float32)
    a = a_gpu.get()

    b_gpu = a_gpu[:, np.newaxis]
    b = a[:, np.newaxis]

    assert b_gpu.shape == b.shape
    # length-1 axes may legitimately differ in stride, so skip them
    for i in range(b.ndim):
        if b.shape[i] > 1:
            assert b_gpu.strides[i] == b.strides[i]
|
|
1492
|
-
|
|
1493
|
-
# }}}
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
# {{{ test_squeeze
|
|
1497
|
-
|
|
1498
|
-
def test_squeeze(ctx_factory):
    """Check :meth:`Array.squeeze`: shape of the squeezed result and
    whether C-contiguity is gained (leading length-1 axes) or not
    (interior length-1 axis).
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)
    rng = np.random.default_rng(seed=42)

    shape = (40, 2, 5, 100)
    a_cpu = rng.random(size=shape)
    a_gpu = cl_array.to_device(queue, a_cpu)

    # Slice with length 1 on dimensions 0 and 1
    a_gpu_slice = a_gpu[0:1, 1:2, :, :]
    assert a_gpu_slice.shape == (1, 1, shape[2], shape[3])
    assert a_gpu_slice.flags.c_contiguous

    # Squeeze it and obtain contiguity
    a_gpu_squeezed_slice = a_gpu[0:1, 1:2, :, :].squeeze()
    assert a_gpu_squeezed_slice.shape == (shape[2], shape[3])
    assert a_gpu_squeezed_slice.flags.c_contiguous

    # Check that we get the original values out
    #assert np.all(a_gpu_slice.get().ravel() == a_gpu_squeezed_slice.get().ravel())

    # Slice with length 1 on dimensions 2
    a_gpu_slice = a_gpu[:, :, 2:3, :]
    assert a_gpu_slice.shape == (shape[0], shape[1], 1, shape[3])
    assert not a_gpu_slice.flags.c_contiguous

    # Squeeze it, but no contiguity here
    a_gpu_squeezed_slice = a_gpu[:, :, 2:3, :].squeeze()
    assert a_gpu_squeezed_slice.shape == (shape[0], shape[1], shape[3])
    assert not a_gpu_squeezed_slice.flags.c_contiguous

    # Check that we get the original values out
    #assert np.all(a_gpu_slice.get().ravel() == a_gpu_squeezed_slice.get().ravel())
|
|
1532
|
-
|
|
1533
|
-
# }}}
|
|
1534
|
-
|
|
1535
|
-
|
|
1536
|
-
# {{{ test_fancy_fill
|
|
1537
|
-
|
|
1538
|
-
def test_fancy_fill(ctx_factory):
    """Check index-array assignment (``dest[idx] = src`` with a
    broadcast single-element source) against numpy.
    """
    if _PYPY:
        pytest.xfail("numpypy: multi value setting is not supported")
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    # host reference: note numpy_src has a single element, broadcast
    # over the three indexed positions
    numpy_dest = np.zeros((4,), np.int32)
    numpy_idx = np.arange(3, dtype=np.int32)
    numpy_src = np.arange(8, 9, dtype=np.int32)
    numpy_dest[numpy_idx] = numpy_src

    cl_dest = cl_array.zeros(queue, (4,), np.int32)
    cl_idx = cl_array.arange(queue, 3, dtype=np.int32)
    cl_src = cl_array.arange(queue, 8, 9, dtype=np.int32)
    cl_dest[cl_idx] = cl_src

    assert np.all(numpy_dest == cl_dest.get())
|
|
1555
|
-
|
|
1556
|
-
# }}}
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
# {{{ test_fancy_indexing
|
|
1560
|
-
|
|
1561
|
-
def test_fancy_indexing(ctx_factory):
    """Check large-scale fancy indexing — both scatter
    (``dest[idx] = src``) and gather (``src[idx]``) — against numpy,
    with a shuffled permutation index.
    """
    if _PYPY:
        pytest.xfail("numpypy: multi value setting is not supported")
    context = ctx_factory()
    queue = cl.CommandQueue(context)
    rng = np.random.default_rng(seed=42)

    # deliberately non-power-of-two size
    n = 2 ** 20 + 2**18 + 22
    numpy_dest = np.zeros(n, dtype=np.int32)
    numpy_idx = np.arange(n, dtype=np.int32)
    rng.shuffle(numpy_idx)
    numpy_src = 20000+np.arange(n, dtype=np.int32)

    cl_dest = cl_array.to_device(queue, numpy_dest)
    cl_idx = cl_array.to_device(queue, numpy_idx)
    cl_src = cl_array.to_device(queue, numpy_src)

    # scatter
    numpy_dest[numpy_idx] = numpy_src
    cl_dest[cl_idx] = cl_src

    assert np.array_equal(numpy_dest, cl_dest.get())

    # gather
    numpy_dest = numpy_src[numpy_idx]
    cl_dest = cl_src[cl_idx]

    assert np.array_equal(numpy_dest, cl_dest.get())
|
|
1587
|
-
|
|
1588
|
-
# }}}
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
# {{{ test_multi_put
|
|
1592
|
-
|
|
1593
|
-
def test_multi_put(ctx_factory):
    """Check :func:`cl_array.multi_put`: scattering several source
    arrays into several output arrays with a shared index, vs numpy.
    """
    if _PYPY:
        pytest.xfail("numpypy: multi value setting is not supported")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    # nine identical source arrays [0, 1, 2)
    cl_arrays = [
        cl_array.arange(queue, 0, 3, dtype=np.float32)
        for i in range(1, 10)
    ]
    idx = cl_array.arange(queue, 0, 6, dtype=np.int32)
    out_arrays = [
        cl_array.zeros(queue, (10,), np.float32)
        for i in range(9)
    ]

    # host reference computed with plain numpy fancy assignment
    out_compare = [np.zeros((10,), np.float32) for i in range(9)]
    for _i, ary in enumerate(out_compare):
        ary[idx.get()] = np.arange(0, 6, dtype=np.float32)

    cl_array.multi_put(cl_arrays, idx, out=out_arrays)

    assert np.all(np.all(out_compare[i] == out_arrays[i].get()) for i in range(9))
|
|
1617
|
-
|
|
1618
|
-
# }}}
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
# {{{ test_get_async
|
|
1622
|
-
|
|
1623
|
-
def test_get_async(ctx_factory):
    """Check asynchronous device-to-host transfer: the deprecated
    ``get(async_=True)``, ``get_async()``, and that ``get_async`` does
    not deadlock while a user event is still pending.
    """
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    device = queue.device
    if device.platform.vendor == "The pocl project" \
            and device.type & cl.device_type.GPU:
        pytest.xfail("the async get test fails on PoCL + Nvidia,"
                "at least the K40, as of PoCL 1.6, 2021-01-20")

    rng = np.random.default_rng(seed=42)
    a = rng.random(10**6, dtype=np.float32)
    a_gpu = cl_array.to_device(queue, a)
    b = a + a**5 + 1
    b_gpu = a_gpu + a_gpu**5 + 1

    # deprecated, but still test
    b1 = b_gpu.get(async_=True)  # testing that this waits for events
    b_gpu.finish()
    assert np.abs(b1 - b).mean() < 1e-5

    b1, evt = b_gpu.get_async()  # testing that this waits for events
    evt.wait()
    assert np.abs(b1 - b).mean() < 1e-5

    # start the transfer while a user event blocks the array, then
    # complete the event: the transfer must finish without hanging
    wait_event = cl.UserEvent(context)
    b_gpu.add_event(wait_event)
    b, evt = b_gpu.get_async()  # testing that this doesn't hang
    wait_event.set_status(cl.command_execution_status.COMPLETE)
    evt.wait()
    assert np.abs(b1 - b).mean() < 1e-5
|
|
1654
|
-
|
|
1655
|
-
# }}}
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
# {{{ test_outoforderqueue_get
|
|
1659
|
-
|
|
1660
|
-
def test_outoforderqueue_get(ctx_factory):
    """Check that ``get()`` on an out-of-order queue waits for the
    events of the pending arithmetic before transferring.
    """
    context = ctx_factory()
    try:
        queue = cl.CommandQueue(context,
               properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE)
    except Exception:
        pytest.skip("out-of-order queue not available")

    rng = np.random.default_rng(seed=42)
    a = rng.random(10**6, dtype=np.float32)
    a_gpu = cl_array.to_device(queue, a)
    b_gpu = a_gpu + a_gpu**5 + 1
    b1 = b_gpu.get()  # testing that this waits for events
    b = a + a**5 + 1
    assert np.abs(b1 - b).mean() < 1e-5
|
|
1675
|
-
|
|
1676
|
-
# }}}
|
|
1677
|
-
|
|
1678
|
-
|
|
1679
|
-
# {{{ test_outoforderqueue_copy
|
|
1680
|
-
|
|
1681
|
-
def test_outoforderqueue_copy(ctx_factory):
|
|
1682
|
-
context = ctx_factory()
|
|
1683
|
-
try:
|
|
1684
|
-
queue = cl.CommandQueue(context,
|
|
1685
|
-
properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE)
|
|
1686
|
-
except Exception:
|
|
1687
|
-
pytest.skip("out-of-order queue not available")
|
|
1688
|
-
|
|
1689
|
-
rng = np.random.default_rng(seed=42)
|
|
1690
|
-
a = rng.random(10**6, dtype=np.float32)
|
|
1691
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
1692
|
-
c_gpu = a_gpu**2 - 7
|
|
1693
|
-
b_gpu = c_gpu.copy() # testing that this waits for and creates events
|
|
1694
|
-
b_gpu *= 10
|
|
1695
|
-
queue.finish()
|
|
1696
|
-
b1 = b_gpu.get()
|
|
1697
|
-
b = 10 * (a**2 - 7)
|
|
1698
|
-
assert np.abs(b1 - b).mean() < 1e-5
|
|
1699
|
-
|
|
1700
|
-
|
|
1701
|
-
# }}}
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
# {{{ test_outoforderqueue_indexing
|
|
1705
|
-
|
|
1706
|
-
def test_outoforderqueue_indexing(ctx_factory):
|
|
1707
|
-
context = ctx_factory()
|
|
1708
|
-
try:
|
|
1709
|
-
queue = cl.CommandQueue(context,
|
|
1710
|
-
properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE)
|
|
1711
|
-
except Exception:
|
|
1712
|
-
pytest.skip("out-of-order queue not available")
|
|
1713
|
-
|
|
1714
|
-
rng = np.random.default_rng(seed=42)
|
|
1715
|
-
a = rng.random(10**6, dtype=np.float32)
|
|
1716
|
-
i = (8e5 + 1e5 * rng.random(10**5)).astype(np.int32)
|
|
1717
|
-
|
|
1718
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
1719
|
-
i_gpu = cl_array.to_device(queue, i)
|
|
1720
|
-
c_gpu = (a_gpu**2)[i_gpu - 10000]
|
|
1721
|
-
b_gpu = 10 - a_gpu
|
|
1722
|
-
b_gpu[:] = 8 * a_gpu
|
|
1723
|
-
b_gpu[i_gpu + 10000] = c_gpu - 10
|
|
1724
|
-
queue.finish()
|
|
1725
|
-
b1 = b_gpu.get()
|
|
1726
|
-
c = (a**2)[i - 10000]
|
|
1727
|
-
b = 8 * a
|
|
1728
|
-
b[i + 10000] = c - 10
|
|
1729
|
-
assert np.abs(b1 - b).mean() < 1e-5
|
|
1730
|
-
|
|
1731
|
-
# }}}
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
# {{{ test_outoforderqueue_reductions
|
|
1735
|
-
|
|
1736
|
-
def test_outoforderqueue_reductions(ctx_factory):
|
|
1737
|
-
context = ctx_factory()
|
|
1738
|
-
try:
|
|
1739
|
-
queue = cl.CommandQueue(context,
|
|
1740
|
-
properties=cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE)
|
|
1741
|
-
except Exception:
|
|
1742
|
-
pytest.skip("out-of-order queue not available")
|
|
1743
|
-
# 0/1 values to avoid accumulated rounding error
|
|
1744
|
-
rng = np.random.default_rng(seed=42)
|
|
1745
|
-
a = (rng.random(10**6) > 0.5).astype(np.float32)
|
|
1746
|
-
|
|
1747
|
-
a[800000] = 10 # all<5 looks true until near the end
|
|
1748
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
1749
|
-
b1 = cl_array.sum(a_gpu).get()
|
|
1750
|
-
b2 = cl_array.dot(a_gpu, 3 - a_gpu).get()
|
|
1751
|
-
b3 = (a_gpu < 5).all().get()
|
|
1752
|
-
assert b1 == a.sum() and b2 == a.dot(3 - a) and b3 == 0
|
|
1753
|
-
|
|
1754
|
-
# }}}
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
# {{{ test_negative_dim_rejection
|
|
1758
|
-
|
|
1759
|
-
def test_negative_dim_rejection(ctx_factory):
|
|
1760
|
-
context = ctx_factory()
|
|
1761
|
-
queue = cl.CommandQueue(context)
|
|
1762
|
-
|
|
1763
|
-
with pytest.raises(ValueError):
|
|
1764
|
-
cl_array.Array(queue, shape=-10, dtype=np.float64)
|
|
1765
|
-
|
|
1766
|
-
with pytest.raises(ValueError):
|
|
1767
|
-
cl_array.Array(queue, shape=(-10,), dtype=np.float64)
|
|
1768
|
-
|
|
1769
|
-
for left_dim in (-1, 0, 1):
|
|
1770
|
-
with pytest.raises(ValueError):
|
|
1771
|
-
cl_array.Array(queue, shape=(left_dim, -1), dtype=np.float64)
|
|
1772
|
-
|
|
1773
|
-
for right_dim in (-1, 0, 1):
|
|
1774
|
-
with pytest.raises(ValueError):
|
|
1775
|
-
cl_array.Array(queue, shape=(-1, right_dim), dtype=np.float64)
|
|
1776
|
-
|
|
1777
|
-
# }}}
|
|
1778
|
-
|
|
1779
|
-
|
|
1780
|
-
# {{{ test_zero_size_array
|
|
1781
|
-
|
|
1782
|
-
@pytest.mark.parametrize("empty_shape", [0, (), (3, 0, 2), (0, 5), (5, 0)])
|
|
1783
|
-
def test_zero_size_array(ctx_factory, empty_shape):
|
|
1784
|
-
context = ctx_factory()
|
|
1785
|
-
queue = cl.CommandQueue(context)
|
|
1786
|
-
|
|
1787
|
-
if queue.device.platform.name == "Intel(R) OpenCL":
|
|
1788
|
-
pytest.xfail("size-0 arrays fail on Intel CL")
|
|
1789
|
-
|
|
1790
|
-
a = cl_array.zeros(queue, empty_shape, dtype=np.float32)
|
|
1791
|
-
b = cl_array.zeros(queue, empty_shape, dtype=np.float32)
|
|
1792
|
-
b.fill(1)
|
|
1793
|
-
c = a + b
|
|
1794
|
-
c_host = c.get()
|
|
1795
|
-
cl_array.to_device(queue, c_host)
|
|
1796
|
-
|
|
1797
|
-
assert c.flags.c_contiguous == c_host.flags.c_contiguous
|
|
1798
|
-
assert c.flags.f_contiguous == c_host.flags.f_contiguous
|
|
1799
|
-
|
|
1800
|
-
for order in "CF":
|
|
1801
|
-
c_flat = c.reshape(-1, order=order)
|
|
1802
|
-
c_host_flat = c_host.reshape(-1, order=order)
|
|
1803
|
-
assert c_flat.shape == c_host_flat.shape
|
|
1804
|
-
assert c_flat.strides == c_host_flat.strides
|
|
1805
|
-
assert c_flat.flags.c_contiguous == c_host_flat.flags.c_contiguous
|
|
1806
|
-
assert c_flat.flags.f_contiguous == c_host_flat.flags.f_contiguous
|
|
1807
|
-
|
|
1808
|
-
# }}}
|
|
1809
|
-
|
|
1810
|
-
|
|
1811
|
-
# {{{ test_str_without_queue
|
|
1812
|
-
|
|
1813
|
-
def test_str_without_queue(ctx_factory):
|
|
1814
|
-
context = ctx_factory()
|
|
1815
|
-
queue = cl.CommandQueue(context)
|
|
1816
|
-
|
|
1817
|
-
a = cl_array.zeros(queue, 10, dtype=np.float32).with_queue(None)
|
|
1818
|
-
print(str(a))
|
|
1819
|
-
print(repr(a))
|
|
1820
|
-
|
|
1821
|
-
# }}}
|
|
1822
|
-
|
|
1823
|
-
|
|
1824
|
-
# {{{ test_stack
|
|
1825
|
-
|
|
1826
|
-
@pytest.mark.parametrize("order", ("F", "C"))
|
|
1827
|
-
@pytest.mark.parametrize("input_dims", (1, 2, 3))
|
|
1828
|
-
def test_stack(ctx_factory, input_dims, order):
|
|
1829
|
-
# Replicates pytato/test/test_codegen.py::test_stack
|
|
1830
|
-
import pyopencl.array as cla
|
|
1831
|
-
cl_ctx = ctx_factory()
|
|
1832
|
-
queue = cl.CommandQueue(cl_ctx)
|
|
1833
|
-
|
|
1834
|
-
shape = (2, 2, 2)[:input_dims]
|
|
1835
|
-
axis = -1 if order == "F" else 0
|
|
1836
|
-
|
|
1837
|
-
rng = np.random.default_rng(seed=42)
|
|
1838
|
-
x_in = rng.random(size=shape)
|
|
1839
|
-
y_in = rng.random(size=shape)
|
|
1840
|
-
x_in = x_in if order == "C" else np.asfortranarray(x_in)
|
|
1841
|
-
y_in = y_in if order == "C" else np.asfortranarray(y_in)
|
|
1842
|
-
|
|
1843
|
-
x = cla.to_device(queue, x_in)
|
|
1844
|
-
y = cla.to_device(queue, y_in)
|
|
1845
|
-
|
|
1846
|
-
np.testing.assert_allclose(cla.stack((x, y), axis=axis).get(),
|
|
1847
|
-
np.stack((x_in, y_in), axis=axis))
|
|
1848
|
-
|
|
1849
|
-
# }}}
|
|
1850
|
-
|
|
1851
|
-
|
|
1852
|
-
# {{{ test_assign_different_strides
|
|
1853
|
-
|
|
1854
|
-
def test_assign_different_strides(ctx_factory):
|
|
1855
|
-
cl_ctx = ctx_factory()
|
|
1856
|
-
queue = cl.CommandQueue(cl_ctx)
|
|
1857
|
-
|
|
1858
|
-
from pyopencl.clrandom import rand as clrand
|
|
1859
|
-
|
|
1860
|
-
a = clrand(queue, (20, 30), dtype=np.float32)
|
|
1861
|
-
b = cl_array.empty(queue, (20, 30), dtype=np.float32, order="F")
|
|
1862
|
-
with pytest.raises(NotImplementedError):
|
|
1863
|
-
b[:] = a
|
|
1864
|
-
|
|
1865
|
-
# }}}
|
|
1866
|
-
|
|
1867
|
-
|
|
1868
|
-
# {{{ test_branch_operations_on_pure_scalars
|
|
1869
|
-
|
|
1870
|
-
def test_branch_operations_on_pure_scalars():
|
|
1871
|
-
rng = np.random.default_rng(seed=42)
|
|
1872
|
-
x = rng.random()
|
|
1873
|
-
y = rng.random()
|
|
1874
|
-
cond = rng.choice([False, True])
|
|
1875
|
-
|
|
1876
|
-
np.testing.assert_allclose(np.maximum(x, y),
|
|
1877
|
-
cl_array.maximum(x, y))
|
|
1878
|
-
np.testing.assert_allclose(np.minimum(x, y),
|
|
1879
|
-
cl_array.minimum(x, y))
|
|
1880
|
-
np.testing.assert_allclose(np.where(cond, x, y),
|
|
1881
|
-
cl_array.if_positive(cond, x, y))
|
|
1882
|
-
|
|
1883
|
-
# }}}
|
|
1884
|
-
|
|
1885
|
-
|
|
1886
|
-
# {{{ test_branch_operations_on_nans
|
|
1887
|
-
|
|
1888
|
-
@pytest.mark.parametrize("op", [
|
|
1889
|
-
cl_array.maximum,
|
|
1890
|
-
cl_array.minimum,
|
|
1891
|
-
])
|
|
1892
|
-
@pytest.mark.parametrize("special_a", [
|
|
1893
|
-
np.nan,
|
|
1894
|
-
np.inf,
|
|
1895
|
-
-np.inf,
|
|
1896
|
-
])
|
|
1897
|
-
@pytest.mark.parametrize("special_b", [
|
|
1898
|
-
np.nan,
|
|
1899
|
-
np.inf,
|
|
1900
|
-
-np.inf,
|
|
1901
|
-
None
|
|
1902
|
-
])
|
|
1903
|
-
def test_branch_operations_on_nans(ctx_factory, op, special_a, special_b):
|
|
1904
|
-
ctx = ctx_factory()
|
|
1905
|
-
cq = cl.CommandQueue(ctx)
|
|
1906
|
-
|
|
1907
|
-
def sb_or(x):
|
|
1908
|
-
if special_b is None:
|
|
1909
|
-
return x
|
|
1910
|
-
else:
|
|
1911
|
-
return special_b
|
|
1912
|
-
|
|
1913
|
-
x_np = np.array([special_a, sb_or(1.), special_a, sb_or(2.), sb_or(3.)],
|
|
1914
|
-
dtype=np.float64)
|
|
1915
|
-
y_np = np.array([special_a, special_a, sb_or(1.), sb_or(3.), sb_or(2.)],
|
|
1916
|
-
dtype=np.float64)
|
|
1917
|
-
|
|
1918
|
-
x_cl = cl_array.to_device(cq, x_np)
|
|
1919
|
-
y_cl = cl_array.to_device(cq, y_np)
|
|
1920
|
-
|
|
1921
|
-
ref = getattr(np, op.__name__)(x_np, y_np)
|
|
1922
|
-
result = op(x_cl, y_cl)
|
|
1923
|
-
if isinstance(result, cl_array.Array):
|
|
1924
|
-
result = result.get()
|
|
1925
|
-
|
|
1926
|
-
np.testing.assert_allclose(result, ref)
|
|
1927
|
-
|
|
1928
|
-
# }}}
|
|
1929
|
-
|
|
1930
|
-
|
|
1931
|
-
# {{{ test_slice_copy
|
|
1932
|
-
|
|
1933
|
-
def test_slice_copy(ctx_factory):
|
|
1934
|
-
cl_ctx = ctx_factory()
|
|
1935
|
-
queue = cl.CommandQueue(cl_ctx)
|
|
1936
|
-
|
|
1937
|
-
rng = np.random.default_rng(seed=42)
|
|
1938
|
-
x = cl.array.to_device(queue, rng.random(size=(96, 27)))
|
|
1939
|
-
y = x[::8, ::3]
|
|
1940
|
-
with pytest.raises(RuntimeError):
|
|
1941
|
-
y.copy()
|
|
1942
|
-
|
|
1943
|
-
# }}}
|
|
1944
|
-
|
|
1945
|
-
|
|
1946
|
-
# {{{{ test_ravel
|
|
1947
|
-
|
|
1948
|
-
@pytest.mark.parametrize("order", ("C", "F"))
|
|
1949
|
-
def test_ravel(ctx_factory, order):
|
|
1950
|
-
ctx = ctx_factory()
|
|
1951
|
-
cq = cl.CommandQueue(ctx)
|
|
1952
|
-
|
|
1953
|
-
rng = np.random.default_rng(seed=42)
|
|
1954
|
-
x = rng.standard_normal(size=(10, 4))
|
|
1955
|
-
|
|
1956
|
-
if order == "F":
|
|
1957
|
-
x = np.asfortranarray(x)
|
|
1958
|
-
elif order == "C":
|
|
1959
|
-
pass
|
|
1960
|
-
else:
|
|
1961
|
-
raise AssertionError
|
|
1962
|
-
|
|
1963
|
-
x_cl = cl.array.to_device(cq, x)
|
|
1964
|
-
|
|
1965
|
-
np.testing.assert_allclose(x_cl.ravel(order=order).get(),
|
|
1966
|
-
x.ravel(order=order))
|
|
1967
|
-
|
|
1968
|
-
# }}}
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
# {{{ test_arithmetic_on_non_scalars
|
|
1972
|
-
|
|
1973
|
-
def test_arithmetic_on_non_scalars(ctx_factory):
|
|
1974
|
-
pytest.importorskip("dataclasses")
|
|
1975
|
-
|
|
1976
|
-
from dataclasses import dataclass
|
|
1977
|
-
ctx = ctx_factory()
|
|
1978
|
-
cq = cl.CommandQueue(ctx)
|
|
1979
|
-
|
|
1980
|
-
@dataclass
|
|
1981
|
-
class ArrayContainer:
|
|
1982
|
-
_data: np.ndarray
|
|
1983
|
-
|
|
1984
|
-
def __eq__(self, other):
|
|
1985
|
-
return ArrayContainer(self._data == other)
|
|
1986
|
-
|
|
1987
|
-
with pytest.raises(TypeError):
|
|
1988
|
-
ArrayContainer(np.ones(100)) + cl.array.zeros(cq, (10,), dtype=np.float64)
|
|
1989
|
-
|
|
1990
|
-
# }}}
|
|
1991
|
-
|
|
1992
|
-
|
|
1993
|
-
# {{{ test_arithmetic_with_device_scalars
|
|
1994
|
-
|
|
1995
|
-
@pytest.mark.parametrize("which", ("add", "sub", "mul", "truediv"))
|
|
1996
|
-
def test_arithmetic_with_device_scalars(ctx_factory, which):
|
|
1997
|
-
import operator
|
|
1998
|
-
|
|
1999
|
-
ctx = ctx_factory()
|
|
2000
|
-
cq = cl.CommandQueue(ctx)
|
|
2001
|
-
|
|
2002
|
-
rng = np.random.default_rng(seed=42)
|
|
2003
|
-
ndim = rng.integers(1, 5)
|
|
2004
|
-
|
|
2005
|
-
shape = tuple(rng.integers(2, 7) for i in range(ndim))
|
|
2006
|
-
|
|
2007
|
-
x_in = rng.random(shape)
|
|
2008
|
-
x_cl = cl_array.to_device(cq, x_in)
|
|
2009
|
-
idx = tuple(rng.integers(0, dim) for dim in shape)
|
|
2010
|
-
|
|
2011
|
-
op = getattr(operator, which)
|
|
2012
|
-
res_cl = op(x_cl, x_cl[idx])
|
|
2013
|
-
res_np = op(x_in, x_in[idx])
|
|
2014
|
-
|
|
2015
|
-
np.testing.assert_allclose(res_cl.get(), res_np)
|
|
2016
|
-
|
|
2017
|
-
# }}}
|
|
2018
|
-
|
|
2019
|
-
|
|
2020
|
-
# {{{ test_if_positive_with_scalars
|
|
2021
|
-
|
|
2022
|
-
@pytest.mark.parametrize("then_type", ["array", "host_scalar", "device_scalar"])
|
|
2023
|
-
@pytest.mark.parametrize("else_type", ["array", "host_scalar", "device_scalar"])
|
|
2024
|
-
def test_if_positive_with_scalars(ctx_factory, then_type, else_type):
|
|
2025
|
-
ctx = ctx_factory()
|
|
2026
|
-
cq = cl.CommandQueue(ctx)
|
|
2027
|
-
|
|
2028
|
-
rng = np.random.default_rng(seed=42)
|
|
2029
|
-
shape = (512,)
|
|
2030
|
-
|
|
2031
|
-
criterion_np = rng.random(shape)
|
|
2032
|
-
criterion_cl = cl_array.to_device(cq, criterion_np)
|
|
2033
|
-
|
|
2034
|
-
def _get_array_or_scalar(rtype, value):
|
|
2035
|
-
if rtype == "array":
|
|
2036
|
-
ary_np = value + np.zeros(shape, dtype=criterion_cl.dtype)
|
|
2037
|
-
ary_cl = value + cl_array.zeros_like(criterion_cl)
|
|
2038
|
-
elif rtype == "host_scalar":
|
|
2039
|
-
ary_np = ary_cl = value
|
|
2040
|
-
elif rtype == "device_scalar":
|
|
2041
|
-
ary_np = value
|
|
2042
|
-
ary_cl = cl_array.to_device(cq, np.array(value))
|
|
2043
|
-
else:
|
|
2044
|
-
raise ValueError(rtype)
|
|
2045
|
-
|
|
2046
|
-
return ary_np, ary_cl
|
|
2047
|
-
|
|
2048
|
-
then_np, then_cl = _get_array_or_scalar(then_type, 0.0)
|
|
2049
|
-
else_np, else_cl = _get_array_or_scalar(else_type, 1.0)
|
|
2050
|
-
|
|
2051
|
-
result_cl = cl_array.if_positive(criterion_cl < 0.5, then_cl, else_cl)
|
|
2052
|
-
result_np = np.where(criterion_np < 0.5, then_np, else_np)
|
|
2053
|
-
|
|
2054
|
-
np.testing.assert_allclose(result_cl.get(), result_np)
|
|
2055
|
-
|
|
2056
|
-
# }}}
|
|
2057
|
-
|
|
2058
|
-
|
|
2059
|
-
# {{{ test_maximum_minimum_with_scalars
|
|
2060
|
-
|
|
2061
|
-
def test_maximum_minimum_with_scalars(ctx_factory):
|
|
2062
|
-
ctx = ctx_factory()
|
|
2063
|
-
cq = cl.CommandQueue(ctx)
|
|
2064
|
-
|
|
2065
|
-
a_np = np.float64(4.0)
|
|
2066
|
-
a_cl = cl_array.to_device(cq, np.array(a_np)).with_queue(None)
|
|
2067
|
-
|
|
2068
|
-
b_np = np.float64(-3.0)
|
|
2069
|
-
b_cl = cl_array.to_device(cq, np.array(b_np)).with_queue(None)
|
|
2070
|
-
|
|
2071
|
-
result = cl_array.maximum(a_np, b_cl, queue=cq)
|
|
2072
|
-
np.testing.assert_allclose(result.get(), a_np)
|
|
2073
|
-
result = cl_array.maximum(a_cl, b_np, queue=cq)
|
|
2074
|
-
np.testing.assert_allclose(result.get(), a_np)
|
|
2075
|
-
result = cl_array.maximum(a_cl, b_cl, queue=cq)
|
|
2076
|
-
np.testing.assert_allclose(result.get(), a_np)
|
|
2077
|
-
|
|
2078
|
-
result = cl_array.minimum(a_np, b_cl, queue=cq)
|
|
2079
|
-
np.testing.assert_allclose(result.get(), b_np)
|
|
2080
|
-
result = cl_array.minimum(a_cl, b_np, queue=cq)
|
|
2081
|
-
np.testing.assert_allclose(result.get(), b_np)
|
|
2082
|
-
result = cl_array.minimum(a_cl, b_cl, queue=cq)
|
|
2083
|
-
np.testing.assert_allclose(result.get(), b_np)
|
|
2084
|
-
|
|
2085
|
-
# Test 'untyped' scalars
|
|
2086
|
-
# FIXME: these don't work with unsized ints
|
|
2087
|
-
result = cl_array.minimum(4.0, b_cl, queue=cq)
|
|
2088
|
-
np.testing.assert_allclose(result.get(), b_np)
|
|
2089
|
-
result = cl_array.maximum(4.0, b_cl, queue=cq)
|
|
2090
|
-
np.testing.assert_allclose(result.get(), a_np)
|
|
2091
|
-
|
|
2092
|
-
result = cl_array.minimum(b_cl, 4.0, queue=cq)
|
|
2093
|
-
np.testing.assert_allclose(result.get(), b_np)
|
|
2094
|
-
result = cl_array.maximum(b_cl, 4.0, queue=cq)
|
|
2095
|
-
np.testing.assert_allclose(result.get(), a_np)
|
|
2096
|
-
|
|
2097
|
-
result = cl_array.minimum(-3.0, 4.0, queue=cq)
|
|
2098
|
-
np.testing.assert_allclose(result, b_np)
|
|
2099
|
-
result = cl_array.maximum(-3.0, 4.0, queue=cq)
|
|
2100
|
-
np.testing.assert_allclose(result, a_np)
|
|
2101
|
-
|
|
2102
|
-
# }}}
|
|
2103
|
-
|
|
2104
|
-
|
|
2105
|
-
# {{{ test_empty_reductions_vs_numpy
|
|
2106
|
-
|
|
2107
|
-
@pytest.mark.parametrize(("reduction", "supports_initial"), [
|
|
2108
|
-
(cl_array.any, False),
|
|
2109
|
-
(cl_array.all, False),
|
|
2110
|
-
(cl_array.sum, True),
|
|
2111
|
-
(cl_array.max, True),
|
|
2112
|
-
(cl_array.min, True),
|
|
2113
|
-
])
|
|
2114
|
-
def test_empty_reductions_vs_numpy(ctx_factory, reduction, supports_initial):
|
|
2115
|
-
ctx = ctx_factory()
|
|
2116
|
-
cq = cl.CommandQueue(ctx)
|
|
2117
|
-
|
|
2118
|
-
# {{{ empty
|
|
2119
|
-
|
|
2120
|
-
x_np = np.array([], dtype=np.float64)
|
|
2121
|
-
x_cl = cl_array.to_device(cq, x_np)
|
|
2122
|
-
|
|
2123
|
-
try:
|
|
2124
|
-
ref = getattr(np, reduction.__name__)(x_np)
|
|
2125
|
-
except ValueError:
|
|
2126
|
-
ref = None
|
|
2127
|
-
|
|
2128
|
-
if ref is None:
|
|
2129
|
-
with pytest.raises(ValueError):
|
|
2130
|
-
reduction(x_cl)
|
|
2131
|
-
else:
|
|
2132
|
-
result = reduction(x_cl)
|
|
2133
|
-
if isinstance(result, cl_array.Array):
|
|
2134
|
-
result = result.get()
|
|
2135
|
-
|
|
2136
|
-
np.testing.assert_allclose(result, ref)
|
|
2137
|
-
|
|
2138
|
-
# }}}
|
|
2139
|
-
|
|
2140
|
-
# {{{ empty with initial
|
|
2141
|
-
|
|
2142
|
-
if supports_initial:
|
|
2143
|
-
ref = getattr(np, reduction.__name__)(x_np, initial=5.0)
|
|
2144
|
-
result = reduction(x_cl, initial=5.0)
|
|
2145
|
-
if isinstance(result, cl_array.Array):
|
|
2146
|
-
result = result.get()
|
|
2147
|
-
|
|
2148
|
-
np.testing.assert_allclose(result, ref)
|
|
2149
|
-
|
|
2150
|
-
# }}}
|
|
2151
|
-
|
|
2152
|
-
# {{{ non-empty with initial
|
|
2153
|
-
|
|
2154
|
-
if supports_initial:
|
|
2155
|
-
x_np = np.linspace(-1, 1, 10)
|
|
2156
|
-
x_cl = cl_array.to_device(cq, x_np)
|
|
2157
|
-
|
|
2158
|
-
ref = getattr(np, reduction.__name__)(x_np, initial=5.0)
|
|
2159
|
-
result = reduction(x_cl, initial=5.0).get()
|
|
2160
|
-
np.testing.assert_allclose(result, ref)
|
|
2161
|
-
|
|
2162
|
-
ref = getattr(np, reduction.__name__)(x_np, initial=-5.0)
|
|
2163
|
-
result = reduction(x_cl, initial=-5.0).get()
|
|
2164
|
-
np.testing.assert_allclose(result, ref)
|
|
2165
|
-
|
|
2166
|
-
# }}}
|
|
2167
|
-
|
|
2168
|
-
# }}}
|
|
2169
|
-
|
|
2170
|
-
|
|
2171
|
-
# {{{ test_reduction_nan_handling
|
|
2172
|
-
|
|
2173
|
-
@pytest.mark.parametrize("with_initial", [False, True])
|
|
2174
|
-
@pytest.mark.parametrize("input_case", ["only nans", "mixed"])
|
|
2175
|
-
@pytest.mark.parametrize("reduction", [
|
|
2176
|
-
cl_array.sum,
|
|
2177
|
-
cl_array.max,
|
|
2178
|
-
cl_array.min,
|
|
2179
|
-
])
|
|
2180
|
-
def test_reduction_nan_handling(ctx_factory, reduction, input_case, with_initial):
|
|
2181
|
-
ctx = ctx_factory()
|
|
2182
|
-
cq = cl.CommandQueue(ctx)
|
|
2183
|
-
|
|
2184
|
-
if input_case == "only nans":
|
|
2185
|
-
x_np = np.array([np.nan, np.nan], dtype=np.float64)
|
|
2186
|
-
elif input_case == "mixed":
|
|
2187
|
-
x_np = np.array([np.nan, 1.], dtype=np.float64)
|
|
2188
|
-
else:
|
|
2189
|
-
raise ValueError("invalid input case")
|
|
2190
|
-
|
|
2191
|
-
x_cl = cl_array.to_device(cq, x_np)
|
|
2192
|
-
|
|
2193
|
-
if with_initial:
|
|
2194
|
-
ref = getattr(np, reduction.__name__)(x_np, initial=5.0)
|
|
2195
|
-
result = reduction(x_cl, initial=5.0)
|
|
2196
|
-
else:
|
|
2197
|
-
ref = getattr(np, reduction.__name__)(x_np)
|
|
2198
|
-
result = reduction(x_cl)
|
|
2199
|
-
|
|
2200
|
-
if isinstance(result, cl_array.Array):
|
|
2201
|
-
result = result.get()
|
|
2202
|
-
|
|
2203
|
-
np.testing.assert_allclose(result, ref)
|
|
2204
|
-
|
|
2205
|
-
# }}}
|
|
2206
|
-
|
|
2207
|
-
|
|
2208
|
-
# {{{ test_reductions_dtype
|
|
2209
|
-
|
|
2210
|
-
def test_dtype_conversions(ctx_factory):
|
|
2211
|
-
ctx = ctx_factory()
|
|
2212
|
-
queue = cl.CommandQueue(ctx)
|
|
2213
|
-
|
|
2214
|
-
ary = cl.array.to_device(queue, np.linspace(0, 1, 32))
|
|
2215
|
-
|
|
2216
|
-
for func, nargs, arg_name in [
|
|
2217
|
-
(cl.array.sum, 1, "dtype"),
|
|
2218
|
-
(cl.array.dot, 2, "dtype"),
|
|
2219
|
-
(cl.array.vdot, 2, "dtype"),
|
|
2220
|
-
(cl.array.cumsum, 1, "output_dtype"),
|
|
2221
|
-
]:
|
|
2222
|
-
for dtype in [np.float32, np.float64]:
|
|
2223
|
-
result = func(*((ary,) * nargs), **{arg_name: dtype})
|
|
2224
|
-
assert result.dtype == dtype, result.dtype
|
|
2225
|
-
|
|
2226
|
-
# }}}
|
|
2227
|
-
|
|
2228
|
-
|
|
2229
|
-
# {{{ test_svm_mem_pool_with_arrays
|
|
2230
|
-
|
|
2231
|
-
@pytest.mark.parametrize("use_mempool", [False, True])
|
|
2232
|
-
def test_arrays_with_svm_allocators(ctx_factory, use_mempool):
|
|
2233
|
-
context = ctx_factory()
|
|
2234
|
-
queue = cl.CommandQueue(context)
|
|
2235
|
-
queue2 = cl.CommandQueue(context)
|
|
2236
|
-
|
|
2237
|
-
from pyopencl.characterize import has_coarse_grain_buffer_svm
|
|
2238
|
-
has_cg_svm = has_coarse_grain_buffer_svm(queue.device)
|
|
2239
|
-
|
|
2240
|
-
if not has_cg_svm:
|
|
2241
|
-
pytest.skip("Need coarse-grained SVM support for this test.")
|
|
2242
|
-
|
|
2243
|
-
alloc = cl_tools.SVMAllocator(context, queue=queue)
|
|
2244
|
-
if use_mempool:
|
|
2245
|
-
alloc = cl_tools.SVMPool(alloc)
|
|
2246
|
-
|
|
2247
|
-
def alloc2(size):
|
|
2248
|
-
allocation = alloc(size)
|
|
2249
|
-
allocation.bind_to_queue(queue2)
|
|
2250
|
-
return allocation
|
|
2251
|
-
|
|
2252
|
-
a_dev = cl_array.arange(queue, 2000, dtype=np.float32, allocator=alloc)
|
|
2253
|
-
b_dev = cl_array.to_device(queue, np.arange(2000), allocator=alloc) + 4000
|
|
2254
|
-
|
|
2255
|
-
assert a_dev.allocator is alloc
|
|
2256
|
-
assert b_dev.allocator is alloc
|
|
2257
|
-
|
|
2258
|
-
assert a_dev.data._queue == queue
|
|
2259
|
-
assert b_dev.data._queue == queue
|
|
2260
|
-
|
|
2261
|
-
a_dev2 = cl_array.arange(queue2, 2000, dtype=np.float32, allocator=alloc2)
|
|
2262
|
-
b_dev2 = cl_array.to_device(queue2, np.arange(2000), allocator=alloc2) + 4000
|
|
2263
|
-
|
|
2264
|
-
assert a_dev2.allocator is alloc2
|
|
2265
|
-
assert b_dev2.allocator is alloc2
|
|
2266
|
-
|
|
2267
|
-
assert a_dev2.data._queue == queue2
|
|
2268
|
-
assert b_dev2.data._queue == queue2
|
|
2269
|
-
|
|
2270
|
-
np.testing.assert_allclose((a_dev+b_dev).get(), (a_dev2+b_dev2).get())
|
|
2271
|
-
|
|
2272
|
-
with pytest.warns(cl_array.InconsistentOpenCLQueueWarning):
|
|
2273
|
-
a_dev2.with_queue(queue)
|
|
2274
|
-
|
|
2275
|
-
# safe to let this proceed to deallocation, since we're not
|
|
2276
|
-
# operating on the memory
|
|
2277
|
-
|
|
2278
|
-
with pytest.warns(cl_array.InconsistentOpenCLQueueWarning):
|
|
2279
|
-
cl_array.empty(queue2, 2000, np.float32, allocator=alloc)
|
|
2280
|
-
|
|
2281
|
-
# safe to let this proceed to deallocation, since we're not
|
|
2282
|
-
# operating on the memory
|
|
2283
|
-
|
|
2284
|
-
# }}}
|
|
2285
|
-
|
|
2286
|
-
|
|
2287
|
-
def test_logical_and_or(ctx_factory):
|
|
2288
|
-
# NOTE: Copied over from pycuda/test/test_gpuarray.py
|
|
2289
|
-
rng = np.random.default_rng(seed=0)
|
|
2290
|
-
ctx = ctx_factory()
|
|
2291
|
-
cq = cl.CommandQueue(ctx)
|
|
2292
|
-
|
|
2293
|
-
for op in ["logical_and", "logical_or"]:
|
|
2294
|
-
x_np = rng.random((10, 4))
|
|
2295
|
-
y_np = rng.random((10, 4))
|
|
2296
|
-
zeros_np = np.zeros((10, 4))
|
|
2297
|
-
ones_np = np.ones((10, 4))
|
|
2298
|
-
|
|
2299
|
-
x_cl = cl_array.to_device(cq, x_np)
|
|
2300
|
-
y_cl = cl_array.to_device(cq, y_np)
|
|
2301
|
-
zeros_cl = cl_array.zeros(cq, (10, 4), np.float64)
|
|
2302
|
-
ones_cl = cl_array.zeros(cq, (10, 4), np.float64) + 1
|
|
2303
|
-
|
|
2304
|
-
np.testing.assert_array_equal(
|
|
2305
|
-
getattr(cl_array, op)(x_cl, y_cl).get(),
|
|
2306
|
-
getattr(np, op)(x_np, y_np))
|
|
2307
|
-
np.testing.assert_array_equal(
|
|
2308
|
-
getattr(cl_array, op)(x_cl, ones_cl).get(),
|
|
2309
|
-
getattr(np, op)(x_np, ones_np))
|
|
2310
|
-
np.testing.assert_array_equal(
|
|
2311
|
-
getattr(cl_array, op)(x_cl, zeros_cl).get(),
|
|
2312
|
-
getattr(np, op)(x_np, zeros_np))
|
|
2313
|
-
np.testing.assert_array_equal(
|
|
2314
|
-
getattr(cl_array, op)(x_cl, 1.0).get(),
|
|
2315
|
-
getattr(np, op)(x_np, ones_np))
|
|
2316
|
-
np.testing.assert_array_equal(
|
|
2317
|
-
getattr(cl_array, op)(x_cl, 0.0).get(),
|
|
2318
|
-
getattr(np, op)(x_np, 0.0))
|
|
2319
|
-
|
|
2320
|
-
|
|
2321
|
-
def test_logical_not(ctx_factory):
|
|
2322
|
-
# NOTE: Copied over from pycuda/test/test_gpuarray.py
|
|
2323
|
-
ctx = ctx_factory()
|
|
2324
|
-
cq = cl.CommandQueue(ctx)
|
|
2325
|
-
|
|
2326
|
-
rng = np.random.default_rng(seed=0)
|
|
2327
|
-
x_np = rng.random((10, 4))
|
|
2328
|
-
x_cl = cl_array.to_device(cq, x_np)
|
|
2329
|
-
|
|
2330
|
-
np.testing.assert_array_equal(
|
|
2331
|
-
cl_array.logical_not(x_cl).get(),
|
|
2332
|
-
np.logical_not(x_np))
|
|
2333
|
-
np.testing.assert_array_equal(
|
|
2334
|
-
cl_array.logical_not(cl_array.zeros(cq, 10, np.float64)).get(),
|
|
2335
|
-
np.logical_not(np.zeros(10)))
|
|
2336
|
-
np.testing.assert_array_equal(
|
|
2337
|
-
cl_array.logical_not((cl_array.zeros(cq, 10, np.float64) + 1)).get(),
|
|
2338
|
-
np.logical_not(np.ones(10)))
|
|
2339
|
-
|
|
2340
|
-
|
|
2341
|
-
# {{{ test XDG_CACHE_HOME handling
|
|
2342
|
-
|
|
2343
|
-
@pytest.mark.skipif(sys.platform == "win32",
|
|
2344
|
-
reason="XDG_CACHE_HOME is not used on Windows")
|
|
2345
|
-
def test_xdg_cache_home(ctx_factory):
|
|
2346
|
-
import os
|
|
2347
|
-
import shutil
|
|
2348
|
-
from os.path import join
|
|
2349
|
-
|
|
2350
|
-
context = ctx_factory()
|
|
2351
|
-
queue = cl.CommandQueue(context)
|
|
2352
|
-
|
|
2353
|
-
a = np.array([1, 2, 3, 4, 5]).astype(np.float32)
|
|
2354
|
-
a_gpu = cl_array.to_device(queue, a)
|
|
2355
|
-
|
|
2356
|
-
xdg_dir = "tmpdir_pyopencl_xdg_test"
|
|
2357
|
-
|
|
2358
|
-
# PyOpenCL uses pytools.PersistentDict for invoker caches,
|
|
2359
|
-
# which is why xdg_dir will always exist. Therefore, check
|
|
2360
|
-
# whether xdg_pyopencl_dir exists.
|
|
2361
|
-
xdg_pyopencl_dir = join(xdg_dir, "pyopencl")
|
|
2362
|
-
assert not os.path.exists(xdg_dir)
|
|
2363
|
-
|
|
2364
|
-
old_xdg_cache_home = None
|
|
2365
|
-
|
|
2366
|
-
try:
|
|
2367
|
-
old_xdg_cache_home = os.getenv("XDG_CACHE_HOME")
|
|
2368
|
-
os.environ["XDG_CACHE_HOME"] = xdg_dir
|
|
2369
|
-
|
|
2370
|
-
result = pow(a_gpu, a_gpu).get()
|
|
2371
|
-
assert (np.abs(a ** a - result) < 3e-3).all()
|
|
2372
|
-
|
|
2373
|
-
assert os.path.exists(xdg_pyopencl_dir)
|
|
2374
|
-
finally:
|
|
2375
|
-
if old_xdg_cache_home is not None:
|
|
2376
|
-
os.environ["XDG_CACHE_HOME"] = old_xdg_cache_home
|
|
2377
|
-
else:
|
|
2378
|
-
del os.environ["XDG_CACHE_HOME"]
|
|
2379
|
-
|
|
2380
|
-
shutil.rmtree(xdg_dir)
|
|
2381
|
-
|
|
2382
|
-
# }}}
|
|
2383
|
-
|
|
2384
|
-
|
|
2385
|
-
if __name__ == "__main__":
|
|
2386
|
-
if len(sys.argv) > 1:
|
|
2387
|
-
exec(sys.argv[1])
|
|
2388
|
-
else:
|
|
2389
|
-
from pytest import main
|
|
2390
|
-
main([__file__])
|
|
2391
|
-
|
|
2392
|
-
# vim: fdm=marker
|