pyopencl 2026.1.1__cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. pyopencl/.libs/libOpenCL-34a55fe4.so.1.0.0 +0 -0
  2. pyopencl/__init__.py +1995 -0
  3. pyopencl/_cl.cpython-314t-aarch64-linux-gnu.so +0 -0
  4. pyopencl/_cl.pyi +2009 -0
  5. pyopencl/_cluda.py +57 -0
  6. pyopencl/_monkeypatch.py +1104 -0
  7. pyopencl/_mymako.py +17 -0
  8. pyopencl/algorithm.py +1454 -0
  9. pyopencl/array.py +3530 -0
  10. pyopencl/bitonic_sort.py +245 -0
  11. pyopencl/bitonic_sort_templates.py +597 -0
  12. pyopencl/cache.py +553 -0
  13. pyopencl/capture_call.py +200 -0
  14. pyopencl/characterize/__init__.py +461 -0
  15. pyopencl/characterize/performance.py +240 -0
  16. pyopencl/cl/pyopencl-airy.cl +324 -0
  17. pyopencl/cl/pyopencl-bessel-j-complex.cl +238 -0
  18. pyopencl/cl/pyopencl-bessel-j.cl +1084 -0
  19. pyopencl/cl/pyopencl-bessel-y.cl +435 -0
  20. pyopencl/cl/pyopencl-complex.h +303 -0
  21. pyopencl/cl/pyopencl-eval-tbl.cl +120 -0
  22. pyopencl/cl/pyopencl-hankel-complex.cl +444 -0
  23. pyopencl/cl/pyopencl-random123/array.h +325 -0
  24. pyopencl/cl/pyopencl-random123/openclfeatures.h +93 -0
  25. pyopencl/cl/pyopencl-random123/philox.cl +486 -0
  26. pyopencl/cl/pyopencl-random123/threefry.cl +864 -0
  27. pyopencl/clmath.py +281 -0
  28. pyopencl/clrandom.py +412 -0
  29. pyopencl/cltypes.py +217 -0
  30. pyopencl/compyte/.gitignore +21 -0
  31. pyopencl/compyte/__init__.py +0 -0
  32. pyopencl/compyte/array.py +211 -0
  33. pyopencl/compyte/dtypes.py +314 -0
  34. pyopencl/compyte/pyproject.toml +49 -0
  35. pyopencl/elementwise.py +1288 -0
  36. pyopencl/invoker.py +417 -0
  37. pyopencl/ipython_ext.py +70 -0
  38. pyopencl/py.typed +0 -0
  39. pyopencl/reduction.py +829 -0
  40. pyopencl/scan.py +1921 -0
  41. pyopencl/tools.py +1680 -0
  42. pyopencl/typing.py +61 -0
  43. pyopencl/version.py +11 -0
  44. pyopencl-2026.1.1.dist-info/METADATA +108 -0
  45. pyopencl-2026.1.1.dist-info/RECORD +47 -0
  46. pyopencl-2026.1.1.dist-info/WHEEL +6 -0
  47. pyopencl-2026.1.1.dist-info/licenses/LICENSE +104 -0
--- /dev/null
+++ pyopencl/bitonic_sort.py
@@ -0,0 +1,245 @@
+from __future__ import annotations
+
+
+__copyright__ = """
+Copyright (c) 2011, Eric Bainville
+Copyright (c) 2015, Ilya Efimoff
+All rights reserved.
+"""
+
+# based on code at http://www.bealto.com/gpu-sorting_intro.html
+
+__license__ = """
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+   may be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+from functools import reduce
+from operator import mul
+from typing import ClassVar
+
+from mako.template import Template
+
+from pytools import memoize_method
+
+import pyopencl as cl
+import pyopencl.bitonic_sort_templates as _tmpl
+from pyopencl.tools import dtype_to_ctype
+
+
+def _is_power_of_2(n):
+    from pyopencl.tools import bitlog2
+    return n == 0 or 2**bitlog2(n) == n
+
+
+class BitonicSort:
+    """Sort an array (or one axis of one) using a sorting network.
+
+    Will only work if the axis of the array to be sorted has a length
+    that is a power of 2.
+
+    .. versionadded:: 2015.2
+
+    .. seealso:: :class:`pyopencl.algorithm.RadixSort`
+
+    .. automethod:: __call__
+    """
+
+    kernels_srcs: ClassVar[dict[str, str]] = {
+            "B2": _tmpl.ParallelBitonic_B2,
+            "B4": _tmpl.ParallelBitonic_B4,
+            "B8": _tmpl.ParallelBitonic_B8,
+            "B16": _tmpl.ParallelBitonic_B16,
+            "C4": _tmpl.ParallelBitonic_C4,
+            "BL": _tmpl.ParallelBitonic_Local,
+            "BLO": _tmpl.ParallelBitonic_Local_Optim,
+            "PML": _tmpl.ParallelMerge_Local
+            }
+
+    def __init__(self, context):
+        self.context = context
+
+    def __call__(self, arr, idx=None, queue=None, wait_for=None, axis=0):
+        """
+        :arg arr: the array to be sorted. Will be overwritten with the sorted array.
+        :arg idx: an array of indices to be tracked along with the sorting of *arr*
+        :arg queue: a :class:`pyopencl.CommandQueue`, defaults to the array's queue
+            if None
+        :arg wait_for: a list of :class:`pyopencl.Event` instances or None
+        :arg axis: the axis of the array by which to sort
+
+        :returns: a tuple (sorted_array, event)
+        """
+
+        if queue is None:
+            queue = arr.queue
+
+        if wait_for is None:
+            wait_for = []
+        wait_for = wait_for + arr.events
+
+        last_evt = cl.enqueue_marker(queue, wait_for=wait_for)
+
+        if arr.shape[axis] == 0:
+            return arr, last_evt
+
+        if not _is_power_of_2(arr.shape[axis]):
+            raise ValueError("sorted array axis length must be a power of 2")
+
+        if idx is None:
+            argsort = 0
+        else:
+            argsort = 1
+
+        run_queue = self.sort_b_prepare_wl(
+                argsort,
+                arr.dtype,
+                idx.dtype if idx is not None else None, arr.shape,
+                axis)
+
+        knl, nt, wg, aux = run_queue[0]
+
+        if idx is not None:
+            if aux:
+                last_evt = knl(
+                        queue, (nt,), wg, arr.data, idx.data,
+                        cl.LocalMemory(
+                            _tmpl.LOCAL_MEM_FACTOR*wg[0]*arr.dtype.itemsize),
+                        cl.LocalMemory(
+                            _tmpl.LOCAL_MEM_FACTOR*wg[0]*idx.dtype.itemsize),
+                        wait_for=[last_evt])
+            for knl, nt, wg, _ in run_queue[1:]:
+                last_evt = knl(
+                        queue, (nt,), wg, arr.data, idx.data,
+                        wait_for=[last_evt])
+
+        else:
+            if aux:
+                last_evt = knl(
+                        queue, (nt,), wg, arr.data,
+                        cl.LocalMemory(
+                            _tmpl.LOCAL_MEM_FACTOR*wg[0]*4*arr.dtype.itemsize),
+                        wait_for=[last_evt])
+            for knl, nt, wg, _ in run_queue[1:]:
+                last_evt = knl(queue, (nt,), wg, arr.data, wait_for=[last_evt])
+
+        return arr, last_evt
+
+    @memoize_method
+    def get_program(self, letter, argsort, params):
+        defstpl = Template(_tmpl.defines)
+
+        defs = defstpl.render(
+                NS="\\", argsort=argsort, inc=params[0], dir=params[1],
+                dtype=params[2], idxtype=params[3],
+                dsize=params[4], nsize=params[5])
+
+        kid = Template(self.kernels_srcs[letter]).render(argsort=argsort)
+
+        prg = cl.Program(self.context, defs + kid).build()
+        return prg
+
+    @memoize_method
+    def sort_b_prepare_wl(self, argsort, key_dtype, idx_dtype, shape, axis):
+        key_ctype = dtype_to_ctype(key_dtype)
+
+        if idx_dtype is None:
+            idx_ctype = "uint"  # Dummy
+
+        else:
+            idx_ctype = dtype_to_ctype(idx_dtype)
+
+        run_queue = []
+        ds = int(shape[axis])
+        size = reduce(mul, shape)
+        ndim = len(shape)
+
+        ns = reduce(mul, shape[(axis+1):]) if axis < ndim-1 else 1
+
+        ds = int(shape[axis])
+        allowb4 = True
+        allowb8 = True
+        allowb16 = True
+
+        dev = self.context.devices[0]
+
+        # {{{ find workgroup size
+
+        wg = min(ds, dev.max_work_group_size)
+
+        available_lmem = dev.local_mem_size
+        while True:
+            lmem_size = _tmpl.LOCAL_MEM_FACTOR*wg*key_dtype.itemsize
+            if argsort:
+                lmem_size += _tmpl.LOCAL_MEM_FACTOR*wg*idx_dtype.itemsize
+
+            if lmem_size + 512 > available_lmem:
+                wg //= 2
+
+                if not wg:
+                    raise RuntimeError(
+                            "too little local memory available on '%s'"
+                            % dev)
+
+            else:
+                break
+
+        # }}}
+
+        length = wg >> 1
+        prg = self.get_program(
+                "BLO", argsort, (1, 1, key_ctype, idx_ctype, ds, ns))
+        run_queue.append((prg.run, size, (wg,), True))
+
+        while length < ds:
+            inc = length
+            while inc > 0:
+                ninc = 0
+                direction = length << 1
+                if allowb16 and inc >= 8 and ninc == 0:
+                    letter = "B16"
+                    ninc = 4
+                elif allowb8 and inc >= 4 and ninc == 0:
+                    letter = "B8"
+                    ninc = 3
+                elif allowb4 and inc >= 2 and ninc == 0:
+                    letter = "B4"
+                    ninc = 2
+                elif inc >= 0:
+                    letter = "B2"
+                    ninc = 1
+                else:
+                    raise AssertionError("Should not happen")
+
+                nthreads = size >> ninc
+
+                prg = self.get_program(letter, argsort,
+                        (inc, direction, key_ctype, idx_ctype, ds, ns))
+                run_queue.append((prg.run, nthreads, None, False,))
+                inc >>= ninc
+
+            length <<= 1
+
+        return run_queue
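
For orientation, the BitonicSort class added above is typically driven through pyopencl.array. The following is a minimal usage sketch, not part of the wheel contents: it assumes a working OpenCL platform is available at runtime, that float32 keys with int32 tracked indices are acceptable dtypes on the chosen device, and it respects the power-of-2 axis-length requirement stated in the class docstring.

import numpy as np

import pyopencl as cl
import pyopencl.array as cl_array
from pyopencl.bitonic_sort import BitonicSort

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# Keys to sort: the length of the sorted axis must be a power of 2.
rng = np.random.default_rng(seed=42)
host_keys = rng.random(2**14).astype(np.float32)
keys = cl_array.to_device(queue, host_keys)

# Optional index array, permuted alongside the keys (yields an argsort).
indices = cl_array.arange(queue, len(host_keys), dtype=np.int32)

sorter = BitonicSort(ctx)
keys, evt = sorter(keys, idx=indices, queue=queue, axis=0)  # sorts in place
evt.wait()

assert np.array_equal(keys.get(), np.sort(host_keys))
assert np.array_equal(host_keys[indices.get()], keys.get())

For arrays whose sorted axis is not a power of 2, the class docstring points to pyopencl.algorithm.RadixSort instead.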