pyopencl 2025.2.5__cp311-cp311-win_amd64.whl → 2025.2.7__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


pyopencl/cache.py CHANGED
@@ -505,7 +505,7 @@ def create_built_program_from_source_cached(ctx, src, options_bytes, devices=Non
         except Exception as e:
             from pyopencl import Error
             build_program_failure = (isinstance(e, Error)
-                    and e.code == _cl.status_code.BUILD_PROGRAM_FAILURE)  # pylint:disable=no-member
+                    and e.code == _cl.status_code.BUILD_PROGRAM_FAILURE)
 
             # Mac error on intel CPU driver: can't build from cached version.
             # If we get a build_program_failure from the cached version then
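
The only change here is dropping a pylint pragma; the logic that classifies the exception is untouched. A minimal sketch of that check in terms of pyopencl's public names (cl.Error, cl.status_code) — the helper name is made up for illustration and is not part of the diff:

```python
import pyopencl as cl

def is_build_failure(exc: Exception) -> bool:
    # Same test as above: a BUILD_PROGRAM_FAILURE raised while building the
    # cached binary is treated as a reason to rebuild from source.
    return isinstance(exc, cl.Error) and exc.code == cl.status_code.BUILD_PROGRAM_FAILURE
```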
@@ -24,8 +24,6 @@ THE SOFTWARE.
 """
 
 
-from typing import cast
-
 from pytools import memoize
 
 import pyopencl as cl
@@ -70,9 +68,9 @@ def reasonable_work_group_size_multiple(
         }
         """)
     prg.build()
-    return cast("int", prg.knl.get_work_group_info(
+    return prg.knl.get_work_group_info(
         cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
-        dev))
+        dev)
 
 
 def nv_compute_capability(dev: cl.Device):
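
The second hunk only removes a typing.cast; the runtime value was already a plain int. For context, this is the query the helper wraps, written as a standalone sketch (the kernel source below is a stand-in, and an OpenCL device must be available at runtime):

```python
import pyopencl as cl

ctx = cl.create_some_context()
dev = ctx.devices[0]

# Any trivial kernel is enough to ask the driver for its preferred
# work-group size multiple.
prg = cl.Program(ctx, """
    __kernel void knl(__global float *a)
    {
        a[get_global_id(0)] = 0;
    }
    """).build()

multiple = prg.knl.get_work_group_info(
    cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE, dev)
print(type(multiple), multiple)  # <class 'int'>, e.g. 32 on many GPUs
```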
pyopencl/clmath.py CHANGED
@@ -1,4 +1,3 @@
-# pylint:disable=unexpected-keyword-arg  # for @elwise_kernel_runner
 from __future__ import annotations
 
 
pyopencl/cltypes.py CHANGED
@@ -22,13 +22,17 @@ THE SOFTWARE.
 """
 
 import warnings
-from typing import Any
+from typing import TYPE_CHECKING, Any, cast
 
 import numpy as np
 
 from pyopencl.tools import get_or_register_dtype
 
 
+if TYPE_CHECKING:
+    import builtins
+    from collections.abc import MutableSequence
+
 if __file__.endswith("array.py"):
     warnings.warn(
         "pyopencl.array.vec is deprecated. Please use pyopencl.cltypes.",
@@ -53,16 +57,19 @@ double = np.float64
 
 # {{{ vector types
 
-def _create_vector_types():
+def _create_vector_types() -> tuple[
+        dict[tuple[np.dtype[Any], builtins.int], np.dtype[Any]],
+        dict[np.dtype[Any], tuple[np.dtype[Any], builtins.int]]]:
     mapping = [(k, globals()[k]) for k in
             ["char", "uchar", "short", "ushort", "int",
                 "uint", "long", "ulong", "float", "double"]]
 
-    def set_global(key, val):
+    def set_global(key: str, val: np.dtype[Any]) -> None:
         globals()[key] = val
 
-    vec_types = {}
-    vec_type_to_scalar_and_count = {}
+    vec_types: dict[tuple[np.dtype[Any], builtins.int], np.dtype[Any]] = {}
+    vec_type_to_scalar_and_count: dict[np.dtype[Any],
+                                       tuple[np.dtype[Any], builtins.int]] = {}
 
     field_names = ["x", "y", "z", "w"]
 
@@ -70,20 +77,21 @@ def _create_vector_types():
 
     for base_name, base_type in mapping:
        for count in counts:
-            name = "%s%d" % (base_name, count)
-
-            titles = field_names[:count]
+            name = f"{base_name}{count}"
+            titles = cast("MutableSequence[str | None]", field_names[:count])
 
             padded_count = count
             if count == 3:
                 padded_count = 4
 
-            names = ["s%d" % i for i in range(count)]
+            names = [f"s{i}" for i in range(count)]
             while len(names) < padded_count:
-                names.append("padding%d" % (len(names) - count))
+                pad = len(names) - count
+                names.append(f"padding{pad}")
 
             if len(titles) < len(names):
-                titles.extend((len(names) - len(titles)) * [None])
+                pad = len(names) - len(titles)
+                titles.extend([None] * pad)
 
             try:
                 dtype = np.dtype({
@@ -96,14 +104,16 @@ def _create_vector_types():
                         for (n, title)
                         in zip(names, titles, strict=True)])
             except TypeError:
-                dtype = np.dtype([(n, base_type) for (n, title)
-                    in zip(names, titles, strict=True)])
+                dtype = np.dtype([(n, base_type) for n in names])
 
+            assert isinstance(dtype, np.dtype)
             get_or_register_dtype(name, dtype)
-
             set_global(name, dtype)
 
-            def create_array(dtype, count, padded_count, *args, **kwargs):
+            def create_array(dtype: np.dtype[Any],
+                    count: int,
+                    padded_count: int,
+                    *args: Any, **kwargs: Any) -> dict[str, Any]:
                 if len(args) < count:
                     from warnings import warn
                     warn("default values for make_xxx are deprecated;"
@@ -116,21 +126,26 @@ def _create_vector_types():
                         {"array": np.array,
                          "padded_args": padded_args,
                          "dtype": dtype})
-                for key, val in list(kwargs.items()):
+
+                for key, val in kwargs.items():
                     array[key] = val
+
                 return array
 
-            set_global("make_" + name, eval(
-                    "lambda *args, **kwargs: create_array(dtype, %i, %i, "
-                    "*args, **kwargs)" % (count, padded_count),
-                    {"create_array": create_array, "dtype": dtype}))
-            set_global("filled_" + name, eval(
-                    "lambda val: make_%s(*[val]*%i)" % (name, count)))
-            set_global("zeros_" + name, eval("lambda: filled_%s(0)" % (name)))
-            set_global("ones_" + name, eval("lambda: filled_%s(1)" % (name)))
-
-            vec_types[np.dtype(base_type), count] = dtype
-            vec_type_to_scalar_and_count[dtype] = np.dtype(base_type), count
+            set_global(
+                f"make_{name}",
+                eval("lambda *args, **kwargs: "
+                     f"create_array(dtype, {count}, {padded_count}, *args, **kwargs)",
+                     {"create_array": create_array, "dtype": dtype}))
+            set_global(
+                f"filled_{name}",
+                eval(f"lambda val: make_{name}(*[val]*{count})"))
+            set_global(f"zeros_{name}", eval(f"lambda: filled_{name}(0)"))
+            set_global(f"ones_{name}", eval(f"lambda: filled_{name}(1)"))
+
+            base_dtype = np.dtype(base_type)
+            vec_types[base_dtype, count] = dtype
+            vec_type_to_scalar_and_count[dtype] = base_dtype, count
 
     return vec_types, vec_type_to_scalar_and_count
 
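
The constructor rewrite above is mechanical (f-strings plus annotations); the names _create_vector_types() installs in pyopencl.cltypes keep working as before. A usage sketch of that existing surface — nothing in it is new to 2025.2.7:

```python
import numpy as np
import pyopencl.cltypes as cltypes

v = cltypes.make_float4(1.0, 2.0, 3.0, 4.0)    # 0-d structured value
print(v["x"], v["w"])                          # fields also reachable as s0..s3

z = cltypes.zeros_float4()                     # equivalent to filled_float4(0)
o = cltypes.ones_int2()                        # equivalent to filled_int2(1)

# The two dicts returned above map (scalar dtype, count) to the vector dtype
# and back again:
assert cltypes.vec_types[np.dtype(np.float32), 4] == cltypes.float4
assert cltypes.vec_type_to_scalar_and_count[cltypes.float4] == (np.dtype(np.float32), 4)
```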
pyopencl/compyte/array.py CHANGED
@@ -63,7 +63,7 @@ def equal_strides(
     if len(strides1) != len(strides2) or len(strides2) != len(shape):
         return False
 
-    for s, st1, st2 in zip(shape, strides1, strides2):
+    for s, st1, st2 in zip(shape, strides1, strides2, strict=True):
         if s != 1 and st1 != st2:
             return False
 
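strict=True is purely defensive here: the length check a few lines earlier already rejects mismatched inputs, so equal_strides() behaves the same. The difference in plain Python, for reference:

```python
shape = (4, 5)
strides1 = (20, 4)
strides2 = (20, 4, 1)   # one entry too many

# Without strict=True, zip() silently truncates to the shortest input:
print(list(zip(shape, strides1, strides2)))     # [(4, 20, 20), (5, 4, 4)]

# With strict=True (Python 3.10+), the mismatch raises ValueError instead:
try:
    list(zip(shape, strides1, strides2, strict=True))
except ValueError:
    print("length mismatch detected")
```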
@@ -98,30 +98,27 @@ class ArrayIsh(Protocol):
 
 
 class ArrayFlags:
-    f_contiguous: bool
-    c_contiguous: bool
-    forc: bool
-
-    def __init__(self, ary: ArrayIsh):
-        self.f_contiguous = is_f_contiguous_strides(
+    def __init__(self, ary: ArrayIsh) -> None:
+        self.f_contiguous: bool = is_f_contiguous_strides(
             ary.strides, ary.dtype.itemsize, ary.shape)
-        self.c_contiguous = is_c_contiguous_strides(
+        self.c_contiguous: bool = is_c_contiguous_strides(
             ary.strides, ary.dtype.itemsize, ary.shape)
-        self.forc = self.f_contiguous or self.c_contiguous
+        self.forc: bool = self.f_contiguous or self.c_contiguous
 
     @override
-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             f"  C_CONTIGUOUS : {self.c_contiguous}\n"
             f"  F_CONTIGUOUS : {self.f_contiguous}"
         )
 
     @override
-    def __str__(self):
+    def __str__(self) -> str:
         return repr(self)
 
 
-def get_common_dtype(obj1, obj2, allow_double):
+def get_common_dtype(obj1: ArrayIsh, obj2: ArrayIsh,
+                     allow_double: bool) -> np.dtype[Any]:
     # Yes, numpy behaves differently depending on whether
     # we're dealing with arrays or scalars.
 
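ArrayFlags gains only annotations and inline attribute typing; the flags themselves are computed exactly as before. They surface on pyopencl arrays like this (sketch, assuming a working OpenCL context):

```python
import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

a = cl_array.zeros(queue, (16, 16), dtype=np.float32)
flags = a.flags                # an ArrayFlags instance
print(flags.c_contiguous)      # True for a freshly allocated array
print(flags.forc)              # c_contiguous or f_contiguous
print(flags)                   # the C_CONTIGUOUS / F_CONTIGUOUS lines from __repr__
```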
@@ -143,33 +140,6 @@ def get_common_dtype(obj1, obj2, allow_double):
     return result
 
 
-def bound(a):
-    high = a.bytes
-    low = a.bytes
-
-    for stri, shp in zip(a.strides, a.shape):
-        if stri < 0:
-            low += (stri)*(shp-1)
-        else:
-            high += (stri)*(shp-1)
-    return low, high
-
-
-def may_share_memory(a, b):
-    # When this is called with a an ndarray and b
-    # a sparse matrix, numpy.may_share_memory fails.
-    if a is b:
-        return True
-    if a.__class__ is b.__class__:
-        a_l, a_h = bound(a)
-        b_l, b_h = bound(b)
-        if b_l >= a_h or a_l >= b_h:
-            return False
-        return True
-    else:
-        return False
-
-
 # {{{ as_strided implementation
 
 try:
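
The deleted helpers implemented a conservative byte-range overlap test; their own comment explains they existed because numpy.may_share_memory fails when one operand is a sparse matrix. For plain ndarrays, NumPy's built-in check answers the same question (shown only for comparison; this is not pyopencl API):

```python
import numpy as np

a = np.arange(12)
view = a[3:9]      # shares a's buffer
copy = a.copy()    # separate allocation

print(np.may_share_memory(a, view))   # True
print(np.may_share_memory(a, copy))   # False
```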
@@ -26,8 +26,8 @@ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 OTHER DEALINGS IN THE SOFTWARE.
 """
 
-from collections.abc import Sequence
-from typing import Any, Callable, TypeVar
+from collections.abc import Callable, Sequence
+from typing import Any, TypeVar
 
 import numpy as np
 from numpy.typing import DTypeLike
@@ -40,14 +40,12 @@ class TypeNameNotKnown(RuntimeError):  # noqa: N818
 # {{{ registry
 
 class DTypeRegistry:
-    dtype_to_name: dict[np.dtype[Any] | str, str]
-    name_to_dtype: dict[str, np.dtype[Any]]
+    def __init__(self) -> None:
+        self.dtype_to_name: dict[np.dtype[Any] | str, str] = {}
+        self.name_to_dtype: dict[str, np.dtype[Any]] = {}
 
-    def __init__(self):
-        self.dtype_to_name = {}
-        self.name_to_dtype = {}
-
-    def get_or_register_dtype(self,
+    def get_or_register_dtype(
+            self,
             names: str | Sequence[str],
             dtype: DTypeLike | None = None) -> np.dtype[Any]:
         """Get or register a :class:`numpy.dtype` associated with the C type names
@@ -227,7 +225,7 @@ dtype_to_ctype = TYPE_REGISTRY.dtype_to_ctype
 get_or_register_dtype = TYPE_REGISTRY.get_or_register_dtype
 
 
-def _fill_dtype_registry(respect_windows, include_bool=True):
+def _fill_dtype_registry(respect_windows: bool, include_bool: bool = True) -> None:
     fill_registry_with_c_types(
         TYPE_REGISTRY, respect_windows, include_bool)
 
@@ -244,7 +242,7 @@ def parse_c_arg_backend(
         scalar_arg_factory: Callable[[np.dtype[Any], str], ArgTypeT],
         vec_arg_factory: Callable[[np.dtype[Any], str], ArgTypeT],
         name_to_dtype: Callable[[str], np.dtype[Any]] | DTypeRegistry | None = None,
-        ):
+        ) -> ArgTypeT:
     if isinstance(name_to_dtype, DTypeRegistry):
         name_to_dtype_clbl = name_to_dtype.name_to_dtype.__getitem__
     elif name_to_dtype is None:
@@ -40,9 +40,6 @@ reportUnnecessaryIsInstance = "none"
 reportUnusedCallResult = "none"
 reportExplicitAny = "none"
 reportUnreachable = "hint"
-# array.py looks like stdlib array, but pyright doesn't know this
-# won't ever be a top-level anything.
-reportShadowedImports = "none"
 
 # This reports even cycles that are qualified by 'if TYPE_CHECKING'. Not what
 # we care about at this moment.