imops 0.8.2-cp310-cp310-win_amd64.whl → 0.8.3-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of imops has been flagged as potentially problematic.
- _build_utils.py +87 -0
- imops/__init__.py +1 -0
- imops/__version__.py +1 -1
- imops/backend.py +14 -10
- imops/crop.py +18 -2
- imops/interp1d.py +7 -4
- imops/measure.py +7 -7
- imops/morphology.py +6 -5
- imops/numeric.py +376 -0
- imops/pad.py +41 -5
- imops/radon.py +7 -5
- imops/src/_backprojection.c +83 -83
- imops/src/_backprojection.cp310-win_amd64.pyd +0 -0
- imops/src/_fast_backprojection.c +96 -96
- imops/src/_fast_backprojection.cp310-win_amd64.pyd +0 -0
- imops/src/_fast_measure.c +96 -96
- imops/src/_fast_measure.cp310-win_amd64.pyd +0 -0
- imops/src/_fast_morphology.c +96 -96
- imops/src/_fast_morphology.cp310-win_amd64.pyd +0 -0
- imops/src/_fast_numeric.c +20545 -4996
- imops/src/_fast_numeric.cp310-win_amd64.pyd +0 -0
- imops/src/_fast_numeric.pyx +208 -30
- imops/src/_fast_radon.c +96 -96
- imops/src/_fast_radon.cp310-win_amd64.pyd +0 -0
- imops/src/_fast_zoom.c +96 -96
- imops/src/_fast_zoom.cp310-win_amd64.pyd +0 -0
- imops/src/_measure.c +83 -83
- imops/src/_measure.cp310-win_amd64.pyd +0 -0
- imops/src/_morphology.c +83 -83
- imops/src/_morphology.cp310-win_amd64.pyd +0 -0
- imops/src/_numeric.c +20532 -4983
- imops/src/_numeric.cp310-win_amd64.pyd +0 -0
- imops/src/_numeric.pyx +208 -30
- imops/src/_radon.c +83 -83
- imops/src/_radon.cp310-win_amd64.pyd +0 -0
- imops/src/_zoom.c +83 -83
- imops/src/_zoom.cp310-win_amd64.pyd +0 -0
- imops/utils.py +65 -12
- imops/zoom.py +2 -2
- {imops-0.8.2.dist-info → imops-0.8.3.dist-info}/METADATA +3 -2
- imops-0.8.3.dist-info/RECORD +60 -0
- {imops-0.8.2.dist-info → imops-0.8.3.dist-info}/WHEEL +1 -1
- imops-0.8.3.dist-info/top_level.txt +2 -0
- _pyproject_build.py +0 -61
- imops/_numeric.py +0 -124
- imops-0.8.2.dist-info/RECORD +0 -60
- imops-0.8.2.dist-info/top_level.txt +0 -2
- {imops-0.8.2.dist-info → imops-0.8.3.dist-info}/LICENSE +0 -0
imops/numeric.py
ADDED
@@ -0,0 +1,376 @@
from typing import Callable, Sequence, Union

import numpy as np

from .backend import BackendLike, resolve_backend
from .src._fast_numeric import (
    _copy_3d as cython_fast_copy_3d,
    _copy_3d_fp16 as cython_fast_copy_3d_fp16,
    _copy_4d as cython_fast_copy_4d,
    _copy_4d_fp16 as cython_fast_copy_4d_fp16,
    _fill_3d as cython_fast_fill_3d,
    _fill_4d as cython_fast_fill_4d,
    _pointwise_add_array_3d as cython_fast_pointwise_add_array_3d,
    _pointwise_add_array_3d_fp16 as cython_fast_pointwise_add_array_3d_fp16,
    _pointwise_add_array_4d as cython_fast_pointwise_add_array_4d,
    _pointwise_add_array_4d_fp16 as cython_fast_pointwise_add_array_4d_fp16,
    _pointwise_add_value_3d as cython_fast_pointwise_add_value_3d,
    _pointwise_add_value_3d_fp16 as cython_fast_pointwise_add_value_3d_fp16,
    _pointwise_add_value_4d as cython_fast_pointwise_add_value_4d,
    _pointwise_add_value_4d_fp16 as cython_fast_pointwise_add_value_4d_fp16,
)
from .src._numeric import (
    _copy_3d as cython_copy_3d,
    _copy_3d_fp16 as cython_copy_3d_fp16,
    _copy_4d as cython_copy_4d,
    _copy_4d_fp16 as cython_copy_4d_fp16,
    _fill_3d as cython_fill_3d,
    _fill_4d as cython_fill_4d,
    _pointwise_add_array_3d as cython_pointwise_add_array_3d,
    _pointwise_add_array_3d_fp16 as cython_pointwise_add_array_3d_fp16,
    _pointwise_add_array_4d as cython_pointwise_add_array_4d,
    _pointwise_add_array_4d_fp16 as cython_pointwise_add_array_4d_fp16,
    _pointwise_add_value_3d as cython_pointwise_add_value_3d,
    _pointwise_add_value_3d_fp16 as cython_pointwise_add_value_3d_fp16,
    _pointwise_add_value_4d as cython_pointwise_add_value_4d,
    _pointwise_add_value_4d_fp16 as cython_pointwise_add_value_4d_fp16,
)
from .utils import normalize_num_threads


_TYPES = (np.int16, np.int32, np.int64, np.float16, np.float32, np.float64)
_STR_TYPES = ('int16', 'int32', 'int64', 'float16', 'float32', 'float64')
# TODO: Decide which value to use. Functions below are quite fast and simple, so parallelization overhead is noticeable.
_NUMERIC_DEFAULT_NUM_THREADS = 4


# TODO: Maybe dict is better?
def _choose_cython_pointwise_add(ndim: int, summand_is_array: bool, is_fp16: bool, fast: bool) -> Callable:
    assert ndim <= 4, ndim

    if ndim <= 3:
        if summand_is_array:
            if is_fp16:
                return cython_fast_pointwise_add_array_3d_fp16 if fast else cython_pointwise_add_array_3d_fp16

            return cython_fast_pointwise_add_array_3d if fast else cython_pointwise_add_array_3d

        if is_fp16:
            return cython_fast_pointwise_add_value_3d_fp16 if fast else cython_pointwise_add_value_3d_fp16

        return cython_fast_pointwise_add_value_3d if fast else cython_pointwise_add_value_3d

    if summand_is_array:
        if is_fp16:
            return cython_fast_pointwise_add_array_4d_fp16 if fast else cython_pointwise_add_array_4d_fp16

        return cython_fast_pointwise_add_array_4d if fast else cython_pointwise_add_array_4d

    if is_fp16:
        return cython_fast_pointwise_add_value_4d_fp16 if fast else cython_pointwise_add_value_4d_fp16

    return cython_fast_pointwise_add_value_4d if fast else cython_pointwise_add_value_4d


def _choose_cython_fill_(ndim: int, fast: bool) -> Callable:
    assert ndim <= 4, ndim

    if ndim <= 3:
        return cython_fast_fill_3d if fast else cython_fill_3d

    return cython_fast_fill_4d if fast else cython_fill_4d


def _choose_cython_copy(ndim: int, is_fp16: bool, fast: bool) -> Callable:
    assert ndim <= 4, ndim

    if ndim <= 3:
        if is_fp16:
            return cython_fast_copy_3d_fp16 if fast else cython_copy_3d_fp16

        return cython_fast_copy_3d if fast else cython_copy_3d

    if is_fp16:
        return cython_fast_copy_4d_fp16 if fast else cython_copy_4d_fp16

    return cython_fast_copy_4d if fast else cython_copy_4d


def pointwise_add(
    nums: np.ndarray,
    summand: Union[np.array, int, float],
    output: np.ndarray = None,
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Perform pointwise addition between array and array or scalar.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    nums: np.ndarray
        n-dimensional array
    summand: np.ndarray | int | float
        array of the same shape or scalar
    output: np.ndarray
        array of the same shape as input, into which the output is placed. By default, a new
        array is created
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    sum: np.ndarray
        result of summation

    Examples
    --------
    >>> sum = pointwise_add(x, 1, x)  # inplace addition
    >>> sum = pointwise_add(x, 1, backend='Scipy')  # just `np.add`
    >>> sum = pointwise_add(x.astype('float32'), x.astype('float16'))  # will fail because of different dtypes
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    dtype = nums.dtype

    if dtype not in _TYPES:
        raise ValueError(f'Input array dtype must be one of {", ".join(_STR_TYPES)}, got {dtype}.')

    if output is None:
        output = np.empty_like(nums, dtype=dtype)
    elif output.shape != nums.shape:
        raise ValueError(f'Input array and output array shapes must be the same, got {nums.shape} vs {output.shape}.')
    elif dtype != output.dtype:
        raise ValueError(f'Input array and output array dtypes must be the same, got {dtype} vs {output.dtype}.')

    summand_is_array = isinstance(summand, np.ndarray)
    if summand_is_array:
        if dtype != summand.dtype:
            raise ValueError(f'Input and summand arrays must have same dtypes, got {dtype} vs {summand.dtype}.')
    elif not isinstance(summand, (*_TYPES, *(int, float))):
        raise ValueError(f'Summand dtype must be one of {", ".join(_STR_TYPES)}, got {type(summand)}.')
    else:
        summand = dtype.type(summand)

    ndim = nums.ndim
    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    if backend.name == 'Scipy' or ndim > 4:
        np.add(nums, summand, out=output)
        return output

    is_fp16 = dtype == np.float16
    src_pointwise_add = _choose_cython_pointwise_add(ndim, summand_is_array, is_fp16, backend.fast)

    n_dummy = 3 - ndim if ndim <= 3 else 0

    if n_dummy:
        nums = nums[(None,) * n_dummy]
        output = output[(None,) * n_dummy]
        if summand_is_array:
            summand = summand[(None,) * n_dummy]

    if is_fp16:
        output = src_pointwise_add(
            nums.view(np.uint16), summand.view(np.uint16), output.view(np.uint16), num_threads
        ).view(np.float16)
    else:
        output = src_pointwise_add(nums, summand, output, num_threads)

    if n_dummy:
        output = output[(0,) * n_dummy]

    return output


def fill_(
    nums: np.ndarray,
    value: Union[np.number, int, float],
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> None:
    """
    Fill the array with a scalar value.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    nums: np.ndarray
        n-dimensional array
    value: np.number | int | float
        scalar
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Examples
    --------
    >>> fill_(x, 1)
    >>> fill_(np.empty((2, 3, 4)), 42)
    >>> fill_(x.astype('uint16'), 3)  # will fail because of unsupported uint16 dtype
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    ndim = nums.ndim
    dtype = nums.dtype

    if dtype not in _TYPES or backend.name == 'Scipy' or ndim > 4:
        nums.fill(value)
        return

    is_fp16 = dtype == np.float16
    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)
    src_fill_ = _choose_cython_fill_(ndim, backend.fast)
    value = dtype.type(value)

    n_dummy = 3 - ndim if ndim <= 3 else 0

    if n_dummy:
        nums = nums[(None,) * n_dummy]

    if is_fp16:
        src_fill_(nums.view(np.uint16), value.view(np.uint16), num_threads)
    else:
        src_fill_(nums, value, num_threads)

    if n_dummy:
        nums = nums[(0,) * n_dummy]


def full(
    shape: Union[int, Sequence[int]],
    fill_value: Union[np.number, int, float],
    dtype: Union[type, str] = None,
    order: str = 'C',
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Return a new array of given shape and type, filled with `fill_value`.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    shape: int | Sequence[int]
        desired shape
    fill_value: np.number | int | float
        scalar to fill array with
    dtype: type | str
        desired dtype to which `fill_value` will be casted. If not specified, `np.array(fill_value).dtype` will be used
    order: str
        whether to store multidimensional data in C or F contiguous order in memory
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Examples
    --------
    >>> x = full((2, 3, 4), 1.0)  # same as `np.ones((2, 3, 4))`
    >>> x = full((2, 3, 4), 1.5, dtype=int)  # same as np.ones((2, 3, 4), dtype=int)
    >>> x = full((2, 3, 4), 1, dtype='uint16')  # will fail because of unsupported uint16 dtype
    """
    nums = np.empty(shape, dtype=dtype, order=order)

    if dtype is not None:
        fill_value = nums.dtype.type(fill_value)

    fill_(nums, fill_value, num_threads, backend)

    return nums


def copy(
    nums: np.ndarray,
    output: np.ndarray = None,
    order: str = 'K',
    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
    backend: BackendLike = None,
) -> np.ndarray:
    """
    Return copy of the given array.

    Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.

    Parameters
    ----------
    nums: np.ndarray
        n-dimensional array
    output: np.ndarray
        array of the same shape and dtype as input, into which the copy is placed. By default, a new
        array is created
    order: str
        controls the memory layout of the copy. `C` means C-order, `F` means F-order, `A` means `F` if a is Fortran
        contiguous, `C` otherwise. `K` means match the layout of a as closely as possible
    num_threads: int
        the number of threads to use for computation. Default = 4. If negative value passed
        cpu count + num_threads + 1 threads will be used
    backend: BackendLike
        which backend to use. `cython` and `scipy` are available, `cython` is used by default

    Returns
    -------
    copy: np.ndarray
        copy of array

    Examples
    --------
    >>> copied = copy(x)
    >>> copied = copy(x, backend='Scipy')  # same as `np.copy`
    >>> copy(x, output=y)  # copied into `y`
    """
    backend = resolve_backend(backend, warn_stacklevel=3)
    if backend.name not in ('Scipy', 'Cython'):
        raise ValueError(f'Unsupported backend "{backend.name}".')

    ndim = nums.ndim
    dtype = nums.dtype
    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

    if output is None:
        output = np.empty_like(nums, dtype=dtype, order=order)
    elif output.shape != nums.shape:
        raise ValueError(f'Input array and output array shapes must be the same, got {nums.shape} vs {output.shape}.')
    elif dtype != output.dtype:
        raise ValueError(f'Input array and output array dtypes must be the same, got {dtype} vs {output.dtype}.')

    if dtype not in _TYPES or backend.name == 'Scipy' or ndim > 4:
        output = np.copy(nums, order=order)
        return output

    is_fp16 = dtype == np.float16
    src_copy = _choose_cython_copy(ndim, is_fp16, backend.fast)

    n_dummy = 3 - ndim if ndim <= 3 else 0

    if n_dummy:
        nums = nums[(None,) * n_dummy]
        output = output[(None,) * n_dummy]

    if is_fp16:
        src_copy(nums.view(np.uint16), output.view(np.uint16), num_threads)
    else:
        src_copy(nums, output, num_threads)

    if n_dummy:
        nums = nums[(0,) * n_dummy]
        output = output[(0,) * n_dummy]

    return output


# TODO: add parallel astype?
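For orientation, a minimal usage sketch of the new module, based only on the signatures and docstrings shown above; the array shapes, values and thread counts are arbitrary illustrative choices, not part of the release:

import numpy as np
from imops.numeric import copy, fill_, full, pointwise_add

x = np.random.rand(8, 64, 64).astype('float32')  # any int16/32/64 or fp16/32/64 array with ndim <= 4

y = pointwise_add(x, 1.0)                    # new array holding x + 1
pointwise_add(x, 1.0, output=x)              # in-place addition into x
fill_(x, 0)                                  # fill x with zeros in place
z = full((8, 64, 64), -1000, dtype='int16')  # like np.full, but filled by the parallel fill_
c = copy(x, order='C', backend='Scipy')      # with the Scipy backend this is just np.copy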
imops/pad.py
CHANGED
@@ -2,6 +2,8 @@ from typing import Callable, Sequence, Union

 import numpy as np

+from .backend import BackendLike
+from .numeric import _NUMERIC_DEFAULT_NUM_THREADS, copy
 from .utils import AxesLike, AxesParams, axis_from_dim, broadcast_axis, broadcast_to_axis, fill_by_indices


@@ -10,6 +12,8 @@ def pad(
     padding: Union[AxesLike, Sequence[Sequence[int]]],
     axis: AxesLike = None,
     padding_values: Union[AxesParams, Callable] = 0,
+    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+    backend: BackendLike = None,
 ) -> np.ndarray:
     """
     Pad `x` according to `padding` along the `axis`.
@@ -28,6 +32,11 @@ def pad(
     padding_values: Union[AxesParams, Callable]
         values to pad with, must be broadcastable to the resulting array.
         If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
+    num_threads: int
+        the number of threads to use for computation. Default = 4. If negative value passed
+        cpu count + num_threads + 1 threads will be used
+    backend: BackendLike
+        which backend to use. `cython` and `scipy` are available, `cython` is used by default

     Returns
     -------
@@ -52,10 +61,11 @@ def pad(

     new_shape = np.array(x.shape) + np.sum(padding, axis=1)
     new_x = np.array(padding_values, dtype=x.dtype)
-    new_x = np.broadcast_to(new_x, new_shape)
+    new_x = copy(np.broadcast_to(new_x, new_shape), order='C', num_threads=num_threads, backend=backend)

     start = padding[:, 0]
     end = np.where(padding[:, 1] != 0, -padding[:, 1], None)
+    # TODO: how to parallelize this?
     new_x[tuple(map(slice, start, end))] = x

     return new_x
@@ -67,6 +77,8 @@ def pad_to_shape(
     axis: AxesLike = None,
     padding_values: Union[AxesParams, Callable] = 0,
     ratio: AxesParams = 0.5,
+    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+    backend: BackendLike = None,
 ) -> np.ndarray:
     """
     Pad `x` to match `shape` along the `axis`.
@@ -85,6 +97,11 @@ def pad_to_shape(
     ratio: AxesParams
         float or sequence of floats describing what proportion of padding to apply on the left sides of padding axes.
         Remaining ratio of padding will be applied on the right sides
+    num_threads: int
+        the number of threads to use for computation. Default = 4. If negative value passed
+        cpu count + num_threads + 1 threads will be used
+    backend: BackendLike
+        which backend to use. `cython` and `scipy` are available, `cython` is used by default

     Returns
     -------
@@ -108,7 +125,7 @@ def pad_to_shape(
     start = (delta * ratio).astype(int)
     padding = np.array((start, delta - start)).T.astype(int)

-    return pad(x, padding, axis, padding_values=padding_values)
+    return pad(x, padding, axis, padding_values=padding_values, num_threads=num_threads, backend=backend)


 def pad_to_divisible(
@@ -118,6 +135,8 @@ def pad_to_divisible(
     padding_values: Union[AxesParams, Callable] = 0,
     ratio: AxesParams = 0.5,
     remainder: AxesLike = 0,
+    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+    backend: BackendLike = None,
 ) -> np.ndarray:
     """
     Pad `x` to be divisible by `divisor` along the `axis`.
@@ -137,6 +156,11 @@ def pad_to_divisible(
         Remaining ratio of padding will be applied on the right sides
     remainder: AxesLike
         `x` will be padded such that its shape gives the remainder `remainder` when divided by `divisor`
+    num_threads: int
+        the number of threads to use for computation. Default = 4. If negative value passed
+        cpu count + num_threads + 1 threads will be used
+    backend: BackendLike
+        which backend to use. `cython` and `scipy` are available, `cython` is used by default

     Returns
     -------
@@ -157,11 +181,18 @@ def pad_to_divisible(
     assert np.all(remainder >= 0)
     shape = np.maximum(np.array(x.shape)[list(axis)], remainder)

-    return pad_to_shape(
+    return pad_to_shape(
+        x, shape + (remainder - shape) % divisor, axis, padding_values, ratio, num_threads=num_threads, backend=backend
+    )


 def restore_crop(
-    x: np.ndarray,
+    x: np.ndarray,
+    box: np.ndarray,
+    shape: AxesLike,
+    padding_values: Union[AxesParams, Callable] = 0,
+    num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+    backend: BackendLike = None,
 ) -> np.ndarray:
     """
     Pad `x` to match `shape`. The left padding is taken equal to `box`'s start.
@@ -176,6 +207,11 @@ def restore_crop(
         shape to restore crop to
     padding_values: Union[AxesParams, Callable]
         values to pad with. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
+    num_threads: int
+        the number of threads to use for computation. Default = 4. If negative value passed
+        cpu count + num_threads + 1 threads will be used
+    backend: BackendLike
+        which backend to use. `cython` and `scipy` are available, `cython` is used by default

     Returns
     -------
@@ -203,7 +239,7 @@ def restore_crop(
     )

     padding = np.array([start, shape - stop], dtype=int).T
-    x = pad(x, padding, padding_values=padding_values)
+    x = pad(x, padding, padding_values=padding_values, num_threads=num_threads, backend=backend)
     assert all(np.array(x.shape) == shape)

     return x
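A short sketch of how the new num_threads and backend parameters thread through the padding helpers; the shapes and values below are arbitrary, and both keywords are simply forwarded down to imops.numeric.copy and pad as the hunks above show:

import numpy as np
from imops.pad import pad, pad_to_shape

x = np.zeros((3, 10, 10), dtype='float32')

# pad 2 voxels on both sides of the last two axes, filling with the array minimum
padded = pad(x, [[0, 0], [2, 2], [2, 2]], padding_values=np.min, num_threads=2)

# pad symmetrically up to a target shape; the extra keywords are passed down to pad()
bigger = pad_to_shape(x, (3, 16, 16), ratio=0.5, num_threads=2, backend='Cython')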
imops/radon.py
CHANGED
@@ -4,6 +4,7 @@ import numpy as np
 from scipy.fftpack import fft, ifft

 from .backend import BackendLike, resolve_backend
+from .numeric import copy
 from .src._backprojection import backprojection3d
 from .src._fast_backprojection import backprojection3d as fast_backprojection3d
 from .src._fast_radon import radon3d as fast_radon3d
@@ -52,7 +53,7 @@ def radon(
     >>> sinogram, fill_value = radon(image, return_fill=True)  # 2d image with fill value
     >>> sinogram = radon(image, axes=(-2, -1))  # nd image
     """
-    backend = resolve_backend(backend)
+    backend = resolve_backend(backend, warn_stacklevel=3)
     if backend.name not in ('Cython',):
         raise ValueError(f'Unsupported backend "{backend.name}".')

@@ -78,13 +79,14 @@ def radon(
     )

     if min_ != 0 or max_ != 0:
-
+        # FIXME: how to accurately pass `num_threads` and `backend` arguments to `copy`?
+        image = copy(image, order='C')
         image[:, outside_circle] = 0

     # TODO: f(arange)?
     limits = ((squared[:, None] + squared[None, :]) > (radius + 2) ** 2).sum(0) // 2

-    num_threads = normalize_num_threads(num_threads, backend)
+    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

     radon3d_ = fast_radon3d if backend.fast else radon3d

@@ -141,7 +143,7 @@ def inverse_radon(
     >>> image = inverse_radon(sinogram, fill_value=-1000)  # 2d image with fill value
     >>> image = inverse_radon(sinogram, axes=(-2, -1))  # nd image
     """
-    backend = resolve_backend(backend)
+    backend = resolve_backend(backend, warn_stacklevel=3)
     if backend.name not in ('Cython',):
         raise ValueError(f'Unsupported backend "{backend.name}".')

@@ -183,7 +185,7 @@ def inverse_radon(
     filtered_sinogram = filtered_sinogram.astype(dtype, copy=False)
     theta, xs = np.deg2rad(theta, dtype=dtype), xs.astype(dtype, copy=False)

-    num_threads = normalize_num_threads(num_threads, backend)
+    num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)

     backprojection3d_ = fast_backprojection3d if backend.fast else backprojection3d

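And a corresponding sketch for the radon module, assuming the num_threads keyword visible in the hunks above; the image here is a random 64x64 array purely for illustration:

import numpy as np
from imops.radon import inverse_radon, radon

image = np.random.rand(64, 64).astype('float32')

sinogram = radon(image, num_threads=4)           # only the Cython backend is accepted here
reconstruction = inverse_radon(sinogram, num_threads=4)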