imops 0.8.8__cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. _build_utils.py +113 -0
  2. imops/__init__.py +10 -0
  3. imops/__version__.py +1 -0
  4. imops/_configs.py +29 -0
  5. imops/backend.py +95 -0
  6. imops/box.py +74 -0
  7. imops/cpp/cpp_modules.cpython-38-i386-linux-gnu.so +0 -0
  8. imops/cpp/interp2d/delaunator/delaunator-header-only.hpp +33 -0
  9. imops/cpp/interp2d/delaunator/delaunator.cpp +645 -0
  10. imops/cpp/interp2d/delaunator/delaunator.hpp +170 -0
  11. imops/cpp/interp2d/interpolator.h +52 -0
  12. imops/cpp/interp2d/triangulator.h +198 -0
  13. imops/cpp/interp2d/utils.h +63 -0
  14. imops/cpp/main.cpp +13 -0
  15. imops/crop.py +120 -0
  16. imops/interp1d.py +207 -0
  17. imops/interp2d.py +120 -0
  18. imops/measure.py +228 -0
  19. imops/morphology.py +525 -0
  20. imops/numeric.py +384 -0
  21. imops/pad.py +253 -0
  22. imops/py.typed +0 -0
  23. imops/radon.py +247 -0
  24. imops/src/__init__.py +0 -0
  25. imops/src/_backprojection.c +27339 -0
  26. imops/src/_backprojection.cpython-38-i386-linux-gnu.so +0 -0
  27. imops/src/_fast_backprojection.c +27339 -0
  28. imops/src/_fast_backprojection.cpython-38-i386-linux-gnu.so +0 -0
  29. imops/src/_fast_measure.c +33810 -0
  30. imops/src/_fast_measure.cpython-38-i386-linux-gnu.so +0 -0
  31. imops/src/_fast_morphology.c +26089 -0
  32. imops/src/_fast_morphology.cpython-38-i386-linux-gnu.so +0 -0
  33. imops/src/_fast_numeric.c +48651 -0
  34. imops/src/_fast_numeric.cpython-38-i386-linux-gnu.so +0 -0
  35. imops/src/_fast_radon.c +30714 -0
  36. imops/src/_fast_radon.cpython-38-i386-linux-gnu.so +0 -0
  37. imops/src/_fast_zoom.c +57203 -0
  38. imops/src/_fast_zoom.cpython-38-i386-linux-gnu.so +0 -0
  39. imops/src/_measure.c +33810 -0
  40. imops/src/_measure.cpython-38-i386-linux-gnu.so +0 -0
  41. imops/src/_morphology.c +26089 -0
  42. imops/src/_morphology.cpython-38-i386-linux-gnu.so +0 -0
  43. imops/src/_numba_zoom.py +503 -0
  44. imops/src/_numeric.c +48651 -0
  45. imops/src/_numeric.cpython-38-i386-linux-gnu.so +0 -0
  46. imops/src/_radon.c +30714 -0
  47. imops/src/_radon.cpython-38-i386-linux-gnu.so +0 -0
  48. imops/src/_zoom.c +57203 -0
  49. imops/src/_zoom.cpython-38-i386-linux-gnu.so +0 -0
  50. imops/testing.py +57 -0
  51. imops/utils.py +205 -0
  52. imops/zoom.py +297 -0
  53. imops-0.8.8.dist-info/LICENSE +21 -0
  54. imops-0.8.8.dist-info/METADATA +218 -0
  55. imops-0.8.8.dist-info/RECORD +58 -0
  56. imops-0.8.8.dist-info/WHEEL +6 -0
  57. imops-0.8.8.dist-info/top_level.txt +2 -0
  58. imops.libs/libgomp-65f46eca.so.1.0.0 +0 -0
imops/numeric.py ADDED
@@ -0,0 +1,384 @@
+ from typing import Callable, Sequence, Union
+
+ import numpy as np
+
+ from .backend import BackendLike, resolve_backend
+ from .src._fast_numeric import (
+     _copy_3d as cython_fast_copy_3d,
+     _copy_3d_fp16 as cython_fast_copy_3d_fp16,
+     _copy_4d as cython_fast_copy_4d,
+     _copy_4d_fp16 as cython_fast_copy_4d_fp16,
+     _fill_3d as cython_fast_fill_3d,
+     _fill_4d as cython_fast_fill_4d,
+     _pointwise_add_array_3d as cython_fast_pointwise_add_array_3d,
+     _pointwise_add_array_3d_fp16 as cython_fast_pointwise_add_array_3d_fp16,
+     _pointwise_add_array_4d as cython_fast_pointwise_add_array_4d,
+     _pointwise_add_array_4d_fp16 as cython_fast_pointwise_add_array_4d_fp16,
+     _pointwise_add_value_3d as cython_fast_pointwise_add_value_3d,
+     _pointwise_add_value_3d_fp16 as cython_fast_pointwise_add_value_3d_fp16,
+     _pointwise_add_value_4d as cython_fast_pointwise_add_value_4d,
+     _pointwise_add_value_4d_fp16 as cython_fast_pointwise_add_value_4d_fp16,
+ )
+ from .src._numeric import (
+     _copy_3d as cython_copy_3d,
+     _copy_3d_fp16 as cython_copy_3d_fp16,
+     _copy_4d as cython_copy_4d,
+     _copy_4d_fp16 as cython_copy_4d_fp16,
+     _fill_3d as cython_fill_3d,
+     _fill_4d as cython_fill_4d,
+     _pointwise_add_array_3d as cython_pointwise_add_array_3d,
+     _pointwise_add_array_3d_fp16 as cython_pointwise_add_array_3d_fp16,
+     _pointwise_add_array_4d as cython_pointwise_add_array_4d,
+     _pointwise_add_array_4d_fp16 as cython_pointwise_add_array_4d_fp16,
+     _pointwise_add_value_3d as cython_pointwise_add_value_3d,
+     _pointwise_add_value_3d_fp16 as cython_pointwise_add_value_3d_fp16,
+     _pointwise_add_value_4d as cython_pointwise_add_value_4d,
+     _pointwise_add_value_4d_fp16 as cython_pointwise_add_value_4d_fp16,
+ )
+ from .utils import normalize_num_threads
+
+
+ _TYPES = (np.int16, np.int32, np.int64, np.float16, np.float32, np.float64)
+ _STR_TYPES = ('int16', 'int32', 'int64', 'float16', 'float32', 'float64')
+ # TODO: Decide which value to use. Functions below are quite fast and simple, so parallelization overhead is noticeable.
+ _NUMERIC_DEFAULT_NUM_THREADS = 4
+
+
+ # TODO: Maybe dict is better?
+ def _choose_cython_pointwise_add(ndim: int, summand_is_array: bool, is_fp16: bool, fast: bool) -> Callable:
+     assert ndim <= 4, ndim
+
+     if ndim <= 3:
+         if summand_is_array:
+             if is_fp16:
+                 return cython_fast_pointwise_add_array_3d_fp16 if fast else cython_pointwise_add_array_3d_fp16
+
+             return cython_fast_pointwise_add_array_3d if fast else cython_pointwise_add_array_3d
+
+         if is_fp16:
+             return cython_fast_pointwise_add_value_3d_fp16 if fast else cython_pointwise_add_value_3d_fp16
+
+         return cython_fast_pointwise_add_value_3d if fast else cython_pointwise_add_value_3d
+
+     if summand_is_array:
+         if is_fp16:
+             return cython_fast_pointwise_add_array_4d_fp16 if fast else cython_pointwise_add_array_4d_fp16
+
+         return cython_fast_pointwise_add_array_4d if fast else cython_pointwise_add_array_4d
+
+     if is_fp16:
+         return cython_fast_pointwise_add_value_4d_fp16 if fast else cython_pointwise_add_value_4d_fp16
+
+     return cython_fast_pointwise_add_value_4d if fast else cython_pointwise_add_value_4d
+
+
+ def _choose_cython_fill_(ndim: int, fast: bool) -> Callable:
+     assert ndim <= 4, ndim
+
+     if ndim <= 3:
+         return cython_fast_fill_3d if fast else cython_fill_3d
+
+     return cython_fast_fill_4d if fast else cython_fill_4d
+
+
+ def _choose_cython_copy(ndim: int, is_fp16: bool, fast: bool) -> Callable:
+     assert ndim <= 4, ndim
+
+     if ndim <= 3:
+         if is_fp16:
+             return cython_fast_copy_3d_fp16 if fast else cython_copy_3d_fp16
+
+         return cython_fast_copy_3d if fast else cython_copy_3d
+
+     if is_fp16:
+         return cython_fast_copy_4d_fp16 if fast else cython_copy_4d_fp16
+
+     return cython_fast_copy_4d if fast else cython_copy_4d
+
+
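The `# TODO: Maybe dict is better?` note above refers to replacing the nested `if` dispatch with a table or name lookup. A minimal sketch of that idea, assuming it lives in this module next to the imported kernel aliases (it is not part of the package):

```python
def _choose_cython_pointwise_add_by_name(ndim: int, summand_is_array: bool, is_fp16: bool, fast: bool) -> Callable:
    # Build the alias name from the four flags and look it up among the imports above.
    assert ndim <= 4, ndim
    name = 'cython_{}pointwise_add_{}_{}d{}'.format(
        'fast_' if fast else '',
        'array' if summand_is_array else 'value',
        4 if ndim == 4 else 3,
        '_fp16' if is_fp16 else '',
    )
    return globals()[name]
```

Whether this is clearer than the explicit branches is debatable, which is presumably why the TODO is still open.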
+ def pointwise_add(
+     nums: np.ndarray,
+     summand: Union[np.ndarray, int, float],
+     output: np.ndarray = None,
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> np.ndarray:
+     """
+     Perform pointwise addition between an array and another array or a scalar.
+
+     Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.
+
+     Parameters
+     ----------
+     nums: np.ndarray
+         n-dimensional array
+     summand: np.ndarray | int | float
+         array of the same shape or scalar
+     output: np.ndarray
+         array of the same shape as input, into which the output is placed. By default, a new
+         array is created
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Returns
+     -------
+     sum: np.ndarray
+         result of summation
+
+     Examples
+     --------
+     ```python
+     sum = pointwise_add(x, 1, x)  # in-place addition
+     sum = pointwise_add(x, 1, backend='Scipy')  # just `np.add`
+     sum = pointwise_add(x.astype('float32'), x.astype('float16'))  # will fail because of different dtypes
+     ```
+     """
+     backend = resolve_backend(backend, warn_stacklevel=3)
+     if backend.name not in ('Scipy', 'Cython'):
+         raise ValueError(f'Unsupported backend "{backend.name}".')
+
+     dtype = nums.dtype
+
+     if dtype not in _TYPES:
+         raise ValueError(f'Input array dtype must be one of {", ".join(_STR_TYPES)}, got {dtype}.')
+
+     if output is None:
+         output = np.empty_like(nums, dtype=dtype)
+     elif output.shape != nums.shape:
+         raise ValueError(f'Input array and output array shapes must be the same, got {nums.shape} vs {output.shape}.')
+     elif dtype != output.dtype:
+         raise ValueError(f'Input array and output array dtypes must be the same, got {dtype} vs {output.dtype}.')
+
+     summand_is_array = isinstance(summand, np.ndarray)
+     if summand_is_array:
+         if dtype != summand.dtype:
+             raise ValueError(f'Input and summand arrays must have same dtypes, got {dtype} vs {summand.dtype}.')
+     elif not isinstance(summand, (*_TYPES, *(int, float))):
+         raise ValueError(f'Summand dtype must be one of {", ".join(_STR_TYPES)}, got {type(summand)}.')
+     else:
+         summand = dtype.type(summand)
+
+     ndim = nums.ndim
+     num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)
+
+     if backend.name == 'Scipy' or ndim > 4:
+         np.add(nums, summand, out=output)
+         return output
+
+     is_fp16 = dtype == np.float16
+     src_pointwise_add = _choose_cython_pointwise_add(ndim, summand_is_array, is_fp16, backend.fast)
+
+     n_dummy = 3 - ndim if ndim <= 3 else 0
+
+     if n_dummy:
+         nums = nums[(None,) * n_dummy]
+         output = output[(None,) * n_dummy]
+         if summand_is_array:
+             summand = summand[(None,) * n_dummy]
+
+     if is_fp16:
+         output = src_pointwise_add(
+             nums.view(np.uint16), summand.view(np.uint16), output.view(np.uint16), num_threads
+         ).view(np.float16)
+     else:
+         output = src_pointwise_add(nums, summand, output, num_threads)
+
+     if n_dummy:
+         output = output[(0,) * n_dummy]
+
+     return output
+
+
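A detail of `pointwise_add` worth calling out: for float16 inputs the Cython kernels receive `uint16` views of the buffers, presumably because the generated C code has no native half-precision type. `ndarray.view` reinterprets the same bytes without copying or converting, so the round trip is lossless. A small self-contained check (not from the package):

```python
import numpy as np

x = np.arange(6, dtype=np.float16).reshape(2, 3)

bits = x.view(np.uint16)                         # reinterpret the same 2-byte items, no copy
assert np.array_equal(bits.view(np.float16), x)  # viewing back is lossless

bits[0, 0] = np.float16(7.5).view(np.uint16)     # writing through the view...
assert x[0, 0] == np.float16(7.5)                # ...mutates the float16 array in place

assert not np.array_equal(x.astype(np.uint16), bits)  # astype converts values, view reinterprets bits
```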
+ def fill_(
+     nums: np.ndarray,
+     value: Union[np.number, int, float],
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> None:
+     """
+     Fill the array with a scalar value.
+
+     Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.
+
+     Parameters
+     ----------
+     nums: np.ndarray
+         n-dimensional array
+     value: np.number | int | float
+         scalar
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Examples
+     --------
+     ```python
+     fill_(x, 1)
+     fill_(np.empty((2, 3, 4)), 42)
+     fill_(x.astype('uint16'), 3)  # will fail because of unsupported uint16 dtype
+     ```
+     """
+     backend = resolve_backend(backend, warn_stacklevel=3)
+     if backend.name not in ('Scipy', 'Cython'):
+         raise ValueError(f'Unsupported backend "{backend.name}".')
+
+     ndim = nums.ndim
+     dtype = nums.dtype
+
+     if dtype not in _TYPES or backend.name == 'Scipy' or ndim > 4:
+         nums.fill(value)
+         return
+
+     is_fp16 = dtype == np.float16
+     num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)
+     src_fill_ = _choose_cython_fill_(ndim, backend.fast)
+     value = dtype.type(value)
+
+     n_dummy = 3 - ndim if ndim <= 3 else 0
+
+     if n_dummy:
+         nums = nums[(None,) * n_dummy]
+
+     if is_fp16:
+         src_fill_(nums.view(np.uint16), value.view(np.uint16), num_threads)
+     else:
+         src_fill_(nums, value, num_threads)
+
+     if n_dummy:
+         nums = nums[(0,) * n_dummy]
+
+
+ def full(
+     shape: Union[int, Sequence[int]],
+     fill_value: Union[np.number, int, float],
+     dtype: Union[type, str] = None,
+     order: str = 'C',
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> np.ndarray:
+     """
+     Return a new array of given shape and dtype, filled with `fill_value`.
+
+     Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.
+
+     Parameters
+     ----------
+     shape: int | Sequence[int]
+         desired shape
+     fill_value: np.number | int | float
+         scalar to fill array with
+     dtype: type | str
+         desired dtype to which `fill_value` will be cast. If not specified, `np.array(fill_value).dtype` will be used
+     order: str
+         whether to store multidimensional data in C or F contiguous order in memory
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Examples
+     --------
+     ```python
+     x = full((2, 3, 4), 1.0)  # same as `np.ones((2, 3, 4))`
+     x = full((2, 3, 4), 1.5, dtype=int)  # same as `np.ones((2, 3, 4), dtype=int)`
+     x = full((2, 3, 4), 1, dtype='uint16')  # will fail because of unsupported uint16 dtype
+     ```
+     """
+     dtype = dtype or np.array(fill_value).dtype
+
+     nums = np.empty(shape, dtype=dtype, order=order)
+     fill_value = nums.dtype.type(fill_value)
+
+     fill_(nums, fill_value, num_threads, backend)
+
+     return nums
+
+
+ def copy(
+     nums: np.ndarray,
+     output: np.ndarray = None,
+     order: str = 'K',
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> np.ndarray:
+     """
+     Return a copy of the given array.
+
+     Uses a fast parallelizable implementation for fp16-32-64 and int16-32-64 inputs and ndim <= 4.
+
+     Parameters
+     ----------
+     nums: np.ndarray
+         n-dimensional array
+     output: np.ndarray
+         array of the same shape and dtype as input, into which the copy is placed. By default, a new
+         array is created
+     order: str
+         controls the memory layout of the copy. `C` means C-order, `F` means F-order, `A` means `F` if `nums` is
+         Fortran contiguous, `C` otherwise. `K` means match the layout of `nums` as closely as possible
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Returns
+     -------
+     copy: np.ndarray
+         copy of the array
+
+     Examples
+     --------
+     ```python
+     copied = copy(x)
+     copied = copy(x, backend='Scipy')  # same as `np.copy`
+     copy(x, output=y)  # copied into `y`
+     ```
+     """
+     backend = resolve_backend(backend, warn_stacklevel=3)
+     if backend.name not in ('Scipy', 'Cython'):
+         raise ValueError(f'Unsupported backend "{backend.name}".')
+
+     ndim = nums.ndim
+     dtype = nums.dtype
+     num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=3)
+
+     if output is None:
+         output = np.empty_like(nums, dtype=dtype, order=order)
+     elif output.shape != nums.shape:
+         raise ValueError(f'Input array and output array shapes must be the same, got {nums.shape} vs {output.shape}.')
+     elif dtype != output.dtype:
+         raise ValueError(f'Input array and output array dtypes must be the same, got {dtype} vs {output.dtype}.')
+
+     if dtype not in _TYPES or backend.name == 'Scipy' or ndim > 4:
+         output = np.copy(nums, order=order)
+         return output
+
+     is_fp16 = dtype == np.float16
+     src_copy = _choose_cython_copy(ndim, is_fp16, backend.fast)
+
+     n_dummy = 3 - ndim if ndim <= 3 else 0
+
+     if n_dummy:
+         nums = nums[(None,) * n_dummy]
+         output = output[(None,) * n_dummy]
+
+     if is_fp16:
+         src_copy(nums.view(np.uint16), output.view(np.uint16), num_threads)
+     else:
+         src_copy(nums, output, num_threads)
+
+     if n_dummy:
+         nums = nums[(0,) * n_dummy]
+         output = output[(0,) * n_dummy]
+
+     return output
+
+
+ # TODO: add parallel astype?
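The `n_dummy` bookkeeping used by `pointwise_add`, `fill_` and `copy` relies on a small indexing idiom: indexing with a tuple of `None`s prepends singleton axes, so 1-D and 2-D inputs can be routed to the 3-D kernels, and indexing with a tuple of zeros strips those axes again. A minimal illustration (not from the package):

```python
import numpy as np

x = np.arange(12, dtype=np.float32).reshape(3, 4)  # ndim == 2

n_dummy = 3 - x.ndim              # one leading singleton axis is missing
y = x[(None,) * n_dummy]          # same as x[None]; shape becomes (1, 3, 4)
assert y.shape == (1, 3, 4)
assert np.shares_memory(y, x)     # still a view, nothing copied

z = y[(0,) * n_dummy]             # drop the leading axes again
assert z.shape == x.shape
assert np.shares_memory(z, x)
```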
imops/pad.py ADDED
@@ -0,0 +1,253 @@
+ from typing import Callable, Sequence, Union
+
+ import numpy as np
+
+ from .backend import BackendLike
+ from .numeric import _NUMERIC_DEFAULT_NUM_THREADS, copy
+ from .utils import AxesLike, AxesParams, axis_from_dim, broadcast_axis, broadcast_to_axis, fill_by_indices
+
+
+ def pad(
+     x: np.ndarray,
+     padding: Union[AxesLike, Sequence[Sequence[int]]],
+     axis: AxesLike = None,
+     padding_values: Union[AxesParams, Callable] = 0,
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> np.ndarray:
+     """
+     Pad `x` according to `padding` along the `axis`.
+
+     Parameters
+     ----------
+     x: np.ndarray
+         n-dimensional array to pad
+     padding: Union[AxesLike, Sequence[Sequence[int]]]
+         if a 2D array [[start_1, stop_1], ..., [start_n, stop_n]] - specifies individual padding
+         for each axis from `axis`. The length of the array must either be equal to 1 or match the length of `axis`.
+         If a 1D array [val_1, ..., val_n] - same as [[val_1, val_1], ..., [val_n, val_n]].
+         If a scalar (val) - same as [[val, val]]
+     axis: AxesLike
+         axis along which `x` will be padded
+     padding_values: Union[AxesParams, Callable]
+         values to pad with, must be broadcastable to the resulting array.
+         If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Returns
+     -------
+     padded: np.ndarray
+         padded array
+
+     Examples
+     --------
+     ```python
+     padded = pad(x, 2)  # pad 2 zeros on each side of each axis
+     padded = pad(x, [1, 1], axis=(-1, -2))  # pad 1 zero on each side of the last 2 axes
+     ```
+     """
+     x = np.asarray(x)
+     padding = np.asarray(padding)
+     if padding.ndim < 2:
+         padding = padding.reshape(-1, 1)
+     axis = axis_from_dim(axis, x.ndim)
+     padding = np.asarray(fill_by_indices(np.zeros((x.ndim, 2), dtype=int), np.atleast_2d(padding), axis))
+     if (padding < 0).any():
+         raise ValueError(f'Padding must be non-negative: {padding.tolist()}.')
+     if callable(padding_values):
+         padding_values = padding_values(x)
+
+     new_shape = np.array(x.shape) + np.sum(padding, axis=1)
+     new_x = np.array(padding_values, dtype=x.dtype)
+     new_x = copy(np.broadcast_to(new_x, new_shape), order='C', num_threads=num_threads, backend=backend)
+
+     start = padding[:, 0]
+     end = np.where(padding[:, 1] != 0, -padding[:, 1], None)
+     # TODO: how to parallelize this?
+     new_x[tuple(map(slice, start, end))] = x
+
+     return new_x
+
+
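One subtle line in `pad` above is the computation of `end`: a right padding of zero cannot be written as `slice(start, -0)`, since `-0 == 0` would produce an empty slice, so `None` ("up to the end of the axis") is substituted instead. A standalone illustration of the same expressions (not from the package):

```python
import numpy as np

padding = np.array([[1, 2], [3, 0]])  # right padding of the second axis is 0

start = padding[:, 0]
end = np.where(padding[:, 1] != 0, -padding[:, 1], None)
print(end)  # [-2 None]

x = np.ones((4, 5), dtype=int)
new_x = np.zeros((4 + 3, 5 + 3), dtype=int)    # container padded by 3 along each axis
new_x[tuple(map(slice, start, end))] = x       # slices are [1:-2, 3:None]
assert new_x[1:-2, 3:].sum() == x.sum()

assert new_x[1:-2, 3:-0].size == 0  # the naive -0 end would select nothing
```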
+ def pad_to_shape(
+     x: np.ndarray,
+     shape: AxesLike,
+     axis: AxesLike = None,
+     padding_values: Union[AxesParams, Callable] = 0,
+     ratio: AxesParams = 0.5,
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> np.ndarray:
+     """
+     Pad `x` to match `shape` along the `axis`.
+
+     Parameters
+     ----------
+     x: np.ndarray
+         n-dimensional array to pad
+     shape: AxesLike
+         final shape
+     axis: AxesLike
+         axis along which `x` will be padded
+     padding_values: Union[AxesParams, Callable]
+         values to pad with, must be broadcastable to the resulting array.
+         If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
+     ratio: AxesParams
+         float or sequence of floats describing what proportion of padding to apply on the left sides of padding axes.
+         The remaining ratio of padding will be applied on the right sides
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Returns
+     -------
+     padded: np.ndarray
+         padded array
+
+     Examples
+     --------
+     ```python
+     padded = pad_to_shape(x, [4, 5, 6])  # pad a 3d array
+     padded = pad_to_shape(x, [4, 5], axis=[0, 1], ratio=0)  # pad the first 2 axes on the right
+     ```
+     """
+     x = np.asarray(x)
+     axis, shape, ratio = broadcast_axis(axis, x.ndim, shape, ratio)
+
+     old_shape = np.array(x.shape)[list(axis)]
+     if (old_shape > shape).any():
+         shape = fill_by_indices(x.shape, shape, axis)
+         raise ValueError(f'The resulting shape cannot be smaller than the original: {x.shape} vs {shape}.')
+
+     delta = shape - old_shape
+     start = (delta * ratio).astype(int)
+     padding = np.array((start, delta - start)).T.astype(int)
+
+     return pad(x, padding, axis, padding_values=padding_values, num_threads=num_threads, backend=backend)
+
+
+ def pad_to_divisible(
+     x: np.ndarray,
+     divisor: AxesLike,
+     axis: AxesLike = None,
+     padding_values: Union[AxesParams, Callable] = 0,
+     ratio: AxesParams = 0.5,
+     remainder: AxesLike = 0,
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> np.ndarray:
+     """
+     Pad `x` to be divisible by `divisor` along the `axis`.
+
+     Parameters
+     ----------
+     x: np.ndarray
+         n-dimensional array to pad
+     divisor: AxesLike
+         float or sequence of floats that the padded array's shape will be divisible by
+     axis: AxesLike
+         axis along which the array will be padded. If None - the last `len(divisor)` axes are used
+     padding_values: Union[AxesParams, Callable]
+         values to pad with. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
+     ratio: AxesParams
+         float or sequence of floats describing what proportion of padding to apply on the left sides of padding axes.
+         The remaining ratio of padding will be applied on the right sides
+     remainder: AxesLike
+         `x` will be padded such that its shape gives the remainder `remainder` when divided by `divisor`
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Returns
+     -------
+     padded: np.ndarray
+         padded array
+
+     Examples
+     --------
+     ```python
+     x  # array of shape [2, 3, 4]
+     padded = pad_to_divisible(x, 6)  # pad to shape [6, 6, 6]
+     padded = pad_to_divisible(x, [4, 3], axis=[0, 1], ratio=1)  # pad the first 2 axes on the left, shape - [4, 3, 4]
+     padded = pad_to_divisible(x, 3, remainder=1)  # pad to shape [4, 4, 4]
+     ```
+     """
+     x = np.asarray(x)
+     axis = axis_from_dim(axis, x.ndim)
+     divisor, remainder, ratio = broadcast_to_axis(axis, divisor, remainder, ratio)
+
+     assert np.all(remainder >= 0)
+     shape = np.maximum(np.array(x.shape)[list(axis)], remainder)
+
+     return pad_to_shape(
+         x, shape + (remainder - shape) % divisor, axis, padding_values, ratio, num_threads=num_threads, backend=backend
+     )
+
+
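The target shape in `pad_to_divisible` is `shape + (remainder - shape) % divisor`, computed after clipping `shape` to be at least `remainder`: the modulo term is the smallest non-negative amount of padding that makes each axis congruent to `remainder` modulo `divisor`. Working through the `pad_to_divisible(x, 3, remainder=1)` example from the docstring above (a sketch, not from the package):

```python
import numpy as np

shape = np.array([2, 3, 4])      # current shape, already >= remainder
divisor = np.array([3, 3, 3])
remainder = np.array([1, 1, 1])

target = shape + (remainder - shape) % divisor
assert (target % divisor == remainder).all()
assert (target >= shape).all()
print(target)  # [4 4 4], matching the docstring example
```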
+ def restore_crop(
+     x: np.ndarray,
+     box: np.ndarray,
+     shape: AxesLike,
+     padding_values: Union[AxesParams, Callable] = 0,
+     num_threads: int = _NUMERIC_DEFAULT_NUM_THREADS,
+     backend: BackendLike = None,
+ ) -> np.ndarray:
+     """
+     Pad `x` to match `shape`. The left padding is taken equal to the `box`'s start.
+
+     Parameters
+     ----------
+     x: np.ndarray
+         n-dimensional array to pad
+     box: np.ndarray
+         array of shape (2, x.ndim) describing the crop boundaries
+     shape: AxesLike
+         shape to restore the crop to
+     padding_values: Union[AxesParams, Callable]
+         values to pad with. If Callable (e.g. `numpy.min`) - `padding_values(x)` will be used
+     num_threads: int
+         the number of threads to use for computation. Default = 4. If a negative value is passed,
+         cpu count + num_threads + 1 threads will be used
+     backend: BackendLike
+         which backend to use. `cython` and `scipy` are available, `cython` is used by default
+
+     Returns
+     -------
+     padded: np.ndarray
+         padded array
+
+     Examples
+     --------
+     ```python
+     x  # array of shape [2, 3, 4]
+     padded = restore_crop(x, np.array([[0, 0, 0], [2, 3, 4]]), [4, 4, 4])  # pad to shape [4, 4, 4]
+     padded = restore_crop(x, np.array([[0, 0, 0], [1, 1, 1]]), [4, 4, 4])  # fails, the box is inconsistent with the array
+     padded = restore_crop(x, np.array([[1, 2, 3], [3, 5, 7]]), [3, 5, 7])  # pad to shape [3, 5, 7]
+     ```
+     """
+     start, stop = np.asarray(box)
+
+     assert len(shape) == x.ndim
+     assert len(start) == len(stop) == x.ndim
+
+     x = np.asarray(x)
+
+     if (stop > shape).any() or (stop - start != x.shape).any():
+         raise ValueError(
+             f'The input array (of shape {x.shape}) was not obtained by cropping a '
+             f'box {start, stop} from the shape {shape}.'
+         )
+
+     padding = np.array([start, shape - stop], dtype=int).T
+     x = pad(x, padding, padding_values=padding_values, num_threads=num_threads, backend=backend)
+     assert all(np.array(x.shape) == shape)
+
+     return x
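`restore_crop` is the inverse of cropping an array to a box: given the box and the original shape, it pads the crop back into place. A round-trip sketch that uses plain NumPy slicing for the crop itself (the `crop` module listed in this diff is not shown here, so none of its helpers are assumed):

```python
import numpy as np

from imops.pad import restore_crop

original = np.zeros((4, 5, 6), dtype=np.float32)
original[1:3, 2:4, 3:5] = 1

box = np.array([[1, 2, 3], [3, 4, 5]])  # [start, stop] per axis
cropped = original[tuple(slice(s, e) for s, e in zip(*box))]
assert cropped.shape == (2, 2, 2)

restored = restore_crop(cropped, box, original.shape)
assert restored.shape == original.shape
assert np.array_equal(restored, original)  # the padding value (0) matches the background, so the round trip is exact
```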
imops/py.typed ADDED
File without changes