imops 0.8.1__cp36-cp36m-win_amd64.whl → 0.8.3__cp36-cp36m-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of imops might be problematic.

imops/src/_numeric.pyx CHANGED
@@ -8,10 +8,19 @@
  import numpy as np

  cimport numpy as np
+ from libc.stdint cimport uint16_t

  from cython.parallel import prange


+ # https://stackoverflow.com/questions/47421443/using-half-precision-numpy-floats-in-cython
+ cdef extern from "numpy/halffloat.h":
+     ctypedef uint16_t npy_half
+
+     float npy_half_to_float(npy_half h) nogil
+     npy_half npy_float_to_half(float f) nogil
+
+
  ctypedef fused NUM:
      short
      int
@@ -20,45 +29,214 @@ ctypedef fused NUM:
      double


- def _parallel_sum(NUM[:] nums, Py_ssize_t num_threads) -> NUM:
-     cdef NUM res = 0
-     cdef Py_ssize_t i, len_nums = len(nums)
+ ctypedef fused NUM_AND_NPY_HALF:
+     NUM
+     npy_half

-     for i in prange(len_nums, num_threads=num_threads, nogil=True):
-         res += nums[i]

-     return res
-
-
- def _parallel_pointwise_mul(
+ # TODO: Generalize code below to n-d
+ def _pointwise_add_array_3d(
      NUM[:, :, :] nums1,
      NUM[:, :, :] nums2,
-     Py_ssize_t[:] res_shape,
-     Py_ssize_t num_threads
+     NUM[:, :, :] out,
+     Py_ssize_t num_threads,
  ) -> np.ndarray:
-     cdef NUM[:, :, ::1] contiguous_nums1 = np.ascontiguousarray(nums1), contiguous_nums2 = np.ascontiguousarray(nums2)
-     cdef Py_ssize_t rows = res_shape[0], cols = res_shape[1], dims = res_shape[2]
+     cdef Py_ssize_t rows = out.shape[0], cols = out.shape[1], dims = out.shape[2]
+     cdef Py_ssize_t i, j, k
+
+     for i in prange(rows, nogil=True, num_threads=num_threads):
+         for j in prange(cols):
+             for k in prange(dims):
+                 out[i, j, k] = nums1[i, j, k] + nums2[i, j, k]
+
+     return np.asarray(out)

-     cdef char[:] broadcast_mask1 = np.array([x == y for x, y in zip(res_shape, nums1.shape)], dtype=np.int8)
-     cdef char[:] broadcast_mask2 = np.array([x == y for x, y in zip(res_shape, nums2.shape)], dtype=np.int8)

-     cdef NUM[:, :, ::1] mul = np.empty_like(nums1, shape=res_shape)
+ def _pointwise_add_array_4d(
+     NUM[:, :, :, :] nums1,
+     NUM[:, :, :, :] nums2,
+     NUM[:, :, :, :] out,
+     Py_ssize_t num_threads,
+ ) -> np.ndarray:
+     cdef Py_ssize_t dim1 = out.shape[0], dim2 = out.shape[1], dim3 = out.shape[2], dim4 = out.shape[3]
+     cdef Py_ssize_t i1, i2, i3, i4
+
+     for i1 in prange(dim1, nogil=True, num_threads=num_threads):
+         for i2 in prange(dim2):
+             for i3 in prange(dim3):
+                 for i4 in prange(dim4):
+                     out[i1, i2, i3, i4] = nums1[i1, i2, i3, i4] + nums2[i1, i2, i3, i4]
+
+     return np.asarray(out)
+
+
+ def _pointwise_add_value_3d(
+     NUM[:, :, :] nums,
+     NUM value,
+     NUM[:, :, :] out,
+     Py_ssize_t num_threads,
+ ) -> np.ndarray:
+     cdef Py_ssize_t rows = out.shape[0], cols = out.shape[1], dims = out.shape[2]
      cdef Py_ssize_t i, j, k

      for i in prange(rows, nogil=True, num_threads=num_threads):
          for j in prange(cols):
              for k in prange(dims):
-                 mul[i, j, k] = (
-                     contiguous_nums1[
-                         i * broadcast_mask1[0],
-                         j * broadcast_mask1[1],
-                         k * broadcast_mask1[2]
-                     ] *
-                     contiguous_nums2[
-                         i * broadcast_mask2[0],
-                         j * broadcast_mask2[1],
-                         k * broadcast_mask2[2]
-                     ]
-                 )
-
-     return np.asarray(mul)
+                 out[i, j, k] = nums[i, j, k] + value
+
+     return np.asarray(out)
+
+
+ def _pointwise_add_value_4d(
+     NUM[:, :, :, :] nums,
+     NUM value,
+     NUM[:, :, :, :] out,
+     Py_ssize_t num_threads,
+ ) -> np.ndarray:
+     cdef Py_ssize_t dim1 = out.shape[0], dim2 = out.shape[1], dim3 = out.shape[2], dim4 = out.shape[3]
+     cdef Py_ssize_t i1, i2, i3, i4
+
+     for i1 in prange(dim1, nogil=True, num_threads=num_threads):
+         for i2 in prange(dim2):
+             for i3 in prange(dim3):
+                 for i4 in prange(dim4):
+                     out[i1, i2, i3, i4] = nums[i1, i2, i3, i4] + value
+
+     return np.asarray(out)
+
+
+ def _pointwise_add_array_3d_fp16(
+     npy_half[:, :, :] nums1,
+     npy_half[:, :, :] nums2,
+     npy_half[:, :, :] out,
+     Py_ssize_t num_threads,
+ ) -> np.ndarray:
+     cdef Py_ssize_t rows = out.shape[0], cols = out.shape[1], dims = out.shape[2]
+     cdef Py_ssize_t i, j, k
+
+     for i in prange(rows, nogil=True, num_threads=num_threads):
+         for j in prange(cols):
+             for k in prange(dims):
+                 out[i, j, k] = (npy_float_to_half(npy_half_to_float(nums1[i, j, k]) +
+                                                   npy_half_to_float(nums2[i, j, k])))
+
+     return np.asarray(out)
+
+
+ def _pointwise_add_array_4d_fp16(
+     npy_half[:, :, :, :] nums1,
+     npy_half[:, :, :, :] nums2,
+     npy_half[:, :, :, :] out,
+     Py_ssize_t num_threads,
+ ) -> np.ndarray:
+     cdef Py_ssize_t dim1 = out.shape[0], dim2 = out.shape[1], dim3 = out.shape[2], dim4 = out.shape[3]
+     cdef Py_ssize_t i1, i2, i3, i4
+
+     for i1 in prange(dim1, nogil=True, num_threads=num_threads):
+         for i2 in prange(dim2):
+             for i3 in prange(dim3):
+                 for i4 in prange(dim4):
+                     out[i1, i2, i3, i4] = (npy_float_to_half(npy_half_to_float(nums1[i1, i2, i3, i4]) +
+                                                              npy_half_to_float(nums2[i1, i2, i3, i4])))
+
+     return np.asarray(out)
+
+
+ def _pointwise_add_value_3d_fp16(
+     npy_half[:, :, :] nums,
+     npy_half value,
+     npy_half[:, :, :] out,
+     Py_ssize_t num_threads,
+ ) -> np.ndarray:
+     cdef Py_ssize_t rows = out.shape[0], cols = out.shape[1], dims = out.shape[2]
+     cdef Py_ssize_t i, j, k
+
+     for i in prange(rows, nogil=True, num_threads=num_threads):
+         for j in prange(cols):
+             for k in prange(dims):
+                 out[i, j, k] = npy_float_to_half(npy_half_to_float(nums[i, j, k]) + npy_half_to_float(value))
+
+     return np.asarray(out)
+
+
+ def _pointwise_add_value_4d_fp16(
+     npy_half[:, :, :, :] nums,
+     npy_half value,
+     npy_half[:, :, :, :] out,
+     Py_ssize_t num_threads,
+ ) -> np.ndarray:
+     cdef Py_ssize_t dim1 = out.shape[0], dim2 = out.shape[1], dim3 = out.shape[2], dim4 = out.shape[3]
+     cdef Py_ssize_t i1, i2, i3, i4
+
+     for i1 in prange(dim1, nogil=True, num_threads=num_threads):
+         for i2 in prange(dim2):
+             for i3 in prange(dim3):
+                 for i4 in prange(dim4):
+                     out[i1, i2, i3, i4] = (npy_float_to_half(npy_half_to_float(nums[i1, i2, i3, i4]) +
+                                                              npy_half_to_float(value)))
+
+     return np.asarray(out)
+
+
+ def _fill_3d(NUM_AND_NPY_HALF[:, :, :] nums, NUM_AND_NPY_HALF value, Py_ssize_t num_threads) -> None:
+     cdef Py_ssize_t rows = nums.shape[0], cols = nums.shape[1], dims = nums.shape[2]
+     cdef Py_ssize_t i, j, k
+
+     for i in prange(rows, nogil=True, num_threads=num_threads):
+         for j in prange(cols):
+             for k in prange(dims):
+                 nums[i, j, k] = value
+
+
+ def _fill_4d(NUM_AND_NPY_HALF[:, :, :, :] nums, NUM_AND_NPY_HALF value, Py_ssize_t num_threads) -> None:
+     cdef Py_ssize_t dim1 = nums.shape[0], dim2 = nums.shape[1], dim3 = nums.shape[2], dim4 = nums.shape[3]
+     cdef Py_ssize_t i1, i2, i3, i4
+
+     for i1 in prange(dim1, nogil=True, num_threads=num_threads):
+         for i2 in prange(dim2):
+             for i3 in prange(dim3):
+                 for i4 in prange(dim4):
+                     nums[i1, i2, i3, i4] = value
+
+
+ # FIXME: somehow `const NUM_AND_NPY_HALF` is not working
+ cpdef void _copy_3d(const NUM[:, :, :] nums1, NUM[:, :, :] nums2, Py_ssize_t num_threads):
+     cdef Py_ssize_t rows = nums1.shape[0], cols = nums1.shape[1], dims = nums1.shape[2]
+     cdef Py_ssize_t i, j, k
+
+     for i in prange(rows, nogil=True, num_threads=num_threads):
+         for j in prange(cols):
+             for k in prange(dims):
+                 nums2[i, j, k] = nums1[i, j, k]
+
+
+ cpdef void _copy_4d(const NUM[:, :, :, :] nums1, NUM[:, :, :, :] nums2, Py_ssize_t num_threads):
+     cdef Py_ssize_t dim1 = nums1.shape[0], dim2 = nums1.shape[1], dim3 = nums1.shape[2], dim4 = nums1.shape[3]
+     cdef Py_ssize_t i1, i2, i3, i4
+
+     for i1 in prange(dim1, nogil=True, num_threads=num_threads):
+         for i2 in prange(dim2):
+             for i3 in prange(dim3):
+                 for i4 in prange(dim4):
+                     nums2[i1, i2, i3, i4] = nums1[i1, i2, i3, i4]
+
+
+ cpdef void _copy_3d_fp16(const npy_half[:, :, :] nums1, npy_half[:, :, :] nums2, Py_ssize_t num_threads):
+     cdef Py_ssize_t rows = nums1.shape[0], cols = nums1.shape[1], dims = nums1.shape[2]
+     cdef Py_ssize_t i, j, k
+
+     for i in prange(rows, nogil=True, num_threads=num_threads):
+         for j in prange(cols):
+             for k in prange(dims):
+                 nums2[i, j, k] = nums1[i, j, k]
+
+
+ cpdef void _copy_4d_fp16(const npy_half[:, :, :, :] nums1, npy_half[:, :, :, :] nums2, Py_ssize_t num_threads):
+     cdef Py_ssize_t dim1 = nums1.shape[0], dim2 = nums1.shape[1], dim3 = nums1.shape[2], dim4 = nums1.shape[3]
+     cdef Py_ssize_t i1, i2, i3, i4
+
+     for i1 in prange(dim1, nogil=True, num_threads=num_threads):
+         for i2 in prange(dim2):
+             for i3 in prange(dim3):
+                 for i4 in prange(dim4):
+                     nums2[i1, i2, i3, i4] = nums1[i1, i2, i3, i4]
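The `_fp16` kernels above follow a common pattern for half precision in Cython: values are stored as `npy_half` (a `uint16` bit pattern), converted to `float` for the actual arithmetic, and converted back on store, since C has no native half type. A minimal NumPy sketch of the same round-trip; `add_fp16` is illustrative and not part of imops:

```python
import numpy as np

def add_fp16(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Upcast float16 inputs to float32, add, and cast the result back to float16."""
    assert a.dtype == np.float16 and b.dtype == np.float16
    # mirrors what the Cython kernels do per element via npy_half_to_float / npy_float_to_half
    return (a.astype(np.float32) + b.astype(np.float32)).astype(np.float16)

x = np.ones((2, 3, 4), dtype=np.float16)
y = np.full((2, 3, 4), 0.5, dtype=np.float16)
print(add_fp16(x, y).dtype)  # float16
```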
Binary files changed (contents not shown).
imops/src/_zoom.pyx CHANGED
@@ -17,6 +17,7 @@ from libc.math cimport floor, sqrt

  ctypedef cython.floating FLOAT
  ctypedef fused NUM:
+     np.uint8_t
      short
      int
      long long
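Adding `np.uint8_t` to the `NUM` fused type extends these kernels to 8-bit inputs; NumPy `bool` arrays share the same one-byte layout, which is presumably how the new bool support in `zoom` is routed through this specialization. A small NumPy illustration of that assumption (not imops internals):

```python
import numpy as np

mask = np.random.rand(4, 5, 6) > 0.5   # bool array
as_u8 = mask.view(np.uint8)            # same buffer reinterpreted as uint8, no copy
print(mask.dtype, as_u8.dtype, as_u8.base is mask)  # bool uint8 True
```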
imops/utils.py CHANGED
@@ -1,39 +1,92 @@
  import os
+ from contextlib import contextmanager
  from itertools import permutations
- from typing import Callable, Optional, Sequence, Union
+ from typing import Callable, Optional, Sequence, Tuple, Union
  from warnings import warn

  import numpy as np

- from .backend import BACKEND2NUM_THREADS_VAR_NAME, SINGLE_THREADED_BACKENDS, Backend
+ from .backend import BACKEND_NAME2ENV_NUM_THREADS_VAR_NAME, SINGLE_THREADED_BACKENDS, Backend


  AxesLike = Union[int, Sequence[int]]
  AxesParams = Union[float, Sequence[float]]

  ZOOM_SRC_DIM = 4
+ # TODO: define imops-specific environment variable like `OMP_NUM_THREADS`?
+ IMOPS_NUM_THREADS = None


- def normalize_num_threads(num_threads: int, backend: Backend):
+ def set_num_threads(num_threads: int) -> int:
+     assert isinstance(num_threads, int) or num_threads is None, 'Number of threads must be int value or None.'
+     global IMOPS_NUM_THREADS
+     current = IMOPS_NUM_THREADS
+     IMOPS_NUM_THREADS = num_threads
+     return current
+
+
+ @contextmanager
+ def imops_num_threads(num_threads: int):
+     previous = set_num_threads(num_threads)
+     try:
+         yield
+     finally:
+         set_num_threads(previous)
+
+
+ def normalize_num_threads(num_threads: int, backend: Backend, warn_stacklevel: int = 1) -> int:
+     """Calculate the effective number of threads"""
+
+     global IMOPS_NUM_THREADS
      if backend.name in SINGLE_THREADED_BACKENDS:
          if num_threads != -1:
-             warn(f'"{backend.name}" backend is single-threaded. Setting `num_threads` has no effect.')
+             warn(
+                 f'"{backend.name}" backend is single-threaded. Setting `num_threads` has no effect.',
+                 stacklevel=warn_stacklevel,
+             )
          return 1
+
+     env_num_threads_var_name = BACKEND_NAME2ENV_NUM_THREADS_VAR_NAME[backend.name]
+     # here we also handle the case `env_num_threads_var_name`=" " gracefully
+     env_num_threads = os.environ.get(env_num_threads_var_name, '').strip()
+     env_num_threads = int(env_num_threads) if env_num_threads else None
+     # TODO: maybe let user set the absolute maximum number of threads?
+     num_available_cpus = len(os.sched_getaffinity(0))
+
+     max_num_threads = min(filter(bool, [IMOPS_NUM_THREADS, env_num_threads, num_available_cpus]))
+
      if num_threads >= 0:
          # FIXME
          if backend.name == 'Numba':
              warn(
                  'Setting `num_threads` has no effect with "Numba" backend. '
-                 'Use `NUMBA_NUM_THREADS` environment variable.'
+                 'Use `NUMBA_NUM_THREADS` environment variable.',
+                 stacklevel=warn_stacklevel,
              )
-         return num_threads
-
-     num_threads_var_name = BACKEND2NUM_THREADS_VAR_NAME[backend.name]
-     # here we also handle the case `num_threads_var`=" " gracefully
-     env_num_threads = os.environ.get(num_threads_var_name, '').strip()
-     max_threads = int(env_num_threads) if env_num_threads else len(os.sched_getaffinity(0))
-
-     return max_threads + num_threads + 1
+             return num_threads
+
+         if num_threads > max_num_threads:
+             if max_num_threads == IMOPS_NUM_THREADS:
+                 warn(
+                     f'Required number of threads ({num_threads}) is greater than `IMOPS_NUM_THREADS` '
+                     f'({IMOPS_NUM_THREADS}). Using {IMOPS_NUM_THREADS} threads.',
+                     stacklevel=warn_stacklevel,
+                 )
+             elif max_num_threads == env_num_threads:
+                 warn(
+                     f'Required number of threads ({num_threads}) is greater than `{env_num_threads_var_name}` '
+                     f'({env_num_threads}). Using {env_num_threads} threads.',
+                     stacklevel=warn_stacklevel,
+                 )
+             else:
+                 warn(
+                     f'Required number of threads ({num_threads}) is greater than number of available CPU-s '
+                     f'({num_available_cpus}). Using {num_available_cpus} threads.',
+                     stacklevel=warn_stacklevel,
+                 )
+         return min(num_threads, max_num_threads)
+
+     return max_num_threads + num_threads + 1


  def get_c_contiguous_permutaion(array: np.ndarray) -> Optional[np.ndarray]:
@@ -104,3 +157,50 @@ def composition_args(f: Callable, g: Callable) -> Callable:
          return f(g(*args), *args[1:])

      return inner
+
+
+ def morphology_composition_args(f, g) -> Callable:
+     def wrapper(
+         image: np.ndarray,
+         footprint: np.ndarray,
+         output: np.ndarray,
+         num_threads: int,
+     ):
+         temp = np.empty_like(image, dtype=bool)
+         temp = g(image, footprint, temp, num_threads)
+
+         return f(temp, footprint, output, num_threads)
+
+     return wrapper
+
+
+ def build_slices(start: Sequence[int], stop: Sequence[int] = None, step: Sequence[int] = None) -> Tuple[slice, ...]:
+     """
+     Returns a tuple of slices built from `start` and `stop` with `step`.
+
+     Examples
+     --------
+     >>> build_slices([1, 2, 3], [4, 5, 6])
+     (slice(1, 4), slice(2, 5), slice(3, 6))
+     >>> build_slices([10, 11])
+     (slice(10), slice(11))
+     """
+
+     check_len(*filter(lambda x: x is not None, [start, stop, step]))
+
+     if stop is None and step is None:
+         return tuple(map(slice, start))
+
+     args = [
+         start,
+         stop if stop is not None else [None for _ in start],
+         step if step is not None else [None for _ in start],
+     ]
+
+     return tuple(map(slice, *args))
+
+
+ def check_len(*args) -> None:
+     lengths = list(map(len, args))
+     if any(length != lengths[0] for length in lengths):
+         raise ValueError(f'Arguments of equal length are required: {", ".join(map(str, lengths))}')
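The reworked `normalize_num_threads` now caps explicit requests at an effective maximum, taken as the minimum of the user-set `IMOPS_NUM_THREADS`, the backend's environment variable (looked up via `BACKEND_NAME2ENV_NUM_THREADS_VAR_NAME`), and the CPUs available to the process, while negative values keep their Python-style meaning (`-1` is the maximum, `-2` one less, and so on). A rough standalone sketch of that arithmetic, not the real imops function:

```python
import os

def resolve_threads(num_threads: int, imops_limit=None, env_limit=None) -> int:
    """Sketch of the thread-count resolution used by normalize_num_threads."""
    available = len(os.sched_getaffinity(0))  # CPUs this process may use (POSIX only)
    max_threads = min(filter(bool, [imops_limit, env_limit, available]))
    if num_threads >= 0:
        return min(num_threads, max_threads)   # explicit requests are capped
    return max_threads + num_threads + 1       # -1 -> max, -2 -> max - 1, ...

# with 8 allowed CPUs and no other limits:
# resolve_threads(-1) == 8, resolve_threads(-2) == 7, resolve_threads(100) == 8
```

The new `imops_num_threads` context manager temporarily sets the `IMOPS_NUM_THREADS` limit (e.g. `with imops_num_threads(4): ...`) and restores the previous value on exit.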
imops/zoom.py CHANGED
@@ -81,7 +81,7 @@ def zoom(
      """
      Rescale `x` according to `scale_factor` along the `axis`.

-     Uses a fast parallelizable implementation for fp32 / fp64 (and int16-32-64 if order == 0) inputs,
+     Uses a fast parallelizable implementation for fp32 / fp64 (and bool-int16-32-64 if order == 0) inputs,
      ndim <= 4 and order = 0 or 1.

      Parameters
@@ -120,6 +120,7 @@ def zoom(
      if callable(fill_value):
          fill_value = fill_value(x)

+     # TODO: does `fill_value/cval` change anythng?
      return _zoom(x, scale_factor, order=order, cval=fill_value, num_threads=num_threads, backend=backend)


@@ -135,7 +136,7 @@ def zoom_to_shape(
      """
      Rescale `x` to match `shape` along the `axis`.

-     Uses a fast parallelizable implementation for fp32 / fp64 (and int16-32-64 if order == 0) inputs,
+     Uses a fast parallelizable implementation for fp32 / fp64 (and bool-int16-32-64 if order == 0) inputs,
      ndim <= 4 and order = 0 or 1.

      Parameters
@@ -197,7 +198,7 @@ def _zoom(
      backend: BackendLike = None,
  ) -> np.ndarray:
      """
-     Faster parallelizable version of `scipy.ndimage.zoom` for fp32 / fp64 (and int16-32-64 if order == 0) inputs.
+     Faster parallelizable version of `scipy.ndimage.zoom` for fp32 / fp64 (and bool-int16-32-64 if order == 0) inputs.

      Works faster only for ndim <= 4. Shares interface with `scipy.ndimage.zoom`
      except for
@@ -207,7 +208,7 @@ def _zoom(

      See `https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.zoom.html`
      """
-     backend = resolve_backend(backend)
+     backend = resolve_backend(backend, warn_stacklevel=4)
      if backend.name not in ('Scipy', 'Numba', 'Cython'):
          raise ValueError(f'Unsupported backend "{backend.name}".')

@@ -215,7 +216,7 @@ def _zoom(
      dtype = input.dtype
      cval = np.dtype(dtype).type(cval)
      zoom = fill_by_indices(np.ones(input.ndim, 'float64'), zoom, range(input.ndim))
-     num_threads = normalize_num_threads(num_threads, backend)
+     num_threads = normalize_num_threads(num_threads, backend, warn_stacklevel=4)

      if backend.name == 'Scipy':
          return scipy_zoom(
@@ -227,7 +228,7 @@ def _zoom(
          or (
              dtype not in (np.float32, np.float64)
              if order == 1
-             else dtype not in (np.float32, np.float64, np.int16, np.int32, np.int64)
+             else dtype not in (bool, np.float32, np.float64, np.int16, np.int32, np.int64)
          )
          or ndim > 4
          or output is not None
@@ -235,10 +236,9 @@ def _zoom(
          or grid_mode
      ):
          warn(
-             'Fast zoom is only supported for ndim<=4, dtype=fp32 or fp64 (and int16-32-64 if order == 0), output=None, '
-             "order=0 or 1, mode='constant', grid_mode=False. Falling back to scipy's implementation.",
+             'Fast zoom is only supported for ndim<=4, dtype=fp32 or fp64 (and bool-int16-32-64 if order == 0), '
+             "output=None, order=0 or 1, mode='constant', grid_mode=False. Falling back to scipy's implementation.",
          )
-
          return scipy_zoom(
              input, zoom, output=output, order=order, mode=mode, cval=cval, prefilter=prefilter, grid_mode=grid_mode
          )
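With the dtype check above now including `bool`, nearest-neighbour zoom of binary masks can stay on the fast Cython path instead of falling back to SciPy. A usage sketch based on the public API shown in the README below; defaults such as `axis` follow the scipy-like interface, and the actual speedup depends on backend and thread count:

```python
import numpy as np
from imops import zoom

mask = np.zeros((64, 64, 64), dtype=bool)
mask[16:48, 16:48, 16:48] = True

# order=0 preserves the input dtype, so the result is still a bool mask
big = zoom(mask, 2, order=0, num_threads=-1)
print(big.shape, big.dtype)  # (128, 128, 128) bool
```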
@@ -1,12 +1,12 @@
  Metadata-Version: 2.1
  Name: imops
- Version: 0.8.1
+ Version: 0.8.3
  Summary: Efficient parallelizable algorithms for multidimensional arrays to speed up your data pipelines
  Home-page: https://github.com/neuro-ml/imops
  Author: maxme1, vovaf709, talgat
  Author-email: maxs987@gmail.com, vovaf709@yandex.ru, saparov2130@gmail.com
  License: MIT
- Download-URL: https://github.com/neuro-ml/imops/archive/v0.8.1.tar.gz
+ Download-URL: https://github.com/neuro-ml/imops/archive/v0.8.3.tar.gz
  Keywords: image processing,fast,ndarray,data pipelines
  Platform: UNKNOWN
  Classifier: Development Status :: 5 - Production/Stable
@@ -21,6 +21,7 @@ Requires-Python: >=3.6
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: scipy (<2.0.0,>=1.0)
+ Requires-Dist: scikit-image
  Requires-Dist: connected-components-3d
  Requires-Dist: fastremap
  Requires-Dist: dataclasses ; python_version < "3.7"
@@ -38,7 +39,9 @@ Requires-Dist: numba ; extra == 'numba'

  # Imops

- Efficient parallelizable algorithms for multidimensional arrays to speed up your data pipelines. Docs are [here](https://neuro-ml.github.io/imops/).
+ Efficient parallelizable algorithms for multidimensional arrays to speed up your data pipelines.
+ - [Documentation](https://neuro-ml.github.io/imops/)
+ - [Benchmarks](https://neuro-ml.github.io/imops/benchmarks/)

  # Install

@@ -47,15 +50,33 @@ pip install imops # default install with Cython backend
  pip install imops[numba] # additionally install Numba backend
  ```

+ # How fast is it?
+
+ Time comparisons (ms) for Intel(R) Xeon(R) Silver 4114 CPU @ 2.20GHz using 8 threads. All inputs are C-contiguous NumPy arrays. For morphology functions `bool` dtype is used and `float64` for all others.
+ | function / backend | Scipy() | Cython(fast=False) | Cython(fast=True) | Numba() |
+ |:----------------------:|:-----------:|:----------------------:|:---------------------:|:-----------:|
+ | `zoom(..., order=0)` | 2072 | 1114 | **867** | 3590 |
+ | `zoom(..., order=1)` | 6527 | 596 | **575** | 3757 |
+ | `interp1d` | 780 | 149 | **146** | 420 |
+ | `radon` | 59711 | 5982 | **4837** | - |
+ | `inverse_radon` | 52928 | 8254 | **6535** | - |
+ | `binary_dilation` | 2207 | 310 | **298** | - |
+ | `binary_erosion` | 2296 | 326 | **304** | - |
+ | `binary_closing` | 4158 | 544 | **469** | - |
+ | `binary_opening` | 4410 | 567 | **522** | - |
+ | `center_of_mass` | 2237 | **64** | **64** | - |
+
+ We use [`airspeed velocity`](https://asv.readthedocs.io/en/stable/) to benchmark our code. For detailed results visit [benchmark page](https://neuro-ml.github.io/imops/benchmarks/).
+
  # Features

- ## Fast Radon transform
+ ### Fast Radon transform

  ```python
  from imops import radon, inverse_radon
  ```

- ## Fast linear/bilinear/trilinear zoom
+ ### Fast 0/1-order zoom

  ```python
  from imops import zoom, zoom_to_shape
@@ -66,20 +87,20 @@ y = zoom(x, 2, axis=[0, 1])
  # without the need to compute the scale factor
  z = zoom_to_shape(x, (4, 120, 67))
  ```
- Works faster only for `ndim<=3, dtype=float32 or float64, output=None, order=1, mode='constant', grid_mode=False`
- ## Fast 1d linear interpolation
+ Works faster only for `ndim<=4, dtype=float32 or float64 (and bool-int16-32-64 if order == 0), output=None, order=0 or 1, mode='constant', grid_mode=False`
+ ### Fast 1d linear interpolation

  ```python
  from imops import interp1d # same as `scipy.interpolate.interp1d`
  ```
- Works faster only for `ndim<=3, dtype=float32 or float64, order=1 or 'linear'`
- ## Fast binary morphology
+ Works faster only for `ndim<=3, dtype=float32 or float64, order=1`
+ ### Fast binary morphology

  ```python
  from imops import binary_dilation, binary_erosion, binary_opening, binary_closing
  ```
  These functions mimic `scikit-image` counterparts
- ## Padding
+ ### Padding

  ```python
  from imops import pad, pad_to_shape
@@ -92,7 +113,7 @@ y = pad(x, 10, axis=[0, 1])
  z = pad_to_shape(x, (4, 120, 67), ratio=0.25)
  ```

- ## Cropping
+ ### Cropping

  ```python
  from imops import crop_to_shape
@@ -104,7 +125,7 @@ from imops import crop_to_shape
  z = crop_to_shape(x, (4, 120, 67), ratio=0.25)
  ```

- ## Labeling
+ ### Labeling

  ```python
  from imops import label
@@ -114,7 +135,7 @@ labeled, num_components = label(x, background=1, return_num=True)
  ```

  # Backends
- For `zoom`, `zoom_to_shape`, `interp1d`, `radon`, `inverse_radon` you can specify which backend to use. Backend can be specified by a string or by an instance of `Backend` class. The latter allows you to customize some backend options:
+ For all heavy image routines except `label` you can specify which backend to use. Backend can be specified by a string or by an instance of `Backend` class. The latter allows you to customize some backend options:
  ```python
  from imops import Cython, Numba, Scipy, zoom

@@ -135,10 +156,9 @@ with imops_backend('Cython'): # sets Cython backend via context manager
  ```
  Note that for `Numba` backend setting `num_threads` argument has no effect for now and you should use `NUMBA_NUM_THREADS` environment variable.
  Available backends:
- | | Scipy | Cython | Numba |
- |-------------------|---------|---------|---------|
+ | function / backend | Scipy | Cython | Numba |
+ |:-------------------:|:---------:|:---------:|:---------:|
  | `zoom` | &check; | &check; | &check; |
- | `zoom_to_shape` | &check; | &check; | &check; |
  | `interp1d` | &check; | &check; | &check; |
  | `radon` | &cross; | &check; | &cross; |
  | `inverse_radon` | &cross; | &check; | &cross; |
@@ -146,6 +166,7 @@ Available backends:
  | `binary_erosion` | &check; | &check; | &cross; |
  | `binary_closing` | &check; | &check; | &cross; |
  | `binary_opening` | &check; | &check; | &cross; |
+ | `center_of_mass` | &check; | &check; | &cross; |

  # Acknowledgements