fastremap-1.16.0-cp39-cp39-win32.whl → fastremap-1.17.0-cp39-cp39-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastremap/__init__.py +47 -0
- fastremap/fastremap.cp39-win32.pyd +0 -0
- fastremap/fastremap.pxd +100 -0
- fastremap/fastremap.pyi +493 -0
- fastremap/fastremap.pyx +1630 -0
- fastremap/ipt.hpp +354 -0
- fastremap/py.typed +0 -0
- fastremap/ska_flat_hash_map.hpp +1572 -0
- {fastremap-1.16.0.dist-info → fastremap-1.17.0.dist-info}/METADATA +4 -4
- fastremap-1.17.0.dist-info/RECORD +15 -0
- {fastremap-1.16.0.dist-info → fastremap-1.17.0.dist-info}/WHEEL +1 -1
- fastremap-1.17.0.dist-info/pbr.json +1 -0
- fastremap-1.16.0.dist-info/RECORD +0 -8
- fastremap-1.16.0.dist-info/pbr.json +0 -1
- fastremap.cp39-win32.pyd +0 -0
- {fastremap-1.16.0.dist-info → fastremap-1.17.0.dist-info}/licenses/AUTHORS +0 -0
- {fastremap-1.16.0.dist-info → fastremap-1.17.0.dist-info}/licenses/LICENSE +0 -0
- {fastremap-1.16.0.dist-info → fastremap-1.17.0.dist-info}/top_level.txt +0 -0
fastremap/__init__.py
ADDED
@@ -0,0 +1,47 @@
|
|
1
|
+
# Public package surface for fastremap: re-export every user-facing function
# from the compiled Cython extension module (fastremap.fastremap).
from .fastremap import (
    ascontiguousarray,
    asfortranarray,
    component_map,
    foreground,
    indices,
    inverse_component_map,
    mask,
    mask_except,
    minmax,
    narrow_dtype,
    pixel_pairs,
    point_cloud,
    refit,
    remap,
    remap_from_array,
    remap_from_array_kv,
    renumber,
    tobytes,
    transpose,
    unique,
    widen_dtype,
)

# Explicit public API; kept alphabetical and in sync with the import list above.
__all__ = [
    "ascontiguousarray",
    "asfortranarray",
    "component_map",
    "foreground",
    "indices",
    "inverse_component_map",
    "mask",
    "mask_except",
    "minmax",
    "narrow_dtype",
    "pixel_pairs",
    "point_cloud",
    "refit",
    "remap",
    "remap_from_array",
    "remap_from_array_kv",
    "renumber",
    "tobytes",
    "transpose",
    "unique",
    "widen_dtype",
]
|
Binary file
|
fastremap/fastremap.pxd
ADDED
@@ -0,0 +1,100 @@
|
|
1
|
+
# cython: language_level=3
# Cython declaration file exposing ska::flat_hash_map (a fast open-addressing
# hash map from ska_flat_hash_map.hpp) so fastremap.pyx can use it from
# nogil code. Declarations mirror the std::unordered_map-like interface.
from libcpp.utility cimport pair

cdef extern from "ska_flat_hash_map.hpp" namespace "ska" nogil:
    # HASH/PRED/ALLOCATOR are optional template parameters (=* lets callers
    # omit them and accept the C++ defaults).
    cdef cppclass flat_hash_map[T, U, HASH=*, PRED=*, ALLOCATOR=*]:
        ctypedef T key_type
        ctypedef U mapped_type
        ctypedef pair[const T, U] value_type
        ctypedef ALLOCATOR allocator_type

        # these should really be allocator_type.size_type and
        # allocator_type.difference_type to be true to the C++ definition
        # but cython doesn't support deferred access on template arguments
        ctypedef size_t size_type
        ctypedef ptrdiff_t difference_type

        # Forward declaration so const_iterator can reference iterator below.
        cppclass iterator
        cppclass iterator:
            iterator() except +
            iterator(iterator&) except +
            # correct would be value_type& but this does not work
            # well with cython's code gen
            pair[T, U]& operator*()
            iterator operator++()
            iterator operator--()
            iterator operator++(int)
            iterator operator--(int)
            bint operator==(iterator)
            bint operator==(const_iterator)
            bint operator!=(iterator)
            bint operator!=(const_iterator)
        cppclass const_iterator:
            const_iterator() except +
            const_iterator(iterator&) except +
            operator=(iterator&) except +
            # correct would be const value_type& but this does not work
            # well with cython's code gen
            const pair[T, U]& operator*()
            const_iterator operator++()
            const_iterator operator--()
            const_iterator operator++(int)
            const_iterator operator--(int)
            bint operator==(iterator)
            bint operator==(const_iterator)
            bint operator!=(iterator)
            bint operator!=(const_iterator)

        flat_hash_map() except +
        flat_hash_map(flat_hash_map&) except +
        #flat_hash_map(key_compare&)
        U& operator[](const T&)
        #flat_hash_map& operator=(flat_hash_map&)
        bint operator==(flat_hash_map&, flat_hash_map&)
        bint operator!=(flat_hash_map&, flat_hash_map&)
        bint operator<(flat_hash_map&, flat_hash_map&)
        bint operator>(flat_hash_map&, flat_hash_map&)
        bint operator<=(flat_hash_map&, flat_hash_map&)
        bint operator>=(flat_hash_map&, flat_hash_map&)
        U& at(const T&) except +
        # "const_*" aliases rename the const overloads so Cython can
        # distinguish them from the non-const versions.
        const U& const_at "at"(const T&) except +
        iterator begin()
        const_iterator const_begin "begin"()
        const_iterator cbegin()
        void clear()
        size_t count(const T&)
        bint empty()
        iterator end()
        const_iterator const_end "end"()
        const_iterator cend()
        pair[iterator, iterator] equal_range(const T&)
        pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&)
        iterator erase(iterator)
        iterator const_erase "erase"(const_iterator)
        iterator erase(const_iterator, const_iterator)
        size_t erase(const T&)
        iterator find(const T&)
        const_iterator const_find "find"(const T&)
        pair[iterator, bint] insert(const pair[T, U]&) except +
        iterator insert(const_iterator, const pair[T, U]&) except +
        void insert[InputIt](InputIt, InputIt) except +
        #key_compare key_comp()
        iterator lower_bound(const T&)
        const_iterator const_lower_bound "lower_bound"(const T&)
        size_t max_size()
        size_t size()
        void swap(flat_hash_map&)
        iterator upper_bound(const T&)
        const_iterator const_upper_bound "upper_bound"(const T&)
        #value_compare value_comp()
        void max_load_factor(float)
        float max_load_factor()
        float load_factor()
        void rehash(size_t)
        void reserve(size_t)
        size_t bucket_count()
        size_t max_bucket_count()
        size_t bucket_size(size_t)
        size_t bucket(const T&)
|
100
|
+
|
fastremap/fastremap.pyi
ADDED
@@ -0,0 +1,493 @@
|
|
1
|
+
from typing import Any, Literal, Union, overload
|
2
|
+
|
3
|
+
import numpy as np
|
4
|
+
from numpy.typing import ArrayLike, DTypeLike, NDArray
|
5
|
+
|
6
|
+
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[False] = False,
    return_inverse: Literal[False] = False,
    return_counts: Literal[False] = False,
    axis: Union[int, None] = None,
) -> NDArray[Any]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[True],
    return_inverse: Literal[False] = False,
    return_counts: Literal[False] = False,
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[False],
    return_inverse: Literal[True],
    return_counts: Literal[False] = False,
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[False] = False,
    *,
    return_inverse: Literal[True],
    return_counts: Literal[False] = False,
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[False],
    return_inverse: Literal[False],
    return_counts: Literal[True],
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[False] = False,
    return_inverse: Literal[False] = False,
    *,
    return_counts: Literal[True],
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[True],
    return_inverse: Literal[True],
    return_counts: Literal[False] = False,
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[True],
    return_inverse: Literal[False],
    return_counts: Literal[True],
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[True],
    return_inverse: Literal[False] = False,
    *,
    return_counts: Literal[True],
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[False],
    return_inverse: Literal[True],
    return_counts: Literal[True],
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[False] = False,
    *,
    return_inverse: Literal[True],
    return_counts: Literal[True],
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
@overload
def unique(
    labels: ArrayLike,
    return_index: Literal[True],
    return_inverse: Literal[True],
    return_counts: Literal[True],
    axis: Union[int, None] = None,
) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ...
def unique(  # type: ignore[misc]
    labels: ArrayLike,
    return_index: bool = False,
    return_inverse: bool = False,
    return_counts: bool = False,
    axis: Union[int, None] = None,
) -> Union[
    NDArray[Any],
    tuple[NDArray[Any], NDArray[Any]],
    tuple[NDArray[Any], NDArray[Any], NDArray[Any]],
    tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]],
]:
    """Compute the sorted set of unique labels in the input array.

    Args:
        labels: The input array containing labels.
        return_index: If True, also return the index of the first detected
            occurrence of each label.
        return_inverse: If True, also return the indices of the unique array
            (for the specified axis, if provided) that can be used to
            reconstruct the input array.
        return_counts: If True, also return the unique label frequency as an
            array.
        axis: If specified and not None, compute the unique values along this
            axis.

    Returns:
        Either an array of the sorted values or a tuple containing the following
        elements depending on the flags set.

        unique:
            The sorted unique values.
            Always provided.

        unique_indices, optional:
            The indices of the first occurrences of the unique values in the
            original array.
            Only provided if return_index is True.

        unique_inverse, optional:
            The indices to reconstruct the original array from the unique array.
            Only provided if return_inverse is True.

        unique_counts, optional:
            The number of times each of the unique values comes up in the
            original array.
            Only provided if return_counts is True.
    """
    ...
155
|
+
|
156
|
+
def renumber(
    arr: NDArray[Any],
    start: Union[int, float] = 1,
    preserve_zero: bool = True,
    in_place: bool = False,
) -> tuple[NDArray[Any], Union[dict[int, int], dict[float, float]]]:
    """Renumber an array.

    Given an array of integers, renumber all the unique values starting
    from 1. This can allow us to reduce the size of the data width required
    to represent it.

    Args:
        arr: A numpy array.
        start (default: 1): Start renumbering from this value.
        preserve_zero (default: True): Don't renumber zero.
        in_place (default: False): Perform the renumbering in-place to avoid
            an extra copy. This option depends on a fortran or C contiguous
            array. A copy will be made if the array is not contiguous.

    Returns:
        A renumbered array, dict with remapping of oldval => newval.
    """
    ...
|
180
|
+
|
181
|
+
def indices(arr: NDArray[Any], value: Union[int, float]) -> NDArray[Any]:
    """Return the indices of every array element equal to ``value``."""
    ...
|
187
|
+
|
188
|
+
def remap(
    arr: ArrayLike,
    table: Union[dict[int, int], dict[float, float]],
    preserve_missing_labels: bool = False,
    in_place: bool = False,
) -> NDArray[Any]:
    """Remap an input numpy array according to a dictionary "table".

    The remapping happens in-place only when in_place=True; by default a
    new array is returned.

    Args:
        arr: An N-dimensional numpy array.
        table: A dictionary resembling: { label: new_label_value, ... }.
        preserve_missing_labels: If an array value is not present in "table"...
            True: Leave it alone.
            False: Throw a KeyError.
        in_place: if True, modify the input array to reduce memory consumption.

    Returns:
        The remapped array.
    """
    ...
|
208
|
+
|
209
|
+
def refit(
    arr: NDArray[Any],
    value: Union[int, float, None] = None,
    increase_only: bool = False,
    exotics: bool = False,
) -> NDArray[Any]:
    """Convert the array to the smallest same-kind dtype that can hold it.

    For instance, a uint8 input that must represent 2^20 comes back as
    uint32. Standard float, signed/unsigned integer, and complex kinds
    are supported.

    Args:
        arr: A numpy array.
        value: The value the array must be able to represent. When None,
            the larger-in-magnitude of the array's min and max is used.
        increase_only: When true, only widen; never shrink below the
            current dtype. When false, always pick the smallest fit.
        exotics: Permit unusual dtypes such as float16 or complex128.

    Returns:
        The refitted array.
    """
    ...
|
235
|
+
|
236
|
+
def narrow_dtype(
    dtype: DTypeLike,
    exotics: bool = False,
) -> DTypeLike:
    """Narrow the given dtype to the next smaller size of the same type.

    For example, int16 -> int8 or uint64 -> uint32.

    8-bit types will map to themselves.

    Args:
        exotics: Whether to include exotics like half precision floats (16-bit)
            or double complex (128-bit).

    Returns:
        The downgraded dtype.
    """
    ...
|
254
|
+
|
255
|
+
def widen_dtype(dtype: DTypeLike, exotics: bool = False) -> DTypeLike:
    """Promote the dtype to the next larger size of the same kind.

    e.g. int8 -> int16, uint32 -> uint64. 64-bit types map to themselves.

    Args:
        exotics: Whether to also consider unusual dtypes such as half
            precision floats (16-bit) or double complex (128-bit).

    Returns:
        The upgraded dtype.
    """
    ...
|
273
|
+
|
274
|
+
def mask(
    arr: ArrayLike,
    labels: ArrayLike,
    in_place: bool = False,
    value: Union[int, float] = 0,
) -> NDArray[Any]:
    """Mask out designated labels in an array with the given value.

    Alternative implementation of:

        arr[np.isin(arr, labels)] = value

    Args:
        arr: An N-dimensional numpy array.
        labels: An iterable list of integers.
        in_place: If True, modify the input array to reduce memory consumption.
        value: A mask value.

    Returns:
        The array with `labels` masked out.
    """
    ...
|
296
|
+
|
297
|
+
def mask_except(
    arr: NDArray[Any],
    labels: ArrayLike,
    in_place: bool = False,
    value: Union[int, float] = 0,
) -> NDArray[Any]:
    """Mask out all labels except the provided list.

    Alternative implementation of:

        arr[~np.isin(arr, labels)] = value

    Args:
        arr: An N-dimensional numpy array.
        labels: An iterable list of integers.
        in_place: If True, modify the input array to reduce memory consumption.
        value: A mask value.

    Returns:
        The array with all labels except `labels` masked out.
    """
    ...
|
319
|
+
|
320
|
+
def component_map(
    component_labels: ArrayLike,
    parent_labels: ArrayLike,
) -> Union[dict[int, int], dict[float, float]]:
    """Build the surjective mapping from component labels to parent labels.

    Given two images whose labels have a surjective correspondence (e.g.
    connected components of a labeling and the labels themselves), return
    that correspondence as a dictionary.

    Returns:
        { $COMPONENT_LABEL: $PARENT_LABEL }

    Examples:
        >>> fastremap.component_map([1, 2, 3, 4], [5, 5, 6, 7])
        {1: 5, 2: 5, 3: 6, 4: 7}
    """
    ...
|
338
|
+
|
339
|
+
def inverse_component_map(
    parent_labels: ArrayLike,
    component_labels: ArrayLike,
) -> Union[dict[int, list[int]], dict[float, list[float]]]:
    """Generate a mapping from parent labels to connected components.

    Given two sets of images that have a mapping between their labels, generate
    a dictionary for that mapping. For example, generate a mapping from
    connected components of labels to their parent labels.

    Returns:
        A dictionary resembling: { $PARENT_LABEL: [ $COMPONENT_LABELS, ... ] }.

    Examples:
        >>> fastremap.inverse_component_map([1, 2, 1, 3], [4, 4, 5, 6])
        {1: [4, 5], 2: [4], 3: [6]}
    """
    ...
|
357
|
+
|
358
|
+
def remap_from_array(
    arr: NDArray[np.uint],
    vals: NDArray[np.uint],
    in_place: bool = True,
) -> NDArray[Any]:
    """Remap ``arr`` using ``vals`` as a lookup table indexed by label.

    Args:
        arr: An N-dimensional numpy array.
        vals: Replacement values; position i in this array is the new
            value for label i in the input.
        in_place: If True, modify the input array to reduce memory consumption.

    Returns:
        The remapped array.
    """
    ...
|
375
|
+
|
376
|
+
def remap_from_array_kv(
    arr: NDArray[np.integer],
    keys: NDArray[np.integer],
    vals: NDArray[np.integer],
    preserve_missing_labels: bool = True,
    in_place: bool = True,
) -> NDArray[Any]:
    """Remap an input numpy array according to the keys and values arrays.

    Args:
        arr: An N-dimensional numpy array.
        keys: An array of keys to remap from. Must be the same length as `vals`.
        vals: An array of values to remap to. Must be the same length as `keys`.
        preserve_missing_labels: If an array value is not present in `keys`...
            True: Leave it alone.
            False: Throw a KeyError.
        in_place: If True, modify the input array to reduce memory consumption.

    Returns:
        The remapped array.
    """
    ...
|
398
|
+
|
399
|
+
def transpose(arr: NDArray[Any]) -> NDArray[Any]:
    """Transpose a matrix of up to four dimensions, in place when possible.

    For square matrices of up to three dimensions this beats numpy's
    out-of-place routine; cases without a specialized path fall back to
    numpy's out-of-place implementation.

    Args:
        arr: The input numpy array to transpose.

    Returns:
        The transposed numpy array
    """
    ...
|
413
|
+
|
414
|
+
def asfortranarray(arr: NDArray[Any]) -> NDArray[Any]:
    """Return the array in Fortran (column-major) order.

    For up to four dimensional matrices, uses in-place transposition.
    Square matrices up to three dimensions are faster than numpy's
    out-of-place algorithm. Defaults to the out-of-place implementation
    numpy uses for cases that aren't specially handled.

    Args:
        arr: The input numpy array to convert.

    Returns:
        The array in Fortran order.
    """
    ...
|
428
|
+
|
429
|
+
def ascontiguousarray(arr: NDArray[Any]) -> NDArray[Any]:
    """Return the array in C (row-major) contiguous order.

    For up to four dimensional matrices, uses in-place transposition.
    Square matrices up to three dimensions are faster than numpy's
    out-of-place algorithm. Defaults to the out-of-place implementation
    numpy uses for cases that aren't specially handled.

    Args:
        arr: The input numpy array to convert.

    Returns:
        The array in C contiguous order.
    """
    ...
|
443
|
+
|
444
|
+
def minmax(
    arr: NDArray[Any],
) -> tuple[Union[int, float, None], Union[int, float, None]]:
    """Compute (min(arr), max(arr)) in one pass over the data.

    A zero-size array yields (None, None).
    """
    ...
|
452
|
+
|
453
|
+
def pixel_pairs(labels: NDArray[Any]) -> int:
    """Count adjacent memory locations holding equal values.

    A quick heuristic for judging whether an image looks more binary
    or more connectomics-like.
    """
    ...
|
460
|
+
|
461
|
+
def foreground(arr: NDArray[Any]) -> int:
    """Count the non-zero voxels in an array."""
    ...
|
464
|
+
|
465
|
+
def point_cloud(arr: NDArray[Any]) -> dict[int, NDArray[Any]]:
    """Map each non-zero label to its (x, y, z) positions in the image.

    Zero is treated as background and excluded.

    Args:
        arr: A 2D or 3D numpy array.

    Returns:
        A dictionary keyed by label whose values are (N, 3) numpy arrays
        of coordinates, N being the number of points for that label.
    """
    ...
|
479
|
+
|
480
|
+
def tobytes(
    image: NDArray[Any],
    chunk_size: tuple[int, int, int],
    order: str = "C",
) -> list[bytes]:
    """Serialize the image as a grid of cutout binaries.

    The image is divided into a grid of chunk_size cutouts and each
    cutout's bytes are returned, indexed by gridpoint in fortran order.

    When the input and output orders match, this outperforms calling
    tobytes on every cutout separately.
    """
    ...