dcnum 0.25.6-py3-none-any.whl → 0.25.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dcnum might be problematic.
- dcnum/_version.py +2 -2
- dcnum/feat/event_extractor_manager_thread.py +29 -14
- dcnum/feat/feat_background/base.py +19 -11
- dcnum/feat/feat_background/bg_copy.py +4 -0
- dcnum/feat/feat_background/bg_roll_median.py +24 -15
- dcnum/feat/feat_background/bg_sparse_median.py +32 -17
- dcnum/feat/feat_contour/volume.py +2 -2
- dcnum/feat/gate.py +11 -11
- dcnum/feat/queue_event_extractor.py +39 -20
- dcnum/logic/ctrl.py +5 -4
- dcnum/logic/job.py +3 -1
- dcnum/logic/json_encoder.py +9 -0
- dcnum/meta/paths.py +1 -0
- dcnum/meta/ppid.py +4 -2
- dcnum/os_env_st.py +2 -2
- dcnum/read/cache.py +3 -1
- dcnum/read/const.py +5 -2
- dcnum/read/detect_flicker.py +1 -1
- dcnum/segm/segm_torch/segm_torch_base.py +3 -2
- dcnum/segm/segm_torch/torch_postproc.py +1 -0
- dcnum/segm/segm_torch/torch_preproc.py +1 -0
- dcnum/segm/segmenter.py +31 -20
- dcnum/segm/segmenter_manager_thread.py +19 -12
- dcnum/segm/segmenter_mpo.py +4 -4
- dcnum/segm/segmenter_sto.py +2 -2
- dcnum/write/queue_collector_thread.py +35 -18
- dcnum/write/writer.py +4 -3
- {dcnum-0.25.6.dist-info → dcnum-0.25.7.dist-info}/METADATA +1 -1
- dcnum-0.25.7.dist-info/RECORD +57 -0
- {dcnum-0.25.6.dist-info → dcnum-0.25.7.dist-info}/WHEEL +1 -1
- dcnum-0.25.6.dist-info/RECORD +0 -57
- {dcnum-0.25.6.dist-info → dcnum-0.25.7.dist-info}/LICENSE +0 -0
- {dcnum-0.25.6.dist-info → dcnum-0.25.7.dist-info}/top_level.txt +0 -0
dcnum/_version.py
CHANGED
dcnum/feat/event_extractor_manager_thread.py
CHANGED
@@ -35,8 +35,8 @@ class EventExtractorManagerThread(threading.Thread):
             with segmenter"), so that the segmenter can compute a new
             chunk of labels.
         slot_chunks:
-            For each slot in
-            on which chunk in
+            For each slot in ``slot_states``, this shared array defines
+            on which chunk in ``image_data`` the segmentation took place.
         fe_kwargs:
             Feature extraction keyword arguments. See
             :func:`.EventExtractor.get_init_kwargs` for more information.
@@ -47,36 +47,51 @@ class EventExtractorManagerThread(threading.Thread):
             fills up, we take a break.
         debug:
             Whether to run in debugging mode which means only one
-            event extraction thread (
+            event extraction thread (``num_workers`` has no effect).
         """
         super(EventExtractorManagerThread, self).__init__(
             name="EventExtractorManager", *args, **kwargs)
         self.logger = logging.getLogger(
             "dcnum.feat.EventExtractorManagerThread")
-
+
         self.fe_kwargs = fe_kwargs
-
+        """Keyword arguments
+        for :class:`event_extractor_manager_thread.py.QueueEventExtractor`
+        instances"""
+
         self.data = fe_kwargs["data"]
-
+        """Data instance"""
+
         self.slot_states = slot_states
-
+        """States of the segmenter-extractor pipeline slots"""
+
         self.slot_chunks = slot_chunks
-
+        """Chunk indices corresponding to ``slot_states``
+        """
+
         self.num_workers = 1 if debug else num_workers
-
+        """Number of workers"""
+
         self.raw_queue = self.fe_kwargs["raw_queue"]
-
+        """Queue for sending chunks and label indices to the workers"""
+
         self.labels_list = labels_list
-
+        """List of chunk labels corresponding to ``slot_states``
+        """
+
         self.label_array = np.ctypeslib.as_array(
             self.fe_kwargs["label_array"]).reshape(
             self.data.image.chunk_shape)
-
+        """Shared labeling array"""
+
         self.writer_dq = writer_dq
-
+        """Writer deque to monitor"""
+
         self.t_count = 0
-
+        """Time counter for feature extraction"""
+
         self.debug = debug
+        """Whether debugging is enabled"""
 
     def run(self):
         # Initialize all workers
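The `label_array` plumbing above (a `multiprocessing` `RawArray` viewed as a NumPy array and reshaped to the chunk shape) is easiest to see in isolation. A minimal sketch of the pattern, with illustrative shapes and names rather than dcnum's actual API:

```python
import multiprocessing as mp
import numpy as np

chunk_shape = (100, 80, 250)  # (events, height, width); illustrative only

# Shared, lock-free buffer that spawned worker processes can inherit
label_array_raw = mp.RawArray(np.ctypeslib.ctypes.c_int32,
                              int(np.prod(chunk_shape)))
# NumPy view on the same memory; writes here are visible to every process
# holding the RawArray (see stackoverflow.com/questions/37705974)
label_array = np.ctypeslib.as_array(label_array_raw).reshape(chunk_shape)
label_array[0] = 1  # label the first event image of the chunk
```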
dcnum/feat/feat_background/base.py
CHANGED
@@ -21,7 +21,7 @@ mp_spawn = mp.get_context('spawn')
 class Background(abc.ABC):
     def __init__(self, input_data, output_path, compress=True, num_cpus=None,
                  **kwargs):
-        """
+        """Base class for background computation
 
         Parameters
         ----------
@@ -56,28 +56,35 @@ class Background(abc.ABC):
         # Using spec is not really necessary here, because kwargs are
         # fully populated for background computation, but this might change.
         spec = inspect.getfullargspec(self.check_user_kwargs)
-
+
         self.kwargs = spec.kwonlydefaults or {}
+        """background keyword arguments"""
         self.kwargs.update(kwargs)
 
         if num_cpus is None:
             num_cpus = mp_spawn.cpu_count()
-
+
         self.num_cpus = num_cpus
+        """number of CPUs used"""
 
-        #: number of images in the input data
         self.image_count = None
-
+        """number of images in the input data"""
+
         self.image_proc = mp_spawn.Value("d", 0)
+        """fraction images that have been processed"""
 
-        #: HDF5Data instance for input data
         self.hdin = None
-
+        """HDF5Data instance for input data"""
+
         self.h5in = None
-
+        """input h5py.File"""
+
         self.h5out = None
-
+        """output h5py.File"""
+
         self.paths_ref = []
+        """reference paths for logging to the output .rtdc file"""
+
         # Check whether user passed an array or a path
         if isinstance(input_data, pathlib.Path):
             if str(input_data.resolve()) == str(output_path.resolve()):
@@ -96,10 +103,11 @@ class Background(abc.ABC):
         else:
             self.input_data = input_data
 
-        #: shape of event images
         self.image_shape = self.input_data[0].shape
-
+        """shape of event images"""
+
         self.image_count = len(self.input_data)
+        """total number of events"""
 
         if self.h5out is None:
             if not output_path.exists():
dcnum/feat/feat_background/bg_roll_median.py
CHANGED
@@ -42,7 +42,7 @@ class BackgroundRollMed(Background):
     batch_size: int
         Number of events to process at the same time. Increasing this
        number much more than two orders of magnitude larger than
-
+        ``kernel_size`` will not increase computation speed. Larger
        values lead to a higher memory consumption.
     compress: bool
        Whether to compress background data. Set this to False
@@ -64,39 +64,47 @@ class BackgroundRollMed(Background):
                 f"size {len(self.input_data)} is larger than the "
                 f"kernel size {kernel_size}!")
 
-        #: kernel size used for median filtering
         self.kernel_size = kernel_size
-
+        """kernel size used for median filtering"""
+
         self.batch_size = batch_size
+        """number of events processed at once"""
 
-        #: mp.RawArray for temporary batch input data
         self.shared_input_raw = mp_spawn.RawArray(
             np.ctypeslib.ctypes.c_uint8,
             int(np.prod(self.image_shape)) * (batch_size + kernel_size))
-
+        """mp.RawArray for temporary batch input data"""
+
         self.shared_output_raw = mp_spawn.RawArray(
             np.ctypeslib.ctypes.c_uint8,
             int(np.prod(self.image_shape)) * batch_size)
+        """mp.RawArray for temporary batch output data"""
+
         # Convert the RawArray to something we can write to fast
         # (similar to memoryview, but without having to cast) using
         # np.ctypeslib.as_array. See discussion in
         # https://stackoverflow.com/questions/37705974
-        #: numpy array reshaped view on `self.shared_input_raw` with
-        #: first axis enumerating the events
         self.shared_input = np.ctypeslib.as_array(
             self.shared_input_raw).reshape(batch_size + kernel_size, -1)
-
-
+        """numpy array reshaped view on `self.shared_input_raw`.
+        The first axis enumerating the events
+        """
+
         self.shared_output = np.ctypeslib.as_array(
             self.shared_output_raw).reshape(batch_size, -1)
-
+        """numpy array reshaped view on `self.shared_output_raw`.
+        The first axis enumerating the events
+        """
+
         self.current_batch = 0
+        """current batch index (see `self.process` and `process_next_batch`)"""
 
-        #: counter tracking process of workers
         self.worker_counter = mp_spawn.Value("l", 0)
-
+        """counter tracking process of workers"""
+
         self.queue = mp_spawn.Queue()
-
+        """queue for median computation jobs"""
+
         self.workers = [WorkerRollMed(self.queue,
                                       self.worker_counter,
                                       self.shared_input_raw,
@@ -104,6 +112,7 @@ class BackgroundRollMed(Background):
                                       self.batch_size,
                                       self.kernel_size)
                        for _ in range(self.num_cpus)]
+        """list of workers (processes)"""
         [w.start() for w in self.workers]
 
     def __enter__(self):
@@ -131,7 +140,7 @@ class BackgroundRollMed(Background):
         batch_size: int
             Number of events to process at the same time. Increasing this
             number much more than two orders of magnitude larger than
-
+            ``kernel_size`` will not increase computation speed. Larger
             values lead to a higher memory consumption.
         """
         assert kernel_size > 0, "kernel size must be positive number"
@@ -283,7 +292,7 @@ def compute_median_for_slice(shared_input, shared_output, kernel_size,
        in the original image, batch_size + kernel_size events are
        stored in this array one after another in a row.
        The total size of this array is
-
+        ``batch_size * kernel_size * number_of_pixels_in_the_image``.
    shared_output: multiprocessing.RawArray
        Used for storing the result. Note that the last `kernel_size`
        elements for each pixel in this output array are junk data
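For orientation, the rolling-median approach that this class parallelizes over shared RawArrays can be written as a single-process sketch in a few lines; the trailing-window behavior below is an illustrative simplification, not dcnum's implementation:

```python
import numpy as np

def rolling_median_background(images, kernel_size=100):
    """Single-process sketch of rolling-median background correction."""
    n = len(images)
    flat = images.reshape(n, -1)            # first axis enumerates events
    bg = np.empty_like(flat)
    for ii in range(n):
        window = flat[ii:ii + kernel_size]  # trailing window (shorter at end)
        bg[ii] = np.median(window, axis=0)  # per-pixel median over the window
    return bg.reshape(images.shape)
```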
dcnum/feat/feat_background/bg_sparse_median.py
CHANGED
@@ -21,8 +21,8 @@ class BackgroundSparseMed(Background):
 
     In contrast to the rolling median background correction,
     this algorithm only computes the background image every
-
-    200 frames instead of 100 frames).
+    ``split_time`` seconds, but with a larger window (default kernel
+    size is 200 frames instead of 100 frames).
 
     1. At time stamps every `split_time` seconds, a background image is
        computed, resulting in a background series.
@@ -103,16 +103,20 @@ class BackgroundSparseMed(Background):
                 f"size {len(self.input_data)}. Setting it to input data size!")
             kernel_size = len(self.input_data)
 
-        #: kernel size used for median filtering
         self.kernel_size = kernel_size
-
+        """kernel size used for median filtering"""
+
         self.split_time = split_time
-
+        """time between background images in the background series"""
+
         self.thresh_cleansing = thresh_cleansing
-
+        """cleansing threshold factor"""
+
         self.frac_cleansing = frac_cleansing
-
+        """keep at least this many background images from the series"""
+
         self.offset_correction = offset_correction
+        """offset/flickering correction"""
 
         # time axis
         self.time = None
@@ -142,48 +146,59 @@ class BackgroundSparseMed(Background):
             self.time = np.linspace(0, dur, self.image_count,
                                     endpoint=True)
 
-        #: duration of the measurement
         self.duration = self.time[-1] - self.time[0]
+        """duration of the measurement"""
 
         self.step_times = np.arange(0, self.duration, self.split_time)
-
+
         self.bg_images = np.zeros((self.step_times.size,
                                    self.image_shape[0],
                                    self.image_shape[1]),
                                   dtype=np.uint8)
+        """array containing all background images"""
 
-        #: mp.RawArray for temporary batch input data
         self.shared_input_raw = mp_spawn.RawArray(
             np.ctypeslib.ctypes.c_uint8,
             int(np.prod(self.image_shape)) * kernel_size)
-
+        """mp.RawArray for temporary batch input data"""
+
         self.shared_output_raw = mp_spawn.RawArray(
             np.ctypeslib.ctypes.c_uint8,
             int(np.prod(self.image_shape)))
+        """mp.RawArray for the median background image"""
+
         # Convert the RawArray to something we can write to fast
         # (similar to memoryview, but without having to cast) using
         # np.ctypeslib.as_array. See discussion in
         # https://stackoverflow.com/questions/37705974
-        #: numpy array reshaped view on `self.shared_input_raw` with
-        #: first axis enumerating the events
         self.shared_input = np.ctypeslib.as_array(
             self.shared_input_raw).reshape(kernel_size, -1)
+        """numpy array reshaped view on `self.shared_input_raw`.
+        The First axis enumerating the events
+        """
+
         self.shared_output = np.ctypeslib.as_array(
             self.shared_output_raw).reshape(self.image_shape)
-
+        """numpy array reshaped view on `self.shared_output_raw`.
+        The First axis enumerating the events
+        """
+
         self.pool = mp_spawn.Pool(processes=self.num_cpus)
+        """multiprocessing pool for parallel processing"""
 
-        #: counter tracking process of workers
         self.worker_counter = mp_spawn.Value("l", 0)
-
+        """counter tracking process of workers"""
+
         self.queue = mp_spawn.Queue()
-
+        """queue for median computation jobs"""
+
         self.workers = [WorkerSparseMed(self.queue,
                                         self.worker_counter,
                                         self.shared_input_raw,
                                         self.shared_output_raw,
                                         self.kernel_size)
                        for _ in range(self.num_cpus)]
+        """list of workers (processes)"""
         [w.start() for w in self.workers]
 
     def __enter__(self):
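The docstring above summarizes the idea: one median image per ``split_time`` window rather than per rolling window. A hedged single-process sketch of that series (cleansing, offset correction, and the shared-memory workers are omitted; names are illustrative):

```python
import numpy as np

def sparse_median_backgrounds(images, time, split_time=1.0, kernel_size=200):
    """Sketch: one median background per split_time step of the series."""
    duration = time[-1] - time[0]
    step_times = np.arange(0, duration, split_time)
    bg_images = np.zeros((step_times.size,) + images.shape[1:],
                         dtype=np.uint8)
    for ii, t0 in enumerate(step_times):
        start = np.searchsorted(time, time[0] + t0)  # first frame at/after t0
        # per-pixel median over the (larger) kernel window at this time stamp
        bg_images[ii] = np.median(images[start:start + kernel_size], axis=0)
    return step_times, bg_images
```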
dcnum/feat/feat_contour/volume.py
CHANGED
@@ -35,7 +35,7 @@ def volume_from_contours(
     average is then used.
 
     The volume is computed radially from the center position
-    given by (
+    given by (``pos_x``, ``pos_y``). For sufficiently smooth contours,
     such as densely sampled ellipses, the center position does not
     play an important role. For contours that are given on a coarse
     grid, as is the case for deformability cytometry, the center position
@@ -111,7 +111,7 @@ def vol_revolve(r, z, point_scale=1.):
        V = \frac{h \cdot \pi}{3} \cdot (R^2 + R \cdot r + r^2)
 
    Where :math:`h` is the height of the cone and :math:`r` and
-
+    ``R`` are the smaller and larger radii of the truncated cone.
 
    Each line segment of the contour resembles one truncated cone. If
    the z-step is positive (counter-clockwise contour), then the
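The truncated-cone formula translates directly to NumPy. The following sketch sums the segment volumes as the docstring describes; it assumes contour input as radial/axial coordinate arrays and is not dcnum's `vol_revolve`:

```python
import numpy as np

def vol_revolve_sketch(r, z, point_scale=1.0):
    """Sum of truncated cones, V = h*pi/3 * (R^2 + R*r + r^2) per segment."""
    r = np.asarray(r, dtype=float) * point_scale
    z = np.asarray(z, dtype=float) * point_scale
    h = np.diff(z)              # signed cone heights along the axis
    r1, r2 = r[:-1], r[1:]      # the two radii of each truncated cone
    return np.sum(h * np.pi / 3 * (r1**2 + r1 * r2 + r2**2))
```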
dcnum/feat/gate.py
CHANGED
@@ -9,8 +9,8 @@ from ..meta.ppid import kwargs_to_ppid, ppid_to_kwargs
 
 
 class Gate:
-    #: the default value for `size_thresh_mask` if not given as kwarg
     _default_size_thresh_mask = 10
+    """the default value for `size_thresh_mask` if not given as kwarg"""
 
     def __init__(self, data, *,
                  online_gates: bool = False,
@@ -19,7 +19,7 @@ class Gate:
 
         Parameters
         ----------
-        data: .HDF5Data
+        data: .hdf5_data.HDF5Data
             dcnum data instance
         online_gates: bool
             set to True to enable gating with "online" gates stored
@@ -27,14 +27,14 @@ class Gate:
             deformability cytometry before writing data to disk during
             a measurement
         size_thresh_mask: int
-            Only masks with more pixels than
+            Only masks with more pixels than ``size_thresh_mask`` are
             considered to be a valid event; Originally, the
-
+            ``bin area min / trig_thresh`` value defaulted to 200 which is
             too large; defaults to 10 or the original value in case
-
+            ``online_gates`` is set.
         """
-        #: box gating (value range for each feature)
         self.box_gates = {}
+        """box gating (value range for each feature)"""
 
         if online_gates:
             # Deal with online gates.
@@ -46,13 +46,13 @@ class Gate:
             size_thresh_mask = data.meta_nest.get(
                 "online_contour", {}).get("bin area min")
 
-        #: gating keyword arguments
         self.kwargs = {
             "online_gates": online_gates,
             # Set the size threshold, defaulting to `_default_size_thresh_mask`
             "size_thresh_mask":
                 size_thresh_mask or self._default_size_thresh_mask
        }
+        """gating keyword arguments"""
 
     def _extract_online_gates(self, data):
         ogates = {}
@@ -168,10 +168,10 @@ class Gate:
                      data: numbers.Number | np.ndarray):
         """Return boolean indicating whether `data` value is in box gate
 
-
-        for
-        or a boolean array is returned, depending on the type of
-        Not that
+        ``data`` may be a number or an array. If no box filter is defined
+        for ``feat``, True is always returned. Otherwise, either a boolean
+        or a boolean array is returned, depending on the type of ``data``.
+        Not that ``np.logical_and`` can deal with mixed argument types
        (scalar and array).
        """
        bound_lo, bound_up = self.box_gates[feat]
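The mixed scalar/array behavior that the corrected docstring describes comes from NumPy broadcasting; a small illustration with a hypothetical box gate:

```python
import numpy as np

bound_lo, bound_up = 0.0, 0.1      # illustrative box gate for one feature
scalar = 0.05
array = np.array([0.05, 0.2, -0.1])

# np.logical_and handles scalar and array arguments alike
print(np.logical_and(scalar >= bound_lo, scalar <= bound_up))  # True
print(np.logical_and(array >= bound_lo, array <= bound_up))    # [ True False False]
```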
dcnum/feat/queue_event_extractor.py
CHANGED
@@ -12,7 +12,7 @@ import numpy as np
 
 from ..os_env_st import RequestSingleThreaded, confirm_single_threaded
 from ..meta.ppid import kwargs_to_ppid, ppid_to_kwargs
-from ..read import HDF5Data
+from ..read.hdf5_data import HDF5Data
 
 from .feat_brightness import brightness_features
 from .feat_contour import moments_based_features, volume_from_contours
@@ -48,9 +48,9 @@ class QueueEventExtractor:
 
     Parameters
     ----------
-    data: HDF5Data
+    data: .hdf5_data.HDF5Data
        Data source.
-    gate: Gate
+    gate: .gate.Gate
        Gating rules.
    raw_queue:
        Queue from which the worker obtains the chunks and
@@ -84,41 +84,57 @@ class QueueEventExtractor:
            The index to increment values in `worker_monitor`
        """
        super(QueueEventExtractor, self).__init__(*args, **kwargs)
-
+
        self.worker_index = worker_index or 0
-
+        """Worker index for populating"""
+
        self.data = data
-
+        """Data instance"""
+
        self.gate = gate
-
+        """Gating information"""
+
        self.raw_queue = raw_queue
-
+        """queue containing sub-indices for ``label_array``"""
+
        self.event_queue = event_queue
-
+        """queue with event-wise feature dictionaries"""
+
        self.log_queue = log_queue
-
+        """queue for logging"""
+
        self.invalid_mask_counter = invalid_mask_counter
-
+        """invalid mask counter"""
+
        self.worker_monitor = worker_monitor
+        """worker busy counter"""
+
        # Logging needs to be set up after `start` is called, otherwise
        # it looks like we have the same PID as the parent process. We
        # are setting up logging in `run`.
        self.logger = None
        self.log_level = log_level or logging.getLogger("dcnum").level
-
-        #: events per frame is written.
+
        self.feat_nevents = feat_nevents
-
+        """Number of events per frame
+        Shared array of length `len(data)` into which the number of
+        events per frame is written.
+        """
+
        self.label_array = label_array
-
+        """Shared array containing the labels of one chunk from `data`."""
+
        self.finalize_extraction = finalize_extraction
+        """Set to True to let worker join when `raw_queue` is empty."""
+
        # Keyword arguments for data extraction
        if extract_kwargs is None:
            extract_kwargs = {}
        extract_kwargs.setdefault("brightness", True)
        extract_kwargs.setdefault("haralick", True)
-
+
        self.extract_kwargs = extract_kwargs
+        """Feature extraction keyword arguments."""
 
    @staticmethod
    def get_init_kwargs(data: HDF5Data,
@@ -127,18 +143,19 @@ class QueueEventExtractor:
                        log_queue: mp.Queue,
                        log_level: int = None,
                        ):
-        """Get initialization arguments for :
+        """Get initialization arguments for :class:`.QueueEventExtractor`
 
        This method was created for convenience reasons:
+
        - It makes sure that the order of arguments is correct, since it
          is implemented in the same class.
        - It simplifies testing.
 
        Parameters
        ----------
-        data: HDF5Data
+        data: .hdf5_data.HDF5Data
            Input data
-        gate:
+        gate: .gate.Gate
            Gating class to use
        num_extractors: int
            Number of extractors that will be used
@@ -330,9 +347,11 @@ class QueueEventExtractor:
        self.worker_monitor[self.worker_index] = 0
        # Don't wait for these two queues when joining workers
        self.raw_queue.cancel_join_thread()
-
+
        self.logger = logging.getLogger(
            f"dcnum.feat.EventExtractor.{os.getpid()}")
+        """logger that sends all logs to `self.log_queue`"""
+
        self.logger.setLevel(self.log_level)
        # Clear any handlers that might be set for this logger. This is
        # important for the case when we are an instance of
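The queues documented above follow a common worker protocol: pull a (chunk, index) job from `raw_queue`, push per-event feature dictionaries to `event_queue`. A toy sketch of that protocol (the sentinel and the placeholder feature dict are assumptions, not dcnum's wire format):

```python
import multiprocessing as mp

def extractor_worker(raw_queue: mp.Queue, event_queue: mp.Queue):
    """Toy version of the raw_queue/event_queue handshake."""
    while True:
        job = raw_queue.get()
        if job is None:                 # sentinel: extraction finalized
            break
        chunk, index = job              # which chunk/label index to work on
        events = {"mask_index": index}  # placeholder for real features
        event_queue.put((chunk, index, events))
```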
dcnum/logic/ctrl.py
CHANGED
@@ -39,8 +39,6 @@ from .json_encoder import ExtendedJSONEncoder
 # queues and threads and would end up with race conditions otherwise.
 mp_spawn = mp.get_context("spawn")
 
-#: valid states for a job runnter. The states must be in logical ordern,
-#: not in alphabetical order.
 valid_states = [
     "created",
     "init",
@@ -52,6 +50,9 @@ valid_states = [
     "done",
     "error",
 ]
+"""Valid states for a `DCNumJobRunner`.
+The states must be in logical order, not in alphabetical order.
+"""
 
 
 class DCNumJobRunner(threading.Thread):
@@ -59,11 +60,11 @@ class DCNumJobRunner(threading.Thread):
                  job: DCNumPipelineJob,
                  tmp_suffix: str = None,
                  *args, **kwargs):
-        """Run a pipeline as defined by a :class
+        """Run a pipeline as defined by a :class:`.job.DCNumPipelineJob`
 
         Parameters
         ----------
-        job: DCNumPipelineJob
+        job: .job.DCNumPipelineJob
             pipeline job to run
         tmp_suffix: str
             optional unique string for creating temporary files
dcnum/logic/job.py
CHANGED
@@ -67,6 +67,7 @@ class DCNumPipelineJob:
         strategy on how to handle event data; In principle, not all
         events have to be stored in the output file if basins are
         defined, linking back to the original file.
+
         - You can "drain" all basins which means that the output file
           will contain all features, but will also be very big.
         - You can "tap" the basins, including the input file, which means
@@ -89,8 +90,9 @@ class DCNumPipelineJob:
         else:
             basin_strategy = "tap"
 
-        #: initialize keyword arguments for this job
         self.kwargs = {}
+        """initialize keyword arguments for this job"""
+
         spec = inspect.getfullargspec(DCNumPipelineJob.__init__)
         locs = locals()
         for arg in spec.args:
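The `inspect.getfullargspec` pattern visible in the hunk records every named `__init__` argument in a dict for reproducibility. A self-contained sketch of the same pattern with made-up arguments:

```python
import inspect

class JobSketch:
    """Sketch of the kwargs-capture pattern used above (illustrative args)."""
    def __init__(self, path_in=None, basin_strategy="tap", debug=False):
        self.kwargs = {}
        spec = inspect.getfullargspec(JobSketch.__init__)
        locs = locals()
        for arg in spec.args:
            if arg != "self":            # record each named argument
                self.kwargs[arg] = locs[arg]

print(JobSketch(debug=True).kwargs)
# {'path_in': None, 'basin_strategy': 'tap', 'debug': True}
```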
dcnum/logic/json_encoder.py
CHANGED
@@ -7,6 +7,15 @@ import numpy as np
 
 class ExtendedJSONEncoder(json.JSONEncoder):
     def default(self, obj):
+        """Extended JSON encoder for the **dcnum** logic
+
+        This JSON encoder can handle the following additional objects:
+
+        - ``pathlib.Path``
+        - integer numbers
+        - ``numpy`` boolean
+        - slices (via "PYTHON-SLICE" identifier)
+        """
         if isinstance(obj, pathlib.Path):
             return str(obj)
         elif isinstance(obj, numbers.Integral):
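Based on the new docstring, an encoder of this shape can be sketched as follows; the exact "PYTHON-SLICE" representation below is an assumption about the format, not a statement of dcnum's actual encoding:

```python
import json
import numbers
import pathlib

import numpy as np

class ExtendedJSONEncoderSketch(json.JSONEncoder):
    """Sketch of the pattern described in the docstring above."""
    def default(self, obj):
        if isinstance(obj, pathlib.Path):
            return str(obj)
        elif isinstance(obj, numbers.Integral):
            return int(obj)
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, slice):
            # hypothetical list representation tagged with "PYTHON-SLICE"
            return ["PYTHON-SLICE", obj.start, obj.stop, obj.step]
        return super().default(obj)

print(json.dumps({"path": pathlib.Path("/tmp/x"), "roi": slice(0, 5)},
                 cls=ExtendedJSONEncoderSketch))
```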
dcnum/meta/paths.py
CHANGED
dcnum/meta/ppid.py
CHANGED
@@ -9,9 +9,11 @@ import warnings
 
 import numpy as np
 
-#: Increment this string if there are breaking changes that make
-#: previous pipelines unreproducible.
 DCNUM_PPID_GENERATION = "11"
+"""The dcnum pipeline generation.
+Increment this string if there are breaking changes that make
+previous pipelines unreproducible.
+"""
 
 
 class ClassWithPPIDCapabilities(Protocol):
dcnum/os_env_st.py
CHANGED
@@ -4,7 +4,6 @@ import os
 
 logger = logging.getLogger(__name__)
 
-#: environment variables that set number of threads
 os_env_threading = [
     "MKL_NUM_THREADS",
     "NUMBA_NUM_THREADS",
@@ -14,13 +13,14 @@ os_env_threading = [
     "OPENBLAS_NUM_THREADS",
     "VECLIB_MAXIMUM_THREADS",
 ]
+"""environment variables that define number of threads libraries will use"""
 
 
 class RequestSingleThreaded:
     """Context manager for starting a process with specific environment
 
     When entering the context, the environment variables defined in
-
+    ``os_env_threading`` are all set to "1", telling the relevant libraries
     that they should work in single-threaded mode.
     When exiting the context, these environment variables are reset to
     their original values (or unset if applicable).
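The save/set/restore behavior described in the docstring is a standard environment-variable context manager. A minimal sketch with a shortened variable list (not dcnum's `RequestSingleThreaded`):

```python
import os

class SingleThreadedEnv:
    """Set threading env vars to "1" on entry, restore them on exit."""
    _vars = ["MKL_NUM_THREADS", "OMP_NUM_THREADS", "OPENBLAS_NUM_THREADS"]

    def __enter__(self):
        self._old = {k: os.environ.get(k) for k in self._vars}
        for k in self._vars:
            os.environ[k] = "1"            # request single-threaded mode
        return self

    def __exit__(self, *exc):
        for k, v in self._old.items():
            if v is None:
                os.environ.pop(k, None)    # was unset before entering
            else:
                os.environ[k] = v          # restore the original value
```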
dcnum/read/cache.py
CHANGED
@@ -25,8 +25,10 @@ class BaseImageChunkCache(abc.ABC):
         self._dtype = None
         chunk_size = min(shape[0], chunk_size)
         self._len = self.shape[0]
-
+
         self.cache = collections.OrderedDict()
+        """This is a FILO cache for the chunks"""
+
         self.image_shape = self.shape[1:]
         self.chunk_shape = (chunk_size,) + self.shape[1:]
         self.chunk_size = chunk_size
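A bounded `OrderedDict` chunk cache of the kind referenced above can be sketched in a few lines; the eviction policy shown (drop the oldest entry) is an assumption:

```python
import collections

class ChunkCache:
    """Sketch of a bounded chunk cache backed by an OrderedDict."""
    def __init__(self, max_chunks=2):
        self.cache = collections.OrderedDict()
        self.max_chunks = max_chunks

    def get_chunk(self, index, loader):
        if index not in self.cache:
            self.cache[index] = loader(index)       # load on cache miss
            if len(self.cache) > self.max_chunks:
                self.cache.popitem(last=False)      # evict oldest chunk
        return self.cache[index]
```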
dcnum/read/const.py
CHANGED
@@ -1,5 +1,3 @@
-#: Scalar features that apply to all events in a frame and which are
-#: not computed for individual events.
 PROTECTED_FEATURES = [
     "bg_off",
     "flow_rate",
@@ -10,6 +8,11 @@ PROTECTED_FEATURES = [
     "temp_amb",
     "time",
 ]
+"""Frame-defined scalar features.
+Scalar features that apply to all events in a frame and which are
+not computed for individual events
+"""
+
 
 # User-defined features may be anything, but if the user needs something
 # very specific for the pipeline, having them protected is a nice feature.
dcnum/read/detect_flicker.py
CHANGED
@@ -17,7 +17,7 @@ def detect_flickering(image_data: np.ndarray | HDF5Data,
     triggering signal.
 
     If flickering is detected, you should use the "sparsemed" background
-    computation with
+    computation with ``offset_correction`` set to True.
 
     Parameters
     ----------
dcnum/segm/segm_torch/segm_torch_base.py
CHANGED
@@ -52,9 +52,10 @@ class TorchSegmenterBase(Segmenter):
     segmenter_kwargs: dict
        Keyword arguments for the segmenter
     meta: dict
-        Dictionary of metadata from an :class
+        Dictionary of metadata from an :class:`.hdf5_data.HDF5Data`
+        instance
     logs: dict
-        Dictionary of logs from an :class
+        Dictionary of logs from an :class:`.hdf5_data.HDF5Data` instance
 
     Returns
     -------
dcnum/segm/segm_torch/torch_postproc.py
CHANGED
@@ -11,6 +11,7 @@ def postprocess_masks(masks,
     """Postprocess mask images from ML segmenters
 
     The transformation includes:
+
     - Revert the cropping and padding operations done in
       :func:`.preprocess_images` by padding with zeros and cropping.
     - If the original image shape is larger than the mask image shape,
dcnum/segm/segm_torch/torch_preproc.py
CHANGED
@@ -11,6 +11,7 @@ def preprocess_images(images: np.ndarray,
     """Transform image data to something torch models expect
 
     The transformation includes:
+
     - normalization (division by 255, subtraction of mean, division by std)
     - cropping and padding of the input images to `image_shape`. For padding,
       the median of each *individual* image is used.
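The two transformation steps listed above (normalize, then pad or crop to a fixed shape using each image's own median) can be sketched as plain NumPy; the mean/std values and the top-left alignment here are illustrative assumptions, not the model's actual constants:

```python
import numpy as np

def preprocess_sketch(images, image_shape=(80, 256), mean=0.5, std=0.25):
    """Sketch of normalization plus per-image-median padding/cropping."""
    images = images.astype(np.float32) / 255.0
    images = (images - mean) / std                 # normalization
    out = np.empty((len(images),) + image_shape, dtype=np.float32)
    for ii, img in enumerate(images):
        # pad with the median of this *individual* image
        canvas = np.full(image_shape, np.median(img), dtype=np.float32)
        h = min(img.shape[0], image_shape[0])      # crop if too large
        w = min(img.shape[1], image_shape[1])
        canvas[:h, :w] = img[:h, :w]
        out[ii] = canvas
    return out
```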
dcnum/segm/segmenter.py
CHANGED
@@ -26,17 +26,23 @@ class SegmenterNotApplicableError(BaseException):
 
 
 class Segmenter(abc.ABC):
-    #: Required hardware ("cpu" or "gpu") defined in first-level subclass.
     hardware_processor = "none"
-
-
-    #: have to call `process_mask` in your `segment_algorithm` implementation.
+    """Required hardware ("cpu" or "gpu") defined in first-level subclass."""
+
     mask_postprocessing = True
-
-
+    """Whether to enable mask post-processing.
+    If disabled, you should make sure that your mask is properly defined
+    and cleaned or you have to call `process_mask` in your
+    ``segment_algorithm`` implementation.
+    """
+
     mask_default_kwargs = {}
-
+    """Default keyword arguments for mask post-processing.
+    See `process_mask` for available options.
+    """
+
     requires_background_correction = False
+    """Whether the segmenter requires a background-corrected image"""
 
     def __init__(self,
                  *,
@@ -46,10 +52,11 @@ class Segmenter(abc.ABC):
         """Base segmenter class
 
         This is the base segmenter class for the multiprocessing operation
-        segmenter :class:`.MPOSegmenter` (multiple
-        and each of them works on a queue of images)
-
-        segmentation on
+        segmenter :class:`.segmenter_mpo.MPOSegmenter` (multiple
+        subprocesses are spawned and each of them works on a queue of images)
+        and the single-threaded operation segmenter
+        :class:`.segmenter_sto.STOSegmenter` (e.g. for batch segmentation on
+        a GPU).
 
         Parameters
         ----------
@@ -64,12 +71,15 @@ class Segmenter(abc.ABC):
         self.logger = logging.getLogger(__name__).getChild(
             self.__class__.__name__)
         spec = inspect.getfullargspec(self.segment_algorithm)
-
+
         self.kwargs = spec.kwonlydefaults or {}
+        """custom keyword arguments for the subclassing segmenter"""
+
         self.kwargs.update(kwargs)
 
-        #: keyword arguments for mask post-processing
         self.kwargs_mask = {}
+        """keyword arguments for mask post-processing"""
+
         if self.mask_postprocessing:
             spec_mask = inspect.getfullargspec(self.process_mask)
             self.kwargs_mask.update(spec_mask.kwonlydefaults or {})
@@ -108,7 +118,7 @@ class Segmenter(abc.ABC):
         KEY:KW_APPROACH:KW_MASK
 
         Where KEY is e.g. "legacy" or "watershed", and KW_APPROACH is a
-        list of keyword arguments for
+        list of keyword arguments for ``segment_algorithm``, e.g.::
 
             thresh=-6^blur=0
 
@@ -296,10 +306,10 @@ class Segmenter(abc.ABC):
 
     @functools.cache
     def segment_algorithm_wrapper(self):
-        """Wraps
+        """Wraps ``self.segment_algorithm`` to only accept an image
 
-        The static method
-        keyword arguments
+        The static method ``self.segment_algorithm`` may optionally accept
+        keyword arguments ``self.kwargs``. This wrapper returns the
        wrapped method that only accepts the image as an argument. This
        makes sense if you want to unify
        """
@@ -336,7 +346,7 @@ class Segmenter(abc.ABC):
            additional background offset values that should be subtracted
            from the image data before segmentation. Should only be
            used in combination with segmenters that have
-
+            ``requires_background_correction`` set to True.
        """
        images = image_data.get_chunk(chunk)
        if bg_off is not None:
@@ -364,9 +374,10 @@ class Segmenter(abc.ABC):
        segmenter_kwargs: dict
            Keyword arguments for the segmenter
        meta: dict
-            Dictionary of metadata from an :class
+            Dictionary of metadata from an :class:`.hdf5_data.HDF5Data`
+            instance
        logs: dict
-            Dictionary of logs from an :class
+            Dictionary of logs from an :class:`.hdf5_data.HDF5Data` instance
 
        Returns
        -------
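The wrapper documented above binds the per-instance keyword arguments once so the algorithm is callable with just an image. A sketch of the pattern (toy thresholding stands in for a real segmentation algorithm; not dcnum's code):

```python
import functools

import numpy as np

class SegmenterSketch:
    """Sketch: bind self.kwargs once, then call with an image only."""

    def __init__(self, **kwargs):
        self.kwargs = {"thresh": -6}
        self.kwargs.update(kwargs)

    @staticmethod
    def segment_algorithm(image, *, thresh=-6):
        return image < thresh          # toy "segmentation"

    @functools.cache
    def segment_algorithm_wrapper(self):
        # returns a callable that only accepts the image as an argument
        return functools.partial(self.segment_algorithm, **self.kwargs)

seg = SegmenterSketch(thresh=-3)
mask = seg.segment_algorithm_wrapper()(np.array([[-5.0, 0.0]]))
print(mask)  # [[ True False]]
```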
dcnum/segm/segmenter_manager_thread.py
CHANGED
@@ -36,8 +36,8 @@ class SegmenterManagerThread(threading.Thread):
         its job for a slot, the slot value will be set to "e" (for
         "task is with feature extractor").
     slot_chunks:
-        For each slot in
-        on which chunk in
+        For each slot in ``slot_states``, this shared array defines
+        on which chunk in ``image_data`` the segmentation took place.
     bg_off:
         1d array containing additional background image offset values
         that are added to each background image before subtraction
@@ -45,10 +45,10 @@ class SegmenterManagerThread(threading.Thread):
 
     Notes
     -----
-    This manager keeps a list
-    slots just like
+    This manager keeps a list ``labels_list`` which enumerates the
+    slots just like ``slot_states` and ``slot_chunks`` do. For each
     slot, this list contains the labeled image data (integer-valued)
-    for the input
+    for the input ``image_data`` chunks.
 
     The working principle of this `SegmenterManagerThread` allows
     the user to define a fixed number of slots on which the segmenter
@@ -61,21 +61,28 @@ class SegmenterManagerThread(threading.Thread):
         super(SegmenterManagerThread, self).__init__(
             name="SegmenterManager", *args, **kwargs)
         self.logger = logging.getLogger("dcnum.segm.SegmenterManagerThread")
-
+
         self.segmenter = segmenter
-
+        """Segmenter instance"""
+
         self.image_data = image_data
-
+        """Image data which is being segmented"""
+
         self.bg_off = (
             bg_off if self.segmenter.requires_background_correction else None)
-
+        """Additional, optional background offset"""
+
         self.slot_states = slot_states
-
+        """Slot states"""
+
         self.slot_chunks = slot_chunks
-
+        """Current slot chunk index for the slot states"""
+
         self.labels_list = [None] * len(self.slot_states)
-
+        """List containing the segmented labels of each slot"""
+
         self.t_count = 0
+        """Time counter for segmentation"""
 
     def run(self):
         num_slots = len(self.slot_states)
dcnum/segm/segmenter_mpo.py
CHANGED
@@ -35,7 +35,7 @@ class MPOSegmenter(Segmenter, abc.ABC):
     debug: bool
         Debugging parameters
     kwargs:
-        Additional, optional keyword arguments for
+        Additional, optional keyword arguments for ``segment_algorithm``
         defined in the subclass.
     """
     super(MPOSegmenter, self).__init__(kwargs_mask=kwargs_mask,
@@ -145,7 +145,7 @@ class MPOSegmenter(Segmenter, abc.ABC):
        """Perform batch segmentation of `images`
 
        Before segmentation, an optional background offset correction with
-
+        ``bg_off`` is performed. After segmentation, mask postprocessing is
        performed according to the class definition.
 
        Parameters
@@ -264,7 +264,7 @@ class MPOSegmenter(Segmenter, abc.ABC):
        """Return the integer label image for an input image
 
        Before segmentation, an optional background offset correction with
-
+        ``bg_off`` is performed. After segmentation, mask postprocessing is
        performed according to the class definition.
        """
        segm_wrap = self.segment_algorithm_wrapper()
@@ -296,7 +296,7 @@ class MPOSegmenterWorker:
 
    Parameters
    ----------
-    segmenter: MPOSegmenter
+    segmenter: .segmenter_mpo.MPOSegmenter
        The segmentation instance
    sl_start: int
        Start of slice of input array to process
dcnum/segm/segmenter_sto.py
CHANGED
@@ -27,7 +27,7 @@ class STOSegmenter(Segmenter, abc.ABC):
     debug: bool
         Debugging parameters
     kwargs:
-        Additional, optional keyword arguments for
+        Additional, optional keyword arguments for ``segment_algorithm``
         defined in the subclass.
     """
     if num_workers not in [None, 1]:
@@ -46,7 +46,7 @@ class STOSegmenter(Segmenter, abc.ABC):
        """Perform batch segmentation of `images`
 
        Before segmentation, an optional background offset correction with
-
+        ``bg_off`` is performed. After segmentation, mask postprocessing is
        performed according to the class definition.
 
        Parameters
dcnum/write/queue_collector_thread.py
CHANGED
@@ -26,23 +26,31 @@ class EventStash:
            List that defines how many events there are for each input
            frame. If summed up, this defines `self.size`.
        """
-        #: Dictionary containing the event arrays
        self.events = {}
-
+        """Dictionary containing the event arrays"""
+
        self.feat_nevents = feat_nevents
-
+        """List containing the number of events per input frame"""
+
        self.nev_idx = np.cumsum(feat_nevents)
-
+        """Cumulative sum of `feat_nevents` for determining sorting offsets"""
+
        self.size = int(np.sum(feat_nevents))
-
+        """Number of events in this stash"""
+
        self.num_frames = len(feat_nevents)
-
+        """Number of frames in this stash"""
+
        self.index_offset = index_offset
-
-
+        """Global offset compared to the original data instance."""
+
        self.indices_for_data = np.zeros(self.size, dtype=np.uint32)
-
+        """Array containing the indices in the original data instance.
+        These indices correspond to the events in `events`.
+        """
+
        self._tracker = np.zeros(self.num_frames, dtype=bool)
+        """Private array that tracks the progress."""
 
    def is_complete(self):
        """Determine whether the event stash is complete (all events added)"""
@@ -141,22 +149,31 @@ class QueueCollectorThread(threading.Thread):
        super(QueueCollectorThread, self).__init__(
            name="QueueCollector", *args, **kwargs)
        self.logger = logging.getLogger("dcnum.write.QueueCollector")
-
+
        self.event_queue = event_queue
-
+        """Event queue from which to collect event data"""
+
        self.writer_dq = writer_dq
-
-
+        """Writer deque to which event arrays are appended"""
+
        self.buffer_dq = deque()
-
-
+        """Buffer deque
+        Events that do not not belong to the current chunk
+        (chunk defined by `write_threshold`) go here.
+        """
+
        self.feat_nevents = feat_nevents
-
+        """shared array containing the number of events
+        for each frame in `data`."""
+
        self.write_threshold = write_threshold
-
+        """Number of frames to send to `writer_dq` at a time."""
+
        self.written_events = 0
-
+        """Number of events sent to `writer_dq`"""
+
        self.written_frames = 0
+        """Number of frames from `data` written to `writer_dq`"""
 
    def run(self):
        # We are not writing to `event_queue` so we can safely cancel
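The `nev_idx = np.cumsum(feat_nevents)` bookkeeping above determines where each frame's events land in the stash arrays; a worked example:

```python
import numpy as np

feat_nevents = [2, 0, 3, 1]          # events per input frame
nev_idx = np.cumsum(feat_nevents)    # [2, 2, 5, 6]
for ii, n in enumerate(feat_nevents):
    start, stop = nev_idx[ii] - n, nev_idx[ii]
    print(f"frame {ii}: events go to slots [{start}:{stop})")
```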
dcnum/write/writer.py
CHANGED
@@ -275,13 +275,13 @@ class HDF5Writer:
     def store_log(self,
                   log: str,
                   data: List[str],
-                  override: bool = False):
+                  override: bool = False) -> h5py.Dataset:
         """Store log data
 
         Store the log data under the key `log`. The `data`
         kwarg must be a list of strings. If the log entry
         already exists, `ValueError` is raised unless
-
+        ``override`` is set to True.
         """
         logs = self.h5.require_group("logs")
         if log in logs:
@@ -290,7 +290,7 @@ class HDF5Writer:
             else:
                 raise ValueError(
                     f"Log '{log}' already exists in {self.h5.filename}!")
-        logs.create_dataset(
+        log_ds = logs.create_dataset(
             name=log,
             data=data,
             shape=(len(data),),
@@ -298,6 +298,7 @@ class HDF5Writer:
             dtype=f"S{max([len(ll) for ll in data])}",
             chunks=True,
             **self.ds_kwds)
+        return log_ds
 
 
     def create_with_basins(
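With the new return value, callers of `store_log` can post-process the created dataset. A minimal standalone sketch in the spirit of the changed method (compression options and empty-`data` handling omitted):

```python
import h5py

def store_log(h5: h5py.File, log: str, data: list) -> h5py.Dataset:
    """Store a list of log strings and return the created dataset."""
    logs = h5.require_group("logs")
    if log in logs:
        raise ValueError(f"Log '{log}' already exists in {h5.filename}!")
    log_ds = logs.create_dataset(
        name=log,
        data=data,
        shape=(len(data),),
        dtype=f"S{max(len(ll) for ll in data)}",  # fixed-length ASCII
        chunks=True)
    return log_ds

with h5py.File("example.h5", "w") as h5:
    ds = store_log(h5, "dcnum-run", ["step 1", "step 2"])
    ds.attrs["generated-by"] = "sketch"  # returned dataset is usable
```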
dcnum-0.25.7.dist-info/RECORD
ADDED
@@ -0,0 +1,57 @@
+dcnum/__init__.py,sha256=hcawIKS7utYiOyVhOAX9t7K3xYzP1b9862VV0b6qSrQ,74
+dcnum/_version.py,sha256=hjm1pvHN0ZX7nlIod7PG7Fba0cyr_wIaCY9stsERSag,513
+dcnum/os_env_st.py,sha256=4psq-gPuWTTQ118kCiTx0Mhoyads4Irn6JSUzZk8gyc,3052
+dcnum/feat/__init__.py,sha256=jUJYWTD3VIoDNKrmryXbjHb1rGwYtK4b7VPWihYgUoo,325
+dcnum/feat/event_extractor_manager_thread.py,sha256=6D3RVYBuH7gOoGZ4Kz74n6fhq7MtlTY26kpSwZRqg3M,7972
+dcnum/feat/gate.py,sha256=EOleB83sOlBjc8bjaZfWwGuxfCcEO5mknuFunlw_j7o,7252
+dcnum/feat/queue_event_extractor.py,sha256=s-sC7Inkly_HYbqT3OPB6zWRvKm4TNOewujt1EuZF4M,16183
+dcnum/feat/feat_background/__init__.py,sha256=OTmMuazHNaSrZb2XW4cnJ6PlgJLbKrPbaidpEixYa0A,341
+dcnum/feat/feat_background/base.py,sha256=N2_CqA1LJwvsfhr3xMPbicVs3DVDXtvlNDZ-uPrJi5w,8749
+dcnum/feat/feat_background/bg_copy.py,sha256=Xfju4DqbrtPyaUf3x-ybrWci5ip_cEbeiEg7Jh56YIY,1182
+dcnum/feat/feat_background/bg_roll_median.py,sha256=rb3JnFzm4aXVLMKdP9wJlRXwTwmKUz2R1RBS9GXPepg,13622
+dcnum/feat/feat_background/bg_sparse_median.py,sha256=PbcTehk46b9l03ZA1mBdyeMPHmGgGN4-59eLy2TzwX8,22634
+dcnum/feat/feat_brightness/__init__.py,sha256=o6AebVlmydwNgVF5kW6ITqJyFreoKrU3Ki_3EC8If-s,155
+dcnum/feat/feat_brightness/bright_all.py,sha256=vf8xaYBdKD24hHUXdkI0_S7nbr7m49KW6gvuWvbHDVg,4545
+dcnum/feat/feat_brightness/common.py,sha256=JX49EszYDmnvoOKXFVV1CalEIWRmOuY5EryNbqGbdac,156
+dcnum/feat/feat_contour/__init__.py,sha256=Td4Hs47kUgJj0VXm3q5ofXhaUWr9QTfVgbwh5EELA-I,163
+dcnum/feat/feat_contour/contour.py,sha256=_qyHCGvylVxruMWafvVbVOzhWGXLoFi10LReNxGcWhY,463
+dcnum/feat/feat_contour/moments.py,sha256=W8sD2X7JqIBq-9nL82hf4Hm2uJkfca8EvAl_hqI_IDg,5109
+dcnum/feat/feat_contour/volume.py,sha256=ScxP_VdLRchDFnYJCR3srUa_stVbP3T4toQX0o-91jk,6645
+dcnum/feat/feat_texture/__init__.py,sha256=6StM9S540UVtdFFR3bHa7nfCTomeVdoo7Uy9CjuTgH0,137
+dcnum/feat/feat_texture/common.py,sha256=COXHpXS-7DMouGu3WF83I76L02Sr7P9re4lxajh6g0E,439
+dcnum/feat/feat_texture/tex_all.py,sha256=_5H3sXYRN0Uq2eUHn3XUyEHkU_tncEqbqJTC-HZcnGY,5198
+dcnum/logic/__init__.py,sha256=7J3GrwJInNQbrLk61HRIV7X7p69TAIbMYpR34hh6u14,177
+dcnum/logic/ctrl.py,sha256=c06ZOUD0T4_FdQDHbigkLAPQTyoeFxhETg-K3W1UyeM,37520
+dcnum/logic/job.py,sha256=MprDL6DwXWmvtGgy7W9A7s2rVRx68ObdJB8mvGFwVcw,7718
+dcnum/logic/json_encoder.py,sha256=wb6uk6EeTkXyrvwtLm9uWe0cfmiBannzcsKLsDLHuQo,843
+dcnum/meta/__init__.py,sha256=AVqRgyKXO1orKnE305h88IBvoZ1oz6X11HN1WP5nGvg,60
+dcnum/meta/paths.py,sha256=aIG39JYbZpOlCbPQIlp0SqGumjbGINYhL2AAoznJt5o,1113
+dcnum/meta/ppid.py,sha256=JInGtwSCsO9nr1E1aishm0k9iQIFB-essBKvv5aBE98,8510
+dcnum/read/__init__.py,sha256=LYHyZHgiNTpjV5oEcty-7Kh5topLpHT_cFlNl-QX8gg,262
+dcnum/read/cache.py,sha256=ChxokVuMaTfi6N6ZbOTWpNYkPgAAYi1lR8nD7JbzjPQ,6497
+dcnum/read/const.py,sha256=x6LfRwWvIxm6nDWlSADVWqDuzMX6bLzy5kQprwLPzA4,496
+dcnum/read/detect_flicker.py,sha256=XVf7nqaHx6weRTtS7KPa5_WRU2flDQIZTbKspeguqdU,1829
+dcnum/read/hdf5_data.py,sha256=Q4sFT1HBrkrKCX1TUaOpibvz8VFj0ETMa9lw_xIF6tw,26360
+dcnum/read/mapped.py,sha256=zU2fYdZfLNHn0rKHxDzBhNFMu4--WWa8nSeE2likyZA,3637
+dcnum/segm/__init__.py,sha256=9cLEAd3JWE8IGqDHV-eSDIYOGBfOepd8OcebtNs8Omk,309
+dcnum/segm/segm_thresh.py,sha256=iVhvIhzO0Gw0t3rXOgH71rOI0CNjJJQq4Gg6BulUhK8,948
+dcnum/segm/segmenter.py,sha256=k130BoriJJ3cXHZjKXkL7rlBFEeAOQI-3Hp6FM_DdvM,15000
+dcnum/segm/segmenter_manager_thread.py,sha256=vMZFBa18oO8OyVB3niy_mtEfKkGOWHEga41E0K-S6Tc,5963
+dcnum/segm/segmenter_mpo.py,sha256=O6G4xzHKNMSmyX9HDXTfl-3f9Fk2filxvVrRIO2D9hg,14117
+dcnum/segm/segmenter_sto.py,sha256=C55orEAZtMowNwtAT_WdSv46n5CzgLFuGq9kwdHc97I,3963
+dcnum/segm/segm_torch/__init__.py,sha256=DtUqJTbj7ybrTbXlwHq1Y4SCzi22rMW9Cus6wX-iU-A,822
+dcnum/segm/segm_torch/segm_torch_base.py,sha256=Z2c9_lZI4qEljQEMXuN_6CpBti57PbbjlDq0NGX3-EU,4514
+dcnum/segm/segm_torch/segm_torch_mpo.py,sha256=GOva6o-6_SppxWD4BeBB3ap1TR-6rIYHavtfIstaYvc,2643
+dcnum/segm/segm_torch/segm_torch_sto.py,sha256=PTOJrP_FkaxZZul8lM4VA2HL3KyxrheDDWWdJbmJdiw,3393
+dcnum/segm/segm_torch/torch_model.py,sha256=5aL6SwSvg1N2gATEGBhP3aA4WTHlvGzQVYuizmh0LrU,3187
+dcnum/segm/segm_torch/torch_postproc.py,sha256=3WUuBvcNyFJF-b-T0MEaj-8yZs-cYKTsq_i55uu5s54,3312
+dcnum/segm/segm_torch/torch_preproc.py,sha256=m4Dd2URdvS7ifA1MkbEkc9d9T30lA_1qbE6P_Gsa0r4,4003
+dcnum/write/__init__.py,sha256=sK79IlvCFIqf2oFABVeyYedMnHOsEIQpxAauEeNO-Tw,273
+dcnum/write/deque_writer_thread.py,sha256=ao7F1yrVKyufgC4rC0Y2_Vt7snuT6KpI7W2qVxcjdhk,1994
+dcnum/write/queue_collector_thread.py,sha256=-p5vrk9cDhtaIMFIu_cCmvlZJafrFkW68uONonMURYo,11617
+dcnum/write/writer.py,sha256=JkVb4KDBV3oo9r3p2yy9wECO1REx7FG0PRBmVWTxJdk,20577
+dcnum-0.25.7.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
+dcnum-0.25.7.dist-info/METADATA,sha256=X3578YE2gN-g5mMPHH8bMnFyU9E64PP_ivRsIHPKcYc,2321
+dcnum-0.25.7.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
+dcnum-0.25.7.dist-info/top_level.txt,sha256=Hmh38rgG_MFTVDpUDGuO2HWTSq80P585Het4COQzFTg,6
+dcnum-0.25.7.dist-info/RECORD,,
dcnum-0.25.6.dist-info/RECORD
DELETED
@@ -1,57 +0,0 @@
-dcnum/__init__.py,sha256=hcawIKS7utYiOyVhOAX9t7K3xYzP1b9862VV0b6qSrQ,74
-dcnum/_version.py,sha256=N1UqtqTPcEuecB4pzTavKGDVtOMZMeCfIAQoIiuZRGY,513
-dcnum/os_env_st.py,sha256=ujEVzn1G5sxZfJSITOMw48e2O_oMFu2VD6oj5QCUFSU,3025
-dcnum/feat/__init__.py,sha256=jUJYWTD3VIoDNKrmryXbjHb1rGwYtK4b7VPWihYgUoo,325
-dcnum/feat/event_extractor_manager_thread.py,sha256=FAxSyRfaNAuBWNplxHngp5h-44s0qIP24XX_oETdfMk,7836
-dcnum/feat/gate.py,sha256=Yhxq80JoRMmQzBxl35C8NT91c9QcmQa-EIKLuxK6WvE,7221
-dcnum/feat/queue_event_extractor.py,sha256=KA7K7fOUbLYRfHjdfmN6mVuPPjCJbWMCHv0-b41lBMs,16038
-dcnum/feat/feat_background/__init__.py,sha256=OTmMuazHNaSrZb2XW4cnJ6PlgJLbKrPbaidpEixYa0A,341
-dcnum/feat/feat_background/base.py,sha256=bQBPvztrku-8YSVk8YBUUNh7MaYcnztgyD2-dQHxpzw,8674
-dcnum/feat/feat_background/bg_copy.py,sha256=PK8x4_Uph-_A6uszZC5uhe1gD1dSRdHnDMEsN0HSGHA,1034
-dcnum/feat/feat_background/bg_roll_median.py,sha256=NqdgVYm-iUhgDZEonZxQrDvh5e26NoryQKCge8pNGoM,13571
-dcnum/feat/feat_background/bg_sparse_median.py,sha256=QrerkPENHkC9PBgivamu-N1Od6-0b61Ohy_oZYL4www,22449
-dcnum/feat/feat_brightness/__init__.py,sha256=o6AebVlmydwNgVF5kW6ITqJyFreoKrU3Ki_3EC8If-s,155
-dcnum/feat/feat_brightness/bright_all.py,sha256=vf8xaYBdKD24hHUXdkI0_S7nbr7m49KW6gvuWvbHDVg,4545
-dcnum/feat/feat_brightness/common.py,sha256=JX49EszYDmnvoOKXFVV1CalEIWRmOuY5EryNbqGbdac,156
-dcnum/feat/feat_contour/__init__.py,sha256=Td4Hs47kUgJj0VXm3q5ofXhaUWr9QTfVgbwh5EELA-I,163
-dcnum/feat/feat_contour/contour.py,sha256=_qyHCGvylVxruMWafvVbVOzhWGXLoFi10LReNxGcWhY,463
-dcnum/feat/feat_contour/moments.py,sha256=W8sD2X7JqIBq-9nL82hf4Hm2uJkfca8EvAl_hqI_IDg,5109
-dcnum/feat/feat_contour/volume.py,sha256=xVHWtv6USUHJZ5dM1Ur7fI7OwoPT5N2Ps0gKVWylfl8,6639
-dcnum/feat/feat_texture/__init__.py,sha256=6StM9S540UVtdFFR3bHa7nfCTomeVdoo7Uy9CjuTgH0,137
-dcnum/feat/feat_texture/common.py,sha256=COXHpXS-7DMouGu3WF83I76L02Sr7P9re4lxajh6g0E,439
-dcnum/feat/feat_texture/tex_all.py,sha256=_5H3sXYRN0Uq2eUHn3XUyEHkU_tncEqbqJTC-HZcnGY,5198
-dcnum/logic/__init__.py,sha256=7J3GrwJInNQbrLk61HRIV7X7p69TAIbMYpR34hh6u14,177
-dcnum/logic/ctrl.py,sha256=RZ2xl0496Iv91cdCdbkIg2W1fClqsTUDq7YlHHwWQfk,37514
-dcnum/logic/job.py,sha256=9BN2WjYqjjJuLnfNZAtQ2Nn47Glo2jVrivDodGJoqlQ,7713
-dcnum/logic/json_encoder.py,sha256=cxMnqisbKEVf-rVcw6rK2BBAb6iz_hKFaGl81kK36lQ,571
-dcnum/meta/__init__.py,sha256=AVqRgyKXO1orKnE305h88IBvoZ1oz6X11HN1WP5nGvg,60
-dcnum/meta/paths.py,sha256=J_ikeHzd7gEeRgAKjuayz3x6q4h1fOiDadM-ZxhAGm4,1053
-dcnum/meta/ppid.py,sha256=RnDkJSdV1kDznAsOhQN5WI7uC9UwSMCjyADP7yWNvkM,8478
-dcnum/read/__init__.py,sha256=LYHyZHgiNTpjV5oEcty-7Kh5topLpHT_cFlNl-QX8gg,262
-dcnum/read/cache.py,sha256=LNA5nnDyrw8Nj07E7XfG2GcHEoWm6vA6Qo_8N-n-sGw,6492
-dcnum/read/const.py,sha256=GG9iyXDtEldvJYOBnhZjlimzIeBMAt4bSr2-xn2gzzc,464
-dcnum/read/detect_flicker.py,sha256=CeUyxI6LaX_lCNvBPm_yzsiWmiNcZYqbNZCtvKPdkcU,1827
-dcnum/read/hdf5_data.py,sha256=Q4sFT1HBrkrKCX1TUaOpibvz8VFj0ETMa9lw_xIF6tw,26360
-dcnum/read/mapped.py,sha256=zU2fYdZfLNHn0rKHxDzBhNFMu4--WWa8nSeE2likyZA,3637
-dcnum/segm/__init__.py,sha256=9cLEAd3JWE8IGqDHV-eSDIYOGBfOepd8OcebtNs8Omk,309
-dcnum/segm/segm_thresh.py,sha256=iVhvIhzO0Gw0t3rXOgH71rOI0CNjJJQq4Gg6BulUhK8,948
-dcnum/segm/segmenter.py,sha256=FWLFDBR-x_85ku2rObA2F-QBrM4IUaUL-YHChLagVvM,14902
-dcnum/segm/segmenter_manager_thread.py,sha256=frM0sMxC7f7TQiFjmpRxuwG2kUBFpW1inV8dtpADHiI,5924
-dcnum/segm/segmenter_mpo.py,sha256=XcYMKTnCu6-D-TJ62V18S3OE9DhaPhqFkhGhUaDWJFg,14096
-dcnum/segm/segmenter_sto.py,sha256=e6MtN_RWusA0wTExV-FLGpDXNJs1CbSyXcSdWUPBMvM,3959
-dcnum/segm/segm_torch/__init__.py,sha256=DtUqJTbj7ybrTbXlwHq1Y4SCzi22rMW9Cus6wX-iU-A,822
-dcnum/segm/segm_torch/segm_torch_base.py,sha256=G9AhVyD6LkAmk0tkbYnJUSpvcj3_HYf0uqfILZQsyus,4479
-dcnum/segm/segm_torch/segm_torch_mpo.py,sha256=GOva6o-6_SppxWD4BeBB3ap1TR-6rIYHavtfIstaYvc,2643
-dcnum/segm/segm_torch/segm_torch_sto.py,sha256=PTOJrP_FkaxZZul8lM4VA2HL3KyxrheDDWWdJbmJdiw,3393
-dcnum/segm/segm_torch/torch_model.py,sha256=5aL6SwSvg1N2gATEGBhP3aA4WTHlvGzQVYuizmh0LrU,3187
-dcnum/segm/segm_torch/torch_postproc.py,sha256=ctirQTmsZnuZGIxkwFWN9arRneHRYJUxaJ_ZyCgjByM,3311
-dcnum/segm/segm_torch/torch_preproc.py,sha256=kjabu76paw23kO7RP7Ik6IY60Kk1VBAHKBAedflA0aQ,4002
-dcnum/write/__init__.py,sha256=sK79IlvCFIqf2oFABVeyYedMnHOsEIQpxAauEeNO-Tw,273
-dcnum/write/deque_writer_thread.py,sha256=ao7F1yrVKyufgC4rC0Y2_Vt7snuT6KpI7W2qVxcjdhk,1994
-dcnum/write/queue_collector_thread.py,sha256=KwwNIDFEF2DU83woKES5K05MxxOhDxPMZLLeyPugfDo,11542
-dcnum/write/writer.py,sha256=oHlq4bDHQxb33-3Fw1xnzJwACecLyH-6koGK8SN0cSk,20528
-dcnum-0.25.6.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
-dcnum-0.25.6.dist-info/METADATA,sha256=1hcNBNKwW1nri1olDXf1Rb9jWMCvL2327enWOvMx4s0,2321
-dcnum-0.25.6.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-dcnum-0.25.6.dist-info/top_level.txt,sha256=Hmh38rgG_MFTVDpUDGuO2HWTSq80P585Het4COQzFTg,6
-dcnum-0.25.6.dist-info/RECORD,,
File without changes
|
|
File without changes
|