dcnum 0.13.2-py3-none-any.whl → 0.23.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of dcnum might be problematic.

Files changed (55)
  1. dcnum/_version.py +2 -2
  2. dcnum/feat/__init__.py +2 -1
  3. dcnum/feat/event_extractor_manager_thread.py +67 -33
  4. dcnum/feat/feat_background/__init__.py +3 -12
  5. dcnum/feat/feat_background/base.py +80 -65
  6. dcnum/feat/feat_background/bg_copy.py +31 -0
  7. dcnum/feat/feat_background/bg_roll_median.py +38 -30
  8. dcnum/feat/feat_background/bg_sparse_median.py +96 -45
  9. dcnum/feat/feat_brightness/__init__.py +1 -0
  10. dcnum/feat/feat_brightness/bright_all.py +41 -6
  11. dcnum/feat/feat_contour/__init__.py +4 -0
  12. dcnum/feat/{feat_moments/mt_legacy.py → feat_contour/moments.py} +32 -8
  13. dcnum/feat/feat_contour/volume.py +174 -0
  14. dcnum/feat/feat_texture/__init__.py +1 -0
  15. dcnum/feat/feat_texture/tex_all.py +28 -1
  16. dcnum/feat/gate.py +92 -70
  17. dcnum/feat/queue_event_extractor.py +139 -70
  18. dcnum/logic/__init__.py +5 -0
  19. dcnum/logic/ctrl.py +794 -0
  20. dcnum/logic/job.py +184 -0
  21. dcnum/logic/json_encoder.py +19 -0
  22. dcnum/meta/__init__.py +1 -0
  23. dcnum/meta/paths.py +30 -0
  24. dcnum/meta/ppid.py +66 -9
  25. dcnum/read/__init__.py +1 -0
  26. dcnum/read/cache.py +109 -77
  27. dcnum/read/const.py +6 -4
  28. dcnum/read/hdf5_data.py +190 -31
  29. dcnum/read/mapped.py +87 -0
  30. dcnum/segm/__init__.py +6 -15
  31. dcnum/segm/segm_thresh.py +7 -14
  32. dcnum/segm/segm_torch/__init__.py +19 -0
  33. dcnum/segm/segm_torch/segm_torch_base.py +125 -0
  34. dcnum/segm/segm_torch/segm_torch_mpo.py +71 -0
  35. dcnum/segm/segm_torch/segm_torch_sto.py +88 -0
  36. dcnum/segm/segm_torch/torch_model.py +95 -0
  37. dcnum/segm/segm_torch/torch_postproc.py +93 -0
  38. dcnum/segm/segm_torch/torch_preproc.py +114 -0
  39. dcnum/segm/segmenter.py +245 -96
  40. dcnum/segm/segmenter_manager_thread.py +39 -28
  41. dcnum/segm/{segmenter_cpu.py → segmenter_mpo.py} +137 -43
  42. dcnum/segm/segmenter_sto.py +110 -0
  43. dcnum/write/__init__.py +3 -1
  44. dcnum/write/deque_writer_thread.py +15 -5
  45. dcnum/write/queue_collector_thread.py +14 -17
  46. dcnum/write/writer.py +225 -55
  47. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/METADATA +4 -2
  48. dcnum-0.23.1.dist-info/RECORD +55 -0
  49. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/WHEEL +1 -1
  50. dcnum/feat/feat_moments/__init__.py +0 -3
  51. dcnum/segm/segmenter_gpu.py +0 -45
  52. dcnum-0.13.2.dist-info/RECORD +0 -40
  53. /dcnum/feat/{feat_moments/ct_opencv.py → feat_contour/contour.py} +0 -0
  54. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/LICENSE +0 -0
  55. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/top_level.txt +0 -0
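The file list already signals the structural changes that affect downstream imports: the CPU segmenter module was renamed (segmenter_cpu.py → segmenter_mpo.py), feat_moments moved to feat_contour, and a new dcnum.logic subpackage appeared. A hedged sketch of the import adjustment implied by the segmenter rename follows; the absolute module path is inferred from the file list only, and whether these names are also re-exported from dcnum.segm is not shown in this diff (the class rename CPUSegmenter → MPOSegmenter is confirmed by the import change in segmenter_manager_thread.py further below).

# dcnum 0.13.2 (old module/class, removed in this release)
# from dcnum.segm.segmenter_cpu import CPUSegmenter

# dcnum 0.23.1 (new module/class per the diff below)
from dcnum.segm.segmenter_mpo import MPOSegmenter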
dcnum/segm/segmenter.py CHANGED
@@ -1,43 +1,69 @@
 import abc
+import copy
 import functools
 import inspect
 import logging
+from typing import Dict
 
 import cv2
 import numpy as np
 import scipy.ndimage as ndi
 from skimage import morphology
 
-from ..meta.ppid import kwargs_to_ppid
+from ..meta.ppid import kwargs_to_ppid, ppid_to_kwargs
+
+
+class SegmenterNotApplicableError(BaseException):
+    """Used to indicate when a dataset cannot be segmented with a segmenter"""
+    def __init__(self, segmenter_class, reasons_list):
+        super(SegmenterNotApplicableError, self).__init__(
+            f"The dataset cannot be segmented with the "
+            f"'{segmenter_class.get_ppid_code()}' segmenter: "
+            f"{', '.join(reasons_list)}"
+        )
+        self.reasons_list = reasons_list
+        self.segmenter_class = segmenter_class
 
 
 class Segmenter(abc.ABC):
+    #: Required hardware ("cpu" or "gpu") defined in first-level subclass.
+    hardware_processor = "none"
     #: Whether to enable mask post-processing. If disabled, you should
     #: make sure that your mask is properly defined and cleaned or you
-    #: have to call `process_mask` in your `segment_approach` implementation.
-    mask_postprocessing = False
+    #: have to call `process_mask` in your `segment_algorithm` implementation.
+    mask_postprocessing = True
     #: Default keyword arguments for mask post-processing. See `process_mask`
     #: for available options.
     mask_default_kwargs = {}
     #: If the segmenter requires a background-corrected image, set this to True
     requires_background_correction = False
 
-    def __init__(self, kwargs_mask=None, debug=False, **kwargs):
-        """Base segemnter
+    def __init__(self,
+                 *,
+                 kwargs_mask: Dict = None,
+                 debug: bool = False,
+                 **kwargs):
+        """Base segmenter class
+
+        This is the base segmenter class for the multiprocessing operation
+        segmenter :class:`.MPOSegmenter` (multiple subprocesses are spawned
+        and each of them works on a queue of images) and the single-threaded
+        operation segmenter :class:`.STOSegmenter` (e.g. for batch
+        segmentation on a GPU).
 
         Parameters
         ----------
-        data: HDF5Data
-            Instance containing the raw data. Requires at least the
-            `image` and `image_bg` attributes. Some segemnters require
-            more properties, so make sure to use :class:`.HDF5Data`.
         kwargs_mask: dict
             Keyword arguments for mask post-processing (see `process_mask`)
+        debug: bool
+            Enable debugging mode (e.g. CPU segmenter runs in one thread)
+        kwargs:
+            Additional, optional keyword arguments for `segment_batch`.
         """
         self.debug = debug
         self.logger = logging.getLogger(__name__).getChild(
             self.__class__.__name__)
-        spec = inspect.getfullargspec(self.segment_approach)
+        spec = inspect.getfullargspec(self.segment_algorithm)
         #: custom keyword arguments for the subclassing segmenter
         self.kwargs = spec.kwonlydefaults or {}
         self.kwargs.update(kwargs)
@@ -55,19 +81,26 @@ class Segmenter(abc.ABC):
                 "`kwargs_mask` has been specified, but mask post-processing "
                 f"is disabled for segmenter {self.__class__}")
 
-    @classmethod
-    def key(cls):
-        """The unique key/name of this segmenter class"""
-        key = cls.__name__.lower()
-        if key.startswith("segment"):
-            key = key[7:]
-        return key
+    @staticmethod
+    @functools.cache
+    def get_border(shape):
+        """Cached boolean image with outer pixels set to True"""
+        border = np.zeros(shape, dtype=bool)
+        border[[0, -1], :] = True
+        border[:, [0, -1]] = True
+        return border
+
+    @staticmethod
+    @functools.cache
+    def get_disk(radius):
+        """Cached `skimage.morphology.disk(radius)`"""
+        return morphology.disk(radius)
 
     def get_ppid(self):
         """Return a unique segmentation pipeline identifier
 
         The pipeline identifier is universally applicable and must
-        be backwards-compatible (future versions of dcevent will
+        be backwards-compatible (future versions of dcnum will
         correctly acknowledge the ID).
 
         The segmenter pipeline ID is defined as::
@@ -75,7 +108,7 @@ class Segmenter(abc.ABC):
             KEY:KW_APPROACH:KW_MASK
 
         Where KEY is e.g. "legacy" or "watershed", and KW_APPROACH is a
-        list of keyword arguments for `segment_approach`, e.g.::
+        list of keyword arguments for `segment_algorithm`, e.g.::
 
             thresh=-6^blur=0
 
@@ -85,36 +118,75 @@ class Segmenter(abc.ABC):
 
         KW_MASK represents keyword arguments for `process_mask`.
         """
-        return self.get_ppid_from_kwargs(self.kwargs, self.kwargs_mask)
+        return self.get_ppid_from_ppkw(self.kwargs, self.kwargs_mask)
+
+    @classmethod
+    def get_ppid_code(cls):
+        """The unique code/name of this segmenter class"""
+        code = cls.__name__.lower()
+        if code.startswith("segment"):
+            code = code[7:]
+        return code
 
     @classmethod
-    def get_ppid_from_kwargs(cls, kwargs, kwargs_mask=None):
+    def get_ppid_from_ppkw(cls, kwargs, kwargs_mask=None):
         """Return the pipeline ID from given keyword arguments
 
         See Also
         --------
         get_ppid: Same method for class instances
         """
-        if kwargs_mask is None and kwargs.get("kwargs_mask", None) is None:
-            raise KeyError("`kwargs_mask` must be either specified as "
-                           "keyword argument to this method or as a key "
-                           "in `kwargs`!")
-        if kwargs_mask is None:
-            # see check above (kwargs_mask may also be {})
-            kwargs_mask = kwargs["kwargs_mask"]
-        # Start with the default mask kwargs defined for this subclass
-        kwargs_mask_used = cls.mask_default_kwargs
-        kwargs_mask_used.update(kwargs_mask)
-        key = cls.key()
-        csegm = kwargs_to_ppid(cls, "segment_approach", kwargs)
-        cmask = kwargs_to_ppid(cls, "process_mask", kwargs_mask_used)
-        return ":".join([key, csegm, cmask])
+        kwargs = copy.deepcopy(kwargs)
+        if cls.mask_postprocessing:
+            if kwargs_mask is None and kwargs.get("kwargs_mask", None) is None:
+                raise KeyError("`kwargs_mask` must be either specified as "
+                               "keyword argument to this method or as a key "
+                               "in `kwargs`!")
+            if kwargs_mask is None:
+                # see check above (kwargs_mask may also be {})
+                kwargs_mask = kwargs.pop("kwargs_mask")
+            # Start with the default mask kwargs defined for this subclass
+            kwargs_mask_used = copy.deepcopy(cls.mask_default_kwargs)
+            kwargs_mask_used.update(kwargs_mask)
+        elif kwargs_mask:
+            raise ValueError(f"The segmenter '{cls.__name__}' does not "
+                             f"support mask postprocessing, but 'kwargs_mask' "
+                             f"was provided: {kwargs_mask}")
+
+        ppid_parts = [
+            cls.get_ppid_code(),
+            kwargs_to_ppid(cls, "segment_algorithm", kwargs),
+        ]
+
+        if cls.mask_postprocessing:
+            ppid_parts.append(
+                kwargs_to_ppid(cls, "process_mask", kwargs_mask_used))
+
+        return ":".join(ppid_parts)
 
     @staticmethod
-    @functools.cache
-    def get_disk(radius):
-        """Cached `skimage.morphology.disk(radius)`"""
-        return morphology.disk(radius)
+    def get_ppkw_from_ppid(segm_ppid):
+        """Return keyword arguments for this pipeline identifier"""
+        ppid_parts = segm_ppid.split(":")
+        code = ppid_parts[0]
+        pp_kwargs = ppid_parts[1]
+
+        for cls_code in get_available_segmenters():
+            if cls_code == code:
+                cls = get_available_segmenters()[cls_code]
+                break
+        else:
+            raise ValueError(
+                f"Could not find segmenter '{code}'!")
+        kwargs = ppid_to_kwargs(cls=cls,
+                                method="segment_algorithm",
+                                ppid=pp_kwargs)
+        if cls.mask_postprocessing:
+            pp_kwargs_mask = ppid_parts[2]
+            kwargs["kwargs_mask"] = ppid_to_kwargs(cls=cls,
                                                    method="process_mask",
+                                                   ppid=pp_kwargs_mask)
+        return kwargs
 
     @staticmethod
     def process_mask(labels, *,
@@ -130,8 +202,8 @@ class Segmenter(abc.ABC):
 
         Parameters
         ----------
-        labels: 2d integer ndarray
-            Labeled input (contains blobs with same number)
+        labels: 2d integer or boolean ndarray
+            Labeled input (contains blobs consisting of unique numbers)
         clear_border: bool
             clear the image boarder using
             :func:`skimage.segmentation.clear_border`
@@ -142,6 +214,12 @@ class Segmenter(abc.ABC):
            if > 0, perform a binary closing with a disk
            of that radius in pixels
        """
+        if labels.dtype == bool:
+            # Convert mask image to labels
+            labels, _ = ndi.label(
+                input=labels,
+                structure=ndi.generate_binary_structure(2, 2))
+
        if clear_border:
            #
            # from skimage import segmentation
@@ -149,33 +227,13 @@ class Segmenter(abc.ABC):
            #
            if (labels[0, :].sum() or labels[-1, :].sum()
                    or labels[:, 0].sum() or labels[:, -1].sum()):
-                border = np.zeros_like(labels, dtype=bool)
-                border[0] = True
-                border[-1] = True
-                border[:, 0] = True
-                border[:, -1] = True
+                border = Segmenter.get_border(labels.shape)
                indices = sorted(np.unique(labels[border]))
-                for ii in indices[1:]:
-                    labels[labels == ii] = 0
-
-        # scikit-image is too slow for us here. So we use OpenCV.
-        # https://github.com/scikit-image/scikit-image/issues/1190
-
-        if closing_disk:
-            #
-            # from skimage import morphology
-            # morphology.binary_closing(
-            #     mask,
-            #     footprint=morphology.disk(closing_disk),
-            #     out=mask)
-            #
-            element = Segmenter.get_disk(closing_disk)
-            labels_uint8 = np.array(labels, dtype=np.uint8)
-            labels_dilated = cv2.dilate(labels_uint8, element)
-            labels_eroded = cv2.erode(labels_dilated, element)
-            labels, _ = ndi.label(
-                input=labels_eroded > 0,
-                structure=ndi.generate_binary_structure(2, 2))
+                for li in indices:
+                    if li == 0:
+                        # ignore background values
+                        continue
+                    labels[labels == li] = 0
 
        if fill_holes:
            # Floodfill only works with uint8 (too small) or int32
@@ -188,55 +246,146 @@ class Segmenter(abc.ABC):
            # Floodfill algorithm fills the background image and
            # the resulting inversion is the image with holes filled.
            # This will destroy labels (adding 2,147,483,647 to background)
+            # Since floodfill will use the upper left corner of the image as
+            # a seed, we have to make sure it is set to background. We set
+            # a line of pixels in the upper channel wall to zero to be sure.
+            labels[0, :] = 0
+            # ...and a 4x4 pixel region in the top left corner.
+            labels[1, :2] = 0
            cv2.floodFill(labels, None, (0, 0), 2147483647)
            mask = labels != 2147483647
            labels, _ = ndi.label(
                input=mask,
                structure=ndi.generate_binary_structure(2, 2))
 
-        return labels
-
-    def segment_chunk(self, image_data, chunk):
-        """Return the integer labels for one `image_data` chunk"""
-        data = image_data.get_chunk(chunk)
-        return self.segment_batch(data)
-
-    def segment_frame(self, image):
-        """Return the integer label image for `index`"""
-        segm_wrap = self.segment_frame_wrapper()
-        # obtain mask or label
-        mol = segm_wrap(image)
-        if mol.dtype == bool:
-            # convert mask to label
+        if closing_disk:
+            # scikit-image is too slow for us here. So we use OpenCV.
+            # https://github.com/scikit-image/scikit-image/issues/1190
+            #
+            # from skimage import morphology
+            # morphology.binary_closing(
+            #     mask,
+            #     footprint=morphology.disk(closing_disk),
+            #     out=mask)
+            #
+            element = Segmenter.get_disk(closing_disk)
+            # Note: erode/dilate not implemented for int32
+            labels_uint8 = np.array(labels, dtype=np.uint8)
+            # Historically, we would like to do a closing (dilation followed
+            # by erosion) on the image data where lower brightness values
+            # meant "we have an event". However, since we are now working
+            # with labels instead of image data (0 is background and labels
+            # are enumerated with integers), high "brightness" values are
+            # actually the event. Thus, we have to perform an opening
+            # (erosion followed by dilation) of the label image.
+            labels_eroded = cv2.erode(labels_uint8, element)
+            labels_dilated = cv2.dilate(labels_eroded, element)
            labels, _ = ndi.label(
-                input=mol,
+                input=labels_dilated > 0,
                structure=ndi.generate_binary_structure(2, 2))
-        else:
-            labels = mol
-        # optional postprocessing
-        if self.mask_postprocessing:
-            labels = self.process_mask(labels, **self.kwargs_mask)
+
        return labels
 
+    @staticmethod
+    @abc.abstractmethod
+    def segment_algorithm(image):
+        """The segmentation algorithm implemented in the subclass
+
+        Perform segmentation and return integer label or binary mask image
+        """
+
    @functools.cache
-    def segment_frame_wrapper(self):
+    def segment_algorithm_wrapper(self):
+        """Wraps `self.segment_algorithm` to only accept an image
+
+        The static method `self.segment_algorithm` may optionally accept
+        keyword arguments `self.kwargs`. This wrapper returns the
+        wrapped method that only accepts the image as an argument. This
+        makes sense if you want to unify
+        """
        if self.kwargs:
            # For segmenters that accept keyword arguments.
-            segm_wrap = functools.partial(self.segment_approach,
+            segm_wrap = functools.partial(self.segment_algorithm,
                                          **self.kwargs)
        else:
            # For segmenters that don't accept keyword arguments.
-            segm_wrap = self.segment_approach
+            segm_wrap = self.segment_algorithm
        return segm_wrap
 
-    @staticmethod
    @abc.abstractmethod
-    def segment_approach(image):
-        """Perform segmentation and return integer label or binary mask image
+    def segment_batch(self, images, start=None, stop=None, bg_off=None):
+        """Return the integer labels for an entire batch
+
+        This is implemented in the MPO and STO segmenters.
+        """
 
-        This is the approach the subclasses implement.
+    def segment_chunk(self, image_data, chunk, bg_off=None):
+        """Return the integer labels for one `image_data` chunk
+
+        This is a wrapper for `segment_batch`.
+
+        Parameters
+        ----------
+        image_data:
+            Instance of dcnum's :class:`.BaseImageChunkCache` with
+            the methods `get_chunk` and `get_chunk_slice`.
+        chunk: int
+            Integer identifying the chunk in `image_data` to segment
+        bg_off: ndarray
+            Optional 1D array with same length as `image_data` that holds
+            additional background offset values that should be subtracted
+            from the image data before segmentation. Should only be
+            used in combination with segmenters that have
+            `requires_background_correction` set to True.
        """
+        images = image_data.get_chunk(chunk)
+        if bg_off is not None:
+            bg_off_chunk = bg_off[image_data.get_chunk_slice(chunk)]
+        else:
+            bg_off_chunk = None
+        return self.segment_batch(images, bg_off=bg_off_chunk)
 
    @abc.abstractmethod
-    def segment_batch(self, data, start=None, stop=None):
-        """Return the integer labels for an entire batch"""
+    def segment_single(self, image):
+        """Return the integer label for one image
+
+        This is implemented in the MPO and STO segmenters.
+        """
+
+    @classmethod
+    def validate_applicability(cls,
+                               segmenter_kwargs: Dict,
+                               meta: Dict = None,
+                               logs: Dict = None):
+        """Validate the applicability of this segmenter for a dataset
+
+        Parameters
+        ----------
+        segmenter_kwargs: dict
+            Keyword arguments for the segmenter
+        meta: dict
+            Dictionary of metadata from an :class:`HDF5Data` instance
+        logs: dict
+            Dictionary of logs from an :class:`HDF5Data` instance
+
+        Returns
+        -------
+        applicable: bool
+            True if the segmenter is applicable to the dataset
+
+        Raises
+        ------
+        SegmenterNotApplicableError
+            If the segmenter is not applicable to the dataset
+        """
+        return True
+
+
+@functools.cache
+def get_available_segmenters():
+    """Return dictionary of available segmenters"""
+    segmenters = {}
+    for scls in Segmenter.__subclasses__():
+        for cls in scls.__subclasses__():
+            segmenters[cls.get_ppid_code()] = cls
+    return segmenters
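For orientation, the segmenter pipeline identifier documented in `get_ppid` above follows the `KEY:KW_APPROACH:KW_MASK` layout, with `^`-separated `key=value` pairs inside each part (e.g. `thresh=-6^blur=0`). A minimal sketch, not dcnum code, of how such an identifier splits into its parts; the concrete PPID string below is a hypothetical example:

# Minimal sketch (not dcnum code): split a segmenter PPID according to the
# "KEY:KW_APPROACH:KW_MASK" layout documented in `get_ppid` above.
# The concrete string is a hypothetical example value.
ppid = "thresh:thresh=-6^blur=0:clear_border=1^fill_holes=1^closing_disk=5"

key, kw_approach, kw_mask = ppid.split(":")
# `key` identifies the segmenter class via `get_ppid_code()`, e.g. "thresh"
approach_kwargs = dict(item.split("=") for item in kw_approach.split("^"))
mask_kwargs = dict(item.split("=") for item in kw_mask.split("^"))
# -> {"thresh": "-6", "blur": "0"} and
#    {"clear_border": "1", "fill_holes": "1", "closing_disk": "5"};
# values stay strings in this sketch, whereas dcnum.meta.ppid.ppid_to_kwargs
# performs the real, type-aware parsing used by `get_ppkw_from_ppid`.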
dcnum/segm/segmenter_manager_thread.py CHANGED
@@ -8,7 +8,7 @@ import numpy as np
 from ..read.cache import HDF5ImageCache, ImageCorrCache
 
 from .segmenter import Segmenter
-from .segmenter_cpu import CPUSegmenter
+from .segmenter_mpo import MPOSegmenter
 
 
 class SegmenterManagerThread(threading.Thread):
@@ -17,7 +17,7 @@ class SegmenterManagerThread(threading.Thread):
                  image_data: HDF5ImageCache | ImageCorrCache,
                  slot_states: mp.Array,
                  slot_chunks: mp.Array,
-                 debug: bool = False,
+                 bg_off: np.ndarray = None,
                  *args, **kwargs):
        """Manage the segmentation of image data
 
@@ -38,10 +38,10 @@ class SegmenterManagerThread(threading.Thread):
        slot_chunks:
            For each slot in `slot_states`, this shared array defines
            on which chunk in `image_data` the segmentation took place.
-        debug:
-            Whether to run in debugging mode (more verbose messages and
-            CPU-based segmentation is done in one single thread instead
-            of in multiple subprocesses).
+        bg_off:
+            1d array containing additional background image offset values
+            that are added to each background image before subtraction
+            from the input image
 
        Notes
        -----
@@ -65,6 +65,9 @@ class SegmenterManagerThread(threading.Thread):
        self.segmenter = segmenter
        #: Image data which is being segmented
        self.image_data = image_data
+        #: Additional, optional background offset
+        self.bg_off = (
+            bg_off if self.segmenter.requires_background_correction else None)
        #: Slot states
        self.slot_states = slot_states
        #: Current slot chunk index for the slot states
@@ -73,37 +76,45 @@ class SegmenterManagerThread(threading.Thread):
        self.labels_list = [None] * len(self.slot_states)
        #: Time counter for segmentation
        self.t_count = 0
-        #: Whether running in debugging mode
-        self.debug = debug
 
    def run(self):
+        num_slots = len(self.slot_states)
        # We iterate over all the chunks of the image data.
        for chunk in self.image_data.iter_chunks():
-            num_slots = len(self.slot_states)
-            cur_slot = 0
-            empty_slots = 0
+            unavailable_slots = 0
+            found_free_slot = False
            # Wait for a free slot to perform segmentation (compute labels)
-            while True:
-                # - "e" there is data from the segmenter (the extractor
-                #   can take it and process it)
-                # - "s" the extractor processed the data and is waiting
-                #   for the segmenter
-                if self.slot_states[cur_slot] != "e":
-                    break
-                else:
-                    empty_slots += 1
-                    cur_slot = (cur_slot + 1) % num_slots
-                    if empty_slots >= num_slots:
-                        # There is nothing to do, try to avoid 100% CPU
-                        empty_slots = 0
-                        time.sleep(.01)
+            while not found_free_slot:
+                # We sort the slots according to the slot chunks so that we
+                # always process the slot with the smallest slot chunk number
+                # first. Initially, the slot_chunks array is filled with
+                # zeros, but we populate it here.
+                for cur_slot in np.argsort(self.slot_chunks):
+                    # - "e" there is data from the segmenter (the extractor
+                    #   can take it and process it)
+                    # - "s" the extractor processed the data and is waiting
+                    #   for the segmenter
+                    if self.slot_states[cur_slot] != "e":
+                        # It's the segmenter's turn. Note that we use '!= "e"',
+                        # because the initial value is "\x00".
+                        found_free_slot = True
+                        break
+                    else:
+                        # Try another slot.
+                        unavailable_slots += 1
+                        if unavailable_slots >= num_slots:
+                            # There is nothing to do, try to avoid 100% CPU
+                            unavailable_slots = 0
+                            time.sleep(.1)
 
            t1 = time.monotonic()
 
            # We have a free slot to compute the segmentation
            labels = self.segmenter.segment_chunk(
                image_data=self.image_data,
-                chunk=chunk)
+                chunk=chunk,
+                bg_off=self.bg_off,
+            )
 
            # TODO: make this more memory efficient (pre-shared mp.Arrays?)
            # Store labels in a list accessible by the main thread
@@ -113,12 +124,12 @@ class SegmenterManagerThread(threading.Thread):
            # This must be done last: Let the extractor know that this
            # slot is ready for processing.
            self.slot_states[cur_slot] = "e"
-            self.logger.debug(f"Segmented one chunk: {chunk}")
+            self.logger.debug(f"Segmented chunk {chunk} in slot {cur_slot}")
 
            self.t_count += time.monotonic() - t1
 
        # Cleanup
-        if isinstance(self.segmenter, CPUSegmenter):
+        if isinstance(self.segmenter, MPOSegmenter):
            # Join the segmentation workers.
            self.segmenter.join_workers()