dcnum 0.17.1__tar.gz → 0.18.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dcnum might be problematic. Click here for more details.

Files changed (105)
  1. {dcnum-0.17.1 → dcnum-0.18.0}/.github/workflows/check.yml +2 -0
  2. {dcnum-0.17.1 → dcnum-0.18.0}/CHANGELOG +33 -1
  3. {dcnum-0.17.1 → dcnum-0.18.0}/PKG-INFO +1 -1
  4. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/_version.py +2 -2
  5. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/__init__.py +1 -1
  6. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_background/base.py +18 -22
  7. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_background/bg_copy.py +8 -4
  8. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_background/bg_roll_median.py +16 -7
  9. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_background/bg_sparse_median.py +53 -5
  10. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_brightness/bright_all.py +41 -6
  11. dcnum-0.18.0/src/dcnum/feat/feat_contour/__init__.py +4 -0
  12. dcnum-0.17.1/src/dcnum/feat/feat_moments/mt_legacy.py → dcnum-0.18.0/src/dcnum/feat/feat_contour/moments.py +32 -8
  13. dcnum-0.18.0/src/dcnum/feat/feat_contour/volume.py +174 -0
  14. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/queue_event_extractor.py +25 -4
  15. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/logic/ctrl.py +24 -2
  16. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/logic/json_encoder.py +2 -0
  17. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/meta/ppid.py +1 -1
  18. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/read/__init__.py +1 -0
  19. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/read/cache.py +78 -78
  20. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/read/const.py +4 -1
  21. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/read/hdf5_data.py +74 -16
  22. dcnum-0.18.0/src/dcnum/read/mapped.py +79 -0
  23. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/segm/segm_thresh.py +3 -3
  24. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/segm/segmenter.py +73 -42
  25. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/segm/segmenter_cpu.py +5 -5
  26. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/segm/segmenter_manager_thread.py +11 -2
  27. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/write/writer.py +37 -5
  28. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum.egg-info/PKG-INFO +1 -1
  29. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum.egg-info/SOURCES.txt +9 -3
  30. dcnum-0.18.0/tests/data/fmt-hdf5_cytoshot_full-features_2024.zip +0 -0
  31. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_background_bg_roll_median.py +1 -1
  32. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_background_bg_sparsemed.py +1 -1
  33. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_event_extractor_manager.py +3 -1
  34. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_moments_based.py +13 -13
  35. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_moments_based_extended.py +3 -3
  36. dcnum-0.18.0/tests/test_feat_volume.py +203 -0
  37. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_logic_pipeline.py +125 -20
  38. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_meta_ppid_base.py +3 -3
  39. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_meta_ppid_bg.py +9 -7
  40. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_meta_ppid_feat.py +2 -2
  41. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_read_concat_hdf5.py +21 -0
  42. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_read_hdf5.py +1 -1
  43. dcnum-0.18.0/tests/test_read_hdf5_index_mapping.py +95 -0
  44. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_segm_base.py +122 -12
  45. dcnum-0.18.0/tests/test_segm_no_mask_proc.py +55 -0
  46. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_segm_thresh.py +12 -17
  47. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_write_writer.py +14 -0
  48. dcnum-0.17.1/src/dcnum/feat/feat_moments/__init__.py +0 -4
  49. {dcnum-0.17.1 → dcnum-0.18.0}/.github/workflows/deploy_pypi.yml +0 -0
  50. {dcnum-0.17.1 → dcnum-0.18.0}/.gitignore +0 -0
  51. {dcnum-0.17.1 → dcnum-0.18.0}/.readthedocs.yml +0 -0
  52. {dcnum-0.17.1 → dcnum-0.18.0}/LICENSE +0 -0
  53. {dcnum-0.17.1 → dcnum-0.18.0}/README.rst +0 -0
  54. {dcnum-0.17.1 → dcnum-0.18.0}/docs/conf.py +0 -0
  55. {dcnum-0.17.1 → dcnum-0.18.0}/docs/extensions/github_changelog.py +0 -0
  56. {dcnum-0.17.1 → dcnum-0.18.0}/docs/index.rst +0 -0
  57. {dcnum-0.17.1 → dcnum-0.18.0}/docs/requirements.txt +0 -0
  58. {dcnum-0.17.1 → dcnum-0.18.0}/pyproject.toml +0 -0
  59. {dcnum-0.17.1 → dcnum-0.18.0}/setup.cfg +0 -0
  60. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/__init__.py +0 -0
  61. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/event_extractor_manager_thread.py +0 -0
  62. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_background/__init__.py +0 -0
  63. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_brightness/__init__.py +0 -0
  64. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_brightness/common.py +0 -0
  65. /dcnum-0.17.1/src/dcnum/feat/feat_moments/ct_opencv.py → /dcnum-0.18.0/src/dcnum/feat/feat_contour/contour.py +0 -0
  66. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_texture/__init__.py +0 -0
  67. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_texture/common.py +0 -0
  68. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/feat_texture/tex_all.py +0 -0
  69. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/feat/gate.py +0 -0
  70. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/logic/__init__.py +0 -0
  71. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/logic/job.py +0 -0
  72. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/meta/__init__.py +0 -0
  73. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/meta/paths.py +0 -0
  74. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/segm/__init__.py +0 -0
  75. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/segm/segmenter_gpu.py +0 -0
  76. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/write/__init__.py +0 -0
  77. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/write/deque_writer_thread.py +0 -0
  78. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum/write/queue_collector_thread.py +0 -0
  79. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum.egg-info/dependency_links.txt +0 -0
  80. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum.egg-info/requires.txt +0 -0
  81. {dcnum-0.17.1 → dcnum-0.18.0}/src/dcnum.egg-info/top_level.txt +0 -0
  82. {dcnum-0.17.1 → dcnum-0.18.0}/tests/conftest.py +0 -0
  83. {dcnum-0.17.1 → dcnum-0.18.0}/tests/data/fmt-hdf5_cytoshot_extended-moments-features.zip +0 -0
  84. {dcnum-0.17.1 → dcnum-0.18.0}/tests/data/fmt-hdf5_cytoshot_full-features_2023.zip +0 -0
  85. {dcnum-0.17.1 → dcnum-0.18.0}/tests/data/fmt-hdf5_cytoshot_full-features_legacy_allev_2023.zip +0 -0
  86. {dcnum-0.17.1 → dcnum-0.18.0}/tests/data/fmt-hdf5_shapein_empty.zip +0 -0
  87. {dcnum-0.17.1 → dcnum-0.18.0}/tests/data/fmt-hdf5_shapein_raw-with-variable-length-logs.zip +0 -0
  88. {dcnum-0.17.1 → dcnum-0.18.0}/tests/helper_methods.py +0 -0
  89. {dcnum-0.17.1 → dcnum-0.18.0}/tests/requirements.txt +0 -0
  90. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_background_base.py +0 -0
  91. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_background_bg_copy.py +0 -0
  92. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_brightness.py +0 -0
  93. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_gate.py +0 -0
  94. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_feat_haralick.py +0 -0
  95. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_init.py +0 -0
  96. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_logic_job.py +0 -0
  97. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_logic_join.py +0 -0
  98. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_logic_json.py +0 -0
  99. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_meta_paths.py +0 -0
  100. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_meta_ppid_data.py +0 -0
  101. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_meta_ppid_gate.py +0 -0
  102. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_meta_ppid_segm.py +0 -0
  103. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_read_basin.py +0 -0
  104. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_write_deque_writer_thread.py +0 -0
  105. {dcnum-0.17.1 → dcnum-0.18.0}/tests/test_write_queue_collector_thread.py +0 -0
@@ -29,6 +29,8 @@ jobs:
29
29
  python -m pip install coverage flake8 pytest
30
30
  - name: Install dcnum
31
31
  run: |
32
+ # https://github.com/luispedro/mahotas/issues/144
33
+ pip install mahotas==1.4.13
32
34
  pip install -e .
33
35
  - name: List installed packages
34
36
  run: |
@@ -1,3 +1,34 @@
1
+ 0.18.0
2
+ - BREAKING CHANGE: mask postprocessing did a morphological opening instead
3
+ of a morphological closing, failing to remove spurious noise
4
+ - BREAKING CHANGE: perform first fill_holes and then closing_disk in mask
5
+ postprocessing
6
+ - feat: allow to specify ranges when creating an HDF5Data instance to
7
+ enable e.g. processing only a portion of an input file
8
+ - feat: volume computation via contour revolve algorithm
9
+ - feat: background offset (flickering) correction via the
10
+ "offset_correction" keyword for the "sparsemed" background computer
11
+ and "bg_off" everywhere else
12
+ - enh: allow creating HDF5Writer from h5py.File
13
+ - fix: mask postprocessing did a morphological opening instead
14
+ of a morphological closing, failing to remove spurious noise
15
+ - fix: remove mask ppid part for segmenters that do not use it
16
+ - fix: mask postprocessing with "fill_holes" using `cv2.floodFill`
17
+ sometimes segmented the entire frame if the upper left pixel was not
18
+ set to background
19
+ - enh: perform first fill_holes and then closing_disk in mask
20
+ postprocessing
21
+ - enh: pop read-cache items before adding a new one
22
+ - enh: allow to request the raw contour from `moments_based_features`
23
+ - ref: increment DCNUM_PPID_GENERATION to 8
24
+ - ref: added new super class `BaseImageChunkCache`
25
+ - ref: use HDF5Writer in Background class
26
+ - ref: minor cleanup
27
+ - ref: rename submodule `feat_moments` to `feat_contour`
28
+ - ref: remove unused `name` property from `Background` class
29
+ 0.17.2
30
+ - fix: make sure unsupported features are not propagated in
31
+ `concatenated_hdf5_data` (#27)
1
32
  0.17.1
2
33
  - ref: remove "bg_med" and "index_online" from protected scalar features,
3
34
  because "bg_med" may change due to a different background computation
@@ -8,7 +39,8 @@
8
39
  - ref: remove deprecated `preselect` and `ptp_median` keyword
9
40
  arguments from `QueueEventExtractor`
10
41
  - ref: remove deprecated "key" from `get_class_method_info` info dict
11
- - ref: issue UserWarning instead of DeprecationWarning when
42
+ - ref: issue UserWarning instead of DeprecationWarning when checking
43
+ segmenter keyword arguments
12
44
  - ref: remove pixel size check for HDF5 data
13
45
  - ref: remove unused `_get_model_file` from GPUSegmenter
14
46
  0.16.8
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dcnum
3
- Version: 0.17.1
3
+ Version: 0.18.0
4
4
  Summary: numerics toolbox for imaging deformability cytometry
5
5
  Author: Maximilian Schlögel, Paul Müller
6
6
  Maintainer-email: Paul Müller <dev@craban.de>
@@ -12,5 +12,5 @@ __version__: str
12
12
  __version_tuple__: VERSION_TUPLE
13
13
  version_tuple: VERSION_TUPLE
14
14
 
15
- __version__ = version = '0.17.1'
16
- __version_tuple__ = version_tuple = (0, 17, 1)
15
+ __version__ = version = '0.18.0'
16
+ __version_tuple__ = version_tuple = (0, 18, 0)
@@ -1,6 +1,6 @@
1
1
  # flake8: noqa: F401
2
2
  """Feature computation"""
3
- from . import feat_background, feat_brightness, feat_moments, feat_texture
3
+ from . import feat_background, feat_brightness, feat_contour, feat_texture
4
4
  from .event_extractor_manager_thread import EventExtractorManagerThread
5
5
  from .queue_event_extractor import (
6
6
  QueueEventExtractor, EventExtractorThread, EventExtractorProcess
@@ -3,14 +3,12 @@ import functools
3
3
  import inspect
4
4
  import multiprocessing as mp
5
5
  import pathlib
6
- import uuid
7
6
 
8
7
  import h5py
9
- import numpy as np
10
8
 
11
9
  from ...meta import ppid
12
10
  from ...read import HDF5Data
13
- from ...write import create_with_basins, set_default_filter_kwargs
11
+ from ...write import HDF5Writer, create_with_basins, set_default_filter_kwargs
14
12
 
15
13
 
16
14
  # All subprocesses should use 'spawn' to avoid issues with threads
@@ -93,8 +91,6 @@ class Background(abc.ABC):
93
91
  else:
94
92
  self.input_data = input_data
95
93
 
96
- #: unique identifier
97
- self.name = str(uuid.uuid4())
98
94
  #: shape of event images
99
95
  self.image_shape = self.input_data[0].shape
100
96
  #: total number of events
@@ -109,25 +105,17 @@ class Background(abc.ABC):
109
105
  # "a", because output file already exists
110
106
  self.h5out = h5py.File(output_path, "a", libver="latest")
111
107
 
112
- # Initialize background data
113
- ds_kwargs = set_default_filter_kwargs(compression=compress)
114
- h5bg = self.h5out.require_dataset(
115
- "events/image_bg",
116
- shape=self.input_data.shape,
117
- dtype=np.uint8,
118
- chunks=(min(100, self.image_count),
119
- self.image_shape[0],
120
- self.image_shape[1]),
121
- **ds_kwargs,
108
+ # Initialize writer
109
+ self.writer = HDF5Writer(
110
+ obj=self.h5out,
111
+ ds_kwds=set_default_filter_kwargs(compression=compress),
122
112
  )
123
- h5bg.attrs.create('CLASS', np.string_('IMAGE'))
124
- h5bg.attrs.create('IMAGE_VERSION', np.string_('1.2'))
125
- h5bg.attrs.create('IMAGE_SUBCLASS', np.string_('IMAGE_GRAYSCALE'))
126
113
 
127
114
  def __enter__(self):
128
115
  return self
129
116
 
130
117
  def __exit__(self, type, value, tb):
118
+ self.writer.close()
131
119
  # Close h5in and h5out
132
120
  if self.hdin is not None: # we have an input file
133
121
  self.hdin.close() # this closes self.h5in
@@ -200,12 +188,20 @@ class Background(abc.ABC):
200
188
  return self.image_proc.value / self.image_count
201
189
 
202
190
  def process(self):
191
+ # Delete any old background data
192
+ for key in ["image_bg", "bg_off"]:
193
+ if key in self.h5out["events"]:
194
+ del self.h5out["events"][key]
195
+ # Perform the actual background computation
203
196
  self.process_approach()
204
197
  bg_ppid = self.get_ppid()
205
- # Store pipeline information in the image_bg feature
206
- self.h5out["events/image_bg"].attrs["dcnum ppid background"] = bg_ppid
207
- self.h5out["events/image_bg"].attrs["dcnum ppid generation"] = \
208
- ppid.DCNUM_PPID_GENERATION
198
+ # Store pipeline information in the image_bg/bg_off feature
199
+ for key in ["image_bg", "bg_off"]:
200
+ if key in self.h5out["events"]:
201
+ self.h5out[f"events/{key}"].attrs["dcnum ppid background"] = \
202
+ bg_ppid
203
+ self.h5out[F"events/{key}"].attrs["dcnum ppid generation"] = \
204
+ ppid.DCNUM_PPID_GENERATION
209
205
 
210
206
  @abc.abstractmethod
211
207
  def process_approach(self):
@@ -4,17 +4,14 @@ from .base import Background
4
4
 
5
5
 
6
6
  class BackgroundCopy(Background):
7
-
8
7
  @staticmethod
9
8
  def check_user_kwargs():
10
9
  pass
11
10
 
12
- def process_approach(self):
11
+ def process(self):
13
12
  """Perform median computation on entire input data"""
14
13
  if self.h5in != self.h5out:
15
14
  hin = self.hdin.image_bg.h5ds
16
- if "image_bg" in self.h5out["events"]:
17
- del self.h5out["events/image_bg"]
18
15
  h5py.h5o.copy(src_loc=hin.parent.id,
19
16
  src_name=b"image_bg",
20
17
  dst_loc=self.h5out["events"].id,
@@ -23,3 +20,10 @@ class BackgroundCopy(Background):
23
20
 
24
21
  # set progress to 100%
25
22
  self.image_proc.value = self.image_count
23
+
24
+ def process_approach(self):
25
+ # We do the copying in `process`, because we do not want to modify
26
+ # any metadata or delete datasets as is done in the base class.
27
+ # But we still have to implement this method, because it is an
28
+ # abstractmethod in the base class.
29
+ pass
@@ -172,11 +172,18 @@ class BackgroundRollMed(Background):
172
172
  num_steps = int(np.ceil(self.image_count / self.batch_size))
173
173
  for ii in range(num_steps):
174
174
  self.process_next_batch()
175
- # Set the remaining kernel_size median values to the last one
176
- last_image = self.h5out["events/image_bg"][-self.kernel_size-1]
177
- for ii in range(self.kernel_size):
178
- self.h5out["events/image_bg"][self.image_count-ii-1] = last_image
179
- self.image_proc.value = self.image_count
175
+
176
+ # Set the remaining median bg images to the last one.
177
+ num_remaining = (self.input_data.shape[0]
178
+ - self.h5out["events/image_bg"].shape[0])
179
+ if num_remaining:
180
+ last_image = self.h5out["events/image_bg"][-1]
181
+ last_chunk = np.repeat(
182
+ last_image[np.newaxis],
183
+ num_remaining,
184
+ axis=0)
185
+ self.writer.store_feature_chunk("image_bg", last_chunk)
186
+ self.image_proc.value += num_remaining
180
187
 
181
188
  def process_next_batch(self):
182
189
  """Process one batch of input data"""
@@ -208,9 +215,11 @@ class BackgroundRollMed(Background):
208
215
  # TODO:
209
216
  # Do this in a different thread so workers can keep going
210
217
  # and use a lock somewhere in case the disk is too slow.
211
- self.h5out["events/image_bg"][cur_slice_out] = \
218
+ self.writer.store_feature_chunk(
219
+ "image_bg",
212
220
  self.shared_output[:output_size].reshape(output_size,
213
- *self.image_shape)
221
+ *self.image_shape),
222
+ )
214
223
 
215
224
  self.current_batch += 1
216
225
  self.image_proc.value += self.batch_size
@@ -15,6 +15,7 @@ logger = logging.getLogger(__name__)
15
15
  class BackgroundSparseMed(Background):
16
16
  def __init__(self, input_data, output_path, kernel_size=200,
17
17
  split_time=1., thresh_cleansing=0, frac_cleansing=.8,
18
+ offset_correction=True,
18
19
  compress=True, num_cpus=None):
19
20
  """Sparse median background correction with cleansing
20
21
 
@@ -57,6 +58,21 @@ class BackgroundSparseMed(Background):
57
58
  Fraction between 0 and 1 indicating how many background images
58
59
  must still be present after cleansing (in case the cleansing
59
60
  factor is too large). Set to 1 to disable cleansing altogether.
61
+ offset_correction: bool
62
+ The sparse median background correction produces one median
63
+ image for multiple input frames (BTW this also leads to very
64
+ efficient data storage with HDF5 data compression filters). In
65
+ case the input frames are subject to frame-by-frame brightness
66
+ variations (e.g. flickering of the illumination source), it
67
+ is useful to have an offset value per frame that can then be
68
+ used in a later step to perform a more accurate background
69
+ correction. This offset is computed here by taking a 20px wide
70
+ slice from each frame (where the channel wall is located)
71
+ and computing the median therein relative to the computed
72
+ background image. The data are written to the "bg_off" feature
73
+ in the output file alongside "image_bg". To obtain the
74
+ corrected background image, add "image_bg" and "bg_off".
75
+ Set this to False if you don't need the "bg_off" feature.
60
76
  compress: bool
61
77
  Whether to compress background data. Set this to False
62
78
  for faster processing.
@@ -72,7 +88,9 @@ class BackgroundSparseMed(Background):
72
88
  kernel_size=kernel_size,
73
89
  split_time=split_time,
74
90
  thresh_cleansing=thresh_cleansing,
75
- frac_cleansing=frac_cleansing)
91
+ frac_cleansing=frac_cleansing,
92
+ offset_correction=offset_correction,
93
+ )
76
94
 
77
95
  if kernel_size > len(self.input_data):
78
96
  logger.warning(
@@ -88,6 +106,8 @@ class BackgroundSparseMed(Background):
88
106
  self.thresh_cleansing = thresh_cleansing
89
107
  #: keep at least this many background images from the series
90
108
  self.frac_cleansing = frac_cleansing
109
+ #: offset/flickering correction
110
+ self.offset_correction = offset_correction
91
111
 
92
112
  # time axis
93
113
  self.time = None
@@ -175,11 +195,13 @@ class BackgroundSparseMed(Background):
175
195
  kernel_size: int = 200,
176
196
  split_time: float = 1.,
177
197
  thresh_cleansing: float = 0,
178
- frac_cleansing: float = .8):
198
+ frac_cleansing: float = .8,
199
+ offset_correction: bool = True,
200
+ ):
179
201
  """Initialize user-defined properties of this class
180
202
 
181
203
  This method primarily exists so that the CLI knows which
182
- keyword arguements can be passed to this class.
204
+ keyword arguments can be passed to this class.
183
205
 
184
206
  Parameters
185
207
  ----------
@@ -197,6 +219,21 @@ class BackgroundSparseMed(Background):
197
219
  Fraction between 0 and 1 indicating how many background images
198
220
  must still be present after cleansing (in case the cleansing
199
221
  factor is too large). Set to 1 to disable cleansing altogether.
222
+ offset_correction: bool
223
+ The sparse median background correction produces one median
224
+ image for multiple input frames (BTW this also leads to very
225
+ efficient data storage with HDF5 data compression filters). In
226
+ case the input frames are subject to frame-by-frame brightness
227
+ variations (e.g. flickering of the illumination source), it
228
+ is useful to have an offset value per frame that can then be
229
+ used in a later step to perform a more accurate background
230
+ correction. This offset is computed here by taking a 20px wide
231
+ slice from each frame (where the channel wall is located)
232
+ and computing the median therein relative to the computed
233
+ background image. The data are written to the "bg_off" feature
234
+ in the output file alongside "image_bg". To obtain the
235
+ corrected background image, add "image_bg" and "bg_off".
236
+ Set this to False if you don't need the "bg_off" feature.
200
237
  """
201
238
  assert kernel_size > 0
202
239
  assert split_time > 0
@@ -300,8 +337,19 @@ class BackgroundSparseMed(Background):
300
337
  while pos < self.image_count:
301
338
  stop = min(pos + step, self.image_count)
302
339
  cur_slice = slice(pos, stop)
303
- self.h5out["events/image_bg"][cur_slice] = \
304
- bg_images[bg_idx[cur_slice]]
340
+ cur_bg_data = bg_images[bg_idx[cur_slice]]
341
+ self.writer.store_feature_chunk("image_bg", cur_bg_data)
342
+ if self.offset_correction:
343
+ # Record background offset correction "bg_off". We take a
344
+ # slice of 20px from the top of the image (there are normally
345
+ # no events here, only the channel walls are visible).
346
+ sh, sw = self.input_data.shape[1:]
347
+ roi_full = (slice(None), slice(0, 20), slice(0, sw))
348
+ roi_cur = (cur_slice, slice(0, 20), slice(0, sw))
349
+ val_bg = np.mean(cur_bg_data[roi_full], axis=(1, 2))
350
+ val_dat = np.mean(self.input_data[roi_cur], axis=(1, 2))
351
+ # background image = image_bg + bg_off
352
+ self.writer.store_feature_chunk("bg_off", val_dat - val_bg)
305
353
  pos += step
306
354
 
307
355
  def process_second(self,
@@ -4,16 +4,38 @@ import numpy as np
4
4
  from .common import brightness_names
5
5
 
6
6
 
7
- def brightness_features(image,
8
- mask,
9
- image_bg=None,
10
- image_corr=None):
7
+ def brightness_features(image: np.ndarray[np.uint8],
8
+ mask: np.ndarray[np.bool_],
9
+ image_bg: np.ndarray[np.uint8] = None,
10
+ image_corr: np.ndarray[np.int16] = None,
11
+ bg_off: float = None,
12
+ ):
13
+ """Compute brightness features
14
+
15
+ Parameters
16
+ ----------
17
+ image: np.ndarray
18
+ 2D array of "image" of shape (H, W)
19
+ mask: np.ndarray
20
+ 3D array containing the N masks of shape (N, H, W)
21
+ image_bg: np.ndarray
22
+ 2D array of "image_bg" of shape (H, W), required for computing
23
+ the "bg_med" feature.
24
+ image_corr: np.ndarray
25
+ 2D array of (image - image_bg), which can be optionally passed
26
+ to this method. If not given, will be computed.
27
+ bg_off: float
28
+ Systematic offset value for correcting the brightness of the
29
+ background data which has an effect on "bright_bc_avg",
30
+ "bright_perc_10", "bright_perc_90", and "bg_med" (`bg_off` is
31
+ generated by sparsemed background correction).
32
+ """
11
33
  mask = np.array(mask, dtype=bool)
12
34
  size = mask.shape[0]
13
35
 
14
36
  br_dict = {}
15
- for key in brightness_names:
16
- br_dict[key] = np.full(size, np.nan, dtype=np.float64)
37
+ for mkey in brightness_names:
38
+ br_dict[mkey] = np.full(size, np.nan, dtype=np.float64)
17
39
 
18
40
  avg_sd = compute_avg_sd_masked_uint8(image, mask)
19
41
  br_dict["bright_avg"][:] = avg_sd[:, 0]
@@ -36,6 +58,19 @@ def brightness_features(image,
36
58
  br_dict["bright_perc_10"][:] = percentiles[:, 0]
37
59
  br_dict["bright_perc_90"][:] = percentiles[:, 1]
38
60
 
61
+ if bg_off is not None:
62
+ # subtract the background offset for all values that are computed
63
+ # from background-corrected images
64
+ for mkey in ["bright_bc_avg", "bright_perc_10", "bright_perc_90"]:
65
+ if mkey in br_dict:
66
+ br_dict[mkey] -= bg_off
67
+
68
+ # add the background offset to all values that were computed from
69
+ # the background only
70
+ for pkey in ["bg_med"]:
71
+ if pkey in br_dict:
72
+ br_dict[pkey] += bg_off
73
+
39
74
  return br_dict
40
75
 
41
76
 
@@ -0,0 +1,4 @@
1
+ # flake8: noqa: F401
2
+ """Feature computation: OpenCV moments-based features"""
3
+ from .moments import moments_based_features
4
+ from .volume import volume_from_contours
@@ -2,11 +2,27 @@ import cv2
2
2
  import numpy as np
3
3
 
4
4
 
5
- from .ct_opencv import contour_single_opencv
6
-
7
-
8
- def moments_based_features(mask, pixel_size):
5
+ from .contour import contour_single_opencv
6
+
7
+
8
+ def moments_based_features(
9
+ mask: np.ndarray,
10
+ pixel_size: float,
11
+ ret_contour: bool = False,
12
+ ):
13
+ """Compute moment-based features for a mask image
14
+
15
+ Parameters
16
+ ----------
17
+ mask: np.ndarray
18
+ 3D stack of 2D boolean mask images to analyze
19
+ pixel_size: float
20
+ pixel size of the mask image in µm
21
+ ret_contour: bool
22
+ whether to also return the raw contour
23
+ """
9
24
  assert pixel_size is not None and pixel_size != 0
25
+ raw_contours = []
10
26
 
11
27
  size = mask.shape[0]
12
28
 
@@ -42,9 +58,13 @@ def moments_based_features(mask, pixel_size):
42
58
  for ii in range(size):
43
59
  # raw contour
44
60
  cont_raw = contour_single_opencv(mask[ii])
45
- if len(cont_raw.shape) < 2:
46
- continue
47
- if cv2.contourArea(cont_raw) == 0:
61
+ # only continue if the contour is valid
62
+ not_valid = len(cont_raw.shape) < 2 or cv2.contourArea(cont_raw) == 0
63
+
64
+ if ret_contour:
65
+ raw_contours.append(None if not_valid else cont_raw)
66
+
67
+ if not_valid:
48
68
  continue
49
69
 
50
70
  mu_raw = cv2.moments(cont_raw)
@@ -53,6 +73,7 @@ def moments_based_features(mask, pixel_size):
53
73
 
54
74
  # convex hull
55
75
  cont_cvx = np.squeeze(cv2.convexHull(cont_raw))
76
+
56
77
  mu_cvx = cv2.moments(cont_cvx)
57
78
  arc_cvx = np.float64(cv2.arcLength(cont_cvx, True))
58
79
 
@@ -110,7 +131,7 @@ def moments_based_features(mask, pixel_size):
110
131
  # specify validity
111
132
  valid[ii] = True
112
133
 
113
- return {
134
+ data = {
114
135
  "area_msd": feat_area_msd,
115
136
  "area_ratio": feat_area_ratio,
116
137
  "area_um": feat_area_um,
@@ -131,3 +152,6 @@ def moments_based_features(mask, pixel_size):
131
152
  "tilt": feat_tilt,
132
153
  "valid": valid,
133
154
  }
155
+ if ret_contour:
156
+ data["contour"] = raw_contours
157
+ return data
@@ -0,0 +1,174 @@
1
+ from typing import List
2
+
3
+ import numpy as np
4
+
5
+
6
+ def volume_from_contours(
7
+ contour: List[np.ndarray],
8
+ pos_x: np.ndarray,
9
+ pos_y: np.ndarray,
10
+ pixel_size: float):
11
+ """Calculate the volume of a polygon revolved around an axis
12
+
13
+ The volume estimation assumes rotational symmetry.
14
+
15
+ Parameters
16
+ ----------
17
+ contour: list of ndarrays of shape (N,2)
18
+ One entry is a 2D array that holds the contour of an event
19
+ pos_x: float ndarray of length N
20
+ The x coordinate(s) of the centroid of the event(s) [µm]
21
+ pos_y: float ndarray of length N
22
+ The y coordinate(s) of the centroid of the event(s) [µm]
23
+ pixel_size: float
24
+ The detector pixel size in µm.
25
+
26
+ Returns
27
+ -------
28
+ volume: float ndarray
29
+ volume in um^3
30
+
31
+ Notes
32
+ -----
33
+ The computation of the volume is based on a full rotation of the
34
+ upper and the lower halves of the contour from which the
35
+ average is then used.
36
+
37
+ The volume is computed radially from the center position
38
+ given by (`pos_x`, `pos_y`). For sufficiently smooth contours,
39
+ such as densely sampled ellipses, the center position does not
40
+ play an important role. For contours that are given on a coarse
41
+ grid, as is the case for deformability cytometry, the center position
42
+ must be given.
43
+
44
+ References
45
+ ----------
46
+ - https://de.wikipedia.org/wiki/Kegelstumpf#Formeln
47
+ - Yields identical results to the Matlab script by Geoff Olynyk
48
+ <https://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve>`_
49
+ """
50
+ # results are stored in a separate array initialized with nans
51
+ v_avg = np.zeros_like(pos_x, dtype=np.float64) * np.nan
52
+
53
+ for ii in range(pos_x.shape[0]):
54
+ # If the contour has less than 4 pixels, the computation will fail.
55
+ # In that case, the value np.nan is already assigned.
56
+ cc = contour[ii]
57
+ if cc is not None and cc.shape[0] >= 4:
58
+ # Center contour coordinates with given centroid
59
+ contour_x = cc[:, 0] - pos_x[ii] / pixel_size
60
+ contour_y = cc[:, 1] - pos_y[ii] / pixel_size
61
+ # Switch to r and z to follow notation of vol_revolve
62
+ # (In RT-DC the axis of rotation is x, but for vol_revolve
63
+ # we need the axis vertically)
64
+ contour_r = contour_y
65
+ contour_z = contour_x
66
+
67
+ # Compute right volume
68
+ # Which points are at negative r-values (r<0)?
69
+ inx_neg = np.where(contour_r < 0)
70
+ # These points will be shifted up to r=0 directly on the z-axis
71
+ contour_right = np.copy(contour_r)
72
+ contour_right[inx_neg] = 0
73
+ vol_right = vol_revolve(r=contour_right,
74
+ z=contour_z,
75
+ point_scale=pixel_size)
76
+
77
+ # Compute left volume
78
+ # Which points are at positive r-values? (r>0)?
79
+ idx_pos = np.where(contour_r > 0)
80
+ # These points will be shifted down to y=0 to build an x-axis
81
+ contour_left = np.copy(contour_r)
82
+ contour_left[idx_pos] = 0
83
+ # Now we still have negative r values, but vol_revolve needs
84
+ # positive values, so we flip the sign...
85
+ contour_left[:] *= -1
86
+ # ... but in doing so, we have switched to clockwise rotation,
87
+ # and we need to pass the array in reverse order
88
+ vol_left = vol_revolve(r=contour_left[::-1],
89
+ z=contour_z[::-1],
90
+ point_scale=pixel_size)
91
+
92
+ # Compute the average
93
+ v_avg[ii] = (vol_right + vol_left) / 2
94
+
95
+ return {"volume": v_avg}
96
+
97
+
98
+ def vol_revolve(r, z, point_scale=1.):
99
+ r"""Calculate the volume of a polygon revolved around the Z-axis
100
+
101
+ This implementation yields the same results as the volRevolve
102
+ Matlab function by Geoff Olynyk (from 2012-05-03)
103
+ https://de.mathworks.com/matlabcentral/fileexchange/36525-volrevolve.
104
+
105
+ The difference here is that the volume is computed using (a much
106
+ more approachable) implementation using the volume of a truncated
107
+ cone (https://de.wikipedia.org/wiki/Kegelstumpf).
108
+
109
+ .. math::
110
+
111
+ V = \frac{h \cdot \pi}{3} \cdot (R^2 + R \cdot r + r^2)
112
+
113
+ Where :math:`h` is the height of the cone and :math:`r` and
114
+ `R` are the smaller and larger radii of the truncated cone.
115
+
116
+ Each line segment of the contour resembles one truncated cone. If
117
+ the z-step is positive (counter-clockwise contour), then the
118
+ truncated cone volume is added to the total volume. If the z-step
119
+ is negative (e.g. inclusion), then the truncated cone volume is
120
+ removed from the total volume.
121
+
122
+ Parameters
123
+ ----------
124
+ r: 1d np.ndarray
125
+ radial coordinates (perpendicular to the z axis)
126
+ z: 1d np.ndarray
127
+ coordinate along the axis of rotation
128
+ point_scale: float
129
+ point size in your preferred units; The volume is multiplied
130
+ by a factor of `point_scale**3`.
131
+
132
+ Notes
133
+ -----
134
+ The coordinates must be given in counter-clockwise order,
135
+ otherwise the volume will be negative.
136
+ """
137
+ r = np.atleast_1d(r)
138
+ z = np.atleast_1d(z)
139
+
140
+ # make sure we have a closed contour
141
+ if (r[-1] != r[0]) or (z[-1] != z[0]):
142
+ # We have an open contour - close it.
143
+ r = np.resize(r, len(r) + 1)
144
+ z = np.resize(z, len(z) + 1)
145
+
146
+ rp = r[:-1]
147
+
148
+ # array of radii differences: R - r
149
+ dr = np.diff(r)
150
+ # array of height differences: h
151
+ dz = np.diff(z)
152
+
153
+ # If we expand the function in the doc string with
154
+ # dr = R - r and dz = h, then we get three terms for the volume
155
+ # (as opposed to four terms in Olynyk's script). Those three terms
156
+ # all resemble area slices multiplied by the z-distance dz.
157
+ a1 = 3 * rp ** 2
158
+ a2 = 3 * rp * dr
159
+ a3 = dr ** 2
160
+
161
+ # Note that the formula for computing the volume is symmetric
162
+ # with respect to r and R. This means that it does not matter
163
+ # which sign dr has (R and r are always positive). Since this
164
+ # algorithm assumes that the contour is ordered counter-clockwise,
165
+ # positive dz means adding to the contour while negative dz means
166
+ # subtracting from the contour (see test functions for more details).
167
+ # Conveniently so, dz only appears one time in this formula, so
168
+ # we can take the sign of dz as it is (Otherwise, we would have
169
+ # to take the absolute value of every truncated cone volume and
170
+ # multiply it by np.sign(dz)).
171
+ v = np.pi / 3 * dz * np.abs(a1 + a2 + a3)
172
+ vol = np.sum(v) * point_scale ** 3
173
+
174
+ return vol