dcnum 0.23.1__tar.gz → 0.23.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dcnum might be problematic. Click here for more details.

Files changed (118) hide show
  1. {dcnum-0.23.1 → dcnum-0.23.2}/CHANGELOG +4 -0
  2. {dcnum-0.23.1 → dcnum-0.23.2}/PKG-INFO +2 -2
  3. {dcnum-0.23.1 → dcnum-0.23.2}/pyproject.toml +1 -1
  4. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/_version.py +2 -2
  5. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/logic/ctrl.py +24 -0
  6. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/logic/job.py +22 -0
  7. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_torch/__init__.py +8 -4
  8. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum.egg-info/PKG-INFO +2 -2
  9. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum.egg-info/requires.txt +1 -1
  10. {dcnum-0.23.1 → dcnum-0.23.2}/tests/helper_methods.py +1 -1
  11. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_brightness.py +2 -8
  12. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_haralick.py +1 -6
  13. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_moments_based.py +18 -23
  14. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_moments_based_extended.py +9 -14
  15. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_volume.py +1 -5
  16. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_logic_job.py +37 -1
  17. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_logic_pipeline.py +8 -8
  18. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_read_concat_hdf5.py +10 -24
  19. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_read_hdf5.py +7 -12
  20. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_segm_base.py +3 -5
  21. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_segm_mpo.py +10 -10
  22. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_segm_no_mask_proc.py +3 -6
  23. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_segm_sto.py +10 -10
  24. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_segm_thresh.py +2 -5
  25. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_segm_torch.py +12 -17
  26. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_write_deque_writer_thread.py +1 -5
  27. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_write_writer.py +2 -2
  28. {dcnum-0.23.1 → dcnum-0.23.2}/.github/workflows/check.yml +0 -0
  29. {dcnum-0.23.1 → dcnum-0.23.2}/.github/workflows/deploy_pypi.yml +0 -0
  30. {dcnum-0.23.1 → dcnum-0.23.2}/.gitignore +0 -0
  31. {dcnum-0.23.1 → dcnum-0.23.2}/.readthedocs.yml +0 -0
  32. {dcnum-0.23.1 → dcnum-0.23.2}/LICENSE +0 -0
  33. {dcnum-0.23.1 → dcnum-0.23.2}/README.rst +0 -0
  34. {dcnum-0.23.1 → dcnum-0.23.2}/docs/conf.py +0 -0
  35. {dcnum-0.23.1 → dcnum-0.23.2}/docs/extensions/github_changelog.py +0 -0
  36. {dcnum-0.23.1 → dcnum-0.23.2}/docs/index.rst +0 -0
  37. {dcnum-0.23.1 → dcnum-0.23.2}/docs/requirements.txt +0 -0
  38. {dcnum-0.23.1 → dcnum-0.23.2}/setup.cfg +0 -0
  39. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/__init__.py +0 -0
  40. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/__init__.py +0 -0
  41. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/event_extractor_manager_thread.py +0 -0
  42. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_background/__init__.py +0 -0
  43. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_background/base.py +0 -0
  44. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_background/bg_copy.py +0 -0
  45. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_background/bg_roll_median.py +0 -0
  46. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_background/bg_sparse_median.py +0 -0
  47. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_brightness/__init__.py +0 -0
  48. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_brightness/bright_all.py +0 -0
  49. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_brightness/common.py +0 -0
  50. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_contour/__init__.py +0 -0
  51. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_contour/contour.py +0 -0
  52. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_contour/moments.py +0 -0
  53. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_contour/volume.py +0 -0
  54. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_texture/__init__.py +0 -0
  55. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_texture/common.py +0 -0
  56. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/feat_texture/tex_all.py +0 -0
  57. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/gate.py +0 -0
  58. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/feat/queue_event_extractor.py +0 -0
  59. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/logic/__init__.py +0 -0
  60. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/logic/json_encoder.py +0 -0
  61. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/meta/__init__.py +0 -0
  62. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/meta/paths.py +0 -0
  63. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/meta/ppid.py +0 -0
  64. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/read/__init__.py +0 -0
  65. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/read/cache.py +0 -0
  66. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/read/const.py +0 -0
  67. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/read/hdf5_data.py +0 -0
  68. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/read/mapped.py +0 -0
  69. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/__init__.py +0 -0
  70. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_thresh.py +0 -0
  71. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_torch/segm_torch_base.py +0 -0
  72. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_torch/segm_torch_mpo.py +0 -0
  73. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_torch/segm_torch_sto.py +0 -0
  74. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_torch/torch_model.py +0 -0
  75. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_torch/torch_postproc.py +0 -0
  76. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segm_torch/torch_preproc.py +0 -0
  77. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segmenter.py +0 -0
  78. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segmenter_manager_thread.py +0 -0
  79. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segmenter_mpo.py +0 -0
  80. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/segm/segmenter_sto.py +0 -0
  81. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/write/__init__.py +0 -0
  82. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/write/deque_writer_thread.py +0 -0
  83. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/write/queue_collector_thread.py +0 -0
  84. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum/write/writer.py +0 -0
  85. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum.egg-info/SOURCES.txt +0 -0
  86. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum.egg-info/dependency_links.txt +0 -0
  87. {dcnum-0.23.1 → dcnum-0.23.2}/src/dcnum.egg-info/top_level.txt +0 -0
  88. {dcnum-0.23.1 → dcnum-0.23.2}/tests/conftest.py +0 -0
  89. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/fmt-hdf5_cytoshot_extended-moments-features.zip +0 -0
  90. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/fmt-hdf5_cytoshot_full-features_2023.zip +0 -0
  91. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/fmt-hdf5_cytoshot_full-features_2024.zip +0 -0
  92. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/fmt-hdf5_cytoshot_full-features_legacy_allev_2023.zip +0 -0
  93. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/fmt-hdf5_shapein_empty.zip +0 -0
  94. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/fmt-hdf5_shapein_raw-with-variable-length-logs.zip +0 -0
  95. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/segm-torch-model_unet-dcnum-test_g1_910c2.zip +0 -0
  96. {dcnum-0.23.1 → dcnum-0.23.2}/tests/data/segm-torch-test-data_unet-dcnum-test_g1_910c2.zip +0 -0
  97. {dcnum-0.23.1 → dcnum-0.23.2}/tests/requirements.txt +0 -0
  98. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_background_base.py +0 -0
  99. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_background_bg_copy.py +0 -0
  100. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_background_bg_roll_median.py +0 -0
  101. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_background_bg_sparsemed.py +0 -0
  102. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_event_extractor_manager.py +0 -0
  103. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_feat_gate.py +0 -0
  104. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_init.py +0 -0
  105. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_logic_join.py +0 -0
  106. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_logic_json.py +0 -0
  107. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_meta_paths.py +0 -0
  108. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_meta_ppid_base.py +0 -0
  109. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_meta_ppid_bg.py +0 -0
  110. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_meta_ppid_data.py +0 -0
  111. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_meta_ppid_feat.py +0 -0
  112. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_meta_ppid_gate.py +0 -0
  113. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_meta_ppid_segm.py +0 -0
  114. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_read_basin.py +0 -0
  115. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_read_hdf5_basins.py +0 -0
  116. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_read_hdf5_index_mapping.py +0 -0
  117. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_segm_torch_preproc.py +0 -0
  118. {dcnum-0.23.1 → dcnum-0.23.2}/tests/test_write_queue_collector_thread.py +0 -0
@@ -1,3 +1,7 @@
1
+ 0.23.2
2
+ - enh: add DCNumPipelineJob.validate method
3
+ - enh: list Python libraries used in job log
4
+ - setup: change required pytorch version from 2.3 to 2.2 (hardware support)
1
5
  0.23.1
2
6
  - enh: support passing custom default arguments to get_class_method_info
3
7
  - tests: fix torch preprocessing tests
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dcnum
3
- Version: 0.23.1
3
+ Version: 0.23.2
4
4
  Summary: numerics toolbox for imaging deformability cytometry
5
5
  Author: Maximilian Schlögel, Paul Müller, Raghava Alajangi
6
6
  Maintainer-email: Paul Müller <dev@craban.de>
@@ -26,7 +26,7 @@ Requires-Dist: opencv-python-headless
26
26
  Requires-Dist: scikit-image
27
27
  Requires-Dist: scipy>=1.8.0
28
28
  Provides-Extra: torch
29
- Requires-Dist: torch>=2.3; extra == "torch"
29
+ Requires-Dist: torch>=2.2; extra == "torch"
30
30
 
31
31
  |dcnum|
32
32
  =======
@@ -37,7 +37,7 @@ dependencies = [
37
37
  dynamic = ["version"]
38
38
 
39
39
  [project.optional-dependencies]
40
- torch = ["torch>=2.3"]
40
+ torch = ["torch>=2.2"]
41
41
 
42
42
  [project.urls]
43
43
  source = "https://github.com/DC-Analysis/dcnum"
@@ -12,5 +12,5 @@ __version__: str
12
12
  __version_tuple__: VERSION_TUPLE
13
13
  version_tuple: VERSION_TUPLE
14
14
 
15
- __version__ = version = '0.23.1'
16
- __version_tuple__ = version_tuple = (0, 23, 1)
15
+ __version__ = version = '0.23.2'
16
+ __version_tuple__ = version_tuple = (0, 23, 2)
@@ -1,6 +1,7 @@
1
1
  import collections
2
2
  import datetime
3
3
  import hashlib
4
+ import importlib
4
5
  import json
5
6
  import logging
6
7
  from logging.handlers import QueueListener
@@ -430,6 +431,16 @@ class DCNumJobRunner(threading.Thread):
430
431
  "build": ", ".join(platform.python_build()),
431
432
  "implementation":
432
433
  platform.python_implementation(),
434
+ "libraries": get_library_versions_dict([
435
+ "cv2",
436
+ "h5py",
437
+ "mahotas",
438
+ "numba",
439
+ "numpy",
440
+ "scipy",
441
+ "skimage",
442
+ "torch",
443
+ ]),
433
444
  "version": platform.python_version(),
434
445
  },
435
446
  "system": {
@@ -780,6 +791,19 @@ class DCNumJobRunner(threading.Thread):
780
791
  self.logger.info("Finished segmentation and feature extraction")
781
792
 
782
793
 
794
+ def get_library_versions_dict(library_name_list):
795
+ version_dict = {}
796
+ for library_name in library_name_list:
797
+ try:
798
+ lib = importlib.import_module(library_name)
799
+ except BaseException:
800
+ version = None
801
+ else:
802
+ version = lib.__version__
803
+ version_dict[library_name] = version
804
+ return version_dict
805
+
806
+
783
807
  def join_thread_helper(thr, timeout, retries, logger, name):
784
808
  for _ in range(retries):
785
809
  thr.join(timeout=timeout)
@@ -182,3 +182,25 @@ class DCNumPipelineJob:
182
182
  if len(ret) == 1:
183
183
  ret = ret[0]
184
184
  return ret
185
+
186
+ def validate(self):
187
+ """Make sure the pipeline will run given the job kwargs
188
+
189
+ Returns
190
+ -------
191
+ True:
192
+ for testing convenience
193
+
194
+ Raises
195
+ ------
196
+ dcnum.segm.SegmenterNotApplicableError:
197
+ the segmenter is incompatible with the input path
198
+ """
199
+ # Check segmenter applicability
200
+ seg_cls = get_available_segmenters()[self.kwargs["segmenter_code"]]
201
+ with HDF5Data(self.kwargs["path_in"]) as hd:
202
+ seg_cls.validate_applicability(
203
+ segmenter_kwargs=self.kwargs["segmenter_kwargs"],
204
+ logs=hd.logs,
205
+ meta=hd.meta)
206
+ return True
@@ -1,16 +1,20 @@
1
1
  import importlib
2
+ import warnings
2
3
 
3
4
  try:
4
5
  torch = importlib.import_module("torch")
5
6
  req_maj = 2
6
- req_min = 3
7
+ req_min = 2
7
8
  ver_tuple = torch.__version__.split(".")
8
9
  act_maj = int(ver_tuple[0])
9
10
  act_min = int(ver_tuple[1])
10
11
  if act_maj < req_maj or (act_maj == req_maj and act_min < req_min):
11
- raise ValueError(f"Your PyTorch version {act_maj}.{act_min} is not "
12
- f"supported, please update to at least "
13
- f"{req_maj}.{req_min}")
12
+ warnings.warn(f"Your PyTorch version {act_maj}.{act_min} is "
13
+ f"not supported, please update to at least "
14
+ f"{req_maj}.{req_min} to use dcnum's PyTorch "
15
+ f"segmenters")
16
+ raise ImportError(
17
+ f"Could not find PyTorch {req_maj}.{req_min}")
14
18
  except ImportError:
15
19
  pass
16
20
  else:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dcnum
3
- Version: 0.23.1
3
+ Version: 0.23.2
4
4
  Summary: numerics toolbox for imaging deformability cytometry
5
5
  Author: Maximilian Schlögel, Paul Müller, Raghava Alajangi
6
6
  Maintainer-email: Paul Müller <dev@craban.de>
@@ -26,7 +26,7 @@ Requires-Dist: opencv-python-headless
26
26
  Requires-Dist: scikit-image
27
27
  Requires-Dist: scipy>=1.8.0
28
28
  Provides-Extra: torch
29
- Requires-Dist: torch>=2.3; extra == "torch"
29
+ Requires-Dist: torch>=2.2; extra == "torch"
30
30
 
31
31
  |dcnum|
32
32
  =======
@@ -8,4 +8,4 @@ scikit-image
8
8
  scipy>=1.8.0
9
9
 
10
10
  [torch]
11
- torch>=2.3
11
+ torch>=2.2
@@ -21,7 +21,7 @@ class MockImageData:
21
21
  [0, 0, 1, 1, 1, 0, 0, 0],
22
22
  [0, 0, 1, 1, 1, 0, 0, 0],
23
23
  [0, 0, 0, 0, 0, 0, 0, 0],
24
- ], dtype=bool)
24
+ ], dtype=bool)
25
25
 
26
26
  def get_chunk(self, chunk_index):
27
27
  image = np.array(-(10 + chunk_index) * self.mask, dtype=np.int16)
@@ -1,5 +1,3 @@
1
- import pathlib
2
-
3
1
  import h5py
4
2
  import numpy as np
5
3
 
@@ -7,13 +5,10 @@ from dcnum.feat import feat_brightness
7
5
 
8
6
  from helper_methods import retrieve_data
9
7
 
10
- data_path = pathlib.Path(__file__).parent / "data"
11
-
12
8
 
13
9
  def test_basic_brightness():
14
10
  # This original file was generated with dcevent for reference.
15
- path = retrieve_data(data_path /
16
- "fmt-hdf5_cytoshot_full-features_2023.zip")
11
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
17
12
  # Make data available
18
13
  with h5py.File(path) as h5:
19
14
  data = feat_brightness.brightness_features(
@@ -35,8 +30,7 @@ def test_basic_brightness():
35
30
 
36
31
  def test_basic_brightness_single_image():
37
32
  # This original file was generated with dcevent for reference.
38
- path = retrieve_data(data_path /
39
- "fmt-hdf5_cytoshot_full-features_2023.zip")
33
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
40
34
  # Make data available
41
35
  with h5py.File(path) as h5:
42
36
  data = feat_brightness.brightness_features(
@@ -1,5 +1,3 @@
1
- import pathlib
2
-
3
1
  import h5py
4
2
  import numpy as np
5
3
 
@@ -7,13 +5,10 @@ from dcnum.feat import feat_texture
7
5
 
8
6
  from helper_methods import retrieve_data
9
7
 
10
- data_path = pathlib.Path(__file__).parent / "data"
11
-
12
8
 
13
9
  def test_basic_haralick():
14
10
  # This original file was generated with dcevent for reference.
15
- path = retrieve_data(data_path /
16
- "fmt-hdf5_cytoshot_full-features_2023.zip")
11
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
17
12
  # Make data available
18
13
  with h5py.File(path) as h5:
19
14
  ret_arr = feat_texture.haralick_texture_features(
@@ -1,5 +1,3 @@
1
- import pathlib
2
-
3
1
  import h5py
4
2
  import numpy as np
5
3
  import scipy.ndimage as ndi
@@ -8,8 +6,6 @@ from dcnum.feat import feat_contour
8
6
 
9
7
  from helper_methods import retrieve_data
10
8
 
11
- data_path = pathlib.Path(__file__).parent / "data"
12
-
13
9
 
14
10
  def test_inert_ratio_prnc():
15
11
  """Test tilt and equivalence of inert_ratio_raw and inert_ratio_prnc"""
@@ -151,8 +147,7 @@ def test_inert_ratio_prnc_simple_2():
151
147
 
152
148
  def test_moments_based_features():
153
149
  # This original file was generated with dcevent for reference.
154
- path = retrieve_data(data_path /
155
- "fmt-hdf5_cytoshot_full-features_2023.zip")
150
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
156
151
  feats = [
157
152
  "deform",
158
153
  "size_x",
@@ -201,9 +196,9 @@ def test_mask_0d():
201
196
  [0, 0, 0, 0, 0, 0],
202
197
  ], dtype=bool)[np.newaxis]
203
198
  data = feat_contour.moments_based_features(
204
- mask=masks,
205
- pixel_size=0.2645
206
- )
199
+ mask=masks,
200
+ pixel_size=0.2645
201
+ )
207
202
  assert data["deform"].shape == (1,)
208
203
  assert np.isnan(data["deform"][0])
209
204
  assert np.isnan(data["area_um"][0])
@@ -219,9 +214,9 @@ def test_mask_1d():
219
214
  [0, 0, 0, 0, 0, 0],
220
215
  ], dtype=bool)[np.newaxis]
221
216
  data = feat_contour.moments_based_features(
222
- mask=masks,
223
- pixel_size=0.2645
224
- )
217
+ mask=masks,
218
+ pixel_size=0.2645
219
+ )
225
220
  assert data["deform"].shape == (1,)
226
221
  assert np.isnan(data["deform"][0])
227
222
  assert np.isnan(data["area_um"][0])
@@ -237,9 +232,9 @@ def test_mask_1d_large():
237
232
  [0, 0, 1, 0, 0, 0],
238
233
  ], dtype=bool)[np.newaxis]
239
234
  data = feat_contour.moments_based_features(
240
- mask=masks,
241
- pixel_size=0.2645
242
- )
235
+ mask=masks,
236
+ pixel_size=0.2645
237
+ )
243
238
  assert data["deform"].shape == (1,)
244
239
  assert np.isnan(data["deform"][0])
245
240
  assert np.isnan(data["area_um"][0])
@@ -257,9 +252,9 @@ def test_mask_1d_large_no_border():
257
252
  [0, 0, 0, 0, 0, 0],
258
253
  ], dtype=bool)[np.newaxis]
259
254
  data = feat_contour.moments_based_features(
260
- mask=masks,
261
- pixel_size=0.2645
262
- )
255
+ mask=masks,
256
+ pixel_size=0.2645
257
+ )
263
258
  assert data["deform"].shape == (1,)
264
259
  assert np.isnan(data["deform"][0])
265
260
  assert np.isnan(data["area_um"][0])
@@ -275,9 +270,9 @@ def test_mask_2d():
275
270
  [0, 0, 0, 0, 0, 0],
276
271
  ], dtype=bool)[np.newaxis]
277
272
  data = feat_contour.moments_based_features(
278
- mask=masks,
279
- pixel_size=0.2645
280
- )
273
+ mask=masks,
274
+ pixel_size=0.2645
275
+ )
281
276
  assert data["deform"].shape == (1,)
282
277
  # This is the deformation of a square (compared to circle)
283
278
  assert np.allclose(data["deform"][0], 0.11377307454724206)
@@ -305,8 +300,8 @@ def test_mask_mixed():
305
300
  mixed_masks = np.append(mask_valid[None, ...],
306
301
  mask_invalid[None, ...], axis=0)
307
302
  data = feat_contour.moments_based_features(
308
- mask=mixed_masks,
309
- pixel_size=0.2645)
303
+ mask=mixed_masks,
304
+ pixel_size=0.2645)
310
305
  assert data["deform"].shape == (2,)
311
306
  assert np.all(data["valid"][:] == np.array([True, False]))
312
307
  assert not np.isnan(data["deform"][0])
@@ -1,5 +1,3 @@
1
- import pathlib
2
-
3
1
  import h5py
4
2
  import numpy as np
5
3
 
@@ -7,21 +5,18 @@ from dcnum.feat import feat_contour
7
5
 
8
6
  from helper_methods import retrieve_data
9
7
 
10
- data_path = pathlib.Path(__file__).parent / "data"
11
-
12
8
 
13
9
  def test_moments_based_features():
14
10
  # This file has new cell features belonging to
15
11
  # fmt-hdf5_cytoshot_full-features_2023.zip
16
- path = retrieve_data(data_path /
17
- "fmt-hdf5_cytoshot_extended-moments-features.zip")
12
+ path = retrieve_data("fmt-hdf5_cytoshot_extended-moments-features.zip")
18
13
 
19
14
  feats = [
20
- "area_um_raw",
21
- "deform_raw",
22
- "eccentr_prnc",
23
- "per_ratio",
24
- "per_um_raw",
15
+ "area_um_raw",
16
+ "deform_raw",
17
+ "eccentr_prnc",
18
+ "per_ratio",
19
+ "per_um_raw",
25
20
  ]
26
21
 
27
22
  # Make data available
@@ -49,9 +44,9 @@ def test_mask_2d():
49
44
  [0, 0, 0, 0, 0, 0],
50
45
  ], dtype=bool)[np.newaxis]
51
46
  data = feat_contour.moments_based_features(
52
- mask=masks,
53
- pixel_size=0.2645
54
- )
47
+ mask=masks,
48
+ pixel_size=0.2645
49
+ )
55
50
  assert data["deform_raw"].shape == (1,)
56
51
  # This is the deformation of a square (compared to circle)
57
52
  assert np.allclose(data["deform_raw"][0], 0.11377307454724206)
@@ -1,5 +1,4 @@
1
1
  import itertools
2
- import pathlib
3
2
 
4
3
  import h5py
5
4
  import numpy as np
@@ -10,8 +9,6 @@ from dcnum.feat.feat_contour import moments_based_features
10
9
 
11
10
  from helper_methods import retrieve_data
12
11
 
13
- data_path = pathlib.Path(__file__).parent / "data"
14
-
15
12
 
16
13
  def area_of_polygon(x, y):
17
14
  return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
@@ -105,8 +102,7 @@ def test_volume_from_file():
105
102
  1.09564687e+02, 2.39239370e-01, 1.71917437e-01, 8.98323862e+01,
106
103
  4.13412223e+00, 2.91659170e+02, 2.00198054e+02, 1.97545320e+00,
107
104
  9.15408837e+01, 1.60965362e-01, 3.48553309e-01, 2.04561447e+02])
108
- path = retrieve_data(data_path /
109
- "fmt-hdf5_cytoshot_full-features_2023.zip")
105
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
110
106
 
111
107
  with h5py.File(path) as h5:
112
108
  pixel_size = h5.attrs["imaging:pixel size"]
@@ -1,8 +1,12 @@
1
1
  import multiprocessing as mp
2
2
 
3
3
  from dcnum import logic
4
+ from dcnum.segm.segm_torch import segm_torch_base # noqa: E402
5
+ import h5py
4
6
 
5
- from helper_methods import retrieve_data
7
+ import pytest
8
+
9
+ from helper_methods import retrieve_data, retrieve_model
6
10
 
7
11
 
8
12
  def test_basic_job():
@@ -36,3 +40,35 @@ def test_segmenter_mask():
36
40
  )
37
41
  _, pdict = job.get_ppid(ret_dict=True)
38
42
  assert pdict["seg_id"] == "thresh:t=-6:cle=1^f=1^clo=3"
43
+
44
+
45
+ def test_validate_invalid_model():
46
+ model_file = retrieve_model(
47
+ "segm-torch-model_unet-dcnum-test_g1_910c2.zip")
48
+
49
+ # Create a test dataset with metadata that will make the model invalid
50
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2024.zip")
51
+
52
+ with h5py.File(path, "a") as h5:
53
+ h5.attrs["setup:chip region"] = "reservoir"
54
+
55
+ job = logic.DCNumPipelineJob(path_in=path,
56
+ segmenter_code="torchmpo",
57
+ segmenter_kwargs={
58
+ "model_file": model_file},
59
+ )
60
+
61
+ with pytest.raises(
62
+ segm_torch_base.SegmenterNotApplicableError,
63
+ match="only experiments in channel region supported"):
64
+ job.validate()
65
+
66
+
67
+ def test_validate_ok():
68
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
69
+ job = logic.DCNumPipelineJob(path_in=path,
70
+ segmenter_code="thresh",
71
+ segmenter_kwargs={
72
+ "kwargs_mask": {"closing_disk": 3}},
73
+ )
74
+ assert job.validate()
@@ -135,7 +135,7 @@ def test_basin_strategy_tap():
135
135
 
136
136
  with h5py.File(path_out) as h5:
137
137
  assert h5.attrs["pipeline:dcnum background"] \
138
- == "sparsemed:k=150^s=1^t=0^f=0.8^o=1"
138
+ == "sparsemed:k=150^s=1^t=0^f=0.8^o=1"
139
139
  assert "image_bg" in h5["events"]
140
140
  assert "bg_off" in h5["events"]
141
141
  assert "deform" in h5["events"]
@@ -174,7 +174,7 @@ def test_basin_relative_path():
174
174
  # Everything should just work, because we have relative paths in the basin.
175
175
  with h5py.File(path_out_new) as h5:
176
176
  assert h5.attrs["pipeline:dcnum background"] \
177
- == "sparsemed:k=150^s=1^t=0^f=0.8^o=1"
177
+ == "sparsemed:k=150^s=1^t=0^f=0.8^o=1"
178
178
  assert "image_bg" in h5["events"]
179
179
  assert "bg_off" in h5["events"]
180
180
  assert "deform" in h5["events"]
@@ -207,7 +207,7 @@ def test_chained_pipeline():
207
207
 
208
208
  with h5py.File(path2) as h5:
209
209
  assert h5.attrs["pipeline:dcnum background"] \
210
- == "sparsemed:k=150^s=1^t=0^f=0.8^o=1"
210
+ == "sparsemed:k=150^s=1^t=0^f=0.8^o=1"
211
211
  assert "image" in h5["events"]
212
212
  assert "image_bg" in h5["events"]
213
213
  for feat in h5["events"]:
@@ -228,7 +228,7 @@ def test_chained_pipeline():
228
228
  assert "image_bg" in h5["events"]
229
229
  assert len(h5["events/deform"]) == 285
230
230
  assert h5.attrs["pipeline:dcnum background"] \
231
- == "sparsemed:k=250^s=1^t=0^f=0.8^o=1"
231
+ == "sparsemed:k=250^s=1^t=0^f=0.8^o=1"
232
232
  for feat in h5["events"]:
233
233
  assert len(h5["events"][feat]) == 285
234
234
 
@@ -745,8 +745,8 @@ def test_simple_pipeline(debug):
745
745
  assert h5.attrs["experiment:sample"] == "data"
746
746
  assert h5.attrs["experiment:date"] == "2022-04-21"
747
747
  assert h5.attrs["experiment:run identifier"] == \
748
- (f"d5a40aed-0b6c-0412-e87c-59789fdd28d0_"
749
- f"dcn-{pp_hash[:7]}")
748
+ (f"d5a40aed-0b6c-0412-e87c-59789fdd28d0_"
749
+ f"dcn-{pp_hash[:7]}")
750
750
 
751
751
 
752
752
  @pytest.mark.parametrize("debug", [True, False])
@@ -814,8 +814,8 @@ def test_simple_pipeline_no_offset_correction(debug):
814
814
  assert h5.attrs["experiment:sample"] == "data"
815
815
  assert h5.attrs["experiment:date"] == "2022-04-21"
816
816
  assert h5.attrs["experiment:run identifier"] == \
817
- (f"d5a40aed-0b6c-0412-e87c-59789fdd28d0_"
818
- f"dcn-{pp_hash[:7]}")
817
+ (f"d5a40aed-0b6c-0412-e87c-59789fdd28d0_"
818
+ f"dcn-{pp_hash[:7]}")
819
819
 
820
820
 
821
821
  def test_simple_pipeline_in_thread():
@@ -1,5 +1,3 @@
1
- import pathlib
2
-
3
1
  from dcnum import read
4
2
  import h5py
5
3
  import numpy as np
@@ -7,13 +5,10 @@ import pytest
7
5
 
8
6
  from helper_methods import retrieve_data
9
7
 
10
- data_path = pathlib.Path(__file__).parent / "data"
11
-
12
8
 
13
9
  @pytest.mark.parametrize("path_out", [None, True])
14
10
  def test_concat_basic(path_out):
15
- path = retrieve_data(data_path /
16
- "fmt-hdf5_cytoshot_full-features_2023.zip")
11
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
17
12
  # create simple concatenated dataset, repeating a file
18
13
  data = read.concatenated_hdf5_data([path, path, path],
19
14
  path_out=path_out)
@@ -22,8 +17,7 @@ def test_concat_basic(path_out):
22
17
 
23
18
  @pytest.mark.parametrize("path_out", [None, True])
24
19
  def test_concat_basic_frame(path_out):
25
- path = retrieve_data(data_path /
26
- "fmt-hdf5_cytoshot_full-features_2023.zip")
20
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
27
21
  # create simple concatenated dataset, repeating a file
28
22
  data = read.concatenated_hdf5_data([path, path, path],
29
23
  path_out=path_out)
@@ -43,8 +37,7 @@ def test_concat_basic_frame(path_out):
43
37
 
44
38
 
45
39
  def test_concat_basic_to_file(tmp_path):
46
- path = retrieve_data(data_path /
47
- "fmt-hdf5_cytoshot_full-features_2023.zip")
40
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
48
41
  # create simple concatenated dataset, repeating a file
49
42
  path_out = tmp_path / "test.rtdc"
50
43
  assert not path_out.exists()
@@ -56,8 +49,7 @@ def test_concat_basic_to_file(tmp_path):
56
49
 
57
50
  def test_concat_ignore_contour(tmp_path):
58
51
  # get file with contour information
59
- path = retrieve_data(data_path /
60
- "fmt-hdf5_cytoshot_full-features_2023.zip")
52
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
61
53
  cdata = np.array([[2, 2], [2, 3], [2, 5], [4, 5], [4, 2]])
62
54
  with h5py.File(path, mode="a") as h5:
63
55
  contour = h5["events"].create_group("contour")
@@ -76,10 +68,8 @@ def test_concat_ignore_contour(tmp_path):
76
68
 
77
69
 
78
70
  def test_concat_invalid_input_feature_number():
79
- path = retrieve_data(data_path /
80
- "fmt-hdf5_cytoshot_full-features_2023.zip")
81
- path2 = retrieve_data(data_path /
82
- "fmt-hdf5_cytoshot_full-features_2023.zip")
71
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
72
+ path2 = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
83
73
 
84
74
  # Add a new feature to path2
85
75
  with h5py.File(path2, mode="a") as h5:
@@ -91,8 +81,7 @@ def test_concat_invalid_input_feature_number():
91
81
 
92
82
 
93
83
  def test_concat_invalid_input_path():
94
- path = retrieve_data(data_path /
95
- "fmt-hdf5_cytoshot_full-features_2023.zip")
84
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
96
85
  invalid_output = 42
97
86
  with pytest.raises(ValueError, match="Invalid type"):
98
87
  read.concatenated_hdf5_data([path, path, path],
@@ -100,17 +89,14 @@ def test_concat_invalid_input_path():
100
89
 
101
90
 
102
91
  def test_concat_invalid_input_path_number():
103
- path = retrieve_data(data_path /
104
- "fmt-hdf5_cytoshot_full-features_2023.zip")
92
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
105
93
  with pytest.raises(ValueError, match="Please specify at least two"):
106
94
  read.concatenated_hdf5_data([path])
107
95
 
108
96
 
109
97
  def test_concat_specify_input_feature_number():
110
- path = retrieve_data(data_path /
111
- "fmt-hdf5_cytoshot_full-features_2023.zip")
112
- path2 = retrieve_data(data_path /
113
- "fmt-hdf5_cytoshot_full-features_2023.zip")
98
+ path = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
99
+ path2 = retrieve_data("fmt-hdf5_cytoshot_full-features_2023.zip")
114
100
 
115
101
  # Add a new feature to path2
116
102
  with h5py.File(path2, mode="a") as h5: