dcnum 0.25.5__tar.gz → 0.25.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dcnum might be problematic. Click here for more details.

Files changed (128) hide show
  1. {dcnum-0.25.5 → dcnum-0.25.7}/.github/workflows/check.yml +1 -0
  2. {dcnum-0.25.5 → dcnum-0.25.7}/.readthedocs.yml +2 -0
  3. {dcnum-0.25.5 → dcnum-0.25.7}/CHANGELOG +6 -1
  4. {dcnum-0.25.5 → dcnum-0.25.7}/PKG-INFO +1 -1
  5. dcnum-0.25.7/docs/.gitignore +1 -0
  6. {dcnum-0.25.5 → dcnum-0.25.7}/docs/conf.py +5 -6
  7. {dcnum-0.25.5 → dcnum-0.25.7}/docs/index.rst +1 -0
  8. {dcnum-0.25.5 → dcnum-0.25.7}/docs/requirements.txt +4 -2
  9. dcnum-0.25.7/docs/sec_design.rst +88 -0
  10. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/_version.py +9 -4
  11. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/event_extractor_manager_thread.py +29 -14
  12. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_background/base.py +19 -11
  13. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_background/bg_copy.py +4 -0
  14. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_background/bg_roll_median.py +24 -15
  15. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_background/bg_sparse_median.py +32 -17
  16. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_contour/volume.py +2 -2
  17. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/gate.py +11 -11
  18. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/queue_event_extractor.py +39 -20
  19. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/logic/ctrl.py +5 -4
  20. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/logic/job.py +3 -1
  21. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/logic/json_encoder.py +9 -0
  22. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/meta/paths.py +1 -0
  23. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/meta/ppid.py +4 -2
  24. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/os_env_st.py +2 -2
  25. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/read/cache.py +3 -1
  26. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/read/const.py +5 -2
  27. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/read/detect_flicker.py +1 -1
  28. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_torch/segm_torch_base.py +3 -2
  29. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_torch/torch_postproc.py +1 -0
  30. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_torch/torch_preproc.py +1 -0
  31. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segmenter.py +31 -20
  32. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segmenter_manager_thread.py +19 -12
  33. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segmenter_mpo.py +4 -4
  34. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segmenter_sto.py +2 -2
  35. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/write/queue_collector_thread.py +35 -18
  36. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/write/writer.py +4 -3
  37. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum.egg-info/PKG-INFO +1 -1
  38. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum.egg-info/SOURCES.txt +2 -0
  39. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_logic_pipeline.py +1 -1
  40. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_read_hdf5.py +1 -1
  41. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_write_writer.py +2 -1
  42. {dcnum-0.25.5 → dcnum-0.25.7}/.github/workflows/deploy_pypi.yml +0 -0
  43. {dcnum-0.25.5 → dcnum-0.25.7}/.gitignore +0 -0
  44. {dcnum-0.25.5 → dcnum-0.25.7}/LICENSE +0 -0
  45. {dcnum-0.25.5 → dcnum-0.25.7}/README.rst +0 -0
  46. {dcnum-0.25.5 → dcnum-0.25.7}/benchmark/.gitignore +0 -0
  47. {dcnum-0.25.5 → dcnum-0.25.7}/benchmark/Readme.md +0 -0
  48. {dcnum-0.25.5 → dcnum-0.25.7}/benchmark/benchmark.py +0 -0
  49. {dcnum-0.25.5 → dcnum-0.25.7}/benchmark/bm_write_deque_writer_thread.py +0 -0
  50. {dcnum-0.25.5 → dcnum-0.25.7}/benchmark/bm_write_queue_collector_thread.py +0 -0
  51. {dcnum-0.25.5 → dcnum-0.25.7}/docs/extensions/github_changelog.py +0 -0
  52. {dcnum-0.25.5 → dcnum-0.25.7}/pyproject.toml +0 -0
  53. {dcnum-0.25.5 → dcnum-0.25.7}/setup.cfg +0 -0
  54. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/__init__.py +0 -0
  55. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/__init__.py +0 -0
  56. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_background/__init__.py +0 -0
  57. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_brightness/__init__.py +0 -0
  58. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_brightness/bright_all.py +0 -0
  59. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_brightness/common.py +0 -0
  60. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_contour/__init__.py +0 -0
  61. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_contour/contour.py +0 -0
  62. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_contour/moments.py +0 -0
  63. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_texture/__init__.py +0 -0
  64. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_texture/common.py +0 -0
  65. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/feat/feat_texture/tex_all.py +0 -0
  66. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/logic/__init__.py +0 -0
  67. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/meta/__init__.py +0 -0
  68. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/read/__init__.py +0 -0
  69. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/read/hdf5_data.py +0 -0
  70. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/read/mapped.py +0 -0
  71. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/__init__.py +0 -0
  72. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_thresh.py +0 -0
  73. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_torch/__init__.py +0 -0
  74. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_torch/segm_torch_mpo.py +0 -0
  75. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_torch/segm_torch_sto.py +0 -0
  76. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/segm/segm_torch/torch_model.py +0 -0
  77. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/write/__init__.py +0 -0
  78. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum/write/deque_writer_thread.py +0 -0
  79. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum.egg-info/dependency_links.txt +0 -0
  80. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum.egg-info/requires.txt +0 -0
  81. {dcnum-0.25.5 → dcnum-0.25.7}/src/dcnum.egg-info/top_level.txt +0 -0
  82. {dcnum-0.25.5 → dcnum-0.25.7}/tests/conftest.py +0 -0
  83. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/fmt-hdf5_cytoshot_extended-moments-features.zip +0 -0
  84. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/fmt-hdf5_cytoshot_full-features_2023.zip +0 -0
  85. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/fmt-hdf5_cytoshot_full-features_2024.zip +0 -0
  86. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/fmt-hdf5_cytoshot_full-features_legacy_allev_2023.zip +0 -0
  87. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/fmt-hdf5_shapein_empty.zip +0 -0
  88. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/fmt-hdf5_shapein_raw-with-variable-length-logs.zip +0 -0
  89. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/segm-torch-model_unet-dcnum-test_g1_910c2.zip +0 -0
  90. {dcnum-0.25.5 → dcnum-0.25.7}/tests/data/segm-torch-test-data_unet-dcnum-test_g1_910c2.zip +0 -0
  91. {dcnum-0.25.5 → dcnum-0.25.7}/tests/helper_methods.py +0 -0
  92. {dcnum-0.25.5 → dcnum-0.25.7}/tests/requirements.txt +0 -0
  93. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_background_base.py +0 -0
  94. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_background_bg_copy.py +0 -0
  95. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_background_bg_roll_median.py +0 -0
  96. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_background_bg_sparsemed.py +0 -0
  97. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_brightness.py +0 -0
  98. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_event_extractor_manager.py +0 -0
  99. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_gate.py +0 -0
  100. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_haralick.py +0 -0
  101. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_moments_based.py +0 -0
  102. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_moments_based_extended.py +0 -0
  103. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_feat_volume.py +0 -0
  104. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_init.py +0 -0
  105. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_logic_job.py +0 -0
  106. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_logic_join.py +0 -0
  107. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_logic_json.py +0 -0
  108. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_meta_paths.py +0 -0
  109. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_meta_ppid_base.py +0 -0
  110. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_meta_ppid_bg.py +0 -0
  111. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_meta_ppid_data.py +0 -0
  112. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_meta_ppid_feat.py +0 -0
  113. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_meta_ppid_gate.py +0 -0
  114. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_meta_ppid_segm.py +0 -0
  115. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_read_basin.py +0 -0
  116. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_read_concat_hdf5.py +0 -0
  117. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_read_detect_flicker.py +0 -0
  118. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_read_hdf5_basins.py +0 -0
  119. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_read_hdf5_index_mapping.py +0 -0
  120. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_segm_base.py +0 -0
  121. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_segm_mpo.py +0 -0
  122. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_segm_no_mask_proc.py +0 -0
  123. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_segm_sto.py +0 -0
  124. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_segm_thresh.py +0 -0
  125. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_segm_torch.py +0 -0
  126. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_segm_torch_preproc.py +0 -0
  127. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_write_deque_writer_thread.py +0 -0
  128. {dcnum-0.25.5 → dcnum-0.25.7}/tests/test_write_queue_collector_thread.py +0 -0
@@ -12,6 +12,7 @@ jobs:
12
12
  matrix:
13
13
  python-version: ['3.10', '3.x']
14
14
  os: [macos-latest, ubuntu-latest, windows-latest]
15
+ fail-fast: false
15
16
  timeout-minutes: 30
16
17
 
17
18
  steps:
@@ -5,6 +5,8 @@ build:
5
5
  os: ubuntu-22.04
6
6
  tools:
7
7
  python: "3.11"
8
+ sphinx:
9
+ configuration: docs/conf.py
8
10
  python:
9
11
  install:
10
12
  - requirements: docs/requirements.txt
@@ -1,3 +1,8 @@
1
+ 0.25.7
2
+ - enh: `HDF5Writer.store_log` returns created dataset
3
+ - docs: add code reference using apidoc
4
+ 0.25.6
5
+ - maintenance release
1
6
  0.25.5
2
7
  - enh: support unnamed table data in `HDF5Data`
3
8
  - setup: pin scipy<1.15 due to https://github.com/scipy/scipy/issues/22333
@@ -218,7 +223,7 @@
218
223
  - ref: increment DCNUM_PPID_GENERATION to 6
219
224
  0.13.3
220
225
  - fix: correctly raise KeyError for missing image-based feature from
221
- HDF5Data._image_cache
226
+ `HDF5Data._image_cache`
222
227
  0.13.2
223
228
  - fix: properly convert variable-length string logs in `copy_metadata`
224
229
  0.13.1
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: dcnum
3
- Version: 0.25.5
3
+ Version: 0.25.7
4
4
  Summary: numerics toolbox for imaging deformability cytometry
5
5
  Author: Maximilian Schlögel, Paul Müller, Raghava Alajangi
6
6
  Maintainer-email: Paul Müller <dev@craban.de>
@@ -0,0 +1 @@
1
+ autoapi
@@ -33,7 +33,7 @@ author = 'Paul Müller'
33
33
  # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34
34
  # ones.
35
35
  extensions = ['sphinx.ext.autodoc',
36
- 'sphinx.ext.autosummary',
36
+ 'autoapi.extension',
37
37
  'sphinx.ext.intersphinx',
38
38
  'sphinx.ext.mathjax',
39
39
  'sphinx.ext.viewcode',
@@ -63,7 +63,7 @@ html_theme = 'sphinx_rtd_theme'
63
63
  # Add any paths that contain custom static files (such as style sheets) here,
64
64
  # relative to this directory. They are copied after the builtin static files,
65
65
  # so a file named "default.css" will overwrite the builtin "default.css".
66
- html_static_path = ['_static']
66
+ html_static_path = []
67
67
 
68
68
  # The version info for the project you're documenting, acts as replacement for
69
69
  # |version| and |release|, also used in various other places throughout the
@@ -81,7 +81,6 @@ numfig = True
81
81
  # include source of matplotlib plots
82
82
  plot_include_source = True
83
83
 
84
- # http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_member_order
85
- # Order class attributes and functions in separate blocks
86
- autodoc_member_order = 'groupwise'
87
- autoclass_content = 'both'
84
+ autoapi_dirs = ['../src/dcnum']
85
+ autoapi_python_class_content = "init"
86
+ autoapi_keep_files = True # for debugging docstrings
@@ -10,6 +10,7 @@ Welcome to dcnum's documentation!
10
10
  :maxdepth: 2
11
11
  :caption: Contents:
12
12
 
13
+ sec_design
13
14
 
14
15
 
15
16
  Indices and tables
@@ -3,5 +3,7 @@ h5py
3
3
  ipython
4
4
  matplotlib
5
5
  numpy
6
- sphinx==4.2.0
7
- sphinx_rtd_theme==1.0
6
+ sphinx>=5
7
+ sphinx_rtd_theme
8
+ sphinx-autoapi
9
+
@@ -0,0 +1,88 @@
1
+ .. _sec_design:
2
+
3
+ The design of dcnum
4
+ ===================
5
+
6
+
7
+ Submodule Structure
8
+ -------------------
9
+
10
+ The general idea of dcnum is to have a toolset for processing raw DC data,
11
+ which includes reading images, segmenting events, extracting features for
12
+ each event, and writing to an output file.
13
+
14
+ Each of the individual submodules serves one particular aspect of the
15
+ pipeline:
16
+
17
+ .. list-table:: dcnum submodules
18
+ :header-rows: 1
19
+
20
+ * - Submodule
21
+ - Description
22
+
23
+ * - :mod:`.feat`
24
+ - Feature extraction from segmented image data.
25
+
26
+ * - :mod:`.logic`
27
+ - | Contains the necessary logic (the glue) to combine all
28
+ | the other submodules for processing a dataset.
29
+
30
+ * - :mod:`.meta`
31
+ - | Handles metadata, most importantly the pipeline identifiers
32
+ | (PPIDs).
33
+
34
+ * - :mod:`.read`
35
+ - For reading raw HDF5 (.rtdc) files.
36
+
37
+ * - :mod:`.segm`
38
+ - | Event segmentation finds objects in an image and returns a
39
+ | binary mask for each object.
40
+
41
+ * - :mod:`.write`
42
+ - For writing data to HDF5 (.rtdc) files.
43
+
44
+
45
+ Pipeline sequence
46
+ -----------------
47
+
48
+ A pipeline (including its PPID) is defined via the
49
+ :class:`.logic.job.DCNumPipelineJob` class which represents the recipe for a
50
+ pipeline. The pipeline is executed with the :class:`.logic.ctrl.DCNumJobRunner`.
51
+ Here is a simple example that runs the default pipeline for an .rtdc file.
52
+
53
+ .. code:: python
54
+
55
+ from dcnum.logic import DCNumPipelineJob, DCNumJobRunner
56
+
57
+ job = logic.DCNumPipelineJob(path_in="input.rtdc")
58
+ with logic.DCNumJobRunner(job=job) as runner:
59
+ runner.run()
60
+
61
+ Take a look at the keyword arguments that the classes mentioned above
62
+ accept. Note that you can specify methods for background correction as
63
+ well as segmentation, and that you have full access to the keyword arguments
64
+ for every step in the pipeline. Also note that a reproducible PPID is derived
65
+ from these keyword arguments (:meth:`.logic.job.DCNumPipelineJob.get_ppid`).
66
+
67
+ The following happens when you run the above code snippet:
68
+
69
+ 1. The file `input.rtdc` is opened using the module :mod:`.read`.
70
+ 2. The ``DCNumJobRunner`` creates two managers:
71
+
72
+ - :class:`.segm.segmenter_manager_thread.SegmenterManagerThread` which spawns
73
+ segmentation workers (subclasses of :class:`.segm.segmenter.Segmenter`)
74
+ in separate subprocesses.
75
+ - :class:`.feat.event_extractor_manager_thread.EventExtractorManagerThread`
76
+ which spawns feature extraction workers
77
+ (:class:`.feat.queue_event_extractor.QueueEventExtractor`) in
78
+ separate subprocesses.
79
+ 3. The segmentation workers read a chunk of image data and return the label
80
+ image (integer-valued labels, one mask per event in a frame).
81
+ 4. The label images are fed via a shared array to the feature extraction
82
+ workers.
83
+ 5. The feature extraction workers put the event information (one event per
84
+ unique integer-labeled mask in the label image) in the event queue.
85
+ 6. A :class:`.write.queue_collector_thread.QueueCollectorThread` puts the
86
+ events in the right order and stages them for writing in chunks.
87
+ 7. A :class:`.write.deque_writer_thread.DequeWriterThread` writes the
88
+ chunks to the output file.
@@ -1,8 +1,13 @@
1
- # file generated by setuptools_scm
1
+ # file generated by setuptools-scm
2
2
  # don't change, don't track in version control
3
+
4
+ __all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
5
+
3
6
  TYPE_CHECKING = False
4
7
  if TYPE_CHECKING:
5
- from typing import Tuple, Union
8
+ from typing import Tuple
9
+ from typing import Union
10
+
6
11
  VERSION_TUPLE = Tuple[Union[int, str], ...]
7
12
  else:
8
13
  VERSION_TUPLE = object
@@ -12,5 +17,5 @@ __version__: str
12
17
  __version_tuple__: VERSION_TUPLE
13
18
  version_tuple: VERSION_TUPLE
14
19
 
15
- __version__ = version = '0.25.5'
16
- __version_tuple__ = version_tuple = (0, 25, 5)
20
+ __version__ = version = '0.25.7'
21
+ __version_tuple__ = version_tuple = (0, 25, 7)
@@ -35,8 +35,8 @@ class EventExtractorManagerThread(threading.Thread):
35
35
  with segmenter"), so that the segmenter can compute a new
36
36
  chunk of labels.
37
37
  slot_chunks:
38
- For each slot in `slot_states`, this shared array defines
39
- on which chunk in `image_data` the segmentation took place.
38
+ For each slot in ``slot_states``, this shared array defines
39
+ on which chunk in ``image_data`` the segmentation took place.
40
40
  fe_kwargs:
41
41
  Feature extraction keyword arguments. See
42
42
  :func:`.EventExtractor.get_init_kwargs` for more information.
@@ -47,36 +47,51 @@ class EventExtractorManagerThread(threading.Thread):
47
47
  fills up, we take a break.
48
48
  debug:
49
49
  Whether to run in debugging mode which means only one
50
- event extraction thread (`num_workers` has no effect).
50
+ event extraction thread (``num_workers`` has no effect).
51
51
  """
52
52
  super(EventExtractorManagerThread, self).__init__(
53
53
  name="EventExtractorManager", *args, **kwargs)
54
54
  self.logger = logging.getLogger(
55
55
  "dcnum.feat.EventExtractorManagerThread")
56
- #: Keyword arguments for class:`.EventExtractor`
56
+
57
57
  self.fe_kwargs = fe_kwargs
58
- #: Data instance
58
+ """Keyword arguments
59
+ for :class:`.queue_event_extractor.QueueEventExtractor`
60
+ instances"""
61
+
59
62
  self.data = fe_kwargs["data"]
60
- #: States of the segmenter-extractor pipeline slots
63
+ """Data instance"""
64
+
61
65
  self.slot_states = slot_states
62
- #: Chunks indices corresponding to `slot_states`
66
+ """States of the segmenter-extractor pipeline slots"""
67
+
63
68
  self.slot_chunks = slot_chunks
64
- #: Number of workers
69
+ """Chunk indices corresponding to ``slot_states``
70
+ """
71
+
65
72
  self.num_workers = 1 if debug else num_workers
66
- #: Queue for sending chunks and label indices to the workers
73
+ """Number of workers"""
74
+
67
75
  self.raw_queue = self.fe_kwargs["raw_queue"]
68
- #: List of chunk labels corresponding to `slot_states`
76
+ """Queue for sending chunks and label indices to the workers"""
77
+
69
78
  self.labels_list = labels_list
70
- #: Shared labeling array
79
+ """List of chunk labels corresponding to ``slot_states``
80
+ """
81
+
71
82
  self.label_array = np.ctypeslib.as_array(
72
83
  self.fe_kwargs["label_array"]).reshape(
73
84
  self.data.image.chunk_shape)
74
- #: Writer deque to monitor
85
+ """Shared labeling array"""
86
+
75
87
  self.writer_dq = writer_dq
76
- #: Time counter for feature extraction
88
+ """Writer deque to monitor"""
89
+
77
90
  self.t_count = 0
78
- #: Whether debugging is enabled
91
+ """Time counter for feature extraction"""
92
+
79
93
  self.debug = debug
94
+ """Whether debugging is enabled"""
80
95
 
81
96
  def run(self):
82
97
  # Initialize all workers
@@ -21,7 +21,7 @@ mp_spawn = mp.get_context('spawn')
21
21
  class Background(abc.ABC):
22
22
  def __init__(self, input_data, output_path, compress=True, num_cpus=None,
23
23
  **kwargs):
24
- """
24
+ """Base class for background computation
25
25
 
26
26
  Parameters
27
27
  ----------
@@ -56,28 +56,35 @@ class Background(abc.ABC):
56
56
  # Using spec is not really necessary here, because kwargs are
57
57
  # fully populated for background computation, but this might change.
58
58
  spec = inspect.getfullargspec(self.check_user_kwargs)
59
- #: background keyword arguments
59
+
60
60
  self.kwargs = spec.kwonlydefaults or {}
61
+ """background keyword arguments"""
61
62
  self.kwargs.update(kwargs)
62
63
 
63
64
  if num_cpus is None:
64
65
  num_cpus = mp_spawn.cpu_count()
65
- #: number of CPUs used
66
+
66
67
  self.num_cpus = num_cpus
68
+ """number of CPUs used"""
67
69
 
68
- #: number of images in the input data
69
70
  self.image_count = None
70
- #: fraction images that have been processed
71
+ """number of images in the input data"""
72
+
71
73
  self.image_proc = mp_spawn.Value("d", 0)
74
+ """fraction images that have been processed"""
72
75
 
73
- #: HDF5Data instance for input data
74
76
  self.hdin = None
75
- #: input h5py.File
77
+ """HDF5Data instance for input data"""
78
+
76
79
  self.h5in = None
77
- #: output h5py.File
80
+ """input h5py.File"""
81
+
78
82
  self.h5out = None
79
- #: reference paths for logging to the output .rtdc file
83
+ """output h5py.File"""
84
+
80
85
  self.paths_ref = []
86
+ """reference paths for logging to the output .rtdc file"""
87
+
81
88
  # Check whether user passed an array or a path
82
89
  if isinstance(input_data, pathlib.Path):
83
90
  if str(input_data.resolve()) == str(output_path.resolve()):
@@ -96,10 +103,11 @@ class Background(abc.ABC):
96
103
  else:
97
104
  self.input_data = input_data
98
105
 
99
- #: shape of event images
100
106
  self.image_shape = self.input_data[0].shape
101
- #: total number of events
107
+ """shape of event images"""
108
+
102
109
  self.image_count = len(self.input_data)
110
+ """total number of events"""
103
111
 
104
112
  if self.h5out is None:
105
113
  if not output_path.exists():
@@ -4,6 +4,10 @@ from .base import Background
4
4
 
5
5
 
6
6
  class BackgroundCopy(Background):
7
+ def __init__(self, *args, **kwargs):
8
+ """Copy the input background data to the output file"""
9
+ super().__init__(*args, **kwargs)
10
+
7
11
  @staticmethod
8
12
  def check_user_kwargs():
9
13
  pass
@@ -42,7 +42,7 @@ class BackgroundRollMed(Background):
42
42
  batch_size: int
43
43
  Number of events to process at the same time. Increasing this
44
44
  number much more than two orders of magnitude larger than
45
- `kernel_size` will not increase computation speed. Larger
45
+ ``kernel_size`` will not increase computation speed. Larger
46
46
  values lead to a higher memory consumption.
47
47
  compress: bool
48
48
  Whether to compress background data. Set this to False
@@ -64,39 +64,47 @@ class BackgroundRollMed(Background):
64
64
  f"size {len(self.input_data)} is larger than the "
65
65
  f"kernel size {kernel_size}!")
66
66
 
67
- #: kernel size used for median filtering
68
67
  self.kernel_size = kernel_size
69
- #: number of events processed at once
68
+ """kernel size used for median filtering"""
69
+
70
70
  self.batch_size = batch_size
71
+ """number of events processed at once"""
71
72
 
72
- #: mp.RawArray for temporary batch input data
73
73
  self.shared_input_raw = mp_spawn.RawArray(
74
74
  np.ctypeslib.ctypes.c_uint8,
75
75
  int(np.prod(self.image_shape)) * (batch_size + kernel_size))
76
- #: mp.RawArray for temporary batch output data
76
+ """mp.RawArray for temporary batch input data"""
77
+
77
78
  self.shared_output_raw = mp_spawn.RawArray(
78
79
  np.ctypeslib.ctypes.c_uint8,
79
80
  int(np.prod(self.image_shape)) * batch_size)
81
+ """mp.RawArray for temporary batch output data"""
82
+
80
83
  # Convert the RawArray to something we can write to fast
81
84
  # (similar to memoryview, but without having to cast) using
82
85
  # np.ctypeslib.as_array. See discussion in
83
86
  # https://stackoverflow.com/questions/37705974
84
- #: numpy array reshaped view on `self.shared_input_raw` with
85
- #: first axis enumerating the events
86
87
  self.shared_input = np.ctypeslib.as_array(
87
88
  self.shared_input_raw).reshape(batch_size + kernel_size, -1)
88
- #: numpy array reshaped view on `self.shared_output_raw` with
89
- #: first axis enumerating the events
89
+ """numpy array reshaped view on `self.shared_input_raw`.
90
+ The first axis enumerating the events
91
+ """
92
+
90
93
  self.shared_output = np.ctypeslib.as_array(
91
94
  self.shared_output_raw).reshape(batch_size, -1)
92
- #: current batch index (see `self.process` and `process_next_batch`)
95
+ """numpy array reshaped view on `self.shared_output_raw`.
96
+ The first axis enumerating the events
97
+ """
98
+
93
99
  self.current_batch = 0
100
+ """current batch index (see `self.process` and `process_next_batch`)"""
94
101
 
95
- #: counter tracking process of workers
96
102
  self.worker_counter = mp_spawn.Value("l", 0)
97
- #: queue for median computation jobs
103
+ """counter tracking progress of workers"""
104
+
98
105
  self.queue = mp_spawn.Queue()
99
- #: list of workers (processes)
106
+ """queue for median computation jobs"""
107
+
100
108
  self.workers = [WorkerRollMed(self.queue,
101
109
  self.worker_counter,
102
110
  self.shared_input_raw,
@@ -104,6 +112,7 @@ class BackgroundRollMed(Background):
104
112
  self.batch_size,
105
113
  self.kernel_size)
106
114
  for _ in range(self.num_cpus)]
115
+ """list of workers (processes)"""
107
116
  [w.start() for w in self.workers]
108
117
 
109
118
  def __enter__(self):
@@ -131,7 +140,7 @@ class BackgroundRollMed(Background):
131
140
  batch_size: int
132
141
  Number of events to process at the same time. Increasing this
133
142
  number much more than two orders of magnitude larger than
134
- `kernel_size` will not increase computation speed. Larger
143
+ ``kernel_size`` will not increase computation speed. Larger
135
144
  values lead to a higher memory consumption.
136
145
  """
137
146
  assert kernel_size > 0, "kernel size must be positive number"
@@ -283,7 +292,7 @@ def compute_median_for_slice(shared_input, shared_output, kernel_size,
283
292
  in the original image, batch_size + kernel_size events are
284
293
  stored in this array one after another in a row.
285
294
  The total size of this array is
286
- `batch_size` * `kernel_size` * `number_of_pixels_in_the_image`.
295
+ ``batch_size * kernel_size * number_of_pixels_in_the_image``.
287
296
  shared_output: multiprocessing.RawArray
288
297
  Used for storing the result. Note that the last `kernel_size`
289
298
  elements for each pixel in this output array are junk data
@@ -21,8 +21,8 @@ class BackgroundSparseMed(Background):
21
21
 
22
22
  In contrast to the rolling median background correction,
23
23
  this algorithm only computes the background image every
24
- `split_time` seconds, but with a larger window (default kernel size is
25
- 200 frames instead of 100 frames).
24
+ ``split_time`` seconds, but with a larger window (default kernel
25
+ size is 200 frames instead of 100 frames).
26
26
 
27
27
  1. At time stamps every `split_time` seconds, a background image is
28
28
  computed, resulting in a background series.
@@ -103,16 +103,20 @@ class BackgroundSparseMed(Background):
103
103
  f"size {len(self.input_data)}. Setting it to input data size!")
104
104
  kernel_size = len(self.input_data)
105
105
 
106
- #: kernel size used for median filtering
107
106
  self.kernel_size = kernel_size
108
- #: time between background images in the background series
107
+ """kernel size used for median filtering"""
108
+
109
109
  self.split_time = split_time
110
- #: cleansing threshold factor
110
+ """time between background images in the background series"""
111
+
111
112
  self.thresh_cleansing = thresh_cleansing
112
- #: keep at least this many background images from the series
113
+ """cleansing threshold factor"""
114
+
113
115
  self.frac_cleansing = frac_cleansing
114
- #: offset/flickering correction
116
+ """keep at least this many background images from the series"""
117
+
115
118
  self.offset_correction = offset_correction
119
+ """offset/flickering correction"""
116
120
 
117
121
  # time axis
118
122
  self.time = None
@@ -142,48 +146,59 @@ class BackgroundSparseMed(Background):
142
146
  self.time = np.linspace(0, dur, self.image_count,
143
147
  endpoint=True)
144
148
 
145
- #: duration of the measurement
146
149
  self.duration = self.time[-1] - self.time[0]
150
+ """duration of the measurement"""
147
151
 
148
152
  self.step_times = np.arange(0, self.duration, self.split_time)
149
- #: array containing all background images
153
+
150
154
  self.bg_images = np.zeros((self.step_times.size,
151
155
  self.image_shape[0],
152
156
  self.image_shape[1]),
153
157
  dtype=np.uint8)
158
+ """array containing all background images"""
154
159
 
155
- #: mp.RawArray for temporary batch input data
156
160
  self.shared_input_raw = mp_spawn.RawArray(
157
161
  np.ctypeslib.ctypes.c_uint8,
158
162
  int(np.prod(self.image_shape)) * kernel_size)
159
- #: mp.RawArray for the median background image
163
+ """mp.RawArray for temporary batch input data"""
164
+
160
165
  self.shared_output_raw = mp_spawn.RawArray(
161
166
  np.ctypeslib.ctypes.c_uint8,
162
167
  int(np.prod(self.image_shape)))
168
+ """mp.RawArray for the median background image"""
169
+
163
170
  # Convert the RawArray to something we can write to fast
164
171
  # (similar to memoryview, but without having to cast) using
165
172
  # np.ctypeslib.as_array. See discussion in
166
173
  # https://stackoverflow.com/questions/37705974
167
- #: numpy array reshaped view on `self.shared_input_raw` with
168
- #: first axis enumerating the events
169
174
  self.shared_input = np.ctypeslib.as_array(
170
175
  self.shared_input_raw).reshape(kernel_size, -1)
176
+ """numpy array reshaped view on `self.shared_input_raw`.
177
+ The first axis enumerates the events.
178
+ """
179
+
171
180
  self.shared_output = np.ctypeslib.as_array(
172
181
  self.shared_output_raw).reshape(self.image_shape)
173
- #: multiprocessing pool for parallel processing
182
+ """numpy array reshaped view on `self.shared_output_raw`.
183
+ It is reshaped to the image shape.
184
+ """
185
+
174
186
  self.pool = mp_spawn.Pool(processes=self.num_cpus)
187
+ """multiprocessing pool for parallel processing"""
175
188
 
176
- #: counter tracking process of workers
177
189
  self.worker_counter = mp_spawn.Value("l", 0)
178
- #: queue for median computation jobs
190
+ """counter tracking progress of workers"""
191
+
179
192
  self.queue = mp_spawn.Queue()
180
- #: list of workers (processes)
193
+ """queue for median computation jobs"""
194
+
181
195
  self.workers = [WorkerSparseMed(self.queue,
182
196
  self.worker_counter,
183
197
  self.shared_input_raw,
184
198
  self.shared_output_raw,
185
199
  self.kernel_size)
186
200
  for _ in range(self.num_cpus)]
201
+ """list of workers (processes)"""
187
202
  [w.start() for w in self.workers]
188
203
 
189
204
  def __enter__(self):
@@ -35,7 +35,7 @@ def volume_from_contours(
35
35
  average is then used.
36
36
 
37
37
  The volume is computed radially from the center position
38
- given by (`pos_x`, `pos_y`). For sufficiently smooth contours,
38
+ given by (``pos_x``, ``pos_y``). For sufficiently smooth contours,
39
39
  such as densely sampled ellipses, the center position does not
40
40
  play an important role. For contours that are given on a coarse
41
41
  grid, as is the case for deformability cytometry, the center position
@@ -111,7 +111,7 @@ def vol_revolve(r, z, point_scale=1.):
111
111
  V = \frac{h \cdot \pi}{3} \cdot (R^2 + R \cdot r + r^2)
112
112
 
113
113
  Where :math:`h` is the height of the cone and :math:`r` and
114
- `R` are the smaller and larger radii of the truncated cone.
114
+ ``R`` are the smaller and larger radii of the truncated cone.
115
115
 
116
116
  Each line segment of the contour resembles one truncated cone. If
117
117
  the z-step is positive (counter-clockwise contour), then the
@@ -9,8 +9,8 @@ from ..meta.ppid import kwargs_to_ppid, ppid_to_kwargs
9
9
 
10
10
 
11
11
  class Gate:
12
- #: the default value for `size_thresh_mask` if not given as kwarg
13
12
  _default_size_thresh_mask = 10
13
+ """the default value for `size_thresh_mask` if not given as kwarg"""
14
14
 
15
15
  def __init__(self, data, *,
16
16
  online_gates: bool = False,
@@ -19,7 +19,7 @@ class Gate:
19
19
 
20
20
  Parameters
21
21
  ----------
22
- data: .HDF5Data
22
+ data: .hdf5_data.HDF5Data
23
23
  dcnum data instance
24
24
  online_gates: bool
25
25
  set to True to enable gating with "online" gates stored
@@ -27,14 +27,14 @@ class Gate:
27
27
  deformability cytometry before writing data to disk during
28
28
  a measurement
29
29
  size_thresh_mask: int
30
- Only masks with more pixels than `size_thresh_mask` are
30
+ Only masks with more pixels than ``size_thresh_mask`` are
31
31
  considered to be a valid event; Originally, the
32
- `bin area min`/`trig_thresh` value defaulted to 200 which is
32
+ ``bin area min / trig_thresh`` value defaulted to 200 which is
33
33
  too large; defaults to 10 or the original value in case
34
- `online_gates` is set.
34
+ ``online_gates`` is set.
35
35
  """
36
- #: box gating (value range for each feature)
37
36
  self.box_gates = {}
37
+ """box gating (value range for each feature)"""
38
38
 
39
39
  if online_gates:
40
40
  # Deal with online gates.
@@ -46,13 +46,13 @@ class Gate:
46
46
  size_thresh_mask = data.meta_nest.get(
47
47
  "online_contour", {}).get("bin area min")
48
48
 
49
- #: gating keyword arguments
50
49
  self.kwargs = {
51
50
  "online_gates": online_gates,
52
51
  # Set the size threshold, defaulting to `_default_size_thresh_mask`
53
52
  "size_thresh_mask":
54
53
  size_thresh_mask or self._default_size_thresh_mask
55
54
  }
55
+ """gating keyword arguments"""
56
56
 
57
57
  def _extract_online_gates(self, data):
58
58
  ogates = {}
@@ -168,10 +168,10 @@ class Gate:
168
168
  data: numbers.Number | np.ndarray):
169
169
  """Return boolean indicating whether `data` value is in box gate
170
170
 
171
- `data` may be a number or an array. If no box filter is defined
172
- for `feat`, `True` is always returned. Otherwise, either a boolean
173
- or a boolean array is returned, depending on the type of `data`.
174
- Not that `np.logical_and` can deal with mixed argument types
171
+ ``data`` may be a number or an array. If no box filter is defined
172
+ for ``feat``, True is always returned. Otherwise, either a boolean
173
+ or a boolean array is returned, depending on the type of ``data``.
174
+ Note that ``np.logical_and`` can deal with mixed argument types
175
175
  (scalar and array).
176
176
  """
177
177
  bound_lo, bound_up = self.box_gates[feat]