dcnum 0.11.4-py3-none-any.whl → 0.11.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dcnum has been flagged as possibly problematic.

dcnum/_version.py CHANGED
@@ -1,4 +1,4 @@
  # file generated by setuptools_scm
  # don't change, don't track in version control
- __version__ = version = '0.11.4'
- __version_tuple__ = version_tuple = (0, 11, 4)
+ __version__ = version = '0.11.6'
+ __version_tuple__ = version_tuple = (0, 11, 6)
dcnum/feat/queue_event_extractor.py CHANGED
@@ -257,7 +257,21 @@ class QueueEventExtractor:
  if self.finalize_extraction.value:
      # The manager told us that there is nothing more coming.
      self.logger.debug(
-         f"Finalizing worker {self} with PID {os.getpid()}")
+         f"Finalizing worker {self} with PID {os.getpid()}. "
+         f"{self.event_queue.qsize()} events are still queued.")
+     # Tell the queue background thread to flush all data to
+     # the queue. The background thread will quit once it has
+     # flushed all buffered data to the pipe.
+     self.event_queue.close()
+     self.logger.debug(
+         f"Closed event queue from Process PID {os.getpid()}")
+     # Join the queue background thread. It blocks until the
+     # background thread exits, ensuring that all data in the
+     # buffer has been flushed to the pipe.
+     self.event_queue.join_thread()
+     self.logger.debug(
+         f"Joined event queue background thread from "
+         f"Process PID {os.getpid()}")
      break
  else:
      try:
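
Reviewer note: the shutdown sequence added above relies on the standard-library semantics of multiprocessing.Queue — close() tells the feeder thread to flush its buffer to the pipe, and join_thread() blocks until that flush completes. A minimal standalone sketch of this flush-then-join pattern (illustrative only, not dcnum code):

    import multiprocessing as mp


    def worker(q):
        # Producer side: after the last put(), flush and join the feeder
        # thread so no buffered items are lost when the process exits.
        for item in range(1000):
            q.put(item)
        q.close()        # no more puts; flush buffered items to the pipe
        q.join_thread()  # block until the feeder thread drained its buffer


    if __name__ == "__main__":
        q = mp.Queue()
        p = mp.Process(target=worker, args=(q,))
        p.start()
        received = [q.get() for _ in range(1000)]  # drain before joining
        p.join()
        assert len(received) == 1000  # nothing lost at process exit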
dcnum/write/queue_collector_thread.py CHANGED
@@ -183,12 +183,14 @@ class QueueCollectorThread(threading.Thread):
      continue

  if len(cur_nevents) == 0:
-     self.logger.warning("Encountered empty nevents array!")
+     self.logger.info(
+         "Reached the end of the current dataset (frame "
+         f"{cur_frame + 1} of {len(self.feat_nevents)}).")
      break

  # We have reached the writer threshold. This means the extractor
  # has analyzed at least `write_threshold` frames (not events).
- self.logger.debug(f"Current frane: {cur_frame}")
+ self.logger.debug(f"Current frame: {cur_frame}")

  # Create an event stash
  stash = EventStash(
@@ -214,6 +216,7 @@ class QueueCollectorThread(threading.Thread):
  try:
      idx, events = self.event_queue.get(timeout=.3)
  except queue.Empty:
+     # No time.sleep here, because we are using timeout above.
      continue
  if cur_frame <= idx < cur_frame + self.write_threshold:
      stash.add_events(index=idx, events=events)
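
The new comment documents the polling pattern used here: Queue.get(timeout=...) already blocks for up to the timeout, so an extra time.sleep on queue.Empty would only add latency. A minimal standalone sketch (illustrative only, not dcnum code):

    import queue
    import threading

    q = queue.Queue()
    stop = threading.Event()


    def consumer():
        while not stop.is_set():
            try:
                q.get(timeout=0.3)  # blocks for up to 0.3 s
            except queue.Empty:
                continue  # get() already waited; no extra sleep needed
            q.task_done()   # mark the item as processed


    t = threading.Thread(target=consumer)
    t.start()
    for i in range(3):
        q.put(i)
    q.join()    # wait until the consumer has processed every item
    stop.set()  # then let the consumer leave its loop
    t.join()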
@@ -253,6 +256,8 @@ class QueueCollectorThread(threading.Thread):

  # Write the number of events.
  self.writer_dq.append(("nevents",
+                        # Get nevents for each event from the
+                        # frame-based cur_nevents array.
                         np.array(stash.feat_nevents)[
                             indices - stash.index_offset]
                         ))
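
The comment added above refers to expanding a frame-based count array to one value per event. A toy illustration of that fancy-indexing step (hypothetical arrays, not dcnum code):

    import numpy as np

    feat_nevents = np.array([2, 0, 3])          # events counted per frame
    frame_of_event = np.array([0, 0, 2, 2, 2])  # frame index of each event
    # Indexing the per-frame counts with each event's frame index yields
    # one nevents value per event:
    print(feat_nevents[frame_of_event])         # [2 2 3 3 3]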
@@ -261,4 +266,4 @@ class QueueCollectorThread(threading.Thread):
  self.written_frames += stash.num_frames

  # Increment current frame index.
- cur_frame += self.write_threshold
+ cur_frame += len(cur_nevents)
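
This one-line change is the substantive fix in this hunk: at the end of the dataset, the final slice holds fewer than write_threshold frames, so advancing by the threshold would overshoot the frame counter. A toy illustration with hypothetical numbers (not dcnum code):

    # With 10 frames and a write threshold of 4, the slices hold 4 + 4 + 2
    # frames. Advancing by the threshold would end at frame 12; advancing
    # by the actual slice length ends exactly at frame 10.
    feat_nevents = list(range(10))  # one entry per frame
    write_threshold = 4

    cur_frame = 0
    while cur_frame < len(feat_nevents):
        cur_nevents = feat_nevents[cur_frame:cur_frame + write_threshold]
        print(f"writing frames {cur_frame}..{cur_frame + len(cur_nevents) - 1}")
        cur_frame += len(cur_nevents)  # not: cur_frame += write_threshold

    assert cur_frame == len(feat_nevents)  # bookkeeping matches dataset size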
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: dcnum
- Version: 0.11.4
+ Version: 0.11.6
  Summary: numerics toolbox for imaging deformability cytometry
  Author: Paul Müller
  Maintainer-email: Paul Müller <dev@craban.de>
@@ -1,9 +1,9 @@
  dcnum/__init__.py,sha256=hcawIKS7utYiOyVhOAX9t7K3xYzP1b9862VV0b6qSrQ,74
- dcnum/_version.py,sha256=Bbplzvuz1KWstfyKIRofOtvOUu7akc5l3tJGqDcI0-E,162
+ dcnum/_version.py,sha256=3Gew5At6W5w-3RNIQtvr0brz3P-MJACFL9J-IorkINc,162
  dcnum/feat/__init__.py,sha256=0oX765AyfL1BcVt-FI6R_i6x5LgYYLeyO5lkxSauI0Y,299
  dcnum/feat/event_extractor_manager_thread.py,sha256=54eCKbLBkv3U7RLGfqtiMB1CnqHNKcTm9vJE84qu9X8,5681
  dcnum/feat/gate.py,sha256=UEHbj3hkMWNm4tlY8Tz8sOsruhByjJxgO1s-ztQ7WTw,6235
- dcnum/feat/queue_event_extractor.py,sha256=_CwtEcJ-zzChkiPbRHC-WdVNy9sNWOYTzZPd2oEvtrU,11493
+ dcnum/feat/queue_event_extractor.py,sha256=8IJZ6_2Snv0qr79hxKF-FPw9tBvTxbaLaZMYQyIZHyc,12375
  dcnum/feat/feat_background/__init__.py,sha256=mL8QJYK6m3hxTqF6Cuosu__Fm5tZUMa-hTgSGcNw9AE,458
  dcnum/feat/feat_background/base.py,sha256=YCkkxgl19dslqmbkbtMv8CH6oUrrdHmoheBlpDQZHa0,5021
  dcnum/feat/feat_background/bg_roll_median.py,sha256=3zwXlNE-CWu30RI8s3Z_ihKUPUVi_cm3yAaXIHlMz7Y,13626
@@ -31,32 +31,10 @@ dcnum/segm/segmenter_gpu.py,sha256=F-6H425eQc9B2-k5PURJziU5uQubdF96GTkoysD2JDM,1
  dcnum/segm/segmenter_manager_thread.py,sha256=xtuk7gnk7xhoRoV_J97rrv7IR3JgeRvVewCDT-chqpk,5172
  dcnum/write/__init__.py,sha256=Oy-ORTyzUUswsaJvd0C6LyXtOgAY0iTIRqFNU9d7M8Y,160
  dcnum/write/deque_writer_thread.py,sha256=UUn5OYxDvckvhLw3llLYu7y8MI7RfsOhdJhMonKKB3k,1625
- dcnum/write/queue_collector_thread.py,sha256=Iw83KAcZu13elorHUOfJT6cVQntAKBLm6OVgkldjtL0,11088
+ dcnum/write/queue_collector_thread.py,sha256=mO4sSDB9gQW9ttugqAFFEcXlxz2E5QDjQcdZU_NT-BY,11404
  dcnum/write/writer.py,sha256=aUg_F1Ocs_wkV47lJJgkqGa8IRFhHH30NI7q0VxBNUA,3092
- docs/conf.py,sha256=VqB1WtClmmAVdfQ45SLa3aG7t6g2AcDG_BI6MO7j4wI,3022
- docs/index.rst,sha256=eWjHCDrw_VEC885Z3ON_wX5p2FRn_DI12WjSt0WcU5g,431
- docs/requirements.txt,sha256=KA1AT05zfznj2eE8ixs9cikcl_MtWplqbiXhp4h0YsI,87
- docs/extensions/github_changelog.py,sha256=SEJkjEvVtJ-42daYkNbcr9mHmmO6ZjDs6phvnySlNAE,2375
- tests/conftest.py,sha256=bYh2DmQRP2TbkmLBdeiKF-nSv5IX1oUNWcwT6w_bEUw,577
- tests/helper_methods.py,sha256=MsCppTICPqv4vq-DhUBIKFyOhvRO_oS2O0Fb3OYhNxs,1855
- tests/requirements.txt,sha256=Mfj5F30ZlweJpp9LC64TVta_lq6G5AINIfJxVxTK-2c,20
- tests/test_feat_background_bg_roll_median.py,sha256=FtrVcKumIctUrCWtNimGicExsouFxoZpbCwPnmeY1BU,4809
- tests/test_feat_brightness.py,sha256=V8F_zhxYvS0rrZl_SKZwVwpPLI6jq0J7NR-eOdNS6qg,2082
- tests/test_feat_haralick.py,sha256=_00p24WOzIOAtfKiXWcd8XoYBQKt3DbYdSijHZYzi64,3499
- tests/test_feat_moments_based.py,sha256=wRlqM8sGSPIzUB95gUuixNH1hTLgD7otj15-53CRxcE,3037
- tests/test_init.py,sha256=umUGuhCJ4iCsI5qjoNtrIAW_3xFfI3rDEGk8BKgzekc,73
- tests/test_ppid.py,sha256=gyrFLROG9IZIcHb0YQCHujTw-LdzGYTKZhJRAFgza6c,2785
- tests/test_ppid_segm.py,sha256=4NzGXns3lMetH970SKMnzizbnThx8ku4A1MiTozXMlA,238
- tests/test_read_concat_hdf5.py,sha256=jeOxG6T_Z8wtPVK_UbEa-X-VQtYdFCLE2oxFbWjN2iU,1951
- tests/test_read_hdf5.py,sha256=nEtjLI1oF5W0Ab_sJ0xLYctD8oR24OwORcj6sfG57Y4,6671
- tests/test_segm_thresh.py,sha256=ecHbhwy8_qVJTdpDJlo_sIiQPSIi_xKXM2EqUJXRU20,5101
- tests/test_segmenter.py,sha256=qjHTTUmaR08X4ATjndxJr9eZWFvpPQwJ_fSCw8-tr9Y,9307
- tests/test_write_deque_writer_thread.py,sha256=EAnqKayr4_jskv_599QYD3gdBZhtyVM7-MuqvtLHYAI,1140
- tests/test_write_writer.py,sha256=SzNTLsHz4RZceRwqflc4Wfn02vYc4Hb4WQVk1X8dmiw,1107
- tests/data/fmt-hdf5_cytoshot_full-features_2023.zip,sha256=LfkFxAXTIkcqxrJYYNMC364Q1x5HT5X9cTHuNz5eeuk,650653
- tests/data/fmt-hdf5_cytoshot_full-features_legacy_allev_2023.zip,sha256=z2Bk6u3wjr-bJa7sOxBcNKOQ0Zoi3Xmf_cMi6d-3CMk,154010
- dcnum-0.11.4.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
- dcnum-0.11.4.dist-info/METADATA,sha256=Hr8-6f7PT7eR9dUfCGG9xhVivAlpDSCNElVRMS9yWk8,2180
- dcnum-0.11.4.dist-info/WHEEL,sha256=AtBG6SXL3KF_v0NxLf0ehyVOh0cold-JbJYXNGorC6Q,92
- dcnum-0.11.4.dist-info/top_level.txt,sha256=Utc_P-_-7hbtniTp00IsHTry4h3rY5KFmwHfYM9g44k,22
- dcnum-0.11.4.dist-info/RECORD,,
+ dcnum-0.11.6.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
+ dcnum-0.11.6.dist-info/METADATA,sha256=5nNFMRnm1iAuuK_hX7m0alvm-P_ISS7FEGQrbNKI828,2180
+ dcnum-0.11.6.dist-info/WHEEL,sha256=AtBG6SXL3KF_v0NxLf0ehyVOh0cold-JbJYXNGorC6Q,92
+ dcnum-0.11.6.dist-info/top_level.txt,sha256=Hmh38rgG_MFTVDpUDGuO2HWTSq80P585Het4COQzFTg,6
+ dcnum-0.11.6.dist-info/RECORD,,
@@ -0,0 +1 @@
+ dcnum
@@ -1,4 +0,0 @@
- dcnum
- dist
- docs
- tests
docs/conf.py DELETED
@@ -1,87 +0,0 @@
- # Configuration file for the Sphinx documentation builder.
- #
- # This file only contains a selection of the most common options. For a full
- # list see the documentation:
- # https://www.sphinx-doc.org/en/master/usage/configuration.html
-
- # -- Path setup --------------------------------------------------------------
-
- # If extensions (or modules to document with autodoc) are in another directory,
- # add these directories to sys.path here. If the directory is relative to the
- # documentation root, use os.path.abspath to make it absolute, like shown here.
- #
- # import os
- # import sys
- # sys.path.insert(0, os.path.abspath('.'))
- import pathlib
- import sys
-
- import dcnum
-
- sys.path.insert(0, str(pathlib.Path(__file__).parent / "extensions"))
-
- # -- Project information -----------------------------------------------------
-
- project = 'dcnum'
- copyright = '2023, Paul Müller'
- author = 'Paul Müller'
-
-
- # -- General configuration ---------------------------------------------------
-
- # Add any Sphinx extension module names here, as strings. They can be
- # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
- # ones.
- extensions = ['sphinx.ext.autodoc',
-               'sphinx.ext.autosummary',
-               'sphinx.ext.intersphinx',
-               'sphinx.ext.mathjax',
-               'sphinx.ext.viewcode',
-               'sphinx.ext.napoleon',
-               'matplotlib.sphinxext.plot_directive',
-               'IPython.sphinxext.ipython_directive',
-               'IPython.sphinxext.ipython_console_highlighting',
-               'github_changelog',
-               ]
-
- # Add any paths that contain templates here, relative to this directory.
- templates_path = ['_templates']
-
- # List of patterns, relative to source directory, that match files and
- # directories to ignore when looking for source files.
- # This pattern also affects html_static_path and html_extra_path.
- exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-
-
- # -- Options for HTML output -------------------------------------------------
-
- # The theme to use for HTML and HTML Help pages. See the documentation for
- # a list of builtin themes.
- #
- html_theme = 'sphinx_rtd_theme'
-
- # Add any paths that contain custom static files (such as style sheets) here,
- # relative to this directory. They are copied after the builtin static files,
- # so a file named "default.css" will overwrite the builtin "default.css".
- html_static_path = ['_static']
-
- # The version info for the project you're documenting, acts as replacement for
- # |version| and |release|, also used in various other places throughout the
- # built documents.
- #
- # The short X.Y version.
- #
- # The full version, including alpha/beta/rc tags.
- # This gets 'version'
- release = dcnum.__version__
-
- # enable enumeration of figures
- numfig = True
-
- # include source of matplotlib plots
- plot_include_source = True
-
- # http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_member_order
- # Order class attributes and functions in separate blocks
- autodoc_member_order = 'groupwise'
- autoclass_content = 'both'
docs/extensions/github_changelog.py DELETED
@@ -1,75 +0,0 @@
- """Display changelog with links to GitHub issues
-
- Usage
- -----
- The directive
-
-     .. include_changelog:: ../CHANGELOG
-
- adds the content of the changelog file into the current document.
- References to GitHub issues are identified as "(#XY)" (with parentheses
- and hash) and a link is inserted
-
-     https://github.com/RI-imaging/{PROJECT}/issues/{XY}
-
- where PROJECT is the `project` variable defined in conf.py.
- """
- import io
- import re
-
- from docutils.statemachine import ViewList
- from docutils.parsers.rst import Directive
- from sphinx.util.nodes import nested_parse_with_titles
- from docutils import nodes
-
-
- class IncludeDirective(Directive):
-     required_arguments = 1
-     optional_arguments = 0
-
-     def run(self):
-         full_path = self.arguments[0]
-         project = self.state.document.settings.env.config.github_project
-
-         def insert_github_link(reobj):
-             line = reobj.string
-             instr = line[reobj.start():reobj.end()]
-             issue = instr.strip("#()")
-             link = "https://github.com/{}/issues/".format(project)
-             rstlink = "(`#{issue} <{link}{issue}>`_)".format(issue=issue,
-                                                              link=link)
-             return rstlink
-
-         with io.open(full_path, "r") as myfile:
-             text = myfile.readlines()
-
-         rst = []
-         for line in text:
-             line = line.strip("\n")
-             if line.startswith(" ") and line.strip().startswith("-"):
-                 # list in list:
-                 rst.append("")
-             if not line.startswith(" "):
-                 rst.append("")
-                 line = "version " + line
-                 rst.append(line)
-                 rst.append("-"*len(line))
-             elif not line.strip():
-                 rst.append(line)
-             else:
-                 line = re.sub(r"\(#[0-9]*\)", insert_github_link, line)
-                 rst.append(line)
-
-         vl = ViewList(rst, "fakefile.rst")
-         # Create a node.
-         node = nodes.section()
-         node.document = self.state.document
-         # Parse the rst.
-         nested_parse_with_titles(self.state, vl, node)
-         return node.children
-
-
- def setup(app):
-     app.add_config_value('github_project', "user/project", 'html')
-     app.add_directive('include_changelog', IncludeDirective)
-     return {'version': '0.1'}  # identifies the version of our extension
docs/index.rst DELETED
@@ -1,20 +0,0 @@
- .. dcnum documentation master file, created by
-    sphinx-quickstart on Fri Jun 2 23:46:40 2023.
-    You can adapt this file completely to your liking, but it should at least
-    contain the root `toctree` directive.
-
- Welcome to dcnum's documentation!
- =================================
-
- .. toctree::
-    :maxdepth: 2
-    :caption: Contents:
-
-
-
- Indices and tables
- ==================
-
- * :ref:`genindex`
- * :ref:`modindex`
- * :ref:`search`
docs/requirements.txt DELETED
@@ -1,7 +0,0 @@
- # requirement on rtd
- h5py
- ipython
- matplotlib
- numpy
- sphinx==4.2.0
- sphinx_rtd_theme==1.0
tests/conftest.py DELETED
@@ -1,20 +0,0 @@
- import atexit
- import os
- import shutil
- import tempfile
- import time
-
- TMPDIR = tempfile.mkdtemp(prefix=time.strftime(
-     "dcnum_test_%H.%M_"))
-
-
- def pytest_configure(config):
-     """
-     Allows plugins and conftest files to perform initial configuration.
-     This hook is called for every plugin and initial conftest
-     file after command line options have been parsed.
-     """
-     tempfile.tempdir = TMPDIR
-     atexit.register(shutil.rmtree, TMPDIR, ignore_errors=True)
-     # Disable JIT compiler during testing for coverage
-     os.environ.setdefault("NUMBA_DISABLE_JIT", "1")
tests/helper_methods.py DELETED
@@ -1,72 +0,0 @@
- import pathlib
- import tempfile
- import zipfile
-
-
- def calltracker(func):
-     """Decorator to track how many times a function is called"""
-
-     def wrapped(*args, **kwargs):
-         wrapped.calls += 1
-         return func(*args, **kwargs)
-
-     wrapped.calls = 0
-     return wrapped
-
-
- def find_data(path):
-     """Find .avi and .rtdc data files in a directory"""
-     path = pathlib.Path(path)
-     avifiles = [r for r in path.rglob("*.avi") if r.is_file()]
-     rtdcfiles = [r for r in path.rglob("*.rtdc") if r.is_file()]
-     files = [pathlib.Path(ff) for ff in rtdcfiles + avifiles]
-     return files
-
-
- def retrieve_data(zip_file):
-     """Extract contents of data zip file and return data files
-     """
-     zpath = pathlib.Path(__file__).resolve().parent / "data" / zip_file
-     # unpack
-     arc = zipfile.ZipFile(str(zpath))
-
-     # extract all files to a temporary directory
-     edest = tempfile.mkdtemp(prefix=zpath.name)
-     arc.extractall(edest)
-
-     # Load RT-DC dataset
-     # find tdms files
-     datafiles = find_data(edest)
-
-     if len(datafiles) == 1:
-         datafiles = datafiles[0]
-
-     return datafiles
-
-
- def find_model(path):
-     """Find .ckp files in a directory"""
-     path = pathlib.Path(path)
-     jit_files = [r for r in path.rglob("*.ckp") if r.is_file()]
-     files = [pathlib.Path(ff) for ff in jit_files]
-     return files
-
-
- def retrieve_model(zip_file):
-     """Extract contents of model zip file and return model checkpoint paths
-     """
-     zpath = pathlib.Path(__file__).resolve().parent / "data" / zip_file
-     # unpack
-     arc = zipfile.ZipFile(str(zpath))
-
-     # extract all files to a temporary directory
-     edest = tempfile.mkdtemp(prefix=zpath.name)
-     arc.extractall(edest)
-
-     # find model checkpoint paths
-     modelpaths = find_model(edest)
-
-     if len(modelpaths) == 1:
-         modelpaths = modelpaths[0]
-
-     return modelpaths
tests/requirements.txt DELETED
@@ -1,2 +0,0 @@
- pytest
- scikit-image
tests/test_feat_background_bg_roll_median.py DELETED
@@ -1,131 +0,0 @@
- import h5py
- import numpy as np
- import pytest
-
- from dcnum.feat.feat_background import bg_roll_median
-
-
- def test_compute_median_for_slice():
-     # events in shared arrays: 100
-     # image shape: 5 * 7
-     shared_input = np.arange(5*7).reshape(1, 5*7) * np.ones((100, 1))
-     assert shared_input.size == 100 * 5 * 7
-     assert np.all(shared_input[:, 0] == 0)  # pixel 0
-     assert np.all(shared_input[:, 1] == 1)  # pixel 1
-
-     shared_output = np.zeros((100, 5 * 7))
-
-     job_slice = slice(1, 4)
-
-     batch_size = 90
-     kernel_size = 10
-
-     bg_roll_median.compute_median_for_slice(
-         shared_input=shared_input,
-         shared_output=shared_output,
-         job_slice=job_slice,
-         output_size=batch_size,
-         kernel_size=kernel_size,
-     )
-
-     # compare input and output at batch size
-     assert np.all(shared_input[:90, 1:4] == shared_input[:90, 1:4])
-
-     # sanity check with boundary values
-     comp_in_b = shared_input.reshape(-1, 100)[1:4, 90:]
-     comp_out_b = shared_output.reshape(-1, 100)[1:4, 90:]
-     assert not np.all(comp_in_b == comp_out_b)
-
-
- @pytest.mark.parametrize("event_count", [720, 730])  # should be independent
- def test_median_map_iterator(tmp_path, event_count):
-     output_path = tmp_path / "test.h5"
-     # batch size: 90
-     # image shape: 5 * 7
-     # kernel size: 10
-     input_data = np.arange(5*7).reshape(1, 5, 7) * np.ones((event_count, 1, 1))
-     assert np.all(input_data[0] == input_data[1])
-     assert np.all(input_data[0].flatten() == np.arange(5*7))
-
-     with bg_roll_median.BackgroundRollMed(input_data=input_data,
-                                           output_path=output_path,
-                                           kernel_size=10,
-                                           batch_size=90,
-                                           ) as mic:
-         assert len(mic.shared_input_raw) == (10 + 90) * 5 * 7
-
-         jobs = list(mic.map_iterator())
-         assert len(jobs) == 7
-         assert jobs[1].start == 1 * 5
-         assert jobs[1].stop == 2 * 5
-         assert jobs[2].start == 2 * 5
-         assert jobs[2].stop == 3 * 5
-         assert jobs[6].stop == 7 * 5
-
-
- @pytest.mark.parametrize("event_count", [720, 730])
- def test_median_process_next_batch(tmp_path, event_count):
-     output_path = tmp_path / "test.h5"
-     # batch size: 90
-     # image shape: 5 * 7
-     # kernel size: 10
-     input_data = np.arange(5*7).reshape(1, 5, 7) * np.ones((event_count, 1, 1))
-     input_data = np.array(input_data, dtype=np.uint8)
-     assert np.all(input_data[0] == input_data[1])
-     assert np.all(input_data[0].flatten() == np.arange(5*7))
-
-     with bg_roll_median.BackgroundRollMed(input_data=input_data,
-                                           output_path=output_path,
-                                           kernel_size=10,
-                                           batch_size=90,
-                                           ) as mic:
-         assert len(mic.shared_input_raw) == (10 + 90) * 5 * 7
-
-         assert mic.current_batch == 0
-         mic.process_next_batch()
-
-         assert mic.current_batch == 1
-
-     with h5py.File(output_path) as h5:
-         ds = h5["events/image_bg"]
-         assert ds.shape == (event_count, 5, 7)
-         assert np.all(ds[90:] == 0), "not processed"
-         assert np.all(ds[:90, 0, 0] == 0)
-         assert np.all(ds[:90, 0, 1] == 1)
-         assert np.all(ds[:90, 0, 2] == 2)
-         assert np.all(ds[:90, 1, 0] == 7)
-
-
- @pytest.mark.parametrize("event_count, chunk_count", [[720, 8], [730, 9]])
- def test_median_process_full(tmp_path, event_count, chunk_count):
-     output_path = tmp_path / "test.h5"
-     # batch size: 90
-     # image shape: 5 * 7
-     # kernel size: 10
-     input_data = np.arange(5*7).reshape(1, 5, 7) * np.ones((event_count, 1, 1))
-     input_data = np.array(input_data, dtype=np.uint8)
-     assert np.all(input_data[0] == input_data[1])
-     assert np.all(input_data[0].flatten() == np.arange(5*7))
-
-     with bg_roll_median.BackgroundRollMed(input_data=input_data,
-                                           output_path=output_path,
-                                           kernel_size=10,
-                                           batch_size=90,
-                                           ) as mic:
-         assert len(mic.shared_input_raw) == (10 + 90) * 5 * 7
-         # output array is smaller
-         assert len(mic.shared_output_raw) == 90 * 5 * 7
-
-         assert mic.current_batch == 0
-         mic.process()
-         assert mic.current_batch == chunk_count
-
-     with h5py.File(output_path) as h5:
-         ds = h5["events/image_bg"]
-         assert ds.shape == (event_count, 5, 7)
-         assert np.all(ds[:90, 0, 0] == 0)
-         assert np.all(ds[:90, 0, 1] == 1)
-         assert np.all(ds[:90, 0, 2] == 2)
-         assert np.all(ds[:90, 1, 0] == 7)
-         assert np.all(ds[690:, 0, 0] == 0)
-         assert np.all(ds[690:, 0, 1] == 1)
tests/test_feat_brightness.py DELETED
@@ -1,56 +0,0 @@
- import pathlib
-
- import h5py
- import numpy as np
-
- from dcnum.feat import feat_brightness
-
- from helper_methods import retrieve_data
-
- data_path = pathlib.Path(__file__).parent / "data"
-
-
- def test_basic_brightness():
-     # This original file was generated with dcevent for reference.
-     path = retrieve_data(data_path /
-                          "fmt-hdf5_cytoshot_full-features_2023.zip")
-     # Make data available
-     with h5py.File(path) as h5:
-         data = feat_brightness.brightness_features(
-             image=h5["events/image"][:],
-             image_bg=h5["events/image_bg"][:],
-             mask=h5["events/mask"][:],
-         )
-
-         assert np.allclose(data["bright_bc_avg"][1],
-                            -43.75497215592681,
-                            atol=0, rtol=1e-10)
-         for feat in feat_brightness.brightness_names:
-             assert np.allclose(h5["events"][feat][:],
-                                data[feat]), f"Feature {feat} mismatch!"
-         # control test
-         assert not np.allclose(h5["events"]["bright_perc_10"][:],
-                                data["bright_perc_90"])
-
-
- def test_basic_brightness_single_image():
-     # This original file was generated with dcevent for reference.
-     path = retrieve_data(data_path /
-                          "fmt-hdf5_cytoshot_full-features_2023.zip")
-     # Make data available
-     with h5py.File(path) as h5:
-         data = feat_brightness.brightness_features(
-             image=h5["events/image"][1][np.newaxis],
-             image_bg=h5["events/image_bg"][1][np.newaxis],
-             mask=h5["events/mask"][1][np.newaxis],
-         )
-
-         assert np.allclose(data["bright_bc_avg"][0],
-                            -43.75497215592681,
-                            atol=0, rtol=1e-10)
-         for feat in feat_brightness.brightness_names:
-             assert np.allclose(h5["events"][feat][1],
-                                data[feat][0]), f"Feature {feat} mismatch!"
-         # control test
-         assert not np.allclose(h5["events"]["bright_perc_10"][1],
-                                data["bright_perc_90"][0])