dcnum 0.11.3-py3-none-any.whl → 0.11.5-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry and is provided for informational purposes only.
Potentially problematic release: this version of dcnum was flagged as possibly problematic by the registry scanner.
- dcnum/_version.py +2 -2
- dcnum/read/hdf5_data.py +4 -3
- dcnum/write/queue_collector_thread.py +1 -1
- {dcnum-0.11.3.dist-info → dcnum-0.11.5.dist-info}/METADATA +1 -1
- {dcnum-0.11.3.dist-info → dcnum-0.11.5.dist-info}/RECORD +8 -30
- {dcnum-0.11.3.dist-info → dcnum-0.11.5.dist-info}/WHEEL +1 -1
- dcnum-0.11.5.dist-info/top_level.txt +1 -0
- dcnum-0.11.3.dist-info/top_level.txt +0 -4
- docs/conf.py +0 -87
- docs/extensions/github_changelog.py +0 -75
- docs/index.rst +0 -20
- docs/requirements.txt +0 -7
- tests/conftest.py +0 -20
- tests/data/fmt-hdf5_cytoshot_full-features_2023.zip +0 -0
- tests/data/fmt-hdf5_cytoshot_full-features_legacy_allev_2023.zip +0 -0
- tests/helper_methods.py +0 -72
- tests/requirements.txt +0 -2
- tests/test_feat_background_bg_roll_median.py +0 -131
- tests/test_feat_brightness.py +0 -56
- tests/test_feat_haralick.py +0 -120
- tests/test_feat_moments_based.py +0 -108
- tests/test_init.py +0 -5
- tests/test_ppid.py +0 -94
- tests/test_ppid_segm.py +0 -8
- tests/test_read_concat_hdf5.py +0 -54
- tests/test_read_hdf5.py +0 -186
- tests/test_segm_thresh.py +0 -139
- tests/test_segmenter.py +0 -225
- tests/test_write_deque_writer_thread.py +0 -42
- tests/test_write_writer.py +0 -33
- {dcnum-0.11.3.dist-info → dcnum-0.11.5.dist-info}/LICENSE +0 -0
tests/test_segm_thresh.py
DELETED
@@ -1,139 +0,0 @@

import multiprocessing as mp
import pathlib

from dcnum import segm
import h5py
import numpy as np
from skimage import morphology

import pytest

from helper_methods import retrieve_data

data_path = pathlib.Path(__file__).parent / "data"


def test_segm_thresh_basic():
    """Basic thresholding segmenter

    The segmenter is equivalent to the old dcevent legacy segmenter with
    the options legacy:t=-6^bl=0^bi=0^d=1:cle=1^f=1^clo=3
    (no blur, no binaryops, clear borders, fill holes, closing disk 3).
    Since in the dcevent pipeline, the data are gated and small objects
    are removed, we have to do this here manually before comparing mask
    images.
    """
    path = retrieve_data(
        data_path / "fmt-hdf5_cytoshot_full-features_legacy_allev_2023.zip")

    # Get all the relevant information
    with h5py.File(path) as h5:
        image = h5["events/image"][:]
        image_bg = h5["events/image_bg"][:]
        mask = h5["events/mask"][:]
        frame = h5["events/frame"][:]

    # Concatenate the masks
    frame_u, indices = np.unique(frame, return_index=True)
    image_u = image[indices]
    image_bg_u = image_bg[indices]
    mask_u = np.zeros_like(image_u, dtype=bool)
    for ii, fr in enumerate(frame):
        idx = np.where(frame_u == fr)[0]
        mask_u[idx] = np.logical_or(mask_u[idx], mask[ii])

    image_u_c = np.array(image_u, dtype=int) - image_bg_u

    sm = segm.segm_thresh.SegmentThresh(thresh=-6,
                                        kwargs_mask={"closing_disk": 3})
    for ii in range(len(frame_u)):
        labels_seg = sm.segment_frame(image_u_c[ii])
        mask_seg = np.array(labels_seg, dtype=bool)
        # Remove small objects, because this is not implemented in the
        # segmenter class as it would be part of gating.
        mask_seg = morphology.remove_small_objects(mask_seg, min_size=10)
        assert np.all(mask_seg == mask_u[ii]), f"masks not matching at {ii}"


@pytest.mark.parametrize("worker_type", ["thread", "process"])
def test_segm_thresh_segment_batch(worker_type):
    debug = worker_type == "thread"
    path = retrieve_data(
        data_path / "fmt-hdf5_cytoshot_full-features_legacy_allev_2023.zip")

    # Get all the relevant information
    with h5py.File(path) as h5:
        image = h5["events/image"][:]
        image_bg = h5["events/image_bg"][:]
        mask = h5["events/mask"][:]
        frame = h5["events/frame"][:]

    # Concatenate the masks
    frame_u, indices = np.unique(frame, return_index=True)
    image_u = image[indices]
    image_bg_u = image_bg[indices]
    mask_u = np.zeros_like(image_u, dtype=bool)
    for ii, fr in enumerate(frame):
        idx = np.where(frame_u == fr)[0]
        mask_u[idx] = np.logical_or(mask_u[idx], mask[ii])

    image_u_c = np.array(image_u, dtype=int) - image_bg_u

    sm = segm.segm_thresh.SegmentThresh(thresh=-6,
                                        debug=debug,
                                        kwargs_mask={"closing_disk": 3})

    labels_seg = sm.segment_batch(image_u_c, start=0, stop=5)
    assert labels_seg is sm.labels_array
    assert np.all(np.array(labels_seg, dtype=bool) == sm.mask_array)
    # tell workers to stop
    sm.join_workers()

    for ii in range(len(frame_u)):
        mask_seg = np.array(labels_seg[ii], dtype=bool)
        # Remove small objects, because this is not implemented in the
        # segmenter class as it would be part of gating.
        mask_seg = morphology.remove_small_objects(mask_seg, min_size=10)
        assert np.all(mask_seg == mask_u[ii]), f"masks not matching at {ii}"


@pytest.mark.parametrize("worker_type", ["thread", "process"])
def test_segm_thresh_segment_batch_large(worker_type):
    debug = worker_type == "thread"

    # Create fake data
    mask = np.zeros((121, 80, 200), dtype=bool)
    mask[:, 10:71, 100:161] = morphology.disk(30).reshape(-1, 61, 61)
    image = -10 * mask

    sm = segm.segm_thresh.SegmentThresh(thresh=-6,
                                        kwargs_mask={"closing_disk": 3},
                                        debug=debug)

    labels_seg_1 = np.copy(
        sm.segment_batch(image, start=0, stop=101))

    assert labels_seg_1.dtype == np.uint16  # uint8 is not enough
    assert sm.mp_batch_index.value == 0
    if worker_type == "thread":
        assert len(sm._mp_workers) == 1
        assert sm.mp_batch_worker.value == 1
    else:
        # This will fail if you have too many CPUs in your system
        assert len(sm._mp_workers) == mp.cpu_count()
        # Check whether all processes did their deeds
        assert sm.mp_batch_worker.value == mp.cpu_count()

    labels_seg_2 = np.copy(
        sm.segment_batch(image, start=101, stop=121))

    # tell workers to stop
    sm.join_workers()

    for ii in range(101):
        mask_seg = np.array(labels_seg_1[ii], dtype=bool)
        assert np.all(mask_seg == mask[ii]), f"masks not matching at {ii}"

    for jj in range(101, 121):
        mask_seg = np.array(labels_seg_2[jj - 101], dtype=bool)
        assert np.all(mask_seg == mask[jj]), f"masks not matching at {jj}"

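Note: the deleted test above documents the threshold segmenter's legacy pipeline options (legacy:t=-6^bl=0^bi=0^d=1:cle=1^f=1^clo=3, i.e. clear borders, fill holes, closing disk 3). The following is a minimal single-frame usage sketch based on that test; it assumes the SegmentThresh API exercised there (segment_frame, kwargs_mask) is unchanged in 0.11.5, and the input frame is hypothetical.

import numpy as np
from skimage import morphology
from dcnum import segm

# hypothetical background-corrected frame (image minus image_bg); real data
# would be read from an HDF5 file as in the deleted test
image_corr = np.zeros((80, 200), dtype=int)
image_corr[30:50, 90:110] = -20  # dark object, below the -6 threshold

sm = segm.segm_thresh.SegmentThresh(thresh=-6,
                                    kwargs_mask={"closing_disk": 3})
labels = sm.segment_frame(image_corr)   # integer-labeled mask
mask = np.array(labels, dtype=bool)     # binary mask
# gating (removal of small objects) is not part of the segmenter and
# has to be done manually, just as the test does
mask = morphology.remove_small_objects(mask, min_size=10)
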
tests/test_segmenter.py
DELETED
@@ -1,225 +0,0 @@

import pathlib

from dcnum import segm
import numpy as np

data_path = pathlib.Path(__file__).parent / "data"


class MockImageData:
    mask = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 1, 0, 0, 0],  # filled, 1
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1, 1],  # border, 2
        [0, 0, 0, 0, 0, 1, 1, 1],
        [0, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],  # other, 3
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
    ], dtype=bool)

    def get_chunk(self, chunk_index):
        image = np.array(-(10 + chunk_index) * self.mask, dtype=np.int16)
        chunk = np.stack([image] * 100, dtype=np.int16)
        return chunk


def test_segmenter_labeled_mask():
    mask = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 1, 0, 0, 0],  # filled, 1
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1, 1],  # border, 2
        [0, 0, 0, 0, 0, 1, 1, 1],
        [0, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],  # other, 3
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
    ], dtype=bool)

    sm1 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": True,
                                                      "fill_holes": True,
                                                      "closing_disk": 0,
                                                      })
    labels1 = sm1.segment_frame(-10 * mask)
    assert np.sum(labels1 != 0) == 21
    assert len(np.unique(labels1)) == 3  # (bg, filled, other)
    assert np.sum(labels1 == 1) == 9
    # due to the relabeling done in `fill_holes`, the index of "other" is "3"
    assert np.sum(labels1 == 2) == 12

    sm2 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": True,
                                                      "fill_holes": False,
                                                      "closing_disk": 0,
                                                      })
    labels2 = sm2.segment_frame(-10 * mask)
    _, l2a, l2b = np.unique(labels2)
    assert np.sum(labels2 != 0) == 20
    assert len(np.unique(labels2)) == 3  # (bg, filled, other)
    assert np.sum(labels2 == l2a) == 8
    assert np.sum(labels2 == l2b) == 12

    sm3 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": False,
                                                      "fill_holes": False,
                                                      "closing_disk": 0,
                                                      })
    labels3 = sm3.segment_frame(-10 * mask)
    assert np.sum(labels3 != 0) == 30
    assert len(np.unique(labels3)) == 4  # (bg, filled, border, other)
    assert np.sum(labels3 == 1) == 8
    assert np.sum(labels3 == 2) == 10
    assert np.sum(labels3 == 3) == 12

    sm4 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": False,
                                                      "fill_holes": True,
                                                      "closing_disk": 0,
                                                      })
    labels4 = sm4.segment_frame(-10 * mask)
    assert np.sum(labels4 != 0) == 31
    assert len(np.unique(labels4)) == 4  # (bg, filled, border, other)
    assert np.sum(labels4 == 1) == 9
    assert np.sum(labels4 == 2) == 10
    assert np.sum(labels4 == 3) == 12


def test_segmenter_labeled_mask_closing_disk():
    mask = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 1, 0, 0, 0, 0],  # filled, 1
        [0, 0, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 1, 1, 1],  # border, 2
        [0, 0, 0, 0, 0, 0, 0, 0, 1],
        [0, 0, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 1, 0, 0],  # other, 3
        [0, 0, 1, 0, 0, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 1, 0, 0],
        [0, 0, 1, 1, 1, 1, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
    ], dtype=bool)

    sm1 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": True,
                                                      "fill_holes": True,
                                                      "closing_disk": 1,
                                                      })
    labels1 = sm1.segment_frame(-10 * mask)
    assert np.sum(labels1 != 0) == 32
    assert len(np.unique(labels1)) == 3  # (bg, filled, other)
    assert np.sum(labels1 == 1) == 9
    # due to the relabeling done in `fill_holes`, the index of "other" is "3"
    assert np.sum(labels1 == 2) == 23

    sm2 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": True,
                                                      "fill_holes": False,
                                                      "closing_disk": 1,
                                                      })
    labels2 = sm2.segment_frame(-10 * mask)
    _, l2a, l2b = np.unique(labels2)
    assert np.sum(labels2 != 0) == 27
    assert len(np.unique(labels2)) == 3  # (bg, filled, other)
    assert np.sum(labels2 == l2a) == 9
    assert np.sum(labels2 == l2b) == 18

    sm3 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": False,
                                                      "fill_holes": False,
                                                      "closing_disk": 1,
                                                      })
    labels3 = sm3.segment_frame(-10 * mask)
    assert np.sum(labels3 != 0) == 35
    assert len(np.unique(labels3)) == 4  # (bg, filled, border, other)
    assert np.sum(labels3 == 1) == 9
    assert np.sum(labels3 == 2) == 8
    assert np.sum(labels3 == 3) == 18

    sm4 = segm.segm_thresh.SegmentThresh(thresh=-6,
                                         kwargs_mask={"clear_border": False,
                                                      "fill_holes": True,
                                                      "closing_disk": 1,
                                                      })
    labels4 = sm4.segment_frame(-10 * mask)
    assert np.sum(labels4 != 0) == 40
    assert len(np.unique(labels4)) == 4  # (bg, filled, border, other)
    assert np.sum(labels4 == 1) == 9
    assert np.sum(labels4 == 2) == 8
    assert np.sum(labels4 == 3) == 23


def test_segmenter_labeled_mask_fill_holes_int32():
    mask = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 1, 0, 0, 0],  # filled, 1
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1, 1],  # border, 2
        [0, 0, 0, 0, 0, 1, 1, 1],
        [0, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],  # other, 3
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
    ], dtype=bool)

    sm1 = segm.segm_thresh.SegmentThresh(thresh=-6)
    labels = np.array(sm1.segment_frame(-10 * mask), dtype=np.int64)
    # sanity checks
    assert labels.dtype == np.int64
    assert labels.dtype != np.int32
    labels_2 = sm1.process_mask(labels,
                                clear_border=False,
                                fill_holes=True,
                                closing_disk=False)
    assert np.allclose(labels, labels_2)
    assert labels_2.dtype == np.int32


def test_segmenter_segment_chunk():
    with segm.segm_thresh.SegmentThresh(thresh=-12, debug=True) as sm:
        image_data = MockImageData()
        labels_1 = np.copy(sm.segment_chunk(image_data, 0))  # below threshold
        assert sm.image_array.min() == -10
        labels_2 = np.copy(sm.segment_chunk(image_data, 10))  # above threshold
        assert sm.image_array.min() == -20
        assert np.all(labels_1 == 0)
        assert not np.all(labels_2 == 0)


def test_cpu_segmenter_getsetstate():
    sm1 = segm.segm_thresh.SegmentThresh(thresh=-12, debug=True)
    with segm.segm_thresh.SegmentThresh(thresh=-12, debug=True) as sm2:
        image_data = MockImageData()
        # Do some processing so that we have workers
        sm2.segment_chunk(image_data, 0)
        # get the state
        state = sm2.__getstate__()
        # set the state
        sm1.__setstate__(state)
        # and here we test for the raw data that was transferred
        assert not np.all(sm1.image_array == sm2.image_array)
        assert np.all(sm1.mp_image_raw == sm2.mp_image_raw)

tests/test_write_deque_writer_thread.py
DELETED
@@ -1,42 +0,0 @@

import collections
import pathlib

import h5py

from dcnum import write

from helper_methods import retrieve_data

data_path = pathlib.Path(__file__).parent / "data"


def test_writer_thread_basic():
    path = retrieve_data(data_path /
                         "fmt-hdf5_cytoshot_full-features_2023.zip")
    path_wrt = path.with_name("written.hdf5")
    dq = collections.deque()

    wrthr = write.DequeWriterThread(path_out=path_wrt, dq=dq)
    wrthr.start()

    with h5py.File(path) as h5:
        deform = h5["events"]["deform"][:]
        image = h5["events"]["image"][:]

    dq.append(("deform", deform))
    dq.append(("deform", deform))
    dq.append(("deform", deform[:10]))

    dq.append(("image", image))
    dq.append(("image", image))
    dq.append(("image", image[:10]))

    wrthr.finished_when_queue_empty()
    wrthr.join()

    with h5py.File(path_wrt) as ho:
        events = ho["events"]
        size = deform.shape[0]
        assert events["deform"].shape[0] == 2*size + 10
        assert events["image"].shape[0] == 2 * size + 10
        assert events["image"].shape[1:] == image.shape[1:]

tests/test_write_writer.py
DELETED
@@ -1,33 +0,0 @@

import pathlib

import h5py

from dcnum import write

from helper_methods import retrieve_data

data_path = pathlib.Path(__file__).parent / "data"


def test_writer_basic():
    path = retrieve_data(data_path /
                         "fmt-hdf5_cytoshot_full-features_2023.zip")
    path_wrt = path.with_name("written.hdf5")
    with h5py.File(path) as h5, write.HDF5Writer(path_wrt) as hw:
        deform = h5["events"]["deform"][:]
        image = h5["events"]["image"][:]

        hw.store_feature_chunk(feat="deform", data=deform)
        hw.store_feature_chunk(feat="deform", data=deform)
        hw.store_feature_chunk(feat="deform", data=deform[:10])

        hw.store_feature_chunk(feat="image", data=image)
        hw.store_feature_chunk(feat="image", data=image)
        hw.store_feature_chunk(feat="image", data=image[:10])

    with h5py.File(path_wrt) as ho:
        events = ho["events"]
        size = deform.shape[0]
        assert events["deform"].shape[0] == 2*size + 10
        assert events["image"].shape[0] == 2 * size + 10
        assert events["image"].shape[1:] == image.shape[1:]

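Note: the two deleted writer tests above exercise the two write paths in dcnum.write. The following condensed sketch is based on those tests and assumes the API shown there (HDF5Writer.store_feature_chunk, DequeWriterThread with path_out/dq, finished_when_queue_empty) is unchanged in 0.11.5; the file names and feature data are hypothetical.

import collections
import pathlib

import numpy as np
from dcnum import write

deform = np.linspace(0, 0.1, 100)  # hypothetical feature data

# Synchronous path: append feature chunks directly via the context manager
with write.HDF5Writer(pathlib.Path("direct.hdf5")) as hw:
    hw.store_feature_chunk(feat="deform", data=deform)

# Asynchronous path: a writer thread drains (feature, data) tuples from a deque
dq = collections.deque()
wrthr = write.DequeWriterThread(path_out=pathlib.Path("threaded.hdf5"), dq=dq)
wrthr.start()
dq.append(("deform", deform))
wrthr.finished_when_queue_empty()
wrthr.join()
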
File without changes