dcnum 0.16.2-py3-none-any.whl → 0.16.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dcnum/_version.py +2 -2
- dcnum/feat/event_extractor_manager_thread.py +4 -2
- dcnum/feat/feat_background/base.py +3 -8
- dcnum/feat/feat_background/bg_sparse_median.py +9 -6
- dcnum/feat/queue_event_extractor.py +1 -1
- dcnum/logic/ctrl.py +66 -19
- dcnum/read/cache.py +1 -1
- dcnum/read/hdf5_data.py +1 -1
- dcnum/segm/segmenter_gpu.py +4 -0
- dcnum/segm/segmenter_manager_thread.py +4 -1
- dcnum/write/__init__.py +2 -1
- dcnum/write/writer.py +14 -9
- {dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/METADATA +1 -1
- {dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/RECORD +17 -17
- {dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/LICENSE +0 -0
- {dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/WHEEL +0 -0
- {dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/top_level.txt +0 -0
dcnum/_version.py
CHANGED

dcnum/feat/event_extractor_manager_thread.py
CHANGED

@@ -81,9 +81,9 @@ class EventExtractorManagerThread(threading.Thread):
                    for _ in range(self.num_workers)]
         [w.start() for w in workers]

+        num_slots = len(self.slot_states)
         chunks_processed = 0
         while True:
-            num_slots = len(self.slot_states)
             cur_slot = 0
             unavailable_slots = 0
             # Check all slots for segmented labels
@@ -93,8 +93,10 @@ class EventExtractorManagerThread(threading.Thread):
             # - "s" the extractor processed the data and is waiting
             #       for the segmenter
             if self.slot_states[cur_slot] == "e":
+                # The segmenter has something for us in this slot.
                 break
             else:
+                # Try another slot.
                 unavailable_slots += 1
                 cur_slot = (cur_slot + 1) % num_slots
                 if unavailable_slots >= num_slots:
@@ -152,7 +154,7 @@ class EventExtractorManagerThread(threading.Thread):
         if inv_masks:
             self.logger.info(f"Encountered {inv_masks} invalid masks.")
             inv_frac = inv_masks / len(self.data)
-            if inv_frac > 0.
+            if inv_frac > 0.005:  # warn above one half percent
                 self.logger.warning(f"Discarded {inv_frac:.1%} of the masks. "
                                     f"Please check segmenter applicability.")

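The new 0.005 threshold reads as "warn once more than half a percent of the masks were discarded". A quick standalone check with made-up numbers (not taken from any dataset):

    inv_masks, total = 80, 10_000      # hypothetical counts
    inv_frac = inv_masks / total       # 0.008
    print(inv_frac > 0.005)            # True -> warning is logged
    print(f"{inv_frac:.1%}")           # "0.8%", as formatted in the log message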
dcnum/feat/feat_background/base.py
CHANGED

@@ -7,12 +7,11 @@ import uuid
 import warnings

 import h5py
-import hdf5plugin
 import numpy as np

 from ...meta import ppid
 from ...read import HDF5Data
-from ...write import create_with_basins
+from ...write import create_with_basins, set_default_filter_kwargs


 # All subprocesses should use 'spawn' to avoid issues with threads
@@ -112,10 +111,7 @@ class Background(abc.ABC):
         self.h5out = h5py.File(output_path, "a", libver="latest")

         # Initialize background data
-        if compress:
-            compression_kwargs = hdf5plugin.Zstd(clevel=5)
-        else:
-            compression_kwargs = {}
+        ds_kwargs = set_default_filter_kwargs(compression=compress)
         h5bg = self.h5out.require_dataset(
             "events/image_bg",
             shape=self.input_data.shape,
@@ -123,8 +119,7 @@ class Background(abc.ABC):
             chunks=(min(100, self.image_count),
                     self.image_shape[0],
                     self.image_shape[1]),
-
-            **compression_kwargs,
+            **ds_kwargs,
         )
         h5bg.attrs.create('CLASS', np.string_('IMAGE'))
         h5bg.attrs.create('IMAGE_VERSION', np.string_('1.2'))
dcnum/feat/feat_background/bg_sparse_median.py
CHANGED

@@ -5,6 +5,8 @@ import time
 import numpy as np
 from scipy import ndimage

+from ...read import HDF5Data
+
 from .base import mp_spawn, Background

 logger = logging.getLogger(__name__)
@@ -90,15 +92,16 @@ class BackgroundSparseMed(Background):
         # time axis
         self.time = None
         if self.h5in is not None:
-
+            hd = HDF5Data(self.h5in)
+            if "time" in hd:
                 # use actual time from dataset
-                self.time =
+                self.time = hd["time"][:]
                 self.time -= self.time[0]
-            elif "imaging:frame rate" in
-                fr =
-                if "frame" in
+            elif "imaging:frame rate" in hd.meta:
+                fr = hd.meta["imaging:frame rate"]
+                if "frame" in hd:
                     # compute time from frame rate and frame numbers
-                    self.time =
+                    self.time = hd["frame"] / fr
                     self.time -= self.time[0]
                 else:
                     # compute time using frame rate (approximate)
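The fallback added here derives event times from frame numbers when no "time" feature is stored. A minimal standalone sketch with invented values (hd in the real code is an HDF5Data instance; the array and frame rate below merely stand in for its "frame" feature and "imaging:frame rate" metadata):

    import numpy as np

    frame = np.array([10, 12, 15, 19])   # per-event frame numbers (made up)
    fr = 2000.0                          # imaging frame rate in Hz (made up)

    time = frame / fr                    # seconds since acquisition start
    time -= time[0]                      # re-reference to the first event
    print(time)                          # [0.     0.001  0.0025 0.0045]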
dcnum/feat/queue_event_extractor.py
CHANGED

@@ -315,7 +315,7 @@ class QueueEventExtractor:
         queue_handler = QueueHandler(self.log_queue)
         queue_handler.setLevel(self.log_level)
         self.logger.addHandler(queue_handler)
-        self.logger.info("
+        self.logger.info("Ready")

         mp_array = np.ctypeslib.as_array(
             self.label_array).reshape(self.data.image.chunk_shape)
dcnum/logic/ctrl.py
CHANGED

@@ -13,7 +13,6 @@ import time
 import traceback
 import uuid

-import hdf5plugin
 import h5py

 from ..feat.feat_background.base import get_available_background_methods
@@ -26,7 +25,7 @@ from ..read import HDF5Data
 from .._version import version_tuple
 from ..write import (
     DequeWriterThread, HDF5Writer, QueueCollectorThread,
-    copy_metadata, create_with_basins,
+    copy_metadata, create_with_basins, set_default_filter_kwargs
 )

 from .job import DCNumPipelineJob
@@ -36,6 +35,19 @@ from .json_encoder import ExtendedJSONEncoder
 # queues and threads and would end up with race conditions otherwise.
 mp_spawn = mp.get_context("spawn")

+#: valid states for a job runner. The states must be in logical order,
+#: not in alphabetical order.
+valid_states = [
+    "created",
+    "init",
+    "setup",
+    "background",
+    "segmentation",
+    "cleanup",
+    "done",
+    "error",
+]
+

 class DCNumJobRunner(threading.Thread):
     def __init__(self,
@@ -178,6 +190,16 @@ class DCNumJobRunner(threading.Thread):
         po = pathlib.Path(self.job["path_out"])
         return po.with_name(po.stem + f"_output_{self.tmp_suffix}.rtdc~")

+    @property
+    def state(self):
+        return self._state
+
+    @state.setter
+    def state(self, state):
+        if state not in valid_states:
+            raise ValueError(f"Invalid state '{state}' specified!")
+        self._state = state
+
     def close(self, delete_temporary_files=True):
         if self._data_raw is not None:
             self._data_raw.close()
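Together with the module-level valid_states list above, this property turns the runner's state into a small validated state machine; because the list is ordered by pipeline stage, "is step X already finished?" reduces to an index comparison. A self-contained sketch of that pattern (_Runner is a stand-in for DCNumJobRunner, not the actual implementation):

    valid_states = ["created", "init", "setup", "background",
                    "segmentation", "cleanup", "done", "error"]

    class _Runner:
        def __init__(self):
            self._state = "created"

        @property
        def state(self):
            return self._state

        @state.setter
        def state(self, state):
            if state not in valid_states:
                raise ValueError(f"Invalid state '{state}' specified!")
            self._state = state

    r = _Runner()
    r.state = "segmentation"                   # accepted
    print(valid_states.index(r.state)
          > valid_states.index("background"))  # True: background stage is behind us
    r.state = "finished"                       # raises ValueError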
@@ -209,17 +231,32 @@ class DCNumJobRunner(threading.Thread):
         self.close(delete_temporary_files=delete_temporary_files)

     def get_status(self):
-
-
-
+        # Compute the total progress. The following weights indicate
+        # how much fractional time each processing step takes.
+        bgw = 4  # fraction of background
+        exw = 27  # fraction of segmentation and feature extraction
+        clw = 1  # fraction of cleanup operations
+        tot = bgw + exw + clw
         progress = 0
-
+        st = self.state
+
+        # background
+        if valid_states.index(st) > valid_states.index("background"):
+            # background already computed
+            progress += bgw / tot
+        elif self._progress_bg is not None:
             # This is the image count of the input dataset
-            progress +=
-
-
-            if
-
+            progress += bgw / tot * (self._progress_bg.value / len(self.draw))
+
+        # segmentation
+        if valid_states.index(st) > valid_states.index("segmentation"):
+            # segmentation already done
+            progress += exw / tot
+        elif self._progress_ex is not None:
+            progress += exw / tot * self._progress_ex
+
+        if self.state == "done":
+            progress = 1

         return {
             "progress": progress,
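The weights make the reported progress a weighted sum over the pipeline stages: 4/32 for background, 27/32 for segmentation plus feature extraction, 1/32 for cleanup. A worked example with an invented mid-run snapshot:

    bgw, exw, clw = 4, 27, 1
    tot = bgw + exw + clw        # 32
    progress = bgw / tot         # background finished: 0.125
    progress += exw / tot * 0.5  # segmentation half done: +0.421875
    print(round(progress, 3))    # 0.547 -> returned as "progress" by get_status()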
@@ -231,7 +268,7 @@
         try:
             self.run_pipeline()
         except BaseException:
-            self.
+            self.state = "error"
             self.error_tb = traceback.format_exc()
             if not self.is_alive():
                 # Thread has not been started. This means we are not running
@@ -248,7 +285,7 @@
                 f"Output file {self.job['path_out']} already exists!")
         # Make sure the output directory exists.
         self.job["path_out"].parent.mkdir(parents=True, exist_ok=True)
-        self.
+        self.state = "setup"
         # First get a list of all pipeline IDs. If the input file has
         # already been processed by dcnum, then we do not have to redo
         # everything.
@@ -290,7 +327,7 @@
                     or (datdict["feat_id"] != self.ppdict["feat_id"])
                     or (datdict["gate_id"] != self.ppdict["gate_id"]))

-        self.
+        self.state = "background"

         if redo_bg:
             # The 'image_bg' feature is written to `self.path_temp_in`.
@@ -299,7 +336,7 @@
             # (note that `self.path_temp_in` is basin-based).
             self.task_background()

-        self.
+        self.state = "segmentation"

         # We have the input data covered, and we have to run the
         # long-lasting segmentation and feature extraction step.
@@ -323,7 +360,7 @@
         # reflected in `self.path_temp_out`.
         self.path_temp_in.rename(self.path_temp_out)

-        self.
+        self.state = "cleanup"

         # The user would normally expect the output file to be something
         # that is self-contained (copying the file wildly across file
@@ -410,7 +447,7 @@

         # Rename the output file
         self.path_temp_out.rename(self.job["path_out"])
-        self.
+        self.state = "done"

     def task_background(self):
         """Perform background computation task
@@ -442,8 +479,7 @@
         self.logger.info("Starting segmentation and feature extraction")
         # Start writer thread
         writer_dq = collections.deque()
-        ds_kwds =
-        ds_kwds["fletcher32"] = True
+        ds_kwds = set_default_filter_kwargs()
         thr_write = DequeWriterThread(
             path_out=self.path_temp_out,
             dq=writer_dq,
@@ -462,13 +498,24 @@
         if self.job["debug"]:
             num_slots = 1
             num_extractors = 1
+            num_segmenters = 1
         elif seg_cls.hardware_processor == "cpu":  # CPU segmenter
+            # We could in principle set the number of slots to one and
+            # have both number of extractors and number of segmenters set
+            # to the total number of CPUs. However, we would need more RAM
+            # (for caching the image data) and we also have more overhead.
+            # Having two slots shared between all workers is more efficient.
             num_slots = 2
+            # Split segmentation and feature extraction workers evenly.
             num_extractors = self.job["num_procs"] // 2
+            num_segmenters = self.job["num_procs"] - num_extractors
         else:  # GPU segmenter
             num_slots = 3
             num_extractors = self.job["num_procs"]
+            num_segmenters = 1
         num_extractors = max(1, num_extractors)
+        num_segmenters = max(1, num_segmenters)
+        self.job["segmenter_kwargs"]["num_workers"] = num_segmenters

         slot_chunks = mp_spawn.Array("i", num_slots)
         slot_states = mp_spawn.Array("u", num_slots)
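For orientation, assuming a hypothetical job with num_procs=8: the CPU branch splits the processes into 4 extractors and 4 segmenters sharing 2 slots, whereas the GPU branch keeps all 8 processes as extractors and pins the segmenter count to 1 (matching the new GPUSegmenter check further down).

    num_procs = 8                                 # hypothetical setting
    # CPU segmenter branch
    num_extractors = num_procs // 2               # 4
    num_segmenters = num_procs - num_extractors   # 4
    # GPU segmenter branch
    num_extractors_gpu, num_segmenters_gpu = num_procs, 1   # 8 and 1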
dcnum/read/cache.py
CHANGED
dcnum/read/hdf5_data.py
CHANGED

@@ -25,7 +25,7 @@ class HDF5Data:
                  basins: List[Dict[List[str] | str]] = None,
                  logs: Dict[List[str]] = None,
                  tables: Dict[np.ndarray] = None,
-                 image_cache_size: int =
+                 image_cache_size: int = 2,
                  ):
         # Init is in __setstate__ so we can pickle this class
         # and use it for multiprocessing.
dcnum/segm/segmenter_gpu.py
CHANGED

@@ -15,6 +15,7 @@ class GPUSegmenter(Segmenter, abc.ABC):

     def __init__(self,
                  *,
+                 num_workers: int = None,
                  kwargs_mask: Dict = None,
                  debug: bool = False,
                  **kwargs
@@ -31,6 +32,9 @@ class GPUSegmenter(Segmenter, abc.ABC):
            Additional, optional keyword arguments for `segment_approach`
            defined in the subclass.
        """
+        if num_workers not in [None, 1]:
+            raise ValueError(f"Number of workers must not be larger than 1 "
+                             f"for GPU segmenter, got '{num_workers}'!")
         super(GPUSegmenter, self).__init__(kwargs_mask=kwargs_mask,
                                            debug=debug,
                                            **kwargs)
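GPUSegmenter is abstract, so the new guard is easiest to show in isolation; the helper name below is made up, but its body mirrors the check added above:

    def _check_gpu_workers(num_workers):
        # Same validation as in GPUSegmenter.__init__
        if num_workers not in [None, 1]:
            raise ValueError(f"Number of workers must not be larger than 1 "
                             f"for GPU segmenter, got '{num_workers}'!")

    _check_gpu_workers(None)   # fine: no explicit worker count given
    _check_gpu_workers(1)      # fine: the value ctrl.py now passes for GPU jobs
    _check_gpu_workers(4)      # raises ValueError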
dcnum/segm/segmenter_manager_thread.py
CHANGED

@@ -77,9 +77,9 @@ class SegmenterManagerThread(threading.Thread):
         self.debug = debug

     def run(self):
+        num_slots = len(self.slot_states)
         # We iterate over all the chunks of the image data.
         for chunk in self.image_data.iter_chunks():
-            num_slots = len(self.slot_states)
             cur_slot = 0
             empty_slots = 0
             # Wait for a free slot to perform segmentation (compute labels)
@@ -89,8 +89,11 @@ class SegmenterManagerThread(threading.Thread):
                 # - "s" the extractor processed the data and is waiting
                 #       for the segmenter
                 if self.slot_states[cur_slot] != "e":
+                    # It's the segmenter's turn. Note that we use '!= "e"',
+                    # because the initial value is "\x00".
                     break
                 else:
+                    # Try another slot.
                     empty_slots += 1
                     cur_slot = (cur_slot + 1) % num_slots
                     if empty_slots >= num_slots:
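The "\x00" remark can be reproduced outside of dcnum: a spawn-context unicode Array starts out filled with NUL characters, so no slot compares equal to "e" until the segmenter writes that marker. A minimal sketch:

    import multiprocessing as mp

    mp_spawn = mp.get_context("spawn")
    slot_states = mp_spawn.Array("u", 2)   # unicode slots, initialized to "\x00"

    print(list(slot_states))               # ['\x00', '\x00']
    print(slot_states[0] != "e")           # True -> segmenter may claim this slot

    slot_states[0] = "e"                   # slot now holds data for the extractor
    print(slot_states[0] != "e")           # False -> segmenter skips to the next slot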
dcnum/write/__init__.py
CHANGED

@@ -1,4 +1,5 @@
 # flake8: noqa: F401
 from .deque_writer_thread import DequeWriterThread
 from .queue_collector_thread import EventStash, QueueCollectorThread
-from .writer import
+from .writer import (
+    HDF5Writer, copy_metadata, create_with_basins, set_default_filter_kwargs)
dcnum/write/writer.py
CHANGED

@@ -21,11 +21,7 @@ class HDF5Writer:
         """Write deformability cytometry HDF5 data"""
         self.h5 = h5py.File(path, mode=mode, libver="latest")
         self.events = self.h5.require_group("events")
-
-        ds_kwds = {}
-        for key, val in dict(hdf5plugin.Zstd(clevel=5)).items():
-            ds_kwds.setdefault(key, val)
-        ds_kwds.setdefault("fletcher32", True)
+        ds_kwds = set_default_filter_kwargs(ds_kwds)
         self.ds_kwds = ds_kwds

     def __enter__(self):
@@ -249,10 +245,7 @@ def copy_metadata(h5_src: h5py.File,
     are not defined already are added.
     """
     # compress data
-    ds_kwds =
-    for key, val in dict(hdf5plugin.Zstd(clevel=5)).items():
-        ds_kwds.setdefault(key, val)
-    ds_kwds.setdefault("fletcher32", True)
+    ds_kwds = set_default_filter_kwargs()
     # set attributes
     src_attrs = dict(h5_src.attrs)
     for kk in src_attrs:
@@ -283,3 +276,15 @@
                   f"dcnum {version}"]
     soft_strgs = [s for s in soft_strgs if s is not None]
     ds.attrs["software"] = " | ".join(soft_strgs)
+
+
+def set_default_filter_kwargs(ds_kwds=None, compression=True):
+    if ds_kwds is None:
+        ds_kwds = {}
+    if compression:
+        # compression
+        for key, val in dict(hdf5plugin.Zstd(clevel=5)).items():
+            ds_kwds.setdefault(key, val)
+    # checksums
+    ds_kwds.setdefault("fletcher32", True)
+    return ds_kwds
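Read on its own, the new helper centralizes what HDF5Writer, copy_metadata and the background writer previously repeated: Zstd compression settings from hdf5plugin plus a fletcher32 checksum, applied with setdefault() so caller-supplied values win. A usage sketch (file name and data below are made up):

    import h5py
    from dcnum.write import set_default_filter_kwargs

    ds_kwds = set_default_filter_kwargs()
    # Zstd filter settings (compression/compression_opts) plus fletcher32=True

    custom = set_default_filter_kwargs({"fletcher32": False})
    print(custom["fletcher32"])              # False: existing keys are kept

    plain = set_default_filter_kwargs(compression=False)
    print(plain)                             # {'fletcher32': True}

    with h5py.File("example.rtdc", "w") as h5:
        h5.create_dataset("events/deform", data=[0.1, 0.2], **ds_kwds)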
{dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/RECORD
CHANGED

@@ -1,14 +1,14 @@
 dcnum/__init__.py,sha256=hcawIKS7utYiOyVhOAX9t7K3xYzP1b9862VV0b6qSrQ,74
-dcnum/_version.py,sha256=
+dcnum/_version.py,sha256=KgYmvPTSKj3FMTlG8S1wtqb14ST1EcsHHNSwLJp3foQ,413
 dcnum/feat/__init__.py,sha256=JqlgzOgDJhoTk8WVYcIiKTWq9EAM16_jGivzOtN6JGo,325
-dcnum/feat/event_extractor_manager_thread.py,sha256=
+dcnum/feat/event_extractor_manager_thread.py,sha256=ypsGEwmM_ohHCnnl8g1vpruezFAkH0drIU1AOngH5Bg,6837
 dcnum/feat/gate.py,sha256=srobj5p2RDr_S2SUtbwGbTKatnc_aPSndt0cR2P9zoY,7060
-dcnum/feat/queue_event_extractor.py,sha256=
+dcnum/feat/queue_event_extractor.py,sha256=o7K4p5VNExnaO6lgnlHrVk_qPhbXzocUyFhUcoP7OAU,14970
 dcnum/feat/feat_background/__init__.py,sha256=OTmMuazHNaSrZb2XW4cnJ6PlgJLbKrPbaidpEixYa0A,341
-dcnum/feat/feat_background/base.py,sha256=
+dcnum/feat/feat_background/base.py,sha256=KA1H5giTyMBADex2-LmGbu7B1PEAKjiCUAvSF89WiZs,8375
 dcnum/feat/feat_background/bg_copy.py,sha256=aHabgizRuwIdOH8S850Cun9NsmpMzo4B3yHWv1aFNFI,645
 dcnum/feat/feat_background/bg_roll_median.py,sha256=FfC3v1cX8mreLO971C_kTpFRBtuJP4Sv-Hj1Wj8yb3Q,12826
-dcnum/feat/feat_background/bg_sparse_median.py,sha256=
+dcnum/feat/feat_background/bg_sparse_median.py,sha256=CDO8X7-7agBxTrC79lskt0zWTaSex6ouxUVfxImhgs4,17630
 dcnum/feat/feat_brightness/__init__.py,sha256=o6AebVlmydwNgVF5kW6ITqJyFreoKrU3Ki_3EC8If-s,155
 dcnum/feat/feat_brightness/bright_all.py,sha256=Z5b-xkw7g7ejMpbGmdUqrxGRymqFhAQsZ938gaGXk9Y,3102
 dcnum/feat/feat_brightness/common.py,sha256=JX49EszYDmnvoOKXFVV1CalEIWRmOuY5EryNbqGbdac,156
@@ -19,27 +19,27 @@ dcnum/feat/feat_texture/__init__.py,sha256=6StM9S540UVtdFFR3bHa7nfCTomeVdoo7Uy9C
 dcnum/feat/feat_texture/common.py,sha256=COXHpXS-7DMouGu3WF83I76L02Sr7P9re4lxajh6g0E,439
 dcnum/feat/feat_texture/tex_all.py,sha256=eGjjNfPpfZw7FA_VNFCIMiU38KD0qcGbxLciYy-tCiA,4097
 dcnum/logic/__init__.py,sha256=5hgAQMp2YGsqpWoeTQ9qxGAWfxPOKQjJsYyNsS49t0g,131
-dcnum/logic/ctrl.py,sha256=
+dcnum/logic/ctrl.py,sha256=aqXCH_yyrfifeAxmpW6Cg-FQIwTBjpElHbva60ghYpY,26655
 dcnum/logic/job.py,sha256=M0Q-Rfcm-zkTXTQc79W6YSNUjUlgmRPG0Ikbdn1aOpY,4608
 dcnum/logic/json_encoder.py,sha256=dy44ArmdnxpUfxxONmKdIv-fde3aTXPjZDN0HPATaxs,467
 dcnum/meta/__init__.py,sha256=cQT_HN5yDKzMnZM8CUyNmeA68OhE3ENO_rvFmgDj95c,40
 dcnum/meta/ppid.py,sha256=_xUqJal4wBqgic2aRN3ZMMteTggHeYGs44nrYbTKlpQ,8107
 dcnum/read/__init__.py,sha256=iV2wrBMdwJgpXaphNiiAVybndDzTTv0CAGRNXyvxcLY,157
-dcnum/read/cache.py,sha256=
+dcnum/read/cache.py,sha256=0tMurtHOA7VnPNpfeAGi-dxWXfYhL5wmWuXb6ka_eEo,5467
 dcnum/read/const.py,sha256=SVlvEJiRIHyTyUlWG24_ogcnT5nTxCi0CRslNuNP56I,282
-dcnum/read/hdf5_data.py,sha256=
+dcnum/read/hdf5_data.py,sha256=8g39CZoFIa2tUvizZt_vzMeoCUcTkkt3AkXK6MMN0iY,18817
 dcnum/segm/__init__.py,sha256=iiq_1A9DU5wMUcKnsZ53E7NyzCkbZCJeUDimzunE-OM,247
 dcnum/segm/segm_thresh.py,sha256=aLVTydPjbrgKDkZFY3Ew5CX-miwOw71meHfxcO5EjCc,1176
 dcnum/segm/segmenter.py,sha256=F3gCp-Z51F9GxdFYPF1CHjnbfgqnS0_g-34lJF2tMCM,10611
 dcnum/segm/segmenter_cpu.py,sha256=tCY105rVr9_0RIq2618qnF1ueHRj7UtuK_nUBoAg-nY,10743
-dcnum/segm/segmenter_gpu.py,sha256=
-dcnum/segm/segmenter_manager_thread.py,sha256=
-dcnum/write/__init__.py,sha256=
+dcnum/segm/segmenter_gpu.py,sha256=tL2X5BN0jKmhC7wgfG0hygd-6UpG1ZCVuKe5OP1qde0,2133
+dcnum/segm/segmenter_manager_thread.py,sha256=2znDaKedSueomcU1pbHtFmVcGoHzp--sf494VgJF_Tk,5342
+dcnum/write/__init__.py,sha256=Cpn3LqL18hh8OScUnGp_AnNfpWPpKW-oAJZH6ot7aRA,241
 dcnum/write/deque_writer_thread.py,sha256=R4x3p-HZUls3upCBX3vV1VqSdSmaiHdrAswMJj_tVpk,1643
 dcnum/write/queue_collector_thread.py,sha256=c0Z6uZfZ3B8xsTMCB5jglEukM5sesA9HgEawBk_YEUA,11910
-dcnum/write/writer.py,sha256=
-dcnum-0.16.
-dcnum-0.16.
-dcnum-0.16.
-dcnum-0.16.
-dcnum-0.16.
+dcnum/write/writer.py,sha256=Hr37OSDJGUpJJ4OufJHYYBanE26GiNwUPOMAt-5Yc2Y,10478
+dcnum-0.16.3.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
+dcnum-0.16.3.dist-info/METADATA,sha256=BChO0SWVq5w9ZEFvvP0KDsFd-T1WRbMZkhW2xCBeVWc,2172
+dcnum-0.16.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+dcnum-0.16.3.dist-info/top_level.txt,sha256=Hmh38rgG_MFTVDpUDGuO2HWTSq80P585Het4COQzFTg,6
+dcnum-0.16.3.dist-info/RECORD,,
{dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/LICENSE
File without changes

{dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/WHEEL
File without changes

{dcnum-0.16.2.dist-info → dcnum-0.16.3.dist-info}/top_level.txt
File without changes