dcnum 0.17.0__py3-none-any.whl → 0.23.1__py3-none-any.whl
- dcnum/_version.py +2 -2
- dcnum/feat/__init__.py +1 -1
- dcnum/feat/event_extractor_manager_thread.py +34 -25
- dcnum/feat/feat_background/base.py +22 -26
- dcnum/feat/feat_background/bg_copy.py +18 -12
- dcnum/feat/feat_background/bg_roll_median.py +20 -10
- dcnum/feat/feat_background/bg_sparse_median.py +55 -7
- dcnum/feat/feat_brightness/bright_all.py +41 -6
- dcnum/feat/feat_contour/__init__.py +4 -0
- dcnum/feat/{feat_moments/mt_legacy.py → feat_contour/moments.py} +32 -8
- dcnum/feat/feat_contour/volume.py +174 -0
- dcnum/feat/feat_texture/tex_all.py +28 -1
- dcnum/feat/gate.py +2 -2
- dcnum/feat/queue_event_extractor.py +30 -9
- dcnum/logic/ctrl.py +199 -49
- dcnum/logic/job.py +63 -2
- dcnum/logic/json_encoder.py +2 -0
- dcnum/meta/ppid.py +17 -3
- dcnum/read/__init__.py +1 -0
- dcnum/read/cache.py +100 -78
- dcnum/read/const.py +6 -4
- dcnum/read/hdf5_data.py +146 -23
- dcnum/read/mapped.py +87 -0
- dcnum/segm/__init__.py +6 -3
- dcnum/segm/segm_thresh.py +6 -18
- dcnum/segm/segm_torch/__init__.py +19 -0
- dcnum/segm/segm_torch/segm_torch_base.py +125 -0
- dcnum/segm/segm_torch/segm_torch_mpo.py +71 -0
- dcnum/segm/segm_torch/segm_torch_sto.py +88 -0
- dcnum/segm/segm_torch/torch_model.py +95 -0
- dcnum/segm/segm_torch/torch_postproc.py +93 -0
- dcnum/segm/segm_torch/torch_preproc.py +114 -0
- dcnum/segm/segmenter.py +181 -80
- dcnum/segm/segmenter_manager_thread.py +38 -30
- dcnum/segm/{segmenter_cpu.py → segmenter_mpo.py} +116 -44
- dcnum/segm/segmenter_sto.py +110 -0
- dcnum/write/__init__.py +2 -1
- dcnum/write/deque_writer_thread.py +9 -1
- dcnum/write/queue_collector_thread.py +8 -14
- dcnum/write/writer.py +128 -5
- {dcnum-0.17.0.dist-info → dcnum-0.23.1.dist-info}/METADATA +4 -2
- dcnum-0.23.1.dist-info/RECORD +55 -0
- {dcnum-0.17.0.dist-info → dcnum-0.23.1.dist-info}/WHEEL +1 -1
- dcnum/feat/feat_moments/__init__.py +0 -4
- dcnum/segm/segmenter_gpu.py +0 -64
- dcnum-0.17.0.dist-info/RECORD +0 -46
- /dcnum/feat/{feat_moments/ct_opencv.py → feat_contour/contour.py} +0 -0
- {dcnum-0.17.0.dist-info → dcnum-0.23.1.dist-info}/LICENSE +0 -0
- {dcnum-0.17.0.dist-info → dcnum-0.23.1.dist-info}/top_level.txt +0 -0
dcnum/logic/ctrl.py
CHANGED
```diff
@@ -1,5 +1,6 @@
 import collections
 import datetime
+import hashlib
 import json
 import logging
 from logging.handlers import QueueListener
@@ -14,6 +15,7 @@ import traceback
 import uuid
 
 import h5py
+import numpy as np
 
 from ..feat.feat_background.base import get_available_background_methods
 from ..feat.queue_event_extractor import QueueEventExtractor
@@ -21,10 +23,10 @@ from ..feat import gate
 from ..feat import EventExtractorManagerThread
 from ..segm import SegmenterManagerThread, get_available_segmenters
 from ..meta import ppid
-from ..read import HDF5Data
-from .._version import version_tuple
+from ..read import HDF5Data, get_mapping_indices
+from .._version import version, version_tuple
 from ..write import (
-    DequeWriterThread, HDF5Writer, QueueCollectorThread,
+    DequeWriterThread, HDF5Writer, QueueCollectorThread, copy_features,
     copy_metadata, create_with_basins, set_default_filter_kwargs
 )
 
@@ -43,6 +45,7 @@ valid_states = [
     "setup",
     "background",
     "segmentation",
+    "plumbing",
     "cleanup",
     "done",
     "error",
@@ -79,16 +82,16 @@ class DCNumJobRunner(threading.Thread):
         # current job state
         self._state = "init"
         # overall progress [0, 1]
-        self._progress_bg = None
-        self._progress_ex = None
+        self._progress_bg = None  # background
+        self._progress_ex = None  # segmentation
+        self._progress_bn = None  # creating basins
         # segmentation frame rate
         self._segm_rate = 0
 
         # Set up logging
         # General logger for this job
         self.main_logger = logging.getLogger("dcnum")
-        self.main_logger.setLevel(
-            logging.DEBUG if job["debug"] else logging.INFO)
+        self.main_logger.setLevel(job["log_level"])
         # Log file output in target directory
         self.path_log = job["path_out"].with_suffix(".log")
         self.path_log.parent.mkdir(exist_ok=True, parents=True)
@@ -237,8 +240,12 @@ class DCNumJobRunner(threading.Thread):
         # how much fractional time each processing step takes.
         bgw = 4  # fraction of background
         exw = 27  # fraction of segmentation and feature extraction
+        if self.job["basin_strategy"] == "drain":
+            drw = 15  # because data need to be copied
+        else:
+            drw = 1  # just creating the basins in output file
         clw = 1  # fraction of cleanup operations
-        tot = bgw + exw + clw
+        tot = bgw + exw + drw + clw
         progress = 0
         st = self.state
 
@@ -247,15 +254,22 @@ class DCNumJobRunner(threading.Thread):
             # background already computed
             progress += bgw / tot
         elif self._progress_bg is not None:
-            # This is the image count of the input dataset
-            progress +=
+            # This is the image count of the input dataset.
+            progress += self._progress_bg.value * bgw / tot
 
         # segmentation
         if valid_states.index(st) > valid_states.index("segmentation"):
            # segmentation already done
             progress += exw / tot
         elif self._progress_ex is not None:
-            progress += exw / tot
+            progress += self._progress_ex * exw / tot
+
+        # draining basins
+        if valid_states.index(st) > valid_states.index("plumbing"):
+            # plumbing already done
+            progress += drw / tot
+        if self._progress_bn is not None:
+            progress += self._progress_bn * drw / tot
 
         if self.state == "done":
             progress = 1
```
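The new "plumbing" step changes the progress denominator. A small standalone sketch of the weighting arithmetic, using the weights from the hunk above (the fractional progress values are invented examples):

```python
# Step weights from the progress property, assuming basin_strategy == "drain"
bgw, exw, drw, clw = 4, 27, 15, 1
tot = bgw + exw + drw + clw  # 47

# halfway through background computation:
print(0.5 * bgw / tot)          # ~0.043
# background done, segmentation 80% complete, plumbing not started:
print((bgw + 0.8 * exw) / tot)  # ~0.545
```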
```diff
@@ -310,12 +324,23 @@ class DCNumJobRunner(threading.Thread):
             # Whether pipeline hash is invalid.
             ppid.compute_pipeline_hash(**datdict) != dathash
             # Whether the input file is the original output of the pipeline.
-            or len(self.draw) != evyield
+            or len(self.draw) != evyield
+            # If index mapping is defined, then we always redo the pipeline.
+            # If the pipeline hashes are identical and index mapping is not
+            # None, then both pipelines were done with index mapping.
+            # But applying the same pipeline with index mapping in series
+            # will lead to a different result in the second run (e.g. 1st
+            # pipeline run: take every 2nd event; 2nd pipeline run: take
+            # every second event -> results in every 4th event in output of
+            # second pipeline run).
+            or self.draw.index_mapping is not None
+        )
         # Do we have to recompute the background data? In addition to the
         # hash sanity check above, check the generation, input data,
         # and background pipeline identifiers.
         redo_bg = (
-
+            "image_bg" not in self.draw
+            or (datdict["gen_id"] != self.ppdict["gen_id"])
             or (datdict["dat_id"] != self.ppdict["dat_id"])
             or (datdict["bg_id"] != self.ppdict["bg_id"]))
 
@@ -361,16 +386,20 @@ class DCNumJobRunner(threading.Thread):
         # Note any new actions that work on `self.path_temp_in` are not
         # reflected in `self.path_temp_out`.
         self.path_temp_in.rename(self.path_temp_out)
-
-
-
-
-
-
-
-
-
-
+        # Since no segmentation was done, the output file now does not
+        # contain any events. This is not really what we wanted, but we
+        # can still store all features in the output file if required.
+        if self.job["basin_strategy"] == "drain":
+            orig_feats = []
+            for feat in self.draw.h5["events"].keys():
+                if isinstance(self.draw.h5["events"][feat], h5py.Dataset):
+                    # copy_features does not support Groups
+                    orig_feats.append(feat)
+            with h5py.File(self.path_temp_out, "a") as h5_dst:
+                copy_features(h5_src=self.draw.h5,
+                              h5_dst=h5_dst,
+                              features=orig_feats,
+                              mapping=None)
 
         with HDF5Writer(self.path_temp_out) as hw:
             # pipeline metadata
@@ -382,6 +411,10 @@ class DCNumJobRunner(threading.Thread):
             hw.h5.attrs["pipeline:dcnum gate"] = self.ppdict["gate_id"]
             hw.h5.attrs["pipeline:dcnum hash"] = self.pphash
             hw.h5.attrs["pipeline:dcnum yield"] = self.event_count
+            # index mapping information
+            im = self.job.kwargs["data_kwargs"].get("index_mapping", None)
+            dim = HDF5Data.get_ppid_index_mapping(im)
+            hw.h5.attrs["pipeline:dcnum mapping"] = dim
             # regular metadata
             hw.h5.attrs["experiment:event count"] = self.event_count
             hw.h5.attrs["imaging:pixel size"] = self.draw.pixel_size
@@ -419,7 +452,8 @@ class DCNumJobRunner(threading.Thread):
             with h5py.File(self.job["path_in"]) as h5_src:
                 copy_metadata(h5_src=h5_src,
                               h5_dst=hw.h5,
-                              #
+                              # Don't copy basins, we would have to index-map
+                              # them first.
                               copy_basins=False)
             if redo_seg:
                 # Store the correct measurement identifier. This is used to
@@ -429,13 +463,27 @@ class DCNumJobRunner(threading.Thread):
                 # This is the identifier appendix that we use to identify this
                 # dataset. Note that we only override the run identifier when
                 # segmentation did actually take place.
-                mid_ap = "dcn-
-                # This is the current measurement identifier
-                mid_cur = hw.h5.attrs.get("experiment:run identifier"
+                mid_ap = f"dcn-{self.pphash[:7]}"
+                # This is the current measurement identifier
+                mid_cur = hw.h5.attrs.get("experiment:run identifier")
+                if not mid_cur:
+                    # Compute a measurement identifier from the metadata
+                    m_time = hw.h5.attrs.get("experiment:time", "none")
+                    m_date = hw.h5.attrs.get("experiment:date", "none")
+                    m_sid = hw.h5.attrs.get("setup:identifier", "none")
+                    hasher = hashlib.md5(
+                        f"{m_time}_{m_date}_{m_sid}".encode("utf-8"))
+                    mid_cur = str(uuid.UUID(hex=hasher.hexdigest()))
                 # The new measurement identifier is a combination of both.
                 mid_new = f"{mid_cur}_{mid_ap}" if mid_cur else mid_ap
                 hw.h5.attrs["experiment:run identifier"] = mid_new
 
+        # Handle basin data according to the user's request
+        self.state = "plumbing"
+        self.task_enforce_basin_strategy()
+
+        self.state = "cleanup"
+
         trun = datetime.timedelta(seconds=round(time.monotonic() - time_start))
         self.logger.info(f"Run duration: {str(trun)}")
         self.logger.info(time.strftime("Run stop: %Y-%m-%d-%H.%M.%S",
```
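The fallback run identifier above is derived deterministically from three metadata attributes. A minimal standalone sketch of that derivation (the attribute values here are invented placeholders for "experiment:time", "experiment:date" and "setup:identifier"):

```python
import hashlib
import uuid

# Hypothetical metadata values standing in for the HDF5 attributes:
m_time, m_date, m_sid = "12:34:56", "2024-04-09", "none"

# md5 yields 32 hex digits, which is exactly what uuid.UUID(hex=...) expects,
# so identical metadata always maps to the same UUID-formatted identifier.
hasher = hashlib.md5(f"{m_time}_{m_date}_{m_sid}".encode("utf-8"))
mid_cur = str(uuid.UUID(hex=hasher.hexdigest()))
print(mid_cur)  # a stable UUID-formatted string for these inputs
```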
```diff
@@ -477,6 +525,115 @@ class DCNumJobRunner(threading.Thread):
         bic.process()
         self.logger.info("Finished background computation")
 
+    def task_enforce_basin_strategy(self):
+        """Transfer basin data from input files to output if requested
+
+        The user specified the "basin_strategy" keyword argument in
+        `self.job`. If this is set to "drain", then copy all basin
+        information from the input file to the output file. If it
+        is set to "tap", then only create basins in the output file.
+        """
+        self._progress_bn = 0
+        t0 = time.perf_counter()
+        # We need to make sure that the features are correctly attributed
+        # from the input files. E.g. if the input file already has
+        # background images, but we recompute the background images, then
+        # we have to use the data from the recomputed background file.
+        # We achieve this by keeping a specific order and only copying those
+        # features that we don't already have in the output file.
+        feats_raw = [
+            # 1. background data from the temporary input image
+            #    (this must come before draw [sic!])
+            [self.dtin.h5, ["image_bg", "bg_off"], "critical"],
+            # 2. frame-based scalar features from the raw input file
+            #    (e.g. "temp" or "frame")
+            [self.draw.h5, self.draw.features_scalar_frame, "optional"],
+            # 3. image features from the input file
+            [self.draw.h5, ["image", "image_bg", "bg_off"], "optional"],
+        ]
+        with h5py.File(self.path_temp_out, "a") as hout:
+            hw = HDF5Writer(hout)
+            # First, we have to determine the basin mapping from input to
+            # output. This information is stored by the QueueCollectorThread
+            # in the "basinmap0" feature, ready to be used by us.
+            if "index_unmapped" in hout["events"]:
+                # The unmapped indices enumerate the events in the output file
+                # with indices from the mapped input file. E.g. if for the
+                # first image in the input file, two events are found and for
+                # the second image in the input file, three events are found,
+                # then this would contain [0, 0, 1, 1, 1, ...]. If the index
+                # mapping of the input file was set to slice(1, 100), then the
+                # first image would not be there, and we would have
+                # [1, 1, 1, ...].
+                idx_um = hout["events/index_unmapped"]
+
+                # If we want to convert this to an actual basinmap feature,
+                # then we have to convert those indices to indices that map
+                # to the original input HDF5 file.
+                raw_im = self.draw.index_mapping
+                if raw_im is None:
+                    self.logger.info("Input file mapped with basinmap0")
+                    # Create a hard link to save time and space
+                    hout["events/basinmap0"] = hout["events/index_unmapped"]
+                    basinmap = idx_um
+                else:
+                    basinmap = get_mapping_indices(raw_im)[idx_um]
+                    # Store the mapped basin data in the output file.
+                    hw.store_feature_chunk("basinmap0", basinmap)
+                # We don't need them anymore.
+                del hout["events/index_unmapped"]
+
+                # Note that `size_raw != (len(self.draw))` [sic!]. The former
+                # is the size of the raw dataset and the latter is its mapped
+                # size!
+                size_raw = self.draw.h5.attrs["experiment:event count"]
+                if (len(basinmap) == size_raw
+                        and np.all(basinmap == np.arange(size_raw))):
+                    # This means that the images in the input overlap perfectly
+                    # with the images in the output, i.e. a "copy" segmenter
+                    # was used or something is very reproducible.
+                    # We set basinmap to None to be more efficient.
+                    basinmap = None
+
+            else:
+                # The input is identical to the output, because we are using
+                # the same pipeline identifier.
+                basinmap = None
+
+            for hin, feats, importance in feats_raw:
+                # Only consider features that are available in the input
+                # and that are not already in the output.
+                feats = [f for f in feats
+                         if (f in hin["events"] and f not in hout["events"])]
+                if not feats:
+                    continue
+                elif (self.job["basin_strategy"] == "drain"
+                        or importance == "critical"):
+                    # DRAIN: Copy all features over to the output file.
+                    self.logger.debug(f"Transferring {feats} to output file")
+                    copy_features(h5_src=hin,
+                                  h5_dst=hout,
+                                  features=feats,
+                                  mapping=basinmap)
+                else:
+                    # TAP: Create basins for the "optional" features in the
+                    # output file. Note that the "critical" features never
+                    # reach this case.
+                    self.logger.debug(f"Creating basin for {feats}")
+                    # Relative and absolute paths.
+                    pin = pathlib.Path(hin.filename).resolve()
+                    pout = pathlib.Path(hout.filename).resolve().parent
+                    paths = [pin, os.path.relpath(pin, pout)]
+                    hw.store_basin(name="dcnum basin",
+                                   features=feats,
+                                   mapping=basinmap,
+                                   paths=paths,
+                                   description=f"Created with dcnum {version}",
+                                   )
+                self._progress_bn += 1 / len(feats_raw)
+        t_tot = time.perf_counter() - t0
+        self.logger.info(f"Enforcing basin strategy time: {t_tot:.1f}s")
+
     def task_segment_extract(self):
         self.logger.info("Starting segmentation and feature extraction")
         # Start writer thread
```
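The index gymnastics in `task_enforce_basin_strategy` boil down to one NumPy fancy-indexing step. A small sketch, with `get_mapping_indices` re-implemented here for the slice case only (the real helper lives in `dcnum.read` and presumably handles more input types):

```python
import numpy as np


def get_mapping_indices(index_mapping):
    """Slice-only stand-in for dcnum.read.get_mapping_indices (assumption)."""
    if isinstance(index_mapping, slice):
        return np.arange(index_mapping.start or 0,
                         index_mapping.stop,
                         index_mapping.step or 1)
    return np.asarray(index_mapping)


# "index_unmapped" as written by the collector thread: two events were found
# in mapped image 0, three in mapped image 1, one in mapped image 2.
idx_um = np.array([0, 0, 1, 1, 1, 2])

# The input file was opened with index_mapping=slice(1, 100), i.e. mapped
# image 0 is raw image 1. Composing both mappings yields "basinmap0":
basinmap = get_mapping_indices(slice(1, 100))[idx_um]
print(basinmap)  # [1 1 2 2 2 3]
```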
```diff
@@ -501,9 +658,9 @@ class DCNumJobRunner(threading.Thread):
             num_slots = 1
             num_extractors = 1
             num_segmenters = 1
-        elif seg_cls.hardware_processor == "cpu":  #
+        elif seg_cls.hardware_processor == "cpu":  # MPO segmenter
             # We could in principle set the number of slots to one and
-            #
+            # have both number of extractors and number of segmenters set
             # to the total number of CPUs. However, we would need more RAM
             # (for caching the image data) and we also have more overhead.
             # Having two slots shared between all workers is more efficient.
@@ -511,24 +668,32 @@ class DCNumJobRunner(threading.Thread):
             # Split segmentation and feature extraction workers evenly.
             num_extractors = self.job["num_procs"] // 2
             num_segmenters = self.job["num_procs"] - num_extractors
+            # leave one CPU for the writer and the remaining Threads
+            num_segmenters -= 1
         else:  # GPU segmenter
             num_slots = 3
             num_extractors = self.job["num_procs"]
+            # leave one CPU for the writer and the remaining Threads
+            num_extractors -= 1
             num_segmenters = 1
         num_extractors = max(1, num_extractors)
         num_segmenters = max(1, num_segmenters)
         self.job.kwargs["segmenter_kwargs"]["num_workers"] = num_segmenters
+        self.job.kwargs["segmenter_kwargs"]["debug"] = self.job["debug"]
+        slot_chunks = mp_spawn.Array("i", num_slots, lock=False)
+        slot_states = mp_spawn.Array("u", num_slots, lock=False)
 
-
-
+        self.logger.debug(f"Number of slots: {num_slots}")
+        self.logger.debug(f"Number of segmenters: {num_segmenters}")
+        self.logger.debug(f"Number of extractors: {num_extractors}")
 
-        # Initialize thread
+        # Initialize segmenter manager thread
         thr_segm = SegmenterManagerThread(
             segmenter=seg_cls(**self.job["segmenter_kwargs"]),
             image_data=imdat,
+            bg_off=self.dtin["bg_off"] if "bg_off" in self.dtin else None,
             slot_states=slot_states,
             slot_chunks=slot_chunks,
-            debug=self.job["debug"],
         )
         thr_segm.start()
 
```
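The two shared arrays created above are plain `multiprocessing` ctypes arrays; `mp_spawn` is evidently a spawn context obtained elsewhere in the module. A minimal sketch of how such slot bookkeeping works (the slot state character is an assumption, not taken from the diff):

```python
import multiprocessing as mp

mp_spawn = mp.get_context("spawn")

num_slots = 2
# One chunk index (typecode "i": signed int) and one state character
# (typecode "u": wide char) per slot; lock=False returns raw shared
# memory without a synchronizing RLock around it.
slot_chunks = mp_spawn.Array("i", num_slots, lock=False)
slot_states = mp_spawn.Array("u", num_slots, lock=False)

slot_chunks[0] = 5    # e.g. slot 0 currently holds chunk number 5
slot_states[0] = "e"  # hypothetical single-character state code
print(slot_chunks[:], slot_states[:])
```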
```diff
@@ -538,7 +703,7 @@ class DCNumJobRunner(threading.Thread):
             gate=gate.Gate(self.dtin, **self.job["gate_kwargs"]),
             num_extractors=num_extractors,
             log_queue=self.log_queue,
-            log_level=
+            log_level=self.logger.level,
         )
         fe_kwargs["extract_kwargs"] = self.job["feature_kwargs"]
 
@@ -614,21 +779,6 @@ class DCNumJobRunner(threading.Thread):
 
         self.logger.info("Finished segmentation and feature extraction")
 
-    def task_transfer_basin_data(self):
-        with h5py.File(self.path_temp_out, "a") as hout:
-            hd = HDF5Data(hout)
-            for ii, _ in enumerate(hd.basins):
-                hindat, features = hd.get_basin_data(ii)
-                for feat in features:
-                    if feat not in hout["events"]:
-                        self.logger.debug(
-                            f"Transferring {feat} to output file")
-                        h5py.h5o.copy(src_loc=hindat.h5["events"].id,
-                                      src_name=feat.encode(),
-                                      dst_loc=hout["events"].id,
-                                      dst_name=feat.encode(),
-                                      )
-
 
 def join_thread_helper(thr, timeout, retries, logger, name):
     for _ in range(retries):
```
dcnum/logic/job.py
CHANGED
```diff
@@ -1,9 +1,11 @@
 import collections
 import copy
 import inspect
+import logging
 import multiprocessing as mp
 import pathlib
-from typing import Dict
+from typing import Dict, Literal
+import warnings
 
 from ..feat import QueueEventExtractor
 from ..feat.feat_background.base import get_available_background_methods
@@ -27,10 +29,66 @@ class DCNumPipelineJob:
                  feature_kwargs: Dict = None,
                  gate_code: str = "norm",
                  gate_kwargs: Dict = None,
-
+                 basin_strategy: Literal["drain", "tap"] = "drain",
+                 no_basins_in_output: bool = None,
                  num_procs: int = None,
+                 log_level: int = logging.INFO,
                  debug: bool = False,
                  ):
+        """Pipeline job recipe
+
+        Parameters
+        ----------
+        path_in: pathlib.Path | str
+            input data path
+        path_out: pathlib.Path | str
+            output data path
+        data_code: str
+            code of input data reader to use
+        data_kwargs: dict
+            keyword arguments for data reader
+        background_code: str
+            code of background data computer to use
+        background_kwargs: dict
+            keyword arguments for background data computer
+        segmenter_code: str
+            code of segmenter to use
+        segmenter_kwargs: dict
+            keyword arguments for segmenter
+        feature_code: str
+            code of feature extractor
+        feature_kwargs: dict
+            keyword arguments for feature extractor
+        gate_code: str
+            code for gating/event filtering class
+        gate_kwargs: dict
+            keyword arguments for gating/event filtering class
+        basin_strategy: str
+            strategy on how to handle event data; In principle, not all
+            events have to be stored in the output file if basins are
+            defined, linking back to the original file.
+            - You can "drain" all basins which means that the output file
+              will contain all features, but will also be very big.
+            - You can "tap" the basins, including the input file, which means
+              that the output file will be comparatively small.
+        no_basins_in_output: bool
+            Deprecated
+        num_procs: int
+            Number of processes to use
+        log_level: int
+            Logging level to use.
+        debug: bool
+            Whether to set logging level to "DEBUG" and
+            use threads instead of processes
+        """
+        if no_basins_in_output is not None:
+            warnings.warn("The `no_basins_in_output` keyword argument is "
+                          "deprecated. Please use `basin_strategy` instead.")
+            if no_basins_in_output:
+                basin_strategy = "drain"
+            else:
+                basin_strategy = "tap"
+
         #: initialize keyword arguments for this job
         self.kwargs = {}
         spec = inspect.getfullargspec(DCNumPipelineJob.__init__)
@@ -51,6 +109,9 @@ class DCNumPipelineJob:
         if path_out is None:
             pin = pathlib.Path(path_in)
             path_out = pin.with_name(pin.stem + "_dcn.rtdc")
+        # Set logging level to DEBUG in debugging mode
+        if self.kwargs["debug"]:
+            self.kwargs["log_level"] = logging.DEBUG
         self.kwargs["path_out"] = pathlib.Path(path_out)
         # Set default mask kwargs for segmenter
         self.kwargs["segmenter_kwargs"].setdefault("kwargs_mask", {})
```
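A hedged usage sketch of the new keyword arguments (the import path and the input file name are assumptions; the other names appear in the signature above, and item access mirrors how `ctrl.py` reads `self.job["..."]`):

```python
from dcnum.logic import DCNumPipelineJob

# "tap" keeps the output small by creating basins that link optional
# features back to the input file; "drain" (the default) copies everything.
job = DCNumPipelineJob(path_in="measurement.rtdc",
                       basin_strategy="tap",
                       num_procs=4)
print(job["path_out"])  # defaults to measurement_dcn.rtdc next to the input
```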
dcnum/logic/json_encoder.py
CHANGED
```diff
@@ -13,5 +13,7 @@ class ExtendedJSONEncoder(json.JSONEncoder):
             return int(obj)
         elif isinstance(obj, np.bool_):
             return bool(obj)
+        elif isinstance(obj, slice):
+            return "PYTHON-SLICE", (obj.start, obj.stop, obj.step)
         # Let the base class default method raise the TypeError
         return json.JSONEncoder.default(self, obj)
```
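With this branch, a `slice` (such as an `index_mapping` value) survives JSON serialization as a tagged list. A self-contained sketch re-creating the encoder's `default` method (the `np.integer` check is an assumption about the earlier branch not shown in the hunk):

```python
import json

import numpy as np


class ExtendedJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):  # assumed earlier branch
            return int(obj)
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, slice):
            # Serialized as a tagged tuple so a decoder can recognize it.
            return "PYTHON-SLICE", (obj.start, obj.stop, obj.step)
        return json.JSONEncoder.default(self, obj)


print(json.dumps({"index_mapping": slice(1, 100)}, cls=ExtendedJSONEncoder))
# {"index_mapping": ["PYTHON-SLICE", [1, 100, null]]}
```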
dcnum/meta/ppid.py
CHANGED
```diff
@@ -10,7 +10,7 @@ import warnings
 
 #: Increment this string if there are breaking changes that make
 #: previous pipelines unreproducible.
-DCNUM_PPID_GENERATION = "
+DCNUM_PPID_GENERATION = "10"
 
 
 class ClassWithPPIDCapabilities(Protocol):
@@ -59,7 +59,9 @@ def convert_to_dtype(value, dtype):
 
 
 def get_class_method_info(class_obj: ClassWithPPIDCapabilities,
-                          static_kw_methods: List = None
+                          static_kw_methods: List = None,
+                          static_kw_defaults: Dict = None,
+                          ):
     """Return dictionary of class info with static keyword methods docs
 
     Parameters
@@ -69,7 +71,16 @@ def get_class_method_info(class_obj: ClassWithPPIDCapabilities,
     static_kw_methods: list of callable
         The methods to inspect; all kwargs-only keyword arguments
         are extracted.
+    static_kw_defaults: dict
+        If a key in this dictionary matches an item in `static_kw_methods`,
+        then these are the default values returned in the "defaults"
+        dictionary. This is used in cases where a base class does
+        implement some annotations, but the subclass does not actually
+        use them, because e.g. they are taken from a property such as is
+        the case for the mask postprocessing of segmenter classes.
     """
+    if static_kw_defaults is None:
+        static_kw_defaults = {}
     doc = class_obj.__doc__ or class_obj.__init__.__doc__
     info = {
         "code": class_obj.get_ppid_code(),
@@ -82,7 +93,10 @@ def get_class_method_info(class_obj: ClassWithPPIDCapabilities,
     for mm in static_kw_methods:
         meth = getattr(class_obj, mm)
         spec = inspect.getfullargspec(meth)
-
+        if mm_defaults := static_kw_defaults.get(mm):
+            defau[mm] = mm_defaults
+        else:
+            defau[mm] = spec.kwonlydefaults or {}
         annot[mm] = spec.annotations
     info["defaults"] = defau
     info["annotations"] = annot
```
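What `spec.kwonlydefaults` yields for a typical static keyword method, as a self-contained sketch (`DemoSegmenter` and its keyword names are hypothetical; dcnum's real segmenters expose their own mask-postprocessing keywords):

```python
import inspect


class DemoSegmenter:
    @staticmethod
    def process_mask(mask, *, clear_border: bool = True,
                     fill_holes: bool = True):
        return mask


spec = inspect.getfullargspec(DemoSegmenter.process_mask)
print(spec.kwonlydefaults)  # {'clear_border': True, 'fill_holes': True}
print(spec.annotations)     # {'clear_border': <class 'bool'>, 'fill_holes': <class 'bool'>}
```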
dcnum/read/__init__.py
CHANGED