dcnum-0.20.3-py3-none-any.whl → dcnum-0.21.0-py3-none-any.whl
- dcnum/_version.py +2 -2
- dcnum/feat/event_extractor_manager_thread.py +27 -20
- dcnum/feat/queue_event_extractor.py +4 -4
- dcnum/logic/ctrl.py +13 -6
- dcnum/logic/job.py +9 -1
- dcnum/meta/ppid.py +1 -1
- dcnum/segm/segmenter.py +2 -2
- dcnum/segm/segmenter_cpu.py +2 -0
- dcnum/segm/segmenter_gpu.py +0 -1
- dcnum/segm/segmenter_manager_thread.py +25 -27
- {dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/METADATA +1 -1
- {dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/RECORD +15 -15
- {dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/LICENSE +0 -0
- {dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/WHEEL +0 -0
- {dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/top_level.txt +0 -0
dcnum/_version.py
CHANGED
dcnum/feat/event_extractor_manager_thread.py
CHANGED
@@ -46,8 +46,8 @@ class EventExtractorManagerThread(threading.Thread):
             The queue the writer uses. We monitor this queue. If it
             fills up, we take a break.
         debug:
-            Whether to run in debugging mode which means
-
+            Whether to run in debugging mode which means only one
+            event extraction thread (`num_workers` has no effect).
         """
         super(EventExtractorManagerThread, self).__init__(
             name="EventExtractorManager", *args, **kwargs)
@@ -104,25 +104,32 @@ class EventExtractorManagerThread(threading.Thread):
                     f"Stalled {stall_time + 1:.1f}s for slow writer "
                     f"({ldq} chunks queued)")

-            cur_slot = 0
             unavailable_slots = 0
+            found_free_slot = False
             # Check all slots for segmented labels
-            while
-                #
-                #
-                #
-                #
-
-
-
-
-                #
-
-
-
-
-
-
+            while not found_free_slot:
+                # We sort the slots according to the slot chunks so that we
+                # always process the slot with the smallest slot chunk number
+                # first. Initially, the slot_chunks array is filled with
+                # zeros, but the segmenter fills up the slots with the lowest
+                # number first.
+                for cur_slot in np.argsort(self.slot_chunks):
+                    # - "e" there is data from the segmenter (the extractor
+                    #   can take it and process it)
+                    # - "s" the extractor processed the data and is waiting
+                    #   for the segmenter
+                    if self.slot_states[cur_slot] == "e":
+                        # The segmenter has something for us in this slot.
+                        found_free_slot = True
+                        break
+                    else:
+                        # Try another slot.
+                        unavailable_slots += 1
+                        cur_slot = (cur_slot + 1) % num_slots
+                        if unavailable_slots >= num_slots:
+                            # There is nothing to do, try to avoid 100% CPU
+                            unavailable_slots = 0
+                            time.sleep(.1)

             t1 = time.monotonic()

@@ -149,7 +156,7 @@ class EventExtractorManagerThread(threading.Thread):
             # We are done here. The segmenter may continue its deed.
             self.slot_states[cur_slot] = "w"

-            self.logger.debug(f"Extracted
+            self.logger.debug(f"Extracted chunk {chunk} in slot {cur_slot}")
             self.t_count += time.monotonic() - t1

             chunks_processed += 1
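Both manager threads now share this polling pattern. A minimal standalone sketch of the pattern (the helper name `wait_for_slot` and its arguments are illustrative, not dcnum API):

import time
import numpy as np

def wait_for_slot(slot_states, slot_chunks, want="e", poll=0.1):
    # Scan slots ordered by their chunk number and return the first
    # slot whose state matches `want`; sleep briefly after a fruitless
    # full pass to avoid spinning at 100% CPU.
    while True:
        for cur_slot in np.argsort(slot_chunks):
            if slot_states[cur_slot] == want:
                return cur_slot
        time.sleep(poll)

# Example: slot 1 carries the oldest chunk and is ready ("e"):
print(wait_for_slot(["w", "e"], [1, 0]))  # -> 1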
dcnum/feat/queue_event_extractor.py
CHANGED
@@ -36,7 +36,7 @@ class QueueEventExtractor:
                  finalize_extraction: mp.Value,
                  invalid_mask_counter: mp.Value,
                  worker_monitor: mp.RawArray,
-                 log_level: int =
+                 log_level: int = None,
                  extract_kwargs: dict = None,
                  worker_index: int = None,
                  *args, **kwargs):
@@ -103,7 +103,7 @@ class QueueEventExtractor:
         # it looks like we have the same PID as the parent process. We
         # are setting up logging in `run`.
         self.logger = None
-        self.log_level = log_level
+        self.log_level = log_level or logging.getLogger("dcnum").level
         #: Shared array of length `len(data)` into which the number of
         #: events per frame is written.
         self.feat_nevents = feat_nevents
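Note on the fallback above: `log_level or logging.getLogger("dcnum").level` resolves a missing level to the parent "dcnum" logger's level. Since `logging.NOTSET` equals 0 and is falsy, an explicit `NOTSET` also triggers the fallback, which matches its "inherit" semantics. A small self-contained check:

import logging

def resolve_log_level(log_level=None):
    # Same expression as in the diff above.
    return log_level or logging.getLogger("dcnum").level

logging.getLogger("dcnum").setLevel(logging.INFO)
assert resolve_log_level() == logging.INFO                # None -> fallback
assert resolve_log_level(logging.DEBUG) == logging.DEBUG  # explicit wins
assert resolve_log_level(logging.NOTSET) == logging.INFO  # 0 is falsy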
@@ -124,7 +124,7 @@ class QueueEventExtractor:
                             gate: Gate,
                             num_extractors: int,
                             log_queue: mp.Queue,
-                            log_level: int =
+                            log_level: int = None,
                             ):
        """Get initialization arguments for :cass:`.QueueEventExtractor`

@@ -172,7 +172,7 @@ class QueueEventExtractor:
         args["finalize_extraction"] = mp_spawn.Value("b", False)
         args["invalid_mask_counter"] = mp_spawn.Value("L", 0)
         args["worker_monitor"] = mp_spawn.RawArray("L", num_extractors)
-        args["log_level"] = log_level
+        args["log_level"] = log_level or logging.getLogger("dcnum").level
         return args

     def get_events_from_masks(self, masks, data_index, *,
dcnum/logic/ctrl.py
CHANGED
@@ -90,8 +90,7 @@ class DCNumJobRunner(threading.Thread):
         # Set up logging
         # General logger for this job
         self.main_logger = logging.getLogger("dcnum")
-        self.main_logger.setLevel(
-            logging.DEBUG if job["debug"] else logging.INFO)
+        self.main_logger.setLevel(job["log_level"])
         # Log file output in target directory
         self.path_log = job["path_out"].with_suffix(".log")
         self.path_log.parent.mkdir(exist_ok=True, parents=True)
@@ -659,15 +658,24 @@ class DCNumJobRunner(threading.Thread):
             # Split segmentation and feature extraction workers evenly.
             num_extractors = self.job["num_procs"] // 2
             num_segmenters = self.job["num_procs"] - num_extractors
+            # leave one CPU for the writer and the remaining Threads
+            num_segmenters -= 1
         else:  # GPU segmenter
             num_slots = 3
             num_extractors = self.job["num_procs"]
+            # leave one CPU for the writer and the remaining Threads
+            num_extractors -= 1
             num_segmenters = 1
         num_extractors = max(1, num_extractors)
         num_segmenters = max(1, num_segmenters)
         self.job.kwargs["segmenter_kwargs"]["num_workers"] = num_segmenters
-
-
+        self.job.kwargs["segmenter_kwargs"]["debug"] = self.job["debug"]
+        slot_chunks = mp_spawn.Array("i", num_slots, lock=False)
+        slot_states = mp_spawn.Array("u", num_slots, lock=False)
+
+        self.logger.debug(f"Number of slots: {num_slots}")
+        self.logger.debug(f"Number of segmenters: {num_segmenters}")
+        self.logger.debug(f"Number of extractors: {num_extractors}")

         # Initialize segmenter manager thread
         thr_segm = SegmenterManagerThread(
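Worked through for the CPU-segmenter branch with `num_procs=8`, the new split yields four extractors and three segmenters, with one CPU left over for the writer and the manager threads:

num_procs = 8
num_extractors = num_procs // 2                   # 4
num_segmenters = num_procs - num_extractors - 1   # 3 (one CPU spared)
num_extractors = max(1, num_extractors)           # guards small num_procs
num_segmenters = max(1, num_segmenters)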
@@ -676,7 +684,6 @@ class DCNumJobRunner(threading.Thread):
             bg_off=self.dtin["bg_off"] if "bg_off" in self.dtin else None,
             slot_states=slot_states,
             slot_chunks=slot_chunks,
-            debug=self.job["debug"],
             )
         thr_segm.start()

@@ -686,7 +693,7 @@ class DCNumJobRunner(threading.Thread):
             gate=gate.Gate(self.dtin, **self.job["gate_kwargs"]),
             num_extractors=num_extractors,
             log_queue=self.log_queue,
-            log_level=
+            log_level=self.logger.level,
             )
         fe_kwargs["extract_kwargs"] = self.job["feature_kwargs"]

dcnum/logic/job.py
CHANGED
@@ -1,6 +1,7 @@
 import collections
 import copy
 import inspect
+import logging
 import multiprocessing as mp
 import pathlib
 from typing import Dict, Literal

@@ -31,6 +32,7 @@ class DCNumPipelineJob:
                  basin_strategy: Literal["drain", "tap"] = "drain",
                  no_basins_in_output: bool = None,
                  num_procs: int = None,
+                 log_level: int = logging.INFO,
                  debug: bool = False,
                  ):
         """Pipeline job recipe

@@ -73,8 +75,11 @@ class DCNumPipelineJob:
             Deprecated
         num_procs: int
             Number of processes to use
+        log_level: int
+            Logging level to use.
         debug: bool
-            Whether to
+            Whether to set logging level to "DEBUG" and
+            use threads instead of processes
         """
         if no_basins_in_output is not None:
             warnings.warn("The `no_basins_in_output` keyword argument is "

@@ -104,6 +109,9 @@ class DCNumPipelineJob:
         if path_out is None:
             pin = pathlib.Path(path_in)
             path_out = pin.with_name(pin.stem + "_dcn.rtdc")
+        # Set logging level to DEBUG in debugging mode
+        if self.kwargs["debug"]:
+            self.kwargs["log_level"] = logging.DEBUG
         self.kwargs["path_out"] = pathlib.Path(path_out)
         # Set default mask kwargs for segmenter
         self.kwargs["segmenter_kwargs"].setdefault("kwargs_mask", {})
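A usage sketch for the new keyword (the input path is hypothetical and would have to point to an existing .rtdc file; this assumes `DCNumPipelineJob` is importable from `dcnum.logic` and supports item access, as the runner code above relies on):

import logging
from dcnum.logic import DCNumPipelineJob

# Run quietly:
job = DCNumPipelineJob(path_in="measurement.rtdc",
                       log_level=logging.WARNING)
# debug=True overrides log_level during initialization:
job_dbg = DCNumPipelineJob(path_in="measurement.rtdc", debug=True)
print(job_dbg["log_level"] == logging.DEBUG)  # True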
dcnum/meta/ppid.py
CHANGED
dcnum/segm/segmenter.py
CHANGED
@@ -19,7 +19,7 @@ class Segmenter(abc.ABC):
     #: Whether to enable mask post-processing. If disabled, you should
     #: make sure that your mask is properly defined and cleaned or you
     #: have to call `process_mask` in your `segment_approach` implementation.
-    mask_postprocessing =
+    mask_postprocessing = True
     #: Default keyword arguments for mask post-processing. See `process_mask`
     #: for available options.
     mask_default_kwargs = {}

@@ -38,7 +38,7 @@ class Segmenter(abc.ABC):
         kwargs_mask: dict
             Keyword arguments for mask post-processing (see `process_mask`)
         debug: bool
-
+            Enable debugging mode (e.g. CPU segmenter runs in one thread)
         kwargs:
             Additional, optional keyword arguments for `segment_batch`.
         """
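For context, these class attributes are meant to be overridden by concrete segmenters. A hypothetical subclass, with a simplified `segment_approach` that is not copied from dcnum:

class ThresholdSegmenter(Segmenter):
    # Run `process_mask` on every mask from `segment_approach`:
    mask_postprocessing = True
    # Defaults forwarded to `process_mask` (left empty here; see
    # `process_mask` for the options it actually accepts):
    mask_default_kwargs = {}

    def segment_approach(self, image):
        # Illustrative fixed threshold on a background-corrected image.
        return image < -6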
dcnum/segm/segmenter_cpu.py
CHANGED
@@ -189,9 +189,11 @@ class CPUSegmenter(Segmenter, abc.ABC):
         if self.debug:
             worker_cls = CPUSegmenterWorkerThread
             num_workers = 1
+            self.logger.debug("Running with one worker in main thread")
         else:
             worker_cls = CPUSegmenterWorkerProcess
             num_workers = min(self.num_workers, image_data.shape[0])
+            self.logger.debug(f"Running with {num_workers} workers")

         if not self._mp_workers:
             step_size = batch_size // num_workers
dcnum/segm/segmenter_gpu.py
CHANGED
dcnum/segm/segmenter_manager_thread.py
CHANGED
@@ -18,7 +18,6 @@ class SegmenterManagerThread(threading.Thread):
                  slot_states: mp.Array,
                  slot_chunks: mp.Array,
                  bg_off: np.ndarray = None,
-                 debug: bool = False,
                  *args, **kwargs):
         """Manage the segmentation of image data

@@ -43,10 +42,6 @@ class SegmenterManagerThread(threading.Thread):
             1d array containing additional background image offset values
             that are added to each background image before subtraction
             from the input image
-        debug:
-            Whether to run in debugging mode (more verbose messages and
-            CPU-based segmentation is done in one single thread instead
-            of in multiple subprocesses).

         Notes
         -----
@@ -80,33 +75,36 @@ class SegmenterManagerThread(threading.Thread):
         self.labels_list = [None] * len(self.slot_states)
         #: Time counter for segmentation
         self.t_count = 0
-        #: Whether running in debugging mode
-        self.debug = debug

     def run(self):
         num_slots = len(self.slot_states)
         # We iterate over all the chunks of the image data.
         for chunk in self.image_data.iter_chunks():
-
-
+            unavailable_slots = 0
+            found_free_slot = False
             # Wait for a free slot to perform segmentation (compute labels)
-            while
-                #
-                #
-                #
-                #
-
-                #
-                #
-
-
-
-
-
-
-
-
-
+            while not found_free_slot:
+                # We sort the slots according to the slot chunks so that we
+                # always process the slot with the smallest slot chunk number
+                # first. Initially, the slot_chunks array is filled with
+                # zeros, but we populate it here.
+                for cur_slot in np.argsort(self.slot_chunks):
+                    # - "e" there is data from the segmenter (the extractor
+                    #   can take it and process it)
+                    # - "s" the extractor processed the data and is waiting
+                    #   for the segmenter
+                    if self.slot_states[cur_slot] != "e":
+                        # It's the segmenter's turn. Note that we use '!= "e"',
+                        # because the initial value is "\x00".
+                        found_free_slot = True
+                        break
+                    else:
+                        # Try another slot.
+                        unavailable_slots += 1
+                        if unavailable_slots >= num_slots:
+                            # There is nothing to do, try to avoid 100% CPU
+                            unavailable_slots = 0
+                            time.sleep(.1)

             t1 = time.monotonic()
@@ -125,7 +123,7 @@ class SegmenterManagerThread(threading.Thread):
             # This must be done last: Let the extractor know that this
             # slot is ready for processing.
             self.slot_states[cur_slot] = "e"
-            self.logger.debug(f"Segmented
+            self.logger.debug(f"Segmented chunk {chunk} in slot {cur_slot}")

             self.t_count += time.monotonic() - t1
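The `!= "e"` comparison above works because the shared state array created in ctrl.py, `mp_spawn.Array("u", num_slots, lock=False)`, starts out zero-filled: every slot initially reads as "\x00", so the segmenter may claim any slot on its first pass. A quick demonstration:

import multiprocessing as mp

mp_spawn = mp.get_context("spawn")
slot_states = mp_spawn.Array("u", 3, lock=False)  # unicode typecode
print(list(slot_states))  # ['\x00', '\x00', '\x00'] -- none is "e"
slot_states[0] = "e"      # hand slot 0 over to the extractor
print(slot_states[0])     # e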
{dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
 dcnum/__init__.py,sha256=hcawIKS7utYiOyVhOAX9t7K3xYzP1b9862VV0b6qSrQ,74
-dcnum/_version.py,sha256=
+dcnum/_version.py,sha256=ey0T2QT7DroTwq1OFrMquZ-sNrLzojab2ikTifByhUQ,413
 dcnum/feat/__init__.py,sha256=jUJYWTD3VIoDNKrmryXbjHb1rGwYtK4b7VPWihYgUoo,325
-dcnum/feat/event_extractor_manager_thread.py,sha256=
+dcnum/feat/event_extractor_manager_thread.py,sha256=gmvWNMmMTuUENVbDAn59PEp4LiRDE0PQA4VYhfWQ8EY,7825
 dcnum/feat/gate.py,sha256=svbObmqpYdqPawpfrsEjTiUPJXf24GrNi8PXTKT-z44,7225
-dcnum/feat/queue_event_extractor.py,sha256=
+dcnum/feat/queue_event_extractor.py,sha256=bNdYzMPto37FCIgBbBw-YRQ2TlTpJKCWj9r_Y4sak3E,15700
 dcnum/feat/feat_background/__init__.py,sha256=OTmMuazHNaSrZb2XW4cnJ6PlgJLbKrPbaidpEixYa0A,341
 dcnum/feat/feat_background/base.py,sha256=phZdyOrHQPjvYlw1JQ8DkdXw5H2-eE1LfLGqCAo1rlo,7965
 dcnum/feat/feat_background/bg_copy.py,sha256=PK8x4_Uph-_A6uszZC5uhe1gD1dSRdHnDMEsN0HSGHA,1034

@@ -20,12 +20,12 @@ dcnum/feat/feat_texture/__init__.py,sha256=6StM9S540UVtdFFR3bHa7nfCTomeVdoo7Uy9C
 dcnum/feat/feat_texture/common.py,sha256=COXHpXS-7DMouGu3WF83I76L02Sr7P9re4lxajh6g0E,439
 dcnum/feat/feat_texture/tex_all.py,sha256=eGjjNfPpfZw7FA_VNFCIMiU38KD0qcGbxLciYy-tCiA,4097
 dcnum/logic/__init__.py,sha256=7J3GrwJInNQbrLk61HRIV7X7p69TAIbMYpR34hh6u14,177
-dcnum/logic/ctrl.py,sha256=
-dcnum/logic/job.py,sha256=
+dcnum/logic/ctrl.py,sha256=aqbYi3OsqcjATCXAhmTSrOUwyJRS9evOJuHZcdS0sDg,34792
+dcnum/logic/job.py,sha256=H1uDZ1nnNHNWWCe6mS8OWB0Uxc6XUKLISx5xExeplZY,7015
 dcnum/logic/json_encoder.py,sha256=cxMnqisbKEVf-rVcw6rK2BBAb6iz_hKFaGl81kK36lQ,571
 dcnum/meta/__init__.py,sha256=AVqRgyKXO1orKnE305h88IBvoZ1oz6X11HN1WP5nGvg,60
 dcnum/meta/paths.py,sha256=J_ikeHzd7gEeRgAKjuayz3x6q4h1fOiDadM-ZxhAGm4,1053
-dcnum/meta/ppid.py,sha256=
+dcnum/meta/ppid.py,sha256=2Lw7sSDFMZFaAiKM7kLB7ozBQJru8rV1DbUKdU5g5w8,7706
 dcnum/read/__init__.py,sha256=ksLdV8EkOU3EPje8teCOSehcUeGAZfg9TQ5ltuEUgls,216
 dcnum/read/cache.py,sha256=lisrGG7AyvVitf0h92wh5FvYCsxa0pWyGcAyYwGP-LQ,6471
 dcnum/read/const.py,sha256=GG9iyXDtEldvJYOBnhZjlimzIeBMAt4bSr2-xn2gzzc,464

@@ -33,16 +33,16 @@ dcnum/read/hdf5_data.py,sha256=Yyq02UTILc5ZgIQXpR9Y0wuX2WT8s0g23PraI7KxmJY,23489
 dcnum/read/mapped.py,sha256=UryArlrIsHxjOyimBL2Nooi3r73zuGtnGdqdxa6PK_g,3076
 dcnum/segm/__init__.py,sha256=iiq_1A9DU5wMUcKnsZ53E7NyzCkbZCJeUDimzunE-OM,247
 dcnum/segm/segm_thresh.py,sha256=W9f9--aYmXpFjv3IASB6I90V40PJcIe_Jt_1iK-eHhw,946
-dcnum/segm/segmenter.py,sha256=
-dcnum/segm/segmenter_cpu.py,sha256=
-dcnum/segm/segmenter_gpu.py,sha256=
-dcnum/segm/segmenter_manager_thread.py,sha256=
+dcnum/segm/segmenter.py,sha256=aCu7WfwOXU6nza1m3sAW7WhywR_jz3kmyfF3NCQo24A,12260
+dcnum/segm/segmenter_cpu.py,sha256=e6XTlEUqXR2HBKZJxumpCCHprdX8Ql7dsbIcfdf4dH4,10831
+dcnum/segm/segmenter_gpu.py,sha256=opgIBSL9lYFOo405xGoLUt2GuxpVQPMlPxcNHpQVt3s,1930
+dcnum/segm/segmenter_manager_thread.py,sha256=LbNBg1R19GaDKow8pgfOFx1sE6ulS27Ky47_pJBvnK4,5850
 dcnum/write/__init__.py,sha256=QvWHeZmjHI18i-YlGYuzN3i7dVWY9UCReKchrJ-gif0,260
 dcnum/write/deque_writer_thread.py,sha256=ao7F1yrVKyufgC4rC0Y2_Vt7snuT6KpI7W2qVxcjdhk,1994
 dcnum/write/queue_collector_thread.py,sha256=d_WfdsZdFnFsiAY0zVMwUlA4juIMeiWYmE_-rezBQCE,11734
 dcnum/write/writer.py,sha256=e6J8YVqhS7kzkpPIMoDMokJpqSy1WWNdOrwaJof1oVc,15601
-dcnum-0.
-dcnum-0.
-dcnum-0.
-dcnum-0.
-dcnum-0.
+dcnum-0.21.0.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
+dcnum-0.21.0.dist-info/METADATA,sha256=CUlOprvoa-ThjgNoDt_liTNSR1Ao3hBdjb5J4FDn_Fo,2194
+dcnum-0.21.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+dcnum-0.21.0.dist-info/top_level.txt,sha256=Hmh38rgG_MFTVDpUDGuO2HWTSq80P585Het4COQzFTg,6
+dcnum-0.21.0.dist-info/RECORD,,
{dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/LICENSE: File without changes
{dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/WHEEL: File without changes
{dcnum-0.20.3.dist-info → dcnum-0.21.0.dist-info}/top_level.txt: File without changes