dcnum 0.13.2-py3-none-any.whl → 0.23.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dcnum might be problematic.

Files changed (55)
  1. dcnum/_version.py +2 -2
  2. dcnum/feat/__init__.py +2 -1
  3. dcnum/feat/event_extractor_manager_thread.py +67 -33
  4. dcnum/feat/feat_background/__init__.py +3 -12
  5. dcnum/feat/feat_background/base.py +80 -65
  6. dcnum/feat/feat_background/bg_copy.py +31 -0
  7. dcnum/feat/feat_background/bg_roll_median.py +38 -30
  8. dcnum/feat/feat_background/bg_sparse_median.py +96 -45
  9. dcnum/feat/feat_brightness/__init__.py +1 -0
  10. dcnum/feat/feat_brightness/bright_all.py +41 -6
  11. dcnum/feat/feat_contour/__init__.py +4 -0
  12. dcnum/feat/{feat_moments/mt_legacy.py → feat_contour/moments.py} +32 -8
  13. dcnum/feat/feat_contour/volume.py +174 -0
  14. dcnum/feat/feat_texture/__init__.py +1 -0
  15. dcnum/feat/feat_texture/tex_all.py +28 -1
  16. dcnum/feat/gate.py +92 -70
  17. dcnum/feat/queue_event_extractor.py +139 -70
  18. dcnum/logic/__init__.py +5 -0
  19. dcnum/logic/ctrl.py +794 -0
  20. dcnum/logic/job.py +184 -0
  21. dcnum/logic/json_encoder.py +19 -0
  22. dcnum/meta/__init__.py +1 -0
  23. dcnum/meta/paths.py +30 -0
  24. dcnum/meta/ppid.py +66 -9
  25. dcnum/read/__init__.py +1 -0
  26. dcnum/read/cache.py +109 -77
  27. dcnum/read/const.py +6 -4
  28. dcnum/read/hdf5_data.py +190 -31
  29. dcnum/read/mapped.py +87 -0
  30. dcnum/segm/__init__.py +6 -15
  31. dcnum/segm/segm_thresh.py +7 -14
  32. dcnum/segm/segm_torch/__init__.py +19 -0
  33. dcnum/segm/segm_torch/segm_torch_base.py +125 -0
  34. dcnum/segm/segm_torch/segm_torch_mpo.py +71 -0
  35. dcnum/segm/segm_torch/segm_torch_sto.py +88 -0
  36. dcnum/segm/segm_torch/torch_model.py +95 -0
  37. dcnum/segm/segm_torch/torch_postproc.py +93 -0
  38. dcnum/segm/segm_torch/torch_preproc.py +114 -0
  39. dcnum/segm/segmenter.py +245 -96
  40. dcnum/segm/segmenter_manager_thread.py +39 -28
  41. dcnum/segm/{segmenter_cpu.py → segmenter_mpo.py} +137 -43
  42. dcnum/segm/segmenter_sto.py +110 -0
  43. dcnum/write/__init__.py +3 -1
  44. dcnum/write/deque_writer_thread.py +15 -5
  45. dcnum/write/queue_collector_thread.py +14 -17
  46. dcnum/write/writer.py +225 -55
  47. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/METADATA +4 -2
  48. dcnum-0.23.1.dist-info/RECORD +55 -0
  49. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/WHEEL +1 -1
  50. dcnum/feat/feat_moments/__init__.py +0 -3
  51. dcnum/segm/segmenter_gpu.py +0 -45
  52. dcnum-0.13.2.dist-info/RECORD +0 -40
  53. dcnum/feat/{feat_moments/ct_opencv.py → feat_contour/contour.py} +0 -0
  54. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/LICENSE +0 -0
  55. {dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/top_level.txt +0 -0
dcnum/write/writer.py CHANGED
@@ -1,7 +1,8 @@
 import hashlib
 import json
 import pathlib
-from typing import List
+from typing import Dict, List
+import warnings
 
 import h5py
 import hdf5plugin
@@ -10,16 +11,47 @@ import numpy as np
 from .._version import version
 
 
+class CreatingFileWithoutBasinWarning(UserWarning):
+    """Issued when creating a basin-based dataset without basins"""
+    pass
+
+
 class HDF5Writer:
-    def __init__(self, path, mode="a", ds_kwds=None):
-        """Write deformability cytometry HDF5 data"""
-        self.h5 = h5py.File(path, mode=mode, libver="latest")
+    def __init__(self,
+                 # TODO: make this a mandatory argument when `path` is
+                 #       properly removed
+                 obj: h5py.File | pathlib.Path | str = None,
+                 mode: str = "a",
+                 ds_kwds: Dict = None,
+                 path: pathlib.Path | str = None,
+                 ):
+        """Write deformability cytometry HDF5 data
+
+        Parameters
+        ----------
+        obj: h5py.File | pathlib.Path | str
+            object to instantiate the writer from; If this is already
+            a :class:`h5py.File` object, then it is used, otherwise the
+            argument is passed to :class:`h5py.File`
+        mode: str
+            opening mode when using :class:`h5py.File`
+        ds_kwds: Dict
+            keyword arguments with which to initialize new Datasets
+            (e.g. compression)
+        """
+        if path is not None:
+            obj = path
+            warnings.warn("The `path` keyword argument is deprecated, use "
+                          "`obj` instead",
+                          DeprecationWarning)
+        if isinstance(obj, h5py.File):
+            self.h5 = obj
+            self.h5_owned = False
+        else:
+            self.h5 = h5py.File(obj, mode=mode, libver="latest")
+            self.h5_owned = True
         self.events = self.h5.require_group("events")
-        if ds_kwds is None:
-            ds_kwds = {}
-        for key, val in dict(hdf5plugin.Zstd(clevel=5)).items():
-            ds_kwds.setdefault(key, val)
-        ds_kwds.setdefault("fletcher32", True)
+        ds_kwds = set_default_filter_kwargs(ds_kwds)
         self.ds_kwds = ds_kwds
 
     def __enter__(self):
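
The reworked constructor accepts either a path or an already-open h5py.File; in the latter case the writer does not take ownership of the handle. A minimal sketch of both entry points (the file name is invented; HDF5Writer is assumed to be re-exported from dcnum.write):

    import h5py
    from dcnum.write import HDF5Writer

    # Path-based: the writer opens the file itself (h5_owned=True),
    # so close() also closes the underlying HDF5 file.
    with HDF5Writer("out.rtdc", mode="w") as hw:
        hw.h5.attrs["experiment:sample"] = "calibration beads"

    # Handle-based: close() only flushes (h5_owned=False), leaving the
    # caller in control of the file's lifetime.
    with h5py.File("out.rtdc", "a", libver="latest") as h5:
        hw = HDF5Writer(h5)
        hw.close()  # flush only; `h5` stays open until the with-block ends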
@@ -29,41 +61,43 @@ class HDF5Writer:
         self.close()
 
     def close(self):
-        self.h5.close()
+        self.h5.flush()
+        if self.h5_owned:
+            self.h5.close()
 
     @staticmethod
-    def get_best_nd_chunks(item_shape):
-        """Return best chunks for image data
+    def get_best_nd_chunks(item_shape, feat_dtype=np.float64):
+        """Return best chunks for HDF5 datasets
 
         Chunking has performance implications. It’s recommended to keep the
-        total size of your chunks between 10 KiB and 1 MiB. This number defines
-        the maximum chunk size as well as half the maximum cache size for each
-        dataset.
+        total size of dataset chunks between 10 KiB and 1 MiB. This number
+        defines the maximum chunk size as well as half the maximum cache
+        size for each dataset.
         """
-        num_bytes = 1024**2  # between 10KiB and 1 MiB
-        if len(item_shape) == 0:
-            # scalar feature
-            chunk_size_int = 10000
-        else:
-            event_size = np.prod(item_shape) * np.dtype(np.uint8).itemsize
-            chunk_size = num_bytes / event_size
-            chunk_size_int = max(1, int(np.floor(chunk_size)))
+        # set image feature chunk size to approximately 1MiB
+        num_bytes = 1024 ** 2
+        # Note that `np.prod(()) == 1`
+        event_size = np.prod(item_shape) * np.dtype(feat_dtype).itemsize
+        chunk_size = num_bytes / event_size
+        # Set minimum chunk size to 10 so that we can have at least some
+        # compression performance.
+        chunk_size_int = max(10, int(np.floor(chunk_size)))
         return tuple([chunk_size_int] + list(item_shape))
 
-    def require_feature(self, feat, item_shape, dtype, ds_kwds=None):
+    def require_feature(self, feat, item_shape, feat_dtype, ds_kwds=None):
         """Create a new feature in the "events" group"""
-
-        if ds_kwds is None:
-            ds_kwds = {}
-        for key in self.ds_kwds:
-            ds_kwds.setdefault(key, self.ds_kwds[key])
         if feat not in self.events:
+            if ds_kwds is None:
+                ds_kwds = {}
+            for key in self.ds_kwds:
+                ds_kwds.setdefault(key, self.ds_kwds[key])
             dset = self.events.create_dataset(
                 feat,
                 shape=tuple([0] + list(item_shape)),
-                dtype=dtype,
+                dtype=feat_dtype,
                 maxshape=tuple([None] + list(item_shape)),
-                chunks=self.get_best_nd_chunks(item_shape),
+                chunks=self.get_best_nd_chunks(item_shape,
+                                               feat_dtype=feat_dtype),
                 **ds_kwds)
             if len(item_shape) == 2:
                 dset.attrs.create('CLASS', np.string_('IMAGE'))
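
To see what the new chunking rule produces, the arithmetic can be reproduced standalone: chunks now target roughly 1 MiB based on the actual feature dtype, with a floor of ten events per chunk. A sketch mirroring get_best_nd_chunks:

    import numpy as np

    def best_chunks(item_shape, feat_dtype=np.float64):
        # ~1 MiB per chunk, at least 10 events (mirrors get_best_nd_chunks)
        num_bytes = 1024 ** 2
        event_size = np.prod(item_shape) * np.dtype(feat_dtype).itemsize
        chunk_size_int = max(10, int(np.floor(num_bytes / event_size)))
        return tuple([chunk_size_int] + list(item_shape))

    print(best_chunks(()))                   # scalar float64: (131072,)
    print(best_chunks((80, 320), np.uint8))  # 80x320 uint8 image: (40, 80, 320)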
@@ -81,6 +115,7 @@ class HDF5Writer:
                     paths: List[str | pathlib.Path],
                     features: List[str] = None,
                     description: str | None = None,
+                    mapping: np.ndarray = None
                     ):
         """Write an HDF5-based file basin
 
@@ -94,6 +129,9 @@ class HDF5Writer:
             list of features provided by `paths`
         description: str
             optional string describing the basin
+        mapping: 1D array
+            integer array with indices that map the basin dataset
+            to this dataset
         """
         bdat = {
             "description": description,
@@ -102,8 +140,38 @@ class HDF5Writer:
             "paths": [str(pp) for pp in paths],
             "type": "file",
         }
+        # Explicit features stored in basin file
         if features is not None and len(features):
             bdat["features"] = features
+        # Mapped basin information
+        if mapping is not None:
+            events = self.h5.require_group("events")
+            # Reserve a mapping feature for this dataset
+            for ii in range(10):  # basinmap0 to basinmap9
+                bm_cand = f"basinmap{ii}"
+                if bm_cand in events:
+                    # There is a basin mapping defined in the file. Check
+                    # whether it is identical to ours.
+                    if np.all(events[bm_cand] == mapping):
+                        # Great, we are done here.
+                        feat_basinmap = bm_cand
+                        break
+                    else:
+                        # This mapping belongs to a different basin,
+                        # try the next mapping.
+                        continue
+                else:
+                    # The mapping is not defined in the dataset, and we may
+                    # write it to a new feature.
+                    feat_basinmap = bm_cand
+                    self.store_feature_chunk(feat=feat_basinmap, data=mapping)
+                    break
+            else:
+                raise ValueError(
+                    "You have exhausted the usage of mapped basins for "
+                    "the current dataset. Please revise your analysis "
+                    "pipeline.")
+            bdat["mapping"] = feat_basinmap
         bstring = json.dumps(bdat, indent=2)
         # basin key is its hash
         key = hashlib.md5(bstring.encode("utf-8",
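
In words: each output file can hold at most ten distinct basin mappings (basinmap0 through basinmap9), and basins with identical mappings share one index array. A hypothetical call, assuming the `name` argument from the portion of the `store_basin` signature that lies outside this hunk:

    import numpy as np

    # Expose only every other event of a 1000-event source file.
    mapping = np.arange(0, 1000, 2, dtype=np.uint64)
    hw.store_basin(name="raw data",  # `hw` is an HDF5Writer instance
                   paths=["/data/source.rtdc"],
                   description="every other event of the raw acquisition",
                   mapping=mapping)
    # The stored basin JSON then contains "mapping": "basinmap0" (or
    # basinmap1..9 if other mappings already exist); the index array itself
    # is written to events/basinmap0 via store_feature_chunk.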
@@ -131,11 +199,38 @@ class HDF5Writer:
             data = 255 * np.array(data, dtype=np.uint8)
         ds, offset = self.require_feature(feat=feat,
                                           item_shape=data.shape[1:],
-                                          dtype=data.dtype)
+                                          feat_dtype=data.dtype)
         dsize = data.shape[0]
         ds.resize(offset + dsize, axis=0)
         ds[offset:offset + dsize] = data
 
+    def store_log(self,
+                  log: str,
+                  data: List[str],
+                  override: bool = False):
+        """Store log data
+
+        Store the log data under the key `log`. The `data`
+        kwarg must be a list of strings. If the log entry
+        already exists, `ValueError` is raised unless
+        `override` is set to True.
+        """
+        logs = self.h5.require_group("logs")
+        if log in logs:
+            if override:
+                del logs[log]
+            else:
+                raise ValueError(
+                    f"Log '{log}' already exists in {self.h5.filename}!")
+        logs.create_dataset(
+            name=log,
+            data=data,
+            shape=(len(data),),
+            # maximum line length
+            dtype=f"S{max([len(ll) for ll in data])}",
+            chunks=True,
+            **self.ds_kwds)
+
 
 def create_with_basins(
         path_out: str | pathlib.Path,
@@ -154,6 +249,10 @@ def create_with_basins(
         commonly used for relative and absolute paths).
     """
     path_out = pathlib.Path(path_out)
+    if not basin_paths:
+        warnings.warn(f"Creating basin-based file '{path_out}' without any "
+                      f"basins, since the list `basin_paths' is empty!",
+                      CreatingFileWithoutBasinWarning)
     with HDF5Writer(path_out, mode="w") as hw:
         # Get the metadata from the first available basin path
 
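Because create_with_basins now emits a dedicated warning class instead of a bare UserWarning, callers can silence or test for it selectively. A minimal sketch, assuming the class is imported from the module shown here:

    import warnings
    from dcnum.write.writer import CreatingFileWithoutBasinWarning

    # e.g. when deliberately creating an output file whose basins are
    # only attached later in the pipeline:
    warnings.filterwarnings("ignore",
                            category=CreatingFileWithoutBasinWarning)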
@@ -184,7 +283,7 @@ def create_with_basins(
     # Copy the metadata from the representative path.
     if prep is not None:
         # copy metadata
-        with h5py.File(prep) as h5:
+        with h5py.File(prep, libver="latest") as h5:
             copy_metadata(h5_src=h5, h5_dst=hw.h5)
             # extract features
             features = sorted(h5["events"].keys())
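
The store_log method introduced above writes a list of strings as one fixed-length string dataset under "logs", sized to the longest line. A usage sketch with invented log content (`hw` is an HDF5Writer instance):

    hw.store_log("dcnum-processing",
                 data=["2024-05-01 12:00:00 pipeline started",
                       "2024-05-01 12:05:13 pipeline finished"],
                 override=True)  # replace any existing "dcnum-processing" log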
@@ -201,43 +300,114 @@ def create_with_basins(
         )
 
 
+def copy_features(h5_src: h5py.File,
+                  h5_dst: h5py.File,
+                  features: List[str],
+                  mapping: np.ndarray = None,
+                  ):
+    """Copy feature data from one HDF5 file to another
+
+    The feature must not exist in the destination file.
+
+    Parameters
+    ----------
+    h5_src: h5py.File
+        Input HDF5File containing `features` in the "events" group
+    h5_dst: h5py.File
+        Output HDF5File opened in write mode not containing `features`
+    features: List[str]
+        List of features to copy from source to destination
+    mapping: 1D array
+        If given, contains indices in the input file that should be
+        written to the output file. If set to None, all features are written.
+    """
+    ei = h5_src["events"]
+    eo = h5_dst.require_group("events")
+    # This is the size of the output dataset
+    size = h5_dst.attrs["experiment:event count"]
+    hw = HDF5Writer(h5_dst)
+    for feat in features:
+        if feat in eo:
+            raise ValueError(f"Output file {h5_dst.filename} already contains "
+                             f"the feature {feat}.")
+        if not isinstance(ei[feat], h5py.Dataset):
+            raise NotImplementedError(
+                f"Only dataset-based features are supported here, not {feat}")
+        if mapping is None:
+            # Just copy the data as-is.
+            h5py.h5o.copy(src_loc=ei.id,
+                          src_name=feat.encode(),
+                          dst_loc=eo.id,
+                          dst_name=feat.encode(),
+                          )
+        else:
+            # Perform mapping and store the features in chunks to keep
+            # memory usage down.
+            dsi = ei[feat]
+            chunk_size = hw.get_best_nd_chunks(dsi[0].shape, dsi.dtype)[0]
+            start = 0
+            while start < size:
+                chunk_idx = mapping[start:start + chunk_size]
+                # h5py only supports indexing in increasing order
+                chunk_unique, order = np.unique(chunk_idx, return_inverse=True)
+                data_unique = dsi[chunk_unique]
+                data = data_unique[order]
+                hw.store_feature_chunk(feat, data)
+                # increment start
+                start += chunk_size
+
+
 def copy_metadata(h5_src: h5py.File,
-                  h5_dst: h5py.File):
+                  h5_dst: h5py.File,
+                  copy_basins=True):
     """Copy attributes, tables, logs, and basins from one H5File to another
 
     Notes
     -----
     Metadata in `h5_dst` are never overridden, only metadata that
-    are not defined are added.
+    are not defined already are added.
     """
     # compress data
-    ds_kwds = {}
-    for key, val in dict(hdf5plugin.Zstd(clevel=5)).items():
-        ds_kwds.setdefault(key, val)
-    ds_kwds.setdefault("fletcher32", True)
+    ds_kwds = set_default_filter_kwargs()
     # set attributes
    src_attrs = dict(h5_src.attrs)
     for kk in src_attrs:
         h5_dst.attrs.setdefault(kk, src_attrs[kk])
+    copy_data = ["logs", "tables"]
+    if copy_basins:
+        copy_data.append("basins")
     # copy other metadata
-    for topic in ["basins", "logs", "tables"]:
+    for topic in copy_data:
         if topic in h5_src:
             for key in h5_src[topic]:
                 h5_dst.require_group(topic)
                 if key not in h5_dst[topic]:
                     data = h5_src[topic][key][:]
-                    if data.dtype == np.dtype("O"):
-                        # convert variable-length strings to fixed-length
-                        max_length = max([len(line) for line in data])
-                        data = np.asarray(data, dtype=f"S{max_length}")
-                    ds = h5_dst[topic].create_dataset(
-                        name=key,
-                        data=data,
-                        **ds_kwds
-                    )
-                    # help with debugging and add some meta-metadata
-                    ds.attrs.update(h5_src[topic][key].attrs)
-                    soft_strings = [ds.attrs.get("software"),
-                                    f"dcnum {version}"]
-                    soft_strings = [s for s in soft_strings if s is not None]
-                    ds.attrs["software"] = " | ".join(soft_strings)
+                    if data.size:  # ignore empty datasets
+                        if data.dtype == np.dtype("O"):
+                            # convert variable-length strings to fixed-length
+                            max_length = max([len(line) for line in data])
+                            data = np.asarray(data, dtype=f"S{max_length}")
+                        ds = h5_dst[topic].create_dataset(
+                            name=key,
+                            data=data,
+                            **ds_kwds
+                        )
+                        # help with debugging and add some meta-metadata
+                        ds.attrs.update(h5_src[topic][key].attrs)
+                        soft_strgs = [ds.attrs.get("software"),
+                                      f"dcnum {version}"]
+                        soft_strgs = [s for s in soft_strgs if s is not None]
+                        ds.attrs["software"] = " | ".join(soft_strgs)
+
+
+def set_default_filter_kwargs(ds_kwds=None, compression=True):
+    if ds_kwds is None:
+        ds_kwds = {}
+    if compression:
+        # compression
+        for key, val in dict(hdf5plugin.Zstd(clevel=5)).items():
+            ds_kwds.setdefault(key, val)
+    # checksums
+    ds_kwds.setdefault("fletcher32", True)
+    return ds_kwds
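
The mapped branch of copy_features works around h5py's requirement that fancy indexing use strictly increasing indices: np.unique with return_inverse=True yields a sorted index array for a single HDF5 read, and the inverse indices restore the requested order, duplicates included. The trick in isolation, with a NumPy array standing in for the HDF5 dataset:

    import numpy as np

    dsi = np.arange(10) * 10            # stand-in for the dataset ei[feat]
    chunk_idx = np.array([7, 3, 7, 1])  # events requested by the mapping
    chunk_unique, order = np.unique(chunk_idx, return_inverse=True)
    data_unique = dsi[chunk_unique]     # one sorted read: [10 30 70]
    print(data_unique[order])           # [70 30 70 10], the requested order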
{dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/METADATA CHANGED
@@ -1,8 +1,8 @@
 Metadata-Version: 2.1
 Name: dcnum
-Version: 0.13.2
+Version: 0.23.1
 Summary: numerics toolbox for imaging deformability cytometry
-Author: Paul Müller
+Author: Maximilian Schlögel, Paul Müller, Raghava Alajangi
 Maintainer-email: Paul Müller <dev@craban.de>
 License: MIT
 Project-URL: source, https://github.com/DC-Analysis/dcnum
@@ -25,6 +25,8 @@ Requires-Dist: numpy >=1.21
 Requires-Dist: opencv-python-headless
 Requires-Dist: scikit-image
 Requires-Dist: scipy >=1.8.0
+Provides-Extra: torch
+Requires-Dist: torch >=2.3 ; extra == 'torch'
 
 |dcnum|
 =======
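
The torch requirement above is declared as an optional extra, so the new PyTorch-based segmenters under dcnum/segm/segm_torch are installed via `pip install "dcnum[torch]"` while the default installation stays free of PyTorch.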
dcnum-0.23.1.dist-info/RECORD ADDED
@@ -0,0 +1,55 @@
+dcnum/__init__.py,sha256=hcawIKS7utYiOyVhOAX9t7K3xYzP1b9862VV0b6qSrQ,74
+dcnum/_version.py,sha256=FvSfCBPqaPzqv_nSHIaBJNolXopVkF1cRxifqhCXk4g,413
+dcnum/feat/__init__.py,sha256=jUJYWTD3VIoDNKrmryXbjHb1rGwYtK4b7VPWihYgUoo,325
+dcnum/feat/event_extractor_manager_thread.py,sha256=5HdCQCywyQ5QC56AMjSqCroqif9oOFyiSFWCe07JojM,7820
+dcnum/feat/gate.py,sha256=Yhxq80JoRMmQzBxl35C8NT91c9QcmQa-EIKLuxK6WvE,7221
+dcnum/feat/queue_event_extractor.py,sha256=0ncTQleT1sfc98zYkFuZWxU-akecfTrW6-OOU3z-d8o,15698
+dcnum/feat/feat_background/__init__.py,sha256=OTmMuazHNaSrZb2XW4cnJ6PlgJLbKrPbaidpEixYa0A,341
+dcnum/feat/feat_background/base.py,sha256=A-K3qlJ0ABFBGm5eMKYcNCC7ktFAInSm0eR3N3DHQZY,7963
+dcnum/feat/feat_background/bg_copy.py,sha256=PK8x4_Uph-_A6uszZC5uhe1gD1dSRdHnDMEsN0HSGHA,1034
+dcnum/feat/feat_background/bg_roll_median.py,sha256=EyjstMDXFBYuJB1lN6g4Uw7tPm434X3hXQxKSqvcoJ4,13175
+dcnum/feat/feat_background/bg_sparse_median.py,sha256=ab7Boj7cmr6PBdTbyWTj_yNNJSfuowr7u-iSGW989WI,20709
+dcnum/feat/feat_brightness/__init__.py,sha256=o6AebVlmydwNgVF5kW6ITqJyFreoKrU3Ki_3EC8If-s,155
+dcnum/feat/feat_brightness/bright_all.py,sha256=vf8xaYBdKD24hHUXdkI0_S7nbr7m49KW6gvuWvbHDVg,4545
+dcnum/feat/feat_brightness/common.py,sha256=JX49EszYDmnvoOKXFVV1CalEIWRmOuY5EryNbqGbdac,156
+dcnum/feat/feat_contour/__init__.py,sha256=Td4Hs47kUgJj0VXm3q5ofXhaUWr9QTfVgbwh5EELA-I,163
+dcnum/feat/feat_contour/contour.py,sha256=_qyHCGvylVxruMWafvVbVOzhWGXLoFi10LReNxGcWhY,463
+dcnum/feat/feat_contour/moments.py,sha256=W8sD2X7JqIBq-9nL82hf4Hm2uJkfca8EvAl_hqI_IDg,5109
+dcnum/feat/feat_contour/volume.py,sha256=xVHWtv6USUHJZ5dM1Ur7fI7OwoPT5N2Ps0gKVWylfl8,6639
+dcnum/feat/feat_texture/__init__.py,sha256=6StM9S540UVtdFFR3bHa7nfCTomeVdoo7Uy9CjuTgH0,137
+dcnum/feat/feat_texture/common.py,sha256=COXHpXS-7DMouGu3WF83I76L02Sr7P9re4lxajh6g0E,439
+dcnum/feat/feat_texture/tex_all.py,sha256=_5H3sXYRN0Uq2eUHn3XUyEHkU_tncEqbqJTC-HZcnGY,5198
+dcnum/logic/__init__.py,sha256=7J3GrwJInNQbrLk61HRIV7X7p69TAIbMYpR34hh6u14,177
+dcnum/logic/ctrl.py,sha256=FyVlizHOIaIGMqINvM-outPywAQU0-5NM7t1dEDml4c,35332
+dcnum/logic/job.py,sha256=H1uDZ1nnNHNWWCe6mS8OWB0Uxc6XUKLISx5xExeplZY,7015
+dcnum/logic/json_encoder.py,sha256=cxMnqisbKEVf-rVcw6rK2BBAb6iz_hKFaGl81kK36lQ,571
+dcnum/meta/__init__.py,sha256=AVqRgyKXO1orKnE305h88IBvoZ1oz6X11HN1WP5nGvg,60
+dcnum/meta/paths.py,sha256=J_ikeHzd7gEeRgAKjuayz3x6q4h1fOiDadM-ZxhAGm4,1053
+dcnum/meta/ppid.py,sha256=OD79NrZ8waC3julwdH8NjneUuXqSRSHsUDpKzT5pdyU,8432
+dcnum/read/__init__.py,sha256=ksLdV8EkOU3EPje8teCOSehcUeGAZfg9TQ5ltuEUgls,216
+dcnum/read/cache.py,sha256=lisrGG7AyvVitf0h92wh5FvYCsxa0pWyGcAyYwGP-LQ,6471
+dcnum/read/const.py,sha256=GG9iyXDtEldvJYOBnhZjlimzIeBMAt4bSr2-xn2gzzc,464
+dcnum/read/hdf5_data.py,sha256=Yyq02UTILc5ZgIQXpR9Y0wuX2WT8s0g23PraI7KxmJY,23489
+dcnum/read/mapped.py,sha256=UryArlrIsHxjOyimBL2Nooi3r73zuGtnGdqdxa6PK_g,3076
+dcnum/segm/__init__.py,sha256=9cLEAd3JWE8IGqDHV-eSDIYOGBfOepd8OcebtNs8Omk,309
+dcnum/segm/segm_thresh.py,sha256=iVhvIhzO0Gw0t3rXOgH71rOI0CNjJJQq4Gg6BulUhK8,948
+dcnum/segm/segmenter.py,sha256=FWLFDBR-x_85ku2rObA2F-QBrM4IUaUL-YHChLagVvM,14902
+dcnum/segm/segmenter_manager_thread.py,sha256=frM0sMxC7f7TQiFjmpRxuwG2kUBFpW1inV8dtpADHiI,5924
+dcnum/segm/segmenter_mpo.py,sha256=o6mQlITHgEWvQt9v6oCWwAcZUvxE7MOeLE9DFManzpY,13757
+dcnum/segm/segmenter_sto.py,sha256=e6MtN_RWusA0wTExV-FLGpDXNJs1CbSyXcSdWUPBMvM,3959
+dcnum/segm/segm_torch/__init__.py,sha256=re9jVLYvV1GgC7J5vx2LHKeFYVZPpiwubecAV9f_2kA,670
+dcnum/segm/segm_torch/segm_torch_base.py,sha256=G9AhVyD6LkAmk0tkbYnJUSpvcj3_HYf0uqfILZQsyus,4479
+dcnum/segm/segm_torch/segm_torch_mpo.py,sha256=N01dVXai_4eIGfHJrPjg5C2Bkyq1TOeXeJhw3YbGidw,2504
+dcnum/segm/segm_torch/segm_torch_sto.py,sha256=PTOJrP_FkaxZZul8lM4VA2HL3KyxrheDDWWdJbmJdiw,3393
+dcnum/segm/segm_torch/torch_model.py,sha256=5aL6SwSvg1N2gATEGBhP3aA4WTHlvGzQVYuizmh0LrU,3187
+dcnum/segm/segm_torch/torch_postproc.py,sha256=ctirQTmsZnuZGIxkwFWN9arRneHRYJUxaJ_ZyCgjByM,3311
+dcnum/segm/segm_torch/torch_preproc.py,sha256=kjabu76paw23kO7RP7Ik6IY60Kk1VBAHKBAedflA0aQ,4002
+dcnum/write/__init__.py,sha256=QvWHeZmjHI18i-YlGYuzN3i7dVWY9UCReKchrJ-gif0,260
+dcnum/write/deque_writer_thread.py,sha256=ao7F1yrVKyufgC4rC0Y2_Vt7snuT6KpI7W2qVxcjdhk,1994
+dcnum/write/queue_collector_thread.py,sha256=d_WfdsZdFnFsiAY0zVMwUlA4juIMeiWYmE_-rezBQCE,11734
+dcnum/write/writer.py,sha256=e6J8YVqhS7kzkpPIMoDMokJpqSy1WWNdOrwaJof1oVc,15601
+dcnum-0.23.1.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
+dcnum-0.23.1.dist-info/METADATA,sha256=NL79bLTjTqm5R4HW0TNcVZ9YXqbxH7Xcfl6xuk-hfw4,2280
+dcnum-0.23.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+dcnum-0.23.1.dist-info/top_level.txt,sha256=Hmh38rgG_MFTVDpUDGuO2HWTSq80P585Het4COQzFTg,6
+dcnum-0.23.1.dist-info/RECORD,,
{dcnum-0.13.2.dist-info → dcnum-0.23.1.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.41.3)
+Generator: bdist_wheel (0.43.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
dcnum/feat/feat_moments/__init__.py DELETED
@@ -1,3 +0,0 @@
-# flake8: noqa: F401
-from .mt_legacy import moments_based_features
-
dcnum/segm/segmenter_gpu.py DELETED
@@ -1,45 +0,0 @@
-import abc
-import pathlib
-
-import numpy as np
-import scipy.ndimage as ndi
-
-
-from .segmenter import Segmenter
-
-
-class GPUSegmenter(Segmenter, abc.ABC):
-    mask_postprocessing = False
-
-    def __init__(self, *args, **kwargs):
-        super(GPUSegmenter, self).__init__(*args, **kwargs)
-
-    @staticmethod
-    def _get_model_path(model_file):
-        """Custom hook that may be defined by subclasses"""
-        return pathlib.Path(model_file)
-
-    def segment_batch(self,
-                      image_data: np.ndarray,
-                      start: int = None,
-                      stop: int = None):
-        if stop is None or start is None:
-            start = 0
-            stop = len(image_data)
-
-        image_slice = image_data[start:stop]
-        segm = self.segment_frame_wrapper()
-
-        labels = segm(image_slice)
-
-        # Make sure we have integer labels
-        if labels.dtype == bool:
-            new_labels = np.zeros_like(labels, dtype=np.uint16)
-            for ii in range(len(labels)):
-                ndi.label(
-                    input=labels[ii],
-                    output=new_labels[ii],
-                    structure=ndi.generate_binary_structure(2, 2))
-            labels = new_labels
-
-        return labels
dcnum-0.13.2.dist-info/RECORD DELETED
@@ -1,40 +0,0 @@
-dcnum/__init__.py,sha256=hcawIKS7utYiOyVhOAX9t7K3xYzP1b9862VV0b6qSrQ,74
-dcnum/_version.py,sha256=ByxP_AGaDBx1nhN9W4fte2GtMXAPh9eMZ4pSGlztQPc,413
-dcnum/feat/__init__.py,sha256=0oX765AyfL1BcVt-FI6R_i6x5LgYYLeyO5lkxSauI0Y,299
-dcnum/feat/event_extractor_manager_thread.py,sha256=5vcCzAnyg59bpNj-8IV8YM7uHTzqs5du3-KgORNwDok,6020
-dcnum/feat/gate.py,sha256=UEHbj3hkMWNm4tlY8Tz8sOsruhByjJxgO1s-ztQ7WTw,6235
-dcnum/feat/queue_event_extractor.py,sha256=RdA-8OW0uB8svAFRQtUBMp7GBKPisEFvX7mc-RfisqE,13170
-dcnum/feat/feat_background/__init__.py,sha256=mL8QJYK6m3hxTqF6Cuosu__Fm5tZUMa-hTgSGcNw9AE,458
-dcnum/feat/feat_background/base.py,sha256=XcefqydfyOt9vNnIjOOIljdLbN78amTM1oGU65mgg5Y,7367
-dcnum/feat/feat_background/bg_roll_median.py,sha256=E86AiSzpw0RZ0nYL4UdKKkskS5ywKJCLeHlWYfTPS0k,12781
-dcnum/feat/feat_background/bg_sparse_median.py,sha256=COJeY8suHmrHBWiXxTLw3C17-QOnc4R75GMdZiuU3cc,17670
-dcnum/feat/feat_brightness/__init__.py,sha256=j-Gen6zutc74VopPGJsr4eHS-_CRnGnnB73HjKsz1C4,102
-dcnum/feat/feat_brightness/bright_all.py,sha256=Z5b-xkw7g7ejMpbGmdUqrxGRymqFhAQsZ938gaGXk9Y,3102
-dcnum/feat/feat_brightness/common.py,sha256=JX49EszYDmnvoOKXFVV1CalEIWRmOuY5EryNbqGbdac,156
-dcnum/feat/feat_moments/__init__.py,sha256=RxDTbl-XVVk8HIgihTuqWdmD0ciNGdfg715ShHEGUHs,68
-dcnum/feat/feat_moments/ct_opencv.py,sha256=_qyHCGvylVxruMWafvVbVOzhWGXLoFi10LReNxGcWhY,463
-dcnum/feat/feat_moments/mt_legacy.py,sha256=tp85oeQ1GwVNdo6nXWhtbUGjMaXR8C6NMMWhobzThq0,4490
-dcnum/feat/feat_texture/__init__.py,sha256=SjYRb917PrFN231M2nVxq12DDH2y72WocsS9yY7xqaI,84
-dcnum/feat/feat_texture/common.py,sha256=COXHpXS-7DMouGu3WF83I76L02Sr7P9re4lxajh6g0E,439
-dcnum/feat/feat_texture/tex_all.py,sha256=eGjjNfPpfZw7FA_VNFCIMiU38KD0qcGbxLciYy-tCiA,4097
-dcnum/meta/__init__.py,sha256=cQT_HN5yDKzMnZM8CUyNmeA68OhE3ENO_rvFmgDj95c,40
-dcnum/meta/ppid.py,sha256=tHbn7rZWEQYCoMzvt8QXhnBDEY6cRJlKxMlbmMwFhVM,5951
-dcnum/read/__init__.py,sha256=iV2wrBMdwJgpXaphNiiAVybndDzTTv0CAGRNXyvxcLY,157
-dcnum/read/cache.py,sha256=mr2DBJZYgNIAiz64TQ4cgkPmRt8nJWBvgkOpaz-p6Yg,5467
-dcnum/read/const.py,sha256=SVlvEJiRIHyTyUlWG24_ogcnT5nTxCi0CRslNuNP56I,282
-dcnum/read/hdf5_data.py,sha256=3Pti8U7-VZkyNnQR7C53ZV9_ljvn7N-AhCso9_HQkZg,16538
-dcnum/segm/__init__.py,sha256=BNFn7VvWsRmOzxCtvr4AXi3mmWsJSsQlEmGlq7umCac,510
-dcnum/segm/segm_thresh.py,sha256=fim5HRNWq0DUhVRwLf6nmu4gOHAFtydGDRS1Ww_XzFo,1134
-dcnum/segm/segmenter.py,sha256=o5G9JnHG-dHnNwG8IdPK8MideF0IlSDbuYwCdrSr31w,9002
-dcnum/segm/segmenter_cpu.py,sha256=oFiK73swm7UVPZcTLX4cpFHxp1DIyn9AjQ93JB0_Eh4,10072
-dcnum/segm/segmenter_gpu.py,sha256=RKas-IOZqyg99nvjUUD2NuNTd1sYqjzjHEJeunmmjaY,1236
-dcnum/segm/segmenter_manager_thread.py,sha256=xtuk7gnk7xhoRoV_J97rrv7IR3JgeRvVewCDT-chqpk,5172
-dcnum/write/__init__.py,sha256=6vAQECatcd7DJMXFEuab1wdvEiaxisbY8_qmK5tzIwY,207
-dcnum/write/deque_writer_thread.py,sha256=UUn5OYxDvckvhLw3llLYu7y8MI7RfsOhdJhMonKKB3k,1625
-dcnum/write/queue_collector_thread.py,sha256=BivSe5ZA-rTPH4sridXU1yFB6CP7LYzIFudLMbN481s,11793
-dcnum/write/writer.py,sha256=_6aSz6vJYQfgUftwIiIHEtKMAAm83WcaH_OgplqRGzk,8785
-dcnum-0.13.2.dist-info/LICENSE,sha256=YRChA1C8A2E-amJbudwMcbTCZy_HzmeY0hMIvduh1MM,1089
-dcnum-0.13.2.dist-info/METADATA,sha256=ItcmJIpJrhbvA1hcXpKS-XWcZFxq2wbnOYDCuPyO5Is,2172
-dcnum-0.13.2.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
-dcnum-0.13.2.dist-info/top_level.txt,sha256=Hmh38rgG_MFTVDpUDGuO2HWTSq80P585Het4COQzFTg,6
-dcnum-0.13.2.dist-info/RECORD,,