dclab-0.62.11-cp313-cp313-win_amd64.whl

Files changed (137)
  1. dclab/__init__.py +23 -0
  2. dclab/_version.py +16 -0
  3. dclab/cached.py +97 -0
  4. dclab/cli/__init__.py +10 -0
  5. dclab/cli/common.py +237 -0
  6. dclab/cli/task_compress.py +126 -0
  7. dclab/cli/task_condense.py +223 -0
  8. dclab/cli/task_join.py +229 -0
  9. dclab/cli/task_repack.py +98 -0
  10. dclab/cli/task_split.py +154 -0
  11. dclab/cli/task_tdms2rtdc.py +186 -0
  12. dclab/cli/task_verify_dataset.py +75 -0
  13. dclab/definitions/__init__.py +79 -0
  14. dclab/definitions/feat_const.py +202 -0
  15. dclab/definitions/feat_logic.py +183 -0
  16. dclab/definitions/meta_const.py +252 -0
  17. dclab/definitions/meta_logic.py +111 -0
  18. dclab/definitions/meta_parse.py +94 -0
  19. dclab/downsampling.cp313-win_amd64.pyd +0 -0
  20. dclab/downsampling.pyx +230 -0
  21. dclab/external/__init__.py +4 -0
  22. dclab/external/packaging/LICENSE +3 -0
  23. dclab/external/packaging/LICENSE.APACHE +177 -0
  24. dclab/external/packaging/LICENSE.BSD +23 -0
  25. dclab/external/packaging/__init__.py +6 -0
  26. dclab/external/packaging/_structures.py +61 -0
  27. dclab/external/packaging/version.py +505 -0
  28. dclab/external/skimage/LICENSE +28 -0
  29. dclab/external/skimage/__init__.py +2 -0
  30. dclab/external/skimage/_find_contours.py +216 -0
  31. dclab/external/skimage/_find_contours_cy.cp313-win_amd64.pyd +0 -0
  32. dclab/external/skimage/_find_contours_cy.pyx +188 -0
  33. dclab/external/skimage/_pnpoly.cp313-win_amd64.pyd +0 -0
  34. dclab/external/skimage/_pnpoly.pyx +99 -0
  35. dclab/external/skimage/_shared/__init__.py +1 -0
  36. dclab/external/skimage/_shared/geometry.cp313-win_amd64.pyd +0 -0
  37. dclab/external/skimage/_shared/geometry.pxd +6 -0
  38. dclab/external/skimage/_shared/geometry.pyx +55 -0
  39. dclab/external/skimage/measure.py +7 -0
  40. dclab/external/skimage/pnpoly.py +53 -0
  41. dclab/external/statsmodels/LICENSE +35 -0
  42. dclab/external/statsmodels/__init__.py +6 -0
  43. dclab/external/statsmodels/nonparametric/__init__.py +1 -0
  44. dclab/external/statsmodels/nonparametric/_kernel_base.py +203 -0
  45. dclab/external/statsmodels/nonparametric/kernel_density.py +165 -0
  46. dclab/external/statsmodels/nonparametric/kernels.py +36 -0
  47. dclab/features/__init__.py +9 -0
  48. dclab/features/bright.py +81 -0
  49. dclab/features/bright_bc.py +93 -0
  50. dclab/features/bright_perc.py +63 -0
  51. dclab/features/contour.py +161 -0
  52. dclab/features/emodulus/__init__.py +339 -0
  53. dclab/features/emodulus/load.py +252 -0
  54. dclab/features/emodulus/lut_HE-2D-FEM-22.txt +16432 -0
  55. dclab/features/emodulus/lut_HE-3D-FEM-22.txt +1276 -0
  56. dclab/features/emodulus/lut_LE-2D-FEM-19.txt +13082 -0
  57. dclab/features/emodulus/pxcorr.py +135 -0
  58. dclab/features/emodulus/scale_linear.py +247 -0
  59. dclab/features/emodulus/viscosity.py +256 -0
  60. dclab/features/fl_crosstalk.py +95 -0
  61. dclab/features/inert_ratio.py +377 -0
  62. dclab/features/volume.py +242 -0
  63. dclab/http_utils.py +322 -0
  64. dclab/isoelastics/__init__.py +468 -0
  65. dclab/isoelastics/iso_HE-2D-FEM-22-area_um-deform.txt +2440 -0
  66. dclab/isoelastics/iso_HE-2D-FEM-22-volume-deform.txt +2635 -0
  67. dclab/isoelastics/iso_HE-3D-FEM-22-area_um-deform.txt +1930 -0
  68. dclab/isoelastics/iso_HE-3D-FEM-22-volume-deform.txt +2221 -0
  69. dclab/isoelastics/iso_LE-2D-FEM-19-area_um-deform.txt +2151 -0
  70. dclab/isoelastics/iso_LE-2D-FEM-19-volume-deform.txt +2250 -0
  71. dclab/isoelastics/iso_LE-2D-ana-18-area_um-deform.txt +1266 -0
  72. dclab/kde_contours.py +222 -0
  73. dclab/kde_methods.py +303 -0
  74. dclab/lme4/__init__.py +5 -0
  75. dclab/lme4/lme4_template.R +94 -0
  76. dclab/lme4/rsetup.py +204 -0
  77. dclab/lme4/wrapr.py +386 -0
  78. dclab/polygon_filter.py +398 -0
  79. dclab/rtdc_dataset/__init__.py +15 -0
  80. dclab/rtdc_dataset/check.py +902 -0
  81. dclab/rtdc_dataset/config.py +533 -0
  82. dclab/rtdc_dataset/copier.py +353 -0
  83. dclab/rtdc_dataset/core.py +1001 -0
  84. dclab/rtdc_dataset/export.py +737 -0
  85. dclab/rtdc_dataset/feat_anc_core/__init__.py +24 -0
  86. dclab/rtdc_dataset/feat_anc_core/af_basic.py +75 -0
  87. dclab/rtdc_dataset/feat_anc_core/af_emodulus.py +160 -0
  88. dclab/rtdc_dataset/feat_anc_core/af_fl_max_ctc.py +133 -0
  89. dclab/rtdc_dataset/feat_anc_core/af_image_contour.py +113 -0
  90. dclab/rtdc_dataset/feat_anc_core/af_ml_class.py +102 -0
  91. dclab/rtdc_dataset/feat_anc_core/ancillary_feature.py +320 -0
  92. dclab/rtdc_dataset/feat_anc_ml/__init__.py +32 -0
  93. dclab/rtdc_dataset/feat_anc_plugin/__init__.py +3 -0
  94. dclab/rtdc_dataset/feat_anc_plugin/plugin_feature.py +329 -0
  95. dclab/rtdc_dataset/feat_basin.py +550 -0
  96. dclab/rtdc_dataset/feat_temp.py +102 -0
  97. dclab/rtdc_dataset/filter.py +263 -0
  98. dclab/rtdc_dataset/fmt_dcor/__init__.py +7 -0
  99. dclab/rtdc_dataset/fmt_dcor/access_token.py +52 -0
  100. dclab/rtdc_dataset/fmt_dcor/api.py +111 -0
  101. dclab/rtdc_dataset/fmt_dcor/base.py +200 -0
  102. dclab/rtdc_dataset/fmt_dcor/basin.py +73 -0
  103. dclab/rtdc_dataset/fmt_dcor/logs.py +26 -0
  104. dclab/rtdc_dataset/fmt_dcor/tables.py +42 -0
  105. dclab/rtdc_dataset/fmt_dict.py +103 -0
  106. dclab/rtdc_dataset/fmt_hdf5/__init__.py +6 -0
  107. dclab/rtdc_dataset/fmt_hdf5/base.py +192 -0
  108. dclab/rtdc_dataset/fmt_hdf5/basin.py +30 -0
  109. dclab/rtdc_dataset/fmt_hdf5/events.py +257 -0
  110. dclab/rtdc_dataset/fmt_hdf5/feat_defect.py +164 -0
  111. dclab/rtdc_dataset/fmt_hdf5/logs.py +33 -0
  112. dclab/rtdc_dataset/fmt_hdf5/tables.py +30 -0
  113. dclab/rtdc_dataset/fmt_hierarchy/__init__.py +11 -0
  114. dclab/rtdc_dataset/fmt_hierarchy/base.py +278 -0
  115. dclab/rtdc_dataset/fmt_hierarchy/events.py +146 -0
  116. dclab/rtdc_dataset/fmt_hierarchy/hfilter.py +140 -0
  117. dclab/rtdc_dataset/fmt_hierarchy/mapper.py +134 -0
  118. dclab/rtdc_dataset/fmt_http.py +102 -0
  119. dclab/rtdc_dataset/fmt_s3.py +320 -0
  120. dclab/rtdc_dataset/fmt_tdms/__init__.py +476 -0
  121. dclab/rtdc_dataset/fmt_tdms/event_contour.py +264 -0
  122. dclab/rtdc_dataset/fmt_tdms/event_image.py +220 -0
  123. dclab/rtdc_dataset/fmt_tdms/event_mask.py +62 -0
  124. dclab/rtdc_dataset/fmt_tdms/event_trace.py +146 -0
  125. dclab/rtdc_dataset/fmt_tdms/exc.py +37 -0
  126. dclab/rtdc_dataset/fmt_tdms/naming.py +151 -0
  127. dclab/rtdc_dataset/load.py +72 -0
  128. dclab/rtdc_dataset/writer.py +985 -0
  129. dclab/statistics.py +203 -0
  130. dclab/util.py +156 -0
  131. dclab/warn.py +15 -0
  132. dclab-0.62.11.dist-info/LICENSE +343 -0
  133. dclab-0.62.11.dist-info/METADATA +146 -0
  134. dclab-0.62.11.dist-info/RECORD +137 -0
  135. dclab-0.62.11.dist-info/WHEEL +5 -0
  136. dclab-0.62.11.dist-info/entry_points.txt +8 -0
  137. dclab-0.62.11.dist-info/top_level.txt +1 -0
dclab/rtdc_dataset/writer.py
@@ -0,0 +1,985 @@
+ from __future__ import annotations
+
+ from collections.abc import Mapping
+ import copy
+ import json
+ import os
+ import pathlib
+ from typing import Dict, List, Literal, Tuple
+ import warnings
+
+ import h5py
+ import hdf5plugin
+ import numpy as np
+
+ from .. import definitions as dfn
+ from ..util import hashobj
+ from .._version import version
+
+ from .feat_anc_plugin import PlugInFeature
+
+ #: DEPRECATED (use `CHUNK_SIZE_BYTES` instead)
+ CHUNK_SIZE = 100
+
+ #: Chunk size in bytes for storing HDF5 datasets
+ CHUNK_SIZE_BYTES = 1024**2  # 1MiB
+
+ #: features that should be written to the output file as uint32 values
+ FEATURES_UINT32 = [
+     "fl1_max",
+     "fl1_npeaks",
+     "fl2_max",
+     "fl2_npeaks",
+     "fl3_max",
+     "fl3_npeaks",
+     "index",
+     "ml_class",
+     "nevents",
+ ]
+
+ #: features that should be written to the output file as uint64 values
+ FEATURES_UINT64 = [
+     "frame",
+ ]
+
+
+ class RTDCWriter:
+     def __init__(self,
+                  path_or_h5file: str | pathlib.Path | h5py.Group,
+                  mode: Literal['append', 'replace', 'reset'] = "append",
+                  compression_kwargs: Dict | Mapping = None,
+                  compression: str = "deprecated"):
+         """RT-DC data writer class
+
+         Parameters
+         ----------
+         path_or_h5file: str or pathlib.Path or h5py.Group
+             Path to an HDF5 file or an HDF5 file opened in write mode
+         mode: str
+             Defines how the data are stored:
+
+             - "append": append new feature data to existing h5py Datasets
+             - "replace": replace existing h5py Datasets with new features
+               (used for ancillary feature storage)
+             - "reset": do not keep any previous data
+         compression_kwargs: dict-like
+             Dictionary with the keys "compression" and "compression_opts"
+             which are passed to :func:`h5py.Group.create_dataset`. The
+             default is Zstandard compression with the lowest compression
+             level `hdf5plugin.Zstd(clevel=1)`. To disable compression, use
+             `{"compression": None}`.
+         compression: str or None
+             Compression method used for data storage;
+             one of [None, "lzf", "gzip", "szip"].
+
+             .. deprecated:: 0.43.0
+                 Use `compression_kwargs` instead.
+         """
+         if mode not in ["append", "replace", "reset"]:
+             raise ValueError(f"Invalid mode '{mode}'!")
+         if compression != "deprecated":
+             warnings.warn("The `compression` kwarg is deprecated in favor of "
+                           "`compression_kwargs`!",
+                           DeprecationWarning)
+             if compression_kwargs is not None:
+                 raise ValueError("You may not specify `compression` and "
+                                  "`compression_kwargs` at the same time!")
+             # be backwards-compatible
+             compression_kwargs = {"compression": compression}
+         if compression_kwargs is None:
+             compression_kwargs = hdf5plugin.Zstd(clevel=1)
+
+         self.mode = mode
+         self.compression_kwargs = compression_kwargs
+         if isinstance(path_or_h5file, h5py.Group):
+             self.owns_path = False
+             self.path = pathlib.Path(path_or_h5file.file.filename)
+             self.h5file = path_or_h5file
+             if mode == "reset":
+                 raise ValueError("'reset' mode incompatible with h5py.Group!")
+         else:
+             self.owns_path = True
+             self.path = pathlib.Path(path_or_h5file)
+             self.h5file = h5py.File(path_or_h5file,
+                                     mode=("w" if mode == "reset" else "a"))
+         #: unfortunate necessity, as `len(h5py.Group)` can be really slow
+         self._group_sizes = {}
+
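A minimal usage sketch for this constructor (the file name and the compression level are made up for illustration; `RTDCWriter` is exported at the dclab top level):

    import hdf5plugin
    from dclab import RTDCWriter

    # "reset" discards any previous file content; without
    # `compression_kwargs`, Zstandard level 1 would be used.
    hw = RTDCWriter("output.rtdc", mode="reset",
                    compression_kwargs=hdf5plugin.Zstd(clevel=5))
    hw.close()
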
+     def __enter__(self):
+         return self
+
+     def __exit__(self, type, value, tb):
+         # close the HDF5 file
+         try:
+             self.h5file.require_group("events")
+             if len(self.h5file["events"]):
+                 self.rectify_metadata()
+             self.version_brand()
+         except BaseException:
+             raise
+         finally:
+             # This is guaranteed to run if any exception is raised.
+             self.close()
+
+     @staticmethod
+     def get_best_nd_chunks(item_shape, item_dtype=np.float64):
+         """Return best chunks for HDF5 datasets
+
+         Chunking has performance implications. It's recommended to keep the
+         total size of dataset chunks between 10 KiB and 1 MiB. This number
+         defines the maximum chunk size as well as half the maximum cache
+         size for each dataset.
+         """
+         # Note that `np.prod(()) == 1`
+         event_size = np.prod(item_shape) * np.dtype(item_dtype).itemsize
+
+         chunk_size = CHUNK_SIZE_BYTES / event_size
+         # Set minimum chunk size to 10 so that we can have at least some
+         # compression performance.
+         chunk_size_int = max(10, int(np.floor(chunk_size)))
+         return tuple([chunk_size_int] + list(item_shape))
+
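To make the arithmetic concrete: under the 1 MiB budget of `CHUNK_SIZE_BYTES`, one 80x250 uint8 image occupies 20,000 bytes, so a chunk covers floor(1048576 / 20000) = 52 events. A small sketch (the shapes are hypothetical):

    import numpy as np
    from dclab import RTDCWriter

    # one event: 80 * 250 * 1 byte = 20,000 bytes -> 52 events per chunk
    assert RTDCWriter.get_best_nd_chunks((80, 250), np.uint8) == (52, 80, 250)
    # scalar float64 feature: np.prod(()) == 1, 8 bytes per event
    assert RTDCWriter.get_best_nd_chunks((), np.float64) == (131072,)
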
+     def close(self):
+         """Close the underlying HDF5 file if a path was given during init"""
+         if self.owns_path:
+             self.h5file.close()
+
+     def rectify_metadata(self):
+         """Autocomplete the metadata of the RT-DC measurement
+
+         The following configuration keys are updated:
+
+         - experiment:event count
+         - fluorescence:samples per event
+         - imaging: roi size x (if image or mask is given)
+         - imaging: roi size y (if image or mask is given)
+
+         The following configuration keys are added if not present:
+
+         - fluorescence:channel count
+         """
+         # set event count
+         feats = sorted(self.h5file.get("events", {}).keys())
+         if feats:
+             self.h5file.attrs["experiment:event count"] = len(
+                 self.h5file["events"][feats[0]])
+         else:
+             raise ValueError(f"No features in '{self.path}'!")
+
+         # ignore empty features in the checks further below
+         for feat in feats[:]:  # iterate over a copy of the list
+             obj = self.h5file["events"][feat]
+             if ((isinstance(obj, h5py.Dataset) and obj.shape[0] == 0)  # ds
+                     or len(obj) == 0):  # groups
+                 feats.remove(feat)
+
+         # set samples per event
+         if "trace" in feats:
+             traces = list(self.h5file["events"]["trace"].keys())
+             trsize = self.h5file["events"]["trace"][traces[0]].shape[1]
+             self.h5file.attrs["fluorescence:samples per event"] = trsize
+
+         # set channel count
+         chcount = sum(
+             ["fl1_max" in feats, "fl2_max" in feats, "fl3_max" in feats])
+         if chcount:
+             if "fluorescence:channel count" not in self.h5file.attrs:
+                 self.h5file.attrs["fluorescence:channel count"] = chcount
+
+         # set roi size x/y
+         if "image" in feats:
+             shape = self.h5file["events"]["image"][0].shape
+         elif "mask" in feats:
+             shape = self.h5file["events"]["mask"][0].shape
+         else:
+             shape = None
+         if shape is not None:
+             # update shape
+             self.h5file.attrs["imaging:roi size x"] = shape[1]
+             self.h5file.attrs["imaging:roi size y"] = shape[0]
+
+     def store_basin(self,
+                     basin_name: str,
+                     basin_type: Literal['file', 'internal', 'remote'],
+                     basin_format: str,
+                     basin_locs: List[str | pathlib.Path],
+                     basin_descr: str | None = None,
+                     basin_feats: List[str] = None,
+                     basin_map: np.ndarray | Tuple[str, np.ndarray] = None,
+                     internal_data: Dict | h5py.Group = None,
+                     verify: bool = True,
+                     ):
+         """Write basin information
+
+         Parameters
+         ----------
+         basin_name: str
+             basin name; Names do not have to be unique.
+         basin_type: str
+             basin type ("file", "internal", or "remote"); Files are paths
+             accessible by the operating system (including e.g. network
+             shares) whereas remote locations normally require an active
+             internet connection.
+         basin_format: str
+             The basin format must match the ``format`` property of an
+             :class:`.RTDCBase` subclass (e.g. "hdf5" or "dcor")
+         basin_locs: list
+             locations of the basin as strings or (optionally)
+             ``pathlib.Path`` objects
+         basin_descr: str
+             optional string describing the basin
+         basin_feats: list of str
+             list of features this basin provides; You may use this to
+             restrict access to features for a specific basin.
+         basin_map: np.ndarray or tuple of (str, np.ndarray)
+             If this is an integer numpy array, it defines the mapping
+             of event indices from the basin dataset to the referring dataset
+             (the dataset being written to disk). Normally, the basinmap
+             feature used for storing the mapping information is inferred
+             from the currently defined basinmap features. However, if you
+             are incepting basins, then this might not be sufficient, and you
+             have to specify explicitly which basinmap feature to use. In such
+             a case, you may specify a tuple `(feature_name, mapping_array)`
+             where `feature_name` is the explicit mapping name, e.g.
+             `"basinmap3"`.
+         internal_data: dict or instance of h5py.Group
+             A dictionary or an `h5py.Group` containing the basin data.
+             The data are copied to the "basin_events" group, if
+             `internal_data` is not an `h5py.Group` in the current HDF5 file.
+             This must be specified when storing internal basins, and it
+             must not be specified for any other basin type.
+         verify: bool
+             whether to verify the basin before storing it; You might want
+             to set this to False if you would like to write a basin that
+             is e.g. temporarily not available
+
+         Returns
+         -------
+         basin_hash: str
+             hash of the basin which serves as the name of the HDF5 dataset
+             stored in the output file
+
+         .. versionadded:: 0.58.0
+         """
+         if basin_type == "internal":
+             if internal_data is None:
+                 raise ValueError(
+                     "When writing an internal basin, you must specify "
+                     "`internal_data` which is either a dictionary of numpy "
+                     "arrays or an `h5py.Group` containing the relevant "
+                     "datasets.")
+             if (isinstance(internal_data, dict)
+                     or (isinstance(internal_data, h5py.Group)
+                         and internal_data.file != self.h5file)):
+                 # The data are not yet stored in this HDF5 file
+                 for feat in basin_feats:
+                     igroup = self.h5file.require_group("basin_events")
+                     if feat in igroup:
+                         raise ValueError(f"The feature '{feat}' already "
+                                          f"exists in the 'basin_events' "
+                                          f"group")
+                     self.write_ndarray(group=igroup,
+                                        name=feat,
+                                        data=internal_data[feat])
+                 # just override it with the default
+                 basin_locs = ["basin_events"]
+             elif verify:
+                 # Verify the existence of the data inside this HDF5 file
+                 if basin_locs != ["basin_events"]:
+                     warnings.warn("You specified an uncommon location for "
+                                   f"your internal basins: {basin_locs}. "
+                                   f"Please use 'basin_events' instead.")
+                 for feat in basin_feats:
+                     if feat not in self.h5file[basin_locs[0]]:
+                         raise ValueError(f"Could not find feature '{feat}' "
+                                          f"in the group [{basin_locs[0]}]")
+
+         # Expand optional tuple for basin_map
+         if isinstance(basin_map, (list, tuple)) and len(basin_map) == 2:
+             basin_map_name, basin_map = basin_map
+         else:
+             basin_map_name = None
+
+         if verify and basin_type in ["file", "remote"]:
+             # We have to import this here to avoid circular imports
+             from .load import new_dataset
+             # Make sure the basin can be opened by dclab, verify its ID
+             cur_id = self.h5file.attrs.get("experiment:run identifier")
+             for loc in basin_locs:
+                 with new_dataset(loc) as ds:
+                     # We can open the file, which is great.
+                     if cur_id:
+                         # Compare the IDs.
+                         ds_id = ds.get_measurement_identifier()
+                         if not (ds_id == cur_id
+                                 or (basin_map is not None
+                                     and cur_id.startswith(ds_id))):
+                             raise ValueError(
+                                 f"Measurement identifier mismatch between "
+                                 f"{self.path} ({cur_id}) and {loc} ({ds_id})!")
+
+         if basin_feats:
+             for feat in basin_feats:
+                 if not dfn.feature_exists(feat):
+                     raise ValueError(f"Invalid feature: '{feat}'")
+
+         if basin_map is not None:
+             if (not isinstance(basin_map, np.ndarray)
+                     or basin_map.dtype != np.uint64):
+                 raise ValueError(
+                     "The array specified in `basin_map` argument must be "
+                     "a numpy array with the dtype `np.uint64`!")
+
+         # determine the basinmap to use
+         if basin_map is not None:
+             self.h5file.require_group("events")
+             if basin_map_name is None:
+                 # We have to determine the basin_map_name to use for this
+                 # mapped basin.
+                 for ii in range(10):  # basinmap0 to basinmap9
+                     bm_cand = f"basinmap{ii}"
+                     if bm_cand in self.h5file["events"]:
+                         # There is a basin mapping defined in the file. Check
+                         # whether it is identical to ours.
+                         if np.all(self.h5file["events"][bm_cand]
+                                   == basin_map):
+                             # Great, we are done here.
+                             basin_map_name = bm_cand
+                             break
+                         else:
+                             # This mapping belongs to a different basin,
+                             # try the next mapping.
+                             continue
+                     else:
+                         # The mapping is not defined in the dataset, and we
+                         # may write it to a new feature.
+                         basin_map_name = bm_cand
+                         self.store_feature(feat=basin_map_name,
+                                            data=basin_map)
+                         break
+                 else:
+                     raise ValueError(
+                         "You have exhausted the usage of mapped basins for "
+                         "the current dataset. Please revise your analysis "
+                         "pipeline.")
+             else:
+                 if basin_map_name not in self.h5file["events"]:
+                     # Write the explicit basin mapping into the file.
+                     self.store_feature(feat=basin_map_name, data=basin_map)
+                 elif not np.all(
+                         self.h5file["events"][basin_map_name] == basin_map):
+                     # This is a sanity check that we have to perform.
+                     raise ValueError(
+                         f"The basin mapping feature {basin_map_name} you "
+                         f"specified explicitly already exists in "
+                         f"{self.h5file} and they do not match. I assume "
+                         f"you are trying to explicitly write to a basinmap "
+                         f"that is already used elsewhere.")
+         else:
+             # Classic, simple case
+             basin_map_name = "same"
+
+         b_data = {
+             "description": basin_descr,
+             "format": basin_format,
+             "name": basin_name,
+             "type": basin_type,
+             "features": None if basin_feats is None else sorted(basin_feats),
+             "mapping": basin_map_name,
+         }
+         if basin_type == "file":
+             flocs = []
+             for pp in basin_locs:
+                 pp = pathlib.Path(pp)
+                 if verify:
+                     flocs.append(str(pp.resolve()))
+                     # Also store the relative path for user convenience.
+                     # Don't use pathlib.Path.relative_to, because that
+                     # only has `walk_up` since Python 3.12.
+                     # Also, just look in subdirectories which simplifies
+                     # path resolution.
+                     this_parent = str(self.path.parent) + os.sep
+                     path_parent = str(pp.parent) + os.sep
+                     if path_parent.startswith(this_parent):
+                         flocs.append(str(pp).replace(this_parent, "", 1))
+                 else:
+                     # We already did (or did not upon user request) verify
+                     # the path. Just pass it on to the list.
+                     flocs.append(str(pp))
+             b_data["paths"] = flocs
+         elif basin_type == "internal":
+             b_data["paths"] = basin_locs
+         elif basin_type == "remote":
+             b_data["urls"] = [str(p) for p in basin_locs]
+         else:
+             raise ValueError(f"Unknown basin type '{basin_type}'")
+
+         b_lines = json.dumps(b_data, indent=2, sort_keys=True).split("\n")
+         basins = self.h5file.require_group("basins")
+         key = hashobj(b_lines)
+         if key not in basins:
+             self.write_text(basins, key, b_lines)
+         return key
+
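A hedged sketch of storing a file basin with this method (both file names are hypothetical; `verify=False` skips the identifier check, so the sketch does not require the basin file to be present):

    from dclab import RTDCWriter

    with RTDCWriter("condensed.rtdc") as hw:
        hw.store_basin(basin_name="raw data",
                       basin_type="file",
                       basin_format="hdf5",
                       basin_locs=["original.rtdc"],
                       basin_descr="original measurement with image data",
                       verify=False)  # don't resolve/open the basin now
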
+     def store_feature(self, feat, data, shape=None):
+         """Write feature data
+
+         Parameters
+         ----------
+         feat: str
+             feature name
+         data: np.ndarray or list or dict
+             feature data
+         shape: tuple of int
+             For non-scalar features, this is the shape of the
+             feature for one event (e.g. `(90, 250)` for an "image").
+             Usually, you do not have to specify this value, but you
+             do need it in case of plugin features that don't have
+             the "feature shape" set or in case of temporary features.
+             If you don't specify it, then the shape is guessed based
+             on the data you provide and a UserWarning will be issued.
+         """
+         if not dfn.feature_exists(feat):
+             raise ValueError(f"Undefined feature '{feat}'!")
+
+         events = self.h5file.require_group("events")
+
+         # replace data?
+         if feat in events and self.mode == "replace":
+             if feat == "trace":
+                 for tr_name in data.keys():
+                     if tr_name in events[feat]:
+                         del events[feat][tr_name]
+             else:
+                 del events[feat]
+
+         if feat in FEATURES_UINT32:
+             dtype = np.uint32
+         elif feat in FEATURES_UINT64:
+             dtype = np.uint64
+         else:
+             dtype = None
+
+         if feat == "index":
+             # By design, the index must be a simple enumeration.
+             # We enforce that by not trusting the user. If you need
+             # a different index, please take a look at the index_online
+             # feature.
+             nev = len(data)
+             if "index" in events:
+                 nev0 = len(events["index"])
+             else:
+                 nev0 = 0
+             self.write_ndarray(group=events,
+                                name="index",
+                                data=np.arange(nev0 + 1, nev0 + nev + 1),
+                                dtype=dtype)
+         elif dfn.scalar_feature_exists(feat):
+             self.write_ndarray(group=events,
+                                name=feat,
+                                data=np.atleast_1d(data),
+                                dtype=dtype)
+         elif feat == "contour":
+             self.write_ragged(group=events, name=feat, data=data)
+         elif feat in ["image", "image_bg", "mask", "qpi_oah", "qpi_oah_bg"]:
+             self.write_image_grayscale(group=events,
+                                        name=feat,
+                                        data=data,
+                                        is_boolean=(feat == "mask"))
+         elif feat in ["qpi_amp", "qpi_pha"]:
+             self.write_image_float32(group=events,
+                                      name=feat,
+                                      data=data)
+         elif feat == "trace":
+             for tr_name in data.keys():
+                 # verify trace names
+                 if tr_name not in dfn.FLUOR_TRACES:
+                     raise ValueError(f"Unknown trace key: '{tr_name}'!")
+                 # write trace
+                 self.write_ndarray(group=events.require_group("trace"),
+                                    name=tr_name,
+                                    data=np.atleast_2d(data[tr_name]),
+                                    dtype=dtype
+                                    )
+         else:
+             if not shape:
+                 # OK, so we are dealing with a plugin feature or a temporary
+                 # feature here. Now, we don't know the exact shape of that
+                 # feature, but we give the user the option to advertise
+                 # the shape of the feature in the plugin.
+                 # First, try to obtain the shape from the PluginFeature
+                 # (if that exists).
+                 for pf in PlugInFeature.get_instances(feat):
+                     if isinstance(pf, PlugInFeature):
+                         shape = pf.plugin_feature_info.get("feature shape")
+                         if shape is not None:
+                             break  # This is good.
+                 else:
+                     # Temporary features will have to live with this warning.
+                     warnings.warn(
+                         "There is no information about the shape of the "
+                         + f"feature '{feat}'. I am going out on a limb "
+                         + "for you and assume that you are storing "
+                         + "multiple events at a time. If this works, "
+                         + f"you could put the shape `{data[0].shape}` "
+                         + 'in the `info["feature shapes"]` key of '
+                         + "your plugin feature.")
+                     shape = data.shape[1:]
+             if shape == data.shape:
+                 data = data.reshape(1, *shape)
+             elif shape == data.shape[1:]:
+                 pass
+             else:
+                 raise ValueError(f"Bad shape for {feat}! Expected {shape}, "
+                                  + f"but got {data.shape[1:]}!")
+             self.write_ndarray(group=events, name=feat, data=data,
+                                dtype=dtype)
+
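To illustrate the dispatch above, a short sketch storing one scalar and one non-scalar feature (the feature names come from the dclab feature definitions; the file name and data values are invented):

    import numpy as np
    from dclab import RTDCWriter

    with RTDCWriter("output.rtdc", mode="reset") as hw:
        # scalar feature: one value per event
        hw.store_feature("area_um", np.linspace(50, 60, 5))
        # non-scalar feature: five 80x250 grayscale frames
        hw.store_feature("image", np.zeros((5, 80, 250), dtype=np.uint8))

On exit, `rectify_metadata` then fills in "experiment:event count" and the "imaging:roi size x/y" keys from the image shape.
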
+     def store_log(self, name, lines):
+         """Write log data
+
+         Parameters
+         ----------
+         name: str
+             name of the log entry
+         lines: list of str or str
+             the text lines of the log
+         """
+         log_group = self.h5file.require_group("logs")
+         self.write_text(group=log_group, name=name, lines=lines)
+
+     def store_metadata(self, meta):
+         """Store RT-DC metadata
+
+         Parameters
+         ----------
+         meta: dict-like
+             The metadata to store. Each key depicts a metadata section
+             name whose data is given as a dictionary, e.g.::
+
+                 meta = {"imaging": {"exposure time": 20,
+                                     "flash duration": 2,
+                                     ...
+                                     },
+                         "setup": {"channel width": 20,
+                                   "chip region": "channel",
+                                   ...
+                                   },
+                         ...
+                         }
+
+             Only section key names and key values therein registered
+             in dclab are allowed and are converted to the pre-defined
+             dtype. Only sections from the
+             :const:`dclab.definitions.CFG_METADATA` dictionary are
+             stored. If you have custom metadata, you can use the "user"
+             section.
+         """
+         meta = copy.deepcopy(meta)
+         # Ignore/remove tdms section
+         meta.pop("fmt_tdms", None)
+         # Check meta data
+         for sec in meta:
+             if sec == "user":
+                 # user-defined metadata are always written.
+                 # Any errors (incompatibilities with HDF5 attributes)
+                 # are the user's responsibility
+                 continue
+             elif sec not in dfn.CFG_METADATA:
+                 # only allow writing of meta data that are not editable
+                 # by the user (not dclab.dfn.CFG_ANALYSIS)
+                 raise ValueError(
+                     f"Meta data section not defined in dclab: {sec}")
+             for ck in meta[sec]:
+                 if not dfn.config_key_exists(sec, ck):
+                     raise ValueError(
+                         f"Meta key not defined in dclab: {sec}:{ck}")
+
+         # update version
+         old_version = meta.get("setup", {}).get("software version", "")
+         new_version = self.version_brand(
+             old_version=old_version or None,
+             write_attribute=False
+         )
+         meta.setdefault("setup", {})["software version"] = new_version
+
+         # Write metadata
+         for sec in meta:
+             for ck in meta[sec]:
+                 idk = f"{sec}:{ck}"
+                 value = meta[sec][ck]
+                 if isinstance(value, bytes):
+                     # We never store byte attribute values.
+                     # In this case, `convfunc` should be `str` or `lcstr` or
+                     # somesuch. But we don't test that, because no other
+                     # datatype competes with str for bytes.
+                     value = value.decode("utf-8")
+                 if sec == "user":
+                     # store user-defined metadata as-is
+                     self.h5file.attrs[idk] = value
+                 else:
+                     # pipe the metadata through the hard-coded converter
+                     # functions
+                     convfunc = dfn.get_config_value_func(sec, ck)
+                     self.h5file.attrs[idk] = convfunc(value)
+
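A brief sketch of the sectioned layout this method expects (section and key names follow `dclab.definitions.CFG_METADATA`; the file name and values are hypothetical, and the "user" section passes through unchecked):

    from dclab import RTDCWriter

    with RTDCWriter("output.rtdc") as hw:
        hw.store_metadata({
            "setup": {"channel width": 20,      # registered key, cast to float
                      "chip region": "channel"},
            "user": {"operator": "alice"},      # free-form user metadata
        })
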
+     def store_table(self, name, cmp_array, h5_attrs=None):
+         """Store a compound array table
+
+         Tables are semi-metadata. They may contain information collected
+         during a measurement (but with a lower temporal resolution) or
+         other tabular data relevant for a dataset. Tables have named
+         columns. Therefore, they can be represented as a numpy recarray,
+         and they should be stored as such in an HDF5 file (compound
+         dataset).
+
+         Parameters
+         ----------
+         name: str
+             Name of the table
+         cmp_array: np.recarray, h5py.Dataset, np.ndarray, or dict
+             If a np.recarray or h5py.Dataset are provided, then they
+             are written as-is to the file. If a dictionary is provided,
+             then the dictionary is converted into a numpy recarray.
+             If a numpy array is provided, then the array is written
+             as a raw table (no column names) to the file.
+         h5_attrs: dict, optional
+             Attributes to store alongside the corresponding HDF5 dataset
+         """
+         if h5_attrs is None:
+             h5_attrs = {}
+
+         if isinstance(cmp_array, np.recarray):
+             # A table is a compound array (np.recarray). If we are here,
+             # this means that the user passed an instance of np.recarray.
+             pass
+         elif isinstance(cmp_array, h5py.Dataset):
+             # An instance of h5py.Dataset (which we trust to be a proper
+             # compound dataset at this point). No additional steps needed.
+             h5_attrs.update(cmp_array.attrs)
+         elif isinstance(cmp_array, np.ndarray):
+             # A numpy array was passed. This usually means we have something
+             # that we can look at, so we add image tags.
+             h5_attrs['CLASS'] = np.bytes_('IMAGE')
+             h5_attrs['IMAGE_VERSION'] = np.bytes_('1.2')
+             h5_attrs['IMAGE_SUBCLASS'] = np.bytes_('IMAGE_GRAYSCALE')
+         elif isinstance(cmp_array, dict):
+             # The user passed a dict which we now have to convert to a
+             # compound dataset. We do this for the user's convenience.
+             # The user should not need to wade through these steps:
+             columns = list(cmp_array.keys())
+             # Everything should be floats in a table.
+             ds_dt = np.dtype({'names': columns,
+                               'formats': [np.float64] * len(columns)})
+             # We trust the user to provide a dictionary with one-dimensional
+             # lists or arrays of the same length.
+             tabsize = len(cmp_array[columns[0]])
+             tab_data = np.zeros((tabsize, len(columns)))
+             for ii, tab in enumerate(columns):
+                 tab_data[:, ii] = cmp_array[tab]
+             # Now create a new compound array (discarding the old dict)
+             cmp_array = np.rec.array(tab_data, dtype=ds_dt)
+         else:
+             raise NotImplementedError(
+                 f"Cannot convert {type(cmp_array)} to table!")
+
+         # data
+         group = self.h5file.require_group("tables")
+         tab = group.create_dataset(
+             name,
+             data=cmp_array,
+             fletcher32=True,
+             **self.compression_kwargs)
+
+         # metadata
+         if h5_attrs:
+             tab.attrs.update(h5_attrs)
+
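For the dict branch above, a sketch (the table name and columns are hypothetical; all values are cast to float64 columns of a recarray):

    from dclab import RTDCWriter

    with RTDCWriter("output.rtdc") as hw:
        hw.store_table("environment",
                       {"time": [0.0, 60.0, 120.0],
                        "temperature": [22.5, 22.6, 22.8]})
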
+     def version_brand(self, old_version=None, write_attribute=True):
+         """Perform version branding
+
+         Append a " | dclab X.Y.Z" to the "setup:software version"
+         attribute.
+
+         Parameters
+         ----------
+         old_version: str or None
+             By default, the version string is taken from the HDF5 file.
+             If set to a string, then this version is used instead.
+         write_attribute: bool
+             If True (default), write the version string to the
+             "setup:software version" attribute
+         """
+         if old_version is None:
+             old_version = self.h5file.attrs.get("setup:software version", "")
+         if isinstance(old_version, bytes):
+             old_version = old_version.decode("utf-8")
+         version_chain = [vv.strip() for vv in old_version.split("|")]
+         version_chain = [vv for vv in version_chain if vv]
+         cur_version = "dclab {}".format(version)
+
+         if version_chain:
+             if version_chain[-1] != cur_version:
+                 version_chain.append(cur_version)
+         else:
+             version_chain = [cur_version]
+         new_version = " | ".join(version_chain)
+         if write_attribute:
+             self.h5file.attrs["setup:software version"] = new_version
+         else:
+             return new_version
+
+     def write_image_float32(self, group, name, data):
+         """Write 32bit floating point image array
+
+         This function wraps :func:`RTDCWriter.write_ndarray`
+         and adds image attributes to the HDF5 file so HDFView
+         can display the images properly.
+
+         Parameters
+         ----------
+         group: h5py.Group
+             parent group
+         name: str
+             name of the dataset (e.g. "qpi_pha")
+         data: np.ndarray or list of np.ndarray
+             image data
+         """
+         if isinstance(data, (list, tuple)):
+             # images may be in lists
+             data = np.atleast_2d(data)
+
+         if len(data.shape) == 2:
+             # put single event in 3D array
+             data = data[np.newaxis]
+
+         dset = self.write_ndarray(group=group, name=name, data=data,
+                                   dtype=np.float32)
+
+         # Create and set image attributes:
+         # HDFView recognizes this as a series of images.
+         # Use np.bytes_ as per
+         # https://docs.h5py.org/en/stable/strings.html#compatibility
+         dset.attrs.create('CLASS', np.bytes_('IMAGE'))
+         dset.attrs.create('IMAGE_VERSION', np.bytes_('1.2'))
+         dset.attrs.create('IMAGE_SUBCLASS', np.bytes_('IMAGE_GRAYSCALE'))
+
+     def write_image_grayscale(self, group, name, data, is_boolean):
+         """Write grayscale image data to an HDF5 dataset
+
+         This function wraps :func:`RTDCWriter.write_ndarray`
+         and adds image attributes to the HDF5 file so HDFView
+         can display the images properly.
+
+         Parameters
+         ----------
+         group: h5py.Group
+             parent group
+         name: str
+             name of the dataset (e.g. "image")
+         data: np.ndarray or list of np.ndarray
+             image data
+         is_boolean: bool
+             whether the input data is of boolean nature
+             (e.g. mask data) - if so, data are converted to uint8
+         """
+         if isinstance(data, (list, tuple)):
+             # images may be in lists
+             data = np.atleast_2d(data)
+
+         if len(data.shape) == 2:
+             # put single event in 3D array
+             data = data.reshape(1, data.shape[0], data.shape[1])
+
+         if is_boolean:
+             # convert binary (mask) data to uint8
+             if data.__class__.__name__ == "H5MaskEvent":
+                 # (if we use `isinstance`, we get circular imports)
+                 # Be smart and directly write back the original data
+                 # (otherwise we would convert to bool and back to uint8).
+                 data = data.h5dataset
+             elif data.dtype == bool:
+                 # Convert binary input mask data to uint8 with max range
+                 data = np.asarray(data, dtype=np.uint8) * 255
+
+         dset = self.write_ndarray(group=group, name=name, data=data,
+                                   dtype=np.uint8)
+
+         # Create and set image attributes:
+         # HDFView recognizes this as a series of images.
+         # Use np.bytes_ as per
+         # https://docs.h5py.org/en/stable/strings.html#compatibility
+         dset.attrs.create('CLASS', np.bytes_('IMAGE'))
+         dset.attrs.create('IMAGE_VERSION', np.bytes_('1.2'))
+         dset.attrs.create('IMAGE_SUBCLASS', np.bytes_('IMAGE_GRAYSCALE'))
+
+     def write_ndarray(self, group, name, data, dtype=None):
+         """Write n-dimensional array data to an HDF5 dataset
+
+         It is assumed that the shape of the array data is correct,
+         i.e. that the shape of `data` is
+         (number_events, feat_shape_1, ..., feat_shape_n).
+
+         Parameters
+         ----------
+         group: h5py.Group
+             parent group
+         name: str
+             name of the dataset
+         data: np.ndarray
+             data
+         dtype: dtype
+             the dtype to use for storing the data
+             (defaults to `data.dtype`)
+         """
+         if len(data) == 0:
+             raise ValueError(f"Empty data object for '{name}'")
+
+         if name not in group:
+             chunks = self.get_best_nd_chunks(item_shape=data.shape[1:],
+                                              item_dtype=data.dtype)
+             maxshape = tuple([None] + list(data.shape)[1:])
+             dset = group.create_dataset(
+                 name,
+                 shape=data.shape,
+                 dtype=dtype or data.dtype,
+                 maxshape=maxshape,
+                 chunks=chunks,
+                 fletcher32=True,
+                 **self.compression_kwargs)
+             offset = 0
+         else:
+             dset = group[name]
+             offset = dset.shape[0]
+             dset.resize(offset + data.shape[0], axis=0)
+         if len(data.shape) == 1:
+             # store scalar data in one go
+             dset[offset:] = data
+             # store ufunc data for min/max
+             for uname, ufunc in [("min", np.nanmin),
+                                  ("max", np.nanmax)]:
+                 val_a = dset.attrs.get(uname, None)
+                 if val_a is not None:
+                     val_b = ufunc(data)
+                     val = ufunc([val_a, val_b])
+                 else:
+                     val = ufunc(dset)
+                 dset.attrs[uname] = val
+             # store ufunc data for mean (weighted with size)
+             mean_a = dset.attrs.get("mean", None)
+             if mean_a is not None:
+                 num_a = offset
+                 mean_b = np.nanmean(data)
+                 num_b = data.size
+                 mean = (mean_a * num_a + mean_b * num_b) / (num_a + num_b)
+             else:
+                 mean = np.nanmean(dset)
+             dset.attrs["mean"] = mean
+         else:
+             chunk_size = dset.chunks[0]
+             # populate higher-dimensional data in chunks
+             # (reduces file size, memory usage, and saves time)
+             num_chunks = len(data) // chunk_size
+             for ii in range(num_chunks):
+                 start = ii * chunk_size
+                 stop = start + chunk_size
+                 dset[offset+start:offset+stop] = data[start:stop]
+             # write remainder (if applicable)
+             num_remain = len(data) % chunk_size
+             if num_remain:
+                 start_e = num_chunks * chunk_size
+                 stop_e = start_e + num_remain
+                 dset[offset+start_e:offset+stop_e] = data[start_e:stop_e]
+         return dset
+
+     def write_ragged(self, group, name, data):
+         """Write ragged data (i.e. list of arrays of different lengths)
+
+         Ragged array data (e.g. contour data) are stored in
+         a separate group and each entry becomes an HDF5 dataset.
+
+         Parameters
+         ----------
+         group: h5py.Group
+             parent group
+         name: str
+             name of the group containing the ragged data
+         data: list of np.ndarray or np.ndarray
+             the data in a list
+         """
+         if isinstance(data, np.ndarray) and len(data.shape) == 2:
+             # place single event in list
+             data = [data]
+         grp = group.require_group(name)
+         # The following case is just a workaround for the very slow
+         # `len(grp)` which makes things horrible if you are storing
+         # contour data one-by-one. The only downside of this is that
+         # we have to keep track of the length of the group. But I
+         # think that is OK, since everything is very private here.
+         # - Paul (2021-10-18)
+         if grp not in self._group_sizes:
+             self._group_sizes[grp] = len(grp)
+         curid = self._group_sizes[grp]
+         for ii, cc in enumerate(data):
+             grp.create_dataset("{}".format(curid + ii),
+                                data=cc,
+                                fletcher32=True,
+                                chunks=cc.shape,
+                                **self.compression_kwargs)
+             self._group_sizes[grp] += 1
+
+     def write_text(self, group, name, lines):
+         """Write text to an HDF5 dataset
+
+         Text data are written as a fixed-length string dataset.
+
+         Parameters
+         ----------
+         group: h5py.Group
+             parent group
+         name: str
+             name of the dataset containing the text
+         lines: list of str or str
+             the text, line by line
+         """
+         # replace text?
+         if name in group and self.mode == "replace":
+             del group[name]
+
+         # handle strings
+         if isinstance(lines, (str, bytes)):
+             lines = [lines]
+
+         lnum = len(lines)
+         # Determine the maximum line length and use fixed-length strings,
+         # because compression and fletcher32 filters won't work with
+         # variable-length strings.
+         # https://github.com/h5py/h5py/issues/1948
+         # 100 is the recommended minimum and the default, because when
+         # `mode` is e.g. "append", the lines written now may not be the
+         # longest the dataset will ever hold.
+         max_length = 100
+         lines_as_bytes = []
+         for line in lines:
+             # convert lines to bytes
+             if not isinstance(line, bytes):
+                 lbytes = line.encode("UTF-8")
+             else:
+                 lbytes = line
+             max_length = max(max_length, len(lbytes))
+             lines_as_bytes.append(lbytes)
+
+         if name not in group:
+             # Create the dataset
+             txt_dset = group.create_dataset(
+                 name,
+                 shape=(lnum,),
+                 dtype=f"S{max_length}",
+                 maxshape=(None,),
+                 chunks=True,
+                 fletcher32=True,
+                 **self.compression_kwargs)
+             line_offset = 0
+         else:
+             # TODO: test whether fixed length is long enough!
+             # Resize the dataset
+             txt_dset = group[name]
+             line_offset = txt_dset.shape[0]
+             txt_dset.resize(line_offset + lnum, axis=0)
+
+         # Write the text data line-by-line
+         for ii, lbytes in enumerate(lines_as_bytes):
+             txt_dset[line_offset + ii] = lbytes
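
Putting the pieces together, a minimal end-to-end sketch of the writer API shown in this diff (the file name, metadata, and feature values are invented for illustration):

    import numpy as np
    from dclab import RTDCWriter

    with RTDCWriter("minimal.rtdc", mode="reset") as hw:
        hw.store_metadata({"experiment": {"sample": "demo", "run index": 1}})
        hw.store_feature("deform", np.random.rand(100))
        hw.store_log("processing", ["started analysis", "finished analysis"])
    # On exit, rectify_metadata() fills in "experiment:event count" and
    # version_brand() appends "dclab X.Y.Z" to "setup:software version".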