volsegtools-0.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. volsegtools/__init__.py +0 -0
  2. volsegtools/_cli/__init__.py +1 -0
  3. volsegtools/_cli/molstar_preprocessor.py +79 -0
  4. volsegtools/abc/__init__.py +6 -0
  5. volsegtools/abc/converter.py +69 -0
  6. volsegtools/abc/data_handle.py +24 -0
  7. volsegtools/abc/downsampler.py +26 -0
  8. volsegtools/abc/kernel.py +8 -0
  9. volsegtools/abc/preprocessor.py +38 -0
  10. volsegtools/abc/serializer.py +12 -0
  11. volsegtools/converter/__init__.py +1 -0
  12. volsegtools/converter/map_converter.py +148 -0
  13. volsegtools/core/__init__.py +5 -0
  14. volsegtools/core/bounds.py +12 -0
  15. volsegtools/core/downsampling_parameters.py +28 -0
  16. volsegtools/core/gaussian_kernel_3D.py +16 -0
  17. volsegtools/core/lattice_kind.py +8 -0
  18. volsegtools/core/vector.py +9 -0
  19. volsegtools/downsampler/__init__.py +2 -0
  20. volsegtools/downsampler/base_downsampler.py +20 -0
  21. volsegtools/downsampler/hierarchy_downsampler.py +253 -0
  22. volsegtools/model/__init__.py +13 -0
  23. volsegtools/model/chunking_mode.py +16 -0
  24. volsegtools/model/metadata.py +50 -0
  25. volsegtools/model/opaque_data_handle.py +112 -0
  26. volsegtools/model/storing_parameters.py +52 -0
  27. volsegtools/model/working_store.py +142 -0
  28. volsegtools/preprocessor/__init__.py +2 -0
  29. volsegtools/preprocessor/preprocessor.py +75 -0
  30. volsegtools/preprocessor/preprocessor_builder.py +110 -0
  31. volsegtools/serialization/__init__.py +1 -0
  32. volsegtools/serialization/bcif_serializer.py +318 -0
  33. volsegtools/typing.py +12 -0
  34. volsegtools-0.0.0.dist-info/METADATA +22 -0
  35. volsegtools-0.0.0.dist-info/RECORD +38 -0
  36. volsegtools-0.0.0.dist-info/WHEEL +5 -0
  37. volsegtools-0.0.0.dist-info/entry_points.txt +2 -0
  38. volsegtools-0.0.0.dist-info/top_level.txt +1 -0
volsegtools/preprocessor/preprocessor.py ADDED
@@ -0,0 +1,75 @@
+ import asyncio
+ from pathlib import Path
+ from typing import List, Optional
+
+ from volsegtools.abc import Converter, Downsampler
+ from volsegtools.core.lattice_kind import LatticeKind
+ from volsegtools.model.working_store import WorkingStore
+ from volsegtools.serialization import BCIFSerializer
+
+
+ class Preprocessor:
+     state = {"downsampling_status": 0.0}
+
+     def __init__(
+         self,
+         downsampler: Downsampler,
+         converter: Converter,
+         volume_input_files: List[Path],
+         segmentation_input_files: List[Path],
+         metadata_input_files: Optional[List[Path]] = None,
+         annotations_input_files: Optional[List[Path]] = None,
+         work_dir: Optional[Path] = None,
+         output_dir: Optional[Path] = None,
+     ):
+         self.converter: Converter = converter
+         self._output_dir = output_dir if output_dir is not None else Path()
+         self._data = WorkingStore.instance
+         self.downsampler = downsampler
+         self._volume_input_files = volume_input_files
+         self._segmentation_input_files = segmentation_input_files
+         self._metadata_input_files = metadata_input_files
+         self._annotations_input_files = annotations_input_files
+         self._work_dir = work_dir
+
+     async def transform_volume(self):
+         raise NotImplementedError()
+
+     async def collect_metadata(self):
+         raise NotImplementedError()
+
+     def create_converter_from_format(self):
+         raise NotImplementedError()
+
+     async def transform_segmentation(self):
+         raise NotImplementedError()
+
+     async def preprocess(self):
+         raise NotImplementedError()
+
+     def sync_preprocess(self):
+         data = []
+
+         # 1. Data conversion phase
+         for path in self._volume_input_files:
+             data.append(asyncio.run(self.converter.transform_volume(path)))
+             data[-1].metadata = asyncio.run(self.converter.collect_metadata(path))
+             # FIX: This should be automatic!
+             data[-1].metadata.kind = LatticeKind.VOLUME
+         for path in self._segmentation_input_files:
+             data.append(asyncio.run(self.converter.transform_segmentation(path)))
+             data[-1].metadata = asyncio.run(self.converter.collect_metadata(path))
+             # FIX: This should be automatic!
+             data[-1].metadata.kind = LatticeKind.SEGMENTATION
+
+         downsampled_data = []
+         for ref in data:
+             # TODO: it has to be flattened!
+             downsampled_data += asyncio.run(self.downsampler.downsample_lattice(ref))
+         print("Downsampled Data:", downsampled_data)
+         # 4. Serialization Phase
+         for ref in downsampled_data:
+             asyncio.run(BCIFSerializer.serialize(ref, self._output_dir))
+
+     def downsample(self):
+         pass
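
Note that `sync_preprocess` above enters a fresh event loop for every awaited call via `asyncio.run`. The following is a minimal sketch, not part of the published package, of the same conversion → downsampling → serialization flow driven from a single event loop; it assumes only the interfaces used above, and the converter/downsampler objects and file paths in the commented driver line are placeholders.

import asyncio
from pathlib import Path
from typing import List

from volsegtools.core.lattice_kind import LatticeKind
from volsegtools.serialization import BCIFSerializer


async def run_pipeline(converter, downsampler, volume_files: List[Path], output_dir: Path) -> None:
    # Conversion phase: one data handle per input volume.
    handles = []
    for path in volume_files:
        handle = await converter.transform_volume(path)
        handle.metadata = await converter.collect_metadata(path)
        handle.metadata.kind = LatticeKind.VOLUME
        handles.append(handle)

    # Downsampling phase: each handle expands into a list of resolutions.
    downsampled = []
    for handle in handles:
        downsampled += await downsampler.downsample_lattice(handle)

    # Serialization phase: one BinaryCIF file per downsampled lattice.
    for handle in downsampled:
        await BCIFSerializer.serialize(handle, output_dir)


# Placeholder driver: supply real Converter/Downsampler instances and paths.
# asyncio.run(run_pipeline(converter, downsampler, [Path("volume.map")], Path("out")))
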
volsegtools/preprocessor/preprocessor_builder.py ADDED
@@ -0,0 +1,110 @@
+ from pathlib import Path
+ from typing import List
+
+ from typing_extensions import Self
+
+ from volsegtools.abc import Converter, Downsampler
+ from volsegtools.preprocessor import Preprocessor
+
+
+ class PreprocessorBuilder:
+     """Allows fine-grained specification of the preprocessor."""
+
+     def __init__(self) -> None:
+         self._work_dir: Path | None = None
+         self._output_dir: Path | None = None
+         self._volume_sources: List[Path] = []
+         self._segmentation_sources: List[Path] = []
+         self._annotations_sources: List[Path] = []
+         self._metadata_sources: List[Path] = []
+         self._converter: Converter | None = None
+         self._downsampler: Downsampler | None = None
+         # Currently, we only support a single preprocessor; later we
+         # could add support for a generic preprocessor.
+         # self._preprocessor_type: Optional[Preprocessor] = None
+
+     def set_converter(self, converter: Converter) -> Self:
+         """Sets the converter that is going to be used by the resulting
+         preprocessor.
+
+         If this method is called multiple times, it overrides the
+         previously set converter.
+         """
+         self._converter = converter
+         return self
+
+     def set_downsampler(self, downsampler: Downsampler) -> Self:
+         """Sets the downsampler that is going to be used by the resulting
+         preprocessor.
+
+         If this method is called multiple times, it overrides the
+         previously set downsampler.
+         """
+         self._downsampler = downsampler
+         return self
+
+     def add_volume_src_file(self, file_path: Path) -> Self:
+         """Adds a source file for volumetric data.
+
+         It should be used in cases where there are multiple files and
+         set_input_file cannot be used.
+         """
+         self._volume_sources.append(file_path)
+         return self
+
+     def add_segmentation_src_file(self, file_path: Path) -> Self:
+         """Adds a source file for segmentation data.
+
+         It should be used in cases where there are multiple files and
+         set_input_file cannot be used.
+         """
+         self._segmentation_sources.append(file_path)
+         return self
+
+     def add_metadata_src_file(self, file_path: Path) -> Self:
+         """Adds a source file for metadata.
+
+         It should be used in cases where there are multiple files and
+         set_input_file cannot be used.
+         """
+         return self
+
+     def add_annotations_src_file(self, file_path: Path) -> Self:
+         """Adds a source file for annotations.
+
+         It should be used in cases where there are multiple files and
+         set_input_file cannot be used.
+         """
+         return self
+
+     def set_work_dir(self, file_path: Path) -> Self:
+         """Sets the working directory of the preprocessor.
+
+         It is added to the search path for the preprocessor's source
+         files. The output of the preprocessor is also going to be
+         saved at this location.
+         """
+         return self
+
+     def set_output_dir(self, file_path: Path) -> Self:
+         self._output_dir = file_path
+         return self
+
+     def build(self) -> Preprocessor:
+         """Builds the resulting preprocessor."""
+         if self._downsampler is None:
+             raise RuntimeError("Downsampler was not set")
+
+         if self._converter is None:
+             raise RuntimeError("Converter was not set")
+
+         return Preprocessor(
+             self._downsampler,
+             self._converter,
+             self._volume_sources,
+             self._segmentation_sources,
+             self._metadata_sources,
+             self._annotations_sources,
+             self._work_dir,
+             self._output_dir,
+         )
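
A hedged sketch of how this builder would typically be wired up follows; the converter/downsampler instances and file names are placeholders, and `PreprocessorBuilder` is imported via its module path because the package's `__init__` re-exports are not shown in this diff. Note that, as published, `add_metadata_src_file`, `add_annotations_src_file`, and `set_work_dir` return `self` without recording the given path.

from pathlib import Path

from volsegtools.preprocessor.preprocessor_builder import PreprocessorBuilder

# `converter` and `downsampler` stand for any Converter / Downsampler
# implementations (e.g. from volsegtools.converter / volsegtools.downsampler).
preprocessor = (
    PreprocessorBuilder()
    .set_converter(converter)
    .set_downsampler(downsampler)
    .add_volume_src_file(Path("example_volume.map"))  # placeholder path
    .add_segmentation_src_file(Path("example_segmentation.map"))  # placeholder path
    .set_output_dir(Path("out"))
    .build()
)
preprocessor.sync_preprocess()
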
volsegtools/serialization/__init__.py ADDED
@@ -0,0 +1 @@
+ from .bcif_serializer import BCIFSerializer
volsegtools/serialization/bcif_serializer.py ADDED
@@ -0,0 +1,318 @@
+ import dataclasses
+ import sys
+ from pathlib import Path
+ from typing import Collection
+
+ import ciftools
+ import ciftools.serialization
+ import numpy as np
+ from ciftools.binary.decoder import ByteArrayEncoding, DataType
+ from ciftools.binary.encoder import BYTE_ARRAY, BinaryCIFEncoder, DataTypeEnum
+ from ciftools.binary.writer import EncodedCIFData
+ from ciftools.models.writer import CIFCategoryDesc as CategoryDesc
+ from ciftools.models.writer import CIFFieldDesc as Field
+
+ from volsegtools.abc import Serializer
+ from volsegtools.model import (
+     ChannelMetadata,
+     OpaqueDataHandle,
+     OriginalTimeFrameMetadata,
+     TimeFrameMetadata,
+ )
+ from volsegtools.model.working_store import WorkingStore
+
+
+ @dataclasses.dataclass
+ class VolumeDataBatch:
+     original_metadata: OriginalTimeFrameMetadata
+     target_metadata: TimeFrameMetadata
+     channel: ChannelMetadata
+
+
+ class DummyVolumeServerEncoder(BinaryCIFEncoder):
+     def encode(self, data: np.ndarray) -> EncodedCIFData:
+         data_type: DataTypeEnum = DataType.from_dtype(data.dtype)
+         encoding: ByteArrayEncoding = {
+             "kind": "VolumeServer",
+             "type": data_type,
+         }
+
+         bo = data.dtype.byteorder
+         if bo == ">" or (bo == "=" and sys.byteorder == "big"):
+             new_bo = data.dtype.newbyteorder("<")
+             data = np.array(data, dtype=new_bo)
+
+         return EncodedCIFData(data=data.tobytes(), encoding=[encoding])
+
+
+ DUMMY_VOLUME_SERVER = DummyVolumeServerEncoder()
+
+
+ class VolumeData3DInfoDesc(CategoryDesc):
+     name = "volume_data_3d_info"
+
+     @staticmethod
+     def get_row_count(_) -> int:
+         return 1
+
+     @staticmethod
+     def get_field_descriptors(data: VolumeDataBatch) -> Collection[Field]:
+         volume_server_encoder = lambda _: BYTE_ARRAY
+         return [
+             Field.strings(
+                 name="name",
+                 value=lambda d, i: str(data.target_metadata.id),
+             ),
+             Field.numbers(
+                 name="axis_order[0]",
+                 value=lambda d, i: data.target_metadata.axis_order.x,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="axis_order[1]",
+                 value=lambda d, i: data.target_metadata.axis_order.y,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="axis_order[2]",
+                 value=lambda d, i: data.target_metadata.axis_order.z,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="origin[0]",
+                 value=lambda d, i: data.target_metadata.origin.x,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="origin[1]",
+                 value=lambda d, i: data.target_metadata.origin.y,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="origin[2]",
+                 value=lambda d, i: data.target_metadata.origin.z,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="dimensions[0]",
+                 value=lambda d, i: 1,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="dimensions[1]",
+                 value=lambda d, i: 1,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="dimensions[2]",
+                 value=lambda d, i: 1,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="sample_rate",
+                 value=lambda d, i: data.target_metadata.resolution,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="sample_count[0]",
+                 value=lambda d, i: data.target_metadata.lattice_dimensions.x,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="sample_count[1]",
+                 value=lambda d, i: data.target_metadata.lattice_dimensions.y,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="sample_count[2]",
+                 value=lambda d, i: data.target_metadata.lattice_dimensions.z,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="spacegroup_number",
+                 value=lambda d, i: 1,
+                 encoder=volume_server_encoder,
+                 dtype="i4",
+             ),
+             Field.numbers(
+                 name="spacegroup_cell_size[0]",
+                 value=lambda d, i: data.target_metadata.voxel_size.x,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="spacegroup_cell_size[1]",
+                 value=lambda d, i: data.target_metadata.voxel_size.y,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="spacegroup_cell_size[2]",
+                 value=lambda d, i: data.target_metadata.voxel_size.z,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="spacegroup_cell_angles[0]",
+                 value=lambda d, i: 90,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="spacegroup_cell_angles[1]",
+                 value=lambda d, i: 90,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="spacegroup_cell_angles[2]",
+                 value=lambda d, i: 90,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="mean_source",
+                 value=lambda d, i: data.channel.statistics.mean,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="mean_sampled",
+                 value=lambda d, i: data.channel.statistics.mean,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="sigma_source",
+                 value=lambda d, i: data.channel.statistics.std,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="sigma_sampled",
+                 value=lambda d, i: data.channel.statistics.std,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="min_source",
+                 value=lambda d, i: data.channel.statistics.min,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="min_sampled",
+                 value=lambda d, i: data.channel.statistics.min,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="max_source",
+                 value=lambda d, i: data.channel.statistics.max,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+             Field.numbers(
+                 name="max_sampled",
+                 value=lambda d, i: data.channel.statistics.max,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+         ]
+
+
+ class DensityServerResultDesc(CategoryDesc):
+     name = "density_server_result"
+
+     @staticmethod
+     def get_row_count(_) -> int:
+         return 1
+
+     @staticmethod
+     def get_field_descriptors(data) -> Collection[Field]:
+         return [
+             Field.strings(name="query_type", value=lambda d, i: "box"),
+         ]
+
+
+ class VolumeData3DDesc(CategoryDesc):
+     name = "volume_data_3d"
+
+     @staticmethod
+     def get_row_count(data: np.ndarray) -> int:
+         return data.size
+
+     @staticmethod
+     def get_field_descriptors(data: np.ndarray) -> Collection[Field]:
+         volume_server_encoder = lambda _: BYTE_ARRAY
+         return [
+             Field.number_array(
+                 name="values",
+                 array=lambda volume: volume,
+                 encoder=volume_server_encoder,
+                 dtype="f8",
+             ),
+         ]
+
+
+ class BCIFSerializer(Serializer):
+     @staticmethod
+     async def serialize(data: OpaqueDataHandle, output_path: Path) -> None:
+         # This is currently working only for volumes!
+         for channel in data.metadata.channels:
+             data_batch = VolumeDataBatch(
+                 # TODO: THERE HAS TO BE SOME EQUIVALENT
+                 # data.metadata.original_time_frame,
+                 data.metadata,
+                 data.metadata,
+                 channel,
+             )
+             writer = ciftools.serialization.create_binary_writer()
+
+             # We have to create the SERVER category, because it is required, we
+             # just have to say that it is a box
+             writer.start_data_block("SERVER")
+             # Adding a dummy
+             writer.write_category(DensityServerResultDesc, [np.arange(0)])
+
+             writer.start_data_block("VOLUME")
+             writer.write_category(VolumeData3DInfoDesc, [data_batch])
+
+             # TODO: prepend with WorkingStore path
+             metadata_file_name = Path(
+                 "{}_r{}_tf{}_metadata.json".format(
+                     data.metadata.lattice_id,
+                     data.metadata.resolution,
+                     data.metadata.id,
+                 )
+             )
+             metadata_file_name.write_text(str(dataclasses.asdict(data_batch)))
+
+             lattice = WorkingStore.instance.get_data_array(
+                 data.metadata.lattice_id,
+                 data.metadata.resolution,
+                 data.metadata.id,
+                 int(channel.id),
+             )
+             # We have to make the array 1D
+
+             print("COUNT BEFORE STORE:", np.count_nonzero(np.ravel(lattice)))
+             np.savetxt("data.csv", np.ravel(lattice), delimiter=",")
+
+             writer.write_category(VolumeData3DDesc, [np.ravel(lattice, "F")])
+
+             file_name = f"{data.metadata.lattice_id}_r{data.metadata.resolution}_tf{data.metadata.id}.bcif"
+             (output_path / file_name).write_bytes(writer.encode())
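
`serialize` is an async static method that writes one `<lattice_id>_r<resolution>_tf<id>.bcif` file into the given output directory; a caller outside an event loop would invoke it roughly as sketched below, where `handle` stands for any `OpaqueDataHandle` produced by the downsampling step (it is not defined here) and the output path is arbitrary.

import asyncio
from pathlib import Path

from volsegtools.serialization import BCIFSerializer

# `handle` is an OpaqueDataHandle from the downsampling step; the output
# file is named <lattice_id>_r<resolution>_tf<id>.bcif in the directory.
asyncio.run(BCIFSerializer.serialize(handle, Path("out")))
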
volsegtools/typing.py ADDED
@@ -0,0 +1,12 @@
+ from typing import Union
+
+ import numpy as np
+
+ StorableDType = Union[
+     np.uint8,
+     np.uint16,
+     np.uint32,
+     np.int8,
+     np.int16,
+     np.int32,
+ ]
volsegtools-0.0.0.dist-info/METADATA ADDED
@@ -0,0 +1,22 @@
+ Metadata-Version: 2.4
+ Name: volsegtools
+ Version: 0.0.0
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ Requires-Dist: dask
+ Requires-Dist: dask_image
+ Requires-Dist: zarr
+ Requires-Dist: numpy
+ Requires-Dist: mrcfile
+ Requires-Dist: nibabel
+ Requires-Dist: pydantic
+ Requires-Dist: typer
+ Requires-Dist: ciftools
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+
+ # Volseg Tools
+
+ A library for preprocessing volumes and segmentations for downstream use.
volsegtools-0.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,38 @@
+ volsegtools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ volsegtools/typing.py,sha256=-m0FLalCKGUT--tGct_pSRtw7JnUzaIBMh0BfcRVekM,156
+ volsegtools/_cli/__init__.py,sha256=wIdwA2jyn2aw9LTygSdI35VEAwUpnNMQtM-aHei8o_8,38
+ volsegtools/_cli/molstar_preprocessor.py,sha256=u_f9IlWwDpvJBojFowU1IrpxAjBkbGYCZWdZM5q69uI,2461
+ volsegtools/abc/__init__.py,sha256=xJNQ2c2Fbw6G34CvD-hPhKluUB4Mb6aA5U1_oBIRVB4,218
+ volsegtools/abc/converter.py,sha256=J-GocTeHAENHPfDq9CKl-voAcPcv8lg_kO6wOkFPs_0,1819
+ volsegtools/abc/data_handle.py,sha256=0gBw0U6NdH6s0fcq3CLPG2iOEndWmJNYL6r3yrRNIwo,542
+ volsegtools/abc/downsampler.py,sha256=uWb5nXBOldbIa0O023setj_IijIb4uTMBcLUn6c3GMQ,609
+ volsegtools/abc/kernel.py,sha256=EQupdor1unWJL7E5Wyso_JVrKrArY-9DXZlFRcpmW78,135
+ volsegtools/abc/preprocessor.py,sha256=_CJmI1ZA6Xqek5iKfLeEWE5f9Hserkhx2vGqNWVykZU,684
+ volsegtools/abc/serializer.py,sha256=7rk0liOYn-wDhLKD_KYdqCnjK0pJZjWxTXXrVKnXjbI,307
+ volsegtools/converter/__init__.py,sha256=1IAdRYtZ8E9SbnJ7xjYsuE7Deouaq3QfpHR6gcVaC6M,40
+ volsegtools/converter/map_converter.py,sha256=aWEEtaPw6qxbnfA227FzwWyLxp3pD2-6c9D1MphKnd8,4822
+ volsegtools/core/__init__.py,sha256=fww5lVXbV9I-zin5DbKv2izYXOdL5Fcn7HINVxqK1Zc,212
+ volsegtools/core/bounds.py,sha256=WDkgVJhy_q40RMJtohkkMS2doro6jEjGamUQN8VGjQg,334
+ volsegtools/core/downsampling_parameters.py,sha256=tv_A6nHHMWp6DcOaLkXLKrLqjAaZ66Yd1oL585fqWtA,821
+ volsegtools/core/gaussian_kernel_3D.py,sha256=vUg3dosqm05_fY3C4j-YH-T0dTxG1hDy5MTXF_0JBIg,519
+ volsegtools/core/lattice_kind.py,sha256=FqF-MCulLMOPajbmjKLTN8Vmj6WXPO8REE3miWZmXJk,93
+ volsegtools/core/vector.py,sha256=U7F6WxtWHlTapsWG-v1JPKqIDtLwPoV3QZ1Z6rZBMuQ,138
+ volsegtools/downsampler/__init__.py,sha256=f7V9d8-sPUOYAOf_ayFvSseAR4QMoXNIl6aIWMX0Otk,102
+ volsegtools/downsampler/base_downsampler.py,sha256=zYMgnJgJjR5C-mvM1QssmU7wlYLxXPrqqfFAGPM-exc,655
+ volsegtools/downsampler/hierarchy_downsampler.py,sha256=LeNNv2-7gHMxaZxQTzhZObDVilDvJdgSh_AlY_Vw6GE,9118
+ volsegtools/model/__init__.py,sha256=JsoHfio1E9UdiFSGBXfH-NzKAhrkGbLlohcV24m2o-8,428
+ volsegtools/model/chunking_mode.py,sha256=V6Bho2uvGvtX16Ggj9xdIQp7nwXyoJmSNwFmVboF7mg,185
+ volsegtools/model/metadata.py,sha256=XJFdJGensZXt9v65BeU4Tzy55kPVJlCgbWbxgkxyYmc,1475
+ volsegtools/model/opaque_data_handle.py,sha256=Mo_OzT49tdDoRyll5_71R_HF8-n7KwqlB7ZgrvqcxtE,3164
+ volsegtools/model/storing_parameters.py,sha256=nz5Qgi4UJGck1-59NpASxeWdepFra79HkQ-BGYb0-NA,1907
+ volsegtools/model/working_store.py,sha256=Rq5GL1rT0hRwKQ5QUe1hsYckFA7BBmQMEcc5FleYNi0,4706
+ volsegtools/preprocessor/__init__.py,sha256=xErdaranOjR9Ea4accLRynbzfxWzDh_OovWWsR3SdsQ,93
+ volsegtools/preprocessor/preprocessor.py,sha256=p3BiW6zb9jkMg-2MEnr_VKhy-esiB452srOKDWkkQgU,2747
+ volsegtools/preprocessor/preprocessor_builder.py,sha256=QvhXNMPkkn6ce5TQ42mbfOnsAqYDF0b24h4nZ_lpSy4,3739
+ volsegtools/serialization/__init__.py,sha256=Rg_vB2cJ0dj29B8kKT-chgyNTNbUmUa4tT5Pc6WqEZg,44
+ volsegtools/serialization/bcif_serializer.py,sha256=o25p-XpIdKHEFBQeFVr-3RpbYTiVQ_EWK97yi5QP794,10878
+ volsegtools-0.0.0.dist-info/METADATA,sha256=U4wyqLU0YndiwiM20caeL6SMI7s0LiJsnL9kAZqngis,571
+ volsegtools-0.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ volsegtools-0.0.0.dist-info/entry_points.txt,sha256=REupaYQqcapvrApUyRFL1xFo4Iq9zD3KymP2n6PzOxQ,62
+ volsegtools-0.0.0.dist-info/top_level.txt,sha256=xy_7ax33CZyn-ATVZRUtg12EkNct768TJgkVr5EQRCU,12
+ volsegtools-0.0.0.dist-info/RECORD,,
volsegtools-0.0.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.10.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
volsegtools-0.0.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ molstar-preprocessor = volsegtools._cli:app
volsegtools-0.0.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ volsegtools