Rhapso 0.1.92__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
"""Module for parsing dataset info."""
|
|
2
|
+
|
|
3
|
+
from typing import Dict, List, OrderedDict, Tuple
|
|
4
|
+
|
|
5
|
+
import numpy as np
|
|
6
|
+
import xmltodict
|
|
7
|
+
import zarr
|
|
8
|
+
|
|
9
|
+
from . import link_utils
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class OmeZarrParser:
    """Parses voxel sizes, tile paths, and transforms out of OME-Zarr datasets."""

    @staticmethod
    def parse_transform(z, res) -> Dict[str, list]:
        """
        Look up the scale and translation transforms attached to one
        resolution level of an OME-Zarr dataset.

        Parameters
        ----------
        z : zarr.core.Array
            Zarr array representing a dataset.
        res : str
            Dataset resolution to parse.

        Returns
        -------
        Dict[str, list]
            Scale and translation data for the matching dataset, or an
            empty dict when no dataset matches ``res``.
        """
        # Metadata lives in .zattrs alongside the arrays.
        try:
            metadata = z.attrs.asdict()
        except KeyError:
            raise ValueError("OME-Zarr metadata not found.")

        multiscales = metadata.get("multiscales", [])
        if not multiscales:
            return {}

        for ds in multiscales[0].get("datasets", []):
            if ds["path"] != res:
                continue
            # Take the first transform of each kind, if present.
            scale = None
            translation = None
            for tf in ds.get("coordinateTransformations", []):
                if scale is None and tf["type"] == "scale":
                    scale = tf["scale"]
                elif translation is None and tf["type"] == "translation":
                    translation = tf["translation"]
            return {"scale": scale, "translation": translation}

        return {}

    @staticmethod
    def extract_info(
        s3_path: str,
    ) -> Tuple[tuple, Dict[int, str], Dict[int, np.ndarray]]:
        """
        Gather voxel sizes, tile paths, and (identity) tile transforms for
        a given OME-Zarr path.

        Parameters
        ----------
        s3_path : str
            Path to the OME-Zarr dataset.

        Returns
        -------
        Tuple[tuple, Dict[int, str], Dict[int, np.ndarray]]
            A tuple containing voxel sizes, tile paths, and tile offsets.
        """
        return (
            OmeZarrParser.extract_tile_vox_size(s3_path),
            OmeZarrParser.extract_tile_paths(s3_path),
            OmeZarrParser._get_identity_mats(s3_path),
        )

    @staticmethod
    def extract_tile_paths(zarr_path: str) -> Dict[int, str]:
        """
        Map consecutive tile indices onto the dataset's sorted group keys.

        Parameters
        ----------
        zarr_path : str
            Path to the Zarr dataset.

        Returns
        -------
        Dict[int, str]
            A dictionary mapping tile indices to their paths.
        """
        store = zarr.open(zarr_path, mode="r")
        return dict(enumerate(sorted(store.keys())))

    @staticmethod
    def extract_tile_vox_size(zarr_path: str) -> Tuple[float, float, float]:
        """
        Read the voxel size of the first tile in the dataset.

        Parameters
        ----------
        zarr_path : str
            Path to the Zarr dataset.

        Returns
        -------
        Tuple[float, float, float]
            A tuple representing the voxel size in the x, y, and z dimensions.
        """
        store = zarr.open(zarr_path, mode="r")
        first_tile = store[next(iter(store.keys()))]

        # Drop the first two scale entries and reverse the remainder —
        # presumably axes are (t, c, z, y, x), so this yields (x, y, z).
        scale = OmeZarrParser.parse_transform(first_tile, "0")["scale"]
        return tuple(reversed(scale[2:]))

    @staticmethod
    def _get_identity_mats(zarr_path: str) -> Dict[int, np.ndarray]:
        """
        Create a homogeneous identity matrix for each tile in the dataset.
        We need to do this because neuroglancer expects the offset to be
        encoded in the .zattrs. The transformation matrix in the viewer
        state should do nothing.

        Parameters
        ----------
        zarr_path : str
            Path to the Zarr dataset.

        Returns
        -------
        Dict[int, np.ndarray]
            A dictionary mapping tile indices to their offset matrices.
        """
        store = zarr.open(zarr_path, mode="r")
        tile_count = len(list(store.keys()))
        # np.eye(3, 4) == 3x3 identity with a zero translation column.
        return {i: np.eye(3, 4) for i in range(tile_count)}
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
class XmlParser:
    """Class for parsing BigStitcher XML datasets."""

    @staticmethod
    def _read_xml(xml_path: str) -> OrderedDict:
        """Read and parse a BDV XML file into a nested OrderedDict."""
        with open(xml_path, "r") as file:
            return xmltodict.parse(file.read())

    @staticmethod
    def _ensure_list(node) -> list:
        """
        Normalize xmltodict output: a tag with a single child element is
        parsed as a dict rather than a one-element list.
        """
        return node if isinstance(node, list) else [node]

    @staticmethod
    def extract_dataset_path(xml_path: str) -> str:
        """
        Parses BDV XML and extracts the dataset path.

        Parameters
        ----------
        xml_path : str
            Path to the XML file.

        Returns
        -------
        str
            Path of the dataset extracted from the XML.
        """
        data = XmlParser._read_xml(xml_path)
        dataset_path = data["SpimData"]["SequenceDescription"]["ImageLoader"][
            "zarr"
        ]
        return dataset_path["#text"]

    @staticmethod
    def extract_tile_paths(xml_path: str) -> Dict[int, str]:
        """
        Parses BDV XML and extracts a map of setup IDs to tile paths.

        Parameters
        ----------
        xml_path : str
            Path to the XML file.

        Returns
        -------
        Dict[int, str]
            Dictionary mapping tile IDs to their paths.
        """
        data = XmlParser._read_xml(xml_path)
        # BUGFIX: with a single zgroup, xmltodict returns a dict and
        # enumerate() would have iterated its keys instead of the group.
        zgroups = XmlParser._ensure_list(
            data["SpimData"]["SequenceDescription"]["ImageLoader"]["zgroups"][
                "zgroup"
            ]
        )
        return {idx: zgroup["path"] for idx, zgroup in enumerate(zgroups)}

    @staticmethod
    def extract_tile_vox_size(xml_path: str) -> Tuple[float, float, float]:
        """
        Parses BDV XML and extracts voxel sizes.

        Parameters
        ----------
        xml_path : str
            Path to the XML file.

        Returns
        -------
        Tuple[float, float, float]
            Tuple containing voxel sizes (x, y, z).
        """
        data = XmlParser._read_xml(xml_path)
        # BUGFIX: a single ViewSetup parses to a dict, on which [0] raised.
        view_setups = XmlParser._ensure_list(
            data["SpimData"]["SequenceDescription"]["ViewSetups"]["ViewSetup"]
        )
        vox_sizes: str = view_setups[0]["voxelSize"]["size"]
        # split() without an argument tolerates repeated whitespace.
        return tuple(float(val) for val in vox_sizes.split())

    @staticmethod
    def extract_tile_transforms(xml_path: str) -> Dict[int, List[dict]]:
        """
        Parses BDV XML and extracts a map of setup IDs to lists of
        transformations.

        Parameters
        ----------
        xml_path : str
            Path to the XML file.

        Returns
        -------
        Dict[int, List[dict]]
            Dictionary mapping tile IDs to lists of transformations,
            in reversed (application) order.
        """
        data = XmlParser._read_xml(xml_path)
        # BUGFIX: a single ViewRegistration parses to a dict; iterating it
        # directly would have yielded its string keys.
        view_regs = XmlParser._ensure_list(
            data["SpimData"]["ViewRegistrations"]["ViewRegistration"]
        )
        view_transforms: dict[int, list[dict]] = {}
        for view_reg in view_regs:
            tfm_stack = XmlParser._ensure_list(view_reg["ViewTransform"])
            # Reverse each stack so transforms are listed in the order
            # downstream consumers apply them.
            view_transforms[int(view_reg["@setup"])] = tfm_stack[::-1]

        return view_transforms

    @staticmethod
    def extract_info(
        xml_path: str,
    ) -> Tuple[tuple, Dict[int, str], Dict[int, np.ndarray]]:
        """
        Extracts voxel sizes, tile paths, and tile transforms from a given
        XML path.

        Parameters
        ----------
        xml_path : str
            Path to the BDV XML dataset.

        Returns
        -------
        Tuple[tuple, Dict[int, str], Dict[int, np.ndarray]]
            A tuple containing voxel sizes, tile paths, and tile offsets.
        """
        vox_sizes: tuple[float, float, float] = (
            XmlParser.extract_tile_vox_size(xml_path)
        )
        tile_paths: dict[int, str] = XmlParser.extract_tile_paths(xml_path)
        tile_transforms: dict[int, list[dict]] = (
            XmlParser.extract_tile_transforms(xml_path)
        )
        XmlParser.omit_initial_offsets(tile_transforms)
        net_transforms: dict[int, np.ndarray] = (
            link_utils.calculate_net_transforms(tile_transforms)
        )
        return vox_sizes, tile_paths, net_transforms

    @staticmethod
    def omit_initial_offsets(view_transforms: dict[int, list[dict]]) -> None:
        """
        For OME-Zarr datasets, initial offsets are
        already encoded in the metadata and extracted by neuroglancer.
        This function removes the duplicate transform, in place.

        Parameters
        ------------------------
        view_transforms: dict[int, list[dict]]
            Dictionary of tile ids to list of transforms.

        Returns
        ------------------------
        None
        """
        # Only the values are mutated; keys are irrelevant here.
        for tfs in view_transforms.values():
            tfs.pop(0)
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Library for generating raw link for visualizing tiles in nominal position.
|
|
3
|
+
"""
|
|
4
|
+
import numpy as np
|
|
5
|
+
from parsers import XmlParser
|
|
6
|
+
from . import link_utils
|
|
7
|
+
from ng_state import NgState
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def generate_raw_link(
    xml_path: str,
    s3_path: str,
    max_dr: int = 200,
    opacity: float = 1.0,
    blend: str = "default",
    output_json_path: str = ".",
    bucket_path: str = "aind-open-data",
) -> None:
    """Creates a neuroglancer link to visualize
    raw tile placements of one color channel defined in the input xml.

    Parameters
    ------------------------
    xml_path: str
        Path of xml outputted by BigStitcher.
    s3_path: str
        Path of s3 bucket where exaspim dataset is located.
    max_dr: int
        Upper bound of the shader's normalized intensity range.
    opacity: float
        Opacity of the image layer.
    blend: str
        Neuroglancer blend mode for the image layer.
    output_json_path: str
        Local path to write process_output.json file that neuroglancer reads.
    bucket_path: str
        S3 bucket name where the process_output.json will be uploaded.
        Default is "aind-open-data".

    Returns
    ------------------------
    None
    """
    # Gather xml info
    vox_sizes: tuple[float, float, float] = XmlParser.extract_tile_vox_size(
        xml_path
    )
    tile_paths: dict[int, str] = XmlParser.extract_tile_paths(xml_path)

    # Reference Pathstring:
    # "s3://aind-open-data/diSPIM_647459_2022-12-21_00-39-00/diSPIM.zarr"
    tile_transforms: dict[int, list[dict]] = {}

    # NOTE(review): assumes the dataset name is the fifth "/"-separated
    # component of s3_path, as in the reference pathstring above — confirm.
    dataset_name = s3_path.split("/")[4]
    dataset_type = dataset_name.split(".")[0]

    # Only diSPIM datasets carry placements in the XML; other types fall
    # through with an empty transform dict.
    if dataset_type == "diSPIM":
        tile_transforms = XmlParser.extract_tile_transforms(xml_path)

    net_transforms: dict[int, np.ndarray] = (
        link_utils.calculate_net_transforms(tile_transforms)
    )

    # Determine color
    channel: int = link_utils.extract_channel_from_tile_path(tile_paths[0])
    hex_val: int = link_utils.wavelength_to_hex(channel)
    # BUGFIX: zero-pad to six hex digits. The previous
    # f"#{str(hex(hex_val))[2:]}" dropped leading zeros, yielding invalid
    # CSS colors such as "#ff00" for 0x00ff00.
    hex_str = f"#{hex_val:06x}"

    # Generate input config
    layers = []  # Neuroglancer Tabs
    input_config = {
        "dimensions": {
            "x": {"voxel_size": vox_sizes[0], "unit": "microns"},
            "y": {"voxel_size": vox_sizes[1], "unit": "microns"},
            "z": {"voxel_size": vox_sizes[2], "unit": "microns"},
            "c'": {"voxel_size": 1, "unit": ""},
            "t": {"voxel_size": 0.001, "unit": "seconds"},
        },
        "layers": layers,
        "showScaleBar": False,
        "showAxisLines": False,
    }

    sources = []  # Tiles within tabs
    layers.append(
        {
            "type": "image",  # Optional
            "source": sources,
            "channel": 0,  # Optional
            "shaderControls": {
                "normalized": {"range": [0, max_dr]}
            },  # Optional # Exaspim has low HDR
            "shader": {"color": hex_str, "emitter": "RGB", "vec": "vec3"},
            "visible": True,  # Optional
            "opacity": opacity,
            "name": f"CH_{channel}",
            "blend": blend,
        }
    )

    for tile_id, t_path in tile_paths.items():
        source_dict = {"url": f"{s3_path}/{t_path}"}
        # Non-diSPIM tiles get no explicit transform (nominal position).
        if dataset_type == "diSPIM":
            net_tf = net_transforms[tile_id]
            final_transform = link_utils.convert_matrix_3x4_to_5x6(net_tf)
            source_dict["transform_matrix"] = final_transform.tolist()

        sources.append(source_dict)

    # Generate the link
    neuroglancer_link = NgState(
        input_config=input_config,
        mount_service="s3",
        bucket_path=bucket_path,
        output_dir=output_json_path,
    )
    neuroglancer_link.save_state_as_json()
    print(neuroglancer_link.get_url_link())
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Module containing utility functions to generate neuroglancer shader code
|
|
3
|
+
"""
|
|
4
|
+
from typing import Tuple
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def create_monochrome_shader(color: str, emitter: str, vec: str) -> str:
    """
    Build neuroglancer shader code that renders a monochromatic image.

    Parameters
    ------------------------
    color: str
        The color of the monochromatic image
    emitter: str
        suffix of WebGL emit* method being called (e.g. 'RGB')
    vec: str
        class of vector we want color returned as in WebGL
        (e.g. 'vec3')

    Returns
    ------------------------
    str
        String with the shader configuration for neuroglancer.
    """
    # UI controls first, then a main() that scales the chosen color by the
    # normalized intensity of the data.
    pieces = [
        f'#uicontrol {vec} color color(default="{color}")',
        "#uicontrol invlerp normalized",
        "void main() {\n" + f"emit{emitter}(color * normalized());" + "\n}",
    ]
    return "\n".join(pieces)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def create_rgb_shader(
    r_range: Tuple[int, int],
    g_range: Tuple[int, int],
    b_range: Tuple[int, int],
) -> str:
    """
    Return shader code for an RGB image with different dynamic
    ranges for each channel.

    Parameters
    ----------
    r_range: Tuple[int, int]
        Dynamic range of the R channel (min, max)
    g_range: Tuple[int, int]
        Dynamic range of the G channel (min, max)
    b_range: Tuple[int, int]
        Dynamic range of the B channel (min, max)

    Returns
    -------
    str
        String containing the shader code for a neuroglancer
        RGB image.
    """
    # One invlerp UI control per channel, each with its own range.
    controls = "\n".join(
        f"#uicontrol invlerp normalized_{ch}(range=[{rng[0]}, {rng[1]}])"
        for ch, rng in (("r", r_range), ("g", g_range), ("b", b_range))
    )
    # main() normalizes each channel of the data value and emits RGB.
    body = (
        "void main(){\n"
        "float r = normalized_r(getDataValue(0));\n"
        "float g = normalized_g(getDataValue(1));\n"
        "float b = normalized_b(getDataValue(2));\n"
        "emitRGB(vec3(r, g, b));\n}\n"
    )
    return controls + "\n" + body
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"""Transfer file to S3 bucket"""
|
|
2
|
+
import subprocess
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def copy_to_s3(file_loc: str, bucket: str = None):
    """Copy fileout to s3 bucket, generally from a
    /scratch location to an S3 URI

    Requires AWS CLI to be installed and configured with
    credentials.

    Parameters:
    ------------
    file_loc: str
        The location of the process_output.json to be copied.

    bucket: str
        The S3 Bucket URI that the masks will be copied to.
        When None, nothing is transferred.

    Raises:
    ------------
    FileNotFoundError
        If `file_loc` does not exist.
    subprocess.CalledProcessError
        If the AWS CLI copy exits with a nonzero status.
    """
    print("Copying to s3 bucket")
    file_path = Path(file_loc)
    # BUGFIX: raise instead of assert — asserts are stripped under
    # `python -O`, which would have let a bad path through.
    if not file_path.exists():
        raise FileNotFoundError(f"Fileout {file_path} does not exist.")

    if bucket is None:
        print(
            f"No bucket specified, segmentation masks at \
            {file_path} not transfered"
        )
        return

    # BUGFIX: pass an argument list with shell=False (default) instead of
    # an interpolated shell string — avoids injection via paths/URIs.
    cmd = ["aws", "s3", "cp", str(file_path), str(bucket)]
    try:
        # check=True: previously a failed copy still printed success.
        subprocess.run(cmd, check=True)
        print("*" * 70)
        print("Finished Copy segmentation masks to S3!")
        print("*" * 70)
    except Exception as e:
        print("Error copying to s3 bucket: ", e)
        raise e
|