rhapso-0.1.92-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
@@ -0,0 +1,1123 @@ Rhapso/fusion/neuroglancer_link_gen/ng_state.py
"""
Class to represent a configuration state to visualize data in neuroglancer
"""
import re
from itertools import combinations
from pathlib import Path
from typing import List, Optional, Union

import numpy as np
import xmltodict
from pint import UnitRegistry

from .ng_layer import NgLayer
from .utils import utils

# IO types
PathLike = Union[str, Path]

class NgState:
    """
    Class to represent a neuroglancer state (configuration json)
    """

    def __init__(
        self,
        input_config: dict,
        mount_service: str,
        bucket_path: str,
        output_dir: PathLike,
        verbose: Optional[bool] = False,
        base_url: Optional[str] = "https://neuroglancer-demo.appspot.com/",
        json_name: Optional[str] = "process_output.json",
        dataset_name: Optional[str] = None,
    ) -> None:
        """
        Class constructor

        Parameters
        ------------------------
        input_config: dict
            Dictionary with the json configuration based on neuroglancer docs.
        mount_service: str
            Could be 'gs' for a bucket in Google Cloud or 's3' in Amazon.
        bucket_path: str
            Path in cloud service where the dataset will be saved
        output_dir: PathLike
            Directory where the json will be written.
        verbose: Optional[bool]
            If true, additional information will be shown. Default False.
        base_url: Optional[str]
            Neuroglancer service url
        json_name: Optional[str]
            Name of json file with neuroglancer configuration
        dataset_name: Optional[str]
            Name of the dataset. If None, the name of the output_dir directory will be used.

        """

        self.input_config = input_config
        self.output_json = Path(self.__fix_output_json_path(output_dir))
        self.verbose = verbose
        self.mount_service = mount_service
        self.bucket_path = bucket_path
        self.base_url = base_url
        self.json_name = json_name
        # Component after the S3 bucket and before the filename
        # in the "ng_link" field of the output JSON
        self.dataset_name = dataset_name
        if self.dataset_name is None:
            self.dataset_name = Path(self.output_json).stem

        # State and layers attributes
        self.__state = {}
        self.__dimensions = {}
        self.__layers = []

        # Initialize principal attributes
        self.initialize_attributes(self.input_config)

    def __fix_output_json_path(self, output_json: PathLike) -> str:
        """
        Fixes the json output path to have a similar structure for all links.

        Parameters
        ------------------------
        output_json: PathLike
            Path of the json output path.

        Returns
        ------------------------
        str
            String with the fixed output path.
        """
        output_json = Path(
            str(output_json)
            .replace("/home/jupyter/", "")
            .replace("////", "//")
        )

        return str(output_json)

    def __unpack_axis(
        self, axis_values: dict, dest_metric: Optional[str] = "meters"
    ) -> List:
        """
        Unpack axis voxel sizes converting them to meters.
        neuroglancer uses meters by default.

        Parameters
        ------------------------
        axis_values: dict
            Dictionary with the axis values with
            the following structure for an axis:
            e.g. for Z dimension {
                "voxel_size": 2.0,
                "unit": 'microns'
            }

        dest_metric: Optional[str]
            Destination metric to be used in neuroglancer. Default 'meters'.

        Returns
        ------------------------
        List
            List with two values, the converted quantity
            and its metric in neuroglancer format.
        """

        if dest_metric not in ["meters", "seconds"]:
            raise NotImplementedError(
                f"{dest_metric} has not been implemented"
            )

        # Converting to desired metric
        unit_register = UnitRegistry()
        quantity = (
            axis_values["voxel_size"] * unit_register[axis_values["unit"]]
        )
        dest_quantity = quantity.to(dest_metric)

        # Neuroglancer metric
        neuroglancer_metric = None
        if dest_metric == "meters":
            neuroglancer_metric = "m"

        elif dest_metric == "seconds":
            neuroglancer_metric = "s"

        return [dest_quantity.m, neuroglancer_metric]

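    # Editorial illustration (sketch, not part of the original file): the
    # pint conversion above normalizes a voxel size declared in any unit
    # into the base unit neuroglancer expects. Mirroring the registry usage
    # in __unpack_axis:
    #
    #     ureg = UnitRegistry()
    #     (2.0 * ureg["microns"]).to("meters").m   # -> 2e-06
    #
    # so an axis config {"voxel_size": 2.0, "unit": "microns"} comes back
    # from __unpack_axis as [2e-06, "m"].
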
    @property
    def dimensions(self) -> dict:
        """
        Property getter of dimensions.

        Returns
        ------------------------
        dict
            Dictionary with neuroglancer dimensions' configuration.
        """
        return self.__dimensions

    @dimensions.setter
    def dimensions(self, new_dimensions: dict) -> None:
        """
        Set dimensions with voxel sizes for the image.

        Parameters
        ------------------------
        new_dimensions: dict
            Dictionary with the axis values
            with the following structure for an axis:
            e.g. for Z dimension {
                "voxel_size": 2.0,
                "unit": 'microns'
            }

        """

        if not isinstance(new_dimensions, dict):
            raise ValueError(
                f"Dimensions accepts only dict. Received: {new_dimensions}"
            )

        regex_axis = r"([x-zX-Z])$"

        for axis, axis_values in new_dimensions.items():
            if re.search(regex_axis, axis):
                self.__dimensions[axis] = self.__unpack_axis(axis_values)
            elif axis == "t":
                self.__dimensions[axis] = self.__unpack_axis(
                    axis_values, "seconds"
                )
            elif axis == "c'":
                self.__dimensions[axis] = [
                    axis_values["voxel_size"],
                    axis_values["unit"],
                ]

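    # Editorial sketch (illustration, not in the original source): per the
    # setter above, a spatial axis is any name matching r"([x-zX-Z])$"
    # (i.e. ending in x, y, or z), "t" is converted to seconds, and the
    # channel axis "c'" passes through unchanged. For example:
    #
    #     {"z": {"voxel_size": 2.0, "unit": "microns"},
    #      "t": {"voxel_size": 0.001, "unit": "seconds"},
    #      "c'": {"voxel_size": 1, "unit": ""}}
    #
    # becomes
    #
    #     {"z": [2e-06, "m"], "t": [0.001, "s"], "c'": [1, ""]}
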
    @property
    def layers(self) -> List[dict]:
        """
        Property getter of layers.

        Returns
        ------------------------
        List[dict]
            List with neuroglancer layers' configuration.
        """
        return self.__layers

    @layers.setter
    def layers(self, layers: List[dict]) -> None:
        """
        Property setter of layers.

        Parameters
        ------------------------
        layers: List[dict]
            List that contains a configuration for each image layer.

        """

        if not isinstance(layers, list):
            raise ValueError(
                f"layers accepts only list. Received value: {layers}"
            )

        for layer in layers:
            config = {}

            if layer["type"] == "image":
                config = {
                    "image_config": layer,
                    "mount_service": self.mount_service,
                    "bucket_path": self.bucket_path,
                    "output_dimensions": self.dimensions,
                    "layer_type": layer["type"],
                }

            elif layer["type"] == "annotation":
                config = {
                    "annotation_source": layer["source"],
                    "annotation_locations": layer["annotations"],
                    "layer_type": layer["type"],
                    "output_dimensions": self.dimensions,
                    "limits": layer["limits"] if "limits" in layer else None,
                    "mount_service": self.mount_service,
                    "bucket_path": self.bucket_path,
                    "layer_name": layer["name"],
                }

            elif layer["type"] == "segmentation":
                config = {
                    "segmentation_source": layer["source"],
                    "tab": layer["tab"],
                    "layer_name": layer["name"],
                    "mount_service": self.mount_service,
                    "bucket_path": self.bucket_path,
                    "layer_type": layer["type"],
                }

            self.__layers.append(NgLayer().create(config).layer_state)

    @property
    def state(self) -> dict:
        """
        Property getter of state.

        Returns
        ------------------------
        dict
            Dictionary with the actual layer state.
        """

        actual_state = {}
        actual_state["ng_link"] = self.get_url_link()
        actual_state["dimensions"] = {}

        # Getting actual state for all attributes
        for axis, value_list in self.__dimensions.items():
            actual_state["dimensions"][axis] = value_list

        actual_state["layers"] = self.__layers

        actual_state["showAxisLines"] = True
        actual_state["showScaleBar"] = True

        return actual_state

    @state.setter
    def state(self, new_state: dict) -> None:
        """
        Property setter of state.

        Parameters
        ------------------------
        new_state: dict
            Dictionary with the configuration for the neuroglancer state

        """
        self.__state = dict(new_state)

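    # Editorial sketch (not part of the original file): the getter above
    # always assembles the serialized state in this shape, with
    # showAxisLines/showScaleBar hardcoded to True at read time:
    #
    #     {
    #         "ng_link": "<base_url>#!<mount_service>://<bucket>/<dataset>/<json_name>",
    #         "dimensions": {"z": [2e-06, "m"], ...},
    #         "layers": [...],
    #         "showAxisLines": True,
    #         "showScaleBar": True,
    #     }
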
    def initialize_attributes(self, input_config: dict) -> None:
        """
        Initializes the following attributes for a given
        image layer: dimensions, layers.

        Parameters
        ------------------------
        input_config: dict
            Dictionary with the configuration for each image layer

        """

        # Initializing dimension
        self.dimensions = input_config["dimensions"]

        # Initializing layers
        self.layers = input_config["layers"]

        # Initializing state
        self.__state = self.state

        for key, val in input_config.items():
            if key == "showAxisLines":
                self.show_axis_lines = val

            elif key == "showScaleBar":
                self.show_scale_bar = val

            elif key == "title":
                self.title = val

            elif key == "crossSectionOrientation":
                self.cross_section_orientation = val

            elif key == "crossSectionScale":
                self.cross_section_scale = val

            elif key == "projectionScale":
                self.projection_scale = val

            elif key == "layout":
                self.layout = val

            elif key == "position":
                self.position = val

    @property
    def title(self) -> str:
        """
        Getter of the title property

        Returns
        ------------------------
        str
            String value of the title.
        """
        return self.__state["title"]

    @title.setter
    def title(self, new_title: str) -> None:
        """
        Sets the title parameter in neuroglancer link.

        Parameters
        ------------------------
        new_title: str
            String that will appear in the browser tab title.

        Raises
        ------------------------
        ValueError:
            If the parameter is not a string.
        """
        self.__state["title"] = str(new_title)

    @property
    def cross_section_scale(self) -> float:
        """
        Getter of the cross_section_scale property

        Returns
        ------------------------
        float
            Value of the cross_section_scale.
        """
        return self.__state["crossSectionScale"]

    @cross_section_scale.setter
    def cross_section_scale(self, new_cross_section_scale: float) -> None:
        """
        Sets the cross_section_scale parameter in neuroglancer link.

        Parameters
        ------------------------
        new_cross_section_scale: float
            Cross section scale value for the neuroglancer state.

        Raises
        ------------------------
        ValueError:
            If the parameter is not a float.
        """
        self.__state["crossSectionScale"] = float(new_cross_section_scale)

    @property
    def projection_scale(self) -> float:
        """
        Getter of the projection_scale property

        Returns
        ------------------------
        float
            Value of the projection_scale.
        """
        return self.__state["projectionScale"]

    @projection_scale.setter
    def projection_scale(self, new_scale: float) -> None:
        """
        Sets the projection_scale parameter in neuroglancer link.

        Parameters
        ------------------------
        new_scale: float
            Projection scale value for the neuroglancer state.

        Raises
        ------------------------
        ValueError:
            If the parameter is not a float.
        """
        self.__state["projectionScale"] = float(new_scale)

    @property
    def cross_section_orientation(self) -> List[float]:
        """
        Getter of the cross_section_orientation property

        Returns
        ------------------------
        List[float]
            List of values to set the cross section orientation
        """
        return self.__state["crossSectionOrientation"]

    @cross_section_orientation.setter
    def cross_section_orientation(self, new_orientation: List[float]) -> None:
        """
        Sets the cross_section_orientation parameter in neuroglancer link.

        Parameters
        ------------------------
        new_orientation: List[float]
            Cross section orientation values for the neuroglancer state.

        Raises
        ------------------------
        ValueError:
            If the list contents cannot be cast to float.
        """
        new_orientation = [float(i) for i in new_orientation]
        self.__state["crossSectionOrientation"] = new_orientation

    @property
    def layout(self) -> str:
        """
        Getter of the layout property.
        This specifies panel layout in neuroglancer, such as '4panel',
        'xz', 'zx', etc.

        Returns
        ------------------------
        str
            Viewer panel layout.
        """
        return self.__state["layout"]

    @layout.setter
    def layout(self, new_layout: str) -> None:
        """
        Sets the layout parameter in neuroglancer link.

        Parameters
        ------------------------
        new_layout: str
            Neuroglancer viewer panels layout.
            Must be one of:
            - 4panel
            - 3d
            - xy, yx, xz, etc.

        Raises
        ------------------------
        ValueError:
            If the string is not one of the defined choices
        """
        available_layouts = [
            k[0] + k[1] for k in combinations(["x", "y", "z"], 2)
        ]
        available_layouts += [i[::-1] for i in available_layouts]
        available_layouts += ["3d", "4panel"]

        if new_layout not in available_layouts:
            raise ValueError(f"Viewer layout {new_layout} is not valid")
        else:
            self.__state["layout"] = new_layout

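    # Editorial note (illustration, not in the original source): the
    # validation above accepts exactly eight layouts. combinations of
    # ["x", "y", "z"] taken two at a time yield "xy", "xz", "yz"; the
    # reversal adds "yx", "zx", "zy"; plus the literals "3d" and "4panel".
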
    @property
    def position(self) -> List[float]:
        """
        Getter of the position property

        Returns
        ------------------------
        List[float]
            List of values of the position
        """
        return self.__state["position"]

    @position.setter
    def position(self, new_position: List[float]):
        """
        Sets the viewer's center position.

        Parameters
        ------------------------
        new_position: List[float]
            List of coordinates to center on.
            If the list is shorter than the number of axes, the viewer
            will be positioned at the center of the unset axes.

        Raises
        ------------------------
        ValueError:
            If the list contents cannot be cast to float.
        """
        new_position = [float(i) for i in new_position]
        self.__state["position"] = new_position

    @property
    def show_axis_lines(self) -> bool:
        """
        Getter of the show axis lines property

        Returns
        ------------------------
        bool
            Boolean with the show axis lines value.
        """
        return self.__state["showAxisLines"]

    @show_axis_lines.setter
    def show_axis_lines(self, new_show_axis_lines: bool) -> None:
        """
        Sets the visible parameter in neuroglancer link.

        Parameters
        ------------------------
        new_show_axis_lines: bool
            Boolean that dictates if the image axes are visible or not.

        Raises
        ------------------------
        ValueError:
            If the parameter is not a boolean.
        """
        self.__state["showAxisLines"] = bool(new_show_axis_lines)

    @property
    def show_scale_bar(self) -> bool:
        """
        Getter of the show scale bar property

        Returns
        ------------------------
        bool
            Boolean with the show scale bar value.
        """
        return self.__state["showScaleBar"]

    @show_scale_bar.setter
    def show_scale_bar(self, new_show_scale_bar: bool) -> None:
        """
        Sets the visible parameter in neuroglancer link.

        Parameters
        ------------------------
        new_show_scale_bar: bool
            Boolean that dictates if the image scale bar is visible or not.

        Raises
        ------------------------
        ValueError:
            If the parameter is not a boolean.
        """
        self.__state["showScaleBar"] = bool(new_show_scale_bar)

    def save_state_as_json(self, update_state: Optional[bool] = False) -> None:
        """
        Saves a neuroglancer state as json.

        Parameters
        ------------------------
        update_state: Optional[bool]
            Updates the neuroglancer state with dimensions
            and layers in case they were changed using
            class methods. Default False
        """

        if update_state:
            self.__state = self.state

        final_path = Path(self.output_json).joinpath(self.json_name)
        utils.save_dict_as_json(final_path, self.__state, verbose=self.verbose)

    def get_url_link(self) -> str:
        """
        Creates the neuroglancer link based on where the json will be written.

        Returns
        ------------------------
        str
            Neuroglancer url to visualize data.
        """

        json_path = f"{self.mount_service}://{self.bucket_path}/{self.dataset_name}/{self.json_name}"

        link = f"{self.base_url}#!{json_path}"

        return link

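# Editorial illustration (not part of the original file): with the values
# used in the examples below (mount_service="s3", bucket_path="aind-msma-data",
# dataset_name derived from the output_dir stem "src", and the default
# base_url and json_name), get_url_link() returns
#
#     https://neuroglancer-demo.appspot.com/#!s3://aind-msma-data/src/process_output.json
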
def get_points_from_xml(path: PathLike, encoding: str = "utf-8") -> List[dict]:
    """
    Function to parse the points from the
    cell segmentation capsule.

    Parameters
    -----------------

    path: PathLike
        Path where the XML is stored.

    encoding: str
        XML encoding. Default: "utf-8"

    Returns
    -----------------
    List[dict]
        List with the location of the points.
    """

    with open(path, "r", encoding=encoding) as xml_reader:
        xml_file = xml_reader.read()

    xml_dict = xmltodict.parse(xml_file)
    cell_data = xml_dict["CellCounter_Marker_File"]["Marker_Data"][
        "Marker_Type"
    ]["Marker"]

    new_cell_data = []
    for cell in cell_data:
        new_cell_data.append(
            {"x": cell["MarkerX"], "y": cell["MarkerY"], "z": cell["MarkerZ"]}
        )

    return new_cell_data

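# Editorial sketch (hypothetical input, not in the original file): judging by
# the keys accessed above, the parser expects a CellCounter marker file shaped
# roughly like
#
#     <CellCounter_Marker_File>
#       <Marker_Data>
#         <Marker_Type>
#           <Marker>
#             <MarkerX>10</MarkerX><MarkerY>20</MarkerY><MarkerZ>30</MarkerZ>
#           </Marker>
#           ...
#         </Marker_Type>
#       </Marker_Data>
#     </CellCounter_Marker_File>
#
# which yields [{"x": "10", "y": "20", "z": "30"}, ...] (values stay strings).
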
def smartspim_example():
    """
    Example one related to the SmartSPIM data
    """
    example_data = {
        "dimensions": {
            # check the order
            "z": {"voxel_size": 2.0, "unit": "microns"},
            "y": {"voxel_size": 1.8, "unit": "microns"},
            "x": {"voxel_size": 1.8, "unit": "microns"},
            "t": {"voxel_size": 0.001, "unit": "seconds"},
        },
        "position": [1900.5, 4400.5, 3800.5, 0.5],
        "crossSectionOrientation": [0.5, 0.5, -0.5, 0.5],
        "crossSectionScale": 10.0,
        "projectionOrientation": [0.641, 0.660, 0.004, 0.391],
        "projectionScale": 13000.0,
        "layers": [
            {
                "source": "image_path.zarr",
                "type": "image",
                "channel": 0,
                # 'name': 'image_name_0',
                "shader": {"color": "green", "emitter": "RGB", "vec": "vec3"},
                "shaderControls": {
                    "normalized": {"range": [0, 500]}
                },  # Optional
            },
            {
                "source": "image_path.zarr",
                "type": "image",
                "channel": 1,
                # 'name': 'image_name_1',
                "shader": {"color": "red", "emitter": "RGB", "vec": "vec3"},
                "shaderControls": {
                    "normalized": {"range": [0, 500]}
                },  # Optional
            },
        ],
    }

    neuroglancer_link = NgState(
        input_config=example_data,
        mount_service="s3",
        bucket_path="aind-msma-data",
        output_dir="/Users/camilo.laiton/repositories/aind-ng-link/src",
    )

    data = neuroglancer_link.state
    print(data)
    # neuroglancer_link.save_state_as_json('test.json')
    neuroglancer_link.save_state_as_json()
    print(neuroglancer_link.get_url_link())

def exaspim_example():
    """
    Example 2 related to the ExaSPIM data
    """
    example_data = {
        "dimensions": {
            # check the order
            "x": {"voxel_size": 0.74800002019210531934, "unit": "microns"},
            "y": {"voxel_size": 0.74800002019210531934, "unit": "microns"},
            "z": {"voxel_size": 1, "unit": "microns"},
            "c'": {"voxel_size": 1, "unit": ""},
            "t": {"voxel_size": 0.001, "unit": "seconds"},
        },
        "layers": [
            {
                "type": "image",  # Optional
                "source": [
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0000_y_0000_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -14192,
                            "delta_y": -10640,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0000_y_0001_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -14192,
                            "delta_y": -19684.000456947142,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0000_y_0002_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -14192,
                            "delta_y": -28727.998694435275,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0001_y_0000_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -26255.200652782467,
                            "delta_y": -10640,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0001_y_0001_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -26255.200652782467,
                            "delta_y": -19684.000456947142,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0001_y_0002_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -26255.200652782467,
                            "delta_y": -28727.998694435275,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0002_y_0000_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -38318.39686664473,
                            "delta_y": -10640,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0002_y_0001_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -38318.39686664473,
                            "delta_y": -19684.000456947142,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0002_y_0002_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -38318.39686664473,
                            "delta_y": -28727.998694435275,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0003_y_0000_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -50381.5952999671,
                            "delta_y": -10640,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0003_y_0001_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -50381.5952999671,
                            "delta_y": -19684.000456947142,
                            "delta_z": 0,
                        },
                    },
                    {
                        "url": "s3://aind-open-data/exaSPIM_609107_2022-09-21_14-48-48/exaSPIM/tile_x_0003_y_0002_z_0000_ch_488.zarr",
                        "transform_matrix": {
                            "delta_x": -50381.5952999671,
                            "delta_y": -28727.998694435275,
                            "delta_z": 0,
                        },
                    },
                ],
                "channel": 0,  # Optional
                "shaderControls": {
                    "normalized": {"range": [30, 70]}
                },  # Optional
                "visible": True,  # Optional
                "opacity": 0.50,
            }
        ],
        "showScaleBar": False,
        "showAxisLines": False,
    }

    neuroglancer_link = NgState(
        input_config=example_data,
        mount_service="s3",
        bucket_path="aind-msma-data",
        output_dir="/Users/camilo.laiton/repositories/aind-ng-link/src",
    )

    data = neuroglancer_link.state
    # print(data)
    neuroglancer_link.save_state_as_json()
    print(neuroglancer_link.get_url_link())

def example_3(cells):
    """
    Example 3 with the annotation layer
    """
    example_data = {
        "dimensions": {
            # check the order
            "z": {"voxel_size": 2.0, "unit": "microns"},
            "y": {"voxel_size": 1.8, "unit": "microns"},
            "x": {"voxel_size": 1.8, "unit": "microns"},
            "t": {"voxel_size": 0.001, "unit": "seconds"},
        },
        "layers": [
            {
                "source": "image_path.zarr",
                "type": "image",
                "channel": 0,
                # 'name': 'image_name_0',
                "shader": {"color": "green", "emitter": "RGB", "vec": "vec3"},
                "shaderControls": {
                    "normalized": {"range": [0, 500]}
                },  # Optional
            },
            {
                "type": "annotation",
                "source": "precomputed:///Users/camilo.laiton/repositories/aind-ng-link/src/precomputed",
                "tool": "annotatePoint",
                "name": "annotation_name_layer",
                "annotations": cells,
                # Pass None or delete limits if
                # you want to include all the points
                # "limits": [100, 200],  # None # erase line
            },
        ],
    }

    neuroglancer_link = NgState(
        input_config=example_data,
        mount_service="s3",
        bucket_path="aind-msma-data",
        output_dir="/Users/camilo.laiton/repositories/aind-ng-link/src",
    )

    data = neuroglancer_link.state
    print(data)
    # neuroglancer_link.save_state_as_json('test.json')
    neuroglancer_link.save_state_as_json()
    print(neuroglancer_link.get_url_link())

def dispim_example():
    """
    Example related to the diSPIM data
    """

    def generate_source_list(
        s3_path: str,
        channel_name: str,
        camera_index: int,
        n_tiles: int,
        affine_transform: list,
        translation_deltas: list,
    ) -> list:
        """
        Example to generate layers with
        an affine transformation

        Parameters
        ----------
        s3_path: str
            Path in S3 where the images are stored

        channel_name: str
            Channel name of the dataset

        camera_index: int
            Camera index of the dataset

        n_tiles: int
            Number of tiles in the dataset

        affine_transform: list
            List with the affine transformation
            that will be applied in the data

        translation_deltas: list
            List with the translation per axis
            xyz

        Returns
        -------
        list
            List with the source layers for
            neuroglancer
        """
        multisource_layer = []
        n_rows = 5
        # Affine transformation without translation
        # and in ng format tczyx usually, check output dims.

        list_n_tiles = range(0, n_tiles + 1)

        shift = 1

        if camera_index:
            list_n_tiles = range(n_tiles, -1, -1)
            shift = -1

        new_affine_transform = affine_transform.copy()

        for n_tile in list_n_tiles:
            n_tile = str(n_tile)

            if len(n_tile) == 1:
                n_tile = "0" + str(n_tile)

            tile_name = f"{s3_path}/tile_X_00{n_tile}_Y_0000_Z_0000_CH_{channel_name}_cam{camera_index}.zarr"

            # Note: the zero-padded string is always truthy, so the check is
            # on the numeric tile index (tile 0 keeps the base transform).
            if int(n_tile):
                start_point = n_rows - 1

                new_translation_deltas = list(
                    map(
                        lambda delta: delta * shift * int(n_tile),
                        translation_deltas,
                    )
                )

                # Setting translations for axis
                for delta in new_translation_deltas:
                    new_affine_transform[start_point][-1] = delta
                    start_point -= 1

            else:
                new_affine_transform = affine_transform.copy()

            multisource_layer.append(
                {
                    "url": tile_name,
                    "transform_matrix": new_affine_transform.tolist(),
                }
            )

        return multisource_layer

    # t c z y x T
    ng_affine_transform = np.zeros((5, 6), np.float16)
    np.fill_diagonal(ng_affine_transform, 1)

    theta = 45

    # Adding shearing
    shearing_zx = np.tan(np.deg2rad(theta))
    ng_affine_transform[2, 4] = shearing_zx

    translation_x = 0
    translation_y = 1140
    translation_z = 0

    # Parameters
    s3_path = (
        "s3://aind-open-data/diSPIM_647459_2022-12-07_00-00-00/diSPIM.zarr"
    )
    channel_names = ["0405", "0488", "0561"]
    colors = ["#3f2efe", "#58fea1", "#f15211"]
    camera_indexes = [0]  # , 1]
    n_tiles = 13  # 13
    layers = []
    visible = True

    for camera_index in camera_indexes:
        if camera_index == 1:
            # Mirror Z stack and apply same angle for cam0
            ng_affine_transform[2, 2] = -1

        # elif camera_index == 1:
        #     # No mirror for camera 1
        #     ng_affine_transform[2, 2] = 1

        for channel_name_idx in range(len(channel_names)):
            layers.append(
                {
                    "type": "image",  # Optional
                    "source": generate_source_list(
                        s3_path=s3_path,
                        channel_name=channel_names[channel_name_idx],
                        camera_index=camera_index,
                        n_tiles=n_tiles,
                        affine_transform=ng_affine_transform,
                        translation_deltas=[
                            translation_x,
                            translation_y,
                            translation_z,
                        ],
                    ),
                    "channel": 0,  # Optional
                    "shaderControls": {
                        "normalized": {"range": [0, 800]}
                    },  # Optional
                    "shader": {
                        "color": colors[channel_name_idx],
                        "emitter": "RGB",
                        "vec": "vec3",
                    },
                    "visible": visible,  # Optional
                    "opacity": 0.50,
                    "name": f"CH_{channel_names[channel_name_idx]}_CAM{camera_index}",
                }
            )

    example_data = {
        "dimensions": {
            # check the order
            "x": {"voxel_size": 0.298, "unit": "microns"},
            "y": {"voxel_size": 0.298, "unit": "microns"},
            "z": {"voxel_size": 0.176, "unit": "microns"},
            "c'": {"voxel_size": 1, "unit": ""},
            "t": {"voxel_size": 0.001, "unit": "seconds"},
        },
        "layers": layers,
        "showScaleBar": False,
        "showAxisLines": False,
    }

    neuroglancer_link = NgState(
        input_config=example_data,
        mount_service="s3",
        bucket_path="aind-msma-data",
        output_dir="/Users/camilo.laiton/repositories/aind-ng-link/src",
    )

    data = neuroglancer_link.state
    # print(data)
    neuroglancer_link.save_state_as_json()
    print(neuroglancer_link.get_url_link())

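# Editorial illustration (not part of the original file): with theta = 45 the
# 5x6 matrix built in dispim_example above (axes ordered t, c, z, y, x plus a
# translation column) looks like
#
#     [[1, 0, 0, 0, 0,   0],   # t
#      [0, 1, 0, 0, 0,   0],   # c
#      [0, 0, 1, 0, 1.0, 0],   # z  (shear tan(45 deg) ~ 1.0 from x into z)
#      [0, 0, 0, 1, 0,   0],   # y
#      [0, 0, 0, 0, 1,   0]]   # x
#
# generate_source_list then rewrites only the last (translation) column for
# each tile.
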
# flake8: noqa: E501
def examples():
    """
    Examples of how to use the neuroglancer state class.
    """
    # example_1()

    # Transformation matrix can be a dictionary with the axis translations
    # or an affine transformation (list of lists)
    # example_2()

    cells_path = "/Users/camilo.laiton/Downloads/detected_cells (5).xml"
    cells = get_points_from_xml(cells_path)
    example_3(cells)

    # dispim_example()


if __name__ == "__main__":
    examples()