rhapso-0.1.92-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
@@ -0,0 +1,1434 @@
"""
Class to represent a layer of a configuration state to visualize images in neuroglancer
"""
import inspect
import json
import multiprocessing
import os
import struct
import time
from multiprocessing.managers import BaseManager, NamespaceProxy
from pathlib import Path
from typing import Dict, List, Optional, Union, get_args

import neuroglancer
import numpy as np

from .utils import shader_utils, utils

# IO types
PathLike = Union[str, Path]
SourceLike = Union[PathLike, List[Dict]]


class ObjProxy(NamespaceProxy):
    """Returns a proxy instance for any user-defined data type. The proxy instance will have the namespace and
    functions of the data type (except private/protected callables/attributes). Furthermore, the proxy will be
    picklable and its state can be shared among different processes."""

    @classmethod
    def populate_obj_attributes(cls, real_cls):
        """
        Populates attributes of the proxy object
        """
        DISALLOWED = set(dir(cls))
        ALLOWED = [
            "__sizeof__",
            "__eq__",
            "__ne__",
            "__le__",
            "__repr__",
            "__dict__",
            "__lt__",
            "__gt__",
        ]
        DISALLOWED.add("__class__")
        new_dict = {}
        for attr, value in inspect.getmembers(real_cls, callable):
            if attr not in DISALLOWED or attr in ALLOWED:
                new_dict[attr] = cls._proxy_wrap(attr)
        return new_dict

    @staticmethod
    def _proxy_wrap(attr):
        """
        This method creates a function that calls the proxified object's method.
        """

        def f(self, *args, **kwargs):
            """
            Function that calls the proxified object's method.
            """
            return self._callmethod(attr, args, kwargs)

        return f


def buf_builder(x, y, z, buf_):
    """Builds the buffer"""
    pt_buf = struct.pack("<3f", x, y, z)
    buf_.extend(pt_buf)


attributes = ObjProxy.populate_obj_attributes(bytearray)
bytearrayProxy = type("bytearrayProxy", (ObjProxy,), attributes)
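As a minimal sketch of the shared-buffer pattern these helpers enable (illustrative, not part of the diff): a manager-hosted bytearray can be extended from any process that holds the proxy, which is exactly how generate_precomputed_cells below lets pool workers append packed points to one buffer.

    # Minimal sketch (assumes bytearrayProxy as defined above; on spawn-based
    # platforms, run this under an `if __name__ == "__main__":` guard).
    BaseManager.register(
        "bytearray", bytearray, bytearrayProxy, exposed=tuple(dir(bytearrayProxy))
    )
    manager = BaseManager()
    manager.start()
    shared = manager.bytearray()  # proxy to a bytearray living in the manager process
    shared.extend(b"abc")         # forwarded through ObjProxy._callmethod
    print(shared.decode())        # abc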
def generate_precomputed_cells(cells, path, res):
    """
    Function for saving a precomputed annotation layer

    Parameters
    -----------------

    cells: dict
        Output of the xmltodict function for importing cell locations
    path: str
        Path where the precomputed files will be saved
    res: neuroglancer.CoordinateSpace()
        Coordinate space in which the data will be viewed

    The shared buffer used by the multiprocessing workers is created
    internally through a BaseManager.
    """

    BaseManager.register(
        "bytearray",
        bytearray,
        bytearrayProxy,
        exposed=tuple(dir(bytearrayProxy)),
    )
    manager = BaseManager()
    manager.start()

    buf = manager.bytearray()

    # Note: cell_list holds [z, y, x] per cell, so that is the order the
    # points are packed in below
    cell_list = []
    for cell in cells:
        cell_list.append([int(cell["z"]), int(cell["y"]), int(cell["x"])])

    l_bounds = np.min(cell_list, axis=0)
    u_bounds = np.max(cell_list, axis=0)

    output_path = os.path.join(path, "spatial0")
    utils.create_folder(output_path)

    metadata = {
        "@type": "neuroglancer_annotations_v1",
        "dimensions": res.to_json(),
        "lower_bound": [float(x) for x in l_bounds],
        "upper_bound": [float(x) for x in u_bounds],
        "annotation_type": "point",
        "properties": [],
        "relationships": [],
        "by_id": {"key": "by_id"},
        "spatial": [
            {
                "key": "spatial0",
                "grid_shape": [1] * res.rank,
                "chunk_size": [max(1, float(x)) for x in u_bounds - l_bounds],
                "limit": len(cell_list),
            },
        ],
    }

    with open(os.path.join(path, "info"), "w") as f:
        f.write(json.dumps(metadata))

    with open(os.path.join(output_path, "0_0_0"), "wb") as outfile:
        start_t = time.time()

        total_count = len(cell_list)  # coordinates is a list of (z, y, x) triplets

        print("Running multiprocessing")

        if not isinstance(buf, type(None)):
            buf.extend(struct.pack("<Q", total_count))

            with multiprocessing.Pool(processes=os.cpu_count()) as p:
                p.starmap(
                    buf_builder, [(x, y, z, buf) for (x, y, z) in cell_list]
                )

            # write the ids at the end of the buffer as increasing integers
            id_buf = struct.pack(
                "<%sQ" % len(cell_list), *range(len(cell_list))
            )
            buf.extend(id_buf)
        else:
            buf = struct.pack("<Q", total_count)

            for x, y, z in cell_list:
                pt_buf = struct.pack("<3f", x, y, z)
                buf += pt_buf

            # write the ids at the end of the buffer as increasing integers
            id_buf = struct.pack(
                "<%sQ" % len(cell_list), *range(len(cell_list))
            )
            buf += id_buf

        print(
            "Building file took {0} minutes".format(
                (time.time() - start_t) / 60
            )
        )

        outfile.write(bytes(buf))
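The binary layout written above is a little-endian uint64 point count, followed by count float32 triplets (in the z, y, x order collected above), followed by count uint64 ids. A hypothetical sanity-check reader (not part of the package) could invert it:

    import struct

    def read_precomputed_points(path):
        # Illustrative only: mirrors the buffer layout written by
        # generate_precomputed_cells
        with open(path, "rb") as f:
            data = f.read()
        (count,) = struct.unpack_from("<Q", data, 0)
        points = [
            struct.unpack_from("<3f", data, 8 + 12 * i) for i in range(count)
        ]
        ids = struct.unpack_from("<%dQ" % count, data, 8 + 12 * count)
        return points, ids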
def helper_create_ng_translation_matrix(
    delta_x: Optional[float] = 0,
    delta_y: Optional[float] = 0,
    delta_z: Optional[float] = 0,
    n_cols: Optional[int] = 6,
    n_rows: Optional[int] = 5,
) -> List:
    """
    Helper function to create the translation matrix based on deltas over each axis

    Parameters
    ------------------------
    delta_x: Optional[float]
        Translation over the x axis.
    delta_y: Optional[float]
        Translation over the y axis.
    delta_z: Optional[float]
        Translation over the z axis.
    n_cols: Optional[int]
        Number of columns to create the translation matrix.
    n_rows: Optional[int]
        Number of rows to create the translation matrix.

    Raises
    ------------------------
    ValueError:
        Raised if the N size of the transformation matrix is not
        enough for the deltas.

    Returns
    ------------------------
    List
        List with the translation matrix
    """

    translation_matrix = np.zeros((n_rows, n_cols), np.float16)
    np.fill_diagonal(translation_matrix, 1)

    deltas = [delta_x, delta_y, delta_z]
    start_point = n_rows - 1

    if start_point < len(deltas):
        raise ValueError(
            "N size of transformation matrix is not enough for deltas"
        )

    # Setting translations for each axis
    for delta in deltas:
        translation_matrix[start_point][-1] = delta
        start_point -= 1

    return translation_matrix.tolist()
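For the defaults (n_rows=5, n_cols=6), the helper fills the diagonal with ones and places the deltas in the last column from the bottom row upward, presumably matching neuroglancer's (t, c, z, y, x) axis ordering:

    helper_create_ng_translation_matrix(delta_x=10, delta_y=20, delta_z=30)
    # [[1.0, 0.0, 0.0, 0.0, 0.0,  0.0],
    #  [0.0, 1.0, 0.0, 0.0, 0.0,  0.0],
    #  [0.0, 0.0, 1.0, 0.0, 0.0, 30.0],
    #  [0.0, 0.0, 0.0, 1.0, 0.0, 20.0],
    #  [0.0, 0.0, 0.0, 0.0, 1.0, 10.0]]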
def helper_reverse_dictionary(dictionary: dict) -> dict:
    """
    Helper to reverse a dictionary

    Parameters
    ------------------------
    dictionary: dict
        Dictionary to reverse

    Returns
    ------------------------
    dict
        Reversed dictionary
    """

    keys = list(dictionary.keys())
    values = list(dictionary.values())
    new_dict = {}

    for idx in range(len(keys) - 1, -1, -1):
        new_dict[keys[idx]] = values[idx]

    return new_dict
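"Reverse" here means key order, not key/value swapping, e.g.:

    helper_reverse_dictionary({"x": 1, "y": 2, "z": 3})
    # {"z": 3, "y": 2, "x": 1}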
class SegmentationLayer:
    """
    Class to represent a neuroglancer segmentation layer in the
    configuration json
    """

    def __init__(
        self,
        segmentation_source: PathLike,
        tab: str,
        layer_name: str,
        mount_service: str,
        bucket_path: str,
        layer_type: Optional[str] = "segmentation",
    ) -> None:
        """
        Class constructor

        Parameters
        ------------------------
        segmentation_source: PathLike
            Segmentation layer path

        tab: str
            Tab name

        layer_name: str
            Layer name

        mount_service: str
            This parameter could be 'gs' referring to a bucket in Google Cloud or 's3' for Amazon.

        bucket_path: str
            Path in the cloud service where the dataset will be saved

        layer_type: Optional[str]
            Layer type. Default: segmentation
        """

        self.__layer_state = {}
        self.segmentation_source = segmentation_source
        self.tab_name = tab
        self.layer_name = layer_name
        self.mount_service = mount_service
        self.bucket_path = bucket_path
        self.layer_type = layer_type

        self.update_state()

    def __set_s3_path(self, orig_source_path: PathLike) -> str:
        """
        Private method to set an s3 path based on a source path.
        Available image formats: ['.zarr']

        Parameters
        ------------------------
        orig_source_path: PathLike
            Source path of the image

        Returns
        ------------------------
        str
            String with the source path pointing to the mount service in the cloud
        """

        s3_path = None
        if not orig_source_path.startswith(f"{self.mount_service}://"):
            orig_source_path = Path(orig_source_path)
            s3_path = (
                f"{self.mount_service}://{self.bucket_path}/{orig_source_path}"
            )

        else:
            s3_path = orig_source_path

        return s3_path

    def set_segmentation_source(self, source: PathLike) -> dict:
        """
        Sets the segmentation source.

        Parameters
        ---------------
        source: PathLike
            Path where the precomputed format is
            located

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """

        actual_state = self.__layer_state

        if isinstance(source, str) and "precomputed://" in source:
            write_path = Path(source.replace("precomputed://", ""))
            s3_path = self.__set_s3_path(str(write_path))

            actual_state["source"] = f"precomputed://{s3_path}"

            return actual_state

        elif isinstance(source, dict):
            layer_dict = {}
            for k, v in source.items():
                if k == "url":
                    write_path = Path(v.replace("precomputed://", ""))
                    s3_path = self.__set_s3_path(str(write_path))

                    layer_dict[k] = s3_path
                elif k == "transform":
                    layer_dict[k] = v

            actual_state["source"] = layer_dict

        else:
            raise NotImplementedError("This option has not been implemented")

        return actual_state

    def set_tool(self, tool_name: str) -> dict:
        """
        Sets the tool name in neuroglancer.

        Parameters
        ---------------
        tool_name: str
            Tool name in neuroglancer.

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """
        actual_state = self.__layer_state
        actual_state["tool"] = str(tool_name)
        return actual_state

    def set_tab_name(self, tab_name: str) -> dict:
        """
        Sets the tab name in neuroglancer.

        Parameters
        ---------------
        tab_name: str
            Tab name in neuroglancer.

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """
        actual_state = self.__layer_state
        actual_state["tab"] = str(tab_name)
        return actual_state

    def set_layer_name(self, layer_name: str) -> dict:
        """
        Sets the layer name

        Parameters
        ---------------
        layer_name: str
            Layer name

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """
        actual_state = self.__layer_state
        actual_state["name"] = layer_name
        return actual_state

    def update_state(self):
        """
        Updates the state of the layer
        """

        self.__layer_state = self.set_segmentation_source(
            self.segmentation_source
        )

        self.__layer_state = self.set_tab_name(self.tab_name)

        self.__layer_state = self.set_layer_name(self.layer_name)

        self.__layer_state["type"] = "segmentation"

    @property
    def layer_state(self) -> dict:
        """
        Getter of the layer state property.

        Returns
        ------------------------
        dict:
            Dictionary with the current configuration of the layer state.
        """
        return self.__layer_state

    @layer_state.setter
    def layer_state(self, new_layer_state: dict) -> None:
        """
        Setter of the layer state property.

        Parameters
        ------------------------
        new_layer_state: dict
            Dictionary with the new configuration of the layer state.
        """
        self.__layer_state = dict(new_layer_state)
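A hedged construction sketch (bucket and paths are illustrative, not from the diff): a 'precomputed://' source is rewritten to point at the mount service:

    seg = SegmentationLayer(
        segmentation_source="precomputed://segmentation/cell_masks",
        tab="segments",
        layer_name="cell_masks",
        mount_service="s3",
        bucket_path="my-bucket/dataset",
    )
    print(seg.layer_state["source"])
    # precomputed://s3://my-bucket/dataset/segmentation/cell_masks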
class AnnotationLayer:
    """
    Class to represent a neuroglancer annotation layer in the
    configuration json
    """

    def __init__(
        self,
        annotation_source: Union[str, dict],
        annotation_locations: List[dict],
        output_dimensions: dict,
        mount_service: str,
        bucket_path: str,
        layer_type: Optional[str] = "annotation",
        limits: Optional[List[int]] = None,
        layer_name: Optional[str] = "annotationLayer",
    ) -> None:
        """
        Class constructor

        Parameters
        ------------------------
        annotation_source: Union[str, dict]
            Location of the annotation layer information

        annotation_locations: List[dict]
            List with the locations of the points. The dictionary
            must have this order: {"x": valx, "y": valy, "z": valz}

        output_dimensions: dict
            Dictionary with the output dimensions of the layer.
            Note: the axis order indicates where the points
            will be placed.

        mount_service: str
            This parameter could be 'gs' referring to a bucket in Google Cloud or 's3' for Amazon.

        bucket_path: str
            Path in the cloud service where the dataset will be saved

        layer_type: Optional[str]
            Layer type. Default: annotation

        limits: Optional[List[int]]
            Range of points to visualize

        layer_name: Optional[str]
            Layer name
        """

        self.__layer_state = {}
        self.annotation_source = annotation_source
        self.annotation_locations = annotation_locations
        self.mount_service = mount_service
        self.bucket_path = bucket_path
        self.layer_type = layer_type
        self.limits = limits
        self.layer_name = layer_name

        # The axis order of the output dimensions determines where
        # the points are placed
        self.output_dimensions = (
            output_dimensions  # helper_reverse_dictionary(output_dimensions)
        )
        self.update_state()

    def __set_s3_path(self, orig_source_path: PathLike) -> str:
        """
        Private method to set an s3 path based on a source path.
        Available image formats: ['.zarr']

        Parameters
        ------------------------
        orig_source_path: PathLike
            Source path of the image

        Returns
        ------------------------
        str
            String with the source path pointing to the mount service in the cloud
        """

        s3_path = None
        if not orig_source_path.startswith(f"{self.mount_service}://"):
            orig_source_path = Path(orig_source_path)
            s3_path = (
                f"{self.mount_service}://{self.bucket_path}/{orig_source_path}"
            )

        else:
            s3_path = orig_source_path

        return s3_path

    def set_annotation_source(
        self, source: Union[str, dict], output_dimensions: dict
    ) -> dict:
        """
        Sets the annotation source.

        Parameters
        ---------------
        source: Union[str, dict]
            String or dictionary with the annotation source

        output_dimensions: dict
            Dictionary with the output dimensions of the layer

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """

        actual_state = self.__layer_state

        if isinstance(source, str) and "precomputed://" in source:
            axis = list(output_dimensions.keys())
            values = list(output_dimensions.values())

            names = []
            units = []
            scales = []

            for axis_idx in range(len(axis)):
                if axis[axis_idx] == "t" or axis[axis_idx] == "c'":
                    continue

                names.append(axis[axis_idx])
                scales.append(values[axis_idx][0])
                units.append(values[axis_idx][1])

            write_path = Path(source.replace("precomputed://", ""))

            coord_space = neuroglancer.CoordinateSpace(
                names=names, units=units, scales=scales
            )

            print("Write path: ", write_path)

            # Generates the precomputed format
            generate_precomputed_cells(
                self.annotation_locations, write_path, coord_space
            )

            s3_path = self.__set_s3_path(str(write_path))

            actual_state["source"] = f"precomputed://{s3_path}"

        else:
            actual_state["source"] = source
            actual_state = self.__set_transform(
                actual_state, self.output_dimensions
            )

        return actual_state

    def __set_transform(
        self, layer_state: dict, output_dimensions: dict
    ) -> dict:
        """
        Sets the output dimensions and transformation
        for the annotation layer.

        Parameters
        ---------------
        layer_state: dict
            Dictionary with the current layer state.

        output_dimensions: dict
            Dictionary with the output dimensions
            for the layer. The order of the axes in
            the dictionary determines the location
            of the points. {"t": t, "c": c, "z": z, ...}

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """

        actual_state = layer_state.copy()
        actual_state["source"]["transform"] = {
            "outputDimensions": output_dimensions
        }

        return actual_state

    def set_tool(self, tool_name: str) -> dict:
        """
        Sets the tool name in neuroglancer.

        Parameters
        ---------------
        tool_name: str
            Tool name in neuroglancer.

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """
        actual_state = self.__layer_state
        actual_state["tool"] = str(tool_name)
        return actual_state

    def set_tab_name(self, tab_name: str) -> dict:
        """
        Sets the tab name in neuroglancer.

        Parameters
        ---------------
        tab_name: str
            Tab name in neuroglancer.

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """
        actual_state = self.__layer_state
        actual_state["tab"] = str(tab_name)
        return actual_state

    def set_annotations(
        self,
        annotation_points: List[Dict[str, int]],
        annotation_type: str,
        limits: Optional[List[int]] = None,
    ) -> dict:
        """
        Sets the annotations in neuroglancer using a
        list with the locations of the annotations.

        Parameters
        ---------------
        annotation_points: List[Dict[str, int]]
            Points where the annotations will
            be placed.

        annotation_type: str
            Annotation type. e.g., "points"

        limits: Optional[List[int]]
            Limits of points. [lower_limit, upper_limit]

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """

        # Conditional to add specific points to the
        # visualization link

        annotation_len = len(annotation_points)
        lower_limit = 0
        upper_limit = 0

        if limits is None:
            upper_limit = annotation_len
            lower_limit = 0

        else:
            upper_limit = limits[1]
            lower_limit = limits[0]

            if not isinstance(upper_limit, int):
                upper_limit = annotation_len

            if not isinstance(lower_limit, int) or lower_limit < 0:
                lower_limit = 0

        if (
            upper_limit <= 0
            or upper_limit < lower_limit
            or upper_limit > annotation_len
        ):
            raise ValueError("Limits must be in a valid range.")

        actual_state = self.__layer_state

        if annotation_type == "points":

            def get_point_config(id: int, point: Dict[str, int]) -> dict:
                """
                Gets the point configuration for neuroglancer

                Parameters
                --------------
                id: int
                    Unique ID to represent a point

                point: Dict[str, int]
                    Point location

                Returns
                ---------------
                dict:
                    Dictionary with the point configuration
                    adapted to neuroglancer.
                """

                dimension_order = self.output_dimensions.keys()

                point_list = []
                tc_missing = len(dimension_order) - 3

                if tc_missing < 0:
                    raise ValueError("Expected number of dimensions: 3")

                # Axes absent from the point (e.g., t, c) default to 0.5
                for axis in dimension_order:
                    if axis in point:
                        point_list.append(float(point[axis]))

                    else:
                        point_list.append(float(0.5))

                point_config = {
                    "point": point_list,
                    "type": "point",
                    "id": str(id),
                }

                return point_config

            actual_state["annotations"] = []

            for annotation_point_idx in range(lower_limit, upper_limit):
                point_config = get_point_config(
                    annotation_point_idx,
                    annotation_points[annotation_point_idx],
                )

                actual_state["annotations"].append(point_config)

        return actual_state

    def set_layer_name(self, layer_name: str) -> dict:
        """
        Sets the layer name

        Parameters
        ---------------
        layer_name: str
            Layer name

        Returns
        ---------------
        dict:
            Dictionary with the modified layer.
        """
        actual_state = self.__layer_state
        actual_state["name"] = layer_name
        return actual_state

    def update_state(self):
        """
        Updates the state of the layer
        """

        self.__layer_state = self.set_annotation_source(
            self.annotation_source, self.output_dimensions
        )

        if isinstance(self.annotation_source, dict):
            self.__layer_state = self.set_annotations(
                self.annotation_locations, "points", self.limits
            )

        self.__layer_state = self.set_tool("annotatePoint")

        self.__layer_state = self.set_tab_name("annotations")

        self.__layer_state = self.set_layer_name(self.layer_name)

        self.__layer_state["type"] = "annotation"

    @property
    def layer_state(self) -> dict:
        """
        Getter of the layer state property.

        Returns
        ------------------------
        dict:
            Dictionary with the current configuration of the layer state.
        """
        return self.__layer_state

    @layer_state.setter
    def layer_state(self, new_layer_state: dict) -> None:
        """
        Setter of the layer state property.

        Parameters
        ------------------------
        new_layer_state: dict
            Dictionary with the new configuration of the layer state.
        """
        self.__layer_state = dict(new_layer_state)
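A hedged construction sketch (values are illustrative, not from the diff): with a dict source, the layer keeps the source as-is, attaches the output-dimension transform, and materializes each location as a point annotation ordered by the output-dimension axes:

    ann = AnnotationLayer(
        annotation_source={"url": "local://annotations"},
        annotation_locations=[{"x": 100, "y": 80, "z": 5}],
        output_dimensions={"z": [1e-6, "m"], "y": [1e-6, "m"], "x": [1e-6, "m"]},
        mount_service="s3",
        bucket_path="my-bucket/dataset",
    )
    print(ann.layer_state["annotations"][0]["point"])
    # [5.0, 80.0, 100.0]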
class ImageLayer:
    """
    Class to represent a neuroglancer image layer in the
    configuration json
    """

    def __init__(
        self,
        image_config: dict,
        mount_service: str,
        bucket_path: str,
        layer_type: Optional[str] = "image",
        output_dimensions: Optional[dict] = None,
    ) -> None:
        """
        Class constructor

        Parameters
        ------------------------
        image_config: dict
            Dictionary with the image configuration based on the neuroglancer documentation.
        mount_service: str
            This parameter could be 'gs' referring to a bucket in Google Cloud or 's3' for Amazon.
        bucket_path: str
            Path in the cloud service where the dataset will be saved
        layer_type: Optional[str]
            Image type based on the neuroglancer documentation.
        output_dimensions: Optional[dict]
            Dictionary with the output dimensions of the layer.
        """

        self.__layer_state = {}
        self.image_config = image_config
        self.mount_service = mount_service
        self.bucket_path = bucket_path
        self.layer_type = layer_type

        # Optional parameter that must be used when we have multiple images per layer.
        # The dictionary needs to be reversed for correct visualization
        self.output_dimensions = helper_reverse_dictionary(output_dimensions)

        # Fix image source
        self.image_source = self.__fix_image_source(image_config["source"])
        image_config["source"] = self.image_source

        self.update_state(image_config)

    def __set_s3_path(self, orig_source_path: PathLike) -> str:
        """
        Private method to set an s3 path based on a source path.
        Available image formats: ['.zarr']

        Parameters
        ------------------------
        orig_source_path: PathLike
            Source path of the image

        Raises
        ------------------------
        NotImplementedError:
            Raised if the image format is not zarr.

        Returns
        ------------------------
        str
            String with the source path pointing to the mount service in the cloud
        """

        s3_path = None
        if not orig_source_path.startswith(f"{self.mount_service}://"):
            # Work with Code Ocean paths
            if "/scratch/" in orig_source_path:
                orig_source_path = orig_source_path.replace("/scratch/", "")

            elif "/results/" in orig_source_path:
                orig_source_path = orig_source_path.replace("/results/", "")

            orig_source_path = Path(orig_source_path)
            s3_path = (
                f"{self.mount_service}://{self.bucket_path}/{orig_source_path}"
            )

        else:
            s3_path = orig_source_path

        if s3_path.endswith(".zarr"):
            s3_path = "zarr://" + s3_path

        else:
            raise NotImplementedError(
                "This format has not been implemented yet for visualization"
            )

        return s3_path

    def __set_sources_paths(self, sources_paths: List) -> List:
        """
        Private method to set multiple image sources on an s3 path. It also accepts
        a transformation matrix, provided as a list for an affine transformation
        or as a dictionary for a translation matrix.
        Available image formats: ['.zarr']

        Parameters
        ------------------------
        sources_paths: List
            List of dictionaries with the image sources and their transformation
            matrices in case they are provided.

        Returns
        ------------------------
        List
            List of dictionaries with the configuration for neuroglancer
        """
        new_source_path = []

        for source in sources_paths:
            new_dict = {}

            for key in source.keys():
                if key == "transform_matrix" and isinstance(
                    source["transform_matrix"], dict
                ):
                    new_dict["transform"] = {
                        "matrix": helper_create_ng_translation_matrix(
                            delta_x=source["transform_matrix"]["delta_x"],
                            delta_y=source["transform_matrix"]["delta_y"],
                            delta_z=source["transform_matrix"]["delta_z"],
                        ),
                        "outputDimensions": self.output_dimensions,
                    }

                elif key == "transform_matrix" and isinstance(
                    source["transform_matrix"], list
                ):
                    new_dict["transform"] = {
                        "matrix": source["transform_matrix"],
                        "outputDimensions": self.output_dimensions,
                    }

                elif key == "url":
                    new_dict["url"] = self.__set_s3_path(source["url"])

                else:
                    new_dict[key] = source[key]

            new_source_path.append(new_dict)

        return new_source_path

    def __fix_image_source(self, source_path: SourceLike) -> SourceLike:
        """
        Fixes the image source path to include the type of image neuroglancer accepts.

        Parameters
        ------------------------
        source_path: SourceLike
            Path or list of paths where the images are located, with their transformation matrices.

        Returns
        ------------------------
        SourceLike
            Fixed path(s) for the neuroglancer json configuration.
        """
        new_source_path = None

        if isinstance(source_path, list):
            # Multiple sources in a single image layer
            new_source_path = self.__set_sources_paths(source_path)

        elif isinstance(source_path, get_args(PathLike)):
            # Single source image
            new_source_path = self.__set_s3_path(source_path)

        return new_source_path

    # flake8: noqa: C901
    def set_default_values(
        self, image_config: dict = {}, overwrite: bool = False
    ) -> None:
        """
        Sets default values for the image.

        Parameters
        ------------------------
        image_config: dict
            Dictionary with the image configuration. Similar to self.image_config

        overwrite: bool
            If the parameters already have values, this flag allows them to be overwritten.
        """

        if overwrite:
            self.image_channel = 0
            self.shader_control = {"normalized": {"range": [0, 200]}}
            self.visible = True
            self.__layer_state["name"] = str(Path(self.image_source).stem)
            self.__layer_state["type"] = str(self.layer_type)

        elif len(image_config):
            # Setting default image_config values in the json image layer
            if "channel" not in image_config:
                # Setting channel to 0 for the image
                self.image_channel = 0

            if "shaderControls" not in image_config:
                self.shader_control = {"normalized": {"range": [0, 200]}}

            if "visible" not in image_config:
                self.visible = True

            if "name" not in image_config:
                try:
                    channel = self.__layer_state["localDimensions"]["c'"][0]

                except KeyError:
                    channel = ""

                if isinstance(self.image_source, get_args(PathLike)):
                    self.__layer_state["name"] = (
                        f"{Path(self.image_source).stem}_{channel}"
                    )

                else:
                    self.__layer_state["name"] = (
                        f"{Path(self.image_source[0]['url']).stem}_{channel}"
                    )

            if "type" not in image_config:
                self.__layer_state["type"] = str(self.layer_type)

    # flake8: noqa: C901
    def update_state(self, image_config: dict) -> None:
        """
        Updates the layer state from the image configuration.

        Parameters
        ------------------------
        image_config: dict
            Dictionary with the image configuration. Similar to self.image_config
            e.g.: image_config = {
                'type': 'image',  # Optional
                'source': 'image_path',
                'channel': 0,  # Optional
                'name': 'image_name',  # Optional
                'shader': {
                    'color': 'green',
                    'emitter': 'RGB',
                    'vec': 'vec3'
                },
                'shaderControls': {  # Optional
                    "normalized": {
                        "range": [0, 200]
                    }
                }
            }
        """

        for param, value in image_config.items():
            if param in ["type", "name", "blend"]:
                self.__layer_state[param] = str(value)

            if param in ["visible"]:
                self.visible = value

            if param == "shader":
                self.shader = self.__create_shader(value)

            if param == "channel":
                self.image_channel = value

            if param == "shaderControls":
                self.shader_control = value

            if param == "opacity":
                self.opacity = value

            if param == "source":
                if isinstance(value, get_args(PathLike)):
                    self.__layer_state[param] = str(value)

                elif isinstance(value, list):
                    # Setting a list of dictionaries with the image configuration
                    self.__layer_state[param] = value

        self.set_default_values(image_config)

    def __create_shader(self, shader_config: dict) -> str:
        """
        Creates a configuration for the neuroglancer shader.

        Parameters
        ------------------------
        shader_config: dict
            Configuration of neuroglancer's shader.

        Returns
        ------------------------
        str
            String with the shader configuration for neuroglancer.
        """

        monochrome_keys = set(["color", "emitter", "vec"])
        rgb_keys = set(["r_range", "g_range", "b_range"])
        config_keys = set(shader_config.keys())
        if config_keys == monochrome_keys:
            return shader_utils.create_monochrome_shader(
                color=shader_config["color"],
                emitter=shader_config["emitter"],
                vec=shader_config["vec"],
            )
        elif config_keys == rgb_keys:
            return shader_utils.create_rgb_shader(
                r_range=shader_config["r_range"],
                g_range=shader_config["g_range"],
                b_range=shader_config["b_range"],
            )
        else:
            raise RuntimeError(
                "Do not know how to create shader code for shader_config "
                f"with keys {list(shader_config.keys())}"
            )

    @property
    def opacity(self) -> float:
        """
        Getter of the opacity property

        Returns
        ------------------------
        float
            Float with the opacity value
        """
        return self.__layer_state["opacity"]

    @opacity.setter
    def opacity(self, opacity: float) -> None:
        """
        Sets the opacity parameter in the neuroglancer link.

        Parameters
        ------------------------
        opacity: float
            Float number in [0, 1] that indicates the opacity.

        Raises
        ------------------------
        ValueError:
            If the parameter cannot be cast to a float.
        """
        self.__layer_state["opacity"] = float(opacity)

    @property
    def shader(self) -> str:
        """
        Getter of the shader property

        Returns
        ------------------------
        str
            String with the shader value
        """
        return self.__layer_state["shader"]

    @shader.setter
    def shader(self, shader_config: str) -> None:
        """
        Sets a configuration for the neuroglancer shader.

        Parameters
        ------------------------
        shader_config: str
            Shader configuration for neuroglancer in string format.
            e.g. #uicontrol vec3 color color(default=\"green\")\n#uicontrol invlerp normalized\nvoid main() {\n emitRGB(color * normalized());\n}
        """
        self.__layer_state["shader"] = str(shader_config)

    @property
    def shader_control(self) -> dict:
        """
        Getter of the shader control property

        Returns
        ------------------------
        dict
            Dictionary with the shader control value
        """
        return self.__layer_state["shaderControls"]

    @shader_control.setter
    def shader_control(self, shader_control_config: dict) -> None:
        """
        Sets a configuration for the neuroglancer shader control.

        Parameters
        ------------------------
        shader_control_config: dict
            Shader control configuration for neuroglancer.
        """
        self.__layer_state["shaderControls"] = dict(shader_control_config)

    @property
    def image_channel(self) -> List:
        """
        Getter of the current image channel in the layer

        Returns
        ------------------------
        List
            Local dimension entry for the channel, [channel, unit]
        """
        return self.__layer_state["localDimensions"]["c'"]

    @image_channel.setter
    def image_channel(self, channel: int) -> None:
        """
        Sets the image channel in case the file contains multiple channels.

        Parameters
        ------------------------
        channel: int
            Channel position. It is incremented by 1 since neuroglancer channels start at 1.

        Raises
        ------------------------
        ValueError:
            If the provided channel is not an integer.
        """
        self.__layer_state["localDimensions"] = {"c'": [int(channel) + 1, ""]}

    @property
    def visible(self) -> bool:
        """
        Getter of the visible attribute of the layer.
        True means the layer will be visible when the image
        is loaded in neuroglancer, False otherwise.

        Returns
        ------------------------
        bool
            Boolean with the current visible value
        """
        return self.__layer_state["visible"]

    @visible.setter
    def visible(self, visible: bool) -> None:
        """
        Sets the visible parameter in the neuroglancer link.

        Parameters
        ------------------------
        visible: bool
            Boolean that dictates whether the image is visible or not.
        """
        self.__layer_state["visible"] = bool(visible)

    @property
    def layer_state(self) -> dict:
        """
        Getter of the layer state property.

        Returns
        ------------------------
        dict:
            Dictionary with the current configuration of the layer state.
        """
        return self.__layer_state

    @layer_state.setter
    def layer_state(self, new_layer_state: dict) -> None:
        """
        Setter of the layer state property.

        Parameters
        ------------------------
        new_layer_state: dict
            Dictionary with the new configuration of the layer state.
        """
        self.__layer_state = dict(new_layer_state)
class NgLayer:
    """
    Class to represent a neuroglancer layer in the configuration json
    """

    def __init__(self) -> None:
        """
        Class constructor
        """
        self.__extensions = ["image", "annotation", "segmentation"]

        self.factory = {
            "image": ImageLayer,
            "annotation": AnnotationLayer,
            "segmentation": SegmentationLayer,
        }

    @property
    def extensions(self) -> List:
        """
        Method to return the allowed layer types.

        Returns
        ------------------------
        List
            List with the allowed layer types
        """
        return self.__extensions

    def create(self, params: dict):
        """
        Instantiates the class corresponding to
        the requested layer type.
        """

        layer_type = params["layer_type"]

        if layer_type not in self.__extensions:
            raise NotImplementedError(
                f"Layer type {layer_type} has not been implemented"
            )

        return self.factory[layer_type](**params)
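A hedged usage sketch (bucket, paths, and dimensions are illustrative, not from the diff): the factory forwards the whole params dict to the layer class, so the dict must carry the constructor arguments of the chosen layer type:

    layer = NgLayer().create(
        {
            "layer_type": "image",
            "image_config": {
                "source": "fused/channel_561.zarr",
                "channel": 0,
                "shader": {"color": "green", "emitter": "RGB", "vec": "vec3"},
            },
            "mount_service": "s3",
            "bucket_path": "my-bucket/dataset",
            "output_dimensions": {
                "x": [1e-6, "m"],
                "y": [1e-6, "m"],
                "z": [1e-6, "m"],
            },
        }
    )
    print(layer.layer_state["source"])
    # zarr://s3://my-bucket/dataset/fused/channel_561.zarr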