rhapso-0.1.92-py3-none-any.whl
This diff shows the contents of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py
@@ -0,0 +1,127 @@
+"""
+Library for generating exaspim link.
+"""
+from collections import defaultdict
+from typing import Optional
+
+from . import link_utils
+from .ng_layer import NgLayer
+from .ng_state import NgState
+from .parsers import OmeZarrParser, XmlParser
+
+
+def generate_exaspim_link(
+    xml_path: Optional[str] = None,
+    s3_path: Optional[str] = None,
+    vmin: Optional[float] = 0,
+    vmax: Optional[float] = 200,
+    opacity: Optional[float] = 1.0,
+    blend: Optional[str] = "default",
+    output_json_path: Optional[str] = ".",
+    dataset_name: Optional[str] = None,
+    bucket_path: Optional[str] = "aind-open-data",
+) -> None:
+    """Creates a neuroglancer link to visualize
+    registration transforms on exaspim dataset pre-fusion.
+
+    Parameters
+    ------------------------
+    xml_path: str
+        Path of xml outputted by BigStitcher.
+    s3_path: str
+        Path of s3 bucket where exaspim dataset is located.
+    vmin: float
+        Minimum value for shader.
+    vmax: float
+        Maximum value for shader.
+    opacity: float
+        Opacity of shader.
+    blend: str
+        Blend mode of shader.
+    output_json_path: str
+        Local directory to write process_output.json file that
+        neuroglancer reads.
+    dataset_name: Optional[str]
+        Name of dataset. If None, will be directory name of
+        output_json_path.
+    bucket_path: Optional[str]
+        S3 bucket name where the process_output.json will be uploaded.
+        Default is "aind-open-data". Change this to your own bucket name
+        if you plan to upload the JSON to a different bucket.
+
+    Returns
+    ------------------------
+    None
+    """
+
+    if xml_path is None and s3_path.endswith(".zarr"):
+        vox_sizes, tile_paths, net_transforms = OmeZarrParser.extract_info(
+            s3_path
+        )
+    else:
+        vox_sizes, tile_paths, net_transforms = XmlParser.extract_info(
+            xml_path
+        )
+
+    channel_sources = defaultdict(list)
+    for tile_id, _ in enumerate(net_transforms):
+        t_path = tile_paths[tile_id]
+
+        channel: int = link_utils.extract_channel_from_tile_path(t_path)
+
+        final_transform = link_utils.convert_matrix_3x4_to_5x6(
+            net_transforms[tile_id]
+        )
+
+        channel_sources[channel].append(
+            {
+                "url": f"{s3_path}/{t_path}",
+                "transform_matrix": final_transform.tolist(),
+            }
+        )
+
+    layers = []  # Neuroglancer Tabs
+    for i, (channel, sources) in enumerate(channel_sources.items()):
+        hex_val: int = link_utils.wavelength_to_hex(channel)
+        hex_str = f"#{str(hex(hex_val))[2:]}"
+
+        layers.append(
+            {
+                "type": "image",  # Optional
+                "source": sources,
+                "channel": 0,  # Optional
+                "shaderControls": {
+                    "normalized": {"range": [vmin, vmax]}
+                },  # Optional # Exaspim has low HDR
+                "shader": {"color": hex_str, "emitter": "RGB", "vec": "vec3",},
+                "visible": True,  # Optional
+                "opacity": opacity,
+                "name": f"CH_{channel}",
+                "blend": blend,
+            }
+        )
+
+    # Generate input config
+    input_config = {
+        "dimensions": {
+            "x": {"voxel_size": vox_sizes[0], "unit": "microns"},
+            "y": {"voxel_size": vox_sizes[1], "unit": "microns"},
+            "z": {"voxel_size": vox_sizes[2], "unit": "microns"},
+            "c'": {"voxel_size": 1, "unit": ""},
+            "t": {"voxel_size": 0.001, "unit": "seconds"},
+        },
+        "layers": layers,
+        "showScaleBar": False,
+        "showAxisLines": False,
+    }
+
+    # Generate the link
+    neuroglancer_link = NgState(
+        input_config=input_config,
+        mount_service="s3",
+        bucket_path=bucket_path,
+        output_dir=output_json_path,
+        dataset_name=dataset_name,
+    )
+    neuroglancer_link.save_state_as_json()
+    print(neuroglancer_link.get_url_link())
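For orientation, a minimal usage sketch of the function added above. The import path is inferred from the wheel layout listed at the top, and the XML path, S3 path, dataset name, and bucket are placeholders rather than values from the package.

# Hypothetical usage sketch of generate_exaspim_link; all paths and names below
# are placeholders, and the import path is inferred from the wheel's file layout.
from Rhapso.fusion.neuroglancer_link_gen.exaspim_link import generate_exaspim_link

generate_exaspim_link(
    xml_path="bigstitcher_output.xml",         # BigStitcher XML with registration transforms
    s3_path="s3://my-bucket/exaspim_dataset",  # root of the tiled exaspim data on S3
    vmin=0,
    vmax=200,
    output_json_path=".",                      # process_output.json is written here
    dataset_name="exaspim_example",
    bucket_path="my-bucket",                   # bucket where the JSON is expected to be uploaded
)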
Rhapso/fusion/neuroglancer_link_gen/hcr_link.py
@@ -0,0 +1,368 @@
+"""
+Library for generating HCR (Hybridization Chain Reaction) neuroglancer links.
+"""
+import boto3
+from typing import List, Optional
+import json
+from .ng_state import NgState
+from botocore.exceptions import ClientError
+
+
+def list_s3_zarr_folders(s3_path: str) -> List[str]:
+    """
+    List all .zarr folders in an S3 path.
+
+    Parameters
+    ----------
+    s3_path : str
+        S3 path in format s3://bucket/path/
+
+    Returns
+    -------
+    List[str]
+        List of .zarr folder names
+    """
+    # Parse S3 path
+    if not s3_path.startswith("s3://"):
+        raise ValueError("S3 path must start with s3://")
+
+    path_parts = s3_path[5:].split("/", 1)
+    bucket = path_parts[0]
+    prefix = path_parts[1] if len(path_parts) > 1 and path_parts[1] else ""
+
+    # Ensure prefix ends with / if it's not empty
+    if prefix and not prefix.endswith("/"):
+        prefix += "/"
+
+    s3_client = boto3.client('s3')
+    zarr_folders = []
+
+    try:
+        paginator = s3_client.get_paginator('list_objects_v2')
+        pages = paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter='/')
+
+        for page in pages:
+            if 'CommonPrefixes' in page:
+                for prefix_info in page['CommonPrefixes']:
+                    folder_name = prefix_info['Prefix'].rstrip('/').split('/')[-1]
+                    if folder_name.endswith('.zarr'):
+                        zarr_folders.append(folder_name)
+    except ClientError as e:
+        print(f"Error listing S3 objects: {e}")
+        return []
+
+    return sorted(zarr_folders)
+
+
+def generate_hcr_link(
+    s3_path: str,
+    vmin: float = 90,
+    vmax: float = 400,
+    opacity: float = 1.0,
+    blend: str = "additive",
+    output_json_path: str = ".",
+    dataset_name: Optional[str] = None,
+    bucket_path: str = "aind-open-data",
+) -> None:
+    """
+    Creates a neuroglancer link to visualize HCR dataset - handles both multi-channel
+    directories and single zarr files.
+
+    Parameters
+    ----------
+    s3_path : str
+        S3 path to either a directory containing .zarr folders or a single .zarr file
+    vmin : float
+        Minimum value for shader normalization range
+    vmax : float
+        Maximum value for shader normalization range
+    opacity : float
+        Opacity of layers
+    blend : str
+        Blend mode for layers
+    output_json_path : str
+        Local directory to write process_output.json file
+    dataset_name : Optional[str]
+        Name of dataset
+    bucket_path : str
+        S3 bucket name where the process_output.json will be uploaded
+    """
+
+    # Check if this is a single zarr file or a directory with multiple zarr folders
+    if s3_path.endswith('.zarr') or '.zarr/' in s3_path:
+        # Single zarr file - use ExaSPIM-like processing but with HCR formatting
+        print("Processing single zarr file in HCR format...")
+        return _generate_single_zarr_hcr_link(
+            s3_path, vmin, vmax, opacity, blend, output_json_path, dataset_name, bucket_path
+        )
+
+    # Multi-channel HCR directory processing (original logic)
+    zarr_folders = list_s3_zarr_folders(s3_path)
+
+    if not zarr_folders:
+        raise ValueError(f"No .zarr folders found in {s3_path}")
+
+    print(f"Found {len(zarr_folders)} .zarr folders: {zarr_folders}")
+
+    # Define the standard dimensions for multi-channel HCR data
+    dimensions = {
+        "x": {"voxel_size": 2.3371543469894166e-07, "unit": "meters"},
+        "y": {"voxel_size": 2.3371543469894166e-07, "unit": "meters"},
+        "z": {"voxel_size": 1e-06, "unit": "meters"},
+        "c'": {"voxel_size": 1, "unit": ""},
+        "t": {"voxel_size": 0.001, "unit": "seconds"}
+    }
+
+    # Define shader configuration in the format expected by NgLayer
+    shader_config = {
+        "color": "#690afe",
+        "emitter": "RGB",
+        "vec": "vec3"
+    }
+
+    # Create layers for each zarr folder
+    layers = []
+    for zarr_folder in zarr_folders:
+        # Extract channel name from folder name (e.g., "channel_405.zarr" -> "CH_405")
+        if zarr_folder.startswith("channel_") and zarr_folder.endswith(".zarr"):
+            channel_name = zarr_folder[8:-5]  # Remove "channel_" prefix and ".zarr" suffix
+            display_name = f"CH_{channel_name}"
+        else:
+            # Fallback naming
+            display_name = zarr_folder.replace(".zarr", "").upper()
+
+        # Construct proper zarr source URL - always point to original data location
+        # Clean the s3_path and ensure proper format
+        clean_s3_path = s3_path.rstrip('/')
+        if clean_s3_path.startswith("s3://"):
+            # Use the s3_path as-is, just add zarr:// prefix and zarr_folder
+            zarr_source = f"zarr://{clean_s3_path}/{zarr_folder}"
+        else:
+            # If no s3:// prefix, add it
+            zarr_source = f"zarr://s3://{clean_s3_path}/{zarr_folder}"
+
+        layer = {
+            "type": "image",
+            "source": zarr_source,
+            "localDimensions": {
+                "c'": {"voxel_size": 1, "unit": ""}
+            },
+            "shaderControls": {
+                "normalized": {
+                    "range": [vmin, vmax]
+                }
+            },
+            "shader": shader_config,
+            "visible": True,
+            "opacity": opacity,
+            "name": display_name,
+            "blend": blend
+        }
+        layers.append(layer)
+
+    # Create the neuroglancer configuration
+    config = {
+        "dimensions": dimensions,
+        "layers": layers,
+        "showAxisLines": False,
+        "showScaleBar": False
+    }
+
+    # Generate the link using NgState
+    # Always use "aind-open-data" for source paths, bucket_path is only for the JSON location
+    neuroglancer_link = NgState(
+        input_config=config,
+        mount_service="s3",
+        bucket_path="aind-open-data",  # Keep source paths pointing to original data
+        output_dir=output_json_path,
+        dataset_name=dataset_name,
+    )
+
+    neuroglancer_link.save_state_as_json()
+
+    # Post-process the JSON to match the desired format
+    _post_process_hcr_json(output_json_path, neuroglancer_link.json_name)
+
+    print(neuroglancer_link.get_url_link())
+
+
+def _post_process_hcr_json(output_dir: str, json_filename: str) -> None:
+    """
+    Post-process the generated JSON to match the HCR-specific format requirements.
+    """
+    import json
+    from pathlib import Path
+
+    json_path = Path(output_dir) / json_filename
+
+    try:
+        # Read the generated JSON
+        with open(json_path, 'r') as f:
+            data = json.load(f)
+
+        # Replace shader with the specific HCR shader string
+        hcr_shader = "#uicontrol vec3 color color(default=\"#690afe\")\n#uicontrol invlerp normalized\nvoid main() {\nemitRGB(color * normalized());\n}"
+
+        if "layers" in data:
+            for layer in data["layers"]:
+                if layer.get("type") == "image":
+                    layer["shader"] = hcr_shader
+
+                    # Fix any corrupted source paths
+                    source = layer.get("source", "")
+                    if source:
+                        # Clean up corrupted paths by removing duplicate prefixes and wrong buckets
+                        cleaned_source = source
+
+                        # Remove any bucket references that shouldn't be in source paths
+                        import re
+
+                        # Pattern to match and extract the correct zarr path
+                        # This handles cases like "zarr://s3://wrong-bucket/zarr:/s3:/correct-path"
+                        pattern = r'zarr://s3://[^/]+/zarr:/s3:/(.+)'
+                        match = re.search(pattern, cleaned_source)
+                        if match:
+                            # Rebuild with correct format
+                            correct_path = match.group(1)
+                            cleaned_source = f"zarr://s3://{correct_path}"
+                        else:
+                            # Clean up other malformed patterns
+                            cleaned_source = re.sub(r'zarr://s3://[^/]+/zarr://s3://', 'zarr://s3://', cleaned_source)
+                            cleaned_source = re.sub(r'zarr:/s3:/', 'zarr://s3://', cleaned_source)
+
+                        layer["source"] = cleaned_source
+
+        # Ensure showAxisLines and showScaleBar are False (as specified in the requirement)
+        data["showAxisLines"] = False
+        data["showScaleBar"] = False
+
+        # Save the modified JSON back
+        with open(json_path, 'w') as f:
+            json.dump(data, f, indent=2)
+
+        print(f"✅ Post-processed JSON with HCR-specific formatting")
+
+    except Exception as e:
+        print(f"⚠️ Warning: Could not post-process JSON: {e}")
+
+
+def _generate_single_zarr_hcr_link(
+    s3_path: str,
+    vmin: float,
+    vmax: float,
+    opacity: float,
+    blend: str,
+    output_json_path: str,
+    dataset_name: Optional[str],
+    bucket_path: str
+) -> None:
+    """
+    Generate HCR link for a single zarr file with ExaSPIM-like structure but HCR formatting.
+    """
+    from .parsers import OmeZarrParser
+    import numpy as np
+    from pathlib import Path
+
+    # Get zarr metadata using existing parser
+    try:
+        # For single zarr files, use the path as-is for parsing
+        base_zarr_path = s3_path
+        if '.zarr/' in base_zarr_path:
+            # If path includes resolution level, strip it for metadata parsing
+            base_zarr_path = base_zarr_path.split('.zarr')[0] + '.zarr'
+
+        vox_sizes = OmeZarrParser.extract_tile_vox_size(base_zarr_path)
+
+        # Extract channel from filename (e.g., channel_488.zarr -> 488)
+        channel_name = "488"  # default
+        if "channel_" in s3_path:
+            channel_part = s3_path.split("channel_")[1].split(".zarr")[0].split("/")[0]
+            channel_name = channel_part
+
+    except Exception as e:
+        print(f"Warning: Could not extract zarr metadata: {e}")
+        # Use fallback values from your example
+        vox_sizes = (9.201793828644069e-08, 9.201793828644069e-08, 4.4860451398192966e-07)
+        channel_name = "488"
+
+    # Create dimensions using extracted voxel sizes
+    dimensions = {
+        "x": [vox_sizes[0], "m"],
+        "y": [vox_sizes[1], "m"],
+        "z": [vox_sizes[2], "m"],
+        "c'": [1, ""],
+        "t": [0.001, "s"]
+    }
+
+    # Create identity transform matrix (5x6 as per your example)
+    identity_transform = [
+        [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+        [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+        [0.0, 0.0, 1.0, 0.0, 0.0, 0.1982632279396057],
+        [0.0, 0.0, 0.0, 1.0, 0.0, -0.8770887851715088],
+        [0.0, 0.0, 0.0, 0.0, 1.0, 0.507804274559021]
+    ]
+
+    # Create source array with transform
+    source_array = [{
+        "url": f"zarr://{s3_path}",
+        "transform": {
+            "matrix": identity_transform,
+            "outputDimensions": {
+                "t": [0.001, "s"],
+                "c'": [1, ""],
+                "z": [vox_sizes[2], "m"],
+                "y": [vox_sizes[1], "m"],
+                "x": [vox_sizes[0], "m"]
+            }
+        }
+    }]
+
+    # Create layer
+    layer = {
+        "type": "image",
+        "source": source_array,
+        "localDimensions": {
+            "c'": [1, ""]
+        },
+        "shaderControls": {
+            "normalized": {
+                "range": [vmin, vmax]
+            }
+        },
+        "shader": f"#uicontrol vec3 color color(default=\"#59d5f8\")\n#uicontrol invlerp normalized\nvoid main() {{\nemitRGB(color * normalized());\n}}",
+        "visible": True,
+        "opacity": opacity,
+        "name": f"CH_{channel_name}",
+        "blend": blend
+    }
+
+    # Create config
+    config = {
+        "dimensions": dimensions,
+        "layers": [layer],
+        "showAxisLines": False,
+        "showScaleBar": False
+    }
+
+    # Generate output directly (don't use NgState to avoid path corruption)
+    output_file = Path(output_json_path) / "process_output.json"
+
+    # Add ng_link
+    if bucket_path != "aind-open-data":
+        ng_link = f"https://neuroglancer-demo.appspot.com/#!s3://{bucket_path}/{dataset_name}/process_output.json"
+    else:
+        ng_link = f"https://neuroglancer-demo.appspot.com/#!s3://aind-open-data/{dataset_name}/process_output.json"
+
+    final_output = {
+        "ng_link": ng_link,
+        **config
+    }
+
+    # Save JSON directly
+    with open(output_file, 'w') as f:
+        import json
+        json.dump(final_output, f, indent=4)
+
+    print(f"✅ Generated single zarr HCR configuration")
+    print(f"🔗 Neuroglancer Link: {ng_link}")
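A minimal usage sketch for the entry point above. The import path is inferred from the wheel layout, and the S3 prefixes, dataset names, and bucket are placeholders. As the code shows, a single .zarr path is routed to _generate_single_zarr_hcr_link, while a directory prefix is expanded through list_s3_zarr_folders into one layer per channel.

# Hypothetical usage sketch of generate_hcr_link; the import path and all S3 paths,
# dataset names, and buckets below are placeholders inferred from the wheel layout.
from Rhapso.fusion.neuroglancer_link_gen.hcr_link import generate_hcr_link

# Directory prefix: one image layer is built per channel_*.zarr folder found under it.
generate_hcr_link(
    s3_path="s3://my-bucket/hcr_dataset/fused/",
    vmin=90,
    vmax=400,
    dataset_name="hcr_example",
    bucket_path="my-bucket",
)

# Single zarr path: routed to _generate_single_zarr_hcr_link, which writes
# process_output.json directly instead of going through NgState.
generate_hcr_link(
    s3_path="s3://my-bucket/hcr_dataset/channel_488.zarr",
    dataset_name="hcr_example_488",
    bucket_path="my-bucket",
)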
Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py
@@ -0,0 +1,47 @@
+"""Main function to run with argparse"""
+import argparse
+
+import dispim_link
+
+
+def main():
+    """Make a Neuroglancer link from a Bigstitcher XML file.
+
+    Arguments
+    ------------------------
+    xml_in: str
+        Path to Bigstitcher XML file.
+    s3_bucket: str
+        Name of S3 bucket to upload to.
+
+    Returns
+    ------------------------
+    str:
+        Neuroglancer link.
+
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "xml_in",
+        type=str,
+        help="Bigstitcher XML to make Neuroglancer link of.",
+    )
+    parser.add_argument(
+        "s3_bucket", type=str, help="S3 bucket name.", default="aind-open-data"
+    )
+
+    args = parser.parse_args()
+
+    # print(args)
+    xml_file_in = args.xml_in
+
+    s3_bucket = args.s3_bucket
+
+    ng_link = dispim_link.ingest_xml_and_write_ng_link(xml_file_in, s3_bucket)
+
+    print(ng_link)
+
+
+if __name__ == "__main__":
+    """run main function."""
+    main()