Rhapso 0.1.92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. Rhapso/__init__.py +1 -0
  2. Rhapso/data_prep/__init__.py +2 -0
  3. Rhapso/data_prep/n5_reader.py +188 -0
  4. Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
  5. Rhapso/data_prep/xml_to_dataframe.py +215 -0
  6. Rhapso/detection/__init__.py +5 -0
  7. Rhapso/detection/advanced_refinement.py +203 -0
  8. Rhapso/detection/difference_of_gaussian.py +324 -0
  9. Rhapso/detection/image_reader.py +117 -0
  10. Rhapso/detection/metadata_builder.py +130 -0
  11. Rhapso/detection/overlap_detection.py +327 -0
  12. Rhapso/detection/points_validation.py +49 -0
  13. Rhapso/detection/save_interest_points.py +265 -0
  14. Rhapso/detection/view_transform_models.py +67 -0
  15. Rhapso/fusion/__init__.py +0 -0
  16. Rhapso/fusion/affine_fusion/__init__.py +2 -0
  17. Rhapso/fusion/affine_fusion/blend.py +289 -0
  18. Rhapso/fusion/affine_fusion/fusion.py +601 -0
  19. Rhapso/fusion/affine_fusion/geometry.py +159 -0
  20. Rhapso/fusion/affine_fusion/io.py +546 -0
  21. Rhapso/fusion/affine_fusion/script_utils.py +111 -0
  22. Rhapso/fusion/affine_fusion/setup.py +4 -0
  23. Rhapso/fusion/affine_fusion_worker.py +234 -0
  24. Rhapso/fusion/multiscale/__init__.py +0 -0
  25. Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
  26. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
  27. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
  28. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
  29. Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
  30. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
  31. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
  32. Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
  33. Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
  34. Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
  35. Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
  36. Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
  37. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
  38. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
  39. Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
  40. Rhapso/fusion/multiscale_worker.py +113 -0
  41. Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
  42. Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
  43. Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
  44. Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
  45. Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
  46. Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
  47. Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
  48. Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
  49. Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
  50. Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
  51. Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
  52. Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
  53. Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
  54. Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
  55. Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
  56. Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
  57. Rhapso/matching/__init__.py +0 -0
  58. Rhapso/matching/load_and_transform_points.py +458 -0
  59. Rhapso/matching/ransac_matching.py +544 -0
  60. Rhapso/matching/save_matches.py +120 -0
  61. Rhapso/matching/xml_parser.py +302 -0
  62. Rhapso/pipelines/__init__.py +0 -0
  63. Rhapso/pipelines/ray/__init__.py +0 -0
  64. Rhapso/pipelines/ray/aws/__init__.py +0 -0
  65. Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
  66. Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
  67. Rhapso/pipelines/ray/evaluation.py +71 -0
  68. Rhapso/pipelines/ray/interest_point_detection.py +137 -0
  69. Rhapso/pipelines/ray/interest_point_matching.py +110 -0
  70. Rhapso/pipelines/ray/local/__init__.py +0 -0
  71. Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
  72. Rhapso/pipelines/ray/matching_stats.py +104 -0
  73. Rhapso/pipelines/ray/param/__init__.py +0 -0
  74. Rhapso/pipelines/ray/solver.py +120 -0
  75. Rhapso/pipelines/ray/split_dataset.py +78 -0
  76. Rhapso/solver/__init__.py +0 -0
  77. Rhapso/solver/compute_tiles.py +562 -0
  78. Rhapso/solver/concatenate_models.py +116 -0
  79. Rhapso/solver/connected_graphs.py +111 -0
  80. Rhapso/solver/data_prep.py +181 -0
  81. Rhapso/solver/global_optimization.py +410 -0
  82. Rhapso/solver/model_and_tile_setup.py +109 -0
  83. Rhapso/solver/pre_align_tiles.py +323 -0
  84. Rhapso/solver/save_results.py +97 -0
  85. Rhapso/solver/view_transforms.py +75 -0
  86. Rhapso/solver/xml_to_dataframe_solver.py +213 -0
  87. Rhapso/split_dataset/__init__.py +0 -0
  88. Rhapso/split_dataset/compute_grid_rules.py +78 -0
  89. Rhapso/split_dataset/save_points.py +101 -0
  90. Rhapso/split_dataset/save_xml.py +377 -0
  91. Rhapso/split_dataset/split_images.py +537 -0
  92. Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
  93. rhapso-0.1.92.dist-info/METADATA +39 -0
  94. rhapso-0.1.92.dist-info/RECORD +101 -0
  95. rhapso-0.1.92.dist-info/WHEEL +5 -0
  96. rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
  97. rhapso-0.1.92.dist-info/top_level.txt +2 -0
  98. tests/__init__.py +1 -0
  99. tests/test_detection.py +17 -0
  100. tests/test_matching.py +21 -0
  101. tests/test_solving.py +21 -0
Rhapso/fusion/neuroglancer_link_gen/link_utils.py
@@ -0,0 +1,239 @@
+ """
+ Utilities for neuroglancer links.
+ """
+ import pathlib
+ import re
+ from collections import defaultdict
+
+ import boto3
+ import numpy as np
+
+
+ def calculate_net_transforms(
+     view_transforms: dict[int, list[dict]]
+ ) -> dict[int, np.ndarray]:
+     """
+     Accumulate the net transform and net translation for each matrix stack.
+     Net translation =
+         Sum of translation vectors converted into the original nominal basis
+     Net transform =
+         Product of 3x3 matrices
+     NOTE: The translational component (last column) is defined
+     with respect to the DOMAIN, not the codomain.
+     The implementation follows this convention.
+
+     Parameters
+     ------------------------
+     view_transforms: dict[int, list[dict]]
+         Dictionary mapping tile ids to the transforms associated with each tile.
+
+     Returns
+     ------------------------
+     dict[int, np.ndarray]:
+         Dictionary mapping tile ids to the net transform.
+
+     """
+
+     identity_transform = np.array(
+         [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
+     )
+     net_transforms: dict[int, np.ndarray] = defaultdict(
+         lambda: np.copy(identity_transform)
+     )
+
+     for view, tfs in view_transforms.items():
+         net_translation = np.zeros(3)
+         net_matrix_3x3 = np.eye(3)
+
+         # tfs is a list of dicts, each holding a transform under the 'affine' key
+         for tf in tfs:
+             nums = [float(val) for val in tf["affine"].split(" ")]
+             matrix_3x3 = np.array([nums[0::4], nums[1::4], nums[2::4]])
+             translation = np.array(nums[3::4])
+             net_translation = net_translation + translation
+             net_matrix_3x3 = net_matrix_3x3 @ matrix_3x3
+         net_transforms[view] = np.hstack(
+             (net_matrix_3x3, net_translation.reshape(3, 1))
+         )
+
+     return net_transforms
+
+
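For illustration, a minimal usage sketch (not part of the package diff): the slicing above implies each 'affine' value is a space-separated flattening of a 3x4 matrix in which every fourth number is a translation component; the tile id and values below are hypothetical.

    # Hypothetical tile 0 carrying a single translation-only transform
    view_transforms = {
        0: [{"affine": "1.0 0.0 0.0 10.0 0.0 1.0 0.0 20.0 0.0 0.0 1.0 30.0"}]
    }
    net = calculate_net_transforms(view_transforms)
    # net[0] -> 3x4 array with identity linear part and translation (10, 20, 30)
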
+ def convert_matrix_3x4_to_5x6(matrix_3x4: np.ndarray) -> np.ndarray:
+     """
+     Converts classic 3x4 homogeneous coordinates (x y z T)
+     to neuroglancer 5x6 coordinates (t c z y x T).
+
+     Parameters
+     ------------------------
+     matrix_3x4: np.ndarray
+         See description above.
+
+     Returns
+     ------------------------
+     np.ndarray:
+         See description above.
+     """
+
+     # Initialize as identity
+     matrix_5x6 = np.zeros((5, 6), np.float32)
+     np.fill_diagonal(matrix_5x6, 1)
+
+     # Swap rows 0 and 2; swap columns 0 and 2 (xyz -> zyx)
+     patch = np.copy(matrix_3x4)
+     patch[[0, 2], :] = patch[[2, 0], :]
+     patch[:, [0, 2]] = patch[:, [2, 0]]
+
+     # Place patch in the bottom-right corner (rows 2-4, columns 2-5)
+     matrix_5x6[2:5, 2:6] = patch
+
+     return matrix_5x6
+
+
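A quick check of the conversion (a sketch, not from the package):

    import numpy as np

    m34 = np.hstack((np.eye(3), [[10.0], [20.0], [30.0]]))  # (x y z T)
    m56 = convert_matrix_3x4_to_5x6(m34)
    # m56 is 5x6: rows/cols 0-1 keep the identity (t, c axes); the spatial
    # block is reordered to zyx, so the last column reads (0, 0, 30, 20, 10)
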
+ def list_all_tiles_in_path(SPIM_folder: str) -> list:
+     """
+     Lists all tiles in a given SPIM folder.
+
+     Parameters
+     ------------------------
+     SPIM_folder: str
+         Path to SPIM folder.
+
+     Returns
+     ------------------------
+     list:
+         List of all tiles in the SPIM folder.
+     """
+     SPIM_folder = pathlib.Path(SPIM_folder)
+
+     return list(SPIM_folder.glob("*.zarr"))
+
+
+ def list_all_tiles_in_bucket_path(
+     bucket_SPIM_folder: str, bucket_name="aind-open-data"
+ ) -> list:
+     """
+     Lists all tiles in a given bucket path.
+
+     Parameters
+     ------------------------
+     bucket_SPIM_folder: str
+         Path to SPIM folder in bucket.
+     bucket_name: str
+         Name of bucket.
+
+     Returns
+     ------------------------
+     list:
+         List of all tiles in the SPIM folder.
+     """
+     # The bucket name embedded in the s3:// path overrides the default argument
+     bucket_name, prefix = bucket_SPIM_folder.replace("s3://", "").split("/", 1)
+
+     client = boto3.client("s3")
+     result = client.list_objects(
+         Bucket=bucket_name, Prefix=prefix + "/", Delimiter="/"
+     )
+     tiles = []
+     for o in result.get("CommonPrefixes", []):
+         tiles.append(o.get("Prefix"))
+     return tiles
+
+
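A hedged usage sketch (requires AWS credentials; the dataset path below is hypothetical). With Delimiter="/", each returned entry is a CommonPrefixes key ending in "/":

    tiles = list_all_tiles_in_bucket_path(
        "s3://aind-open-data/some_dataset/SPIM.ome.zarr"  # hypothetical path
    )
    # e.g. ['some_dataset/SPIM.ome.zarr/tile_X_0000_Y_0000_Z_0000_ch_488.zarr/', ...]
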
+ def extract_channel_from_tile_path(t_path: str) -> int:
+     """
+     Extracts the channel from a tile path following the naming convention:
+     tile_X_####_Y_####_Z_####_ch_####.filetype
+
+     Parameters
+     ------------------------
+     t_path: str
+         Tile path to run the regex on.
+
+     Returns
+     ------------------------
+     int:
+         Channel value.
+
+     """
+
+     pattern = r"(ch|CH)_(\d+)"
+     match = re.search(pattern, t_path)
+     if match is None:
+         raise ValueError(f"No channel token found in tile path: {t_path}")
+     channel = int(match.group(2))
+     return channel
+
+
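For example (a sketch; the tile name is hypothetical but follows the convention above):

    extract_channel_from_tile_path("tile_X_0000_Y_0001_Z_0002_ch_488.zarr")  # -> 488
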
+ def get_unique_channels_for_dataset(dataset_path: str) -> list:
+     """
+     Extracts the list of channels in a given dataset.
+
+     Parameters
+     ------------------------
+     dataset_path: str
+         Path to a dataset's zarr folder.
+
+     Returns
+     ------------------------
+     unique_list_of_channels: list(int)
+         The unique list of channel wavelengths, as ints.
+
+     """
+     if pathlib.Path(dataset_path).exists():
+         tiles_in_path = list_all_tiles_in_path(dataset_path)
+     else:
+         tiles_in_path = list_all_tiles_in_bucket_path(
+             dataset_path, "aind-open-data"
+         )
+     unique_list_of_channels = []
+     for tile in tiles_in_path:
+         # Tiles may be pathlib.Path objects (local) or str prefixes (S3)
+         channel = extract_channel_from_tile_path(str(tile))
+
+         if channel not in unique_list_of_channels:
+             unique_list_of_channels.append(channel)
+
+     return unique_list_of_channels
+
+
+ def wavelength_to_hex(wavelength: int) -> int:
+     """
+     Converts a wavelength to the corresponding color hex value.
+
+     Parameters
+     ------------------------
+     wavelength: int
+         Integer value representing wavelength.
+
+     Returns
+     ------------------------
+     int:
+         Hex color value.
+     """
+
+     # Each wavelength key is the upper bound of a wavelength band.
+     # Wavelengths range from 380-750nm.
+     # Color map wavelength/hex pairs are generated
+     # by sampling along a CIE diagram arc.
+     color_map = {
+         460: 0x690AFE,  # Purple
+         470: 0x3F2EFE,  # Blue-Purple
+         480: 0x4B90FE,  # Blue
+         490: 0x59D5F8,  # Blue-Green
+         500: 0x5DF8D6,  # Green
+         520: 0x5AFEB8,  # Green
+         540: 0x58FEA1,  # Green
+         560: 0x51FF1E,  # Green
+         565: 0xBBFB01,  # Green-Yellow
+         575: 0xE9EC02,  # Yellow
+         580: 0xF5C503,  # Yellow-Orange
+         590: 0xF39107,  # Orange
+         600: 0xF15211,  # Orange-Red
+         620: 0xF0121E,  # Red
+         750: 0xF00050,  # Pink
+     }
+
+     for ub, hex_val in color_map.items():
+         if wavelength < ub:  # Upper bound is exclusive
+             return hex_val
+     return hex_val  # Wavelengths >= 750 fall through to the last color
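Two examples of the band lookup (the values follow from the map above):

    hex(wavelength_to_hex(488))  # '0x59d5f8' - first band above 488 is 490 (Blue-Green)
    hex(wavelength_to_hex(800))  # '0xf00050' - past 750, falls through to the last color (Pink)
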
Rhapso/fusion/neuroglancer_link_gen/main.py
@@ -0,0 +1,299 @@
+ import argparse
+ import json
+ import urllib.parse
+ from pathlib import Path
+
+ import boto3
+
+ from .exaspim_link import generate_exaspim_link
+ from .hcr_link import generate_hcr_link
+
+ def upload_to_s3(file_path, bucket_name, s3_file_path):
+     """
+     Upload a file to an S3 bucket.
+
+     :param file_path: File to upload
+     :param bucket_name: Bucket to upload to
+     :param s3_file_path: S3 object name
+     """
+     s3_client = boto3.client('s3')
+     try:
+         s3_client.upload_file(file_path, bucket_name, s3_file_path)
+         print(f"File {file_path} uploaded to {bucket_name}/{s3_file_path}")
+     except Exception as e:
+         print(f"Error uploading file: {e}")
+
+ def is_hcr_dataset(s3_path):
+     """
+     Determine if the S3 path contains HCR data by checking for multiple
+     .zarr folders.
+
+     Parameters
+     ----------
+     s3_path : str
+         S3 path to check
+
+     Returns
+     -------
+     bool
+         True if this appears to be an HCR dataset
+     """
+     try:
+         from .hcr_link import list_s3_zarr_folders
+         zarr_folders = list_s3_zarr_folders(s3_path)
+         # Consider it HCR if there are multiple .zarr folders or folders
+         # with a "channel_" prefix
+         return len(zarr_folders) > 1 or any(
+             folder.startswith("channel_") for folder in zarr_folders
+         )
+     except Exception as e:
+         print(f"Could not check for HCR dataset: {e}")
+         return False
+
+ def parse_s3_path(s3_path):
+     """
+     Parse an S3 path to get the bucket name and the parent directory.
+
+     :param s3_path: S3 path (s3://bucket-name/path/to/zarr)
+     :return: tuple (bucket_name, parent_directory)
+     """
+     if s3_path.startswith("s3://"):
+         path_parts = s3_path[5:].split("/")
+         bucket_name = path_parts[0]
+         # Exclude the zarr file/directory itself
+         parent_directory = "/".join(path_parts[1:-1])
+         return bucket_name, parent_directory
+     else:
+         raise ValueError("Invalid S3 path format")
+
+ def parse_s3_upload_path(json_upload_bucket, parent_directory, json_upload_path=None):
+     """
+     Construct the S3 upload path for the JSON file.
+
+     :param json_upload_bucket: S3 bucket name for uploading
+     :param parent_directory: Parent directory from the zarr path
+     :param json_upload_path: Optional exact S3 path to use
+     :return: tuple (bucket_name, s3_key)
+     """
+     if json_upload_path:
+         # Use the exact path specified by the user
+         s3_key = json_upload_path
+     else:
+         # Use an auto-generated path based on the zarr structure
+         s3_key = f"{parent_directory}/process_output.json"
+     return json_upload_bucket, s3_key
+
+
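A small sketch of how the two parsers compose (bucket and key names are hypothetical):

    bucket, parent = parse_s3_path("s3://my-bucket/datasets/exp1/fused.zarr")
    # bucket == 'my-bucket', parent == 'datasets/exp1'
    parse_s3_upload_path("upload-bucket", parent)
    # -> ('upload-bucket', 'datasets/exp1/process_output.json')
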
+ def generate_neuroglancer_link(
+     zarr_path,
+     vmin,
+     vmax,
+     json_upload_bucket=None,
+     json_upload_path=None,
+     json_local_output="results",
+     dataset_type="auto",
+     opacity=0.5,
+     blend="default"
+ ):
+     """
+     Generate a Neuroglancer link and configuration files.
+
+     :param zarr_path: Path to the Zarr dataset (s3://bucket/path/to/zarr)
+     :param vmin: Minimum value for scaling
+     :param vmax: Maximum value for scaling
+     :param json_upload_bucket: S3 bucket name to upload JSON
+     :param json_upload_path: Exact S3 path within the bucket to upload JSON
+     :param json_local_output: Local folder name to save the output JSON
+     :param dataset_type: Processing type: 'auto', 'hcr', or 'exaspim'
+     :param opacity: Opacity for the visualization
+     :param blend: Blend mode
+     """
+     # Print input parameters
+     print("=" * 60)
+     print("NEUROGLANCER LINK GENERATOR")
+     print("=" * 60)
+     print("Input Parameters:")
+     print(f"  Zarr Path: {zarr_path}")
+     print(f"  VMin: {vmin}")
+     print(f"  VMax: {vmax}")
+     print(f"  Opacity: {opacity}")
+     print(f"  Blend: {blend}")
+     print(f"  Local Output Folder: {json_local_output}")
+     print(f"  Upload Bucket: {json_upload_bucket if json_upload_bucket else 'None (using aind-open-data for link)'}")
+     print(f"  Upload Path: {json_upload_path if json_upload_path else 'Auto-generated from zarr path'}")
+     print("=" * 60)
+
+     print("šŸ”„ Processing...")
+
+     # Determine the S3 bucket and parent directory
+     s3_bucket, parent_directory = parse_s3_path(zarr_path)
+
+     # Determine which bucket to use for the neuroglancer link
+     bucket_path = json_upload_bucket if json_upload_bucket else "aind-open-data"
+
+     # Create the local output directory (relative to the current working
+     # directory) if it doesn't exist
+     local_output_path = Path.cwd() / json_local_output
+     local_output_path.mkdir(parents=True, exist_ok=True)
+
+     print("šŸ”„ Generating Neuroglancer configuration...")
+
+     # Decide processing mode: use CLI override when provided, otherwise auto-detect
+     chosen_mode = dataset_type
+     if chosen_mode == "auto":
+         chosen_mode = "hcr" if is_hcr_dataset(zarr_path) else "exaspim"
+
+     if chosen_mode == "hcr":
+         print("šŸ“Š Using HCR processing...")
+         generate_hcr_link(
+             s3_path=zarr_path,
+             vmin=vmin,
+             vmax=vmax,
+             opacity=1.0,  # Set opacity to 1.0 for HCR data as per spec
+             blend="additive",
+             output_json_path=str(local_output_path),
+             dataset_name=parent_directory,
+             bucket_path=bucket_path,
+         )
+     else:
+         # ExaSPIM: ensure s3_path points at the .zarr root by trimming a
+         # trailing resolution index, e.g. '.../fused.zarr/0' -> '.../fused.zarr'
+         normalized_path = zarr_path
+         if ".zarr/" in normalized_path:
+             normalized_path = normalized_path.split('.zarr')[0] + '.zarr'
+
+         print("šŸ“Š Using ExaSPIM processing...")
+         generate_exaspim_link(
+             None,
+             s3_path=normalized_path,
+             opacity=opacity,
+             blend=blend,
+             output_json_path=str(local_output_path),
+             vmin=vmin,
+             vmax=vmax,
+             dataset_name=parent_directory,
+             bucket_path=bucket_path,
+         )
+
+     # Define the local JSON file path
+     output_json_file = local_output_path / "process_output.json"
+
+     print("āœ… Neuroglancer configuration generated!")
+
+     # Read the generated JSON to get the ng_link
+     ng_link_from_file = None
+     neuroglancer_state = None
+     try:
+         with open(output_json_file, 'r') as f:
+             json_content = json.load(f)
+             ng_link_from_file = json_content.get("ng_link")
+             # Remove the ng_link to get just the state for URL encoding
+             neuroglancer_state = {
+                 k: v for k, v in json_content.items() if k != "ng_link"
+             }
+     except Exception as e:
+         print(f"āš ļø Error reading generated JSON: {e}")
+
+     print("šŸ”„ Handling file operations...")
+
+     # Handle S3 upload if json_upload_bucket is provided
+     s3_upload_location = None
+     if json_upload_bucket:
+         try:
+             upload_bucket, upload_key = parse_s3_upload_path(
+                 json_upload_bucket, parent_directory, json_upload_path
+             )
+             upload_to_s3(str(output_json_file), upload_bucket, upload_key)
+             s3_upload_location = f"s3://{upload_bucket}/{upload_key}"
+             print("āœ… S3 upload completed!")
+
+             # If a custom upload path was provided, update the ng_link inside
+             # the local JSON
+             try:
+                 with open(output_json_file, 'r') as f:
+                     current_json = json.load(f)
+
+                 # Construct the new ng_link that points to the uploaded S3 location
+                 json_name = Path(output_json_file).name
+                 if '/' in upload_key:
+                     key_dir = '/'.join(upload_key.split('/')[:-1])
+                     new_ng_link = f"https://neuroglancer-demo.appspot.com/#!s3://{upload_bucket}/{key_dir}/{json_name}"
+                 else:
+                     new_ng_link = f"https://neuroglancer-demo.appspot.com/#!s3://{upload_bucket}/{json_name}"
+
+                 # If the user provided an explicit json_upload_path, prefer that exact key in the link
+                 if json_upload_path:
+                     # If json_upload_path includes a filename, use it; otherwise append the default filename
+                     key_parts = json_upload_path.split('/')
+                     if key_parts[-1].endswith('.json'):
+                         new_ng_link = f"https://neuroglancer-demo.appspot.com/#!s3://{upload_bucket}/{json_upload_path}"
+                     else:
+                         new_ng_link = f"https://neuroglancer-demo.appspot.com/#!s3://{upload_bucket}/{json_upload_path.rstrip('/')}/{json_name}"
+
+                 # Update the local JSON ng_link and write it back
+                 current_json['ng_link'] = new_ng_link
+                 with open(output_json_file, 'w') as f:
+                     json.dump(current_json, f, indent=2)
+
+                 # Update the variable used for printing below
+                 ng_link_from_file = new_ng_link
+             except Exception as e:
+                 print(f"āš ļø Warning: could not update local JSON ng_link to uploaded path: {e}")
+         except Exception as e:
+             print(f"āŒ Error with S3 upload: {e}")
+
+     # Generate results summary
+     print("\n" + "=" * 60)
+     print("RESULTS")
+     print("=" * 60)
+     print(f"šŸ“ Local JSON saved: {output_json_file}")
+
+     if s3_upload_location:
+         print(f"ā˜ļø S3 JSON uploaded: {s3_upload_location}")
+     else:
+         print("ā˜ļø S3 Upload: None (no bucket specified)")
+
+     print(f"šŸ”— Neuroglancer bucket: {bucket_path}")
+
+     if ng_link_from_file:
+         print("\n🌐 Neuroglancer Link (with JSON path):")
+         print(f"  {ng_link_from_file}")
+
+         # Create a URL-encoded version of the state
+         if neuroglancer_state:
+             try:
+                 state_json = json.dumps(neuroglancer_state, separators=(',', ':'))
+                 encoded_state = urllib.parse.quote(state_json)
+                 base_url = "https://neuroglancer-demo.appspot.com/"
+                 encoded_url = f"{base_url}#!{encoded_state}"
+
+                 # Save the URL-encoded link to a file
+                 encoded_url_file = local_output_path / "neuroglancer_encoded_url.txt"
+                 with open(encoded_url_file, 'w') as f:
+                     f.write(encoded_url)
+
+                 print(f"\nšŸ’¾ URL-encoded link saved to: {encoded_url_file}")
+             except Exception as e:
+                 print(f"āš ļø Error creating URL-encoded link: {e}")
+     else:
+         print("āš ļø Could not extract Neuroglancer link from generated JSON")
+
+     print("=" * 60)
+
+
+ if __name__ == "__main__":
+     # Create the parser
+     parser = argparse.ArgumentParser(description="Generate Neuroglancer link")
+
+     # Add arguments
+     parser.add_argument("--zarr_path", required=True, help="Path to the Zarr dataset (s3://bucket/path/to/zarr)")
+     parser.add_argument("--opacity", type=float, default=0.5, help="Opacity for the visualization")
+     parser.add_argument("--blend", default="default", help="Blend mode")
+     parser.add_argument("--json_local_output", default="results", help="Local folder name to save the output JSON (e.g., 'results'). Will be created in the current working directory.")
+     parser.add_argument("--vmin", type=float, required=True, help="Minimum value for scaling")
+     parser.add_argument("--vmax", type=float, required=True, help="Maximum value for scaling")
+     parser.add_argument("--json_upload_bucket", default=None, help="S3 bucket name to upload JSON (e.g., martin-test-bucket). If not provided, no upload will occur and 'aind-open-data' will be used for the neuroglancer link.")
+     parser.add_argument("--json_upload_path", default=None, help="Exact S3 path within the bucket to upload JSON (e.g., output/fuse/out.json). If not provided, an auto-generated path based on the zarr structure will be used.")
+     parser.add_argument("--dataset_type", choices=["auto", "hcr", "exaspim"], default="auto", help="Processing type: 'auto' detects HCR vs ExaSPIM, or force 'hcr' or 'exaspim'.")
+
+     # Parse the arguments
+     args = parser.parse_args()
+
+     # Call the main function with parsed arguments
+     generate_neuroglancer_link(
+         zarr_path=args.zarr_path,
+         vmin=args.vmin,
+         vmax=args.vmax,
+         json_upload_bucket=args.json_upload_bucket,
+         json_upload_path=args.json_upload_path,
+         json_local_output=args.json_local_output,
+         dataset_type=args.dataset_type,
+         opacity=args.opacity,
+         blend=args.blend,
+     )
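Assuming the package layout listed above, the script could be invoked as a module; the bucket, path, and scaling values below are placeholders:

    python -m Rhapso.fusion.neuroglancer_link_gen.main \
        --zarr_path s3://my-bucket/datasets/exp1/fused.zarr/0 \
        --vmin 0 --vmax 500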