rhapso-0.1.92-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
@@ -0,0 +1,302 @@
"""
XMLParserMatching parses XML content into memory.
"""

import xml.etree.ElementTree as ET
import boto3


class XMLParserMatching:
    def __init__(self, xml_input_path, input_type):
        self.xml_input_path = xml_input_path
        self.input_type = input_type
        self.data_global = None

    def check_labels(self, root):
        """
        Verifies the presence of required XML labels, including bounding boxes, point spread
        functions, stitching results, and intensity adjustments.
        """
        labels = True
        if root.find(".//BoundingBoxes") is None:
            labels = False
        if root.find(".//PointSpreadFunctions") is None:
            labels = False
        if root.find(".//StitchingResults") is None:
            labels = False
        if root.find(".//IntensityAdjustments") is None:
            labels = False

        return labels

    def check_length(self, root):
        """
        Validates that the counts of elements within the XML structure align with the expected
        relationships between file mappings, view setups, and view registrations.
        """
        length = True
        if len(root.findall(".//ImageLoader/files/FileMapping")) != len(root.findall(".//ViewRegistration")) or \
                len(root.findall(".//ViewSetup")) != len(root.findall(".//ViewRegistration")) / 2:
            length = False
        return length

    def parse_view_setup(self, root):
        timepoints = set()

        # Zarr loader
        for zg in root.findall(".//ImageLoader/zgroups/zgroup"):
            tp_attr = zg.get("tp") or zg.get("timepoint") or "0"
            try:
                timepoints.add(int(tp_attr))
            except ValueError:
                pass

        # Tiff loader
        for fm in root.findall(".//ImageLoader/files/FileMapping"):
            tp_attr = fm.get("timepoint") or "0"
            try:
                timepoints.add(int(tp_attr))
            except ValueError:
                pass

        if not timepoints:
            timepoints = {0}

        # Parse ViewSetups
        by_id = {}
        for vs in root.findall(".//ViewSetups/ViewSetup"):
            sid = int(vs.findtext("id"))
            name = (vs.findtext("name") or "").strip()

            size_txt = (vs.findtext("size") or "").strip()
            try:
                sx, sy, sz = [int(x) for x in size_txt.split()]
            except Exception:
                sx = sy = sz = None

            vox_txt = (vs.findtext("voxelSize/size") or "").strip()
            try:
                vx, vy, vz = [float(x) for x in vox_txt.split()]
            except Exception:
                vx = vy = vz = None

            attrs = {}
            attrs_node = vs.find("attributes")
            if attrs_node is not None:
                for child in list(attrs_node):
                    txt = (child.text or "").strip()
                    try:
                        attrs[child.tag] = int(txt)
                    except ValueError:
                        attrs[child.tag] = txt

            by_id[sid] = {
                "id": sid,
                "name": name,
                "size": (sx, sy, sz),
                "voxelSize": (vx, vy, vz),
                "attributes": attrs,
            }

        viewSizes = {}
        viewVoxelSizes = {}
        for tp in sorted(timepoints):
            for sid, meta in by_id.items():
                if meta["size"] != (None, None, None):
                    viewSizes[(tp, sid)] = meta["size"]
                if meta["voxelSize"] != (None, None, None):
                    viewVoxelSizes[(tp, sid)] = meta["voxelSize"]

        return {
            "byId": by_id,
            "viewSizes": viewSizes,
            "viewVoxelSizes": viewVoxelSizes,
        }

    def parse_image_loader(self, root):
        image_loader_data = []

        if self.input_type == "zarr":
            for il in root.findall(".//ImageLoader/zgroups/zgroup"):
                view_setup = il.get("setup")
                timepoint = il.get("timepoint") if "timepoint" in il.attrib else il.get("tp")
                file_path = (il.get("path") or il.findtext("path") or "").strip()
                # Assumes paths of the form "..._ch_<channel>.ome.zarr"
                channel = file_path.split("_ch_", 1)[1].split(".ome.zarr", 1)[0]

                image_loader_data.append(
                    {
                        "view_setup": view_setup,
                        "timepoint": timepoint,
                        "series": 1,
                        "channel": channel,
                        "file_path": file_path,
                    }
                )

        elif self.input_type == "tiff":
            if not root.findall(".//ImageLoader/files/FileMapping"):
                raise Exception("There are no files in this XML")

            if not self.check_labels(root):
                raise Exception("Required labels do not exist")

            if not self.check_length(root):
                raise Exception(
                    "The number of view setups, view registrations, and tiles do not match"
                )

            # Iterate over each file mapping in the XML
            for fm in root.findall(".//ImageLoader/files/FileMapping"):
                view_setup = fm.get("view_setup")
                timepoint = fm.get("timepoint")
                series = fm.get("series")
                channel = fm.get("channel")
                file_path = fm.find("file").text if fm.find("file") is not None else None
                full_path = self.xml_input_path.replace("dataset-detection.xml", "") + file_path
                image_loader_data.append(
                    {
                        "view_setup": view_setup,
                        "timepoint": timepoint,
                        "series": series,
                        "channel": channel,
                        "file_path": full_path,
                    }
                )

        return image_loader_data

    def parse(self, xml_content):
        """
        Parse an XML file or string and create the complete dataset object.
        """
        try:
            # Input starting with '<' is raw XML content; anything else is treated as a file path
            if str(xml_content).lstrip().startswith("<"):
                root = ET.fromstring(xml_content)
            else:
                tree = ET.parse(xml_content)
                root = tree.getroot()

            self.data_global = {
                "basePathURI": root.find(".//BasePath").text if root.find(".//BasePath") is not None else "",
                "viewRegistrations": self._parse_view_registrations(root),
                "viewsInterestPoints": self._parse_view_paths(root),
                "imageLoader": self.parse_image_loader(root),
                "viewSetup": self.parse_view_setup(root),
            }
            return self.data_global

        except Exception as e:
            print(f"❌ Error parsing XML content: {e}")
            raise

    def _parse_view_registrations(self, root):
        """
        Parse ViewRegistration entries from XML.
        """
        view_registrations = {}

        # Find all ViewRegistration elements
        for view_reg in root.findall(".//ViewRegistration"):
            try:
                # Extract timepoint and setup
                timepoint = int(view_reg.get("timepoint"))
                setup = int(view_reg.get("setup"))
                view_id = (timepoint, setup)

                # Parse all ViewTransform elements for this view
                transforms = []
                for transform_elem in view_reg.findall("ViewTransform"):
                    transform_type = transform_elem.get("type", "unknown")

                    # Extract the Name element
                    name_elem = transform_elem.find("Name")
                    transform_name = name_elem.text.strip() if name_elem is not None and name_elem.text else f"Unnamed_{transform_type}"

                    # Extract the affine transformation matrix
                    affine_elem = transform_elem.find("affine")
                    if affine_elem is not None and affine_elem.text:
                        affine_text = affine_elem.text.strip()

                        transform_data = {
                            "type": transform_type,
                            "name": transform_name,
                            "affine": affine_text,
                        }
                        transforms.append(transform_data)
                    else:
                        print(f"  ⚠️ No affine data found for transform type='{transform_type}', name='{transform_name}'")

                if transforms:
                    view_registrations[view_id] = transforms
                else:
                    print(f"⚠️ No valid transforms found for view {view_id}")

            except Exception as e:
                print(f"❌ Error parsing ViewRegistration: {e}")
                continue

        return view_registrations

    def _parse_view_paths(self, root):
        """Parse view interest point file paths"""
        view_paths = {}
        for vip in root.findall(".//ViewInterestPointsFile"):
            setup_id = int(vip.attrib["setup"])
            timepoint = int(vip.attrib["timepoint"])
            label = vip.attrib.get("label", "beads")
            params = vip.attrib.get("params", "")
            path = (vip.text or "").strip().split("/", 1)[0]

            key = (timepoint, setup_id)

            if key in view_paths and label not in view_paths[key]["label"]:
                view_paths[key]["label"].append(label)
            else:
                view_paths[key] = {
                    "timepoint": timepoint,
                    "setup": setup_id,
                    "label": [label],
                    "params": params,
                    "path": path,
                }

        return view_paths

    def fetch_local_xml(self, file_path):
        try:
            with open(file_path, "r", encoding="utf-8") as file:
                return file.read()
        except FileNotFoundError:
            print(f"pipeline failed, could not find xml file located at '{file_path}'")
            return None
        except Exception as e:
            print(f"pipeline failed, error while parsing xml file at '{file_path}': {e}")
            return None

    def get_xml_content(self):
        if self.xml_input_path.startswith("s3://"):
            s3_path = self.xml_input_path[5:]
            parts = s3_path.split("/", 1)
            bucket_name = parts[0]
            file_key = parts[1]
            s3_client = boto3.client("s3")
            response = s3_client.get_object(Bucket=bucket_name, Key=file_key)
            xml_content = response["Body"].read().decode("utf-8")

        else:
            xml_content = self.fetch_local_xml(self.xml_input_path)
            if xml_content is None:
                return None

        return xml_content

    def run(self):
        """
        Executes the entry point of the script.
        """
        xml_content = self.get_xml_content()
        data_global = self.parse(xml_content)
        return data_global
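For orientation, a minimal usage sketch of the parser above; the bucket/key (and the AWS credentials it implies) are hypothetical, and `run()` simply chains `get_xml_content()` into `parse()`:

# Hypothetical inputs: any BigStitcher-style dataset XML, local path or s3:// URI.
parser = XMLParserMatching(
    xml_input_path="s3://my-bucket/dataset/dataset-detection.xml",
    input_type="zarr",  # or "tiff"
)
data_global = parser.run()
print(list(data_global["viewRegistrations"])[:5])  # (timepoint, setup) keys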
File without changes
File without changes
File without changes
@@ -0,0 +1,227 @@
from Rhapso.pipelines.ray.solver import Solver
import yaml
import subprocess
import json
import base64
from pathlib import Path

with open("Rhapso/pipelines/ray/param/dev/zarr_s3_sean.yml", "r") as file:
    config = yaml.safe_load(file)

serialized_config = base64.b64encode(json.dumps(config).encode()).decode()

# Detection run command
detection_cmd = (
    "bash -lc \""
    "python3 - <<\\\"PY\\\"\n"
    "import json, base64\n"
    "from Rhapso.pipelines.ray.interest_point_detection import InterestPointDetection\n"
    f"cfg = json.loads(base64.b64decode(\\\"{serialized_config}\\\").decode())\n"
    "ipd = InterestPointDetection(\n"
    "    dsxy=cfg[\\\"dsxy\\\"], dsz=cfg[\\\"dsz\\\"],\n"
    "    min_intensity=cfg[\\\"min_intensity\\\"], max_intensity=cfg[\\\"max_intensity\\\"],\n"
    "    sigma=cfg[\\\"sigma\\\"], threshold=cfg[\\\"threshold\\\"], file_type=cfg[\\\"file_type\\\"],\n"
    "    xml_file_path=cfg[\\\"xml_file_path_detection\\\"],\n"
    "    image_file_prefix=cfg[\\\"image_file_prefix\\\"],\n"
    "    xml_output_file_path=cfg[\\\"xml_output_file_path\\\"], n5_output_file_prefix=cfg[\\\"n5_output_file_prefix\\\"],\n"
    "    combine_distance=cfg[\\\"combine_distance\\\"],\n"
    "    chunks_per_bound=cfg[\\\"chunks_per_bound\\\"], run_type=cfg[\\\"detection_run_type\\\"],\n"
    "    max_spots=cfg[\\\"max_spots\\\"], median_filter=cfg[\\\"median_filter\\\"],\n"
    ")\n"
    "ipd.run()\n"
    "PY\n"
    "\""
)

# Rigid matching run command
matching_cmd_rigid = (
    "bash -lc \""
    "python3 - <<\\\"PY\\\"\n"
    "import json, base64\n"
    "from Rhapso.pipelines.ray.interest_point_matching import InterestPointMatching\n"
    f"cfg = json.loads(base64.b64decode(\\\"{serialized_config}\\\").decode())\n"
    "ipm = InterestPointMatching(\n"
    "    xml_input_path=cfg[\\\"xml_file_path_matching_rigid\\\"],\n"
    "    n5_output_path=cfg[\\\"n5_matching_output_path\\\"],\n"
    "    input_type=cfg[\\\"input_type\\\"],\n"
    "    match_type=cfg[\\\"match_type_rigid\\\"],\n"
    "    num_neighbors=cfg[\\\"num_neighbors_rigid\\\"],\n"
    "    redundancy=cfg[\\\"redundancy_rigid\\\"],\n"
    "    significance=cfg[\\\"significance_rigid\\\"],\n"
    "    search_radius=cfg[\\\"search_radius_rigid\\\"],\n"
    "    num_required_neighbors=cfg[\\\"num_required_neighbors_rigid\\\"],\n"
    "    model_min_matches=cfg[\\\"model_min_matches_rigid\\\"],\n"
    "    inlier_factor=cfg[\\\"inlier_factor_rigid\\\"],\n"
    "    lambda_value=cfg[\\\"lambda_value_rigid\\\"],\n"
    "    num_iterations=cfg[\\\"num_iterations_rigid\\\"],\n"
    "    regularization_weight=cfg[\\\"regularization_weight_rigid\\\"],\n"
    "    image_file_prefix=cfg[\\\"image_file_prefix\\\"]\n"
    ")\n"
    "ipm.run()\n"
    "PY\n"
    "\""
)

# Affine matching run command
matching_cmd_affine = (
    "bash -lc \""
    "python3 - <<\\\"PY\\\"\n"
    "import json, base64\n"
    "from Rhapso.pipelines.ray.interest_point_matching import InterestPointMatching\n"
    f"cfg = json.loads(base64.b64decode(\\\"{serialized_config}\\\").decode())\n"
    "ipm = InterestPointMatching(\n"
    "    xml_input_path=cfg[\\\"xml_file_path_matching_affine\\\"],\n"
    "    n5_output_path=cfg[\\\"n5_matching_output_path\\\"],\n"
    "    input_type=cfg[\\\"input_type\\\"],\n"
    "    match_type=cfg[\\\"match_type_affine\\\"],\n"
    "    num_neighbors=cfg[\\\"num_neighbors_affine\\\"],\n"
    "    redundancy=cfg[\\\"redundancy_affine\\\"],\n"
    "    significance=cfg[\\\"significance_affine\\\"],\n"
    "    search_radius=cfg[\\\"search_radius_affine\\\"],\n"
    "    num_required_neighbors=cfg[\\\"num_required_neighbors_affine\\\"],\n"
    "    model_min_matches=cfg[\\\"model_min_matches_affine\\\"],\n"
    "    inlier_factor=cfg[\\\"inlier_factor_affine\\\"],\n"
    "    lambda_value=cfg[\\\"lambda_value_affine\\\"],\n"
    "    num_iterations=cfg[\\\"num_iterations_affine\\\"],\n"
    "    regularization_weight=cfg[\\\"regularization_weight_affine\\\"],\n"
    "    image_file_prefix=cfg[\\\"image_file_prefix\\\"]\n"
    ")\n"
    "ipm.run()\n"
    "PY\n"
    "\""
)

# Split-affine matching run command
matching_cmd_split_affine = (
    "bash -lc \""
    "python3 - <<\\\"PY\\\"\n"
    "import json, base64\n"
    "from Rhapso.pipelines.ray.interest_point_matching import InterestPointMatching\n"
    f"cfg = json.loads(base64.b64decode(\\\"{serialized_config}\\\").decode())\n"
    "ipm = InterestPointMatching(\n"
    "    xml_input_path=cfg[\\\"xml_file_path_matching_split_affine\\\"],\n"
    "    n5_output_path=cfg[\\\"n5_matching_output_path\\\"],\n"
    "    input_type=cfg[\\\"input_type\\\"],\n"
    "    match_type=cfg[\\\"match_type_split_affine\\\"],\n"
    "    num_neighbors=cfg[\\\"num_neighbors_split_affine\\\"],\n"
    "    redundancy=cfg[\\\"redundancy_split_affine\\\"],\n"
    "    significance=cfg[\\\"significance_split_affine\\\"],\n"
    "    search_radius=cfg[\\\"search_radius_split_affine\\\"],\n"
    "    num_required_neighbors=cfg[\\\"num_required_neighbors_split_affine\\\"],\n"
    "    model_min_matches=cfg[\\\"model_min_matches_split_affine\\\"],\n"
    "    inlier_factor=cfg[\\\"inlier_factor_split_affine\\\"],\n"
    "    lambda_value=cfg[\\\"lambda_value_split_affine\\\"],\n"
    "    num_iterations=cfg[\\\"num_iterations_split_affine\\\"],\n"
    "    regularization_weight=cfg[\\\"regularization_weight_split_affine\\\"],\n"
    "    image_file_prefix=cfg[\\\"image_file_prefix\\\"]\n"
    ")\n"
    "ipm.run()\n"
    "PY\n"
    "\""
)

# Split run command
split_cmd = (
    "bash -lc \""
    "python3 - <<\\\"PY\\\"\n"
    "import json, base64\n"
    "from Rhapso.pipelines.ray.split_dataset import SplitDataset\n"
    f"cfg = json.loads(base64.b64decode(\\\"{serialized_config}\\\").decode())\n"
    "split = SplitDataset(\n"
    "    xml_file_path=cfg[\\\"xml_file_path_split\\\"],\n"
    "    xml_output_file_path=cfg[\\\"xml_output_file_path_split\\\"],\n"
    "    n5_path=cfg[\\\"n5_path_split\\\"], point_density=cfg[\\\"point_density\\\"], min_points=cfg[\\\"min_points\\\"],\n"
    "    max_points=cfg[\\\"max_points\\\"],\n"
    "    error=cfg[\\\"error\\\"],\n"
    "    exclude_radius=cfg[\\\"exclude_radius\\\"], target_image_size=cfg[\\\"target_image_size\\\"],\n"
    "    target_overlap=cfg[\\\"target_overlap\\\"],\n"
    ")\n"
    "split.run()\n"
    "PY\n"
    "\""
)

# Rigid solver
solver_rigid = Solver(
    xml_file_path_output=config['xml_file_path_output_rigid'],
    n5_input_path=config['n5_input_path'],
    xml_file_path=config['xml_file_path_solver_rigid'],
    run_type=config['run_type_solver_rigid'],
    relative_threshold=config['relative_threshold'],
    absolute_threshold=config['absolute_threshold'],
    min_matches=config['min_matches'],
    damp=config['damp'],
    max_iterations=config['max_iterations'],
    max_allowed_error=config['max_allowed_error'],
    max_plateauwidth=config['max_plateauwidth'],
    metrics_output_path=config['metrics_output_path'],
    fixed_tile=config['fixed_tile']
)

# Affine solver
solver_affine = Solver(
    xml_file_path_output=config['xml_file_path_output_affine'],
    n5_input_path=config['n5_input_path'],
    xml_file_path=config['xml_file_path_solver_affine'],
    run_type=config['run_type_solver_affine'],
    relative_threshold=config['relative_threshold'],
    absolute_threshold=config['absolute_threshold'],
    min_matches=config['min_matches'],
    damp=config['damp'],
    max_iterations=config['max_iterations'],
    max_allowed_error=config['max_allowed_error'],
    max_plateauwidth=config['max_plateauwidth'],
    metrics_output_path=config['metrics_output_path'],
    fixed_tile=config['fixed_tile']
)

# Split-affine solver
solver_split_affine = Solver(
    xml_file_path_output=config['xml_file_path_output_split_affine'],
    n5_input_path=config['n5_input_path'],
    xml_file_path=config['xml_file_path_solver_split_affine'],
    run_type=config['run_type_solver_split_affine'],
    relative_threshold=config['relative_threshold'],
    absolute_threshold=config['absolute_threshold'],
    min_matches=config['min_matches'],
    damp=config['damp'],
    max_iterations=config['max_iterations'],
    max_allowed_error=config['max_allowed_error'],
    max_plateauwidth=config['max_plateauwidth'],
    metrics_output_path=config['metrics_output_path'],
    fixed_tile=config['fixed_tile']
)

prefix = (Path(__file__).resolve().parent / "config/dev").as_posix()
unified_yml = "alignment_cluster_sean.yml"

def exec_on_cluster(name, yml, cmd, cwd):
    print(f"\n=== {name} ===")
    print("$", " ".join(["ray", "exec", yml, cmd]))
    subprocess.run(["ray", "exec", yml, cmd], check=True, cwd=cwd)

print("\n=== Start cluster ===")
print("$", " ".join(["ray", "up", unified_yml, "-y"]))
subprocess.run(["ray", "up", unified_yml, "-y"], check=True, cwd=prefix)

try:
    exec_on_cluster("Detection", unified_yml, detection_cmd, prefix)
    exec_on_cluster("Matching (rigid)", unified_yml, matching_cmd_rigid, prefix)
    solver_rigid.run()
    exec_on_cluster("Matching (affine)", unified_yml, matching_cmd_affine, prefix)
    solver_affine.run()
    exec_on_cluster("Split Dataset", unified_yml, split_cmd, prefix)
    exec_on_cluster("Matching (split_affine)", unified_yml, matching_cmd_split_affine, prefix)
    solver_split_affine.run()
    print("\n✅ Pipeline complete.")

except subprocess.CalledProcessError as e:
    print(f"❌ Pipeline error: {e}")
    raise

finally:
    print("\n=== Tear down cluster ===")
    print("$", " ".join(["ray", "down", unified_yml, "-y"]))
    subprocess.run(["ray", "down", unified_yml, "-y"], cwd=prefix)
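A note on the hand-off pattern above: the driver base64-encodes the JSON-serialized YAML config so it survives shell quoting inside the `ray exec` heredocs, where each remote snippet decodes it back into a dict. A minimal round-trip sketch with stand-in values (not the real YAML contents):

import base64, json

config = {"dsxy": 4, "dsz": 2}                                   # stand-in values
token = base64.b64encode(json.dumps(config).encode()).decode()   # what the driver embeds
assert json.loads(base64.b64decode(token).decode()) == config    # what the remote side recovers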
File without changes
@@ -0,0 +1,71 @@
import json

class MetricReviewCLI:
    def __init__(self, file_path, matching_affine, solve_affine, matching_rigid, solve_rigid):
        self.file_path = file_path
        self.data = {}
        self.matching_affine = matching_affine
        self.solve_affine = solve_affine
        self.matching_rigid = matching_rigid
        self.solve_rigid = solve_rigid

    def retrieve_data(self):
        try:
            with open(self.file_path, 'r') as file:
                self.data = json.load(file)
        except Exception:
            print("Data cannot be retrieved")

    def descriptive_stats(self):
        print("Retrieving statistics")
        return self.data.get("Descriptive stats", {})

    def voxel_stats(self):
        print("Retrieving statistics")
        return self.data.get("Voxelization stats", {})

    def kde_stats(self):
        print("Retrieving statistics")
        return self.data.get("KDE", {})

    def alignment(self):
        print("Retrieving statistics")
        return self.data.get("alignment errors", "Solve has not been run yet or no tiles were compared.")

    def review_data(self, step_name, method_func):
        while True:
            print(f"\n--- Reviewing {step_name} ---")
            result = method_func()
            print("Data:", result)

            while True:
                choice = input("Options: [r]erun, [c]ontinue, [q]uit: ").strip().lower()
                if choice == "r":
                    choice2 = input("Options: rerun [a]ffine, [r]igid, go [b]ack to review: ").strip().lower()
                    if choice2 == "a":
                        self.matching_affine.run()
                        self.solve_affine.run()
                        self.run()
                        return
                    elif choice2 == "r":
                        self.matching_rigid.run()
                        self.solve_rigid.run()
                        self.run()
                        return
                    elif choice2 == "b":
                        break
                elif choice == "c":
                    return
                elif choice == "q":
                    print("Exiting CLI.")
                    raise SystemExit
                else:
                    print("Invalid choice. Please try again.")

    def run(self):
        self.retrieve_data()
        self.review_data("Base statistics", self.descriptive_stats)
        self.review_data("Voxelization", self.voxel_stats)
        self.review_data("KDE stats", self.kde_stats)
        self.review_data("Alignment statistics", self.alignment)
        print("\nAll steps completed.")