Rhapso 0.1.92__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
|
@@ -0,0 +1,458 @@
|
|
|
1
|
+
import zarr
|
|
2
|
+
import numpy as np
|
|
3
|
+
import s3fs
|
|
4
|
+
from itertools import combinations
|
|
5
|
+
import ray
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
Load and Transform Points loads interest points from n5 and transforms them into global space
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
class LoadAndTransformPoints:
    """
    Loads interest points from an N5 container and transforms them into
    global space using the per-view affine registrations.
    """

    def __init__(self, data_global, xml_input_path, n5_output_path, match_type):
        """
        Parameters
        ----------
        data_global : dict
            Parsed dataset metadata. This class reads the
            'viewsInterestPoints', 'viewRegistrations' and 'viewSetup' entries.
        xml_input_path : str
            Path to the dataset XML. Stored only; not read by this class.
        n5_output_path : str
            Base path of the N5 container holding interest points; an
            "s3://" prefix selects the S3 loading code path.
        match_type : str
            Matching strategy; "split-affine" selects overlap-based pairing
            in run().
        """
        self.data_global = data_global
        self.xml_input_path = xml_input_path
        self.n5_output_path = n5_output_path
        self.match_type = match_type
|
|
17
|
+
|
|
18
|
+
def transform_interest_points(self, points, transformation_matrix):
    """
    Apply a 4x4 affine transform to an (N, 3) collection of interest points.

    Returns an (N, 3) float64 array, or [] when no points were given.
    """
    if len(points) == 0:
        return []

    # Lift to homogeneous coordinates: append a column of ones.
    homogeneous = np.column_stack([points, np.ones(len(points))])

    # matrix @ points.T, transposed back to row-per-point layout.
    mapped = (transformation_matrix @ homogeneous.T).T

    # Drop the homogeneous coordinate again.
    return mapped[:, :3].astype(np.float64)
|
|
34
|
+
|
|
35
|
+
def _parse_affine_matrix(self, affine_text):
|
|
36
|
+
"""
|
|
37
|
+
Parse affine transformation matrix from text string
|
|
38
|
+
"""
|
|
39
|
+
try:
|
|
40
|
+
# Split the affine text into float values
|
|
41
|
+
values = [float(x) for x in affine_text.strip().split()]
|
|
42
|
+
|
|
43
|
+
if len(values) != 12:
|
|
44
|
+
raise ValueError(f"Expected 12 values for 3x4 affine matrix, got {len(values)}")
|
|
45
|
+
|
|
46
|
+
# Reshape into 3x4 matrix (row-major order)
|
|
47
|
+
matrix_3x4 = np.array(values).reshape(3, 4)
|
|
48
|
+
|
|
49
|
+
# Convert to 4x4 homogeneous matrix by adding bottom row [0, 0, 0, 1]
|
|
50
|
+
matrix_4x4 = np.eye(4)
|
|
51
|
+
matrix_4x4[:3, :] = matrix_3x4
|
|
52
|
+
|
|
53
|
+
return matrix_4x4
|
|
54
|
+
|
|
55
|
+
except Exception as e:
|
|
56
|
+
print(f"❌ Error parsing affine matrix from '{affine_text}': {e}")
|
|
57
|
+
# Return identity matrix as fallback
|
|
58
|
+
return np.eye(4)
|
|
59
|
+
|
|
60
|
+
def get_transformation_matrix(self, view_id, view_registrations):
    """
    Compose every affine ViewTransform registered for a view into one 4x4
    homogeneous matrix (identity when the view has no transforms).

    Raises on malformed affine strings; transforms with an empty 'affine'
    entry are skipped with a warning.
    """
    try:
        transforms = view_registrations.get(view_id, [])
        if not transforms:
            print(f"⚠️ No transforms found for view {view_id}, using identity matrix")
            return np.eye(4)

        composed = np.eye(4)

        for i, transform in enumerate(transforms):
            affine_str = transform.get("affine")
            if not affine_str:
                print(f"⚠️ No affine string in transform {i+1} for view {view_id}")
                continue

            tokens = [float(x) for x in affine_str.strip().split()]
            if len(tokens) != 12:
                raise ValueError(f"Transform {i+1} in view {view_id} has {len(tokens)} values, expected 12.")

            # 3x4 row-major payload embedded into a 4x4 homogeneous matrix.
            step = np.eye(4)
            step[:3, :4] = np.array(tokens).reshape(3, 4)

            composed = composed @ step

        return composed

    except Exception as e:
        print(f"❌ Error in get_transformation_matrix for view {view_id}: {e}")
        raise
|
|
93
|
+
|
|
94
|
+
def load_interest_points_from_path(self, base_path, loc_path):
    """
    Load a dataset from an N5 container, either on S3 or on the local
    filesystem.

    Parameters
    ----------
    base_path : str
        Root of the N5 container (e.g. ".../interestpoints.n5").
    loc_path : str
        Dataset path inside the container.

    Returns
    -------
    np.ndarray (float64) on success, or [] on any failure (best-effort
    contract preserved from the original, but the error is now logged
    instead of being silently swallowed).
    """
    try:
        if self.n5_output_path.startswith("s3://"):
            # S3-backed container: expose the bucket prefix as a key-value store.
            s3 = s3fs.S3FileSystem(anon=False)
            store = s3fs.S3Map(root=base_path.rstrip("/"), s3=s3, check=False)
        else:
            # Local filesystem N5 container.
            store = zarr.N5Store(base_path)

        root = zarr.open(store, mode="r")
        data = root[loc_path][:]
        return data.astype(np.float64)

    except Exception as e:
        # Keep the empty-list fallback so callers can treat a missing/broken
        # dataset as "no points", but surface the cause for debugging.
        print(f"⚠️ Failed to load interest points from '{base_path}/{loc_path}': {e}")
        return []
|
|
117
|
+
|
|
118
|
+
def get_transformed_points(self, view_id, view_data, view_registrations, label):
    """
    Load the raw interest points of one view and map them into global space.

    Returns [] when the view has no points (or loading failed upstream).
    """
    base = view_data[view_id]['path']
    loc_path = f"{base}/{label}/interestpoints/loc"
    container = self.n5_output_path + "interestpoints.n5"

    raw = self.load_interest_points_from_path(container, loc_path)
    if len(raw) == 0:
        return []

    matrix = self.get_transformation_matrix(view_id, view_registrations)
    return self.transform_interest_points(raw, matrix)
|
|
136
|
+
|
|
137
|
+
def load_and_transform_points(self, pair, view_data, view_registrations, label):
    """
    Load and globally transform the interest points of a single view pair.

    Returns (pointsA, pointsB, viewA_str, viewB_str), or [] on failure.
    """
    def describe(view):
        # Render a (timepoint, setup) tuple in a readable form; anything
        # else falls back to str().
        if isinstance(view, tuple) and len(view) == 2:
            tp, setup = view
            return f"(tpId={tp}, setupId={setup})"
        return str(view)

    viewA, viewB = pair

    try:
        viewA_str = describe(viewA)
        viewB_str = describe(viewB)

        pointsA = self.get_transformed_points(viewA, view_data, view_registrations, label)
        pointsB = self.get_transformed_points(viewB, view_data, view_registrations, label)

        return pointsA, pointsB, viewA_str, viewB_str

    except Exception as e:
        print(f"❌ ERROR: Failed in process_matching_task for views {viewA} and {viewB}")
        print(f"Exception type: {type(e).__name__}")
        print(f"Exception details: {str(e)}")
        import traceback
        traceback.print_exc()
        return []
|
|
167
|
+
|
|
168
|
+
# TODO - eventually handle if more than 1 timepoint
def merge_sets(self, v_sets, pair_sets, i1, i2, pair=None):
    """
    Merge connected components i1 and i2 into one.

    The previous implementation was a stub returning ([], []), which
    silently discarded every component accumulated so far whenever the
    merge branch fired.

    Parameters
    ----------
    v_sets : list[list]
        Views per component.
    pair_sets : list[list[tuple]]
        Pairs per component.
    i1, i2 : int
        Indices of the two components to merge (must differ).
    pair : tuple, optional
        The pair that linked the two components; recorded in the merged
        component when provided (kept optional for backward compatibility).

    Returns
    -------
    (pair_sets, v_sets) — note the order matches the caller's
    `pair_sets, v_sets = self.merge_sets(...)` unpacking.
    """
    lo, hi = (i1, i2) if i1 < i2 else (i2, i1)
    v_sets[lo] = v_sets[lo] + v_sets[hi]
    pair_sets[lo] = pair_sets[lo] + pair_sets[hi]
    if pair is not None:
        pair_sets[lo].append(pair)
    del v_sets[hi]
    del pair_sets[hi]
    return pair_sets, v_sets
|
|
171
|
+
|
|
172
|
+
def set_id(self, v1, v_sets):
    """
    Return the index of the component in `v_sets` that contains `v1`,
    or -1 when it appears in none.

    Components are expected to be disjoint, so the scan now returns on the
    first hit instead of always walking every component (the original kept
    scanning and returned the last match).
    """
    for idx, component in enumerate(v_sets):
        if v1 in component:
            return idx
    return -1
|
|
182
|
+
|
|
183
|
+
def subsets(self, pairs):
|
|
184
|
+
"""
|
|
185
|
+
Cluster views into connected components based on the given pairs
|
|
186
|
+
"""
|
|
187
|
+
views = list(self.data_global['viewsInterestPoints'].keys())
|
|
188
|
+
v_sets: list[set] = []
|
|
189
|
+
pair_sets: list[list[tuple]] = []
|
|
190
|
+
groups = None
|
|
191
|
+
|
|
192
|
+
counter = 0
|
|
193
|
+
|
|
194
|
+
for pair_a, pair_b in pairs:
|
|
195
|
+
|
|
196
|
+
counter += 1
|
|
197
|
+
if counter == 100:
|
|
198
|
+
break
|
|
199
|
+
|
|
200
|
+
i1 = self.set_id(pair_a, v_sets)
|
|
201
|
+
i2 = self.set_id(pair_b, v_sets)
|
|
202
|
+
|
|
203
|
+
if i1 == -1 and i2 == -1:
|
|
204
|
+
v_set: list[set] = []
|
|
205
|
+
pair_set: list[set] = []
|
|
206
|
+
pair_set.append((pair_a, pair_b))
|
|
207
|
+
v_set.append(pair_a)
|
|
208
|
+
v_set.append(pair_b)
|
|
209
|
+
|
|
210
|
+
v_sets.append(v_set)
|
|
211
|
+
pair_sets.append(pair_set)
|
|
212
|
+
|
|
213
|
+
elif i1 >= 0 and i2 == 0:
|
|
214
|
+
v_sets[i2].append(pair_a)
|
|
215
|
+
pair_sets[i2].append((pair_a, pair_b))
|
|
216
|
+
|
|
217
|
+
elif i1 >= 0 and i2 == -1:
|
|
218
|
+
v_sets[i1].append(pair_b)
|
|
219
|
+
pair_sets[i1].append((pair_a, pair_b))
|
|
220
|
+
|
|
221
|
+
elif i1 == i2:
|
|
222
|
+
pair_sets[i1].append((pair_a, pair_b))
|
|
223
|
+
|
|
224
|
+
else:
|
|
225
|
+
pair_sets, v_sets = self.merge_sets(v_sets, pair_sets, i1, i2)
|
|
226
|
+
|
|
227
|
+
for view in views:
|
|
228
|
+
is_present = False
|
|
229
|
+
|
|
230
|
+
for subset_precursor in v_sets:
|
|
231
|
+
if view in subset_precursor:
|
|
232
|
+
is_present = True
|
|
233
|
+
|
|
234
|
+
if not is_present:
|
|
235
|
+
v_set = []
|
|
236
|
+
pair_set = []
|
|
237
|
+
|
|
238
|
+
v_set.append(view)
|
|
239
|
+
v_sets.append(v_set)
|
|
240
|
+
pair_sets.append(pair_set)
|
|
241
|
+
|
|
242
|
+
subsets = []
|
|
243
|
+
|
|
244
|
+
for i in range(len(v_sets)):
|
|
245
|
+
set_pairs = pair_sets[i]
|
|
246
|
+
set_views = v_sets[i]
|
|
247
|
+
subsets.append((set_views, set_pairs, groups))
|
|
248
|
+
|
|
249
|
+
return {
|
|
250
|
+
'groups': None,
|
|
251
|
+
'pairs': pairs,
|
|
252
|
+
'rangeComparator': None,
|
|
253
|
+
'subsets': subsets,
|
|
254
|
+
'views': views
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
def get_bounding_boxes(self, M, dims):
    """
    Compute the world-space axis-aligned bounding box (min/max corners) of
    a voxel-aligned box of size `dims` under the affine transform `M`.

    `M` may be 3x4 (promoted to 4x4) or 4x4; returns (rMin, rMax) as two
    length-3 float arrays.
    """
    M = np.asarray(M, float)
    if M.shape == (3, 4):
        M = np.vstack([M, [0.0, 0.0, 0.0, 1.0]])

    # Interval: origin at 0, extents dims-1 per axis.
    origin = (0.0, 0.0, 0.0)
    extent = (float(dims[0]) - 1.0, float(dims[1]) - 1.0, float(dims[2]) - 1.0)

    A = M[:3, :3]
    shift = (M[0, 3], M[1, 3], M[2, 3])

    r_min = np.empty(3, float)
    r_max = np.empty(3, float)

    for r in range(3):
        # Transformed origin for this output axis.
        base = A[r, 0] * origin[0] + A[r, 1] * origin[1] + A[r, 2] * origin[2] + shift[r]
        lo = hi = base
        # Each axis extent contributes to the min when its coefficient is
        # negative, otherwise to the max (same accumulation order as before).
        for c in range(3):
            contrib = extent[c] * A[r, c]
            if A[r, c] < 0:
                lo += contrib
            else:
                hi += contrib
        r_min[r] = lo
        r_max[r] = hi

    return r_min, r_max
|
|
298
|
+
|
|
299
|
+
def bounding_boxes(self, M, dims):
|
|
300
|
+
"""
|
|
301
|
+
Compute an integer, padded axis-aligned bounding box from the real-valued bounds
|
|
302
|
+
"""
|
|
303
|
+
rMin, rMax = self.get_bounding_boxes(M, dims['size'])
|
|
304
|
+
min_i = np.rint(rMin).astype(int) - 1
|
|
305
|
+
max_i = np.rint(rMax).astype(int) + 1
|
|
306
|
+
return (min_i.tolist(), max_i.tolist())
|
|
307
|
+
|
|
308
|
+
def transform_matrices(self, view):
|
|
309
|
+
"""
|
|
310
|
+
Compose the per-view 4x4 world transform by chaining all affine models in order
|
|
311
|
+
"""
|
|
312
|
+
M = np.eye(4, dtype=float)
|
|
313
|
+
for model in self.data_global['viewRegistrations'][view]:
|
|
314
|
+
vals = np.fromstring(str(model['affine']).replace(',', ' '), sep=' ', dtype=float)
|
|
315
|
+
T = np.eye(4, dtype=float); T[:3, :4] = vals.reshape(3, 4)
|
|
316
|
+
M = M @ T
|
|
317
|
+
return M
|
|
318
|
+
|
|
319
|
+
def overlaps(self, bba, bbb):
    """
    Return True iff two axis-aligned boxes intersect in every dimension.

    Boxes that merely touch at a face (maxA == minB) count as NOT
    overlapping, matching the original comparison operators.
    """
    (min_a, max_a), (min_b, max_b) = bba, bbb
    for lo_a, hi_a, lo_b, hi_b in zip(min_a, max_a, min_b, max_b):
        entirely_before = lo_a <= lo_b and hi_a <= lo_b
        entirely_after = lo_a >= hi_b and hi_a >= hi_b
        if entirely_before or entirely_after:
            return False
    return True
|
|
330
|
+
|
|
331
|
+
def overlap(self, view_a, dims_a, view_b, dims_b):
|
|
332
|
+
"""
|
|
333
|
+
Build each view's transform, derive their axis-aligned bounding boxes, then test for intersection
|
|
334
|
+
"""
|
|
335
|
+
ma = self.transform_matrices(view_a)
|
|
336
|
+
mb = self.transform_matrices(view_b)
|
|
337
|
+
|
|
338
|
+
bba = self.bounding_boxes(ma, dims_a)
|
|
339
|
+
bbb = self.bounding_boxes(mb, dims_b)
|
|
340
|
+
|
|
341
|
+
return self.overlaps(bba, bbb)
|
|
342
|
+
|
|
343
|
+
def setup_groups_split(self):
|
|
344
|
+
"""
|
|
345
|
+
Generate all unique view pairs and keep only those whose setups overlap
|
|
346
|
+
"""
|
|
347
|
+
views = list(self.data_global['viewsInterestPoints'].keys())
|
|
348
|
+
pairs = list(combinations(views, 2))
|
|
349
|
+
final_pairs = []
|
|
350
|
+
|
|
351
|
+
for view_a, view_b in pairs:
|
|
352
|
+
dims_a = self.data_global['viewSetup']['byId'][view_a[1]]
|
|
353
|
+
dims_b = self.data_global['viewSetup']['byId'][view_b[1]]
|
|
354
|
+
|
|
355
|
+
if self.overlap(view_a, dims_a, view_b, dims_b):
|
|
356
|
+
view_a = (view_a[0], view_a[1])
|
|
357
|
+
view_b = (view_b[0], view_b[1])
|
|
358
|
+
final_pairs.append((view_a, view_b))
|
|
359
|
+
|
|
360
|
+
return final_pairs
|
|
361
|
+
|
|
362
|
+
def setup_groups(self):
|
|
363
|
+
"""
|
|
364
|
+
Group views by timepoint and generate all unique unordered intra-timepoint pairs
|
|
365
|
+
"""
|
|
366
|
+
views = list(self.data_global['viewsInterestPoints'].keys())
|
|
367
|
+
|
|
368
|
+
# Group views by timepoint
|
|
369
|
+
timepoint_groups = {}
|
|
370
|
+
for view in views:
|
|
371
|
+
timepoint, _ = view
|
|
372
|
+
if timepoint not in timepoint_groups:
|
|
373
|
+
timepoint_groups[timepoint] = []
|
|
374
|
+
timepoint_groups[timepoint].append(view)
|
|
375
|
+
|
|
376
|
+
# Create pairs within each timepoint
|
|
377
|
+
pairs = []
|
|
378
|
+
for timepoint, timepoint_views in timepoint_groups.items():
|
|
379
|
+
for i in range(len(timepoint_views)):
|
|
380
|
+
for j in range(i + 1, len(timepoint_views)):
|
|
381
|
+
pairs.append((timepoint_views[i], timepoint_views[j]))
|
|
382
|
+
|
|
383
|
+
return {
|
|
384
|
+
'groups': timepoint_groups,
|
|
385
|
+
'pairs': pairs,
|
|
386
|
+
'rangeComparator': None,
|
|
387
|
+
'subsets': None,
|
|
388
|
+
'views': views
|
|
389
|
+
}
|
|
390
|
+
|
|
391
|
+
def as_list(self, x):
    """Pass lists through unchanged; wrap anything else in a one-element list."""
    if isinstance(x, list):
        return x
    return [x]
|
|
393
|
+
|
|
394
|
+
def expand_pairs_with_labels(self, base_pairs, view_ids_global):
|
|
395
|
+
"""
|
|
396
|
+
Add a label for each pair
|
|
397
|
+
"""
|
|
398
|
+
out = []
|
|
399
|
+
for va, vb in base_pairs:
|
|
400
|
+
la = self.as_list(view_ids_global[va].get('label', []))
|
|
401
|
+
lb = self.as_list(view_ids_global[vb].get('label', []))
|
|
402
|
+
|
|
403
|
+
if not la or not lb:
|
|
404
|
+
continue
|
|
405
|
+
|
|
406
|
+
lb_set = set(lb)
|
|
407
|
+
common = [l for l in la if l in lb_set]
|
|
408
|
+
|
|
409
|
+
for l in common:
|
|
410
|
+
out.append(((va[0], va[1]), (vb[0], vb[1]), l))
|
|
411
|
+
|
|
412
|
+
return out
|
|
413
|
+
|
|
414
|
+
def run(self):
    """
    Entry point: build view pairs, expand them per shared label, and load +
    transform each pair's interest points in parallel via Ray.

    Returns
    -------
    (process_pairs, view_registrations) where process_pairs is a list of
    (pointsA, pointsB, viewA_str, viewB_str, label) tuples, one per
    launched task.
    """
    view_ids_global = self.data_global['viewsInterestPoints']
    view_registrations = self.data_global['viewRegistrations']

    # Set up view groups using complete dataset info.
    # "split-affine": overlap-filtered pairs, then clustered into connected
    # components by subsets(); otherwise intra-timepoint pairing.
    if self.match_type == "split-affine":
        setup = self.setup_groups_split()
        setup = self.subsets(setup)
    else:
        setup = self.setup_groups()

    # Distribute points loading (very helpful with split-affine).
    # NOTE(review): this remote function closes over `self`, so the whole
    # object is serialized to each Ray worker.
    @ray.remote
    def process_pair(view_a, view_b, label, view_ids_global, view_registrations):
        # The strings built here are immediately overwritten by the values
        # returned from load_and_transform_points below — presumably a
        # leftover; confirm before cleaning up.
        if isinstance(view_a, tuple) and len(view_a) == 2:
            tpA, setupA = view_a
            viewA_str = f"(tpId={tpA}, setupId={setupA})"
        else:
            viewA_str = str(view_a)

        if isinstance(view_b, tuple) and len(view_b) == 2:
            tpB, setupB = view_b
            viewB_str = f"(tpId={tpB}, setupId={setupB})"
        else:
            viewB_str = str(view_b)

        # NOTE(review): on failure load_and_transform_points returns [],
        # which would make this 4-way unpack raise ValueError — verify the
        # intended failure behavior.
        pointsA, pointsB, viewA_str, viewB_str = self.load_and_transform_points(
            (view_a, view_b), view_ids_global, view_registrations, label
        )
        return pointsA, pointsB, viewA_str, viewB_str, label

    # Replace the raw pairs with (view_a, view_b, label) triples.
    setup['pairs'] = self.expand_pairs_with_labels(setup['pairs'], view_ids_global)

    # launch Ray tasks
    futures = [
        process_pair.remote(view_a, view_b, label, view_ids_global, view_registrations)
        for view_a, view_b, label in setup['pairs']
    ]

    # Block until every pair has been loaded and transformed.
    process_pairs = ray.get(futures)

    return process_pairs, view_registrations
|