rhapso-0.1.92-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0

Rhapso/solver/pre_align_tiles.py
@@ -0,0 +1,323 @@
import numpy as np
import random

"""
Pre Align Tiles roughly aligns p1 with p2 to speed up global optimization rounds.
"""

class PreAlignTiles:
    def __init__(self, min_matches, run_type, fixed_tile):
        self.min_matches = min_matches
        self.run_type = run_type
        self.fixed_tile = fixed_tile

    def rigid_fit_model(self, rigid_model, matches):
        """
        Computes the best-fit rigid transformation (rotation + translation)
        using unweighted quaternion-based estimation between 3D point sets.
        """

        # === Compute unweighted centroids ===
        pc = np.mean([m['p1']['l'] for m in matches], axis=0)
        qc = np.mean([m['p2']['w'] for m in matches], axis=0)

        # === Accumulate scalar components of S matrix ===
        Sxx = Sxy = Sxz = Syx = Syy = Syz = Szx = Szy = Szz = 0.0

        for m in matches:
            px, py, pz = m['p1']['l'] - pc
            qx, qy, qz = m['p2']['w'] - qc

            Sxx += px * qx
            Sxy += px * qy
            Sxz += px * qz
            Syx += py * qx
            Syy += py * qy
            Syz += py * qz
            Szx += pz * qx
            Szy += pz * qy
            Szz += pz * qz

        # === Construct symmetric matrix N ===
        N = np.array([
            [Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx],
            [Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz],
            [Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy],
            [Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz]
        ])

        if not np.all(np.isfinite(N)):
            raise ValueError("Matrix N contains NaNs or Infs")

        # === Eigenvalue decomposition ===
        eigenvalues, eigenvectors = np.linalg.eigh(N)
        q = eigenvectors[:, np.argmax(eigenvalues)]
        q /= np.linalg.norm(q)
        q0, qx, qy, qz = q

        # === Quaternion to rotation matrix ===
        R = np.array([
            [q0*q0 + qx*qx - qy*qy - qz*qz, 2*(qx*qy - q0*qz), 2*(qx*qz + q0*qy)],
            [2*(qy*qx + q0*qz), q0*q0 - qx*qx + qy*qy - qz*qz, 2*(qy*qz - q0*qx)],
            [2*(qz*qx - q0*qy), 2*(qz*qy + q0*qx), q0*q0 - qx*qx - qy*qy + qz*qz]
        ])

        # === Translation ===
        t = qc - R @ pc

        # === Populate model ===
        rigid_model['m00'], rigid_model['m01'], rigid_model['m02'] = R[0, :]
        rigid_model['m10'], rigid_model['m11'], rigid_model['m12'] = R[1, :]
        rigid_model['m20'], rigid_model['m21'], rigid_model['m22'] = R[2, :]
        rigid_model['m03'], rigid_model['m13'], rigid_model['m23'] = t

        return rigid_model

    def affine_fit_model(self, affine_model, matches):
        """
        Exact translation of the Java affine fit() method into Python using scalar math.
        """

        if len(matches) < 3:
            raise ValueError("Not enough matches for affine fit")

        # === Centroids ===
        pcx = pcy = pcz = 0.0
        qcx = qcy = qcz = 0.0
        for m in matches:
            p = m['p1']['l']
            q = m['p2']['w']
            pcx += p[0]
            pcy += p[1]
            pcz += p[2]
            qcx += q[0]
            qcy += q[1]
            qcz += q[2]
        n = len(matches)
        pcx /= n
        pcy /= n
        pcz /= n
        qcx /= n
        qcy /= n
        qcz /= n

        # === Accumulate A and B ===
        a00 = a01 = a02 = a11 = a12 = a22 = 0.0
        b00 = b01 = b02 = b10 = b11 = b12 = b20 = b21 = b22 = 0.0

        for m in matches:
            p = m['p1']['l']
            q = m['p2']['w']
            px = p[0] - pcx
            py = p[1] - pcy
            pz = p[2] - pcz
            qx = q[0] - qcx
            qy = q[1] - qcy
            qz = q[2] - qcz

            a00 += px * px
            a01 += px * py
            a02 += px * pz
            a11 += py * py
            a12 += py * pz
            a22 += pz * pz

            b00 += px * qx
            b01 += px * qy
            b02 += px * qz
            b10 += py * qx
            b11 += py * qy
            b12 += py * qz
            b20 += pz * qx
            b21 += pz * qy
            b22 += pz * qz

        # === Compute inverse of A manually ===
        det = (
            a00 * a11 * a22 +
            a01 * a12 * a02 +
            a02 * a01 * a12 -
            a02 * a11 * a02 -
            a12 * a12 * a00 -
            a22 * a01 * a01
        )

        if det == 0:
            raise ValueError("Affine matrix is singular")

        idet = 1.0 / det
        ai00 = (a11 * a22 - a12 * a12) * idet
        ai01 = (a02 * a12 - a01 * a22) * idet
        ai02 = (a01 * a12 - a02 * a11) * idet
        ai11 = (a00 * a22 - a02 * a02) * idet
        ai12 = (a02 * a01 - a00 * a12) * idet
        ai22 = (a00 * a11 - a01 * a01) * idet

        # === Compute transformation matrix ===
        m00 = ai00 * b00 + ai01 * b10 + ai02 * b20
        m01 = ai01 * b00 + ai11 * b10 + ai12 * b20
        m02 = ai02 * b00 + ai12 * b10 + ai22 * b20

        m10 = ai00 * b01 + ai01 * b11 + ai02 * b21
        m11 = ai01 * b01 + ai11 * b11 + ai12 * b21
        m12 = ai02 * b01 + ai12 * b11 + ai22 * b21

        m20 = ai00 * b02 + ai01 * b12 + ai02 * b22
        m21 = ai01 * b02 + ai11 * b12 + ai12 * b22
        m22 = ai02 * b02 + ai12 * b12 + ai22 * b22

        m03 = qcx - m00 * pcx - m01 * pcy - m02 * pcz
        m13 = qcy - m10 * pcx - m11 * pcy - m12 * pcz
        m23 = qcz - m20 * pcx - m21 * pcy - m22 * pcz

        # === Assign ===
        affine_model['m00'], affine_model['m01'], affine_model['m02'], affine_model['m03'] = m00, m01, m02, m03
        affine_model['m10'], affine_model['m11'], affine_model['m12'], affine_model['m13'] = m10, m11, m12, m13
        affine_model['m20'], affine_model['m21'], affine_model['m22'], affine_model['m23'] = m20, m21, m22, m23

        return affine_model

    def regularize_models(self, affine, rigid):
        """
        Blend affine and rigid models into a single "regularized" 3x4 affine by convex combination
        (90% affine, 10% rigid)
        """
        alpha = 0.1
        l1 = 1.0 - alpha

        def to_array(model):
            return [
                model['m00'], model['m01'], model['m02'], model['m03'],
                model['m10'], model['m11'], model['m12'], model['m13'],
                model['m20'], model['m21'], model['m22'], model['m23'],
            ]

        afs = to_array(affine)
        bfs = to_array(rigid)
        rfs = [l1 * a + alpha * b for a, b in zip(afs, bfs)]

        keys = [
            'm00', 'm01', 'm02', 'm03',
            'm10', 'm11', 'm12', 'm13',
            'm20', 'm21', 'm22', 'm23',
        ]
        regularized = dict(zip(keys, rfs))

        return regularized

    def fit(self, tile, pm):
        """
        Fits multiple transformation models to a tile using provided point matches.
        """
        affine = self.affine_fit_model(tile['model']['a'], pm)
        rigid = self.rigid_fit_model(tile['model']['b'], pm)
        regularized = self.regularize_models(affine, rigid)

        tile['model']['a'] = affine
        tile['model']['b'] = rigid
        tile['model']['regularized'] = regularized

        return tile

    def get_connected_point_matches(self, target_tile, reference_tile):
        """
        Finds point matches in the target tile that connect to the reference tile.
        """
        reference_point_ids = {id(match['p1']) for match in reference_tile['matches']}

        # Collect matches in the target tile that connect to any reference point by object identity
        connected_point_matches = [
            match for match in target_tile['matches']
            if id(match['p2']) in reference_point_ids
        ]

        return connected_point_matches

    def apply_model_in_place(self, point, model):
        x, y, z = point[0], point[1], point[2]
        point[0] = model['m00'] * x + model['m01'] * y + model['m02'] * z + model['m03']
        point[1] = model['m10'] * x + model['m11'] * y + model['m12'] * z + model['m13']
        point[2] = model['m20'] * x + model['m21'] * y + model['m22'] * z + model['m23']

        return point

    def apply_transform_to_tile(self, tile):
        if self.run_type == "affine" or self.run_type == "split-affine":
            model = tile['model']['regularized']
        elif self.run_type == "rigid":
            model = tile['model']['b']

        for match in tile['matches']:
            match['p1']['w'][:] = match['p1']['l']
            self.apply_model_in_place(match['p1']['w'], model)

    def pre_align(self, tiles):
        """
        Greedily seed an initial alignment
        """
        random.shuffle(tiles['tiles'])

        if getattr(self, "fixed_tile", None):
            seed = next((t for t in tiles['tiles'] if t.get('view') == self.fixed_tile), None)
            if seed is None:
                raise ValueError(f"Fixed tile '{self.fixed_tile}' not found in tiles.")
            tiles['fixed_tiles'] = [seed]

        unaligned_tiles = []
        aligned_tiles = []

        if not tiles:
            return unaligned_tiles, aligned_tiles

        if len(tiles['fixed_tiles']) == 0:
            aligned_tiles.append(tiles['tiles'][0])
            unaligned_tiles.extend(tiles['tiles'][1:])
        else:
            for tile in tiles['tiles']:
                if tile in tiles['fixed_tiles']:
                    aligned_tiles.append(tile)
                else:
                    unaligned_tiles.append(tile)

        ref_index = 0
        while ref_index < len(aligned_tiles):

            if len(unaligned_tiles) == 0:
                break

            reference_tile = aligned_tiles[ref_index]
            self.apply_transform_to_tile(reference_tile)

            tiles_added = 0
            target_index = 0

            while target_index < len(unaligned_tiles):
                target_tile = unaligned_tiles[target_index]

                if any(conn['view'] == target_tile['view'] for conn in reference_tile['connected_tiles']):
                    pm = self.get_connected_point_matches(target_tile, reference_tile)

                    if len(pm) >= self.min_matches:
                        target_tile = self.fit(target_tile, pm)
                        unaligned_tiles.pop(target_index)
                        aligned_tiles.append(target_tile)
                        tiles_added += 1
                        continue

                target_index += 1

            # Always move to the next reference tile
            ref_index += 1

        return unaligned_tiles

    def run(self, tiles):
        """
        Executes the entry point of the script.
        """
        unaligned_tiles = self.pre_align(tiles)

        if len(unaligned_tiles) > 0:
            print(f"aligned all tiles but: {len(unaligned_tiles)}")

        return tiles['tiles']
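
For orientation, a minimal usage sketch of PreAlignTiles follows. The tile/match dictionaries are a toy construction inferred from the code above (each interest point carries a local coordinate 'l' and a world coordinate 'w'; matches pair p1 in one tile with p2 in a connected tile, and matched points are shared by object identity across the two tiles' match lists); the real structures come from the solver's data-prep and matching steps and may carry additional fields.

import numpy as np
from Rhapso.solver.pre_align_tiles import PreAlignTiles  # module path taken from the file listing above

def make_point(xyz):
    # each interest point holds a local ('l') and a world ('w') coordinate
    return {'l': np.array(xyz, dtype=float), 'w': np.array(xyz, dtype=float)}

# two tiles whose point sets differ by a pure +10 shift along x (illustrative values only)
pts_a = [make_point(p) for p in [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]]
pts_b = [make_point(p['l'] + np.array([10.0, 0.0, 0.0])) for p in pts_a]

identity = {f'm{i}{j}': (1.0 if i == j else 0.0) for i in range(3) for j in range(4)}

tile_a = {'view': 'timepoint: 0, setup: 0',
          'matches': [{'p1': a, 'p2': b} for a, b in zip(pts_a, pts_b)],
          'connected_tiles': [],
          'model': {'a': dict(identity), 'b': dict(identity), 'regularized': dict(identity)}}
tile_b = {'view': 'timepoint: 0, setup: 1',
          'matches': [{'p1': b, 'p2': a} for a, b in zip(pts_a, pts_b)],
          'connected_tiles': [],
          'model': {'a': dict(identity), 'b': dict(identity), 'regularized': dict(identity)}}
tile_a['connected_tiles'] = [tile_b]
tile_b['connected_tiles'] = [tile_a]

tiles = {'tiles': [tile_a, tile_b], 'fixed_tiles': [tile_a]}  # seed the greedy traversal at tile_a

PreAlignTiles(min_matches=4, run_type='affine', fixed_tile=None).run(tiles)
print(tile_b['model']['regularized'])  # linear part ~ identity, m03 ~ -10: tile_b pre-aligned onto tile_a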
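
rigid_fit_model is the classic quaternion-based absolute-orientation estimate (Horn's method). As a quick, self-contained sanity check, with synthetic, noise-free correspondences and test values that are not part of the package, the recovered 3x4 model should reproduce a known rotation and translation to numerical precision:

import numpy as np
from Rhapso.solver.pre_align_tiles import PreAlignTiles

rng = np.random.default_rng(0)

# ground-truth rigid transform: 30 degrees about z plus a translation
theta = np.deg2rad(30.0)
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([5.0, -3.0, 2.0])

# correspondences in the expected match format: p1['l'] maps onto p2['w']
src = rng.uniform(-10.0, 10.0, size=(20, 3))
dst = src @ R_true.T + t_true
matches = [{'p1': {'l': s}, 'p2': {'w': d}} for s, d in zip(src, dst)]

fitter = PreAlignTiles(min_matches=3, run_type='rigid', fixed_tile=None)
model = fitter.rigid_fit_model({}, matches)

R_est = np.array([[model['m00'], model['m01'], model['m02']],
                  [model['m10'], model['m11'], model['m12']],
                  [model['m20'], model['m21'], model['m22']]])
t_est = np.array([model['m03'], model['m13'], model['m23']])

assert np.allclose(R_est, R_true, atol=1e-8) and np.allclose(t_est, t_true, atol=1e-8)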

Rhapso/solver/save_results.py
@@ -0,0 +1,97 @@
import xml.etree.ElementTree as ET
import boto3
import io
import s3fs
import json

"""
Utility class that saves the final alignment matrices per view to XML
"""

class SaveResults:
    def __init__(self, tiles, xml_file, xml_file_path, run_type, validation_stats, n5_input_path):
        self.tiles = tiles
        self.xml_file = xml_file
        self.xml_file_path = xml_file_path
        self.run_type = run_type
        self.validation_stats = validation_stats
        self.n5_input_path = n5_input_path

    def save_ransac_metrics(self):
        path = self.n5_input_path + self.run_type + "-solver_metrics.txt"
        if self.n5_input_path.startswith("s3://"):
            fs = s3fs.S3FileSystem(anon=False)
            with fs.open(path, "w", encoding="utf-8") as f:
                json.dump(self.validation_stats, f, default=str, indent=2)
        else:
            with open(path, "w", encoding="utf-8") as f:
                json.dump(self.validation_stats, f, default=str, indent=2)

    def save_xml(self):
        """
        Saves the XML tree to either an S3 bucket or the local filesystem based on the file source.
        """
        if self.xml_file_path.startswith("s3://"):
            xml_bytes = io.BytesIO()
            self.tree.write(xml_bytes, encoding='utf-8', xml_declaration=True)
            xml_bytes.seek(0)
            no_scheme = self.xml_file_path.replace("s3://", "", 1)
            bucket, key = no_scheme.split("/", 1)
            s3 = boto3.client("s3")
            s3.upload_fileobj(xml_bytes, bucket, key)
        else:
            with open(self.xml_file_path, 'wb') as file:
                self.tree.write(file, encoding='utf-8', xml_declaration=True)

    def add_new_view_transform(self):
        """
        Adds an affine transform entry to each view in the XML, using fitted or default values.
        """
        for view_registration in self.root.findall('.//ViewRegistration'):
            timepoint = view_registration.get('timepoint')
            setup = view_registration.get('setup')
            view = f"timepoint: {timepoint}, setup: {setup}"

            new_view_transform = ET.Element('ViewTransform', {'type': 'affine'})
            new_view_transform.text = "\n\t\t\t"

            name = ET.SubElement(new_view_transform, 'Name')
            if self.run_type == "rigid":
                name.text = 'RigidModel3D, lambda = 0.5'
            elif self.run_type == "affine" or self.run_type == "split-affine":
                name.text = 'AffineModel3D regularized with a RigidModel3D, lambda = 0.05'
            name.tail = "\n\t\t\t"

            affine = ET.SubElement(new_view_transform, 'affine')

            tile = next((tile for tile in self.tiles if tile['view'] == view), None)
            model = (tile or {}).get('model', {}).get('regularized', {})

            if not model or all(float(v) == 0.0 for v in model.values()):
                affine.text = '1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0'
            else:
                affine.text = ' '.join(str(model.get(f'm{i}{j}', 0.0)) for i in range(3) for j in range(4))
            print(f"tile: {view}, model: {affine.text}")

            view_registration.text = "\n\t\t\t"
            view_registration.insert(0, new_view_transform)
            new_view_transform.text = "\n\t\t\t\t"
            name.tail = "\n\t\t\t\t"
            affine.tail = "\n\t\t\t"
            new_view_transform.tail = "\n\t\t\t"

    def load_xml(self):
        """
        Parses the loaded XML string and initializes the ElementTree structure.
        """
        self.root = ET.fromstring(self.xml_file)
        self.tree = ET.ElementTree(self.root)

    def run(self):
        """
        Executes the entry point of the script.
        """
        self.load_xml()
        self.add_new_view_transform()
        self.save_xml()
        self.save_ransac_metrics()
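
A minimal, local-filesystem sketch of how SaveResults might be driven, with made-up paths and a toy BigStitcher-style XML (the real pipeline passes the solver's XML string and fitted tile models); note that the module imports boto3 and s3fs up front, so both must be installed even for purely local output.

import os
import tempfile
from Rhapso.solver.save_results import SaveResults  # module path taken from the file listing above

xml_in = """<SpimData version="0.2">
  <ViewRegistrations>
    <ViewRegistration timepoint="0" setup="0"></ViewRegistration>
  </ViewRegistrations>
</SpimData>"""

# one tile with a fitted "regularized" 3x4 model (row-major m00..m23); values are illustrative
model = {f'm{i}{j}': (1.0 if i == j else 0.0) for i in range(3) for j in range(4)}
model['m03'] = 12.5  # x translation

out_dir = tempfile.mkdtemp()
SaveResults(
    tiles=[{'view': 'timepoint: 0, setup: 0', 'model': {'regularized': model}}],
    xml_file=xml_in,
    xml_file_path=os.path.join(out_dir, "aligned.xml"),
    run_type="affine",
    validation_stats={"note": "demo"},
    n5_input_path=os.path.join(out_dir, "demo-"),  # metrics land in demo-affine-solver_metrics.txt
).run()

with open(os.path.join(out_dir, "aligned.xml")) as fh:
    print(fh.read())  # each ViewRegistration gains a ViewTransform holding the 12 affine values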

Rhapso/solver/view_transforms.py
@@ -0,0 +1,75 @@
import pandas as pd
import numpy as np

"""
Utility class to parse and combine view registration matrices
"""

class ViewTransformModels:
    def __init__(self, df):
        self.view_registrations_df = df.get("view_registrations", pd.DataFrame())
        self.calibration_matrices = {}
        self.rotation_matrices = {}
        self.concatenated_matrices = {}

    def create_transform_matrices(self):
        """
        Extracts transformation matrices from a dataframe and organizes them into appropriate data structures
        based on their types and intended usage.
        """
        if self.view_registrations_df.empty:
            raise ValueError("view_registrations_df is empty")

        for _, row in self.view_registrations_df.iterrows():
            if row["type"] == "affine":
                # Create affine matrix
                affine_values = np.fromstring(row["affine"], sep=",").astype(np.float64)
                if len(affine_values) == 12:
                    affine_values = np.append(affine_values, [0, 0, 0, 1])  # append homogeneous row
                affine_matrix = affine_values.reshape(4, 4)

                # Get view ID
                view_id = f"timepoint: {row['timepoint']}, setup: {row['setup']}"
                name = row["name"].strip().lower()

                # Store matrix based on type
                if "translation to nominal grid" in name:
                    self.calibration_matrices.setdefault(view_id, []).append(affine_matrix)
                else:
                    self.rotation_matrices.setdefault(view_id, []).append(affine_matrix)

    def concatenate_matrices_by_view_id(self):
        """
        Concatenates calibration and rotation matrices for each view ID, if available.
        """
        if not self.calibration_matrices and not self.rotation_matrices:
            raise ValueError("No matrices to concatenate")

        all_keys = set(self.calibration_matrices.keys()).union(self.rotation_matrices.keys())

        for key in all_keys:
            rotation_matrices = self.rotation_matrices.get(key, [])
            translation_matrices = self.calibration_matrices.get(key, [])

            # First combine all rotation matrices
            rotation_combined = np.eye(4)
            for mat in rotation_matrices:
                rotation_combined = np.dot(rotation_combined, mat)

            # Then combine all translation matrices
            translation_combined = np.eye(4)
            for mat in translation_matrices:
                translation_combined = np.dot(translation_combined, mat)

            # Final = rotation * translation (translation applied first, then rotation, in column-vector convention)
            final_matrix = np.dot(rotation_combined, translation_combined)

            self.concatenated_matrices[key] = final_matrix

    def run(self):
        """
        Executes the entry point of the script.
        """
        self.create_transform_matrices()
        self.concatenate_matrices_by_view_id()
        return self.concatenated_matrices
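
To make the expected input concrete, a hedged sketch: ViewTransformModels takes a dict of DataFrames keyed by "view_registrations" (presumably built by the xml_to_dataframe_solver step listed above), where each row carries type, timepoint, setup, name, and a comma-separated 12-value row-major affine. Rows whose name contains "translation to nominal grid" are grouped as calibration/translation, everything else as rotation, and the per-view result is rotation_combined @ translation_combined on 4x4 homogeneous matrices (column-vector convention, so the translation acts first). The values below are illustrative only.

import pandas as pd
from Rhapso.solver.view_transforms import ViewTransformModels  # module path taken from the file listing above

rows = [
    {"type": "affine", "timepoint": "0", "setup": "0",
     "name": "Translation to Nominal Grid",
     "affine": "1,0,0,100, 0,1,0,200, 0,0,1,0"},   # tile offset on the nominal grid
    {"type": "affine", "timepoint": "0", "setup": "0",
     "name": "calibration",
     "affine": "0.5,0,0,0, 0,0.5,0,0, 0,0,2,0"},   # anisotropic voxel scaling
]

models = ViewTransformModels({"view_registrations": pd.DataFrame(rows)})
concatenated = models.run()
print(concatenated["timepoint: 0, setup: 0"])
# -> 4x4 matrix equal to scale @ translation, i.e. the grid offset is applied before the scaling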