Rhapso 0.1.92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. Rhapso/__init__.py +1 -0
  2. Rhapso/data_prep/__init__.py +2 -0
  3. Rhapso/data_prep/n5_reader.py +188 -0
  4. Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
  5. Rhapso/data_prep/xml_to_dataframe.py +215 -0
  6. Rhapso/detection/__init__.py +5 -0
  7. Rhapso/detection/advanced_refinement.py +203 -0
  8. Rhapso/detection/difference_of_gaussian.py +324 -0
  9. Rhapso/detection/image_reader.py +117 -0
  10. Rhapso/detection/metadata_builder.py +130 -0
  11. Rhapso/detection/overlap_detection.py +327 -0
  12. Rhapso/detection/points_validation.py +49 -0
  13. Rhapso/detection/save_interest_points.py +265 -0
  14. Rhapso/detection/view_transform_models.py +67 -0
  15. Rhapso/fusion/__init__.py +0 -0
  16. Rhapso/fusion/affine_fusion/__init__.py +2 -0
  17. Rhapso/fusion/affine_fusion/blend.py +289 -0
  18. Rhapso/fusion/affine_fusion/fusion.py +601 -0
  19. Rhapso/fusion/affine_fusion/geometry.py +159 -0
  20. Rhapso/fusion/affine_fusion/io.py +546 -0
  21. Rhapso/fusion/affine_fusion/script_utils.py +111 -0
  22. Rhapso/fusion/affine_fusion/setup.py +4 -0
  23. Rhapso/fusion/affine_fusion_worker.py +234 -0
  24. Rhapso/fusion/multiscale/__init__.py +0 -0
  25. Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
  26. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
  27. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
  28. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
  29. Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
  30. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
  31. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
  32. Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
  33. Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
  34. Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
  35. Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
  36. Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
  37. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
  38. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
  39. Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
  40. Rhapso/fusion/multiscale_worker.py +113 -0
  41. Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
  42. Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
  43. Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
  44. Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
  45. Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
  46. Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
  47. Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
  48. Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
  49. Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
  50. Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
  51. Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
  52. Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
  53. Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
  54. Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
  55. Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
  56. Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
  57. Rhapso/matching/__init__.py +0 -0
  58. Rhapso/matching/load_and_transform_points.py +458 -0
  59. Rhapso/matching/ransac_matching.py +544 -0
  60. Rhapso/matching/save_matches.py +120 -0
  61. Rhapso/matching/xml_parser.py +302 -0
  62. Rhapso/pipelines/__init__.py +0 -0
  63. Rhapso/pipelines/ray/__init__.py +0 -0
  64. Rhapso/pipelines/ray/aws/__init__.py +0 -0
  65. Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
  66. Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
  67. Rhapso/pipelines/ray/evaluation.py +71 -0
  68. Rhapso/pipelines/ray/interest_point_detection.py +137 -0
  69. Rhapso/pipelines/ray/interest_point_matching.py +110 -0
  70. Rhapso/pipelines/ray/local/__init__.py +0 -0
  71. Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
  72. Rhapso/pipelines/ray/matching_stats.py +104 -0
  73. Rhapso/pipelines/ray/param/__init__.py +0 -0
  74. Rhapso/pipelines/ray/solver.py +120 -0
  75. Rhapso/pipelines/ray/split_dataset.py +78 -0
  76. Rhapso/solver/__init__.py +0 -0
  77. Rhapso/solver/compute_tiles.py +562 -0
  78. Rhapso/solver/concatenate_models.py +116 -0
  79. Rhapso/solver/connected_graphs.py +111 -0
  80. Rhapso/solver/data_prep.py +181 -0
  81. Rhapso/solver/global_optimization.py +410 -0
  82. Rhapso/solver/model_and_tile_setup.py +109 -0
  83. Rhapso/solver/pre_align_tiles.py +323 -0
  84. Rhapso/solver/save_results.py +97 -0
  85. Rhapso/solver/view_transforms.py +75 -0
  86. Rhapso/solver/xml_to_dataframe_solver.py +213 -0
  87. Rhapso/split_dataset/__init__.py +0 -0
  88. Rhapso/split_dataset/compute_grid_rules.py +78 -0
  89. Rhapso/split_dataset/save_points.py +101 -0
  90. Rhapso/split_dataset/save_xml.py +377 -0
  91. Rhapso/split_dataset/split_images.py +537 -0
  92. Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
  93. rhapso-0.1.92.dist-info/METADATA +39 -0
  94. rhapso-0.1.92.dist-info/RECORD +101 -0
  95. rhapso-0.1.92.dist-info/WHEEL +5 -0
  96. rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
  97. rhapso-0.1.92.dist-info/top_level.txt +2 -0
  98. tests/__init__.py +1 -0
  99. tests/test_detection.py +17 -0
  100. tests/test_matching.py +21 -0
  101. tests/test_solving.py +21 -0
Rhapso/solver/global_optimization.py
@@ -0,0 +1,410 @@
+ import numpy as np
+ import copy
+ import math
+
+ """
+ GlobalOptimization iteratively refines per-tile transforms to achieve sub-pixel alignment using matched point correspondences.
+ """
+
+ class GlobalOptimization:
+     def __init__(self, tiles, relative_threshold, absolute_threshold, min_matches,
+                  damp, max_iterations, max_allowed_error, max_plateauwidth, run_type, metrics_output_path):
+         self.tiles = tiles
+         self.relative_threshold = relative_threshold
+         self.absolute_threshold = absolute_threshold
+         self.min_matches = min_matches
+         self.damp = damp
+         self.max_iterations = max_iterations
+         self.max_allowed_error = max_allowed_error
+         self.max_plateauwidth = max_plateauwidth
+         self.run_type = run_type
+         self.metrics_output_path = metrics_output_path
+         self.validation_stats = {
+             'solve_metrics_per_tile': {
+                 'i': 0,
+                 'stats': []
+             }
+         }
+         self.observer = {
+             'max': 0,
+             'mean': 0,
+             'median': 0,
+             'min': float('inf'),
+             'slope': [],
+             'sorted_values': [],
+             'square_differences': 0,
+             'squares': 0,
+             'std': 0,
+             'std_0': 0,
+             'values': [],
+             'var': 0,
+             'var_0': 0,
+         }
+         # self.save_metrics = JSONFileHandler(self.metrics_output_path)
+
+     def update_observer(self, new_value):
+         obs = self.observer
+
+         obs['values'].append(new_value)
+         # Keep a sorted copy of the observed errors so the median can be reported.
+         obs['sorted_values'].append(new_value)
+         obs['sorted_values'].sort()
+         obs['median'] = obs['sorted_values'][len(obs['sorted_values']) // 2]
+
+         if len(obs['values']) == 1:
+             obs['slope'].append(0.0)
+             obs['mean'] = new_value
+             obs['var'] = 0
+             obs['var_0'] = 0
+         else:
+             obs['slope'].append(new_value - obs['values'][-2])
+
+             delta = new_value - obs['mean']
+             obs['mean'] += delta / len(obs['values'])
+
+             obs['square_differences'] += delta * (new_value - obs['mean'])
+             obs['var'] = obs['square_differences'] / (len(obs['values']) - 1)
+
+             obs['squares'] += new_value * new_value
+             obs['var_0'] = obs['squares'] / (len(obs['values']) - 1)
+
+             obs['std_0'] = math.sqrt(obs['var_0'])
+             obs['std'] = math.sqrt(obs['var'])
+
+         if new_value < obs['min']:
+             obs['min'] = new_value
+         if new_value > obs['max']:
+             obs['max'] = new_value
+
+     def update_cost(self, tile):
+         """
+         Computes and stores the average distance and weighted cost (fit quality) of point matches for a tile.
+         """
+         distance = 0.0
+         cost = 0.0
+         if len(tile["matches"]) > 0:
+             sum_weight = 0.0
+
+             for match in tile["matches"]:
+                 dl = np.linalg.norm(np.array(match["p1"]["w"]) - np.array(match["p2"]["w"]))
+
+                 distance += dl
+                 cost += dl * dl * match['weight']
+                 sum_weight += match['weight']
+
+             distance /= len(tile["matches"])
+             cost /= sum_weight
+
+         tile['model']['cost'] = cost
+         tile['cost'] = cost
+         tile['distance'] = distance
+
+     def update_errors(self):
+         """
+         Updates cost metrics for all tiles and returns the average alignment error, which is used to monitor convergence.
+         """
+         total_distance = 0.0
+         min_error = float("inf")
+         max_error = 0.0
+
+         for tile in self.tiles:
+             self.update_cost(tile)
+
+             if tile['distance'] < min_error:
+                 min_error = tile['distance']
+             if tile['distance'] > max_error:
+                 max_error = tile['distance']
+             total_distance += tile['distance']
+
+         average_error = total_distance / len(self.tiles)
+
+         # self.save_metrics.update(
+         #     "alignment errors",
+         #     {
+         #         "min_error": min_error,
+         #         "max_error": max_error,
+         #         "mean_error": average_error,
+         #     },
+         # )
+
+         return average_error
+
+     def apply_damp(self, tile):
+         """
+         Applies the tile's regularized model to p1 of every match, moving its world coordinates toward the model estimate by the damping factor.
+         """
+         model = tile["model"]["regularized"]
+         matches = tile["matches"]
+
+         for match in matches:
+             a = self.apply_model_in_place(copy.deepcopy(match['p1']['l']), model)
+
+             for i in range(len(a)):
+                 match['p1']['w'][i] += self.damp * (a[i] - match['p1']['w'][i])
+
+     def rigid_fit_model(self, rigid_model, matches):
+         """
+         Computes the best-fit rigid transformation (rotation + translation)
+         using unweighted quaternion-based estimation between 3D point sets.
+         """
+
+         # === Compute unweighted centroids ===
+         pc = np.mean([m['p1']['l'] for m in matches], axis=0)
+         qc = np.mean([m['p2']['w'] for m in matches], axis=0)
+
+         # === Accumulate scalar components of S matrix ===
+         Sxx = Sxy = Sxz = Syx = Syy = Syz = Szx = Szy = Szz = 0.0
+
+         for m in matches:
+             px, py, pz = m['p1']['l'] - pc
+             qx, qy, qz = m['p2']['w'] - qc
+
+             Sxx += px * qx
+             Sxy += px * qy
+             Sxz += px * qz
+             Syx += py * qx
+             Syy += py * qy
+             Syz += py * qz
+             Szx += pz * qx
+             Szy += pz * qy
+             Szz += pz * qz
+
+         # === Construct symmetric matrix N ===
+         N = np.array([
+             [Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx],
+             [Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz],
+             [Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy],
+             [Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz]
+         ])
+
+         if not np.all(np.isfinite(N)):
+             raise ValueError("Matrix N contains NaNs or Infs")
+
+         # === Eigenvalue decomposition ===
+         eigenvalues, eigenvectors = np.linalg.eigh(N)
+         q = eigenvectors[:, np.argmax(eigenvalues)]
+         q /= np.linalg.norm(q)
+         q0, qx, qy, qz = q
+
+         # === Quaternion to rotation matrix ===
+         R = np.array([
+             [q0*q0 + qx*qx - qy*qy - qz*qz, 2*(qx*qy - q0*qz), 2*(qx*qz + q0*qy)],
+             [2*(qy*qx + q0*qz), q0*q0 - qx*qx + qy*qy - qz*qz, 2*(qy*qz - q0*qx)],
+             [2*(qz*qx - q0*qy), 2*(qz*qy + q0*qx), q0*q0 - qx*qx - qy*qy + qz*qz]
+         ])
+
+         # === Translation ===
+         t = qc - R @ pc
+
+         # === Populate model ===
+         rigid_model['m00'], rigid_model['m01'], rigid_model['m02'] = R[0, :]
+         rigid_model['m10'], rigid_model['m11'], rigid_model['m12'] = R[1, :]
+         rigid_model['m20'], rigid_model['m21'], rigid_model['m22'] = R[2, :]
+         rigid_model['m03'], rigid_model['m13'], rigid_model['m23'] = t
+
+         return rigid_model
+
+     def affine_fit_model(self, affine_model, matches):
+         """
+         Fits an affine transformation model to the point matches using scalar (non-vectorized) math.
+         """
+
+         if len(matches) < 3:
+             raise ValueError("Not enough matches for affine fit")
+
+         # === Centroids ===
+         pcx = pcy = pcz = 0.0
+         qcx = qcy = qcz = 0.0
+         for m in matches:
+             p = m['p1']['l']
+             q = m['p2']['w']
+             pcx += p[0]
+             pcy += p[1]
+             pcz += p[2]
+             qcx += q[0]
+             qcy += q[1]
+             qcz += q[2]
+
+         n = len(matches)
+         pcx /= n
+         pcy /= n
+         pcz /= n
+         qcx /= n
+         qcy /= n
+         qcz /= n
+
+         # === Accumulate A and B ===
+         a00 = a01 = a02 = a11 = a12 = a22 = 0.0
+         b00 = b01 = b02 = b10 = b11 = b12 = b20 = b21 = b22 = 0.0
+
+         for m in matches:
+             p = m['p1']['l']
+             q = m['p2']['w']
+             px = p[0] - pcx
+             py = p[1] - pcy
+             pz = p[2] - pcz
+             qx = q[0] - qcx
+             qy = q[1] - qcy
+             qz = q[2] - qcz
+
+             a00 += px * px
+             a01 += px * py
+             a02 += px * pz
+             a11 += py * py
+             a12 += py * pz
+             a22 += pz * pz
+
+             b00 += px * qx
+             b01 += px * qy
+             b02 += px * qz
+             b10 += py * qx
+             b11 += py * qy
+             b12 += py * qz
+             b20 += pz * qx
+             b21 += pz * qy
+             b22 += pz * qz
+
+         # === Compute inverse of A manually ===
+         det = (
+             a00 * a11 * a22 +
+             a01 * a12 * a02 +
+             a02 * a01 * a12 -
+             a02 * a11 * a02 -
+             a12 * a12 * a00 -
+             a22 * a01 * a01
+         )
+
+         if det == 0:
+             raise ValueError("Affine matrix is singular")
+
+         idet = 1.0 / det
+         ai00 = (a11 * a22 - a12 * a12) * idet
+         ai01 = (a02 * a12 - a01 * a22) * idet
+         ai02 = (a01 * a12 - a02 * a11) * idet
+         ai11 = (a00 * a22 - a02 * a02) * idet
+         ai12 = (a02 * a01 - a00 * a12) * idet
+         ai22 = (a00 * a11 - a01 * a01) * idet
+
+         # === Compute transformation matrix ===
+         m00 = ai00 * b00 + ai01 * b10 + ai02 * b20
+         m01 = ai01 * b00 + ai11 * b10 + ai12 * b20
+         m02 = ai02 * b00 + ai12 * b10 + ai22 * b20
+
+         m10 = ai00 * b01 + ai01 * b11 + ai02 * b21
+         m11 = ai01 * b01 + ai11 * b11 + ai12 * b21
+         m12 = ai02 * b01 + ai12 * b11 + ai22 * b21
+
+         m20 = ai00 * b02 + ai01 * b12 + ai02 * b22
+         m21 = ai01 * b02 + ai11 * b12 + ai12 * b22
+         m22 = ai02 * b02 + ai12 * b12 + ai22 * b22
+
+         m03 = qcx - m00 * pcx - m01 * pcy - m02 * pcz
+         m13 = qcy - m10 * pcx - m11 * pcy - m12 * pcz
+         m23 = qcz - m20 * pcx - m21 * pcy - m22 * pcz
+
+         # === Assign ===
+         affine_model['m00'], affine_model['m01'], affine_model['m02'], affine_model['m03'] = m00, m01, m02, m03
+         affine_model['m10'], affine_model['m11'], affine_model['m12'], affine_model['m13'] = m10, m11, m12, m13
+         affine_model['m20'], affine_model['m21'], affine_model['m22'], affine_model['m23'] = m20, m21, m22, m23
+
+         return affine_model
+
+     def regularize_models(self, affine, rigid):
+         # Linearly interpolate the affine model toward the rigid model; alpha is the weight of the rigid component.
+         alpha = 0.1
+         l1 = 1.0 - alpha
+
+         def to_array(model):
+             return [
+                 model['m00'], model['m01'], model['m02'], model['m03'],
+                 model['m10'], model['m11'], model['m12'], model['m13'],
+                 model['m20'], model['m21'], model['m22'], model['m23'],
+             ]
+
+         afs = to_array(affine)
+         bfs = to_array(rigid)
+
+         rfs = [l1 * a + alpha * b for a, b in zip(afs, bfs)]
+
+         keys = [
+             'm00', 'm01', 'm02', 'm03',
+             'm10', 'm11', 'm12', 'm13',
+             'm20', 'm21', 'm22', 'm23',
+         ]
+         regularized = dict(zip(keys, rfs))
+
+         return regularized
+
+     def fit(self, tile):
+         """
+         Fits multiple transformation models to a tile.
+         """
+         affine = self.affine_fit_model(tile['model']['a'], tile['matches'])
+         rigid = self.rigid_fit_model(tile['model']['b'], tile['matches'])
+         regularized = self.regularize_models(affine, rigid)
+
+         tile['model']['a'] = affine
+         tile['model']['b'] = rigid
+         tile['model']['regularized'] = regularized
+
+     def apply_model_in_place(self, point, model):
+         x, y, z = point[0], point[1], point[2]
+         point[0] = model['m00'] * x + model['m01'] * y + model['m02'] * z + model['m03']
+         point[1] = model['m10'] * x + model['m11'] * y + model['m12'] * z + model['m13']
+         point[2] = model['m20'] * x + model['m21'] * y + model['m22'] * z + model['m23']
+
+         return point
+
+     def apply(self):
+         for tile in self.tiles:
+             if self.run_type in ('affine', 'split-affine'):
+                 model = tile['model']['regularized']
+             elif self.run_type == 'rigid':
+                 model = tile['model']['b']
+             else:
+                 raise ValueError(f"Unsupported run_type: {self.run_type}")
+
+             for match in tile['matches']:
+                 match['p1']['w'][:] = match['p1']['l']
+                 match['p1']['w'][:] = self.apply_model_in_place(match['p1']['w'], model)
+
+     def get_wide_slope(self, values, width):
+         width = int(width)
+         return (values[-1] - values[-1 - width]) / width
+
+     def optimize_silently(self):
+         """
+         Iteratively refines tile alignments using model fitting and damping until convergence or the maximum number of iterations.
+         """
+         i = 0
+         proceed = i < self.max_iterations
+         self.apply()
+
+         while proceed:
+             if not self.tiles:
+                 return
+
+             for tile in self.tiles:
+                 self.fit(tile)
+                 self.apply_damp(tile)
+
+             error = self.update_errors()
+             self.update_observer(error)
+             # Record the per-iteration observer snapshot under the key initialized in __init__.
+             self.validation_stats.setdefault('solve_metrics_per_tile', {}).setdefault('stats', []).append({
+                 'iteration': i,
+                 'observer': copy.deepcopy(self.observer),
+             })
+
+             if i > self.max_plateauwidth:
+                 proceed = error > self.max_allowed_error
+                 d = self.max_plateauwidth
+
+                 while not proceed and d >= 1:
+                     proceed = proceed or abs(self.get_wide_slope(self.observer['values'], d)) > 0.0001
+                     d /= 2
+
+             i += 1
+             if i >= self.max_iterations:
+                 proceed = False
+
+     def run(self):
+         """
+         Runs the global optimization and returns the refined tiles along with the validation statistics.
+         """
+         self.optimize_silently()
+
+         return self.tiles, self.validation_stats
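The hunk above corresponds to Rhapso/solver/global_optimization.py, the only +410-line file in the list. For orientation only, here is a minimal, hypothetical driver for GlobalOptimization that registers one toy tile against fixed target coordinates. The dictionary layout ('matches', a 'model' dict with 'a', 'b', 'regularized', and points carrying 'l' local and 'w' world coordinates) mirrors what the class reads; the coordinates, threshold values, damp=1.0, and the single-tile setup are invented for illustration and are not taken from the package.

# Illustrative sketch: one tile, four tetrahedron points, a known (2, -1, 0.5) shift to recover.
from Rhapso.solver.global_optimization import GlobalOptimization
import numpy as np

identity = {'m00': 1.0, 'm01': 0.0, 'm02': 0.0, 'm03': 0.0,
            'm10': 0.0, 'm11': 1.0, 'm12': 0.0, 'm13': 0.0,
            'm20': 0.0, 'm21': 0.0, 'm22': 1.0, 'm23': 0.0}

local = np.array([[0, 0, 0], [10, 0, 0], [0, 10, 0], [0, 0, 10]], dtype=float)
target = local + np.array([2.0, -1.0, 0.5])   # translation the solver should find

matches = [{'p1': {'l': p.tolist(), 'w': p.tolist()},
            'p2': {'l': t.tolist(), 'w': t.tolist()},
            'weight': 1.0}
           for p, t in zip(local, target)]

tile = {'matches': matches,
        'model': {'a': dict(identity), 'b': dict(identity),
                  'regularized': dict(identity), 'cost': 0.0}}

solver = GlobalOptimization(
    tiles=[tile], relative_threshold=3.0, absolute_threshold=3.0, min_matches=3,
    damp=1.0, max_iterations=100, max_allowed_error=0.005, max_plateauwidth=5,
    run_type='affine', metrics_output_path=None)

tiles, stats = solver.run()
print(tiles[0]['model']['regularized'])   # identity rotation plus the (2, -1, 0.5) translation
print(tiles[0]['distance'])               # mean residual, ~0 for this toy case

With a damp value below 1.0, apply_damp would move the world coordinates only part of the way toward each fitted model, blending successive fits over several iterations instead of jumping to the solution in one step.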
Rhapso/solver/model_and_tile_setup.py
@@ -0,0 +1,109 @@
+ import numpy as np
+ import copy
+
+ """
+ Model and Tile Setup initializes models, tiles and view pair matches.
+ """
+
+ class ModelAndTileSetup:
+     def __init__(self, connected_views, corresponding_interest_points, interest_points, view_transform_matrices, view_id_set, label_map):
+         self.corresponding_interest_points = corresponding_interest_points
+         self.view_transform_matrices = view_transform_matrices
+         self.connected_views = connected_views
+         self.interest_points = interest_points
+         self.view_id_set = view_id_set
+         self.label_map = label_map
+         self.pairs = []
+         self.tiles = {}
+
+     def apply_transform(self, point, matrix):
+         """
+         Applies a 3D affine transformation matrix to a point using homogeneous coordinates.
+         """
+         point_homogeneous = np.append(point, 1)
+         transformed_point = matrix.dot(point_homogeneous)[:3]
+         return transformed_point
+
+     def setup_point_matches_from_interest_points(self):
+         """
+         Generates transformed interest point pairs between views for downstream matching.
+         """
+         view_id_list = list(self.view_id_set)
+
+         # Iterate and compare all viewIDs
+         for i in range(len(view_id_list)):
+             for j in range(i + 1, len(view_id_list)):
+
+                 # Get transform matrices for view_id A and B
+                 key_i = f"timepoint: {view_id_list[i][0]}, setup: {view_id_list[i][1]}"
+                 key_j = f"timepoint: {view_id_list[j][0]}, setup: {view_id_list[j][1]}"
+
+                 mA = self.view_transform_matrices.get(key_i, None)
+                 mB = self.view_transform_matrices.get(key_j, None)
+
+                 if mA is None or mB is None:
+                     continue
+
+                 for label_a in self.label_map[key_i]:
+                     for label_b in self.label_map[key_j]:
+
+                         cp_a = [it for it in self.corresponding_interest_points.get(key_i, []) if it.get('label') == label_a]
+
+                         ip_list_a = self.interest_points.get(key_i, {}).get(label_a, [])
+                         ip_list_b = self.interest_points.get(key_j, {}).get(label_b, [])
+
+                         inliers = []
+                         for p in cp_a:
+
+                             # verify corresponding point is in ip_list_b
+                             if label_a == label_b and p['corresponding_view_id'] == key_j:
+
+                                 ip_a = ip_list_a[p['detection_id']]
+                                 ip_b = ip_list_b[p['corresponding_detection_id']]
+
+                                 interest_point_a = {
+                                     'l': copy.deepcopy(ip_a),
+                                     'w': copy.deepcopy(ip_a),
+                                     'index': p['detection_id']
+                                 }
+                                 interest_point_b = {
+                                     'l': copy.deepcopy(ip_b),
+                                     'w': copy.deepcopy(ip_b),
+                                     'index': p['corresponding_detection_id']
+                                 }
+
+                                 transformed_l_a = self.apply_transform(interest_point_a['l'], mA)
+                                 transformed_w_a = self.apply_transform(interest_point_a['w'], mA)
+                                 transformed_l_b = self.apply_transform(interest_point_b['l'], mB)
+                                 transformed_w_b = self.apply_transform(interest_point_b['w'], mB)
+
+                                 interest_point_a['l'] = transformed_l_a
+                                 interest_point_a['w'] = transformed_w_a
+                                 interest_point_b['l'] = transformed_l_b
+                                 interest_point_b['w'] = transformed_w_b
+
+                                 interest_point_a['weight'] = 1
+                                 interest_point_a['strength'] = 1
+                                 interest_point_b['weight'] = 1
+                                 interest_point_b['strength'] = 1
+
+                                 inliers.append({
+                                     'p1': interest_point_a,
+                                     'p2': interest_point_b,
+                                     'weight': 1,
+                                     'strength': 1
+                                 })
+
+                         if inliers:
+                             self.pairs.append({
+                                 'view': (key_i, key_j),
+                                 'inliers': inliers,
+                                 'flipped': None
+                             })
+
+     def run(self):
+         """
+         Builds the view-pair point matches and returns them.
+         """
+         self.setup_point_matches_from_interest_points()
+
+         return self.pairs
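The hunk above corresponds to Rhapso/solver/model_and_tile_setup.py (+109 in the list). As an illustration only, the sketch below feeds two hypothetical views sharing a single label into ModelAndTileSetup and prints the resulting pair structure. The key format ("timepoint: T, setup: S"), the correspondence record fields, and the returned pair layout follow the code above; the 'beads' label, the coordinates, and passing view_id_set as a plain list (so the A/B iteration order is deterministic for the printout) are assumptions made for the example.

# Illustrative sketch: two views at timepoint 0 with two corresponding 'beads' detections.
from Rhapso.solver.model_and_tile_setup import ModelAndTileSetup
import numpy as np

key_a = "timepoint: 0, setup: 0"
key_b = "timepoint: 0, setup: 1"

view_transform_matrices = {key_a: np.eye(4), key_b: np.eye(4)}
label_map = {key_a: ['beads'], key_b: ['beads']}

interest_points = {
    key_a: {'beads': [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])]},
    key_b: {'beads': [np.array([1.1, 2.0, 3.0]), np.array([4.1, 5.0, 6.0])]},
}

# Each record links detection_id in view A to corresponding_detection_id in view B.
corresponding_interest_points = {
    key_a: [
        {'label': 'beads', 'detection_id': 0,
         'corresponding_view_id': key_b, 'corresponding_detection_id': 0},
        {'label': 'beads', 'detection_id': 1,
         'corresponding_view_id': key_b, 'corresponding_detection_id': 1},
    ],
}

setup = ModelAndTileSetup(
    connected_views=None,
    corresponding_interest_points=corresponding_interest_points,
    interest_points=interest_points,
    view_transform_matrices=view_transform_matrices,
    view_id_set=[(0, 0), (0, 1)],   # a list here, so the A/B iteration order is fixed
    label_map=label_map)

pairs = setup.run()
print(pairs[0]['view'])                    # (key_a, key_b)
print(pairs[0]['inliers'][0]['p1']['w'])   # view A point mapped through its view transform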