Rhapso 0.1.991__py3-none-any.whl → 0.1.993__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -27,7 +27,7 @@ class CustomBioImage(BioImage):
27
27
 
28
28
  class RansacMatching:
29
29
  def __init__(self, data_global, num_neighbors, redundancy, significance, num_required_neighbors, match_type,
30
- max_epsilon, min_inlier_ratio, num_iterations, model_min_matches, regularization_weight,
30
+ inlier_threshold, min_inlier_ratio, num_iterations, model_min_matches, regularization_weight,
31
31
  search_radius, view_registrations, input_type, image_file_prefix):
32
32
  self.data_global = data_global
33
33
  self.num_neighbors = num_neighbors
@@ -35,7 +35,7 @@ class RansacMatching:
35
35
  self.significance = significance
36
36
  self.num_required_neighbors = num_required_neighbors
37
37
  self.match_type = match_type
38
- self.max_epsilon = max_epsilon
38
+ self.inlier_threshold = inlier_threshold
39
39
  self.min_inlier_ratio = min_inlier_ratio
40
40
  self.num_iterations = num_iterations
41
41
  self.model_min_matches = model_min_matches
@@ -66,7 +66,7 @@ class RansacMatching:
66
66
  errors = []
67
67
  for match in temp:
68
68
  p1 = np.array(match[1])
69
- p2 = np.array(match[4])
69
+ p2 = np.array(match[5])
70
70
  p1_h = np.append(p1, 1.0)
71
71
  p1_trans = model_copy @ p1_h
72
72
  error = np.linalg.norm(p1_trans[:3] - p2)
@@ -137,60 +137,55 @@ class RansacMatching:
137
137
  rigid_matrix[:3, 3] = t
138
138
 
139
139
  return rigid_matrix
140
-
140
+
141
141
  def fit_affine_model(self, matches):
142
- matches = np.array(matches) # shape (N, 2, 3)
143
- P = matches[:, 0] # source points
144
- Q = matches[:, 1] # target points
145
- weights = np.ones(P.shape[0]) # uniform weights
142
+ """
143
+ Fit a 3x4 affine transform such that:
144
+ Q ≈ M @ P + t
145
+ where P, Q are 3D column vectors (but stored here as row vectors).
146
+ """
147
+ matches = np.asarray(matches) # shape (N, 2, 3)
148
+ P = matches[:, 0] # source points, shape (N, 3)
149
+ Q = matches[:, 1] # target points, shape (N, 3)
146
150
 
147
- ws = np.sum(weights)
151
+ # Uniform weights for now (kept in case you add non-uniform later)
152
+ weights = np.ones(P.shape[0], dtype=float)
148
153
 
154
+ # Weighted centroids
149
155
  pc = np.average(P, axis=0, weights=weights)
150
156
  qc = np.average(Q, axis=0, weights=weights)
151
157
 
158
+ # Centered coordinates
152
159
  P_centered = P - pc
153
160
  Q_centered = Q - qc
154
161
 
155
- A = np.zeros((3, 3))
156
- B = np.zeros((3, 3))
157
-
158
- for i in range(P.shape[0]):
159
- w = weights[i]
160
- p = P_centered[i]
161
- q = Q_centered[i]
162
-
163
- A += w * np.outer(p, p)
164
- B += w * np.outer(p, q)
162
+ # Weighted least squares: scale rows by sqrt(weight)
163
+ sqrt_w = np.sqrt(weights)[:, None] # (N, 1)
164
+ P_w = P_centered * sqrt_w # (N, 3)
165
+ Q_w = Q_centered * sqrt_w # (N, 3)
165
166
 
166
- det = np.linalg.det(A)
167
- if det == 0:
168
- raise ValueError("Ill-defined data points (det=0)")
167
+ # Solve P_w @ M^T ≈ Q_w → M_T is 3x3, then transpose
168
+ M_T, *_ = np.linalg.lstsq(P_w, Q_w, rcond=None)
169
+ M = M_T.T
169
170
 
170
- try:
171
- A_inv = np.linalg.inv(A)
172
- except np.linalg.LinAlgError:
173
- # If A is not invertible, use the pseudo-inverse
174
- A_inv = np.linalg.pinv(A)
171
+ # Translation so that M @ pc ≈ qc
172
+ t = qc - M @ pc
175
173
 
176
- M = A_inv @ B # 3x3 transformation matrix
177
-
178
- t = qc - M @ pc # translation
179
-
180
- affine_matrix = np.eye(4)
174
+ # Pack into 4x4 affine matrix
175
+ affine_matrix = np.eye(4, dtype=float)
181
176
  affine_matrix[:3, :3] = M
182
177
  affine_matrix[:3, 3] = t
183
178
 
184
179
  return affine_matrix
185
180
 
186
- def test(self, candidates, model, max_epsilon, min_inlier_ratio, min_num_inliers):
181
+ def test(self, candidates, model, inlier_threshold, min_inlier_ratio, min_num_inliers):
187
182
  inliers = []
188
183
  for idxA, pointA, view_a, label_a, idxB, pointB, view_b, label_b in candidates:
189
184
  p1_hom = np.append(pointA, 1.0)
190
185
  transformed = model @ p1_hom
191
186
  distance = np.linalg.norm(transformed[:3] - pointB)
192
187
 
193
- if distance < max_epsilon:
188
+ if distance < inlier_threshold:
194
189
  inliers.append((idxA, pointA, view_a, label_a, idxB, pointB, view_b, label_b))
195
190
 
196
191
  ir = len(inliers) / len(candidates)
@@ -251,15 +246,16 @@ class RansacMatching:
251
246
  regularized_model = self.model_regularization(point_pairs)
252
247
  except Exception as e:
253
248
  print(e)
249
+ continue
254
250
 
255
251
  num_inliers = 0
256
- is_good, tmp_inliers = self.test(candidates, regularized_model, self.max_epsilon, self.min_inlier_ratio, self.model_min_matches)
252
+ is_good, tmp_inliers = self.test(candidates, regularized_model, self.inlier_threshold, self.min_inlier_ratio, self.model_min_matches)
257
253
 
258
254
  while is_good and num_inliers < len(tmp_inliers):
259
255
  num_inliers = len(tmp_inliers)
260
256
  point_pairs = [(i[1], i[5]) for i in tmp_inliers]
261
257
  regularized_model = self.model_regularization(point_pairs)
262
- is_good, tmp_inliers = self.test(candidates, regularized_model, self.max_epsilon, self.min_inlier_ratio, self.model_min_matches)
258
+ is_good, tmp_inliers = self.test(candidates, regularized_model, self.inlier_threshold, self.min_inlier_ratio, self.model_min_matches)
263
259
 
264
260
  if len(tmp_inliers) > max_inliers:
265
261
  best_inliers = tmp_inliers
@@ -5,7 +5,7 @@ import json
5
5
  import base64, json
6
6
  from pathlib import Path
7
7
 
8
- with open("Rhapso/pipelines/ray/param/dev/zarr_s3_sean.yml", "r") as file:
8
+ with open("Rhapso/pipelines/ray/param/exaSPIM_802450.yml", "r") as file:
9
9
  config = yaml.safe_load(file)
10
10
 
11
11
  serialized_config = base64.b64encode(json.dumps(config).encode()).decode()
@@ -51,10 +51,10 @@ matching_cmd_rigid = (
51
51
  " search_radius=cfg[\\\"search_radius_rigid\\\"],\n"
52
52
  " num_required_neighbors=cfg[\\\"num_required_neighbors_rigid\\\"],\n"
53
53
  " model_min_matches=cfg[\\\"model_min_matches_rigid\\\"],\n"
54
- " inlier_factor=cfg[\\\"inlier_factor_rigid\\\"],\n"
55
- " lambda_value=cfg[\\\"lambda_value_rigid\\\"],\n"
54
+ " inlier_threshold=cfg[\\\"inlier_threshold_rigid\\\"],\n"
55
+ " min_inlier_ratio=cfg[\\\"min_inlier_ratio_rigid\\\"],\n"
56
56
  " num_iterations=cfg[\\\"num_iterations_rigid\\\"],\n"
57
- " regularization_weight=cfg[\\\"regularization_weight_rigid\\\"],\n"
57
+ " regularization_weight=cfg[\\\"regularization_weight_matching_rigid\\\"],\n"
58
58
  " image_file_prefix=cfg[\\\"image_file_prefix\\\"]\n"
59
59
  ")\n"
60
60
  "ipm.run()\n"
@@ -80,10 +80,10 @@ matching_cmd_affine = (
80
80
  " search_radius=cfg[\\\"search_radius_affine\\\"],\n"
81
81
  " num_required_neighbors=cfg[\\\"num_required_neighbors_affine\\\"],\n"
82
82
  " model_min_matches=cfg[\\\"model_min_matches_affine\\\"],\n"
83
- " inlier_factor=cfg[\\\"inlier_factor_affine\\\"],\n"
84
- " lambda_value=cfg[\\\"lambda_value_affine\\\"],\n"
83
+ " inlier_threshold=cfg[\\\"inlier_threshold_affine\\\"],\n"
84
+ " min_inlier_ratio=cfg[\\\"min_inlier_ratio_affine\\\"],\n"
85
85
  " num_iterations=cfg[\\\"num_iterations_affine\\\"],\n"
86
- " regularization_weight=cfg[\\\"regularization_weight_affine\\\"],\n"
86
+ " regularization_weight=cfg[\\\"regularization_weight_matching_affine\\\"],\n"
87
87
  " image_file_prefix=cfg[\\\"image_file_prefix\\\"]\n"
88
88
  ")\n"
89
89
  "ipm.run()\n"
@@ -109,10 +109,10 @@ matching_cmd_split_affine = (
109
109
  " search_radius=cfg[\\\"search_radius_split_affine\\\"],\n"
110
110
  " num_required_neighbors=cfg[\\\"num_required_neighbors_split_affine\\\"],\n"
111
111
  " model_min_matches=cfg[\\\"model_min_matches_split_affine\\\"],\n"
112
- " inlier_factor=cfg[\\\"inlier_factor_split_affine\\\"],\n"
113
- " lambda_value=cfg[\\\"lambda_value_split_affine\\\"],\n"
112
+ " inlier_threshold=cfg[\\\"inlier_threshold_split_affine\\\"],\n"
113
+ " min_inlier_ratio=cfg[\\\"min_inlier_ratio_split_affine\\\"],\n"
114
114
  " num_iterations=cfg[\\\"num_iterations_split_affine\\\"],\n"
115
- " regularization_weight=cfg[\\\"regularization_weight_split_affine\\\"],\n"
115
+ " regularization_weight=cfg[\\\"regularization_weight_matching_split_affine\\\"],\n"
116
116
  " image_file_prefix=cfg[\\\"image_file_prefix\\\"]\n"
117
117
  ")\n"
118
118
  "ipm.run()\n"
@@ -151,6 +151,7 @@ solver_rigid = Solver(
151
151
  absolute_threshold=config['absolute_threshold'],
152
152
  min_matches=config['min_matches'],
153
153
  damp=config['damp'],
154
+ regularization_weight=config['regularization_weight_solver_rigid'],
154
155
  max_iterations=config['max_iterations'],
155
156
  max_allowed_error=config['max_allowed_error'],
156
157
  max_plateauwidth=config['max_plateauwidth'],
@@ -168,6 +169,7 @@ solver_affine = Solver(
168
169
  absolute_threshold=config['absolute_threshold'],
169
170
  min_matches=config['min_matches'],
170
171
  damp=config['damp'],
172
+ regularization_weight=config['regularization_weight_solver_affine'],
171
173
  max_iterations=config['max_iterations'],
172
174
  max_allowed_error=config['max_allowed_error'],
173
175
  max_plateauwidth=config['max_plateauwidth'],
@@ -185,6 +187,7 @@ solver_split_affine = Solver(
185
187
  absolute_threshold=config['absolute_threshold'],
186
188
  min_matches=config['min_matches'],
187
189
  damp=config['damp'],
190
+ regularization_weight=config['regularization_weight_solver_split_affine'],
188
191
  max_iterations=config['max_iterations'],
189
192
  max_allowed_error=config['max_allowed_error'],
190
193
  max_plateauwidth=config['max_plateauwidth'],
@@ -210,10 +213,10 @@ try:
210
213
  solver_rigid.run()
211
214
  exec_on_cluster("Matching (affine)", unified_yml, matching_cmd_affine, prefix)
212
215
  solver_affine.run()
213
- exec_on_cluster("Split Dataset", unified_yml, split_cmd, prefix)
214
- exec_on_cluster("Matching (split_affine)", unified_yml, matching_cmd_split_affine, prefix)
215
- solver_split_affine.run()
216
- print("\n✅ Pipeline complete.")
216
+ # exec_on_cluster("Split Dataset", unified_yml, split_cmd, prefix)
217
+ # exec_on_cluster("Matching (split_affine)", unified_yml, matching_cmd_split_affine, prefix)
218
+ # solver_split_affine.run()
219
+ # print("\n✅ Pipeline complete.")
217
220
 
218
221
  except subprocess.CalledProcessError as e:
219
222
  print(f"❌ Pipeline error: {e}")
@@ -6,7 +6,7 @@ import ray
6
6
 
7
7
  class InterestPointMatching:
8
8
  def __init__(self, xml_input_path, n5_output_path, input_type, match_type, num_neighbors, redundancy, significance,
9
- search_radius, num_required_neighbors, model_min_matches, inlier_factor, lambda_value, num_iterations,
9
+ search_radius, num_required_neighbors, model_min_matches, inlier_threshold, min_inlier_ratio, num_iterations,
10
10
  regularization_weight, image_file_prefix):
11
11
  self.xml_input_path = xml_input_path
12
12
  self.n5_output_path = n5_output_path
@@ -18,8 +18,8 @@ class InterestPointMatching:
18
18
  self.search_radius = search_radius
19
19
  self.num_required_neighbors = num_required_neighbors
20
20
  self.model_min_matches = model_min_matches
21
- self.inlier_factor = inlier_factor
22
- self.lambda_value = lambda_value
21
+ self.inlier_threshold = inlier_threshold
22
+ self.min_inlier_ratio = min_inlier_ratio
23
23
  self.num_iterations = num_iterations
24
24
  self.regularization_weight = regularization_weight
25
25
  self.image_file_prefix = image_file_prefix
@@ -38,11 +38,11 @@ class InterestPointMatching:
38
38
  # Distribute interest point matching with Ray
39
39
  @ray.remote
40
40
  def match_pair(pointsA, pointsB, viewA_str, viewB_str, label, num_neighbors, redundancy, significance, num_required_neighbors,
41
- match_type, inlier_factor, lambda_value, num_iterations, model_min_matches, regularization_weight, search_radius,
41
+ match_type, inlier_threshold, min_inlier_ratio, num_iterations, model_min_matches, regularization_weight, search_radius,
42
42
  view_registrations, input_type, image_file_prefix):
43
43
 
44
- matcher = RansacMatching(data_global, num_neighbors, redundancy, significance, num_required_neighbors, match_type, inlier_factor,
45
- lambda_value, num_iterations, model_min_matches, regularization_weight, search_radius, view_registrations,
44
+ matcher = RansacMatching(data_global, num_neighbors, redundancy, significance, num_required_neighbors, match_type, inlier_threshold,
45
+ min_inlier_ratio, num_iterations, model_min_matches, regularization_weight, search_radius, view_registrations,
46
46
  input_type, image_file_prefix)
47
47
 
48
48
  pointsA, pointsB = matcher.filter_for_overlapping_points(pointsA, pointsB, viewA_str, viewB_str)
@@ -65,7 +65,7 @@ class InterestPointMatching:
65
65
  # --- Distribute ---
66
66
  futures = [
67
67
  match_pair.remote(pointsA, pointsB, viewA_str, viewB_str, label, self.num_neighbors, self.redundancy, self.significance, self.num_required_neighbors,
68
- self.match_type, self.inlier_factor, self.lambda_value, self.num_iterations, self.model_min_matches, self.regularization_weight,
68
+ self.match_type, self.inlier_threshold, self.min_inlier_ratio, self.num_iterations, self.model_min_matches, self.regularization_weight,
69
69
  self.search_radius, view_registrations, self.input_type, self.image_file_prefix)
70
70
  for pointsA, pointsB, viewA_str, viewB_str, label in process_pairs
71
71
  ]
@@ -88,8 +88,8 @@ class InterestPointMatching:
88
88
  # DEBUG MATCHING
89
89
  # all_results = []
90
90
  # for pointsA, pointsB, viewA_str, viewB_str, label in process_pairs:
91
- # matcher = RansacMatching(data_global, self.num_neighbors, self.redundancy, self.significance, self.num_required_neighbors, self.match_type, self.inlier_factor,
92
- # self.lambda_value, self.num_iterations, self.model_min_matches, self.regularization_weight, self.search_radius, view_registrations,
91
+ # matcher = RansacMatching(data_global, self.num_neighbors, self.redundancy, self.significance, self.num_required_neighbors, self.match_type, self.inlier_threshold,
92
+ # self.min_inlier_ratio, self.num_iterations, self.model_min_matches, self.regularization_weight, self.search_radius, view_registrations,
93
93
  # self.input_type, self.image_file_prefix)
94
94
 
95
95
  # pointsA, pointsB = matcher.filter_for_overlapping_points(pointsA, pointsB, viewA_str, viewB_str)
@@ -9,7 +9,7 @@ import ray
9
9
  ray.init()
10
10
 
11
11
  # Point to param file
12
- with open("Rhapso/pipelines/ray/param/dev/zarr_s3_sean.yml", "r") as file:
12
+ with open("Rhapso/pipelines/ray/param/exaSPIM_802450.yml", "r") as file:
13
13
  config = yaml.safe_load(file)
14
14
 
15
15
  # -- INITIALIZE EACH COMPONENT --
@@ -46,10 +46,10 @@ interest_point_matching_rigid = InterestPointMatching(
46
46
  search_radius=config['search_radius_rigid'],
47
47
  num_required_neighbors=config['num_required_neighbors_rigid'],
48
48
  model_min_matches=config['model_min_matches_rigid'],
49
- inlier_factor=config['inlier_factor_rigid'],
50
- lambda_value=config['lambda_value_rigid'],
49
+ inlier_threshold=config['inlier_threshold_rigid'],
50
+ min_inlier_ratio=config['min_inlier_ratio_rigid'],
51
51
  num_iterations=config['num_iterations_rigid'],
52
- regularization_weight=config['regularization_weight_rigid'],
52
+ regularization_weight=config['regularization_weight_matching_rigid'],
53
53
  image_file_prefix=config['image_file_prefix'],
54
54
  )
55
55
 
@@ -65,10 +65,10 @@ interest_point_matching_affine = InterestPointMatching(
65
65
  search_radius=config['search_radius_affine'],
66
66
  num_required_neighbors=config['num_required_neighbors_affine'],
67
67
  model_min_matches=config['model_min_matches_affine'],
68
- inlier_factor=config['inlier_factor_affine'],
69
- lambda_value=config['lambda_value_affine'],
68
+ inlier_threshold=config['inlier_threshold_affine'],
69
+ min_inlier_ratio=config['min_inlier_ratio_affine'],
70
70
  num_iterations=config['num_iterations_affine'],
71
- regularization_weight=config['regularization_weight_affine'],
71
+ regularization_weight=config['regularization_weight_matching_affine'],
72
72
  image_file_prefix=config['image_file_prefix'],
73
73
  )
74
74
 
@@ -84,10 +84,10 @@ interest_point_matching_split_affine = InterestPointMatching(
84
84
  search_radius=config['search_radius_split_affine'],
85
85
  num_required_neighbors=config['num_required_neighbors_split_affine'],
86
86
  model_min_matches=config['model_min_matches_split_affine'],
87
- inlier_factor=config['inlier_factor_split_affine'],
88
- lambda_value=config['lambda_value_split_affine'],
87
+ inlier_threshold=config['inlier_threshold_split_affine'],
88
+ min_inlier_ratio=config['min_inlier_ratio_split_affine'],
89
89
  num_iterations=config['num_iterations_split_affine'],
90
- regularization_weight=config['regularization_weight_split_affine'],
90
+ regularization_weight=config['regularization_weight_matching_split_affine'],
91
91
  image_file_prefix=config['image_file_prefix'],
92
92
  )
93
93
 
@@ -101,6 +101,7 @@ solver_rigid = Solver(
101
101
  absolute_threshold=config['absolute_threshold'],
102
102
  min_matches=config['min_matches'],
103
103
  damp=config['damp'],
104
+ regularization_weight=config['regularization_weight_solver_rigid'],
104
105
  max_iterations=config['max_iterations'],
105
106
  max_allowed_error=config['max_allowed_error'],
106
107
  max_plateauwidth=config['max_plateauwidth'],
@@ -118,6 +119,7 @@ solver_affine = Solver(
118
119
  absolute_threshold=config['absolute_threshold'],
119
120
  min_matches=config['min_matches'],
120
121
  damp=config['damp'],
122
+ regularization_weight=config['regularization_weight_solver_affine'],
121
123
  max_iterations=config['max_iterations'],
122
124
  max_allowed_error=config['max_allowed_error'],
123
125
  max_plateauwidth=config['max_plateauwidth'],
@@ -135,6 +137,7 @@ solver_split_affine = Solver(
135
137
  absolute_threshold=config['absolute_threshold'],
136
138
  min_matches=config['min_matches'],
137
139
  damp=config['damp'],
140
+ regularization_weight=config['regularization_weight_solver_split_affine'],
138
141
  max_iterations=config['max_iterations'],
139
142
  max_allowed_error=config['max_allowed_error'],
140
143
  max_plateauwidth=config['max_plateauwidth'],
@@ -157,11 +160,11 @@ split_dataset = SplitDataset(
157
160
  )
158
161
 
159
162
  # -- ALIGNMENT PIPELINE --
160
- # interest_point_detection.run()
161
- # interest_point_matching_rigid.run()
163
+ interest_point_detection.run()
164
+ interest_point_matching_rigid.run()
162
165
  solver_rigid.run()
163
- # interest_point_matching_affine.run()
164
- # solver_affine.run()
165
- # split_dataset.run()
166
- # interest_point_matching_split_affine.run()
167
- # solver_split_affine.run()
166
+ interest_point_matching_affine.run()
167
+ solver_affine.run()
168
+ split_dataset.run()
169
+ interest_point_matching_split_affine.run()
170
+ solver_split_affine.run()
@@ -16,7 +16,8 @@ This class implements the Solver pipeline for rigid, affine, and split-affine op
16
16
 
17
17
  class Solver:
18
18
  def __init__(self, xml_file_path_output, n5_input_path, xml_file_path, run_type, relative_threshold, absolute_threshold,
19
- min_matches, damp, max_iterations, max_allowed_error, max_plateauwidth, metrics_output_path, fixed_tile):
19
+ min_matches, damp, regularization_weight, max_iterations, max_allowed_error, max_plateauwidth, metrics_output_path,
20
+ fixed_tile):
20
21
  self.xml_file_path_output = xml_file_path_output
21
22
  self.n5_input_path = n5_input_path
22
23
  self.xml_file_path = xml_file_path
@@ -25,6 +26,7 @@ class Solver:
25
26
  self.absolute_threshold = absolute_threshold
26
27
  self.min_matches = min_matches
27
28
  self.damp = damp
29
+ self.regularization_weight = regularization_weight
28
30
  self.max_iterations = max_iterations
29
31
  self.max_allowed_error = max_allowed_error
30
32
  self.max_plateauwidth = max_plateauwidth
@@ -79,7 +81,8 @@ class Solver:
79
81
 
80
82
  # Update all points with transform models and iterate through all tiles (views) and optimize alignment
81
83
  global_optimization = GlobalOptimization(tc, self.relative_threshold, self.absolute_threshold, self.min_matches, self.damp,
82
- self.max_iterations, self.max_allowed_error, self.max_plateauwidth, self.run_type, self.metrics_output_path)
84
+ self.regularization_weight, self.max_iterations, self.max_allowed_error,
85
+ self.max_plateauwidth, self.run_type, self.metrics_output_path)
83
86
  tiles, validation_stats = global_optimization.run()
84
87
  print("Global optimization complete")
85
88
 
@@ -102,7 +105,8 @@ class Solver:
102
105
 
103
106
  # Update all points with transform models and iterate through all tiles (views) and optimize alignment
104
107
  global_optimization = GlobalOptimization(tc, self.relative_threshold, self.absolute_threshold, self.min_matches, self.damp,
105
- self.max_iterations, self.max_allowed_error, self.max_plateauwidth, self.run_type, self.metrics_output_path)
108
+ self.regularization_weight, self.max_iterations, self.max_allowed_error,
109
+ self.max_plateauwidth, self.run_type, self.metrics_output_path)
106
110
  tiles_round_2, validation_stats_round_2 = global_optimization.run()
107
111
  print("Global optimization complete")
108
112
 
@@ -7,13 +7,14 @@ GlobalOptimization iteratively refines per-tile transforms to achieve sub-pixel
7
7
  """
8
8
 
9
9
  class GlobalOptimization:
10
- def __init__(self, tiles, relative_threshold, absolute_threshold, min_matches,
11
- damp, max_iterations, max_allowed_error, max_plateauwidth, run_type, metrics_output_path):
10
+ def __init__(self, tiles, relative_threshold, absolute_threshold, min_matches, damp, regularization_weight,
11
+ max_iterations, max_allowed_error, max_plateauwidth, run_type, metrics_output_path):
12
12
  self.tiles = tiles
13
13
  self.relative_threshold = relative_threshold
14
14
  self.absolute_threshold = absolute_threshold
15
15
  self.min_matches = min_matches
16
16
  self.damp = damp
17
+ self.regularization_weight = regularization_weight
17
18
  self.max_iterations = max_iterations
18
19
  self.max_allowed_error = max_allowed_error
19
20
  self.max_plateauwidth = max_plateauwidth
@@ -307,8 +308,7 @@ class GlobalOptimization:
307
308
  return affine_model
308
309
 
309
310
  def regularize_models(self, affine, rigid):
310
- alpha=0.1
311
- l1 = 1.0 - alpha
311
+ l1 = 1.0 - self.regularization_weight
312
312
 
313
313
  def to_array(model):
314
314
  return [
@@ -320,7 +320,7 @@ class GlobalOptimization:
320
320
  afs = to_array(affine)
321
321
  bfs = to_array(rigid)
322
322
 
323
- rfs = [l1 * a + alpha * b for a, b in zip(afs, bfs)]
323
+ rfs = [l1 * a + self.regularization_weight * b for a, b in zip(afs, bfs)]
324
324
 
325
325
  keys = [
326
326
  'm00', 'm01', 'm02', 'm03',
@@ -1,6 +1,7 @@
1
1
  import numpy as np
2
2
  import boto3
3
3
  import re
4
+ import copy
4
5
  from xml.etree import ElementTree as ET
5
6
 
6
7
  class SaveXML:
@@ -11,44 +12,357 @@ class SaveXML:
11
12
  self.xml_file = xml_file
12
13
  self.xml_output_path = xml_output_path
13
14
 
15
+ def save_tile_attributes_to_xml(self, xml):
16
+ """
17
+ Ensure the *last* <ViewSetups> (the outer split one) has:
18
+ - <Attributes name="illumination"> old_tile_0..N </Attributes>
19
+ - <Attributes name="channel"> ... </Attributes>
20
+ - <Attributes name="tile"> with locations from Image Splitting </Attributes>
21
+ - <Attributes name="angle"><Angle id=0 name=0/></Attributes>
22
+ """
23
+ root = ET.fromstring(xml)
24
+
25
+ def tagname(el):
26
+ return el.tag.split('}')[-1]
27
+
28
+ def find_one(tag):
29
+ el = root.find(f'.//{{*}}{tag}')
30
+ if el is None:
31
+ el = root.find(tag)
32
+ return el
33
+
34
+ def _norm_id(raw):
35
+ if isinstance(raw, (tuple, list)):
36
+ return int(raw[1] if len(raw) > 1 else raw[0])
37
+ return int(raw)
38
+
39
+ # --- find ALL ViewSetups blocks ---
40
+ view_setups_all = root.findall('.//{*}ViewSetups')
41
+ if not view_setups_all:
42
+ return xml # nothing to do
43
+
44
+ # Outer split ViewSetups = last, inner original = first non-outer
45
+ outer_vs = view_setups_all[-1]
46
+ inner_vs = None
47
+ for vs in view_setups_all:
48
+ if vs is not outer_vs:
49
+ inner_vs = vs
50
+ break
51
+
52
+ # --- collect existing Attributes on OUTER ---
53
+ children = list(outer_vs)
54
+ attr_by_name = {}
55
+ for ch in children:
56
+ if tagname(ch) == 'Attributes':
57
+ nm = ch.get('name')
58
+ if nm:
59
+ attr_by_name[nm] = ch
60
+
61
+ # --- ensure CHANNEL attributes (can still be cloned from inner) ---
62
+ if 'channel' not in attr_by_name and inner_vs is not None:
63
+ for ch in list(inner_vs):
64
+ if tagname(ch) != 'Attributes':
65
+ continue
66
+ nm = ch.get('name')
67
+ if nm == 'channel':
68
+ cloned = copy.deepcopy(ch)
69
+ outer_vs.append(cloned)
70
+ attr_by_name['channel'] = cloned
71
+ break
72
+
73
+ # --- build/overwrite ILLUMINATION attributes: old_tile_0..N ---
74
+ illum_attrs = attr_by_name.get('illumination')
75
+ if illum_attrs is None:
76
+ illum_attrs = ET.Element('Attributes', {'name': 'illumination'})
77
+ outer_vs.append(illum_attrs)
78
+ attr_by_name['illumination'] = illum_attrs
79
+ else:
80
+ # clear existing <Illumination> entries
81
+ for ch in list(illum_attrs):
82
+ illum_attrs.remove(ch)
83
+
84
+ # unique original tile ids from old_view
85
+ orig_tile_ids = sorted({_norm_id(v['old_view']) for v in self.self_definition})
86
+
87
+ for tid in orig_tile_ids:
88
+ illum_el = ET.SubElement(illum_attrs, 'Illumination')
89
+ ET.SubElement(illum_el, 'id').text = str(tid)
90
+ ET.SubElement(illum_el, 'name').text = f"old_tile_{tid}"
91
+
92
+ # --- ensure ANGLE attributes: a single Angle id=0/name=0 ---
93
+ angle_attrs = attr_by_name.get('angle')
94
+ if angle_attrs is None:
95
+ # try clone from inner if it exists
96
+ if inner_vs is not None:
97
+ for ch in list(inner_vs):
98
+ if tagname(ch) == 'Attributes' and ch.get('name') == 'angle':
99
+ angle_attrs = copy.deepcopy(ch)
100
+ outer_vs.append(angle_attrs)
101
+ break
102
+ # if no inner angle, synthesize default
103
+ if angle_attrs is None:
104
+ angle_attrs = ET.Element('Attributes', {'name': 'angle'})
105
+ angle_el = ET.SubElement(angle_attrs, 'Angle')
106
+ ET.SubElement(angle_el, 'id').text = "0"
107
+ ET.SubElement(angle_el, 'name').text = "0"
108
+ outer_vs.append(angle_attrs)
109
+ else:
110
+ # if it exists but has no <Angle>, make one
111
+ has_angle = any(tagname(ch) == 'Angle' for ch in angle_attrs)
112
+ if not has_angle:
113
+ angle_el = ET.SubElement(angle_attrs, 'Angle')
114
+ ET.SubElement(angle_el, 'id').text = "0"
115
+ ET.SubElement(angle_el, 'name').text = "0"
116
+
117
+ attr_by_name['angle'] = angle_attrs
118
+
119
+ # ---- find or create <Attributes name="tile"> under OUTER <ViewSetups> ----
120
+ children = list(outer_vs)
121
+ tile_attrs = None
122
+ insert_idx = len(children) # default: append at end
123
+
124
+ for i, ch in enumerate(children):
125
+ if tagname(ch) == 'Attributes':
126
+ name_attr = ch.get('name')
127
+ # remember existing tile attributes if present
128
+ if name_attr == 'tile':
129
+ tile_attrs = ch
130
+ # prefer to insert tile after channel attributes if we create it
131
+ if name_attr == 'channel':
132
+ insert_idx = i + 1
133
+
134
+ if tile_attrs is None:
135
+ tile_attrs = ET.Element('Attributes', {'name': 'tile'})
136
+ outer_vs.insert(insert_idx, tile_attrs)
137
+
138
+ # ---- figure out which tile ids (new_view ids) we care about ----
139
+ target_ids = {_norm_id(v['new_view']) for v in self.self_definition}
140
+
141
+ # Remove existing Tile entries for those ids (so we can rewrite cleanly)
142
+ for child in list(tile_attrs):
143
+ if tagname(child) != 'Tile':
144
+ continue
145
+ id_el = child.find('id') or child.find('{*}id')
146
+ if id_el is None or not id_el.text:
147
+ continue
148
+ try:
149
+ if int(id_el.text.strip()) in target_ids:
150
+ tile_attrs.remove(child)
151
+ except Exception:
152
+ pass
153
+
154
+ # ---- build a map: setup_id -> (tx, ty, tz) from 'Image Splitting' ----
155
+ view_regs = find_one('ViewRegistrations')
156
+ tile_locations = {}
157
+
158
+ if view_regs is not None:
159
+ # iterate over ViewRegistration elements, namespace-agnostic
160
+ for vr in view_regs.findall('.//{*}ViewRegistration'):
161
+ setup_attr = vr.get('setup')
162
+ if setup_attr is None:
163
+ continue
164
+ try:
165
+ setup_id = int(setup_attr)
166
+ except ValueError:
167
+ continue
168
+
169
+ if setup_id not in target_ids:
170
+ continue
171
+
172
+ # find the Image Splitting transform
173
+ for vt in vr.findall('./{*}ViewTransform'):
174
+ name_el = vt.find('Name') or vt.find('{*}Name')
175
+ if name_el is None:
176
+ continue
177
+ if (name_el.text or '').strip().lower() != 'image splitting':
178
+ continue
179
+
180
+ aff_el = vt.find('affine') or vt.find('{*}affine')
181
+ if aff_el is None or not aff_el.text:
182
+ continue
183
+
184
+ nums = re.findall(
185
+ r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?',
186
+ aff_el.text
187
+ )
188
+
189
+ # 3x4 affine: we expect at least 12 numbers
190
+ if len(nums) >= 12:
191
+ tx, ty, tz = map(float, (nums[3], nums[7], nums[11]))
192
+ elif len(nums) >= 3:
193
+ # fallback: last 3 numbers
194
+ tx, ty, tz = map(float, nums[-3:])
195
+ else:
196
+ tx = ty = tz = 0.0
197
+
198
+ tile_locations[setup_id] = (tx, ty, tz)
199
+ break # stop after first 'Image Splitting' for this VR
200
+
201
+ # ---- create Tile entries for each new_view ----
202
+ for view in self.self_definition:
203
+ new_id = _norm_id(view['new_view'])
204
+
205
+ if new_id in tile_locations:
206
+ loc = tile_locations[new_id]
207
+ else:
208
+ # Fallback: use min bound of interval if we didn't find an image splitting transform
209
+ mins = np.array(view['interval'][0], dtype=float)
210
+ loc = (float(mins[0]), float(mins[1]), float(mins[2]))
211
+
212
+ tile_el = ET.SubElement(tile_attrs, 'Tile')
213
+ ET.SubElement(tile_el, 'id').text = str(new_id)
214
+ ET.SubElement(tile_el, 'name').text = str(new_id)
215
+ ET.SubElement(tile_el, 'location').text = f"{loc[0]:.1f} {loc[1]:.1f} {loc[2]:.1f}"
216
+
217
+ # ---- reorder children in OUTER <ViewSetups>:
218
+ # all <ViewSetup> first, then <Attributes> in illumination, channel, tile, angle order ----
219
+ children = list(outer_vs)
220
+
221
+ viewsetup_children = [ch for ch in children if tagname(ch) == 'ViewSetup']
222
+ attr_children = [ch for ch in children if tagname(ch) == 'Attributes']
223
+ other_children = [ch for ch in children if tagname(ch) not in ('ViewSetup', 'Attributes')]
224
+
225
+ # desired attributes order
226
+ attr_order = {'illumination': 0, 'channel': 1, 'tile': 2, 'angle': 3}
227
+
228
+ def _attr_sort_key(el):
229
+ name = el.get('name', '')
230
+ return attr_order.get(name, 99)
231
+
232
+ attr_children.sort(key=_attr_sort_key)
233
+
234
+ # Clear existing children and re-append in desired order
235
+ for ch in children:
236
+ outer_vs.remove(ch)
237
+
238
+ for ch in viewsetup_children + attr_children + other_children:
239
+ outer_vs.append(ch)
240
+
241
+ try:
242
+ ET.indent(root, space=" ")
243
+ except Exception:
244
+ pass
245
+
246
+ return ET.tostring(root, encoding='unicode')
247
+
14
248
  def wrap_image_loader_for_split(self, xml: str) -> str:
249
+ """
250
+ Wrap the top-level ImageLoader in <ImageLoader format="split.viewerimgloader">
251
+ and move the ORIGINAL ViewSetups/Timepoints/MissingViews into an inner
252
+ <SequenceDescription> inside that wrapper.
253
+
254
+ Resulting structure:
255
+
256
+ <SpimData>
257
+ <BasePath/>
258
+ <SequenceDescription>
259
+ <ImageLoader format="split.viewerimgloader">
260
+ <ImageLoader format="bdv.multimg.zarr"> ... </ImageLoader>
261
+ <SequenceDescription>
262
+ <ViewSetups> (ORIGINAL) </ViewSetups>
263
+ <Timepoints> (ORIGINAL) </Timepoints>
264
+ <MissingViews/>
265
+ </SequenceDescription>
266
+ <!-- SetupIds (for split tiles) will be added later -->
267
+ </ImageLoader>
268
+ <!-- NEW ViewSetups/Timepoints/MissingViews for split views are added later -->
269
+ </SequenceDescription>
270
+ <ViewRegistrations> ... </ViewRegistrations>
271
+ </SpimData>
272
+ """
15
273
  root = ET.fromstring(xml)
16
274
 
17
- def tn(el): return el.tag.split('}')[-1]
275
+ def tn(el):
276
+ return el.tag.split('}')[-1]
277
+
18
278
  def find_one(tag):
19
279
  el = root.find(f'.//{{*}}{tag}')
20
280
  return el if el is not None else root.find(tag)
21
281
 
22
- seq = find_one('SequenceDescription')
282
+ seq = None
283
+ # Prefer the top-level SequenceDescription (direct child of root)
284
+ for ch in list(root):
285
+ if tn(ch) == 'SequenceDescription':
286
+ seq = ch
287
+ break
288
+ if seq is None:
289
+ seq = find_one('SequenceDescription')
23
290
  if seq is None:
24
- return xml
25
-
26
- # find the first immediate ImageLoader under SequenceDescription
27
- loaders = [ch for ch in list(seq) if tn(ch) == 'ImageLoader']
28
- if not loaders:
29
291
  return xml
30
292
 
31
- inner = loaders[0]
293
+ children = list(seq)
32
294
 
33
- fmt = (inner.get('format') or '').lower()
34
- if fmt == 'split.viewerimgloader':
295
+ # Find the first immediate ImageLoader under SequenceDescription
296
+ base_loader = None
297
+ base_loader_idx = None
298
+ for i, ch in enumerate(children):
299
+ if tn(ch) == 'ImageLoader':
300
+ base_loader = ch
301
+ base_loader_idx = i
302
+ break
303
+
304
+ if base_loader is None:
35
305
  return xml
36
-
37
- # handle the case where the *outer* wrapper already exists
38
- if any(tn(ch) == 'ImageLoader' for ch in list(inner)) and fmt.startswith('bdv'):
306
+
307
+ fmt = (base_loader.get('format') or '').lower()
308
+ # Already wrapped; assume layout is correct and do nothing
309
+ if fmt == 'split.viewerimgloader':
39
310
  return xml
40
311
 
41
- # wrap the current loader
42
- idx = list(seq).index(inner)
43
- seq.remove(inner)
312
+ # Collect any other ImageLoader siblings (other sources)
313
+ other_imageloaders = []
314
+ # Collect ORIGINAL ViewSetups / Timepoints / MissingViews that are siblings
315
+ orig_viewsetups = None
316
+ orig_timepoints = None
317
+ orig_missingviews = None
318
+
319
+ for ch in children[base_loader_idx + 1:]:
320
+ name = tn(ch)
321
+ if name == 'ImageLoader':
322
+ other_imageloaders.append(ch)
323
+ elif name == 'ViewSetups':
324
+ orig_viewsetups = ch
325
+ elif name == 'Timepoints':
326
+ orig_timepoints = ch
327
+ elif name == 'MissingViews':
328
+ orig_missingviews = ch
329
+
330
+
331
+ # Remove them from the outer SequenceDescription
332
+ for node in (orig_viewsetups, orig_timepoints, orig_missingviews, *other_imageloaders):
333
+ if node is not None and node in seq:
334
+ seq.remove(node)
335
+
336
+ # Remove the original loader from seq
337
+ seq.remove(base_loader)
338
+
339
+ # Build wrapper <ImageLoader format="split.viewerimgloader">
44
340
  wrapper = ET.Element('ImageLoader', {'format': 'split.viewerimgloader'})
45
- wrapper.append(inner)
46
- seq.insert(idx, wrapper)
341
+ # First child: original loader
342
+ wrapper.append(base_loader)
343
+ for other_loader in other_imageloaders:
344
+ wrapper.append(other_loader)
345
+
346
+ # Inner <SequenceDescription> that holds the original ViewSetups/Timepoints/MissingViews
347
+ inner_seq = ET.Element('SequenceDescription')
348
+
349
+ if orig_viewsetups is not None:
350
+ inner_seq.append(orig_viewsetups)
351
+ if orig_timepoints is not None:
352
+ inner_seq.append(orig_timepoints)
353
+ if orig_missingviews is not None:
354
+ inner_seq.append(orig_missingviews)
355
+
356
+ wrapper.append(inner_seq)
357
+
358
+ # Insert wrapper where the original loader was
359
+ seq.insert(base_loader_idx, wrapper)
47
360
 
48
361
  try:
49
362
  ET.indent(root, space=" ")
50
363
  except Exception:
51
364
  pass
365
+
52
366
  return ET.tostring(root, encoding='unicode')
53
367
 
54
368
  def save_view_interest_points(self, xml):
@@ -202,46 +516,66 @@ class SaveXML:
202
516
  return ET.tostring(root, encoding='unicode')
203
517
 
204
518
  def save_setup_id_to_xml(self, xml):
519
+ """
520
+ Create/overwrite the OUTER <ViewSetups> (split tiles) and ensure outer
521
+ <Timepoints> and <MissingViews> exist under the top-level SequenceDescription.
522
+
523
+ Outer layout target:
524
+
525
+ <SpimData>
526
+ <BasePath/>
527
+ <SequenceDescription>
528
+ <ImageLoader format="split.viewerimgloader">
529
+ ...
530
+ <SequenceDescription> (original) </SequenceDescription>
531
+ <SetupIds> ... </SetupIds> (from save_setup_id_definition_to_xml)
532
+ </ImageLoader>
533
+ <ViewSetups> <-- created here (ids 0..499)
534
+ <ViewSetup>...</ViewSetup>
535
+ ...
536
+ <Attributes ...>...</Attributes>
537
+ </ViewSetups>
538
+ <Timepoints type="pattern">
539
+ <integerpattern>0</integerpattern>
540
+ </Timepoints>
541
+ <MissingViews/>
542
+ </SequenceDescription>
543
+ <ViewRegistrations>...</ViewRegistrations>
544
+ </SpimData>
545
+ """
205
546
  root = ET.fromstring(xml)
206
547
 
207
- def tagname(el):
548
+ def tn(el):
208
549
  return el.tag.split('}')[-1]
209
550
 
210
- def find_one(tag):
211
- el = root.find(f'.//{{*}}{tag}')
212
- if el is None:
213
- el = root.find(tag)
214
- return el
215
-
216
- seq = find_one('SequenceDescription')
217
- regs = find_one('ViewRegistrations')
218
- setup_ids = find_one('SetupIds')
219
- if setup_ids is None:
220
- setup_ids = ET.Element('SetupIds')
221
- kids = list(root)
222
- insert_idx = len(kids)
223
- if regs is not None and regs in kids:
224
- insert_idx = kids.index(regs)
225
- elif seq is not None and seq in kids:
226
- insert_idx = kids.index(seq) + 1
227
- root.insert(insert_idx, setup_ids)
551
+ # Find top-level SequenceDescription
552
+ outer_seq = None
553
+ for ch in list(root):
554
+ if tn(ch) == 'SequenceDescription':
555
+ outer_seq = ch
556
+ break
557
+ if outer_seq is None:
558
+ outer_seq = root.find('.//{*}SequenceDescription')
559
+ if outer_seq is None:
560
+ return xml
228
561
 
562
+ # Find or create OUTER <ViewSetups> under SequenceDescription
229
563
  view_setups = None
230
- for ch in list(root):
231
- if tagname(ch) == 'ViewSetups':
564
+ for ch in list(outer_seq):
565
+ if tn(ch) == 'ViewSetups':
232
566
  view_setups = ch
233
567
  break
234
- if view_setups is None:
235
- view_setups = find_one('ViewSetups')
568
+
236
569
  if view_setups is None:
237
570
  view_setups = ET.Element('ViewSetups')
238
- kids = list(root)
239
- after_idx = -1
240
- for i, ch in enumerate(kids):
241
- if tagname(ch) in ('ImageLoader', 'SequenceDescription'):
242
- after_idx = i
243
- root.insert(after_idx + 1 if after_idx >= 0 else len(kids), view_setups)
244
-
571
+ children = list(outer_seq)
572
+ insert_idx = len(children)
573
+ for i, ch in enumerate(children):
574
+ if tn(ch) == 'ImageLoader':
575
+ insert_idx = i + 1
576
+ outer_seq.insert(insert_idx, view_setups)
577
+
578
+ # Helper to normalize ids
245
579
  def _norm_id(raw):
246
580
  if isinstance(raw, (tuple, list)):
247
581
  if len(raw) >= 2:
@@ -249,11 +583,11 @@ class SaveXML:
249
583
  return int(raw[0])
250
584
  return int(raw)
251
585
 
252
- target_ids = set(_norm_id(v['new_view']) for v in self.self_definition)
586
+ target_ids = {_norm_id(v['new_view']) for v in self.self_definition}
253
587
 
254
- # ViewSetup cleanup
588
+ # Remove any existing ViewSetup with those ids (outer only)
255
589
  for child in list(view_setups):
256
- if tagname(child) != 'ViewSetup':
590
+ if tn(child) != 'ViewSetup':
257
591
  continue
258
592
  id_el = child.find('id') or child.find('{*}id')
259
593
  if id_el is not None and id_el.text:
@@ -263,40 +597,22 @@ class SaveXML:
263
597
  except Exception:
264
598
  pass
265
599
 
266
- # SetupIdDefinition cleanup
267
- for sid in list(setup_ids):
268
- if tagname(sid) != 'SetupIdDefinition':
269
- continue
270
- nid_el = sid.find('NewId') or sid.find('{*}NewId')
271
- if nid_el is not None and nid_el.text:
272
- try:
273
- if int(nid_el.text.strip()) in target_ids:
274
- setup_ids.remove(sid)
275
- except Exception:
276
- pass
277
-
600
+ # (Re)build ViewSetups for each new split view
278
601
  for view in self.self_definition:
279
602
  new_id = _norm_id(view['new_view'])
280
- old_id = _norm_id(view['old_view'])
281
- angle = view['angle']
282
- channel = view['channel']
603
+ # old_id = _norm_id(view['old_view']) # not strictly needed here
604
+
605
+ angle = view['angle']
606
+ channel = view['channel']
283
607
  illumination = view['illumination']
284
- tile = new_id
285
- voxel_unit = view['voxel_unit']
286
- voxel_size = view['voxel_dim']
608
+ tile = new_id
609
+ voxel_unit = view['voxel_unit']
610
+ voxel_size = view['voxel_dim']
287
611
 
288
612
  mins = np.array(view["interval"][0], dtype=np.int64)
289
613
  maxs = np.array(view["interval"][1], dtype=np.int64)
290
614
  size = (maxs - mins + 1).tolist()
291
615
 
292
- # <SetupIds>/<SetupIdDefinition>
293
- def_el = ET.SubElement(setup_ids, 'SetupIdDefinition')
294
- ET.SubElement(def_el, 'NewId').text = str(new_id)
295
- ET.SubElement(def_el, 'OldId').text = str(old_id)
296
- ET.SubElement(def_el, 'min').text = f"{int(mins[0])} {int(mins[1])} {int(mins[2])}"
297
- ET.SubElement(def_el, 'max').text = f"{int(maxs[0])} {int(maxs[1])} {int(maxs[2])}"
298
-
299
- # <ViewSetups>/<ViewSetup>
300
616
  vs = ET.SubElement(view_setups, 'ViewSetup')
301
617
  ET.SubElement(vs, 'id').text = str(new_id)
302
618
  ET.SubElement(vs, 'size').text = f"{int(size[0])} {int(size[1])} {int(size[2])}"
@@ -314,6 +630,31 @@ class SaveXML:
314
630
  ET.SubElement(attrs, 'tile').text = str(int(tile))
315
631
  ET.SubElement(attrs, 'angle').text = str(int(angle))
316
632
 
633
+ # Ensure outer <Timepoints> exists
634
+ outer_timepoints = None
635
+ for ch in list(outer_seq):
636
+ if tn(ch) == 'Timepoints':
637
+ outer_timepoints = ch
638
+ break
639
+ if outer_timepoints is None:
640
+ outer_timepoints = ET.Element('Timepoints', {'type': 'pattern'})
641
+ ip = ET.SubElement(outer_timepoints, 'integerpattern')
642
+ ip.text = "0"
643
+ # place right after ViewSetups
644
+ children = list(outer_seq)
645
+ insert_idx = children.index(view_setups) + 1 if view_setups in children else len(children)
646
+ outer_seq.insert(insert_idx, outer_timepoints)
647
+
648
+ # Ensure outer <MissingViews> exists
649
+ outer_missing = None
650
+ for ch in list(outer_seq):
651
+ if tn(ch) == 'MissingViews':
652
+ outer_missing = ch
653
+ break
654
+ if outer_missing is None:
655
+ outer_missing = ET.Element('MissingViews')
656
+ outer_seq.append(outer_missing)
657
+
317
658
  try:
318
659
  ET.indent(root, space=" ")
319
660
  except Exception:
@@ -322,38 +663,92 @@ class SaveXML:
322
663
  return ET.tostring(root, encoding='unicode')
323
664
 
324
665
  def save_setup_id_definition_to_xml(self, xml):
666
+ """
667
+ Create/overwrite <SetupIds> for the split views.
668
+
669
+ In the desired final layout, SetupIds lives inside:
670
+ <SequenceDescription>
671
+ <ImageLoader format="split.viewerimgloader">
672
+ ...
673
+ <SequenceDescription> ... </SequenceDescription>
674
+ <SetupIds> ... </SetupIds> <-- here
675
+ </ImageLoader>
676
+ ...
677
+ </SequenceDescription>
678
+ """
325
679
  root = ET.fromstring(xml)
326
680
 
327
- # find existing nodes (namespace-agnostic)
328
- def tagname(el): return el.tag.split('}')[-1]
329
- children = list(root)
330
- regs_idx = next((i for i, ch in enumerate(children) if tagname(ch) == 'ViewRegistrations'), None)
331
- seq_idx = next((i for i, ch in enumerate(children) if tagname(ch) == 'SequenceDescription'), None)
332
- setup_ids = next((ch for ch in children if tagname(ch) == 'SetupIds'), None)
681
+ def tn(el):
682
+ return el.tag.split('}')[-1]
683
+
684
+ # Find top-level SequenceDescription
685
+ outer_seq = None
686
+ for ch in list(root):
687
+ if tn(ch) == 'SequenceDescription':
688
+ outer_seq = ch
689
+ break
690
+ if outer_seq is None:
691
+ outer_seq = root.find('.//{*}SequenceDescription')
692
+ if outer_seq is None:
693
+ return xml
694
+
695
+ # Find the wrapper ImageLoader format="split.viewerimgloader"
696
+ wrapper = None
697
+ for ch in list(outer_seq):
698
+ if tn(ch) == 'ImageLoader' and (ch.get('format') or '').lower() == 'split.viewerimgloader':
699
+ wrapper = ch
700
+ break
701
+
702
+ # If wrapper not found, fall back to old behavior (root-level SetupIds)
703
+ parent_for_setupids = wrapper if wrapper is not None else root
704
+ children = list(parent_for_setupids)
705
+
706
+ # Locate existing <SetupIds> under the chosen parent
707
+ setup_ids = None
708
+ for ch in children:
709
+ if tn(ch) == 'SetupIds':
710
+ setup_ids = ch
711
+ break
333
712
 
334
- # create/position <SetupIds>
335
713
  if setup_ids is None:
336
714
  setup_ids = ET.Element('SetupIds')
337
- insert_idx = regs_idx if regs_idx is not None else ((seq_idx + 1) if seq_idx is not None else len(children))
338
- root.insert(insert_idx, setup_ids)
715
+ if wrapper is not None:
716
+ # Under wrapper: insert after inner SequenceDescription if present
717
+ inner_children = list(wrapper)
718
+ inner_seq = None
719
+ for ich in inner_children:
720
+ if tn(ich) == 'SequenceDescription':
721
+ inner_seq = ich
722
+ break
723
+ insert_idx = inner_children.index(inner_seq) + 1 if inner_seq is not None else len(inner_children)
724
+ wrapper.insert(insert_idx, setup_ids)
725
+ else:
726
+ # Root-level fallback: put before <ViewRegistrations> if present
727
+ root_children = list(root)
728
+ regs_idx = next((i for i, ch in enumerate(root_children) if tn(ch) == 'ViewRegistrations'), None)
729
+ insert_idx = regs_idx if regs_idx is not None else len(root_children)
730
+ root.insert(insert_idx, setup_ids)
339
731
  else:
340
- setup_ids.clear()
732
+ # Clear existing definitions so we can rewrite
733
+ setup_ids.clear()
341
734
 
735
+ # Now populate SetupIdDefinition from self.self_definition
342
736
  for view in self.self_definition:
343
737
  new_id = view['new_view']
344
738
  old_id = view['old_view']
345
739
  min_bound = view['interval'][0]
346
740
  max_bound = view['interval'][1]
347
-
741
+
742
+ # Normalize IDs (can be int or (tp, setup))
348
743
  nid = int(new_id[1] if isinstance(new_id, (tuple, list)) else new_id)
349
744
  oid = int(old_id[1] if isinstance(old_id, (tuple, list)) else old_id)
350
745
 
351
746
  def_el = ET.SubElement(setup_ids, 'SetupIdDefinition')
352
747
  ET.SubElement(def_el, 'NewId').text = str(nid)
353
748
  ET.SubElement(def_el, 'OldId').text = str(oid)
354
- ET.SubElement(def_el, 'min').text = f"{int(min_bound[0])} {int(min_bound[1])} {int(min_bound[2])}"
355
- ET.SubElement(def_el, 'max').text = f"{int(max_bound[0])} {int(max_bound[1])} {int(max_bound[2])}"
356
-
749
+ ET.SubElement(def_el, 'min').text = f"{int(min_bound[0])} {int(min_bound[1])} {int(min_bound[2])}"
750
+ ET.SubElement(def_el, 'max').text = f"{int(max_bound[0])} {int(max_bound[1])} {int(max_bound[2])}"
751
+
357
752
  try:
358
753
  ET.indent(root, space=" ")
359
754
  except Exception:
@@ -362,11 +757,13 @@ class SaveXML:
362
757
  return ET.tostring(root, encoding='unicode')
363
758
 
364
759
  def run(self):
365
- xml = self.save_setup_id_definition_to_xml(self.xml_file)
760
+ xml = self.xml_file
761
+ xml = self.wrap_image_loader_for_split(xml)
762
+ xml = self.save_setup_id_definition_to_xml(xml)
366
763
  xml = self.save_setup_id_to_xml(xml)
367
764
  xml = self.save_view_registrations_to_xml(xml)
765
+ xml = self.save_tile_attributes_to_xml(xml)
368
766
  xml = self.save_view_interest_points(xml)
369
- xml = self.wrap_image_loader_for_split(xml)
370
767
 
371
768
  if self.xml_output_path:
372
769
  if self.xml_output_path.startswith("s3://"):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: Rhapso
3
- Version: 0.1.991
3
+ Version: 0.1.993
4
4
  Summary: A python package for aligning and stitching light sheet fluorescence microscopy images
5
5
  Author: ND
6
6
  Author-email: sean.fite@alleninstitute.org
@@ -9,13 +9,9 @@ Project-URL: Roadmap, https://github.com/AllenNeuralDynamics/Rhapso/issues
9
9
  Classifier: Development Status :: 3 - Alpha
10
10
  Classifier: Intended Audience :: Developers
11
11
  Classifier: Natural Language :: English
12
- Classifier: Programming Language :: Python :: 3
13
- Classifier: Programming Language :: Python :: 3.7
14
- Classifier: Programming Language :: Python :: 3.8
15
- Classifier: Programming Language :: Python :: 3.9
16
12
  Classifier: Programming Language :: Python :: 3.10
17
13
  Classifier: Operating System :: OS Independent
18
- Requires-Python: >=3.7
14
+ Requires-Python: >=3.10
19
15
  Description-Content-Type: text/markdown
20
16
  License-File: LICENSE
21
17
  Requires-Dist: pandas
@@ -33,6 +29,7 @@ Requires-Dist: matplotlib==3.10.0
33
29
  Requires-Dist: memory-profiler==0.61.0
34
30
  Requires-Dist: s3fs==2024.12.0
35
31
  Requires-Dist: scikit-learn
32
+ Requires-Dist: click==8.2.1
36
33
  Dynamic: author
37
34
  Dynamic: author-email
38
35
  Dynamic: classifier
@@ -414,25 +411,26 @@ python Rhapso/pipelines/ray/aws/alignment_pipeline.py
414
411
  | Parameter | Feature / step | What it does | Typical range |
415
412
  | :---------------------------- | :------------------- | :---------------------------------------------------------------- | :------------- |
416
413
  | `model_min_matches` | RANSAC | Minimum correspondences to estimate a rigid transform | 18 – 32 |
417
- | `inlier_factor` | RANSAC | Inlier tolerance scaling; larger = looser inlier threshold | 30 – 100 |
418
- | `lambda_value` | RANSAC | Regularization strength during model fitting | 0.1 – 0.05 |
414
+ | `inlier_threshold` | RANSAC | Inlier tolerance scaling; larger = looser inlier threshold | 50 – 100 |
415
+ | `min_inlier_ratio` | RANSAC | Regularization strength during model fitting | 0.1 – 0.05 |
419
416
  | `num_iterations` | RANSAC | Number of RANSAC trials; higher = more robust, slower | 10,0000 |
420
- | `regularization_weight` | RANSAC | Weight applied to the regularization term | 1.0 |
417
+ | `regularization_weight` | RANSAC | Weight applied to the regularization term | 0.05 - 1.0 |
421
418
 
422
419
  ```
423
420
  <br>
424
421
 
425
422
  ### Solver
426
423
  ```
427
- | Parameter | Feature / step | What it does | Typical range |
428
- | :------------------- | :------------- | :----------------------------------------------------------------- | :------------------ |
429
- | `relative_threshold` | Graph pruning | Reject edges with residuals above dataset-relative cutoff | 3.5 |
430
- | `absolute_threshold` | Graph pruning | Reject edges above an absolute error bound (detection-space units) | 7.0 |
431
- | `min_matches` | Graph pruning | Minimum matches required to retain an edge between tiles | 3 |
432
- | `damp` | Optimization | Damping for iterative solver; higher can stabilize tough cases | 1.0 |
433
- | `max_iterations` | Optimization | Upper bound on solver iterations | 10,0000 |
434
- | `max_allowed_error` | Optimization | Overall error cap; `inf` disables hard stop by error | `inf` |
435
- | `max_plateauwidth` | Early stopping | Stagnation window before stopping on no improvement | 200 |
424
+ | Parameter | Feature / step | What it does | Typical range |
425
+ | :----------------------- | :------------- | :----------------------------------------------------------------- | :------------------ |
426
+ | `relative_threshold` | Graph pruning | Reject edges with residuals above dataset-relative cutoff | 3.5 |
427
+ | `absolute_threshold` | Graph pruning | Reject edges above an absolute error bound (detection-space units) | 7.0 |
428
+ | `min_matches` | Graph pruning | Minimum matches required to retain an edge between tiles | 3 |
429
+ | `damp` | Optimization | Damping for iterative solver; higher can stabilize tough cases | 1.0 |
430
+ | `max_iterations` | Optimization | Upper bound on solver iterations | 10,0000 |
431
+ | `max_allowed_error` | Optimization | Overall error cap; `inf` disables hard stop by error | `inf` |
432
+ | `max_plateauwidth` | Early stopping | Stagnation window before stopping on no improvement | 200 |
433
+ | `regularization_weight` | RANSAC | Weight applied to the regularization term | 0.05 - 1.0 |
436
434
 
437
435
  ```
438
436
 
@@ -56,29 +56,29 @@ Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py,sha256=8hFBM8lmCV3TsEEVXZk
56
56
  Rhapso/fusion/neuroglancer_link_gen/utils/utils.py,sha256=Q0jKPGI39ScCHDgVEoWbrZrBrEluCjm69z28T3JScTc,7322
57
57
  Rhapso/matching/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
58
58
  Rhapso/matching/load_and_transform_points.py,sha256=UEV9gnWyl2z4zAUR2Z_ewX1_TWoS-IpXPL-qRB1_Ms8,16416
59
- Rhapso/matching/ransac_matching.py,sha256=PH9JpWqCrWcZoDvwG_Zs2FSk2cCdIX9be_6y2ix-3RI,20031
59
+ Rhapso/matching/ransac_matching.py,sha256=BWSRmckG2GiA49RuGnja4le-Xo5o9aY53Yawes9P-_E,20268
60
60
  Rhapso/matching/save_matches.py,sha256=OYHzzGoF9-PTVfQZBJ7HaJ9JYB_Q8ZazH-f3HljxzLQ,4720
61
61
  Rhapso/matching/xml_parser.py,sha256=nP_jA_jEfAFPzt6Al91NpwWEk-Klk8P5QzNINJYVksU,11527
62
62
  Rhapso/pipelines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
63
63
  Rhapso/pipelines/ray/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
64
64
  Rhapso/pipelines/ray/evaluation.py,sha256=k05e72WKu9UkptJkjk_rkScjn1zGOhIG1rTOLkxkOLI,2610
65
65
  Rhapso/pipelines/ray/interest_point_detection.py,sha256=NZ2Egm81KPC1dFncz_6srW0TYhBTpKqy80nEk0DigEU,6595
66
- Rhapso/pipelines/ray/interest_point_matching.py,sha256=aM-83D3JJUrkiWT0hogpcyxUw7h78kKegYtUFKMQu3s,5697
66
+ Rhapso/pipelines/ray/interest_point_matching.py,sha256=0wdCxcz-PeNSrxLIZpHpUyJLOvp7tM8S6LH2k1t2woA,5746
67
67
  Rhapso/pipelines/ray/matching_stats.py,sha256=7NZ4rQPTKMwzJBrPXkVbiWkzIeH4h9PfDozqrUo8TKU,4199
68
- Rhapso/pipelines/ray/solver.py,sha256=JqNZPyd_0tliM3mu8voDNcQuhkgwMKF5fBMzfjVMCy8,6198
68
+ Rhapso/pipelines/ray/solver.py,sha256=FK02ttFOvVRHTvlsBJ_kJ4GdwLHfIa3ljZ6VhE8FxS0,6458
69
69
  Rhapso/pipelines/ray/split_dataset.py,sha256=JVMd3qYw17Em-rQDvufSAaqaPWw5kf9yWpf5ShSh97M,2943
70
70
  Rhapso/pipelines/ray/aws/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
71
- Rhapso/pipelines/ray/aws/alignment_pipeline.py,sha256=JvSB08mIskgqzc-U-6vOprl5k7i9UN5yOgDOKtimIJ0,9716
71
+ Rhapso/pipelines/ray/aws/alignment_pipeline.py,sha256=c_UUw_90xnoAJ5zYuU5OD0Uzr1cPOqsZ6aXHOQauYCI,10015
72
72
  Rhapso/pipelines/ray/aws/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
73
73
  Rhapso/pipelines/ray/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
74
- Rhapso/pipelines/ray/local/alignment_pipeline.py,sha256=OFdns9kILtAQD8KNU7MoLXQeZba1q4YV9LiLVSo8fi0,6712
74
+ Rhapso/pipelines/ray/local/alignment_pipeline.py,sha256=EV2e0Kfx7uSHaIVMTKId-NG8f6B4TCWnI5yqSRrmuFE,6989
75
75
  Rhapso/pipelines/ray/param/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
76
76
  Rhapso/solver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
77
77
  Rhapso/solver/compute_tiles.py,sha256=-7QFrZZfh1iW1nDdlBORlcqOGf7urJmgjWOj6DfDdHQ,20471
78
78
  Rhapso/solver/concatenate_models.py,sha256=-763cwYyyHM6b7E5RkHMRtykFrVjeVeB1l0jFeoLfnI,4472
79
79
  Rhapso/solver/connected_graphs.py,sha256=wKxOOKRNd4MctYRX677hiFIYHLAg3GinkpqDqO0ad50,3603
80
80
  Rhapso/solver/data_prep.py,sha256=nS96_4MJ0TcHyFMsGDS82n0R-aUm8J99g8d_MrWpsik,7753
81
- Rhapso/solver/global_optimization.py,sha256=6qYXHmJRCTf-4gSwFgmy9IBUBUp7SuL1kMOER8cTx6A,13614
81
+ Rhapso/solver/global_optimization.py,sha256=Yl9_GlZDVLHtYtmlRY7qS0CX7c3c4zYxexIaGJVtsGY,13720
82
82
  Rhapso/solver/model_and_tile_setup.py,sha256=WqCK9_sSMJ5MFmvIYi1_x1Sx_9giYORpCfcirZsMNOI,4889
83
83
  Rhapso/solver/pre_align_tiles.py,sha256=BccXMr5cZILM8U0ZzhUyflQYx_WUQzvNDdX-S_u4Roo,11100
84
84
  Rhapso/solver/save_results.py,sha256=dIkQ3SiGNlKx5z-6k_o_O4WjANxKYLJQ059URnuxVJw,3985
@@ -87,15 +87,15 @@ Rhapso/solver/xml_to_dataframe_solver.py,sha256=u96DPnZNDa-TgimsPqyZlIvHk4A1e8yZ
87
87
  Rhapso/split_dataset/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
88
88
  Rhapso/split_dataset/compute_grid_rules.py,sha256=dCAuNs4D7SaCfTXp54lchgttZVSgLH5f1tQWtkwr_ws,3028
89
89
  Rhapso/split_dataset/save_points.py,sha256=k-jH-slmxkbrxDl-uJvDkwOedi6cg7md3kg_a0NdL24,3713
90
- Rhapso/split_dataset/save_xml.py,sha256=Iq1UdFa8sdnWGygfIpDi4F5In-SCWggpl7lnuDTxkHE,14280
90
+ Rhapso/split_dataset/save_xml.py,sha256=55NdzfHPGAhpR1QYaDwXPZQlZlP6T9fymhJaOwTuZh0,29561
91
91
  Rhapso/split_dataset/split_images.py,sha256=2RzAi0btV1tmh4le9QotRif1IYUU6_4pLcGGpFBM9zk,22434
92
92
  Rhapso/split_dataset/xml_to_dataframe_split.py,sha256=ByaLzJ4sqT417UiCQU31_CS_V4Jms7pjMbBl0ZdSNNA,8570
93
- rhapso-0.1.991.dist-info/licenses/LICENSE,sha256=U0Y7B3gZJHXpjJVLgTQjM8e_c8w4JJpLgGhIdsoFR1Y,1092
93
+ rhapso-0.1.993.dist-info/licenses/LICENSE,sha256=U0Y7B3gZJHXpjJVLgTQjM8e_c8w4JJpLgGhIdsoFR1Y,1092
94
94
  tests/__init__.py,sha256=LYf6ZGyYRcduFFSaOLmnw3rTyfS3XLib0dsTHDWH0jo,37
95
95
  tests/test_detection.py,sha256=NtFYR_du9cbKrclQcNiJYsKzyqly6ivF61pw6_NICcM,440
96
96
  tests/test_matching.py,sha256=QX0ekSdyIkPpAsXHfSMqJUUlNZg09caSlhhUM63MduM,697
97
97
  tests/test_solving.py,sha256=t8I9XPV_4ZFM-DJpgvdYXxkG2_4DQgqs-FFyE5w8Nfg,695
98
- rhapso-0.1.991.dist-info/METADATA,sha256=QjjEf8EIF1t2I1mvBmN22MSawRvJNQLWV_pnh08YlZ0,19294
99
- rhapso-0.1.991.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
100
- rhapso-0.1.991.dist-info/top_level.txt,sha256=NXvsrsTfdowWbM7MxEjkDZE2Jo74lmq7ruWkp70JjSw,13
101
- rhapso-0.1.991.dist-info/RECORD,,
98
+ rhapso-0.1.993.dist-info/METADATA,sha256=U_wnul9wYKBsJqbjbs2FluO8uiAbaFTqm5ntyUxfzps,19298
99
+ rhapso-0.1.993.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
100
+ rhapso-0.1.993.dist-info/top_level.txt,sha256=NXvsrsTfdowWbM7MxEjkDZE2Jo74lmq7ruWkp70JjSw,13
101
+ rhapso-0.1.993.dist-info/RECORD,,