Rhapso 0.1.92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. Rhapso/__init__.py +1 -0
  2. Rhapso/data_prep/__init__.py +2 -0
  3. Rhapso/data_prep/n5_reader.py +188 -0
  4. Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
  5. Rhapso/data_prep/xml_to_dataframe.py +215 -0
  6. Rhapso/detection/__init__.py +5 -0
  7. Rhapso/detection/advanced_refinement.py +203 -0
  8. Rhapso/detection/difference_of_gaussian.py +324 -0
  9. Rhapso/detection/image_reader.py +117 -0
  10. Rhapso/detection/metadata_builder.py +130 -0
  11. Rhapso/detection/overlap_detection.py +327 -0
  12. Rhapso/detection/points_validation.py +49 -0
  13. Rhapso/detection/save_interest_points.py +265 -0
  14. Rhapso/detection/view_transform_models.py +67 -0
  15. Rhapso/fusion/__init__.py +0 -0
  16. Rhapso/fusion/affine_fusion/__init__.py +2 -0
  17. Rhapso/fusion/affine_fusion/blend.py +289 -0
  18. Rhapso/fusion/affine_fusion/fusion.py +601 -0
  19. Rhapso/fusion/affine_fusion/geometry.py +159 -0
  20. Rhapso/fusion/affine_fusion/io.py +546 -0
  21. Rhapso/fusion/affine_fusion/script_utils.py +111 -0
  22. Rhapso/fusion/affine_fusion/setup.py +4 -0
  23. Rhapso/fusion/affine_fusion_worker.py +234 -0
  24. Rhapso/fusion/multiscale/__init__.py +0 -0
  25. Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
  26. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
  27. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
  28. Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
  29. Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
  30. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
  31. Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
  32. Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
  33. Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
  34. Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
  35. Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
  36. Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
  37. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
  38. Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
  39. Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
  40. Rhapso/fusion/multiscale_worker.py +113 -0
  41. Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
  42. Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
  43. Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
  44. Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
  45. Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
  46. Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
  47. Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
  48. Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
  49. Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
  50. Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
  51. Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
  52. Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
  53. Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
  54. Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
  55. Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
  56. Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
  57. Rhapso/matching/__init__.py +0 -0
  58. Rhapso/matching/load_and_transform_points.py +458 -0
  59. Rhapso/matching/ransac_matching.py +544 -0
  60. Rhapso/matching/save_matches.py +120 -0
  61. Rhapso/matching/xml_parser.py +302 -0
  62. Rhapso/pipelines/__init__.py +0 -0
  63. Rhapso/pipelines/ray/__init__.py +0 -0
  64. Rhapso/pipelines/ray/aws/__init__.py +0 -0
  65. Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
  66. Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
  67. Rhapso/pipelines/ray/evaluation.py +71 -0
  68. Rhapso/pipelines/ray/interest_point_detection.py +137 -0
  69. Rhapso/pipelines/ray/interest_point_matching.py +110 -0
  70. Rhapso/pipelines/ray/local/__init__.py +0 -0
  71. Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
  72. Rhapso/pipelines/ray/matching_stats.py +104 -0
  73. Rhapso/pipelines/ray/param/__init__.py +0 -0
  74. Rhapso/pipelines/ray/solver.py +120 -0
  75. Rhapso/pipelines/ray/split_dataset.py +78 -0
  76. Rhapso/solver/__init__.py +0 -0
  77. Rhapso/solver/compute_tiles.py +562 -0
  78. Rhapso/solver/concatenate_models.py +116 -0
  79. Rhapso/solver/connected_graphs.py +111 -0
  80. Rhapso/solver/data_prep.py +181 -0
  81. Rhapso/solver/global_optimization.py +410 -0
  82. Rhapso/solver/model_and_tile_setup.py +109 -0
  83. Rhapso/solver/pre_align_tiles.py +323 -0
  84. Rhapso/solver/save_results.py +97 -0
  85. Rhapso/solver/view_transforms.py +75 -0
  86. Rhapso/solver/xml_to_dataframe_solver.py +213 -0
  87. Rhapso/split_dataset/__init__.py +0 -0
  88. Rhapso/split_dataset/compute_grid_rules.py +78 -0
  89. Rhapso/split_dataset/save_points.py +101 -0
  90. Rhapso/split_dataset/save_xml.py +377 -0
  91. Rhapso/split_dataset/split_images.py +537 -0
  92. Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
  93. rhapso-0.1.92.dist-info/METADATA +39 -0
  94. rhapso-0.1.92.dist-info/RECORD +101 -0
  95. rhapso-0.1.92.dist-info/WHEEL +5 -0
  96. rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
  97. rhapso-0.1.92.dist-info/top_level.txt +2 -0
  98. tests/__init__.py +1 -0
  99. tests/test_detection.py +17 -0
  100. tests/test_matching.py +21 -0
  101. tests/test_solving.py +21 -0
@@ -0,0 +1,67 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+
4
+ """
5
+ View Transform Models parses and combines view registrations matrices
6
+ """
7
+
8
class ViewTransformModels:
    """
    Parses view registration matrices from a dataframe and combines
    calibration and rotation transforms into one affine matrix per view.
    """

    def __init__(self, df):
        """
        Parameters
        ----------
        df : dict
            Mapping that may contain a "view_registrations" DataFrame with
            columns: type, affine, timepoint, setup, name.
        """
        # Missing key falls back to an empty frame; run() raises on it later.
        self.view_registrations_df = df.get("view_registrations", pd.DataFrame())
        self.calibration_matrices = {}
        self.rotation_matrices = {}
        self.concatenated_matrices = {}

    def create_transform_matrices(self):
        """
        Extracts transformation matrices from the dataframe and organizes them
        into calibration/rotation buckets keyed by "timepoint: T, setup: S".

        Raises
        ------
        ValueError
            If the dataframe is empty, or an affine row does not contain
            12 or 16 comma-separated values.
        """
        if self.view_registrations_df.empty:
            raise ValueError("view_registrations_df is empty")

        # parse DF for view_transform matrices
        for _, row in self.view_registrations_df.iterrows():
            if row["type"] != "affine":
                continue

            # create affine matrix (row-major values)
            affine_values = np.array(row["affine"].split(","), dtype=np.float64)
            if len(affine_values) == 12:
                # 3x4 matrix: append the homogeneous bottom row [0, 0, 0, 1].
                affine_values = np.append(affine_values, [0, 0, 0, 1])
            if len(affine_values) != 16:
                # Fail early with a clear message instead of a cryptic
                # reshape error on malformed input.
                raise ValueError(
                    f"Expected 12 or 16 affine values, got {len(affine_values)}"
                )
            affine_matrix = affine_values.reshape(4, 4)

            # bucket matrix by row name
            view_id = f"timepoint: {row['timepoint']}, setup: {row['setup']}"
            if "calibration" in row["name"].lower():
                self.calibration_matrices[view_id] = {"affine_matrix": affine_matrix}
            else:
                self.rotation_matrices[view_id] = {"affine_matrix": affine_matrix}

    def concatenate_matrices_by_view_id(self):
        """
        Concatenates calibration and rotation matrices for each view ID,
        if available.

        Zarr datasets carry only rotation matrices, which are used as-is;
        TIFF datasets carry both and the result is rotation @ calibration.

        Raises
        ------
        ValueError
            If neither calibration nor rotation matrices were parsed.
        """
        if not self.calibration_matrices and not self.rotation_matrices:
            raise ValueError("No matrices to concatenate")

        # Zarr
        if not self.calibration_matrices:
            self.concatenated_matrices = {
                key: entry["affine_matrix"]
                for key, entry in self.rotation_matrices.items()
            }

        # TIFF
        else:
            for key, cal in self.calibration_matrices.items():
                rot = self.rotation_matrices.get(key)
                # Views lacking a rotation counterpart are silently skipped,
                # matching the original behavior.
                if rot is not None:
                    self.concatenated_matrices[key] = np.dot(
                        rot["affine_matrix"], cal["affine_matrix"]
                    )

    def run(self):
        """
        Executes the entry point: parse, concatenate, and return the
        dict of view_id -> 4x4 concatenated matrix.
        """
        self.create_transform_matrices()
        self.concatenate_matrices_by_view_id()
        return self.concatenated_matrices
File without changes
@@ -0,0 +1,2 @@
1
+ """Init package"""
2
+ __version__ = "0.0.0"
@@ -0,0 +1,289 @@
1
+ """
2
+ Interface for generic blending.
3
+ """
4
+
5
+ import dask.array as da
6
+ import numpy as np
7
+ import torch
8
+ import xmltodict
9
+
10
+ from collections import defaultdict
11
+
12
+ from . import geometry
13
+
14
+
15
class BlendingModule:
    """
    Minimal interface for modular blending function.
    Subclass can define arbitrary constructors/attributes/members as necessary.
    """

    def blend(self,
              chunks: list[torch.Tensor],
              device: torch.device,
              kwargs=None,
              ) -> torch.Tensor:
        """
        Blend ``chunks`` into a single fused chunk.

        Parameters
        ----------
        chunks:
            Chunks to blend into snowball_chunk
        device:
            Torch device on which blending should run.
        kwargs:
            Extra keyword arguments (optional dict; ``None`` by default —
            the original mutable ``{}`` default was a Python pitfall).

        Raises
        ------
        NotImplementedError
            Always; subclasses must override this method.
        """

        raise NotImplementedError(
            "Please implement in BlendingModule subclass."
        )
36
+
37
+
38
class MaxProjection(BlendingModule):
    """
    Simplest blending implementation possible: voxel-wise maximum.
    No constructor needed.
    """

    def blend(self,
              chunks: list[torch.Tensor],
              device: torch.device,
              kwargs=None,
              ) -> torch.Tensor:
        """
        Parameters
        ----------
        chunks: list of 3D tensors to combine. Contains 2 or more elements.
        device: device on which the maximum is computed.
        kwargs: unused; accepted for interface compatibility
            (default changed from mutable ``{}`` to ``None``).

        Returns
        -------
        fused_chunk: combined chunk (element-wise maximum of all chunks)
        """

        fused_chunk = chunks[0].to(device)
        for c in chunks[1:]:
            fused_chunk = torch.maximum(fused_chunk, c.to(device))

        return fused_chunk
64
+
65
+
66
def get_overlap_regions(tile_layout: list[list[int]],
                        tile_aabbs: "dict[int, geometry.AABB]"
                        ) -> "tuple[dict[int, list[int]], dict[int, geometry.AABB]]":
    """
    Input:
        tile_layout: array of tile ids arranged corresponding to stage coordinates
        tile_aabbs: dict of tile_id -> AABB, defined in fusion initialization.

    Output:
        tile_to_overlap_ids: Maps tile_id to associated overlap region ids
        overlaps: Maps overlap_id to actual overlap region AABB

    Access pattern:
        tile_id -> overlap_id -> overlaps
    """

    def _intersect_aabbs(aabb_1, aabb_2):
        """
        Return the overlap AABB of two colliding boxes, or None when they
        do not collide on all 3 axes.

        NOTE: the original used ``assert`` + bare ``except:`` for this test;
        asserts are stripped under ``python -O`` (silently yielding bogus
        overlaps) and the bare except swallowed unrelated errors, so the
        collision check is now an explicit condition.
        """
        colliding = (aabb_1[1] > aabb_2[0] and aabb_1[0] < aabb_2[1] and
                     aabb_1[3] > aabb_2[2] and aabb_1[2] < aabb_2[3] and
                     aabb_1[5] > aabb_2[4] and aabb_1[4] < aabb_2[5])
        if not colliding:
            return None

        # Between two colliding intervals A and B, the overlap interval is
        # max(A_min, B_min) .. min(A_max, B_max).
        return (max(aabb_1[0], aabb_2[0]),
                min(aabb_1[1], aabb_2[1]),
                max(aabb_1[2], aabb_2[2]),
                min(aabb_1[3], aabb_2[3]),
                max(aabb_1[4], aabb_2[4]),
                min(aabb_1[5], aabb_2[5]))

    # Output Data Structures
    tile_to_overlap_ids: dict[int, list[int]] = defaultdict(list)
    overlaps: dict[int, "geometry.AABB"] = {}

    # 1) Find all unique edges (8-connected neighbours in the layout grid).
    edges: set[tuple[int, int]] = set()
    x_length = len(tile_layout)
    y_length = len(tile_layout[0])
    directions = [(-1, -1), (-1, 0), (-1, 1),
                  (0, -1), (0, 1),
                  (1, -1), (1, 0), (1, 1)]
    for x in range(x_length):
        for y in range(y_length):
            if tile_layout[x][y] == -1:  # spacer cell
                continue
            for dx, dy in directions:
                nx, ny = x + dx, y + dy
                if (0 <= nx < x_length and 0 <= ny < y_length  # boundary
                        and tile_layout[nx][ny] != -1):        # spacer
                    id_1 = tile_layout[x][y]
                    id_2 = tile_layout[nx][ny]
                    edges.add(tuple(sorted((id_1, id_2))))

    # 2) Find overlap regions, numbering them in deterministic edge order.
    overlap_id = 0
    for id_1, id_2 in sorted(edges):
        o_aabb = _intersect_aabbs(tile_aabbs[id_1], tile_aabbs[id_2])
        if o_aabb is None:
            # Neighbours in the layout that do not actually collide in space.
            continue

        overlaps[overlap_id] = o_aabb
        tile_to_overlap_ids[id_1].append(overlap_id)
        tile_to_overlap_ids[id_2].append(overlap_id)
        overlap_id += 1

    return tile_to_overlap_ids, overlaps
151
+
152
+
153
class WeightedLinearBlending(BlendingModule):
    """
    Linear Blending with distance-based weights.
    NOTE: Only supports translation-only registration on square tiles.
    To modify for affine registration:
    - Forward transform overlap weights into output volume.
    - Inverse transform for local weights.
    """

    def __init__(self,
                 tile_aabbs: "dict[int, geometry.AABB]",
                 ) -> None:
        """
        Parameters
        ----------
        tile_aabbs: dict of tile_id -> AABB, defined in fusion initialization.
        """
        # (The original placed this docstring after super().__init__(),
        # where it was a no-op string statement rather than a docstring.)
        super().__init__()
        self.tile_aabbs = tile_aabbs

    def blend(self,
              chunks: list[torch.Tensor],
              device: torch.device,
              kwargs=None,
              ) -> torch.Tensor:
        """
        Parameters
        ----------
        chunks: 5d tensor(s) in 11zyx order
        device: device on which the fused chunk is accumulated
        kwargs:
            chunk_tile_ids:
                list of tile ids corresponding to each chunk
            cell_box:
                cell AABB in output volume/absolute coordinates

        Returns
        -------
        fused_chunk: combined chunk
        """

        # Trivial no blending case -- non-overlapping region.
        if len(chunks) == 1:
            return chunks[0]

        # For 2+ chunks, within an overlapping region:
        chunk_tile_ids = kwargs['chunk_tile_ids']
        cell_box = kwargs['cell_box']

        # Calculate local weight masks (on the chunks' host device).
        local_weights: list[torch.Tensor] = []
        total_weight: torch.Tensor = torch.zeros(chunks[0].shape)
        for tile_id, chunk in zip(chunk_tile_ids, chunks):
            tile_aabb = self.tile_aabbs[tile_id]
            x_min = tile_aabb[4]
            cy = (tile_aabb[3] + tile_aabb[2]) / 2
            cx = (tile_aabb[5] + tile_aabb[4]) / 2

            # Voxel-center coordinates of the cell in absolute space.
            z_indices = torch.arange(cell_box[0], cell_box[1], step=1) + 0.5
            y_indices = torch.arange(cell_box[2], cell_box[3], step=1) + 0.5
            x_indices = torch.arange(cell_box[4], cell_box[5], step=1) + 0.5

            z_grid, y_grid, x_grid = torch.meshgrid(
                z_indices, y_indices, x_indices, indexing="ij"
            )

            # Weight formula:
            # 1) Apply pyramid function wrt to center of square tile.
            #    For each incoming chunk, a chunk may only have partial signal,
            #    representing cells that lie between two tiles.
            # 2) After calculating pyramid weights, confine weights to actual
            #    boundary of image, represented by non-zero values in chunk.
            weights = (cx - x_min) - torch.max(torch.abs(x_grid - cx),
                                               torch.abs(y_grid - cy))
            signal_mask = torch.clamp(chunk, 0, 1)
            inbound_weights = weights * signal_mask

            local_weights.append(inbound_weights)
            total_weight += inbound_weights

        # Guard against division by zero where no chunk contributes signal:
        # the local weights there are already zero, so dividing by 1 keeps
        # them zero instead of producing NaNs that contaminate the output.
        safe_total = torch.where(total_weight > 0,
                                 total_weight,
                                 torch.ones_like(total_weight))

        # Accumulate the fused chunk on the target device (the original
        # allocated it on CPU, which fails when ``device`` is a GPU).
        fused_chunk = torch.zeros(chunks[0].shape, device=device)
        for w, c in zip(local_weights, chunks):
            w = (w / safe_total).to(device)
            fused_chunk += w * c.to(device)

        return fused_chunk
240
+
241
def parse_yx_tile_layout(xml_path: str) -> list[list[int]]:
    """
    Utility for parsing tile layout from a bigstitcher xml
    requested by some blending modules.

    tile_layout follows axis convention:
    +--- +x
    |
    |
    +y

    Tile ids in output tile_layout uses the same tile ids
    defined in the xml file. Spaces denoted with tile id '-1'.

    Notes
    -----
    - Parsed with the stdlib ``xml.etree.ElementTree``: the previous
      ``xmltodict`` version iterated ``ViewRegistration`` as a list, which
      breaks when the file holds a single element (dict, not list).
    - Grid indices come from the rank of each unique x/y stage position,
      so single-row/column layouts and non-zero origins work; the previous
      delta-division required >= 2 positions per axis and a zero origin.
    """
    import xml.etree.ElementTree as ET

    # Parse stage positions. In the row-major 3x4 affine, the translation
    # components sit at every 4th value (indices 3, 7, 11 -> tx, ty, tz).
    root = ET.parse(xml_path).getroot()
    stage_positions_xyz: dict[int, tuple[float, float, float]] = {}
    for vr in root.find('ViewRegistrations').findall('ViewRegistration'):
        tile_id = int(vr.get('setup'))

        # With multiple transforms, the last one holds the stage position.
        view_transforms = vr.findall('ViewTransform')
        affine_text = view_transforms[-1].find('affine').text

        nums = [float(val) for val in affine_text.split(" ")]
        stage_positions_xyz[tile_id] = tuple(nums[3::4])

    # Sorted unique positions per axis; a tile's grid index is the rank of
    # its stage position along that axis.
    x_pos = sorted({pos[0] for pos in stage_positions_xyz.values()})
    y_pos = sorted({pos[1] for pos in stage_positions_xyz.values()})
    x_index = {pos: i for i, pos in enumerate(x_pos)}
    y_index = {pos: i for i, pos in enumerate(y_pos)}

    # Fill tile_layout; -1 marks spacer cells with no tile.
    tile_layout = np.full((len(y_pos), len(x_pos)), -1, dtype=int)
    for tile_id, s_pos in stage_positions_xyz.items():
        tile_layout[y_index[s_pos[1]], x_index[s_pos[0]]] = tile_id

    return tile_layout