rhapso-0.1.92-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
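The wheel ships one importable top-level package, Rhapso (plus tests, per top_level.txt). A minimal consumption sketch, assuming the distribution is published under the name rhapso (the registry itself is not shown in this diff):

# pip install rhapso==0.1.92
from Rhapso.solver.concatenate_models import ConcatenateModels
from Rhapso.solver.connected_graphs import ConnectedGraphs
from Rhapso.solver.data_prep import DataPrep

The three solver modules below are shown in full.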
Rhapso/solver/concatenate_models.py
@@ -0,0 +1,116 @@
from copy import deepcopy

"""
Concatenate Models stitches together the results of two alignment rounds (split-affine)
"""

class ConcatenateModels:
    def __init__(self, tiles, tiles_round_2, groups, validation_stats, validation_stats_round_2, view_map):
        self.tiles = tiles
        self.tiles_round_2 = tiles_round_2
        self.groups = groups
        self.validation_stats = validation_stats
        self.validation_stats_round_2 = validation_stats_round_2
        self.view_map = view_map

    def map_models_back_from_groups(self):
        """
        Copy the solved model and matches back onto every tile in the same group
        """
        view_to_group = {v: gi for gi, g in enumerate(self.groups) for v in g.get('views', [])}

        for rep_tile in self.tiles_round_2:
            rep_view = rep_tile['view']
            gi = view_to_group.get(rep_view)
            if gi is None:
                continue

            group_views = self.groups[gi]['views']
            rep_model = deepcopy(rep_tile.get('model', {}))
            rep_matches = list(rep_tile.get('matches', []))

            # Normalize rep connections into dicts
            rep_conns = []
            for c in rep_tile.get('connected_tiles', []):
                v = c['view'] if isinstance(c, dict) else c
                rep_conns.append({'view': v, 'tile': self.view_map.get(v)})

            # Propagate to every member of the group
            for v in group_views:
                t = self.view_map.get(v)
                if not t:
                    continue

                t['model'] = deepcopy(rep_model)
                t['matches'] = list(rep_matches)

        return list(self.view_map.values())

    def preconcatenate_affine(self, m1, m2):
        """
        Compose two 3x4 affine transforms stored as dicts (m1 applied first, then m2)
        """
        m00, m01, m02, m03 = m1['m00'], m1['m01'], m1['m02'], m1['m03']
        m10, m11, m12, m13 = m1['m10'], m1['m11'], m1['m12'], m1['m13']
        m20, m21, m22, m23 = m1['m20'], m1['m21'], m1['m22'], m1['m23']

        n00, n01, n02, n03 = m2['m00'], m2['m01'], m2['m02'], m2['m03']
        n10, n11, n12, n13 = m2['m10'], m2['m11'], m2['m12'], m2['m13']
        n20, n21, n22, n23 = m2['m20'], m2['m21'], m2['m22'], m2['m23']

        return {
            'm00': n00*m00 + n01*m10 + n02*m20,
            'm01': n00*m01 + n01*m11 + n02*m21,
            'm02': n00*m02 + n01*m12 + n02*m22,
            'm03': n00*m03 + n01*m13 + n02*m23 + n03,

            'm10': n10*m00 + n11*m10 + n12*m20,
            'm11': n10*m01 + n11*m11 + n12*m21,
            'm12': n10*m02 + n11*m12 + n12*m22,
            'm13': n10*m03 + n11*m13 + n12*m23 + n13,

            'm20': n20*m00 + n21*m10 + n22*m20,
            'm21': n20*m01 + n21*m11 + n22*m21,
            'm22': n20*m02 + n21*m12 + n22*m22,
            'm23': n20*m03 + n21*m13 + n22*m23 + n23,
        }

    def merge_validation_stats(self, v1, v2):
        """
        Merge two validation-metrics dicts by concatenating per-tile stats from v2 after v1,
        offsetting v2's iteration numbers and tagging entries with their round
        """
        out = deepcopy(v1) if v1 else {}
        s1 = out.setdefault('solver_metrics_per_tile', {}).setdefault('stats', [])
        s2 = (v2 or {}).get('solver_metrics_per_tile', {}).get('stats', []) or []

        # tag round and offset iterations
        if s1 and 'round' not in s1[0]:
            for x in s1:
                x['round'] = 1
        offset = (s1[-1]['iteration'] + 1) if s1 else 0

        for x in s2:
            y = dict(x)
            y['iteration'] = x.get('iteration', 0) + offset
            y['round'] = 2
            s1.append(y)

        return out

    def run(self):
        """
        Executes the entry point of the script.
        """
        view_map = self.map_models_back_from_groups()
        combined_validation_stats = self.merge_validation_stats(self.validation_stats, self.validation_stats_round_2)

        tiles_round_1 = {t['view']: t for t in self.tiles}
        tiles_round_2 = {t['view']: t for t in view_map}

        for vid in tiles_round_1.keys() & tiles_round_2.keys():
            m1 = tiles_round_1[vid]['model']['regularized']
            m2 = tiles_round_2[vid]['model']['regularized']
            tiles_round_2[vid]['model']['regularized'] = self.preconcatenate_affine(m1, m2)

        tiles_round_2 = [tiles_round_2[t['view']] for t in view_map]
        return tiles_round_2, combined_validation_stats
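For reference, preconcatenate_affine expands to the homogeneous matrix product M2 · M1, i.e. the round-1 model is applied first. A minimal sketch that checks this against numpy (the empty constructor arguments are placeholders just to reach the method):

import numpy as np
from Rhapso.solver.concatenate_models import ConcatenateModels

def as_dict(a):
    # 3x4 numpy affine -> the {'m00': ..., 'm23': ...} layout used above
    return {f"m{r}{c}": float(a[r, c]) for r in range(3) for c in range(4)}

def as_4x4(a):
    # append the homogeneous row [0, 0, 0, 1]
    return np.vstack([a, [0.0, 0.0, 0.0, 1.0]])

m1 = np.hstack([2.0 * np.eye(3), [[1.0], [2.0], [3.0]]])  # round 1: scale + shift
m2 = np.hstack([np.eye(3), [[-1.0], [0.0], [5.0]]])       # round 2: pure shift

cm = ConcatenateModels([], [], [], {}, {}, {})
composed = cm.preconcatenate_affine(as_dict(m1), as_dict(m2))

expected = (as_4x4(m2) @ as_4x4(m1))[:3, :]
got = np.array([[composed[f"m{r}{c}"] for c in range(4)] for r in range(3)])
assert np.allclose(got, expected)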
Rhapso/solver/connected_graphs.py
@@ -0,0 +1,111 @@

"""
Connected Graphs splits a tile set into connected components using each tile's connected_tiles links (split-affine).
"""

class ConnectedGraphs:
    def __init__(self, tiles, dataframes):
        self.tiles = tiles
        self.view_registrations = dataframes['view_registrations']

    def identify_connected_graphs(self):
        """
        Build an undirected graph of tiles and return its connected components
        """
        view_to_tile = {t['view']: t for t in self.tiles}

        # undirected adjacency by view_id
        adj = {v: set() for v in view_to_tile}
        for t in self.tiles:
            v = t['view']
            for conn in t.get('connected_tiles', []):
                nv = conn['view'] if isinstance(conn, dict) else conn
                if nv in view_to_tile and nv != v:
                    adj[v].add(nv)
                    adj[nv].add(v)

        # iterative depth-first search over the adjacency map
        graphs, visited = [], set()
        for v in adj:
            if v in visited:
                continue
            stack = [v]
            comp_views = {v}
            visited.add(v)
            while stack:
                u = stack.pop()
                for w in adj[u]:
                    if w not in visited:
                        visited.add(w)
                        comp_views.add(w)
                        stack.append(w)

            comp = sorted(comp_views)
            graphs.append([view_to_tile[x] for x in comp])

        return graphs

    def assemble_views(self, connected):
        """
        Normalize a collection of connected tiles to a set of view IDs
        """
        if connected and isinstance(next(iter(connected)), dict):
            connected_view_ids = {t['view'] for t in connected}
        else:
            connected_view_ids = set(connected)

        group_views = [t['view'] for t in self.tiles if t['view'] in connected_view_ids]

        return {'views': group_views}

    def create_wlpmc(self):
        """
        Initialize weak link point match correspondences
        """
        views = [t['view'] for t in self.tiles]
        return {
            'view': views,
            'models': self.tiles,
            'overlap_detection': None,
            'view_registrations': self.view_registrations
        }

    def label_subsets(self, group):
        """
        Sort a group's view IDs by setup, then collapse consecutive setup numbers into ranges
        """
        vs = sorted(group['views'], key=lambda v: int(v.partition('setup:')[2].split()[0]))
        tp = int(vs[0].partition('timepoint:')[2].split(',')[0]) if vs else 0

        nums = sorted({int(v.partition('setup:')[2].split()[0]) for v in vs})
        labels, starts = [], []
        if nums:
            s = e = nums[0]
            for x in nums[1:]:
                if x == e + 1:
                    e = x
                else:
                    labels.append(f"{tp}-{s} >-> {tp}-{e}")
                    starts.append(s)
                    s = e = x
            labels.append(f"{tp}-{s} >-> {tp}-{e}")
            starts.append(s)

        group['views'] = vs
        group['subset_labels'] = labels
        return group

    def run(self):
        """
        Executes the entry point of the script.
        """
        graph_sets = self.identify_connected_graphs()
        groups_new = []

        if len(graph_sets) == 1:
            return self.tiles, groups_new
        else:
            for connected in graph_sets:
                group = self.assemble_views(connected)
                group = self.label_subsets(group)
                groups_new.append(group)

            wlpmc = self.create_wlpmc()
            return wlpmc, groups_new
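A minimal sketch of the component split on a toy tile set (two disconnected pairs; the view_registrations value is a placeholder, since run() only stores it in the wlpmc dict):

from Rhapso.solver.connected_graphs import ConnectedGraphs

tiles = [
    {'view': 'timepoint: 0, setup: 0', 'connected_tiles': ['timepoint: 0, setup: 1']},
    {'view': 'timepoint: 0, setup: 1', 'connected_tiles': []},
    {'view': 'timepoint: 0, setup: 2', 'connected_tiles': ['timepoint: 0, setup: 3']},
    {'view': 'timepoint: 0, setup: 3', 'connected_tiles': []},
]

cg = ConnectedGraphs(tiles, {'view_registrations': None})
wlpmc, groups = cg.run()

# Two components -> two groups; consecutive setups collapse into range labels:
# groups[0]['subset_labels'] == ['0-0 >-> 0-1']
# groups[1]['subset_labels'] == ['0-2 >-> 0-3']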
Rhapso/solver/data_prep.py
@@ -0,0 +1,181 @@
import zarr
import json
import os
import s3fs

"""
Data Prep fetches and preps N5 interest point data
"""

class DataPrep():
    def __init__(self, interest_points_df, view_transform_matrices, xml_file_path, n5_input_path):
        self.interest_points_df = interest_points_df
        self.view_transform_matrices = view_transform_matrices
        self.xml_file_path = xml_file_path
        self.n5_input_path = n5_input_path

        self.connected_views = {}
        self.corresponding_interest_points = {}
        self.interest_points = {}
        self.label_map_global = {}

    def get_connected_views_from_n5(self):
        """
        Loads connected view mappings from N5 metadata, supporting both S3 and local sources.
        """
        for _, row in self.interest_points_df.iterrows():
            view_id = f"timepoint: {row['timepoint']}, setup: {row['setup']}"

            if self.n5_input_path.startswith("s3://"):
                s3 = s3fs.S3FileSystem(anon=False)
                root = self.n5_input_path.replace("s3://", "", 1)
                path = root + "interestpoints.n5"
                store = s3fs.S3Map(root=path, s3=s3)
                root = zarr.open(store, mode='r')
                correspondences_key = f"{row['path']}/correspondences"
                try:
                    self.connected_views[view_id] = root[correspondences_key].attrs["idMap"]
                except Exception:
                    print(f"No connected views for tile {view_id}")

            else:
                n5_root = os.path.join(self.n5_input_path, "interestpoints.n5")
                store = zarr.N5Store(n5_root)
                root = zarr.open(store, mode="r")
                correspondences_key = f"{row['path']}/correspondences"
                try:
                    self.connected_views[view_id] = self.load_json_data(correspondences_key)
                except Exception:
                    print(f"No connected views for tile {view_id}")

    def load_json_data(self, json_path):
        try:
            path = self.n5_input_path + json_path
            if not os.path.exists(path):
                return {}
            with open(path, 'r') as f:
                obj = json.load(f)
            id_map = obj.get('idMap', {})
            return id_map if isinstance(id_map, dict) else {}
        except Exception:
            return {}

    def get_corresponding_data_from_n5(self):
        """
        Parses and transforms corresponding interest point data from N5 format into world space coordinates.
        """
        if self.n5_input_path.startswith("s3://"):
            s3 = s3fs.S3FileSystem(anon=False)
            root = self.n5_input_path.replace("s3://", "", 1)
            path = root + "interestpoints.n5"
            store = s3fs.S3Map(root=path, s3=s3)
        else:
            path = self.n5_input_path + "interestpoints.n5"
            store = zarr.N5Store(path)

        root = zarr.open(store, mode='r')

        for _, row in self.interest_points_df.iterrows():
            view_id = f"timepoint: {row['timepoint']}, setup: {row['setup']}"
            correspondences_prefix = f"{row['path']}/correspondences"
            attributes_path = 'interestpoints.n5' + f"/{row['path']}/correspondences/attributes.json"

            # Load JSON data for idMap
            if self.n5_input_path.startswith("s3://"):
                try:
                    id_map = root[correspondences_prefix].attrs['idMap']
                except Exception:
                    continue
            else:
                id_map = self.load_json_data(attributes_path)
                if not id_map:
                    continue

            try:
                interest_points_index_map = root[correspondences_prefix + '/data'][:]
            except (KeyError, FileNotFoundError, AttributeError, TypeError):
                print(f"⚠️ Skipping {view_id}: missing correspondences.")
                continue

            # Load corresponding interest points data
            for ip_index, corr_index, corr_group_id in interest_points_index_map:
                if corr_group_id == view_id:
                    continue

                corresponding_view_id = next((k for k, v in id_map.items() if v == int(corr_group_id)), None)
                parts = corresponding_view_id.split(',')
                timepoint, setup, label = parts[0], parts[1], parts[2]
                corresponding_view_id = f"timepoint: {timepoint}, setup: {setup}"

                ip = self.interest_points[view_id][label][int(ip_index)]
                corr_ip = self.interest_points[corresponding_view_id][label][int(corr_index)]

                if view_id not in self.corresponding_interest_points:
                    self.corresponding_interest_points[view_id] = []

                self.corresponding_interest_points[view_id].append({
                    "detection_id": ip_index,
                    "detection_p1": ip,
                    "corresponding_detection_id": corr_index,
                    "corresponding_detection_p2": corr_ip,
                    "corresponding_view_id": corresponding_view_id,
                    "label": label
                })

    def get_all_interest_points_from_n5(self):
        """
        Loads raw interest point coordinates from N5 storage into memory, keyed by view ID.
        """
        if self.n5_input_path.startswith("s3://"):
            s3 = s3fs.S3FileSystem(anon=False)
            root = self.n5_input_path.replace("s3://", "", 1)
            path = root + "interestpoints.n5"
            store = s3fs.S3Map(root=path, s3=s3)
        else:
            path = self.n5_input_path + "interestpoints.n5"
            store = zarr.N5Store(path)

        root = zarr.open(store, mode='r')

        for _, row in self.interest_points_df.iterrows():
            view_id = f"timepoint: {row['timepoint']}, setup: {row['setup']}"
            interestpoints_prefix = f"{row['path']}/interestpoints/loc/"
            interest_points = root[interestpoints_prefix][:]
            # interest_points = root[interestpoints_prefix][:] if interestpoints_prefix in root else np.empty((0, 3), dtype=np.float32)
            label = str(row['path']).replace('\\', '/').lstrip('/').split('/', 2)[1]
            self.interest_points.setdefault(view_id, {})[label] = interest_points

    def build_label_map(self):
        """
        Constructs a mapping of labels for each view ID from the interest points dataframe.
        """
        for _, row in self.interest_points_df.iterrows():
            view_id_key = f"timepoint: {row['timepoint']}, setup: {row['setup']}"

            if view_id_key not in self.label_map_global:
                self.label_map_global[view_id_key] = {}

            self.label_map_global[view_id_key][row['label']] = 1.0

    def run(self):
        """
        Executes the entry point of the script.
        """
        self.build_label_map()
        self.get_all_interest_points_from_n5()
        self.get_corresponding_data_from_n5()
        self.get_connected_views_from_n5()

        view_id_set = set()
        for k in self.corresponding_interest_points.keys():
            try:
                parts = [p.strip() for p in k.split(',')]
                tp = parts[0].split(':')[-1].strip()
                su = parts[1].split(':')[-1].strip()
                view_id_set.add((str(tp), str(su)))
            except Exception:
                continue

        self.view_id_set = sorted(view_id_set, key=lambda x: (int(x[0]), int(x[1])))

        return self.connected_views, self.corresponding_interest_points, self.interest_points, self.label_map_global, self.view_id_set
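A minimal invocation sketch. The dataframe columns DataPrep reads are timepoint, setup, label, and path; the path values below follow the usual BigStitcher interestpoints.n5 layout (tpId_<t>_viewSetupId_<s>/<label>) and are illustrative, as are the file paths, so an existing interestpoints.n5 under n5_input_path is required for run() to succeed. Note that n5_input_path is joined by plain string concatenation in places, so the local path should end with a separator:

import pandas as pd
from Rhapso.solver.data_prep import DataPrep

interest_points_df = pd.DataFrame([
    {'timepoint': 0, 'setup': 0, 'label': 'beads', 'path': 'tpId_0_viewSetupId_0/beads'},
    {'timepoint': 0, 'setup': 1, 'label': 'beads', 'path': 'tpId_0_viewSetupId_1/beads'},
])

prep = DataPrep(
    interest_points_df=interest_points_df,
    view_transform_matrices={},         # stored; not read in this module
    xml_file_path='/data/dataset.xml',  # hypothetical
    n5_input_path='/data/',             # hypothetical; 'interestpoints.n5' is appended
)
connected_views, correspondences, points, label_map, view_ids = prep.run()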