Rhapso 0.1.92__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Rhapso/__init__.py +1 -0
- Rhapso/data_prep/__init__.py +2 -0
- Rhapso/data_prep/n5_reader.py +188 -0
- Rhapso/data_prep/s3_big_stitcher_reader.py +55 -0
- Rhapso/data_prep/xml_to_dataframe.py +215 -0
- Rhapso/detection/__init__.py +5 -0
- Rhapso/detection/advanced_refinement.py +203 -0
- Rhapso/detection/difference_of_gaussian.py +324 -0
- Rhapso/detection/image_reader.py +117 -0
- Rhapso/detection/metadata_builder.py +130 -0
- Rhapso/detection/overlap_detection.py +327 -0
- Rhapso/detection/points_validation.py +49 -0
- Rhapso/detection/save_interest_points.py +265 -0
- Rhapso/detection/view_transform_models.py +67 -0
- Rhapso/fusion/__init__.py +0 -0
- Rhapso/fusion/affine_fusion/__init__.py +2 -0
- Rhapso/fusion/affine_fusion/blend.py +289 -0
- Rhapso/fusion/affine_fusion/fusion.py +601 -0
- Rhapso/fusion/affine_fusion/geometry.py +159 -0
- Rhapso/fusion/affine_fusion/io.py +546 -0
- Rhapso/fusion/affine_fusion/script_utils.py +111 -0
- Rhapso/fusion/affine_fusion/setup.py +4 -0
- Rhapso/fusion/affine_fusion_worker.py +234 -0
- Rhapso/fusion/multiscale/__init__.py +0 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/__init__.py +19 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/czi_to_zarr.py +698 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/compress/zarr_writer.py +265 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/models.py +81 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/utils/utils.py +526 -0
- Rhapso/fusion/multiscale/aind_hcr_data_transformation/zeiss_job.py +249 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/__init__.py +21 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/array_to_zarr.py +257 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/radial_correction.py +557 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/run_capsule.py +98 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/__init__.py +3 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/utils/utils.py +266 -0
- Rhapso/fusion/multiscale/aind_z1_radial_correction/worker.py +89 -0
- Rhapso/fusion/multiscale_worker.py +113 -0
- Rhapso/fusion/neuroglancer_link_gen/__init__.py +8 -0
- Rhapso/fusion/neuroglancer_link_gen/dispim_link.py +235 -0
- Rhapso/fusion/neuroglancer_link_gen/exaspim_link.py +127 -0
- Rhapso/fusion/neuroglancer_link_gen/hcr_link.py +368 -0
- Rhapso/fusion/neuroglancer_link_gen/iSPIM_top.py +47 -0
- Rhapso/fusion/neuroglancer_link_gen/link_utils.py +239 -0
- Rhapso/fusion/neuroglancer_link_gen/main.py +299 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_layer.py +1434 -0
- Rhapso/fusion/neuroglancer_link_gen/ng_state.py +1123 -0
- Rhapso/fusion/neuroglancer_link_gen/parsers.py +336 -0
- Rhapso/fusion/neuroglancer_link_gen/raw_link.py +116 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/__init__.py +4 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/shader_utils.py +85 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/transfer.py +43 -0
- Rhapso/fusion/neuroglancer_link_gen/utils/utils.py +303 -0
- Rhapso/fusion/neuroglancer_link_gen_worker.py +30 -0
- Rhapso/matching/__init__.py +0 -0
- Rhapso/matching/load_and_transform_points.py +458 -0
- Rhapso/matching/ransac_matching.py +544 -0
- Rhapso/matching/save_matches.py +120 -0
- Rhapso/matching/xml_parser.py +302 -0
- Rhapso/pipelines/__init__.py +0 -0
- Rhapso/pipelines/ray/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/__init__.py +0 -0
- Rhapso/pipelines/ray/aws/alignment_pipeline.py +227 -0
- Rhapso/pipelines/ray/aws/config/__init__.py +0 -0
- Rhapso/pipelines/ray/evaluation.py +71 -0
- Rhapso/pipelines/ray/interest_point_detection.py +137 -0
- Rhapso/pipelines/ray/interest_point_matching.py +110 -0
- Rhapso/pipelines/ray/local/__init__.py +0 -0
- Rhapso/pipelines/ray/local/alignment_pipeline.py +167 -0
- Rhapso/pipelines/ray/matching_stats.py +104 -0
- Rhapso/pipelines/ray/param/__init__.py +0 -0
- Rhapso/pipelines/ray/solver.py +120 -0
- Rhapso/pipelines/ray/split_dataset.py +78 -0
- Rhapso/solver/__init__.py +0 -0
- Rhapso/solver/compute_tiles.py +562 -0
- Rhapso/solver/concatenate_models.py +116 -0
- Rhapso/solver/connected_graphs.py +111 -0
- Rhapso/solver/data_prep.py +181 -0
- Rhapso/solver/global_optimization.py +410 -0
- Rhapso/solver/model_and_tile_setup.py +109 -0
- Rhapso/solver/pre_align_tiles.py +323 -0
- Rhapso/solver/save_results.py +97 -0
- Rhapso/solver/view_transforms.py +75 -0
- Rhapso/solver/xml_to_dataframe_solver.py +213 -0
- Rhapso/split_dataset/__init__.py +0 -0
- Rhapso/split_dataset/compute_grid_rules.py +78 -0
- Rhapso/split_dataset/save_points.py +101 -0
- Rhapso/split_dataset/save_xml.py +377 -0
- Rhapso/split_dataset/split_images.py +537 -0
- Rhapso/split_dataset/xml_to_dataframe_split.py +219 -0
- rhapso-0.1.92.dist-info/METADATA +39 -0
- rhapso-0.1.92.dist-info/RECORD +101 -0
- rhapso-0.1.92.dist-info/WHEEL +5 -0
- rhapso-0.1.92.dist-info/licenses/LICENSE +21 -0
- rhapso-0.1.92.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_detection.py +17 -0
- tests/test_matching.py +21 -0
- tests/test_solving.py +21 -0
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
from Rhapso.data_prep.xml_to_dataframe import XMLToDataFrame
|
|
2
|
+
from Rhapso.detection.view_transform_models import ViewTransformModels
|
|
3
|
+
from Rhapso.detection.overlap_detection import OverlapDetection
|
|
4
|
+
from Rhapso.detection.metadata_builder import MetadataBuilder
|
|
5
|
+
from Rhapso.detection.image_reader import ImageReader
|
|
6
|
+
from Rhapso.detection.difference_of_gaussian import DifferenceOfGaussian
|
|
7
|
+
from Rhapso.detection.advanced_refinement import AdvancedRefinement
|
|
8
|
+
from Rhapso.detection.points_validation import PointsValidation
|
|
9
|
+
from Rhapso.detection.save_interest_points import SaveInterestPoints
|
|
10
|
+
import boto3
|
|
11
|
+
import ray
|
|
12
|
+
|
|
13
|
+
# This class implements the interest point detection pipeline
|
|
14
|
+
|
|
15
|
+
class InterestPointDetection:
    """Interest point detection pipeline.

    Loads a dataset XML (from S3 or local disk), computes per-view transform
    models and overlap regions, chunks the overlapping image areas, runs
    difference-of-Gaussian peak detection on each chunk in parallel with Ray,
    refines/deduplicates the detected points, and saves the final interest
    points alongside an updated XML.
    """

    def __init__(self, dsxy, dsz, min_intensity, max_intensity, sigma, threshold, file_type, xml_file_path,
                 image_file_prefix, xml_output_file_path, n5_output_file_prefix, combine_distance, chunks_per_bound,
                 run_type, max_spots, median_filter):
        # Downsampling factors in XY and Z used when reading image data.
        self.dsxy = dsxy
        self.dsz = dsz
        # Intensity window applied before peak detection.
        self.min_intensity = min_intensity
        self.max_intensity = max_intensity
        # Difference-of-Gaussian parameters.
        self.sigma = sigma
        self.threshold = threshold
        # Image backend identifier consumed by ImageReader (e.g. zarr/n5).
        self.file_type = file_type
        self.xml_file_path = xml_file_path
        self.image_file_prefix = image_file_prefix
        self.xml_output_file_path = xml_output_file_path
        self.n5_output_file_prefix = n5_output_file_prefix
        # Distance below which nearby detections are merged during refinement.
        self.combine_distance = combine_distance
        self.chunks_per_bound = chunks_per_bound
        self.run_type = run_type
        # Cap on the number of spots kept per view during refinement.
        self.max_spots = max_spots
        self.median_filter = median_filter

    def detection(self):
        """Run the full detection pipeline end to end (side effects: prints progress, writes outputs)."""
        # Get XML file -- supports both s3:// URIs and local paths.
        if self.xml_file_path.startswith("s3://"):
            no_scheme = self.xml_file_path.replace("s3://", "", 1)
            bucket, key = no_scheme.split("/", 1)
            s3 = boto3.client("s3")
            response = s3.get_object(Bucket=bucket, Key=key)
            xml_file = response["Body"].read().decode("utf-8")
        else:
            with open(self.xml_file_path, "r", encoding="utf-8") as f:
                xml_file = f.read()

        # Load XML data into dataframes
        processor = XMLToDataFrame(xml_file)
        dataframes = processor.run()
        print("XML loaded")

        # Create view transform matrices
        create_models = ViewTransformModels(dataframes)
        view_transform_matrices = create_models.run()
        print("Transforms models have been created")

        # Use view transform matrices to find areas of overlap
        overlap_detection = OverlapDetection(view_transform_matrices, dataframes, self.dsxy, self.dsz,
                                             self.image_file_prefix, self.file_type)
        overlapping_area, new_dsxy, new_dsz, level, max_interval_size, mip_map_downsample = overlap_detection.run()
        print("Overlap detection is done")

        # Implement image chunking strategy as list of metadata
        metadata_loader = MetadataBuilder(dataframes, overlapping_area, self.image_file_prefix, self.file_type,
                                          new_dsxy, new_dsz, self.chunks_per_bound, self.sigma, self.run_type, level)
        image_chunk_metadata = metadata_loader.run()
        print("Metadata has loaded")

        # Use Ray to distribute peak detection over the image chunk metadata.
        # NOTE: file_type is passed explicitly instead of reading self.file_type
        # inside the remote function -- referencing `self` in the closure forced
        # Ray to serialize the entire pipeline object with the task.
        @ray.remote
        def process_peak_detection_task(chunk_metadata, new_dsxy, new_dsz, min_intensity, max_intensity, sigma,
                                        threshold, median_filter, mip_map_downsample, file_type):
            try:
                difference_of_gaussian = DifferenceOfGaussian(min_intensity, max_intensity, sigma, threshold,
                                                              median_filter, mip_map_downsample)
                image_fetcher = ImageReader(file_type)
                view_id, interval, image_chunk, offset, lb = image_fetcher.run(chunk_metadata, new_dsxy, new_dsz)
                interest_points = difference_of_gaussian.run(image_chunk, offset, lb)

                return {
                    'view_id': view_id,
                    'interval_key': interval,
                    'interest_points': interest_points['interest_points'],
                    'intensities': interest_points['intensities']
                }
            except Exception as e:
                # Report the failure per-chunk rather than aborting the whole run.
                return {'error': str(e), 'view_id': chunk_metadata.get('view_id', 'unknown')}

        # Submit tasks to Ray
        futures = [process_peak_detection_task.remote(chunk_metadata, new_dsxy, new_dsz, self.min_intensity,
                                                      self.max_intensity, self.sigma, self.threshold,
                                                      self.median_filter, mip_map_downsample, self.file_type)
                   for chunk_metadata in image_chunk_metadata]

        # Gather and process results; keep successes, but make failures visible
        # instead of silently discarding them.
        results = ray.get(futures)
        failed = [r for r in results if 'error' in r]
        if failed:
            print(f"WARNING: {len(failed)} chunk(s) failed peak detection")
        final_peaks = [r for r in results if 'error' not in r]
        print("Peak detection is done")

        # Consolidate points and filter overlap duplicates using kd tree
        advanced_refinement = AdvancedRefinement(final_peaks, self.combine_distance, dataframes, overlapping_area,
                                                 max_interval_size, self.max_spots)
        consolidated_data = advanced_refinement.run()
        print("Advanced refinement is done")

        # Print points metrics / validation tools
        points_validation = PointsValidation(consolidated_data)
        points_validation.run()
        print("Points metrics printed")

        # Save final interest points
        save_interest_points = SaveInterestPoints(dataframes, consolidated_data, self.xml_file_path,
                                                  self.xml_output_file_path, self.n5_output_file_prefix,
                                                  self.dsxy, self.dsz, self.min_intensity, self.max_intensity,
                                                  self.sigma, self.threshold)
        save_interest_points.run()
        print("Interest points saved")

    def run(self):
        """Entry point: execute the detection pipeline."""
        self.detection()
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
from Rhapso.matching.xml_parser import XMLParserMatching
|
|
2
|
+
from Rhapso.matching.load_and_transform_points import LoadAndTransformPoints
|
|
3
|
+
from Rhapso.matching.ransac_matching import RansacMatching
|
|
4
|
+
from Rhapso.matching.save_matches import SaveMatches
|
|
5
|
+
import ray
|
|
6
|
+
|
|
7
|
+
class InterestPointMatching:
    """Interest point matching pipeline.

    Parses the dataset XML, loads interest points and transforms them into
    global space, then matches point pairs per view-pair with RANSAC in
    parallel using Ray, and saves the resulting correspondences as N5.
    """

    def __init__(self, xml_input_path, n5_output_path, input_type, match_type, num_neighbors, redundancy, significance,
                 search_radius, num_required_neighbors, model_min_matches, inlier_factor, lambda_value, num_iterations,
                 regularization_weight, image_file_prefix):
        self.xml_input_path = xml_input_path
        self.n5_output_path = n5_output_path
        self.input_type = input_type
        # Transformation model type (e.g. rigid/affine) used during matching.
        self.match_type = match_type
        # Descriptor / candidate-search parameters.
        self.num_neighbors = num_neighbors
        self.redundancy = redundancy
        self.significance = significance
        self.search_radius = search_radius
        self.num_required_neighbors = num_required_neighbors
        # Minimum inlier count for a pairwise model to be kept.
        self.model_min_matches = model_min_matches
        # RANSAC / regularization parameters.
        self.inlier_factor = inlier_factor
        self.lambda_value = lambda_value
        self.num_iterations = num_iterations
        self.regularization_weight = regularization_weight
        self.image_file_prefix = image_file_prefix

    def match(self):
        """Run the full matching pipeline end to end (side effects: prints progress, writes N5)."""
        # Load XML
        parser = XMLParserMatching(self.xml_input_path, self.input_type)
        data_global = parser.run()
        print("XML loaded and parsed")

        # Load and transform points
        data_loader = LoadAndTransformPoints(data_global, self.xml_input_path, self.n5_output_path, self.match_type)
        process_pairs, view_registrations = data_loader.run()
        print("Points loaded and transformed into global space")

        # Distribute interest point matching with Ray.
        # NOTE: data_global is passed as an argument and the inlier cutoff uses
        # the `model_min_matches` parameter. The previous version read
        # `self.model_min_matches` and captured `data_global`/`self` in the
        # closure, which was inconsistent with the explicit parameters and
        # forced Ray to serialize the whole pipeline object.
        @ray.remote
        def match_pair(pointsA, pointsB, viewA_str, viewB_str, label, num_neighbors, redundancy, significance,
                       num_required_neighbors, match_type, inlier_factor, lambda_value, num_iterations,
                       model_min_matches, regularization_weight, search_radius, view_registrations, input_type,
                       image_file_prefix, data_global):

            matcher = RansacMatching(data_global, num_neighbors, redundancy, significance, num_required_neighbors,
                                     match_type, inlier_factor, lambda_value, num_iterations, model_min_matches,
                                     regularization_weight, search_radius, view_registrations, input_type,
                                     image_file_prefix)

            pointsA, pointsB = matcher.filter_for_overlapping_points(pointsA, pointsB, viewA_str, viewB_str)

            # No overlap -> nothing to match for this pair.
            if len(pointsA) == 0 or len(pointsB) == 0:
                return []

            candidates = matcher.get_candidates(pointsA, pointsB, viewA_str, viewB_str, label)
            inliers, regularized_model = matcher.compute_ransac(candidates)
            filtered_inliers = matcher.filter_inliers(inliers, regularized_model)

            percent = 100.0 * len(filtered_inliers) / len(candidates) if candidates else 0
            print(f"✅ RANSAC inlier percentage: {percent:.1f}% ({len(filtered_inliers)} of {len(candidates)} for {viewA_str}), {viewB_str}")

            # Discard pairs with too few inliers to support a model.
            if len(filtered_inliers) < model_min_matches:
                return []

            return filtered_inliers if filtered_inliers else []

        # --- Distribute ---
        futures = [
            match_pair.remote(pointsA, pointsB, viewA_str, viewB_str, label, self.num_neighbors, self.redundancy,
                              self.significance, self.num_required_neighbors, self.match_type, self.inlier_factor,
                              self.lambda_value, self.num_iterations, self.model_min_matches,
                              self.regularization_weight, self.search_radius, view_registrations, self.input_type,
                              self.image_file_prefix, data_global)
            for pointsA, pointsB, viewA_str, viewB_str, label in process_pairs
        ]

        # --- Collect ---
        results = ray.get(futures)
        all_results = [inlier for sublist in results for inlier in sublist]

        # --- Save ---
        saver = SaveMatches(all_results, self.n5_output_path, data_global, self.match_type)
        saver.run()
        print("Matches Saved as N5")

        print("Interest point matching is done")

    def run(self):
        """Entry point: execute the matching pipeline."""
        self.match()
|
|
File without changes
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
from Rhapso.pipelines.ray.interest_point_detection import InterestPointDetection
from Rhapso.pipelines.ray.interest_point_matching import InterestPointMatching
from Rhapso.pipelines.ray.solver import Solver
from Rhapso.pipelines.ray.split_dataset import SplitDataset
import yaml
import ray

# Driver script for the local alignment pipeline: builds every pipeline stage
# from a single YAML config, then runs the selected stage(s) at the bottom.

# Initialize Ray
ray.init()

# Point to param file
# NOTE(review): hard-coded dev config path -- presumably swapped per
# environment/user; confirm before running elsewhere.
with open("Rhapso/pipelines/ray/param/dev/zarr_s3_sean.yml", "r") as file:
    config = yaml.safe_load(file)

# -- INITIALIZE EACH COMPONENT --
# Each component below is constructed up front from `config`; only the stages
# invoked in the "ALIGNMENT PIPELINE" section at the bottom actually run.

# INTEREST POINT DETECTION
interest_point_detection = InterestPointDetection(
    dsxy=config['dsxy'],
    dsz=config['dsz'],
    min_intensity=config['min_intensity'],
    max_intensity=config['max_intensity'],
    sigma=config['sigma'],
    threshold=config['threshold'],
    file_type=config['file_type'],
    xml_file_path=config['xml_file_path_detection'],
    image_file_prefix=config['image_file_prefix'],
    xml_output_file_path=config['xml_output_file_path'],
    n5_output_file_prefix=config['n5_output_file_prefix'],
    combine_distance=config['combine_distance'],
    chunks_per_bound=config['chunks_per_bound'],
    run_type=config['detection_run_type'],
    max_spots=config['max_spots'],
    median_filter=config['median_filter'],
)

# INTEREST POINT MATCHING RIGID
interest_point_matching_rigid = InterestPointMatching(
    xml_input_path=config['xml_file_path_matching_rigid'],
    n5_output_path=config['n5_matching_output_path'],
    input_type = config['input_type'],
    match_type=config['match_type_rigid'],
    num_neighbors=config['num_neighbors_rigid'],
    redundancy=config['redundancy_rigid'],
    significance=config['significance_rigid'],
    search_radius=config['search_radius_rigid'],
    num_required_neighbors=config['num_required_neighbors_rigid'],
    model_min_matches=config['model_min_matches_rigid'],
    inlier_factor=config['inlier_factor_rigid'],
    lambda_value=config['lambda_value_rigid'],
    num_iterations=config['num_iterations_rigid'],
    regularization_weight=config['regularization_weight_rigid'],
    image_file_prefix=config['image_file_prefix'],
)

# INTEREST POINT MATCHING AFFINE
interest_point_matching_affine = InterestPointMatching(
    xml_input_path=config['xml_file_path_matching_affine'],
    n5_output_path=config['n5_matching_output_path'],
    input_type = config['input_type'],
    match_type=config['match_type_affine'],
    num_neighbors=config['num_neighbors_affine'],
    redundancy=config['redundancy_affine'],
    significance=config['significance_affine'],
    search_radius=config['search_radius_affine'],
    num_required_neighbors=config['num_required_neighbors_affine'],
    model_min_matches=config['model_min_matches_affine'],
    inlier_factor=config['inlier_factor_affine'],
    lambda_value=config['lambda_value_affine'],
    num_iterations=config['num_iterations_affine'],
    regularization_weight=config['regularization_weight_affine'],
    image_file_prefix=config['image_file_prefix'],
)

# INTEREST POINT MATCHING SPLIT AFFINE
interest_point_matching_split_affine = InterestPointMatching(
    xml_input_path=config['xml_file_path_matching_split_affine'],
    n5_output_path=config['n5_matching_output_path'],
    input_type = config['input_type'],
    match_type=config['match_type_split_affine'],
    num_neighbors=config['num_neighbors_split_affine'],
    redundancy=config['redundancy_split_affine'],
    significance=config['significance_split_affine'],
    search_radius=config['search_radius_split_affine'],
    num_required_neighbors=config['num_required_neighbors_split_affine'],
    model_min_matches=config['model_min_matches_split_affine'],
    inlier_factor=config['inlier_factor_split_affine'],
    lambda_value=config['lambda_value_split_affine'],
    num_iterations=config['num_iterations_split_affine'],
    regularization_weight=config['regularization_weight_split_affine'],
    image_file_prefix=config['image_file_prefix'],
)

# SOLVER RIGID
solver_rigid = Solver(
    xml_file_path_output=config['xml_file_path_output_rigid'],
    n5_input_path=config['n5_input_path'],
    xml_file_path=config['xml_file_path_solver_rigid'],
    run_type=config['run_type_solver_rigid'],
    relative_threshold=config['relative_threshold'],
    absolute_threshold=config['absolute_threshold'],
    min_matches=config['min_matches'],
    damp=config['damp'],
    max_iterations=config['max_iterations'],
    max_allowed_error=config['max_allowed_error'],
    max_plateauwidth=config['max_plateauwidth'],
    metrics_output_path=config['metrics_output_path'],
    fixed_tile=config['fixed_tile']
)

# SOLVER AFFINE
solver_affine = Solver(
    xml_file_path_output=config['xml_file_path_output_affine'],
    n5_input_path=config['n5_input_path'],
    xml_file_path=config['xml_file_path_solver_affine'],
    run_type=config['run_type_solver_affine'],
    relative_threshold=config['relative_threshold'],
    absolute_threshold=config['absolute_threshold'],
    min_matches=config['min_matches'],
    damp=config['damp'],
    max_iterations=config['max_iterations'],
    max_allowed_error=config['max_allowed_error'],
    max_plateauwidth=config['max_plateauwidth'],
    metrics_output_path=config['metrics_output_path'],
    fixed_tile=config['fixed_tile']
)

# SOLVER SPLIT AFFINE
solver_split_affine = Solver(
    xml_file_path_output=config['xml_file_path_output_split_affine'],
    n5_input_path=config['n5_input_path'],
    xml_file_path=config['xml_file_path_solver_split_affine'],
    run_type=config['run_type_solver_split_affine'],
    relative_threshold=config['relative_threshold'],
    absolute_threshold=config['absolute_threshold'],
    min_matches=config['min_matches'],
    damp=config['damp'],
    max_iterations=config['max_iterations'],
    max_allowed_error=config['max_allowed_error'],
    max_plateauwidth=config['max_plateauwidth'],
    metrics_output_path=config['metrics_output_path'],
    fixed_tile=config['fixed_tile']
)

# SPLIT DATASETS
split_dataset = SplitDataset(
    xml_file_path=config['xml_file_path_split'],
    xml_output_file_path=config['xml_output_file_path_split'],
    n5_path=config['n5_path_split'],
    point_density=config['point_density'],
    min_points=config['min_points'],
    max_points=config['max_points'],
    error=config['error'],
    exclude_radius=config['exclude_radius'],
    target_image_size=config['target_image_size'],
    target_overlap=config['target_overlap'],
)

# -- ALIGNMENT PIPELINE --
# Stages are toggled by (un)commenting. Full order is: detection -> rigid
# matching -> rigid solve -> affine matching -> affine solve -> split ->
# split-affine matching -> split-affine solve. Currently only the rigid
# solver runs.
# interest_point_detection.run()
# interest_point_matching_rigid.run()
solver_rigid.run()
# interest_point_matching_affine.run()
# solver_affine.run()
# split_dataset.run()
# interest_point_matching_split_affine.run()
# solver_split_affine.run()
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from Rhapso.evaluation.alignment_threshold import AlignmentThreshold
|
|
3
|
+
from Rhapso.evaluation.match_retrieval import MatchProcessor
|
|
4
|
+
from Rhapso.evaluation.matching_KDE import MatchingKDE
|
|
5
|
+
from Rhapso.evaluation.matching_descriptors import DescriptiveStatsMatching
|
|
6
|
+
from Rhapso.evaluation.matching_voxel_vis import VoxelVis
|
|
7
|
+
from Rhapso.evaluation.matching_voxelization import Voxelizer
|
|
8
|
+
from Rhapso.evaluation.save_metrics import JSONFileHandler
|
|
9
|
+
from Rhapso.evaluation.threshold import Threshold
|
|
10
|
+
from Rhapso.evaluation.total_ips import DetectionOutput
|
|
11
|
+
|
|
12
|
+
class StatsPipeline:
    """Evaluation pipeline: computes and persists alignment-quality metrics.

    Always runs detection-output and descriptive match statistics; the voxel,
    voxel-visualization, and KDE stages are toggled via ``args``, and the
    threshold checks via ``thresholding``. Results are written through
    ``JSONFileHandler`` to ``metrics_output_path``.
    """

    def __init__(self, args, xml_file, base_path, metrics_output_path, file_source, xml_bucket_name, KDE_type, bandwidth, view_id, pair, plot,
                 thresholding, min_alignment, max_alignment, minimum_points, maximum_points, minimum_total_matches, maximum_total_matches, max_kde, min_kde, max_cv, min_cv ):
        # Stage toggles and I/O locations.
        self.args = args
        self.xml_file = xml_file
        self.base_path = base_path
        self.metrics_output_path = metrics_output_path
        self.file_source = file_source
        self.xml_bucket_name = xml_bucket_name
        self.thresholding = thresholding

        # KDE parameters
        self.KDE_type = KDE_type
        self.bandwidth = bandwidth
        self.view_id = view_id
        self.pair = pair
        self.plot = plot

        # Threshold bounds for alignment, point counts, matches, KDE and CV.
        self.min_alignment = min_alignment
        self.max_alignment = max_alignment
        self.minimum_points = minimum_points
        self.maximum_points = maximum_points
        self.minimum_total_matches = minimum_total_matches
        self.maximum_total_matches = maximum_total_matches
        self.max_kde = max_kde
        self.min_kde = min_kde
        self.max_cv = max_cv
        self.min_cv = min_cv

    def run(self):
        """Execute every metric stage enabled via ``args``/``thresholding``."""
        # Summarize raw detection output first.
        ips_summary = DetectionOutput(self.base_path, self.xml_file, self.metrics_output_path)
        ips_summary.run()
        print("Detection output complete")

        # Retrieve per-pair matches and the overall match count.
        match_reader = MatchProcessor(self.base_path, self.xml_file)
        matches, total_matches = match_reader.run()

        # Descriptive statistics over the matches, persisted as JSON.
        stats = DescriptiveStatsMatching(matches, total_matches)
        metrics_file = JSONFileHandler(self.metrics_output_path)
        points = stats.get_matches()
        metrics_file.update("Descriptive stats", stats.results())
        print("Descriptive statistics complete")

        # Optional voxelization statistics.
        if self.args["voxel"]:
            voxel_info = Voxelizer(points, 10).compute_statistics()
            metrics_file.update("Voxelization stats", voxel_info)
            print("Voxel statistics complete")

        # Optional voxel visualization.
        if self.args["voxel_vis"]:
            VoxelVis(("30", "0"), matches).run_voxel_vis()
            print("Voxel visualization complete")

        # Optional kernel-density-estimate analysis of the matches.
        if self.args["KDE"]:
            kde_model = MatchingKDE(matches, self.KDE_type, self.bandwidth, self.view_id, self.pair, self.plot)
            metrics_file.update("KDE", kde_model.get_data())
            print("KDE computation complete")

        # Optional threshold checks against the collected metrics.
        if self.thresholding:
            checks = Threshold(
                self.minimum_points,
                self.maximum_points,
                self.minimum_total_matches,
                self.maximum_total_matches,
                self.max_kde,
                self.min_kde,
                self.max_cv,
                self.min_cv,
                self.metrics_output_path,
            )

            checks.get_metric_json()
            checks.run_threshold_checks()
            # Will error out if solve has not already been ran
            # alignmentThreshold = AlignmentThreshold(
            #     min_alignment, max_alignment, metrics_output_path
            # )
            # alignmentThreshold.check_alignment()
            print("Thresholding complete")

        print("All requested metrics are complete")
|
|
103
|
+
|
|
104
|
+
|
|
File without changes
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
from Rhapso.solver.xml_to_dataframe_solver import XMLToDataFrameSolver
|
|
2
|
+
from Rhapso.solver.global_optimization import GlobalOptimization
|
|
3
|
+
from Rhapso.solver.view_transforms import ViewTransformModels
|
|
4
|
+
from Rhapso.solver.data_prep import DataPrep
|
|
5
|
+
from Rhapso.solver.model_and_tile_setup import ModelAndTileSetup
|
|
6
|
+
from Rhapso.solver.compute_tiles import ComputeTiles
|
|
7
|
+
from Rhapso.solver.pre_align_tiles import PreAlignTiles
|
|
8
|
+
from Rhapso.solver.connected_graphs import ConnectedGraphs
|
|
9
|
+
from Rhapso.solver.concatenate_models import ConcatenateModels
|
|
10
|
+
from Rhapso.solver.save_results import SaveResults
|
|
11
|
+
import boto3
|
|
12
|
+
|
|
13
|
+
"""
|
|
14
|
+
This class implements the Solver pipeline for rigid, affine, and split-affine optimizations
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
class Solver:
    """Orchestrates the Rhapso solver pipeline for rigid, affine, and split-affine runs.

    The pipeline loads a BigStitcher-style XML (from S3 or local disk), builds
    view-transform models and interest-point data, pre-aligns tiles, runs a
    global optimization, optionally performs a second split-affine grouping
    round, and writes the resulting per-view affine matrices back to XML.
    """

    def __init__(self, xml_file_path_output, n5_input_path, xml_file_path, run_type, relative_threshold, absolute_threshold,
                 min_matches, damp, max_iterations, max_allowed_error, max_plateauwidth, metrics_output_path, fixed_tile):
        """Store pipeline configuration.

        Args:
            xml_file_path_output: Destination path for the updated XML.
            n5_input_path: Root of the N5 dataset holding interest points.
            xml_file_path: Input XML path; ``s3://`` URIs are fetched via boto3.
            run_type: One of ``"rigid"``, ``"affine"``, or ``"split-affine"``.
            relative_threshold: Relative error threshold for optimization.
            absolute_threshold: Absolute error threshold for optimization.
            min_matches: Minimum point matches required between a pair of tiles.
            damp: Damping factor applied during optimization.
            max_iterations: Iteration cap for the global optimizer.
            max_allowed_error: Error ceiling accepted by the optimizer.
            max_plateauwidth: Plateau width used as a convergence criterion.
            metrics_output_path: Where optimization metrics are written.
            fixed_tile: Tile held fixed to anchor the alignment.
        """
        self.xml_file_path_output = xml_file_path_output
        self.n5_input_path = n5_input_path
        self.xml_file_path = xml_file_path
        self.run_type = run_type
        self.relative_threshold = relative_threshold
        self.absolute_threshold = absolute_threshold
        self.min_matches = min_matches
        self.damp = damp
        self.max_iterations = max_iterations
        self.max_allowed_error = max_allowed_error
        self.max_plateauwidth = max_plateauwidth
        self.metrics_output_path = metrics_output_path
        self.fixed_tile = fixed_tile
        # Groups are only produced by the split-affine second round; the first
        # ComputeTiles pass receives None.
        self.groups = None
        # Single shared S3 client, reused by solve() for XML retrieval.
        self.s3 = boto3.client('s3')

    def solve(self):
        """Run the full solver pipeline end to end."""
        # Get XML file -- either from S3 (s3:// URI) or the local filesystem.
        if self.xml_file_path.startswith("s3://"):
            no_scheme = self.xml_file_path.replace("s3://", "", 1)
            bucket, key = no_scheme.split("/", 1)
            # Reuse the client constructed in __init__ rather than creating a
            # second throwaway boto3 client here.
            response = self.s3.get_object(Bucket=bucket, Key=key)
            xml_file = response["Body"].read().decode("utf-8")
        else:
            with open(self.xml_file_path, "r", encoding="utf-8") as f:
                xml_file = f.read()

        # Load XML data into dataframes
        processor = XMLToDataFrameSolver(xml_file)
        dataframes = processor.run()
        print("XML loaded")

        # Get affine matrices from view registration dataframe
        create_models = ViewTransformModels(dataframes)
        view_transform_matrices = create_models.run()
        print("Transforms models have been created")

        # Get data from n5 folders
        data_prep = DataPrep(dataframes['view_interest_points'], view_transform_matrices, self.xml_file_path,
                             self.n5_input_path)
        connected_views, corresponding_interest_points, interest_points, label_map_global, view_id_set = data_prep.run()
        print("Data prep is complete")

        # Create models, tiles, and point matches
        model_and_tile_setup = ModelAndTileSetup(connected_views, corresponding_interest_points, interest_points,
                                                 view_transform_matrices, view_id_set, label_map_global)
        pmc = model_and_tile_setup.run()
        print("Models and tiles created")

        # Find point matches and save to each tile
        compute_tiles = ComputeTiles(pmc, view_id_set, self.groups, dataframes, self.run_type)
        tiles, view_map = compute_tiles.run()
        print("Tiles are computed")

        # Use matches to update transformation matrices to represent rough alignment
        pre_align_tiles = PreAlignTiles(self.min_matches, self.run_type, self.fixed_tile)
        tc = pre_align_tiles.run(tiles)
        print("Tiles are pre-aligned")

        # Update all points with transform models and iterate through all tiles (views) and optimize alignment
        global_optimization = GlobalOptimization(tc, self.relative_threshold, self.absolute_threshold, self.min_matches, self.damp,
                                                 self.max_iterations, self.max_allowed_error, self.max_plateauwidth, self.run_type, self.metrics_output_path)
        tiles, validation_stats = global_optimization.run()
        print("Global optimization complete")

        if self.run_type == "split-affine":
            # Second optimization round: combine splits into connected groups
            # and re-run tile computation, pre-alignment, and optimization.

            # Combine splits into groups
            connected_graphs = ConnectedGraphs(tiles, dataframes)
            wlpmc, groups = connected_graphs.run()
            print("Tiles have been grouped")

            # Find point matches and save to each tile
            compute_tiles = ComputeTiles(wlpmc, view_id_set, groups, dataframes, self.run_type)
            tiles_round_2, view_map = compute_tiles.run()
            print("Tiles are computed")

            # Use matches to update transformation matrices to represent rough alignment
            pre_align_tiles = PreAlignTiles(self.min_matches, self.run_type, self.fixed_tile)
            tc = pre_align_tiles.run(tiles_round_2)
            print("Tiles are pre-aligned")

            # Update all points with transform models and iterate through all tiles (views) and optimize alignment
            global_optimization = GlobalOptimization(tc, self.relative_threshold, self.absolute_threshold, self.min_matches, self.damp,
                                                     self.max_iterations, self.max_allowed_error, self.max_plateauwidth, self.run_type, self.metrics_output_path)
            tiles_round_2, validation_stats_round_2 = global_optimization.run()
            print("Global optimization complete")

            # Combine models/metrics for round 1 and 2
            concatenate_models = ConcatenateModels(tiles, tiles_round_2, groups, validation_stats, validation_stats_round_2, view_map)
            tiles, validation_stats = concatenate_models.run()
            print("Models and metrics have been combined")

        # Save results to xml - one new affine matrix per view registration
        save_results = SaveResults(tiles, xml_file, self.xml_file_path_output, self.run_type, validation_stats, self.n5_input_path)
        save_results.run()
        print("Results have been saved")

    def run(self):
        """Public entry point; delegates to solve()."""
        self.solve()
|