masster 0.3.16__py3-none-any.whl → 0.3.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of masster might be problematic. Click here for more details.
- masster/_version.py +1 -1
- masster/study/defaults/align_def.py +9 -0
- masster/study/load.py +88 -43
- masster/study/processing.py +149 -120
- {masster-0.3.16.dist-info → masster-0.3.17.dist-info}/METADATA +1 -1
- {masster-0.3.16.dist-info → masster-0.3.17.dist-info}/RECORD +9 -9
- {masster-0.3.16.dist-info → masster-0.3.17.dist-info}/WHEEL +0 -0
- {masster-0.3.16.dist-info → masster-0.3.17.dist-info}/entry_points.txt +0 -0
- {masster-0.3.16.dist-info → masster-0.3.17.dist-info}/licenses/LICENSE +0 -0
masster/study/defaults/align_def.py
CHANGED
|
@@ -24,6 +24,7 @@ class align_defaults:
|
|
|
24
24
|
skip_blanks (bool): Whether to skip blank samples. Default is False.
|
|
25
25
|
|
|
26
26
|
KD algorithm specific parameters:
|
|
27
|
+
min_samples (int): Minimum number of samples required for KD alignment. Default is 3.
|
|
27
28
|
nr_partitions (int): Number of partitions in m/z dimension. Default is 100.
|
|
28
29
|
warp_enabled (bool): Enable non-linear retention time transformation. Default is True.
|
|
29
30
|
warp_rt_tol (float): RT tolerance for the LOWESS fit. Default is 5.0.
|
|
@@ -59,6 +60,7 @@ class align_defaults:
|
|
|
59
60
|
algo: str = "pc"
|
|
60
61
|
|
|
61
62
|
# KD algorithm specific parameters
|
|
63
|
+
min_samples: int = 3
|
|
62
64
|
nr_partitions: int = 100
|
|
63
65
|
warp_enabled: bool = True
|
|
64
66
|
warp_rt_tol: float = 5.0
|
|
@@ -137,6 +139,13 @@ class align_defaults:
|
|
|
137
139
|
"allowed_values": ["pc", "kd"],
|
|
138
140
|
},
|
|
139
141
|
# KD algorithm specific parameters
|
|
142
|
+
"min_samples": {
|
|
143
|
+
"dtype": int,
|
|
144
|
+
"description": "Minimum number of samples required for KD alignment algorithm",
|
|
145
|
+
"default": 3,
|
|
146
|
+
"min_value": 2,
|
|
147
|
+
"max_value": 1000,
|
|
148
|
+
},
|
|
140
149
|
"nr_partitions": {
|
|
141
150
|
"dtype": int,
|
|
142
151
|
"description": "Number of partitions in m/z dimension for KD algorithm",
|
masster/study/load.py
CHANGED
|
@@ -961,51 +961,96 @@ def _get_missing_consensus_sample_combinations(self, uids):
|
|
|
961
961
|
"""
|
|
962
962
|
Efficiently identify which consensus_uid/sample combinations are missing.
|
|
963
963
|
Returns a list of tuples: (consensus_uid, sample_uid, sample_name, sample_path)
|
|
964
|
+
|
|
965
|
+
Optimized for common scenarios:
|
|
966
|
+
- Early termination for fully-filled studies
|
|
967
|
+
- Efficient dictionary lookups instead of expensive DataFrame joins
|
|
968
|
+
- Smart handling of sparse vs dense missing data patterns
|
|
964
969
|
"""
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
# Get existing consensus/sample combinations from consensus_mapping_df
|
|
981
|
-
existing_combinations = set()
|
|
982
|
-
consensus_mapping_filtered = self.consensus_mapping_df.filter(
|
|
983
|
-
pl.col("consensus_uid").is_in(list(consensus_uids_set)),
|
|
984
|
-
)
|
|
985
|
-
|
|
986
|
-
# Join with features_df to get sample_uid information
|
|
987
|
-
existing_features = consensus_mapping_filtered.join(
|
|
988
|
-
self.features_df.select(["feature_uid", "sample_uid"]),
|
|
989
|
-
on="feature_uid",
|
|
990
|
-
how="inner",
|
|
970
|
+
if not uids:
|
|
971
|
+
return []
|
|
972
|
+
|
|
973
|
+
n_consensus = len(uids)
|
|
974
|
+
n_samples = len(self.samples_df)
|
|
975
|
+
total_possible = n_consensus * n_samples
|
|
976
|
+
|
|
977
|
+
# Quick early termination check for fully/nearly filled studies
|
|
978
|
+
# This handles the common case where fill() is run on an already-filled study
|
|
979
|
+
consensus_counts = (
|
|
980
|
+
self.consensus_mapping_df
|
|
981
|
+
.filter(pl.col("consensus_uid").is_in(uids))
|
|
982
|
+
.group_by("consensus_uid")
|
|
983
|
+
.agg(pl.count("feature_uid").alias("count"))
|
|
991
984
|
)
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
985
|
+
|
|
986
|
+
total_existing = consensus_counts["count"].sum() if not consensus_counts.is_empty() else 0
|
|
987
|
+
|
|
988
|
+
# If >95% filled, likely no gaps (common case)
|
|
989
|
+
if total_existing >= total_possible * 0.95:
|
|
990
|
+
self.logger.debug(f"Study appears {total_existing/total_possible*100:.1f}% filled, using sparse optimization")
|
|
991
|
+
|
|
992
|
+
# For sparse missing data, check each consensus feature individually
|
|
993
|
+
missing_combinations = []
|
|
994
|
+
uids_set = set(uids)
|
|
995
|
+
|
|
996
|
+
# Build efficient lookups
|
|
997
|
+
feature_to_sample = dict(
|
|
998
|
+
self.features_df.select(["feature_uid", "sample_uid"]).iter_rows()
|
|
999
|
+
)
|
|
1000
|
+
|
|
1001
|
+
# Get existing combinations for target UIDs only
|
|
1002
|
+
existing_by_consensus = {}
|
|
1003
|
+
for consensus_uid, feature_uid in self.consensus_mapping_df.select(["consensus_uid", "feature_uid"]).iter_rows():
|
|
1004
|
+
if consensus_uid in uids_set and feature_uid in feature_to_sample:
|
|
1005
|
+
if consensus_uid not in existing_by_consensus:
|
|
1006
|
+
existing_by_consensus[consensus_uid] = set()
|
|
1007
|
+
existing_by_consensus[consensus_uid].add(feature_to_sample[feature_uid])
|
|
1008
|
+
|
|
1009
|
+
# Get sample info once
|
|
1010
|
+
all_samples = list(
|
|
1011
|
+
self.samples_df.select(["sample_uid", "sample_name", "sample_path"]).iter_rows()
|
|
1012
|
+
)
|
|
1013
|
+
|
|
1014
|
+
# Check for missing combinations
|
|
1015
|
+
for consensus_uid in uids:
|
|
1016
|
+
existing_samples = existing_by_consensus.get(consensus_uid, set())
|
|
1017
|
+
for sample_uid, sample_name, sample_path in all_samples:
|
|
1018
|
+
if sample_uid not in existing_samples:
|
|
1019
|
+
missing_combinations.append((consensus_uid, sample_uid, sample_name, sample_path))
|
|
1020
|
+
|
|
1021
|
+
return missing_combinations
|
|
1022
|
+
|
|
1023
|
+
else:
|
|
1024
|
+
# For studies with many gaps, use bulk operations
|
|
1025
|
+
self.logger.debug(f"Study {total_existing/total_possible*100:.1f}% filled, using bulk optimization")
|
|
1026
|
+
|
|
1027
|
+
# Build efficient lookups
|
|
1028
|
+
uids_set = set(uids)
|
|
1029
|
+
feature_to_sample = dict(
|
|
1030
|
+
self.features_df.select(["feature_uid", "sample_uid"]).iter_rows()
|
|
1031
|
+
)
|
|
1032
|
+
|
|
1033
|
+
# Build existing combinations set
|
|
1034
|
+
existing_combinations = {
|
|
1035
|
+
(consensus_uid, feature_to_sample[feature_uid])
|
|
1036
|
+
for consensus_uid, feature_uid in self.consensus_mapping_df.select(["consensus_uid", "feature_uid"]).iter_rows()
|
|
1037
|
+
if consensus_uid in uids_set and feature_uid in feature_to_sample
|
|
1038
|
+
}
|
|
1039
|
+
|
|
1040
|
+
# Get all sample info
|
|
1041
|
+
all_samples = list(
|
|
1042
|
+
self.samples_df.select(["sample_uid", "sample_name", "sample_path"]).iter_rows()
|
|
1043
|
+
)
|
|
1044
|
+
|
|
1045
|
+
# Generate all missing combinations
|
|
1046
|
+
missing_combinations = [
|
|
1047
|
+
(consensus_uid, sample_uid, sample_name, sample_path)
|
|
1048
|
+
for consensus_uid in uids
|
|
1049
|
+
for sample_uid, sample_name, sample_path in all_samples
|
|
1050
|
+
if (consensus_uid, sample_uid) not in existing_combinations
|
|
1051
|
+
]
|
|
1052
|
+
|
|
1053
|
+
return missing_combinations
|
|
1009
1054
|
|
|
1010
1055
|
|
|
1011
1056
|
def sanitize(self):
|
masster/study/processing.py
CHANGED
|
@@ -33,6 +33,7 @@ def align(self, **kwargs):
|
|
|
33
33
|
- algo (str): Alignment algorithm ('pc' for PoseClustering, 'kd' for KD).
|
|
34
34
|
|
|
35
35
|
KD algorithm specific parameters:
|
|
36
|
+
- min_samples (int): Minimum number of samples required for KD alignment.
|
|
36
37
|
- nr_partitions (int): Number of partitions in m/z dimension.
|
|
37
38
|
- warp_enabled (bool): Enable non-linear retention time transformation.
|
|
38
39
|
- warp_rt_tol (float): RT tolerance for the LOWESS fit.
|
|
@@ -87,131 +88,17 @@ def align(self, **kwargs):
|
|
|
87
88
|
|
|
88
89
|
fmaps = self.features_maps
|
|
89
90
|
|
|
90
|
-
#
|
|
91
|
-
params_oms = oms.Param()
|
|
92
|
-
# Choose alignment algorithm based on parameter
|
|
91
|
+
# Choose alignment algorithm
|
|
93
92
|
algo = params.get("algo").lower()
|
|
94
|
-
|
|
95
|
-
# Set common parameters for both algorithms
|
|
96
|
-
if algo == "pc":
|
|
97
|
-
# Parameters specific to PoseClustering
|
|
98
|
-
params_oms.setValue("pairfinder:distance_intensity:log_transform", "disabled")
|
|
99
|
-
params_oms.setValue("pairfinder:ignore_charge", "true")
|
|
100
|
-
params_oms.setValue("max_num_peaks_considered", 1000)
|
|
101
|
-
params_oms.setValue("pairfinder:distance_RT:max_difference", params.get("rt_max_diff"))
|
|
102
|
-
params_oms.setValue("pairfinder:distance_MZ:max_difference", params.get("mz_max_diff"))
|
|
103
|
-
params_oms.setValue("superimposer:rt_pair_distance_fraction", params.get("rt_pair_distance_frac"))
|
|
104
|
-
params_oms.setValue("superimposer:mz_pair_max_distance", params.get("mz_pair_max_distance"))
|
|
105
|
-
params_oms.setValue("superimposer:num_used_points", params.get("num_used_points"))
|
|
106
|
-
params_oms.setValue("pairfinder:distance_MZ:exponent", 3.0)
|
|
107
|
-
params_oms.setValue("pairfinder:distance_RT:exponent", 2.0)
|
|
108
|
-
|
|
109
|
-
"""
|
|
110
|
-
{b'max_num_peaks_considered': 1000,
|
|
111
|
-
b'superimposer:mz_pair_max_distance': 0.5,
|
|
112
|
-
b'superimposer:rt_pair_distance_fraction': 0.1,
|
|
113
|
-
b'superimposer:num_used_points': 2000,
|
|
114
|
-
b'superimposer:scaling_bucket_size': 0.005,
|
|
115
|
-
b'superimposer:shift_bucket_size': 3.0,
|
|
116
|
-
b'superimposer:max_shift': 1000.0,
|
|
117
|
-
b'superimposer:max_scaling': 2.0,
|
|
118
|
-
b'superimposer:dump_buckets': '',
|
|
119
|
-
b'superimposer:dump_pairs': '',
|
|
120
|
-
b'pairfinder:second_nearest_gap': 2.0,
|
|
121
|
-
b'pairfinder:use_identifications': 'false',
|
|
122
|
-
b'pairfinder:ignore_charge': 'false',
|
|
123
|
-
b'pairfinder:ignore_adduct': 'true',
|
|
124
|
-
b'pairfinder:distance_RT:max_difference': 100.0,
|
|
125
|
-
b'pairfinder:distance_RT:exponent': 1.0,
|
|
126
|
-
b'pairfinder:distance_RT:weight': 1.0,
|
|
127
|
-
b'pairfinder:distance_MZ:max_difference': 0.3,
|
|
128
|
-
b'pairfinder:distance_MZ:unit': 'Da',
|
|
129
|
-
b'pairfinder:distance_MZ:exponent': 2.0,
|
|
130
|
-
b'pairfinder:distance_MZ:weight': 1.0,
|
|
131
|
-
b'pairfinder:distance_intensity:exponent': 1.0,
|
|
132
|
-
b'pairfinder:distance_intensity:weight': 0.0,
|
|
133
|
-
b'pairfinder:distance_intensity:log_transform': 'disabled'}
|
|
134
|
-
"""
|
|
135
|
-
elif algo == "kd":
|
|
136
|
-
# Parameters specific to KD algorithm
|
|
137
|
-
params_oms.setValue("mz_unit", "Da")
|
|
138
|
-
params_oms.setValue("nr_partitions", params.get("nr_partitions"))
|
|
139
|
-
|
|
140
|
-
# Warp parameters for non-linear RT transformation
|
|
141
|
-
params_oms.setValue("warp:enabled", "true" if params.get("warp_enabled") else "false")
|
|
142
|
-
params_oms.setValue("warp:rt_tol", params.get("warp_rt_tol"))
|
|
143
|
-
params_oms.setValue("warp:mz_tol", params.get("warp_mz_tol"))
|
|
144
|
-
params_oms.setValue("warp:max_pairwise_log_fc", params.get("warp_max_pairwise_log_fc"))
|
|
145
|
-
params_oms.setValue("warp:min_rel_cc_size", params.get("warp_min_rel_cc_size"))
|
|
146
|
-
params_oms.setValue("warp:max_nr_conflicts", params.get("warp_max_nr_conflicts"))
|
|
147
|
-
|
|
148
|
-
# Link parameters
|
|
149
|
-
params_oms.setValue("link:rt_tol", params.get("link_rt_tol"))
|
|
150
|
-
params_oms.setValue("link:mz_tol", params.get("link_mz_tol"))
|
|
151
|
-
params_oms.setValue("link:charge_merging", params.get("link_charge_merging"))
|
|
152
|
-
params_oms.setValue("link:adduct_merging", params.get("link_adduct_merging"))
|
|
153
|
-
|
|
154
|
-
# Distance parameters
|
|
155
|
-
params_oms.setValue("distance_RT:exponent", params.get("distance_RT_exponent"))
|
|
156
|
-
params_oms.setValue("distance_RT:weight", params.get("distance_RT_weight"))
|
|
157
|
-
params_oms.setValue("distance_MZ:exponent", params.get("distance_MZ_exponent"))
|
|
158
|
-
params_oms.setValue("distance_MZ:weight", params.get("distance_MZ_weight"))
|
|
159
|
-
params_oms.setValue("distance_intensity:exponent", params.get("distance_intensity_exponent"))
|
|
160
|
-
params_oms.setValue("distance_intensity:weight", params.get("distance_intensity_weight"))
|
|
161
|
-
params_oms.setValue("distance_intensity:log_transform", params.get("distance_intensity_log_transform"))
|
|
162
|
-
|
|
163
|
-
# LOWESS parameters
|
|
164
|
-
params_oms.setValue("LOWESS:span", params.get("LOWESS_span"))
|
|
165
|
-
params_oms.setValue("LOWESS:num_iterations", params.get("LOWESS_num_iterations"))
|
|
166
|
-
params_oms.setValue("LOWESS:delta", params.get("LOWESS_delta"))
|
|
167
|
-
params_oms.setValue("LOWESS:interpolation_type", params.get("LOWESS_interpolation_type"))
|
|
168
|
-
params_oms.setValue("LOWESS:extrapolation_type", params.get("LOWESS_extrapolation_type"))
|
|
169
|
-
|
|
93
|
+
|
|
170
94
|
if algo == "pc":
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
# set ref_index to feature map index with largest number of features
|
|
174
|
-
ref_index = [i[0] for i in sorted(enumerate([fm.size() for fm in fmaps]), key=lambda x: x[1])][-1]
|
|
175
|
-
self.logger.debug(
|
|
176
|
-
f"Reference map is {self.samples_df.row(ref_index, named=True)['sample_name']}",
|
|
177
|
-
)
|
|
178
|
-
aligner.setParameters(params_oms)
|
|
179
|
-
aligner.setReference(fmaps[ref_index])
|
|
180
|
-
self.logger.debug(f"Parameters for alignment: {params}")
|
|
181
|
-
# perform alignment and transformation of feature maps to the reference map (exclude reference map)
|
|
182
|
-
tdqm_disable = self.log_level not in ["TRACE", "DEBUG", "INFO"]
|
|
183
|
-
for index, fm in tqdm(
|
|
184
|
-
list(enumerate(fmaps)),
|
|
185
|
-
total=len(fmaps),
|
|
186
|
-
desc=f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]} | INFO | {self.log_label}Align feature maps",
|
|
187
|
-
disable=tdqm_disable,
|
|
188
|
-
):
|
|
189
|
-
if index == ref_index:
|
|
190
|
-
continue
|
|
191
|
-
if params.get("skip_blanks") and self.samples_df.row(index, named=True)["sample_type"] == "blank":
|
|
192
|
-
continue
|
|
193
|
-
trafo = oms.TransformationDescription()
|
|
194
|
-
aligner.align(fm, trafo)
|
|
195
|
-
transformer = oms.MapAlignmentTransformer()
|
|
196
|
-
transformer.transformRetentionTimes(fm, trafo, True)
|
|
197
|
-
|
|
198
|
-
self.alignment_ref_index = ref_index
|
|
199
|
-
|
|
95
|
+
_align_pose_clustering(self, fmaps, params)
|
|
96
|
+
|
|
200
97
|
elif algo == "kd":
|
|
201
|
-
|
|
202
|
-
num_maps = len(fmaps)
|
|
203
|
-
aligner = oms.MapAlignmentAlgorithmKD(3, params_oms)
|
|
204
|
-
self.logger.info(f"Starting alignment with KD algorithm using {num_maps} maps")
|
|
205
|
-
|
|
206
|
-
kdtree = oms.KDTreeFeatureMaps()
|
|
207
|
-
kdtree.addMaps(fmaps) # Add all feature maps to the KDTree
|
|
208
|
-
# kdtree.optimizeTree()
|
|
209
|
-
aligner.addRTFitData(kdtree)
|
|
210
|
-
aligner.fitLOWESS()
|
|
211
|
-
aligner.transform(kdtree)
|
|
212
|
-
|
|
98
|
+
_align_kd_algorithm(self, fmaps, params)
|
|
213
99
|
else:
|
|
214
100
|
self.logger.error(f"Unknown alignment algorithm '{algo}'")
|
|
101
|
+
self.logger.error(f"Unknown alignment algorithm '{algo}'")
|
|
215
102
|
|
|
216
103
|
# check if rt_original exists in features_df, if not, add it after rt
|
|
217
104
|
if "rt_original" not in self.features_df.columns:
|
|
@@ -1163,3 +1050,145 @@ def _find_closest_valley(chrom, rt, dir="left", threshold=0.9):
|
|
|
1163
1050
|
else:
|
|
1164
1051
|
break
|
|
1165
1052
|
return chrom.rt[idx]
|
|
1053
|
+
|
|
1054
|
+
|
|
1055
|
+
def _align_pose_clustering(study_obj, fmaps, params):
|
|
1056
|
+
"""Perform alignment using PoseClustering algorithm."""
|
|
1057
|
+
import pyopenms as oms
|
|
1058
|
+
from tqdm import tqdm
|
|
1059
|
+
from datetime import datetime
|
|
1060
|
+
|
|
1061
|
+
# Create PC-specific OpenMS parameters
|
|
1062
|
+
params_oms = oms.Param()
|
|
1063
|
+
params_oms.setValue("pairfinder:distance_intensity:log_transform", "disabled")
|
|
1064
|
+
params_oms.setValue("pairfinder:ignore_charge", "true")
|
|
1065
|
+
params_oms.setValue("max_num_peaks_considered", 1000)
|
|
1066
|
+
params_oms.setValue("pairfinder:distance_RT:max_difference", params.get("rt_max_diff"))
|
|
1067
|
+
params_oms.setValue("pairfinder:distance_MZ:max_difference", params.get("mz_max_diff"))
|
|
1068
|
+
params_oms.setValue("superimposer:rt_pair_distance_fraction", params.get("rt_pair_distance_frac"))
|
|
1069
|
+
params_oms.setValue("superimposer:mz_pair_max_distance", params.get("mz_pair_max_distance"))
|
|
1070
|
+
params_oms.setValue("superimposer:num_used_points", params.get("num_used_points"))
|
|
1071
|
+
params_oms.setValue("pairfinder:distance_MZ:exponent", 3.0)
|
|
1072
|
+
params_oms.setValue("pairfinder:distance_RT:exponent", 2.0)
|
|
1073
|
+
|
|
1074
|
+
aligner = oms.MapAlignmentAlgorithmPoseClustering()
|
|
1075
|
+
study_obj.logger.info("Starting alignment with PoseClustering")
|
|
1076
|
+
|
|
1077
|
+
# Set ref_index to feature map index with largest number of features
|
|
1078
|
+
ref_index = [i[0] for i in sorted(enumerate([fm.size() for fm in fmaps]), key=lambda x: x[1])][-1]
|
|
1079
|
+
study_obj.logger.debug(
|
|
1080
|
+
f"Reference map is {study_obj.samples_df.row(ref_index, named=True)['sample_name']}",
|
|
1081
|
+
)
|
|
1082
|
+
|
|
1083
|
+
aligner.setParameters(params_oms)
|
|
1084
|
+
aligner.setReference(fmaps[ref_index])
|
|
1085
|
+
study_obj.logger.debug(f"Parameters for alignment: {params}")
|
|
1086
|
+
|
|
1087
|
+
# Perform alignment and transformation of feature maps to the reference map (exclude reference map)
|
|
1088
|
+
tdqm_disable = study_obj.log_level not in ["TRACE", "DEBUG", "INFO"]
|
|
1089
|
+
for index, fm in tqdm(
|
|
1090
|
+
list(enumerate(fmaps)),
|
|
1091
|
+
total=len(fmaps),
|
|
1092
|
+
desc=f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]} | INFO | {study_obj.log_label}Align feature maps",
|
|
1093
|
+
disable=tdqm_disable,
|
|
1094
|
+
):
|
|
1095
|
+
if index == ref_index:
|
|
1096
|
+
continue
|
|
1097
|
+
if params.get("skip_blanks") and study_obj.samples_df.row(index, named=True)["sample_type"] == "blank":
|
|
1098
|
+
continue
|
|
1099
|
+
trafo = oms.TransformationDescription()
|
|
1100
|
+
aligner.align(fm, trafo)
|
|
1101
|
+
transformer = oms.MapAlignmentTransformer()
|
|
1102
|
+
transformer.transformRetentionTimes(fm, trafo, True)
|
|
1103
|
+
|
|
1104
|
+
study_obj.alignment_ref_index = ref_index
|
|
1105
|
+
|
|
1106
|
+
|
|
1107
|
+
def _align_kd_algorithm(study_obj, fmaps, params):
|
|
1108
|
+
"""Perform alignment using KD algorithm."""
|
|
1109
|
+
import pyopenms as oms
|
|
1110
|
+
|
|
1111
|
+
num_maps = len(fmaps)
|
|
1112
|
+
study_obj.logger.info(f"Starting alignment with KD algorithm using {num_maps} maps")
|
|
1113
|
+
|
|
1114
|
+
try:
|
|
1115
|
+
# Use the EXACT approach from test_oms.py that works
|
|
1116
|
+
# First parameter is DIMENSIONS (3), not min_samples!
|
|
1117
|
+
study_obj.logger.debug("Creating MapAlignmentAlgorithmKD with 3 dimensions and empty parameters...")
|
|
1118
|
+
empty_params = oms.Param() # Empty params - this is what worked in test_oms.py!
|
|
1119
|
+
aligner = oms.MapAlignmentAlgorithmKD(3, empty_params) # 3 = dimensions, not min_samples
|
|
1120
|
+
study_obj.logger.debug("Created MapAlignmentAlgorithmKD successfully")
|
|
1121
|
+
|
|
1122
|
+
# Create KD-tree structure
|
|
1123
|
+
kdtree = oms.KDTreeFeatureMaps()
|
|
1124
|
+
|
|
1125
|
+
# Set all required warping parameters based on OpenMS requirements
|
|
1126
|
+
kd_params = oms.Param()
|
|
1127
|
+
# Core warp parameters that OpenMS expects
|
|
1128
|
+
kd_params.setValue(b"warp:min_rel_cc_size", 0.2, b"Minimum relative connected component size")
|
|
1129
|
+
kd_params.setValue(b"warp:max_ratio_small_big", 0.5, b"Maximum ratio of small to big connected component")
|
|
1130
|
+
kd_params.setValue(b"warp:min_score", 0.3, b"Minimum score for warping")
|
|
1131
|
+
kd_params.setValue(b"warp:rt_tol", 5.0, b"RT tolerance for feature matching")
|
|
1132
|
+
kd_params.setValue(b"warp:mz_tol", 0.015, b"m/z tolerance for feature matching")
|
|
1133
|
+
# Additional potentially required parameters
|
|
1134
|
+
kd_params.setValue(b"warp:max_shift", 30.0, b"Maximum RT shift allowed")
|
|
1135
|
+
kd_params.setValue(b"warp:bins", 100, b"Number of bins for warping")
|
|
1136
|
+
kdtree.setParameters(kd_params)
|
|
1137
|
+
|
|
1138
|
+
# Add all feature maps to KD-tree (NO limiting - this worked with 38k features!)
|
|
1139
|
+
study_obj.logger.debug("Adding maps to KD-tree structure...")
|
|
1140
|
+
kdtree.addMaps(fmaps)
|
|
1141
|
+
study_obj.logger.debug("Successfully added maps to KD-tree")
|
|
1142
|
+
|
|
1143
|
+
# Add RT fitting data (this is where the magic happens)
|
|
1144
|
+
study_obj.logger.debug("Adding RT fitting data to aligner...")
|
|
1145
|
+
aligner.addRTFitData(kdtree)
|
|
1146
|
+
study_obj.logger.debug("Successfully added RT fitting data")
|
|
1147
|
+
|
|
1148
|
+
# Perform LOWESS fitting
|
|
1149
|
+
study_obj.logger.debug("Performing LOWESS fitting...")
|
|
1150
|
+
aligner.fitLOWESS()
|
|
1151
|
+
study_obj.logger.debug("Successfully completed LOWESS fitting")
|
|
1152
|
+
|
|
1153
|
+
# Apply transformations to feature maps
|
|
1154
|
+
study_obj.logger.debug("Applying transformations to feature maps...")
|
|
1155
|
+
for i, fmap in enumerate(fmaps):
|
|
1156
|
+
trafo = oms.TransformationDescription()
|
|
1157
|
+
aligner.getTransformation(i, trafo)
|
|
1158
|
+
oms.MapAlignmentTransformer.transformRetentionTimes(fmap, trafo, True)
|
|
1159
|
+
|
|
1160
|
+
study_obj.logger.info("KD alignment completed successfully")
|
|
1161
|
+
|
|
1162
|
+
except Exception as e:
|
|
1163
|
+
study_obj.logger.error(f"KD alignment failed with error: {e}")
|
|
1164
|
+
study_obj.logger.info("Falling back to PoseClustering alignment...")
|
|
1165
|
+
|
|
1166
|
+
# Fallback to pose clustering with basic parameters
|
|
1167
|
+
_align_pose_clustering_fallback(study_obj, fmaps, params)
|
|
1168
|
+
|
|
1169
|
+
|
|
1170
|
+
def _align_pose_clustering_fallback(study_obj, fmaps, params):
|
|
1171
|
+
"""Fallback PoseClustering alignment with minimal parameters."""
|
|
1172
|
+
import pyopenms as oms
|
|
1173
|
+
|
|
1174
|
+
aligner = oms.MapAlignmentAlgorithmPoseClustering()
|
|
1175
|
+
ref_index = [i[0] for i in sorted(enumerate([fm.size() for fm in fmaps]), key=lambda x: x[1])][-1]
|
|
1176
|
+
|
|
1177
|
+
# Set up basic parameters for pose clustering
|
|
1178
|
+
pc_params = oms.Param()
|
|
1179
|
+
pc_params.setValue("max_num_peaks_considered", 1000)
|
|
1180
|
+
pc_params.setValue("pairfinder:distance_RT:max_difference", params.get("rt_max_diff"))
|
|
1181
|
+
pc_params.setValue("pairfinder:distance_MZ:max_difference", params.get("mz_max_diff"))
|
|
1182
|
+
|
|
1183
|
+
aligner.setParameters(pc_params)
|
|
1184
|
+
aligner.setReference(fmaps[ref_index])
|
|
1185
|
+
|
|
1186
|
+
for index, fm in enumerate(fmaps):
|
|
1187
|
+
if index == ref_index:
|
|
1188
|
+
continue
|
|
1189
|
+
trafo = oms.TransformationDescription()
|
|
1190
|
+
aligner.align(fm, trafo)
|
|
1191
|
+
transformer = oms.MapAlignmentTransformer()
|
|
1192
|
+
transformer.transformRetentionTimes(fm, trafo, True)
|
|
1193
|
+
|
|
1194
|
+
study_obj.alignment_ref_index = ref_index
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
masster/__init__.py,sha256=G7hbKO8F_o1wFwQlvO25M8JYGka_YSAVU2_O__2rjlI,697
|
|
2
|
-
masster/_version.py,sha256=
|
|
2
|
+
masster/_version.py,sha256=aFPcwPY-GKnZFcAMlTHNlS4nlezVPNLaTH72j5pMbqM,257
|
|
3
3
|
masster/chromatogram.py,sha256=NgPr1uLGJHjRu6PWZZGOrS3pCl7sye1yQCJjlRi9ZSY,19305
|
|
4
4
|
masster/logger.py,sha256=W50V_uh8RSYwGxDrDFhOuj5jpu2tKJyt_16lMw9kQwA,14755
|
|
5
5
|
masster/spectrum.py,sha256=7wKQes97sI-O2lxrkQnYuoroXFyAWgwSzS4TnjUX8RY,47710
|
|
@@ -32,15 +32,15 @@ masster/study/export.py,sha256=E3Ps350T-STFlx-OQc3fU592WvenO1i6Dq7PVH6QUbA,28989
|
|
|
32
32
|
masster/study/h5.py,sha256=Tl_jdV75yOZ5PH76jvMvTdOJdhiup6uINPC04DhcDX0,71815
|
|
33
33
|
masster/study/helpers.py,sha256=PZ0Lb3eFSnEmPQ3sFzHn-cYr_OehHGzTOogfo3r5K-0,133432
|
|
34
34
|
masster/study/helpers_optimized.py,sha256=sd87kNPIEPdMijekXzZWSyeZzJ_DTAW8HQjAry-jVyY,13922
|
|
35
|
-
masster/study/load.py,sha256=
|
|
35
|
+
masster/study/load.py,sha256=8Y_hoRRRA9zW5SeAtZHnQlmGm3mnvz1MddWTSdfMArE,65334
|
|
36
36
|
masster/study/parameters.py,sha256=0elaF7YspTsB7qyajWAbRNL2VfKlGz5GJLifmO8IGkk,3276
|
|
37
37
|
masster/study/plot.py,sha256=RZ-ko0ocsXzaPtsa5QzBDX4FIwQkx4lYf5RPLJAH5Ss,76147
|
|
38
|
-
masster/study/processing.py,sha256=
|
|
38
|
+
masster/study/processing.py,sha256=ZXpeY_vpKs23de4rlvllQegmUrzj2DcH-X6D50MW6YA,53302
|
|
39
39
|
masster/study/save.py,sha256=YjFEiuiB4OFLVvW_AX4-kgnsbjCWrYZeqF85VNEtbdw,6560
|
|
40
40
|
masster/study/study.py,sha256=U6_tUk0SD1FaADq6ChwFTBVNYqgOTcva8QDN5Xdp4Ws,31724
|
|
41
41
|
masster/study/study5_schema.json,sha256=Fwt6U6r1J1BQxwR8ArFv1yN_2Cq1tt3tadS2sQCzjhg,5379
|
|
42
42
|
masster/study/defaults/__init__.py,sha256=m3Z5KXGqsTdh7GjYzZoENERt39yRg0ceVRV1DeCt1P0,610
|
|
43
|
-
masster/study/defaults/align_def.py,sha256=
|
|
43
|
+
masster/study/defaults/align_def.py,sha256=K_6xh7B1vf96maYIBTDZGjyA2zBHEgIP5ASzD9--n2Y,20241
|
|
44
44
|
masster/study/defaults/export_def.py,sha256=eXl3h4aoLX88XkHTpqahLd-QZ2gjUqrmjq8IJULXeWo,1203
|
|
45
45
|
masster/study/defaults/fill_chrom_def.py,sha256=hB6-tyC9bhx-IpGj2HC8FinQdW4VLYj_pn5t1rlj-Ew,8887
|
|
46
46
|
masster/study/defaults/fill_def.py,sha256=TdDqOt-fva44JptLvxOy7GNUCR5isOKz1jR2xj_V8sQ,8869
|
|
@@ -50,8 +50,8 @@ masster/study/defaults/integrate_chrom_def.py,sha256=0MNIWGTjty-Zu-NTQsIweuj3UVq
|
|
|
50
50
|
masster/study/defaults/integrate_def.py,sha256=Vf4SAzdBfnsSZ3IRaF0qZvWu3gMDPHdgPfMYoPKeWv8,7246
|
|
51
51
|
masster/study/defaults/merge_def.py,sha256=EBsKE3hsAkTEzN9dpdRD5W3_suTKy_WZ_96rwS0uBuE,8572
|
|
52
52
|
masster/study/defaults/study_def.py,sha256=v2V5i5y288gydhMOM78m8u_GaWC2XdjLM5nJP6e17sI,10476
|
|
53
|
-
masster-0.3.
|
|
54
|
-
masster-0.3.
|
|
55
|
-
masster-0.3.
|
|
56
|
-
masster-0.3.
|
|
57
|
-
masster-0.3.
|
|
53
|
+
masster-0.3.17.dist-info/METADATA,sha256=ThbBliz3HIe_59xDsd4IK2Ai3TdsEnQ9JqgdkyUrz3Q,44320
|
|
54
|
+
masster-0.3.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
55
|
+
masster-0.3.17.dist-info/entry_points.txt,sha256=ZHguQ_vPmdbpqq2uGtmEOLJfgP-DQ1T0c07Lxh30wc8,58
|
|
56
|
+
masster-0.3.17.dist-info/licenses/LICENSE,sha256=bx5iLIKjgAdYQ7sISn7DsfHRKkoCUm1154sJJKhgqnU,35184
|
|
57
|
+
masster-0.3.17.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|