masster-0.5.8-py3-none-any.whl → masster-0.5.10-py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of masster might be problematic.

masster/study/load.py CHANGED
@@ -139,7 +139,7 @@ def add(
                 f"No files found in {folder}. Please check the folder path or file patterns.",
             )
         else:
-            self.logger.debug(f"Successfully added {counter} samples to the study.")
+            self.logger.debug(f"Added {counter} samples to the study.")

        # Return a simple summary to suppress marimo's automatic object display
        return f"Added {counter} samples to study"
@@ -2055,169 +2055,6 @@ def _sanitize(self):
     except Exception as e:
         self.logger.error(f"Failed to recreate sanitized DataFrame: {e}")

-'''
-def _load_features(self):
-    """
-    Load features by reconstructing FeatureMaps from the processed features_df data.
-
-    This ensures that the loaded FeatureMaps contain the same processed features
-    as stored in features_df, rather than loading raw features from .featureXML files
-    which may not match the processed data after filtering, alignment, etc.
-    """
-    import polars as pl
-    import pyopenms as oms
-    from tqdm import tqdm
-    from datetime import datetime
-
-    self.features_maps = []
-
-    # Check if features_df exists and is not empty
-    if self.features_df is None:
-        self.logger.warning("features_df is None. Falling back to XML loading.")
-        self._load_features_from_xml()
-        return
-
-    if len(self.features_df) == 0:
-        self.logger.warning("features_df is empty. Falling back to XML loading.")
-        self._load_features_from_xml()
-        return
-
-    # If we get here, we should use the new method
-    self.logger.debug("Reconstructing FeatureMaps from features_df.")
-
-    tdqm_disable = self.log_level not in ["TRACE", "DEBUG", "INFO"]
-
-    # Process each sample in order
-    for sample_index, row_dict in tqdm(
-        enumerate(self.samples_df.iter_rows(named=True)),
-        total=len(self.samples_df),
-        desc=f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]} | INFO | {self.log_label}Reconstruct FeatureMaps from DataFrame",
-        disable=tdqm_disable,
-    ):
-        sample_uid = row_dict["sample_uid"]
-        sample_name = row_dict["sample_name"]
-
-        # Get features for this sample from features_df
-        sample_features = self.features_df.filter(pl.col("sample_uid") == sample_uid)
-
-        # Create new FeatureMap
-        feature_map = oms.FeatureMap()
-
-        # Convert DataFrame features to OpenMS Features
-        # Keep track of next available feature_id for this sample
-        next_feature_id = 1
-        used_feature_ids = set()
-
-        # First pass: collect existing feature_ids to avoid conflicts
-        for feature_row in sample_features.iter_rows(named=True):
-            if feature_row["feature_id"] is not None:
-                used_feature_ids.add(int(feature_row["feature_id"]))
-
-        # Find the next available feature_id
-        while next_feature_id in used_feature_ids:
-            next_feature_id += 1
-
-        for feature_row in sample_features.iter_rows(named=True):
-            feature = oms.Feature()
-
-            # Set properties from DataFrame (handle missing values gracefully)
-            try:
-                # Skip features with missing critical data
-                if feature_row["mz"] is None:
-                    self.logger.warning("Skipping feature due to missing mz")
-                    continue
-                if feature_row["rt"] is None:
-                    self.logger.warning("Skipping feature due to missing rt")
-                    continue
-                if feature_row["inty"] is None:
-                    self.logger.warning("Skipping feature due to missing inty")
-                    continue
-
-                # Handle missing feature_id by generating a new one
-                if feature_row["feature_id"] is None:
-                    feature_id = next_feature_id
-                    next_feature_id += 1
-                    self.logger.debug(f"Generated new feature_id {feature_id} for feature with missing ID")
-                else:
-                    feature_id = int(feature_row["feature_id"])
-
-                feature.setUniqueId(feature_id)
-                feature.setMZ(float(feature_row["mz"]))
-                feature.setRT(float(feature_row["rt"]))
-                feature.setIntensity(float(feature_row["inty"]))
-
-                # Handle optional fields that might be None
-                if feature_row.get("quality") is not None:
-                    feature.setOverallQuality(float(feature_row["quality"]))
-                if feature_row.get("charge") is not None:
-                    feature.setCharge(int(feature_row["charge"]))
-
-                # Add to feature map
-                feature_map.push_back(feature)
-            except (ValueError, TypeError) as e:
-                self.logger.warning(f"Skipping feature due to conversion error: {e}")
-                continue
-
-        self.features_maps.append(feature_map)
-
-    self.logger.debug(
-        f"Successfully reconstructed {len(self.features_maps)} FeatureMaps from features_df.",
-    )
-'''
-
-'''
-def _load_features_from_xml(self):
-    """
-    Original load_features method that loads from .featureXML files.
-    Used as fallback when features_df is not available.
-    """
-    self.features_maps = []
-    self.logger.debug("Loading features from featureXML files.")
-    tdqm_disable = self.log_level not in ["TRACE", "DEBUG", "INFO"]
-    for _index, row_dict in tqdm(
-        enumerate(self.samples_df.iter_rows(named=True)),
-        total=len(self.samples_df),
-        desc=f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]} | INFO | {self.log_label}Load feature maps from XML",
-        disable=tdqm_disable,
-    ):
-        if self.folder is not None:
-            filename = os.path.join(
-                self.folder,
-                row_dict["sample_name"] + ".featureXML",
-            )
-        else:
-            filename = os.path.join(
-                os.getcwd(),
-                row_dict["sample_name"] + ".featureXML",
-            )
-        # check if file exists
-        if not os.path.exists(filename):
-            filename = row_dict["sample_path"].replace(".sample5", ".featureXML")
-
-        if not os.path.exists(filename):
-            self.features_maps.append(None)
-            continue
-
-        fh = oms.FeatureXMLFile()
-        fm = oms.FeatureMap()
-        fh.load(filename, fm)
-        self.features_maps.append(fm)
-    self.logger.debug("Features loaded successfully.")
-'''
-'''
-def _load_consensusXML(self, filename="alignment.consensusXML"):
-    """
-    Load a consensus map from a file.
-    """
-    if not os.path.exists(filename):
-        self.logger.error(f"File {filename} does not exist.")
-        return
-    fh = oms.ConsensusXMLFile()
-    self.consensus_map = oms.ConsensusMap()
-    fh.load(filename, self.consensus_map)
-    self.logger.debug(f"Loaded consensus map from {filename}.")
-'''
-
 def _add_samples_batch(
     self,
     files,
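The three helpers deleted above (`_load_features`, `_load_features_from_xml`, `_load_consensusXML`) were already disabled, wrapped in `'''` string literals, so this hunk removes dead code without touching the live loading path. For reference, the core pattern the dead code used, rebuilding a pyopenms `FeatureMap` from tabular feature rows, looks roughly like the following standalone sketch (illustrative column data, not masster's API):

```python
# Standalone sketch of the FeatureMap-from-DataFrame pattern used by the
# deleted helper. The DataFrame below is made up for illustration.
import polars as pl
import pyopenms as oms

rows = pl.DataFrame({
    "feature_id": [1, 2],
    "mz": [301.1409, 445.2021],   # mass-to-charge
    "rt": [120.5, 240.8],         # retention time
    "inty": [1.2e5, 3.4e4],       # intensity
})

feature_map = oms.FeatureMap()
for row in rows.iter_rows(named=True):
    # Skip rows missing critical values, as the deleted code did
    if row["mz"] is None or row["rt"] is None or row["inty"] is None:
        continue
    feature = oms.Feature()
    feature.setUniqueId(int(row["feature_id"]))
    feature.setMZ(float(row["mz"]))
    feature.setRT(float(row["rt"]))
    feature.setIntensity(float(row["inty"]))
    feature_map.push_back(feature)
```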
masster/study/merge.py CHANGED
@@ -340,8 +340,6 @@ def merge(study, **kwargs) -> None:
     - MS2 spectra are automatically linked when link_ms2=True
     - Adduct relationships are identified and stored after merging
     """
-    start_time = time.time()
-
     # Initialize with defaults and override with kwargs
     params = merge_defaults()

@@ -486,10 +484,6 @@ def merge(study, **kwargs) -> None:

     # Finalize merge: filter by min_samples and add isotope/MS2 data
     __finalize_merge(study, params.link_ms2, params.extract_ms1, params.min_samples)
-
-    # Log completion without the misleading feature count
-    elapsed = time.time() - start_time
-    study.logger.debug(f"Merge process completed in {elapsed:.1f}s")


 def _merge_kd(study, params: merge_defaults) -> oms.ConsensusMap:
@@ -818,7 +812,7 @@ def _merge_kd_chunked(study, params: merge_defaults, cached_adducts_df=None, cac
                 serialized_chunk_results.append((chunk_start_idx, consensus_features))
                 completed_chunks += 1
                 n_samples_in_chunk = len(chunk_data_list[chunk_idx]['chunk_samples_data'])
-                study.logger.info(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
+                study.logger.success(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
             except Exception as exc:
                 # Check if this is a BrokenProcessPool exception from Windows multiprocessing issues
                 if isinstance(exc, BrokenProcessPool) or "process pool" in str(exc).lower():
@@ -852,7 +846,7 @@ def _merge_kd_chunked(study, params: merge_defaults, cached_adducts_df=None, cac
                 serialized_chunk_results.append((chunk_start_idx, consensus_features))
                 completed_chunks += 1
                 n_samples_in_chunk = len(chunk_data_list[chunk_idx]['chunk_samples_data'])
-                study.logger.info(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
+                study.logger.success(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
             except Exception as exc:
                 study.logger.error(f"Chunk {chunk_idx} generated an exception: {exc}")
                 raise exc
@@ -993,7 +987,7 @@ def _merge_qt_chunked(study, params: merge_defaults, cached_adducts_df=None, cac
                 serialized_chunk_results.append((chunk_start_idx, consensus_features))
                 completed_chunks += 1
                 n_samples_in_chunk = len(chunk_data_list[chunk_idx]['chunk_samples_data'])
-                study.logger.info(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
+                study.logger.success(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
             except Exception as exc:
                 # Check if this is a BrokenProcessPool exception from Windows multiprocessing issues
                 if isinstance(exc, BrokenProcessPool) or "process pool" in str(exc).lower():
@@ -1027,7 +1021,7 @@ def _merge_qt_chunked(study, params: merge_defaults, cached_adducts_df=None, cac
                 serialized_chunk_results.append((chunk_start_idx, consensus_features))
                 completed_chunks += 1
                 n_samples_in_chunk = len(chunk_data_list[chunk_idx]['chunk_samples_data'])
-                study.logger.info(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
+                study.logger.success(f"Completed chunk {completed_chunks}/{total_chunks} (samples {chunk_start_idx + 1}-{chunk_start_idx + n_samples_in_chunk})")
             except Exception as exc:
                 study.logger.error(f"Chunk {chunk_idx} generated an exception: {exc}")
                 raise exc
@@ -3082,9 +3076,9 @@ def __finalize_merge(study, link_ms2, extract_ms1, min_samples):
         # Count tight clusters with specified thresholds
         tight_clusters = _count_tight_clusters(study,mz_tol=0.04, rt_tol=0.3)

-        study.logger.info(
+        study.logger.success(
             f"Merging completed. Consensus features: {len(study.consensus_df)}. "
-            f"Completeness: {c:.2f}. Tight clusters left: {tight_clusters}.",
+            f"Completeness: {c:.2f}. Tight clusters: {tight_clusters}.",
         )
     else:
         study.logger.warning(
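Across merge.py (and the hunks further down), completion messages move from `info`/`debug` to a dedicated `success` level, and the timing log in `merge()` is dropped along with its `start_time` bookkeeping. The standard library has no SUCCESS level, so this presumably relies on masster's own logger (note the `masster/logger.py` size jump in the RECORD section below). A minimal sketch of one common way to add such a level to stdlib `logging`; masster/logger.py may well implement this differently:

```python
# Hedged sketch: registering a SUCCESS level on stdlib logging.
import logging

SUCCESS = 25  # between INFO (20) and WARNING (30)
logging.addLevelName(SUCCESS, "SUCCESS")

def success(self, msg, *args, **kwargs):
    # Mirror Logger.info/Logger.warning for the custom level
    if self.isEnabledFor(SUCCESS):
        self._log(SUCCESS, msg, args, **kwargs)

logging.Logger.success = success

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")
logger.success("Merging completed.")  # emitted at the new SUCCESS level
```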
masster/study/plot.py CHANGED
@@ -9,6 +9,8 @@ import panel
 import polars as pl
 from tqdm import tqdm

+# Import cmap for colormap handling
+from cmap import Colormap
 hv.extension("bokeh")


@@ -17,6 +19,93 @@ hv.extension("bokeh")
 from bokeh.layouts import row as bokeh_row


+def _export_with_webdriver_manager(plot_obj, filename, format_type, logger=None):
+    """
+    Export plot to PNG or SVG using webdriver-manager for automatic driver management.
+
+    Parameters:
+        plot_obj: Bokeh plot object or holoviews object to export
+        filename: Output filename
+        format_type: Either "png" or "svg"
+        logger: Logger for error reporting (optional)
+
+    Returns:
+        bool: True if export successful, False otherwise
+    """
+    try:
+        # Convert holoviews to bokeh if needed
+        if hasattr(plot_obj, 'opts'):  # Likely a holoviews object
+            import holoviews as hv
+            bokeh_plot = hv.render(plot_obj)
+        else:
+            bokeh_plot = plot_obj
+
+        # Try webdriver-manager export first
+        try:
+            from webdriver_manager.chrome import ChromeDriverManager
+            from selenium import webdriver
+            from selenium.webdriver.chrome.service import Service
+            from selenium.webdriver.chrome.options import Options
+
+            # Set up Chrome options for headless operation
+            chrome_options = Options()
+            chrome_options.add_argument("--headless")
+            chrome_options.add_argument("--no-sandbox")
+            chrome_options.add_argument("--disable-dev-shm-usage")
+            chrome_options.add_argument("--disable-gpu")
+
+            # Use webdriver-manager to automatically get the correct ChromeDriver
+            service = Service(ChromeDriverManager().install())
+            driver = webdriver.Chrome(service=service, options=chrome_options)
+
+            # Export with managed webdriver
+            if format_type == "png":
+                from bokeh.io import export_png
+                export_png(bokeh_plot, filename=filename, webdriver=driver)
+            elif format_type == "svg":
+                from bokeh.io import export_svg
+                export_svg(bokeh_plot, filename=filename, webdriver=driver)
+            else:
+                raise ValueError(f"Unsupported format: {format_type}")
+
+            driver.quit()
+            return True
+
+        except ImportError:
+            if logger:
+                logger.debug(f"webdriver-manager not available, using default {format_type.upper()} export")
+            # Fall back to default export
+            if format_type == "png":
+                from bokeh.io import export_png
+                export_png(bokeh_plot, filename=filename)
+            elif format_type == "svg":
+                from bokeh.io import export_svg
+                export_svg(bokeh_plot, filename=filename)
+            return True
+
+        except Exception as e:
+            if logger:
+                logger.debug(f"{format_type.upper()} export with webdriver-manager failed: {e}, using default {format_type.upper()} export")
+            try:
+                # Final fallback to default export
+                if format_type == "png":
+                    from bokeh.io import export_png
+                    export_png(bokeh_plot, filename=filename)
+                elif format_type == "svg":
+                    from bokeh.io import export_svg
+                    export_svg(bokeh_plot, filename=filename)
+                return True
+            except Exception as e2:
+                if logger:
+                    logger.error(f"{format_type.upper()} export failed: {e2}")
+                return False
+
+    except Exception as e:
+        if logger:
+            logger.error(f"Export preparation failed: {e}")
+        return False
+
+
 def _isolated_save_plot(plot_object, filename, abs_filename, logger, plot_title="Plot"):
     """
     Save a plot using isolated file saving that doesn't affect global Bokeh state.
@@ -38,11 +127,10 @@ def _isolated_save_plot(plot_object, filename, abs_filename, logger, plot_title=
         logger.info(f"Plot saved to: {abs_filename}")

     elif filename.endswith(".png"):
-        try:
-            from bokeh.io.export import export_png
-            export_png(plot_object, filename=filename)
+        success = _export_with_webdriver_manager(plot_object, filename, "png", logger)
+        if success:
             logger.info(f"Plot saved to: {abs_filename}")
-        except Exception as e:
+        else:
             # Fall back to HTML if PNG export not available
             html_filename = filename.replace('.png', '.html')
             abs_html_filename = html_filename if abs_filename == filename else abs_filename.replace('.png', '.html')
@@ -55,16 +143,15 @@ def _isolated_save_plot(plot_object, filename, abs_filename, logger, plot_title=
             with open(html_filename, 'w', encoding='utf-8') as f:
                 f.write(html)

-            logger.warning(f"PNG export not available ({str(e)}). Use export_png. Saved as HTML instead: {abs_html_filename}")
-    elif filename.endswith(".pdf"):
-        # Try to save as PDF, fall back to HTML if not available
-        try:
-            from bokeh.io.export import export_pdf
-            export_pdf(plot_object, filename=filename)
+            logger.warning(f"PNG export not available. Saved as HTML instead: {abs_html_filename}")
+    elif filename.endswith(".svg"):
+        success = _export_with_webdriver_manager(plot_object, filename, "svg", logger)
+        if success:
             logger.info(f"Plot saved to: {abs_filename}")
-        except ImportError:
-            # Fall back to HTML if PDF export not available
-            html_filename = filename.replace('.pdf', '.html')
+        else:
+            # Fall back to HTML if SVG export not available
+            html_filename = filename.replace('.svg', '.html')
+            abs_html_filename = html_filename if abs_filename == filename else abs_filename.replace('.svg', '.html')
             from bokeh.resources import Resources
             from bokeh.embed import file_html

@@ -74,27 +161,13 @@ def _isolated_save_plot(plot_object, filename, abs_filename, logger, plot_title=
             with open(html_filename, 'w', encoding='utf-8') as f:
                 f.write(html)

-            logger.warning(f"PDF export not available, saved as HTML instead: {html_filename}")
-    elif filename.endswith(".svg"):
-        # Try to save as SVG, fall back to HTML if not available
-        try:
-            from bokeh.io.export import export_svg
-            export_svg(plot_object, filename=filename)
-            logger.info(f"Plot saved to: {abs_filename}")
-        except Exception as e:
-            # Fall back to HTML if SVG export not available
-            html_filename = filename.replace('.svg', '.html')
-            abs_html_filename = html_filename if abs_filename == filename else abs_filename.replace('.svg', '.html')
-            from bokeh.resources import Resources
-            from bokeh.embed import file_html
-
-            resources = Resources(mode='cdn')
+            logger.warning(f"SVG export not available. Saved as HTML instead: {abs_html_filename}")
             html = file_html(plot_object, resources, title=plot_title)

             with open(html_filename, 'w', encoding='utf-8') as f:
                 f.write(html)

-            logger.warning(f"SVG export not available ({str(e)}). Saved as HTML instead: {abs_html_filename}")
+            logger.warning(f"SVG export not available. Saved as HTML instead: {abs_html_filename}")
     else:
         # Default to HTML for unknown extensions using isolated approach
         from bokeh.resources import Resources
@@ -548,11 +621,11 @@ def plot_consensus_2d(
     self,
     filename=None,
     colorby="number_samples",
-    cmap=None,
+    cmap="viridis",
+    alpha=0.7,
     markersize=8,
     sizeby="inty_mean",
     scaling="static",
-    alpha=0.7,
     width=600,
     height=450,
     mz_range=None,
@@ -639,7 +712,7 @@ def plot_consensus_2d(
     if cmap is None:
         cmap = "viridis"
     elif cmap == "grey":
-        cmap = "Greys256"
+        cmap = "greys"

     # plot with bokeh
     import bokeh.plotting as bp
@@ -657,9 +730,6 @@ def plot_consensus_2d(
     from bokeh.models.annotations import ColorBar
     from bokeh.palettes import viridis, Category20

-    # Import cmap for colormap handling
-    from cmap import Colormap
-
     # Convert Polars DataFrame to pandas for Bokeh compatibility
     data_pd = data.to_pandas()
     source = ColumnDataSource(data_pd)
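The new `_export_with_webdriver_manager` helper and the reworked branches in `_isolated_save_plot` replace the old PDF path with SVG and route PNG/SVG export through a Selenium driver provisioned by webdriver-manager, falling back to plain `export_png`/`export_svg` and finally to HTML. The same pattern in isolation (assumes Chrome plus the selenium and webdriver-manager packages, matching the new dependencies in the METADATA section below):

```python
# Standalone sketch of the export pattern introduced above.
from bokeh.io import export_png
from bokeh.plotting import figure
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager

options = Options()
options.add_argument("--headless")  # render without a visible browser window
service = Service(ChromeDriverManager().install())  # fetch a matching ChromeDriver
driver = webdriver.Chrome(service=service, options=options)
try:
    p = figure(width=300, height=200)
    p.scatter([1, 2, 3], [4, 5, 6])
    export_png(p, filename="plot.png", webdriver=driver)
finally:
    driver.quit()
```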
masster/study/processing.py CHANGED
@@ -86,7 +86,7 @@ def align(self, **kwargs):
         self.logger.error(f"Unknown alignment algorithm '{algorithm}'")
         return

-    self.logger.debug("Alignment completed successfully.")
+    self.logger.success("Alignment completed.")

     # Reset consensus data structures after alignment since RT changes invalidate consensus
     consensus_reset_count = 0
@@ -225,8 +225,8 @@ def find_ms2(self, **kwargs):
         unique_consensus_features = self.consensus_ms2["consensus_uid"].n_unique()
     else:
         unique_consensus_features = 0
-    self.logger.info(
-        f"Linking completed. {len(self.consensus_ms2)} MS2 spectra associated to {unique_consensus_features} consensus features.",
+    self.logger.success(
+        f"Linking completed. Found {len(self.consensus_ms2)} MS2 spectra associated to {unique_consensus_features} consensus features.",
     )


@@ -290,7 +290,7 @@ def filter_consensus(
         f"Filtered {after_quality - after_number_samples} entries based on number of samples. Remaining {after_number_samples} entries.",
     )

-    self.logger.info(f"Filtering completed. {len(cons)} entries remaining.")
+    self.logger.success(f"Filtering completed. {len(cons)} entries remaining.")

     if inplace:
         self.consensus_df = cons
@@ -514,7 +514,7 @@ def _integrate_chrom_impl(self, **kwargs):
             ],
         ).drop("__row_idx")  # Remove the temporary row index column

-        self.logger.debug(
+        self.logger.success(
             f"Integration completed. Updated {len(update_rows)} features with chromatogram data.",
         )
     except Exception as e:
@@ -1091,7 +1091,7 @@ def _align_kd_algorithm(study_obj, params):
         ).alias("rt")
     )

-    study_obj.logger.info(
+    study_obj.logger.success(
         f"Alignment completed. Reference sample UID {ref_sample_uid} (index {ref_index}).",
     )

@@ -1365,7 +1365,7 @@ def find_iso(self, rt_tol=0.1, mz_tol=0.01, uids=None):
     # Count how many consensus features have isotope data
     iso_count = sum(1 for data in consensus_iso_data.values() if data is not None and len(data) > 0)

-    self.logger.info(f"Optimized isotope detection completed. Found isotope patterns for {iso_count}/{len(self.consensus_df)} consensus features.")
+    self.logger.success(f"Isotope detection completed. Found isotope patterns for {iso_count}/{len(self.consensus_df)} consensus features.")


 def reset_iso(self):
masster/study/study5_schema.json CHANGED
@@ -255,6 +255,9 @@
         },
         "ms2_specs": {
             "dtype": "pl.Object"
+        },
+        "ms1_spec": {
+            "dtype": "pl.Object"
         }
     }
 },
masster-0.5.10.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: masster
-Version: 0.5.8
+Version: 0.5.10
 Summary: Mass spectrometry data analysis package
 Project-URL: homepage, https://github.com/zamboni-lab/masster
 Project-URL: repository, https://github.com/zamboni-lab/masster
@@ -685,6 +685,7 @@ Requires-Dist: altair>=5.5.0
 Requires-Dist: bokeh>=3.7.3
 Requires-Dist: cmap>=0.6.2
 Requires-Dist: datashader>=0.18.1
+Requires-Dist: get-gecko-driver>=1.4
 Requires-Dist: h5py>=3.14.0
 Requires-Dist: hdbscan>=0.8.40
 Requires-Dist: holoviews>=1.21.0
@@ -703,6 +704,7 @@ Requires-Dist: scikit-learn>=1.7.1
 Requires-Dist: scipy>=1.12.0
 Requires-Dist: tqdm>=4.65.0
 Requires-Dist: umap-learn>=0.5.9.post2
+Requires-Dist: webdriver-manager>=4.0.2
 Provides-Extra: dev
 Requires-Dist: bandit>=1.7.0; extra == 'dev'
 Requires-Dist: black>=23.0.0; extra == 'dev'
masster-0.5.10.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
 masster/__init__.py,sha256=ueZ224WPNRRjQEYTaQUol818nwQgJwB93HbEfmtPRmg,1041
-masster/_version.py,sha256=BXXXNsuN4ipe6lTSVTBWB-FcZgzIiyQ_OToEKfd6hos,256
+masster/_version.py,sha256=ykzsX9zBanG6fFefFoIDPED_UySJmLofW0r3TROBhFY,257
 masster/chromatogram.py,sha256=iYpdv8C17zVnlWvOFgAn9ns2uFGiF-GgoYf5QVVAbHs,19319
-masster/logger.py,sha256=tR65N23zfrNpcZNbZm2ot_Aual9XrGB1MWjLrovZkMs,16749
+masster/logger.py,sha256=XT2gUcUIct8LWzTp9n484g5MaB89toT76CGA41oBvfA,18375
 masster/spectrum.py,sha256=TWIgDcl0lveG40cLVZTWGp8-FxMolu-P8EjZyRBtXL4,49850
 masster/data/dda/20250530_VH_IQX_KW_RP_HSST3_100mm_12min_pos_v4_DDA_OT_C-MiLUT_QC_dil2_01_20250602151849.sample5,sha256=LdJMF8uLoDm9ixZNHBoOzBH6hX7NGY7vTvqa2Pzetb8,6539174
 masster/data/dda/20250530_VH_IQX_KW_RP_HSST3_100mm_12min_pos_v4_DDA_OT_C-MiLUT_QC_dil3_01_20250602150634.sample5,sha256=hWUfslGoOTiQw59jENSBXP4sa6DdkbOi40FJ68ep61Q,6956773
@@ -19,18 +19,18 @@ masster/data/wiff/2025_01_14_VW_7600_LpMx_DBS_CID_2min_TOP15_030msecMS1_005msecR
 masster/lib/__init__.py,sha256=TcePNx3SYZHz6763TL9Sg4gUNXaRWjlrOtyS6vsu-hg,178
 masster/lib/lib.py,sha256=SSN06UtiM-hIdjS3eCiIHsJ_8S4YHRGOLGmdPIh-efo,27481
 masster/sample/__init__.py,sha256=HL0m1ept0PMAYUCQtDDnkdOS12IFl6oLAq4TZQz83uY,170
-masster/sample/adducts.py,sha256=nl5KEuat0hvktgar6Ca4PbY8JXt9SD05EeTn0HOKt64,32592
-masster/sample/h5.py,sha256=tlTPGrT9AMUhduvY_YPDzk6dZF5dI-9NRc1xeiuze5c,115442
-masster/sample/helpers.py,sha256=27eZFFidr02-DlSi4-eF4bpSk_y-qU3eoFCAOshRO20,42138
+masster/sample/adducts.py,sha256=aBDoBKRjh6rMIF65yH6vx7KpQxeMUv796q3H46TySwY,32603
+masster/sample/h5.py,sha256=X5VBHBpgJ2FJc9mtCggJ1HSQ3ujRmb1Wnpr9sJ8bGVA,115445
+masster/sample/helpers.py,sha256=Mt9LX-Dy1Xro1a_Sy6nxQzCkP_-q7nK4xVnNm44v7UA,43872
 masster/sample/lib.py,sha256=E-j9c3Wd8f9a-H8xj7CAOwlA8KcyXPoFyYm3c8r7LtI,33755
 masster/sample/load.py,sha256=swjRBCoFGni9iPztHIKPVB5ru_xDMVryB_inPXdujTw,51819
 masster/sample/parameters.py,sha256=Gg2KcuNbV_wZ_Wwv93QlM5J19ji0oSIvZLPV1NoBmq0,4456
-masster/sample/plot.py,sha256=-rHqdi6q7jqjS8ENpTlxjwJBMZAwo-6OsNmE_d1JVQk,86617
-masster/sample/processing.py,sha256=CjaLCElDKECeCvYWqzT5EH_-rPQ0Y4A30zKjZfqmS5s,55915
+masster/sample/plot.py,sha256=Cf_kuUiZnVHSlZfJQbV8Wtmdw1PPG5D3g1UbLobaXMs,96483
+masster/sample/processing.py,sha256=qk-6_v424nwfaoVmdbHj-_lJiW7OkWS7SuQzQWNAFGI,55919
 masster/sample/quant.py,sha256=tHNjvUFTdehKR31BXBZnVsBxMD9XJHgaltITOjr71uE,7562
-masster/sample/sample.py,sha256=VhQik_ev1liRqGUtbZvV1NOjfFzgfZI1orfQT87gai4,20643
+masster/sample/sample.py,sha256=pw4fIE5gecdupZOOWFUiRCs0x-3qa3Nv7V_UdJ-CAsc,22202
 masster/sample/sample5_schema.json,sha256=H5e2T6rHIDzul2kp_yP-ILUUWUpW08wP2pEQjMR0nSk,3977
-masster/sample/save.py,sha256=IwWfcsmWLWM-2ASdhHXWAiPyrZBv5JUynvciNPppDUs,38643
+masster/sample/save.py,sha256=pbiRoWEA2DnhDKmMJncjveNlBqizJLOVRm5cug4ZwyM,38658
 masster/sample/sciex.py,sha256=vnbxsq_qnAQVuzcpziP1o3IC4kM5amGBcPmC2TAuDLw,46319
 masster/sample/defaults/__init__.py,sha256=A09AOP44cxD_oYohyt7XFUho0zndRcrzVD4DUaGnKH4,447
 masster/sample/defaults/find_adducts_def.py,sha256=Bu2KiBJRxD0SAnOPNMm_Nk-6fx6QYoRXjFNGzz-0_o0,13570
@@ -41,17 +41,17 @@ masster/sample/defaults/sample_def.py,sha256=keoXyMyrm_iLgbYqfIbqCpJ3XHBVlNwCNmb
 masster/study/__init__.py,sha256=55axdFuqRX4aXtJ8ocnhcLB32fNtmmJpCi58moO0r4g,237
 masster/study/analysis.py,sha256=L-wXBnGZCLB5UUDrjIdOiMG9zdej3Tw_SftcEmmTukM,84264
 masster/study/export.py,sha256=joFK9jip2UM4lVAvhkdKVeUdNdM4D8uP2WE49IaVJgw,60172
-masster/study/h5.py,sha256=84plxM7gYFdn_mNbcg8XxE_NRZmiIBqs_XhfHMiXshk,95364
-masster/study/helpers.py,sha256=dOj7rJlVx7uKCRt1iMOsZHuz4b9Kch5d68biUyIK1mE,190834
-masster/study/id.py,sha256=Tiw_i2jDxUWaPnzd5PzgSnLSRDDDJkwLYbjzA0XcBwQ,80082
-masster/study/load.py,sha256=7d11294YYEGrSKox3cwvetv2vqcstYT1SnyAhHH5V_Q,107706
-masster/study/merge.py,sha256=-SNlroqQVuOyzsJimvgf9c6T9V3yt-mx_2lW3L2kE-g,169501
+masster/study/h5.py,sha256=KpvV6-0RGIAjYBNa7AodbLmlGtoDUvbeC_jB2IZdYvA,96118
+masster/study/helpers.py,sha256=QwPyGTuRKZoimK_y1kX4Ag_0rJNB1MYoP0Q2mXEVshs,191930
+masster/study/id.py,sha256=heKU309cUsNeFxbWYvqxVIAJLrR1H0YqMgLanLx9Do4,80091
+masster/study/load.py,sha256=BMjoUDkXNI6iU2tRE2eBRzxMrvW0gRyLepqYOWaMPXU,101192
+masster/study/merge.py,sha256=aEZjNhrsQZxkRhyyuOUjlIN_tdA6y2VX2BAkvfPd_Sc,169300
 masster/study/parameters.py,sha256=bTvmcwX9INxzcrEAmTiFH8qeWVhwkvMTZjuP394pz5o,3279
-masster/study/plot.py,sha256=LEIzoYiUyq1aswh-sw8S-ESvN2DaQKN5l22yLW8gZe8,107647
-masster/study/processing.py,sha256=n5208v-JQGq3bBP-ncgl2__hHWSQQYHx2fl4Mm0THdI,58538
+masster/study/plot.py,sha256=ftQAVgEYkZuKAVIlbTR5bUypF8DpMOxSXwOyYz_BsOQ,110610
+masster/study/processing.py,sha256=n-JbH1ZHtSE1xlyi69ZrcHMsxw7dAyodC5hnaNld2to,58537
 masster/study/save.py,sha256=47AP518epJJ9TjaGGyrLKsMsyjIk8_J4ka7bmsnRtFQ,9268
 masster/study/study.py,sha256=gudugPJk3LOtZh-YsszSRCBDrBG78cexoG0CSM86EPs,38701
-masster/study/study5_schema.json,sha256=0IZxM9VVI0TUlx74BPzJDT44kySi6NZZ6iLR0j8bU_s,7736
+masster/study/study5_schema.json,sha256=lTFePwY8bQngyBnNCP60-UP9tnZLGhFo3YtJgwHTWdo,7797
 masster/study/defaults/__init__.py,sha256=m3Z5KXGqsTdh7GjYzZoENERt39yRg0ceVRV1DeCt1P0,610
 masster/study/defaults/align_def.py,sha256=Du0F592ej2einT8kOx8EUs610axSvur8_-6N19O-uJY,10209
 masster/study/defaults/export_def.py,sha256=eXl3h4aoLX88XkHTpqahLd-QZ2gjUqrmjq8IJULXeWo,1203
@@ -67,8 +67,8 @@ masster/wizard/README.md,sha256=mL1A3YWJZOefpJ6D0-HqGLkVRmUlOpwyVFdvJBeeoZM,1414
 masster/wizard/__init__.py,sha256=a2hcZnHASjfuw1lqZhZnvTR58rc33rRnoGAY_JfvGhI,683
 masster/wizard/example.py,sha256=xEZFTH9UZ8HKOm6s3JL8Js0Uw5ChnISWBHSZCL32vsM,7983
 masster/wizard/wizard.py,sha256=UobIGFZtp1s_9WJlpl6DQ2-pp7flPQ6dlYZJqYE92OM,38131
-masster-0.5.8.dist-info/METADATA,sha256=Y_1eR5BbxbKoJmfrJE2W_gShiIa7ba2bw8vAPD6hMD4,45113
-masster-0.5.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-masster-0.5.8.dist-info/entry_points.txt,sha256=ZHguQ_vPmdbpqq2uGtmEOLJfgP-DQ1T0c07Lxh30wc8,58
-masster-0.5.8.dist-info/licenses/LICENSE,sha256=bx5iLIKjgAdYQ7sISn7DsfHRKkoCUm1154sJJKhgqnU,35184
-masster-0.5.8.dist-info/RECORD,,
+masster-0.5.10.dist-info/METADATA,sha256=wPI5dLDPHYjlcafoYNdUWlnUDc-bS-HjBruaVnVDxpA,45191
+masster-0.5.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+masster-0.5.10.dist-info/entry_points.txt,sha256=ZHguQ_vPmdbpqq2uGtmEOLJfgP-DQ1T0c07Lxh30wc8,58
+masster-0.5.10.dist-info/licenses/LICENSE,sha256=bx5iLIKjgAdYQ7sISn7DsfHRKkoCUm1154sJJKhgqnU,35184
+masster-0.5.10.dist-info/RECORD,,