masster 0.5.7.tar.gz → 0.5.9.tar.gz
This diff shows the changes between package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the versions as they appear in their respective public registries.
Potentially problematic release.
- {masster-0.5.7 → masster-0.5.9}/PKG-INFO +1 -1
- {masster-0.5.7 → masster-0.5.9}/pyproject.toml +1 -1
- {masster-0.5.7 → masster-0.5.9}/src/masster/_version.py +1 -1
- {masster-0.5.7 → masster-0.5.9}/src/masster/logger.py +58 -43
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/h5.py +1 -1
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/plot.py +4 -4
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/processing.py +3 -3
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/save.py +5 -5
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/h5.py +1 -1
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/helpers.py +150 -5
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/id.py +4 -4
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/merge.py +565 -162
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/processing.py +2 -2
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/study.py +2 -1
- {masster-0.5.7 → masster-0.5.9}/uv.lock +1 -1
- {masster-0.5.7 → masster-0.5.9}/.github/workflows/publish.yml +0 -0
- {masster-0.5.7 → masster-0.5.9}/.github/workflows/security.yml +0 -0
- {masster-0.5.7 → masster-0.5.9}/.github/workflows/test.yml +0 -0
- {masster-0.5.7 → masster-0.5.9}/.gitignore +0 -0
- {masster-0.5.7 → masster-0.5.9}/.pre-commit-config.yaml +0 -0
- {masster-0.5.7 → masster-0.5.9}/LICENSE +0 -0
- {masster-0.5.7 → masster-0.5.9}/Makefile +0 -0
- {masster-0.5.7 → masster-0.5.9}/README.md +0 -0
- {masster-0.5.7 → masster-0.5.9}/TESTING.md +0 -0
- {masster-0.5.7 → masster-0.5.9}/demo/example_batch_process.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/demo/example_sample_process.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/__init__.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/chromatogram.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/dda/20250530_VH_IQX_KW_RP_HSST3_100mm_12min_pos_v4_DDA_OT_C-MiLUT_QC_dil2_01_20250602151849.sample5 +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/dda/20250530_VH_IQX_KW_RP_HSST3_100mm_12min_pos_v4_DDA_OT_C-MiLUT_QC_dil3_01_20250602150634.sample5 +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/dda/20250530_VH_IQX_KW_RP_HSST3_100mm_12min_pos_v4_MS1_C-MiLUT_C008_v6_r38_01.sample5 +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/dda/20250530_VH_IQX_KW_RP_HSST3_100mm_12min_pos_v4_MS1_C-MiLUT_C008_v7_r37_01.sample5 +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/dda/20250530_VH_IQX_KW_RP_HSST3_100mm_12min_pos_v4_MS1_C-MiLUT_C017_v5_r99_01.sample5 +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/libs/aa.csv +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/libs/ccm.csv +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/libs/hilic.csv +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/libs/urine.csv +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/wiff/2025_01_14_VW_7600_LpMx_DBS_CID_2min_TOP15_030msecMS1_005msecReac_CE35_DBS-ON_3.timeseries.data +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/wiff/2025_01_14_VW_7600_LpMx_DBS_CID_2min_TOP15_030msecMS1_005msecReac_CE35_DBS-ON_3.wiff +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/wiff/2025_01_14_VW_7600_LpMx_DBS_CID_2min_TOP15_030msecMS1_005msecReac_CE35_DBS-ON_3.wiff.scan +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/data/wiff/2025_01_14_VW_7600_LpMx_DBS_CID_2min_TOP15_030msecMS1_005msecReac_CE35_DBS-ON_3.wiff2 +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/lib/__init__.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/lib/lib.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/__init__.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/adducts.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/defaults/__init__.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/defaults/find_adducts_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/defaults/find_features_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/defaults/find_ms2_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/defaults/get_spectrum_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/defaults/sample_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/helpers.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/lib.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/load.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/parameters.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/quant.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/sample.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/sample5_schema.json +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/sample/sciex.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/spectrum.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/__init__.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/analysis.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/__init__.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/align_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/export_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/fill_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/find_consensus_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/find_ms2_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/identify_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/integrate_chrom_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/integrate_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/merge_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/defaults/study_def.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/export.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/load.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/parameters.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/plot.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/save.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/study/study5_schema.json +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/wizard/README.md +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/wizard/__init__.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/wizard/example.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/src/masster/wizard/wizard.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/conftest.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_chromatogram.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_defaults.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_imports.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_integration.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_logger.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_parameters.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_sample.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_spectrum.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_study.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tests/test_version.py +0 -0
- {masster-0.5.7 → masster-0.5.9}/tox.ini +0 -0
```diff
@@ -3,15 +3,15 @@
 Simple logger system for masster Study and Sample instances.
 Uses basic Python logging instead of complex loguru filtering.
 
-# 
+# Universal colors compatible with both dark and light themes
 level_colors = {
-    'TRACE': '\x1b[
-    'DEBUG': '\x1b[
-    'INFO': '\x1b[
-    'SUCCESS': '\x1b[
-    'WARNING': '\x1b[
-    'ERROR': '\x1b[
-    'CRITICAL': '\x1b[
+    'TRACE': '\x1b[94m',     # bright blue (readable on both dark/light)
+    'DEBUG': '\x1b[96m',     # bright cyan (readable on both dark/light)
+    'INFO': '\x1b[90m',      # bright black/gray (readable on both dark/light)
+    'SUCCESS': '\x1b[92m',   # bright green (readable on both dark/light)
+    'WARNING': '\x1b[93m',   # bright yellow (readable on both dark/light)
+    'ERROR': '\x1b[91m',     # bright red (readable on both dark/light)
+    'CRITICAL': '\x1b[95m',  # bright magenta (readable on both dark/light)
 }
 
 level_str = record.levelname.ljust(8)
```
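These are standard bright ANSI SGR codes, so the new palette can be previewed in any ANSI-capable terminal. A minimal, self-contained sketch (illustrative only, not part of the package) that prints each level in its new color:

```python
# Preview the ANSI codes introduced above; the mapping mirrors the diff,
# the print loop is purely illustrative.
level_colors = {
    "TRACE": "\x1b[94m",     # bright blue
    "DEBUG": "\x1b[96m",     # bright cyan
    "INFO": "\x1b[90m",      # bright black/gray
    "SUCCESS": "\x1b[92m",   # bright green
    "WARNING": "\x1b[93m",   # bright yellow
    "ERROR": "\x1b[91m",     # bright red
    "CRITICAL": "\x1b[95m",  # bright magenta
}
RESET = "\x1b[0m"

for level, color in level_colors.items():
    print(f"{color}{level.ljust(8)}{RESET} sample message")
```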
```diff
@@ -102,19 +102,19 @@ class MassterLogger:
         dt = datetime.datetime.fromtimestamp(record.created)
         timestamp = dt.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]  # Remove last 3 digits for milliseconds
 
-        # 
+        # Universal colors compatible with both dark and light themes
         level_colors = {
-            "TRACE": "\x1b[
-            "DEBUG": "\x1b[
-            "INFO": "\x1b[
-            "SUCCESS": "\x1b[
-            "WARNING": "\x1b[
-            "ERROR": "\x1b[
-            "CRITICAL": "\x1b[
+            "TRACE": "\x1b[94m",     # bright blue (readable on both dark/light)
+            "DEBUG": "\x1b[96m",     # bright cyan (readable on both dark/light)
+            "INFO": "\x1b[90m",      # bright black/gray (readable on both dark/light)
+            "SUCCESS": "\x1b[92m",   # bright green (readable on both dark/light)
+            "WARNING": "\x1b[93m",   # bright yellow (readable on both dark/light)
+            "ERROR": "\x1b[91m",     # bright red (readable on both dark/light)
+            "CRITICAL": "\x1b[95m",  # bright magenta (readable on both dark/light)
         }
 
         level_str = record.levelname.ljust(8)
-        level_color = level_colors.get(record.levelname, "\x1b[
+        level_color = level_colors.get(record.levelname, "\x1b[90m")  # default to gray instead of white
         label_part = self.label + " | " if self.label else ""
 
         # For DEBUG and TRACE levels, add module/location information
```
```diff
@@ -133,9 +133,9 @@ class MassterLogger:
             f"\x1b[90m{module_name}:{func_name}:{line_no}\x1b[0m | "  # dim gray for location info
         )
 
-        # 
+        # Universal format: timestamp | level | location | label - message
         return (
-            f"\x1b[
+            f"\x1b[90m{timestamp}\x1b[0m | "  # gray timestamp (universal for both themes)
             f"{level_color}{level_str}\x1b[0m | "  # colored level
             f"{location_info}"  # location info for DEBUG/TRACE
             f"{level_color}{label_part}\x1b[0m"  # colored label
```
```diff
@@ -181,19 +181,19 @@ class MassterLogger:
         dt = datetime.datetime.fromtimestamp(record.created)
         timestamp = dt.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
 
-        # 
+        # Universal colors compatible with both dark and light themes
        level_colors = {
-            "TRACE": "\x1b[
-            "DEBUG": "\x1b[
-            "INFO": "\x1b[
-            "SUCCESS": "\x1b[
-            "WARNING": "\x1b[
-            "ERROR": "\x1b[
-            "CRITICAL": "\x1b[
+            "TRACE": "\x1b[94m",     # bright blue (readable on both dark/light)
+            "DEBUG": "\x1b[96m",     # bright cyan (readable on both dark/light)
+            "INFO": "\x1b[90m",      # bright black/gray (readable on both dark/light)
+            "SUCCESS": "\x1b[92m",   # bright green (readable on both dark/light)
+            "WARNING": "\x1b[93m",   # bright yellow (readable on both dark/light)
+            "ERROR": "\x1b[91m",     # bright red (readable on both dark/light)
+            "CRITICAL": "\x1b[95m",  # bright magenta (readable on both dark/light)
         }
 
         level_str = record.levelname.ljust(8)
-        level_color = level_colors.get(record.levelname, "\x1b[
+        level_color = level_colors.get(record.levelname, "\x1b[90m")  # default to gray instead of white
         label_part = self.label + " | " if self.label else ""
 
         # For DEBUG and TRACE levels, add module/location information
```
```diff
@@ -212,9 +212,9 @@ class MassterLogger:
             f"\x1b[90m{module_name}:{func_name}:{line_no}\x1b[0m | "  # dim gray for location info
         )
 
-        # 
+        # Universal format: timestamp | level | location | label - message
         return (
-            f"\x1b[
+            f"\x1b[90m{timestamp}\x1b[0m | "  # gray timestamp (universal for both themes)
             f"{level_color}{level_str}\x1b[0m | "  # colored level
             f"{location_info}"  # location info for DEBUG/TRACE
             f"{level_color}{label_part}\x1b[0m"  # colored label
```
```diff
@@ -245,19 +245,19 @@ class MassterLogger:
         dt = datetime.datetime.fromtimestamp(record.created)
         timestamp = dt.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
 
-        # 
+        # Universal colors compatible with both dark and light themes
         level_colors = {
-            "TRACE": "\x1b[
-            "DEBUG": "\x1b[
-            "INFO": "\x1b[
-            "SUCCESS": "\x1b[
-            "WARNING": "\x1b[
-            "ERROR": "\x1b[
-            "CRITICAL": "\x1b[
+            "TRACE": "\x1b[94m",     # bright blue (readable on both dark/light)
+            "DEBUG": "\x1b[96m",     # bright cyan (readable on both dark/light)
+            "INFO": "\x1b[90m",      # bright black/gray (readable on both dark/light)
+            "SUCCESS": "\x1b[92m",   # bright green (readable on both dark/light)
+            "WARNING": "\x1b[93m",   # bright yellow (readable on both dark/light)
+            "ERROR": "\x1b[91m",     # bright red (readable on both dark/light)
+            "CRITICAL": "\x1b[95m",  # bright magenta (readable on both dark/light)
         }
 
         level_str = record.levelname.ljust(8)
-        level_color = level_colors.get(record.levelname, "\x1b[
+        level_color = level_colors.get(record.levelname, "\x1b[90m")  # default to gray instead of white
         label_part = self.label + " | " if self.label else ""
 
         # For DEBUG and TRACE levels, add module/location information
```
```diff
@@ -276,9 +276,9 @@ class MassterLogger:
             f"\x1b[90m{module_name}:{func_name}:{line_no}\x1b[0m | "  # dim gray for location info
         )
 
-        # 
+        # Universal format: timestamp | level | location | label - message
        return (
-            f"\x1b[
+            f"\x1b[90m{timestamp}\x1b[0m | "  # gray timestamp (universal for both themes)
             f"{level_color}{level_str}\x1b[0m | "  # colored level
             f"{location_info}"  # location info for DEBUG/TRACE
             f"{level_color}{label_part}\x1b[0m"  # colored label
```
```diff
@@ -332,8 +332,22 @@ class MassterLogger:
         self.logger_instance.info(message, *args, **kwargs)
 
     def success(self, message: str, *args, **kwargs):
-        """Log a SUCCESS level message (
-
+        """Log a SUCCESS level message (custom level)."""
+        # Create a custom log record with SUCCESS level
+        import logging
+
+        # Create a LogRecord manually with SUCCESS level
+        record = self.logger_instance.makeRecord(
+            self.logger_instance.name,
+            logging.INFO,  # Use INFO level for Python's filtering
+            "", 0, message, args, None, func="success"
+        )
+        # Override the levelname for display
+        record.levelname = "SUCCESS"
+
+        # Handle the record directly through our handler
+        if self.handler:
+            self.handler.handle(record)
 
     def warning(self, message: str, *args, **kwargs):
         """Log a WARNING level message."""
```
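The `success()` implementation above fakes a custom SUCCESS level on top of the standard library: the record is created at INFO severity (so normal level filtering still applies) and its `levelname` is overridden for display before being handed straight to the handler. A standalone sketch of the same technique, using only stdlib `logging` and hypothetical names rather than masster's actual classes:

```python
# Standalone sketch of the SUCCESS-relabeling technique (illustrative names only).
import logging
import sys

logger = logging.getLogger("demo")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("%(levelname)-8s | %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)

def success(message: str) -> None:
    # Build the record at INFO severity so level filtering behaves normally,
    # then relabel it as SUCCESS purely for display.
    record = logger.makeRecord(logger.name, logging.INFO, "", 0, message, (), None, func="success")
    record.levelname = "SUCCESS"
    handler.handle(record)

success("Feature detection completed.")  # -> SUCCESS  | Feature detection completed.
```

One consequence of this approach is that the record bypasses the logger's own `handle()` call, so it only reaches the single handler it is passed to.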
```diff
@@ -372,3 +386,4 @@ class MassterLogger:
 
     def __repr__(self):
         return f"MassterLogger(type={self.instance_type}, id={self.instance_id}, level={self.level})"
+
```
```diff
@@ -295,7 +295,7 @@ def _save_sample5(
 
     # Store lib and lib_match - removed (no longer saving lib data)
 
-    self.logger.
+    self.logger.success(f"Sample saved to {filename}")
     if save_featurexml:
         # Get or recreate the feature map if needed
         feature_map = self._get_feature_map()
```
```diff
@@ -234,7 +234,7 @@ def _handle_sample_plot_output(self, plot_obj, filename=None, plot_type="bokeh")
             from bokeh.io import save
             output_file(filename)
             save(plot_obj)
-            self.logger.
+            self.logger.success(f"Plot saved to: {abs_filename}")
         elif filename.endswith(".png"):
             try:
                 if plot_type == "bokeh":
```
```diff
@@ -243,7 +243,7 @@ def _handle_sample_plot_output(self, plot_obj, filename=None, plot_type="bokeh")
                 elif plot_type in ["panel", "holoviews"]:
                     import holoviews as hv
                     hv.save(plot_obj, filename, fmt="png")
-                self.logger.
+                self.logger.success(f"Plot saved to: {abs_filename}")
             except Exception:
                 # Fall back to HTML if PNG export not available
                 html_filename = filename.replace('.png', '.html')
```
```diff
@@ -268,7 +268,7 @@ def _handle_sample_plot_output(self, plot_obj, filename=None, plot_type="bokeh")
                 elif plot_type in ["panel", "holoviews"]:
                     import holoviews as hv
                     hv.save(plot_obj, filename, fmt="pdf")
-                self.logger.
+                self.logger.success(f"Plot saved to: {abs_filename}")
             except ImportError:
                 # Fall back to HTML if PDF export not available
                 html_filename = filename.replace('.pdf', '.html')
```
```diff
@@ -296,7 +296,7 @@ def _handle_sample_plot_output(self, plot_obj, filename=None, plot_type="bokeh")
             from bokeh.io import save
             output_file(filename)
             save(plot_obj)
-            self.logger.
+            self.logger.success(f"Plot saved to: {abs_filename}")
         else:
             # Show in notebook when no filename provided
             if plot_type == "panel":
```
```diff
@@ -796,7 +796,7 @@ def find_features(self, **kwargs):
 
     self.features_df = df
     #self._features_sync()
-    self.logger.
+    self.logger.success(f"Feature detection completed. Total features: {len(df)}")
 
     # store params
     self.update_history(["find_features"], params.to_dict())
```
```diff
@@ -1263,7 +1263,7 @@ def find_ms2(self, **kwargs):
     )
 
     # Log completion
-    self.logger.
+    self.logger.success(
         f"MS2 linking completed. Total features with MS2 data: {c}",
     )
     self.features_df = features_df
```
```diff
@@ -1425,7 +1425,7 @@ def find_iso(self, rt_tolerance: float = 0.1, **kwargs):
 
     # Log results
     non_null_count = len([spec for spec in ms1_specs if spec is not None])
-    self.logger.
+    self.logger.success(f"Extracted isotopic distributions for {non_null_count}/{len(ms1_specs)} features.")
 
     # Store parameters in history
     params_dict = {"rt_tolerance": rt_tolerance}
```
```diff
@@ -148,10 +148,10 @@ def export_features(self, filename="features.csv"):
     )
     if filename.lower().endswith((".xls", ".xlsx")):
         clean_df.to_pandas().to_excel(filename, index=False)
-        self.logger.
+        self.logger.success(f"Features exported to {filename} (Excel format)")
     else:
         clean_df.write_csv(filename)
-        self.logger.
+        self.logger.success(f"Features exported to {filename}")
 
 
 def export_mgf(
```
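The branch above dispatches on the output filename's extension: `.xls`/`.xlsx` routes through pandas' `to_excel`, everything else through polars' `write_csv`. A tiny self-contained sketch of the same dispatch with a hypothetical helper name (toy data; the Excel path additionally needs `openpyxl`):

```python
import polars as pl

def export_table(df: pl.DataFrame, filename: str) -> None:
    # Mirror the extension-based dispatch: Excel via pandas, otherwise CSV via polars.
    if filename.lower().endswith((".xls", ".xlsx")):
        df.to_pandas().to_excel(filename, index=False)
    else:
        df.write_csv(filename)

export_table(pl.DataFrame({"mz": [150.05, 220.11], "rt": [35.2, 88.9]}), "features.csv")
```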
```diff
@@ -649,7 +649,7 @@ def export_mgf(
         elif result == "empty_ms2":
             empty_ms2_count += 1
 
-    self.logger.
+    self.logger.success(f"Exported {ms1_spec_used_count} MS1 spectra and {c} MS2 spectra to {filename}")
     if empty_ms2_count > 0:
         self.logger.info(f"Skipped {empty_ms2_count} empty MS2 spectra")
     if ms1_fallback_count > 0:
```
```diff
@@ -824,7 +824,7 @@ def export_dda_stats(self, filename="stats.csv"):
         for line in lines:
             f.write(line + "\n")
 
-    self.logger.
+    self.logger.success(f"DDA statistics exported to {filename}")
 
 
 def export_xlsx(self, filename="features.xlsx"):
```
```diff
@@ -877,7 +877,7 @@ def export_xlsx(self, filename="features.xlsx"):
     pandas_df = clean_df.to_pandas()
     pandas_df.to_excel(filename, index=False)
 
-    self.logger.
+    self.logger.success(f"Features exported to {filename} (Excel format)")
     self.logger.debug(f"Exported {len(clean_df)} features with {len(exportable_columns)} columns")
 
 
```
```diff
@@ -1738,7 +1738,7 @@ def _save_study5(self, filename):
         )
         pbar.update(1)
 
-    self.logger.
+    self.logger.success(f"Study saved successfully to {filename}")
     self.logger.debug(f"Save completed for {filename}")
     self.logger.debug(f"Save completed for {filename}")
 
```
```diff
@@ -1440,7 +1440,7 @@ def compress(self, features=True, ms2=True, chrom=False, ms2_max=5):
         self.compress_ms2(max_replicates=ms2_max)
     if chrom:
         self.compress_chrom()
-    self.logger.
+    self.logger.success("Compression completed")
 
 
 def compress_features(self):
```
```diff
@@ -1886,7 +1886,7 @@ def restore_chrom(self, samples=None, mz_tol=0.010, rt_tol=10.0):
             self.logger.error(f"Failed to gap-fill sample {sample_name}: {e}")
             continue
 
-    self.logger.
+    self.logger.success(f"Phase 2 complete: Gap-filled {filled_count} chromatograms")
 
     # Final summary
     final_non_null = self.features_df.filter(pl.col("chrom").is_not_null()).height
```
```diff
@@ -2051,7 +2051,7 @@ def sample_name_replace(self, replace_dict):
         pl.Series("sample_name", new_names).alias("sample_name"),
     )
 
-    self.logger.
+    self.logger.success(f"Successfully replaced {replaced_count} sample names")
 
 
 def sample_name_reset(self):
```
```diff
@@ -2956,6 +2956,17 @@ def consensus_select(
     rt_delta_mean=None,
     id_top_score=None,
     identified=None,
+    # New adduct filter parameters
+    adduct_top=None,
+    adduct_charge_top=None,
+    adduct_mass_neutral_top=None,
+    adduct_mass_shift_top=None,
+    adduct_group=None,
+    adduct_of=None,
+    # New identification filter parameters
+    id_top_name=None,
+    id_top_class=None,
+    id_top_adduct=None,
     sortby=None,
     descending=True,
 ):
```
```diff
@@ -2990,6 +3001,17 @@ def consensus_select(
         - True: select only rows with id_top_name not null
         - False: select only rows with id_top_name null
         - None: no filtering (default)
+    # New adduct filter parameters
+    adduct_top: adduct type filter (list or single string value, e.g. "[M+H]+", "[M+Na]+")
+    adduct_charge_top: adduct charge filter (tuple for range, single value for exact match)
+    adduct_mass_neutral_top: neutral mass filter (tuple for range, single value for minimum)
+    adduct_mass_shift_top: adduct mass shift filter (tuple for range, single value for minimum)
+    adduct_group: adduct group ID filter (list, single value, or tuple for range)
+    adduct_of: adduct representative UID filter (list, single value, or tuple for range)
+    # New identification filter parameters
+    id_top_name: identification name filter (list or single string value for compound names)
+    id_top_class: identification class filter (list or single string value for compound classes)
+    id_top_adduct: identification adduct filter (list or single string value for identified adducts)
     sortby: column name(s) to sort by (string, list of strings, or None for no sorting)
     descending: sort direction (True for descending, False for ascending, default is True)
 
```
```diff
@@ -3004,7 +3026,10 @@ def consensus_select(
     filter_params = [mz, rt, inty_mean, consensus_uid, consensus_id, number_samples,
                      number_ms2, quality, bl, chrom_coherence_mean, chrom_prominence_mean,
                      chrom_prominence_scaled_mean, chrom_height_scaled_mean,
-                     rt_delta_mean, id_top_score, identified
+                     rt_delta_mean, id_top_score, identified,
+                     # New adduct and identification parameters
+                     adduct_top, adduct_charge_top, adduct_mass_neutral_top, adduct_mass_shift_top,
+                     adduct_group, adduct_of, id_top_name, id_top_class, id_top_adduct]
 
     if all(param is None for param in filter_params) and sortby is None:
         return self.consensus_df.clone()
```
```diff
@@ -3132,6 +3157,126 @@ def consensus_select(
         else:
             warnings.append("'id_top_name' column not found in consensus_df")
 
+    # Handle adduct_top filter (string or list)
+    if adduct_top is not None:
+        if "adduct_top" in available_columns:
+            if isinstance(adduct_top, list):
+                filter_conditions.append(pl.col("adduct_top").is_in(adduct_top))
+            else:
+                filter_conditions.append(pl.col("adduct_top") == adduct_top)
+        else:
+            warnings.append("'adduct_top' column not found in consensus_df")
+
+    # Handle adduct_charge_top filter (single value, range tuple, or list)
+    if adduct_charge_top is not None:
+        if "adduct_charge_top" in available_columns:
+            if isinstance(adduct_charge_top, tuple) and len(adduct_charge_top) == 2:
+                filter_conditions.append(
+                    (pl.col("adduct_charge_top") >= adduct_charge_top[0]) &
+                    (pl.col("adduct_charge_top") <= adduct_charge_top[1])
+                )
+            elif isinstance(adduct_charge_top, list):
+                filter_conditions.append(pl.col("adduct_charge_top").is_in(adduct_charge_top))
+            else:
+                filter_conditions.append(pl.col("adduct_charge_top") == adduct_charge_top)
+        else:
+            warnings.append("'adduct_charge_top' column not found in consensus_df")
+
+    # Handle adduct_mass_neutral_top filter (single value, range tuple, or list)
+    if adduct_mass_neutral_top is not None:
+        if "adduct_mass_neutral_top" in available_columns:
+            if isinstance(adduct_mass_neutral_top, tuple) and len(adduct_mass_neutral_top) == 2:
+                filter_conditions.append(
+                    (pl.col("adduct_mass_neutral_top") >= adduct_mass_neutral_top[0]) &
+                    (pl.col("adduct_mass_neutral_top") <= adduct_mass_neutral_top[1])
+                )
+            elif isinstance(adduct_mass_neutral_top, list):
+                filter_conditions.append(pl.col("adduct_mass_neutral_top").is_in(adduct_mass_neutral_top))
+            else:
+                filter_conditions.append(pl.col("adduct_mass_neutral_top") == adduct_mass_neutral_top)
+        else:
+            warnings.append("'adduct_mass_neutral_top' column not found in consensus_df")
+
+    # Handle adduct_mass_shift_top filter (single value, range tuple, or list)
+    if adduct_mass_shift_top is not None:
+        if "adduct_mass_shift_top" in available_columns:
+            if isinstance(adduct_mass_shift_top, tuple) and len(adduct_mass_shift_top) == 2:
+                filter_conditions.append(
+                    (pl.col("adduct_mass_shift_top") >= adduct_mass_shift_top[0]) &
+                    (pl.col("adduct_mass_shift_top") <= adduct_mass_shift_top[1])
+                )
+            elif isinstance(adduct_mass_shift_top, list):
+                filter_conditions.append(pl.col("adduct_mass_shift_top").is_in(adduct_mass_shift_top))
+            else:
+                filter_conditions.append(pl.col("adduct_mass_shift_top") == adduct_mass_shift_top)
+        else:
+            warnings.append("'adduct_mass_shift_top' column not found in consensus_df")
+
+    # Handle adduct_group filter (single value or list)
+    if adduct_group is not None:
+        if "adduct_group" in available_columns:
+            if isinstance(adduct_group, list):
+                filter_conditions.append(pl.col("adduct_group").is_in(adduct_group))
+            else:
+                filter_conditions.append(pl.col("adduct_group") == adduct_group)
+        else:
+            warnings.append("'adduct_group' column not found in consensus_df")
+
+    # Handle adduct_of filter (single value or list)
+    if adduct_of is not None:
+        if "adduct_of" in available_columns:
+            if isinstance(adduct_of, list):
+                filter_conditions.append(pl.col("adduct_of").is_in(adduct_of))
+            else:
+                filter_conditions.append(pl.col("adduct_of") == adduct_of)
+        else:
+            warnings.append("'adduct_of' column not found in consensus_df")
+
+    # Handle id_top_name filter (string or list)
+    if id_top_name is not None:
+        if "id_top_name" in available_columns:
+            if isinstance(id_top_name, list):
+                filter_conditions.append(pl.col("id_top_name").is_in(id_top_name))
+            else:
+                filter_conditions.append(pl.col("id_top_name") == id_top_name)
+        else:
+            warnings.append("'id_top_name' column not found in consensus_df")
+
+    # Handle id_top_class filter (string or list)
+    if id_top_class is not None:
+        if "id_top_class" in available_columns:
+            if isinstance(id_top_class, list):
+                filter_conditions.append(pl.col("id_top_class").is_in(id_top_class))
+            else:
+                filter_conditions.append(pl.col("id_top_class") == id_top_class)
+        else:
+            warnings.append("'id_top_class' column not found in consensus_df")
+
+    # Handle id_top_adduct filter (string or list)
+    if id_top_adduct is not None:
+        if "id_top_adduct" in available_columns:
+            if isinstance(id_top_adduct, list):
+                filter_conditions.append(pl.col("id_top_adduct").is_in(id_top_adduct))
+            else:
+                filter_conditions.append(pl.col("id_top_adduct") == id_top_adduct)
+        else:
+            warnings.append("'id_top_adduct' column not found in consensus_df")
+
+    # Handle id_top_score filter (single value, range tuple, or list)
+    if id_top_score is not None:
+        if "id_top_score" in available_columns:
+            if isinstance(id_top_score, tuple) and len(id_top_score) == 2:
+                filter_conditions.append(
+                    (pl.col("id_top_score") >= id_top_score[0]) &
+                    (pl.col("id_top_score") <= id_top_score[1])
+                )
+            elif isinstance(id_top_score, list):
+                filter_conditions.append(pl.col("id_top_score").is_in(id_top_score))
+            else:
+                filter_conditions.append(pl.col("id_top_score") == id_top_score)
+        else:
+            warnings.append("'id_top_score' column not found in consensus_df")
+
     # Log warnings once
     for warning in warnings:
         self.logger.warning(warning)
```
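The dispatch used by these new filters treats a 2-tuple as an inclusive range, a list as a membership test, and anything else as an exact match. A self-contained sketch of that convention on a toy polars frame (toy columns and values, not masster's actual schema):

```python
import polars as pl

def to_condition(column: str, value) -> pl.Expr:
    # 2-tuple -> inclusive range, list -> membership, scalar -> equality
    if isinstance(value, tuple) and len(value) == 2:
        return (pl.col(column) >= value[0]) & (pl.col(column) <= value[1])
    if isinstance(value, list):
        return pl.col(column).is_in(value)
    return pl.col(column) == value

df = pl.DataFrame({
    "adduct_top": ["[M+H]+", "[M+Na]+", "[M+H]+"],
    "id_top_score": [0.91, 0.42, 0.77],
})
selected = df.filter(
    to_condition("adduct_top", ["[M+H]+", "[M+Na]+"]) & to_condition("id_top_score", (0.5, 1.0))
)
print(selected)  # keeps the rows with an allowed adduct and a score between 0.5 and 1.0
```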
```diff
@@ -4477,7 +4622,7 @@ def decompress(self, features=True, ms2=True, chrom=True, samples=None, **kwargs
 
         self.restore_ms2(samples=samples, **ms2_kwargs)
 
-        self.logger.
+        self.logger.success("Adaptive decompression completed successfully")
 
     except Exception as e:
         self.logger.error(f"Decompression failed: {e}")
```
```diff
@@ -1093,7 +1093,7 @@ def id_reset(study):
         del study.history["identify"]
 
     if logger:
-        logger.
+        logger.success("Identification data reset completed")
 
 
 def lib_reset(study):
```
```diff
@@ -1198,10 +1198,10 @@ def lib_reset(study):
         del study.history["lib_to_consensus"]
 
     if logger:
-        logger.
+        logger.success("Library and identification data reset completed")
 
 
-def _get_adducts(study, adducts_list: list = None, **kwargs):
+def _get_adducts(study, adducts_list: list | None = None, **kwargs):
     """
     Generate comprehensive adduct specifications for study-level adduct filtering.
 
```
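The only non-logging change in this hunk is the annotation: `list = None` becomes `list | None = None`. The `X | Y` union syntax (PEP 604) is evaluated in function annotations at definition time and therefore requires Python 3.10+ (or postponed evaluation via `from __future__ import annotations`); on older interpreters the equivalent spelling is `typing.Optional[list]`. A small sketch of that pre-3.10 equivalent, using a hypothetical function name and placeholder body:

```python
from typing import Optional

def _get_adducts_pre310(study, adducts_list: Optional[list] = None, **kwargs):
    """Equivalent signature for Python < 3.10; the body here is a placeholder."""
    ...
```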
```diff
@@ -1978,4 +1978,4 @@ def lib_to_consensus(study, chrom_fhwm: float = 5.0, mz_tol: float = 0.01, rt_to
         logger.warning(f"find_ms2 failed: {e}")
 
     if logger:
-        logger.
+        logger.success(f"lib_to_consensus completed: {len(consensus_metadata)} features added")
```