peak-performance 0.7.0__py3-none-any.whl → 0.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,19 +1,20 @@
1
- """
2
- PeakPerformance
3
- Copyright (C) 2023 Forschungszentrum Jülich GmbH
1
+ # PeakPerformance
2
+ # Copyright (C) 2023 Forschungszentrum Jülich GmbH
4
3
 
5
- This program is free software: you can redistribute it and/or modify
6
- it under the terms of the GNU Affero General Public License as published
7
- by the Free Software Foundation, either version 3 of the License, or
8
- (at your option) any later version.
4
+ # This program is free software: you can redistribute it and/or modify
5
+ # it under the terms of the GNU Affero General Public License as published
6
+ # by the Free Software Foundation, either version 3 of the License, or
7
+ # (at your option) any later version.
9
8
 
10
- This program is distributed in the hope that it will be useful,
11
- but WITHOUT ANY WARRANTY; without even the implied warranty of
12
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
- GNU Affero General Public License for more details.
9
+ # This program is distributed in the hope that it will be useful,
10
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
11
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
+ # GNU Affero General Public License for more details.
14
13
 
15
- You should have received a copy of the GNU Affero General Public License
16
- along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+ # You should have received a copy of the GNU Affero General Public License
15
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
16
+ """
17
+ This module contains functions for creating various kinds of peak models and to make initial guesses for their parameters.
17
18
  """
18
19
 
19
20
  from enum import Enum
@@ -28,12 +29,19 @@ import scipy.stats as st
28
29
 
29
30
 
30
31
  class ModelType(str, Enum):
31
- """Class containing all implemented model types."""
32
+ """Enum of default model types."""
32
33
 
33
34
  Normal = "normal"
35
+ """Shape of a Gaussian Normal PDF."""
36
+
34
37
  SkewNormal = "skew_normal"
38
+ """Shape of a skewed Normal PDF."""
39
+
35
40
  DoubleNormal = "double_normal"
41
+ """Superposition of two ``Normal`` peaks."""
42
+
36
43
  DoubleSkewNormal = "double_skew_normal"
44
+ """Superposition of two ``SkewedNormal`` peaks."""
37
45
 
38
46
 
39
47
  def guess_noise(intensity):
@@ -184,13 +192,13 @@ def define_model_normal(time: np.ndarray, intensity: np.ndarray) -> pm.Model:
184
192
  """
185
193
  slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
186
194
  with pm.Model() as pmodel:
187
- # add observations to the pmodel as ConstantData
188
- pm.ConstantData("time", time)
189
- pm.ConstantData("intensity", intensity)
190
- # add guesses to the pmodel as ConstantData
191
- pm.ConstantData("intercept_guess", intercept_guess)
192
- pm.ConstantData("slope_guess", slope_guess)
193
- noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
195
+ # add observations to the pmodel as Data
196
+ pm.Data("time", time)
197
+ pm.Data("intensity", intensity)
198
+ # add guesses to the pmodel as Data
199
+ pm.Data("intercept_guess", intercept_guess)
200
+ pm.Data("slope_guess", slope_guess)
201
+ noise_guess = pm.Data("noise_width_guess", noise_width_guess)
194
202
 
195
203
  # priors plus error handling in case of mathematically impermissible values
196
204
  baseline_intercept = pm.Normal(
@@ -343,13 +351,13 @@ def define_model_double_normal(time: np.ndarray, intensity: np.ndarray) -> pm.Mo
343
351
  slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
344
352
  coords = {"subpeak": [0, 1]}
345
353
  with pm.Model(coords=coords) as pmodel:
346
- # add observations to the pmodel as ConstantData
347
- pm.ConstantData("time", time)
348
- pm.ConstantData("intensity", intensity)
349
- # add guesses to the pmodel as ConstantData
350
- pm.ConstantData("intercept_guess", intercept_guess)
351
- pm.ConstantData("slope_guess", slope_guess)
352
- noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
354
+ # add observations to the pmodel as Data
355
+ pm.Data("time", time)
356
+ pm.Data("intensity", intensity)
357
+ # add guesses to the pmodel as Data
358
+ pm.Data("intercept_guess", intercept_guess)
359
+ pm.Data("slope_guess", slope_guess)
360
+ noise_guess = pm.Data("noise_width_guess", noise_width_guess)
353
361
 
354
362
  # priors
355
363
  baseline_intercept = pm.Normal(
@@ -392,10 +400,9 @@ def std_skew_calculation(scale, alpha):
392
400
  Skewness parameter of the skew normal distribution.
393
401
 
394
402
  Returns
395
- ----------
403
+ -------
396
404
  std
397
405
  Standard deviation of a skew normal distribution.
398
- -------
399
406
  """
400
407
  return np.sqrt(scale**2 * (1 - (2 * alpha**2) / ((alpha**2 + 1) * np.pi)))
401
408
 
@@ -414,7 +421,7 @@ def mean_skew_calculation(loc, scale, alpha):
414
421
  Skewness parameter of the skew normal distribution.
415
422
 
416
423
  Returns
417
- ----------
424
+ -------
418
425
  mean
419
426
  Arithmetic mean of a skew normal distribution.
420
427
  """
@@ -488,7 +495,7 @@ def height_calculation(area, loc, scale, alpha, mode_skew):
488
495
  Mode of the skew normal distribution.
489
496
 
490
497
  Returns
491
- ----------
498
+ -------
492
499
  mean
493
500
  Arithmetic mean of a skew normal distribution.
494
501
  """
@@ -552,13 +559,13 @@ def define_model_skew(time: np.ndarray, intensity: np.ndarray) -> pm.Model:
552
559
  """
553
560
  slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
554
561
  with pm.Model() as pmodel:
555
- # add observations to the pmodel as ConstantData
556
- pm.ConstantData("time", time)
557
- pm.ConstantData("intensity", intensity)
558
- # add guesses to the pmodel as ConstantData
559
- pm.ConstantData("intercept_guess", intercept_guess)
560
- pm.ConstantData("slope_guess", slope_guess)
561
- noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
562
+ # add observations to the pmodel as Data
563
+ pm.Data("time", time)
564
+ pm.Data("intensity", intensity)
565
+ # add guesses to the pmodel as Data
566
+ pm.Data("intercept_guess", intercept_guess)
567
+ pm.Data("slope_guess", slope_guess)
568
+ noise_guess = pm.Data("noise_width_guess", noise_width_guess)
562
569
 
563
570
  # priors plus error handling in case of mathematically impermissible values
564
571
  baseline_intercept = pm.Normal(
@@ -668,13 +675,13 @@ def define_model_double_skew_normal(time: np.ndarray, intensity: np.ndarray) ->
668
675
  slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
669
676
  coords = {"subpeak": [0, 1]}
670
677
  with pm.Model(coords=coords) as pmodel:
671
- # add observations to the pmodel as ConstantData
672
- pm.ConstantData("time", time)
673
- pm.ConstantData("intensity", intensity)
674
- # add guesses to the pmodel as ConstantData
675
- pm.ConstantData("intercept_guess", intercept_guess)
676
- pm.ConstantData("slope_guess", slope_guess)
677
- noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
678
+ # add observations to the pmodel as Data
679
+ pm.Data("time", time)
680
+ pm.Data("intensity", intensity)
681
+ # add guesses to the pmodel as Data
682
+ pm.Data("intercept_guess", intercept_guess)
683
+ pm.Data("slope_guess", slope_guess)
684
+ noise_guess = pm.Data("noise_width_guess", noise_width_guess)
678
685
 
679
686
  # priors plus error handling in case of mathematically impermissible values
680
687
  baseline_intercept = pm.Normal(
@@ -1,19 +1,20 @@
1
- """
2
- PeakPerformance
3
- Copyright (C) 2023 Forschungszentrum Jülich GmbH
1
+ # PeakPerformance
2
+ # Copyright (C) 2023 Forschungszentrum Jülich GmbH
4
3
 
5
- This program is free software: you can redistribute it and/or modify
6
- it under the terms of the GNU Affero General Public License as published
7
- by the Free Software Foundation, either version 3 of the License, or
8
- (at your option) any later version.
4
+ # This program is free software: you can redistribute it and/or modify
5
+ # it under the terms of the GNU Affero General Public License as published
6
+ # by the Free Software Foundation, either version 3 of the License, or
7
+ # (at your option) any later version.
9
8
 
10
- This program is distributed in the hope that it will be useful,
11
- but WITHOUT ANY WARRANTY; without even the implied warranty of
12
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
- GNU Affero General Public License for more details.
9
+ # This program is distributed in the hope that it will be useful,
10
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
11
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
+ # GNU Affero General Public License for more details.
14
13
 
15
- You should have received a copy of the GNU Affero General Public License
16
- along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+ # You should have received a copy of the GNU Affero General Public License
15
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
16
+ """
17
+ Defines steps for a pipeline to process LC-MS-MS data.
17
18
  """
18
19
 
19
20
  import importlib
@@ -1186,7 +1187,7 @@ def pipeline(
1186
1187
  Data format (suffix) of the raw data, default is '.npy'.
1187
1188
 
1188
1189
  Returns
1189
- ----------
1190
+ -------
1190
1191
  path_results
1191
1192
  Path variable pointing to the newly created folder for this batch.
1192
1193
  """
@@ -1223,7 +1224,7 @@ def pipeline_restart(
1223
1224
  Path variable pointing to the directory of the broken PeakPerformance batch
1224
1225
 
1225
1226
  Returns
1226
- ----------
1227
+ -------
1227
1228
  path_results_new
1228
1229
  Path variable pointing to the newly created folder for the restarted batch.
1229
1230
  """
@@ -1322,7 +1323,7 @@ def parse_files_for_model_selection(signals: pandas.DataFrame) -> Dict[str, str]
1322
1323
  DataFrame containing the signals tab of Template.xlsx.
1323
1324
 
1324
1325
  Returns
1325
- ----------
1326
+ -------
1326
1327
  files_for_selection
1327
1328
  Dict with file names as keys and unique identifiers as values.
1328
1329
  """
@@ -1429,7 +1430,7 @@ def model_selection_check(
1429
1430
  to be accepted.
1430
1431
 
1431
1432
  Returns
1432
- ----------
1433
+ -------
1433
1434
  selected_model
1434
1435
  Name of the selected model type.
1435
1436
  """
@@ -1472,7 +1473,7 @@ def selection_loop(
1472
1473
  "waic": widely applicable information criterion)
1473
1474
 
1474
1475
  Returns
1475
- ----------
1476
+ -------
1476
1477
  result_df
1477
1478
  DataFrame containing the ranking and scores of the model selection.
1478
1479
  model_dict
@@ -1564,7 +1565,7 @@ def model_selection(path_raw_data: Union[str, os.PathLike], *, ic: str = "loo"):
1564
1565
  "waic": widely applicable information criterion)
1565
1566
 
1566
1567
  Returns
1567
- ----------
1568
+ -------
1568
1569
  comparison_results
1569
1570
  DataFrame containing all rankings from model selection.
1570
1571
  model_dict
peak_performance/plots.py CHANGED
@@ -1,19 +1,20 @@
1
- """
2
- PeakPerformance
3
- Copyright (C) 2023 Forschungszentrum Jülich GmbH
1
+ # PeakPerformance
2
+ # Copyright (C) 2023 Forschungszentrum Jülich GmbH
4
3
 
5
- This program is free software: you can redistribute it and/or modify
6
- it under the terms of the GNU Affero General Public License as published
7
- by the Free Software Foundation, either version 3 of the License, or
8
- (at your option) any later version.
4
+ # This program is free software: you can redistribute it and/or modify
5
+ # it under the terms of the GNU Affero General Public License as published
6
+ # by the Free Software Foundation, either version 3 of the License, or
7
+ # (at your option) any later version.
9
8
 
10
- This program is distributed in the hope that it will be useful,
11
- but WITHOUT ANY WARRANTY; without even the implied warranty of
12
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
- GNU Affero General Public License for more details.
9
+ # This program is distributed in the hope that it will be useful,
10
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
11
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
+ # GNU Affero General Public License for more details.
14
13
 
15
- You should have received a copy of the GNU Affero General Public License
16
- along with this program. If not, see <https://www.gnu.org/licenses/>.
14
+ # You should have received a copy of the GNU Affero General Public License
15
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
16
+ """
17
+ Functions for preparing diagnostic and QC plots.
17
18
  """
18
19
 
19
20
  import os
@@ -0,0 +1,74 @@
1
+ Metadata-Version: 2.4
2
+ Name: peak_performance
3
+ Version: 0.7.3
4
+ Summary: A Python toolbox to fit chromatography peaks with uncertainty.
5
+ Author-email: Jochen Nießer <j.niesser@fz-juelich.de>, Michael Osthege <m.osthege@fz-juelich.de>
6
+ License: AGPLv3
7
+ Project-URL: homepage, https://jugit.fz-juelich.de/IBG-1/micropro/peak-performance
8
+ Project-URL: documentation, https://jugit.fz-juelich.de/IBG-1/micropro/peak-performance
9
+ Project-URL: repository, https://jugit.fz-juelich.de/IBG-1/micropro/peak-performance
10
+ Keywords: hplc,mass-spectrometry,uncertainty quantification
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Operating System :: OS Independent
13
+ Classifier: License :: OSI Approved :: GNU Affero General Public License v3
14
+ Classifier: Intended Audience :: Science/Research
15
+ Requires-Python: >=3.9
16
+ Description-Content-Type: text/markdown
17
+ License-File: LICENSE.md
18
+ Requires-Dist: arviz
19
+ Requires-Dist: matplotlib
20
+ Requires-Dist: numpy<3
21
+ Requires-Dist: pandas
22
+ Requires-Dist: pymc>=5.9.1
23
+ Requires-Dist: pytensor
24
+ Requires-Dist: scipy
25
+ Requires-Dist: openpyxl
26
+ Requires-Dist: numpy
27
+ Dynamic: license-file
28
+
29
+ [![PyPI version](https://img.shields.io/pypi/v/peak-performance)](https://pypi.org/project/peak-performance/)
30
+ [![pipeline](https://github.com/jubiotech/peak-performance/workflows/pipeline/badge.svg)](https://github.com/JuBiotech/peak-performance/actions)
31
+ [![coverage](https://codecov.io/gh/jubiotech/peak-performance/branch/main/graph/badge.svg)](https://app.codecov.io/gh/JuBiotech/peak-performance)
32
+ [![documentation](https://readthedocs.org/projects/peak-performance/badge/?version=latest)](https://peak-performance.readthedocs.io/en/latest)
33
+ [![DOI](https://joss.theoj.org/papers/10.21105/joss.07313/status.svg)](https://doi.org/10.21105/joss.07313)
34
+ [![DOI](https://zenodo.org/badge/713469041.svg)](https://zenodo.org/doi/10.5281/zenodo.10255543)
35
+
36
+ # About PeakPerformance
37
+ PeakPerformance employs Bayesian modeling for chromatographic peak data fitting.
38
+ This has the innate advantage of providing uncertainty quantification while jointly estimating all peak parameters united in a single peak model.
39
+ As Markov Chain Monte Carlo (MCMC) methods are utilized to infer the posterior probability distribution, convergence checks and the aforementioned uncertainty quantification are applied as novel quality metrics for a robust peak recognition.
40
+
41
+ # Installation
42
+
43
+ It is highly recommended to install ``PeakPerformance`` in a fresh Python environment by following these steps:
44
+ 1. Install the package manager [Mamba](https://github.com/conda-forge/miniforge/releases).
45
+ Choose the latest installer at the top of the page, click on "show all assets", and download an installer denominated by "Mambaforge-version number-name of your OS.exe", so e.g. "Mambaforge-23.3.1-1-Windows-x86_64.exe" for a Windows 64 bit operating system. Then, execute the installer to install mamba and activate the option "Add Mambaforge to my PATH environment variable".
46
+
47
+ ⚠ If you have already installed Miniconda, you can install Mamba on top of it but there are compatibility issues with Anaconda.
48
+
49
+ ℹ The newest conda version should also work, just replace `mamba` with `conda` in step 2.
50
+
51
+ 2. Create a new Python environment in the command line using the provided [`environment.yml`](https://github.com/JuBiotech/peak-performance/blob/main/environment.yml) file from the repo.
52
+ Download `environment.yml` first, then navigate to its location on the command line interface and run the following command:
53
+ ```
54
+ mamba env create -f environment.yml
55
+ ```
56
+
57
+ Naturally, it is alternatively possible to just install ``PeakPerformance`` via pip:
58
+
59
+ ```bash
60
+ pip install peak-performance
61
+ ```
62
+
63
+ # First steps
64
+ Be sure to check out our thorough [documentation](https://peak-performance.readthedocs.io/en/latest). It contains not only information on how to install PeakPerformance and prepare raw data for its application but also detailed treatises about the implemented model structures, validation with both synthetic and experimental data against a commercially available vendor software, exemplary usage of diagnostic plots and investigation of various effects.
65
+ Furthermore, you will find example notebooks and data sets showcasing different aspects of PeakPerformance.
66
+
67
+ # How to contribute
68
+ If you encounter bugs while using PeakPerformance, please bring them to our attention by opening an issue. When doing so, describe the problem in detail and add screenshots/code snippets and whatever other helpful material you can provide.
69
+ When contributing code, create a local clone of PeakPerformance, create a new branch, and open a pull request (PR).
70
+
71
+ # How to cite
72
+ Head over to Zenodo to [generate a BibTeX citation](https://doi.org/10.5281/zenodo.10255543) for the latest release.
73
+ In addition to the utilized software version, please cite our scientific publication over at the Journal of Open Source Software (JOSS).
74
+ A detailed citation can be found in CITATION.cff and in the sidebar.
@@ -0,0 +1,13 @@
1
+ peak_performance/__init__.py,sha256=yTq4THYewbWRnrs2Qkv4nCd-7MyvDlu_t0fPeWeKxQc,261
2
+ peak_performance/models.py,sha256=C_SoYqA0_fGTTh0LSlVmsQWfPfUYddDWudowzOmW22M,27531
3
+ peak_performance/pipeline.py,sha256=O38AtmtGTA4fFYj78S836TgcFa1nuyf6npsbIM7DGec,64456
4
+ peak_performance/plots.py,sha256=JToIsNxGF-uh09t8IJvN9cWRTsL3opjDE8DMqGocYJQ,9528
5
+ peak_performance/test_main.py,sha256=xQiLDjhldxZzY5sp3RyIJUTtXxX46auWY9Qy7nuifxw,97
6
+ peak_performance/test_models.py,sha256=r6kqAVBtAbycf4IoRaXcSCZp6Lras3afK6o9qcLZbH8,11592
7
+ peak_performance/test_pipeline.py,sha256=gTZAxcJEVwJ0XW4IewmIWGLmx1n7KaK8egrovKHsCFI,22961
8
+ peak_performance/test_plots.py,sha256=lGwPWzezAhzEnyu_NMx2lFtyzzb1wxy-jnRMtOaaniY,4100
9
+ peak_performance-0.7.3.dist-info/licenses/LICENSE.md,sha256=zj-4LZ7oChyw5Uj5sFYOrVI3juK06Cb9lFm0rPcHXYk,32387
10
+ peak_performance-0.7.3.dist-info/METADATA,sha256=VHpCXKMg2gDAD47fzkoJy6iiIErBoQrUA0djGnAoxyM,4953
11
+ peak_performance-0.7.3.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
12
+ peak_performance-0.7.3.dist-info/top_level.txt,sha256=-lZSmgn2fZA-xPVmddLwaRt2hQeeWj7TYVefOk7_T58,17
13
+ peak_performance-0.7.3.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: bdist_wheel (0.43.0)
2
+ Generator: setuptools (79.0.1)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,68 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: peak-performance
3
- Version: 0.7.0
4
- Summary: A Python toolbox to fit chromatography peaks with uncertainty.
5
- Author-email: Jochen Nießer <j.niesser@fz-juelich.de>, Michael Osthege <m.osthege@fz-juelich.de>
6
- License: AGPLv3
7
- Project-URL: homepage, https://jugit.fz-juelich.de/IBG-1/micropro/peak-performance
8
- Project-URL: documentation, https://jugit.fz-juelich.de/IBG-1/micropro/peak-performance
9
- Project-URL: repository, https://jugit.fz-juelich.de/IBG-1/micropro/peak-performance
10
- Keywords: hplc,mass-spectrometry,uncertainty quantification
11
- Classifier: Programming Language :: Python :: 3
12
- Classifier: Operating System :: OS Independent
13
- Classifier: License :: OSI Approved :: GNU Affero General Public License v3
14
- Classifier: Intended Audience :: Science/Research
15
- Requires-Python: >=3.9
16
- Description-Content-Type: text/markdown
17
- License-File: LICENSE.md
18
- Requires-Dist: arviz
19
- Requires-Dist: matplotlib
20
- Requires-Dist: numpy
21
- Requires-Dist: pandas
22
- Requires-Dist: pymc >=5.9.1
23
- Requires-Dist: pytensor
24
- Requires-Dist: scipy
25
- Requires-Dist: openpyxl
26
- Requires-Dist: numpy <1.26.0
27
-
28
- [![PyPI version](https://img.shields.io/pypi/v/peak-performance)](https://pypi.org/project/peak-performance/)
29
- [![pipeline](https://github.com/jubiotech/peak-performance/workflows/pipeline/badge.svg)](https://github.com/JuBiotech/peak-performance/actions)
30
- [![coverage](https://codecov.io/gh/jubiotech/peak-performance/branch/main/graph/badge.svg)](https://app.codecov.io/gh/JuBiotech/peak-performance)
31
- [![documentation](https://readthedocs.org/projects/peak-performance/badge/?version=latest)](https://peak-performance.readthedocs.io/en/latest)
32
- [![DOI](https://zenodo.org/badge/713469041.svg)](https://zenodo.org/doi/10.5281/zenodo.10255543)
33
-
34
- # How to use PeakPerformance
35
- For installation instructions, see `Installation.md`.
36
- For instructions regarding the use of PeakPerformance, check out the example notebook(s) under `notebooks`, the complementary example data under `example`, and the following introductory explanations.
37
-
38
- ## Preparing raw data
39
- This step is crucial when using PeakPerformance. Raw data has to be supplied as time series meaning for each signal you want to analyze, save a NumPy array consisting of time in the first dimension and intensity in the second dimension (compare example data). Both time and intensity should also be NumPy arrays. If you e.g. have time and intensity of a signal as lists, you can use the following code to convert, format, and save them in the correct manner:
40
-
41
- ```python
42
- import numpy as np
43
- from pathlib import Path
44
-
45
- time_series = np.array([np.array(time), np.array(intensity)])
46
- np.save(Path(r"example_path/time_series.npy"), time_series)
47
- ```
48
-
49
- The naming convention of raw data files is `<acquisition name>_<precursor ion m/z or experiment number>_<product ion m/z start>_<product ion m/z end>.npy`. There should be no underscores within the named sections such as `acquisition name`. Essentially, the raw data names include the acquisition and mass trace, thus yielding a recognizable and unique name for each isotopomer/fragment/metabolite/sample.
50
-
51
- ## Model selection
52
- When it comes to selecting models, PeakPerformance has a function performing an automated selection process by analyzing one acquisition per mass trace with all implemented models. Subsequently, all models are ranked based on an information criterion (either pareto-smoothed importance sampling leave-one-out cross-validation or widely applicable information criterion). For this process to work as intended, you need to specify acquisitions with representative peaks for each mass trace (see example notebook 1). If e.g. most peaks of an analyte show a skewed shape, then select an acquisition where this is the case. For double peaks, select an acquisition where the peaks are as distinct and comparable in height as possible.
53
- Since model selection is a computationally demanding and time consuming process, it is suggested to state the model type as the user (see example notebook 1) if possible.
54
-
55
- ## Troubleshooting
56
- ### A batch run broke and I want to restart it.
57
- If an error occurred in the middle of a batch run, then you can use the `pipeline_restart` function in the `pipeline` module to create a new batch which will analyze only those samples, which have not been analyzed previously.
58
-
59
- ### The model parameters don't converge and/or the fit does not describe the raw data well.
60
- Check the separate file `How to adapt PeakPerformance to your data`.
61
-
62
- # How to contribute
63
- If you encounter bugs while using PeakPerformance, please bring them to our attention by opening an issue. When doing so, describe the problem in detail and add screenshots/code snippets and whatever other helpful material you can provide.
64
- When contributing code, create a local clone of PeakPerformance, create a new branch, and open a pull request (PR).
65
-
66
- # How to cite
67
- Head over to Zenodo to [generate a BibTeX citation](https://doi.org/10.5281/zenodo.10255543) for the latest release.
68
- A publication has just been submitted to a scientific journal. Once published, this section will be updated.
@@ -1,13 +0,0 @@
1
- peak_performance/__init__.py,sha256=yTq4THYewbWRnrs2Qkv4nCd-7MyvDlu_t0fPeWeKxQc,261
2
- peak_performance/models.py,sha256=HpJzjf9Eq1ZXUxKlX6GZDj21icL4s_G0naJrCgWntcM,27457
3
- peak_performance/pipeline.py,sha256=8yy2-hTNozBJeLE_dulQJCzCBWv2CoRRdXSHDN2UwD8,64395
4
- peak_performance/plots.py,sha256=5F-s7ZcFgZuN5xGIWRSJ5-_Pl99-vqbcr3F8dYnBFQc,9455
5
- peak_performance/test_main.py,sha256=xQiLDjhldxZzY5sp3RyIJUTtXxX46auWY9Qy7nuifxw,97
6
- peak_performance/test_models.py,sha256=r6kqAVBtAbycf4IoRaXcSCZp6Lras3afK6o9qcLZbH8,11592
7
- peak_performance/test_pipeline.py,sha256=gTZAxcJEVwJ0XW4IewmIWGLmx1n7KaK8egrovKHsCFI,22961
8
- peak_performance/test_plots.py,sha256=lGwPWzezAhzEnyu_NMx2lFtyzzb1wxy-jnRMtOaaniY,4100
9
- peak_performance-0.7.0.dist-info/LICENSE.md,sha256=zj-4LZ7oChyw5Uj5sFYOrVI3juK06Cb9lFm0rPcHXYk,32387
10
- peak_performance-0.7.0.dist-info/METADATA,sha256=uAz1t9qggYqguLgLPJ611PjUOYKM8CWJQLXbH3u8RsY,5076
11
- peak_performance-0.7.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
12
- peak_performance-0.7.0.dist-info/top_level.txt,sha256=-lZSmgn2fZA-xPVmddLwaRt2hQeeWj7TYVefOk7_T58,17
13
- peak_performance-0.7.0.dist-info/RECORD,,