peak-performance 0.7.1__py3-none-any.whl → 0.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -192,13 +192,13 @@ def define_model_normal(time: np.ndarray, intensity: np.ndarray) -> pm.Model:
     """
     slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
     with pm.Model() as pmodel:
-        # add observations to the pmodel as ConstantData
-        pm.ConstantData("time", time)
-        pm.ConstantData("intensity", intensity)
-        # add guesses to the pmodel as ConstantData
-        pm.ConstantData("intercept_guess", intercept_guess)
-        pm.ConstantData("slope_guess", slope_guess)
-        noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
+        # add observations to the pmodel as Data
+        pm.Data("time", time)
+        pm.Data("intensity", intensity)
+        # add guesses to the pmodel as Data
+        pm.Data("intercept_guess", intercept_guess)
+        pm.Data("slope_guess", slope_guess)
+        noise_guess = pm.Data("noise_width_guess", noise_width_guess)
 
         # priors plus error handling in case of mathematically impermissible values
         baseline_intercept = pm.Normal(
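The only source change in `models.py` is this rename, repeated verbatim in all four model builders (`define_model_normal` above and the three hunks that follow): `pm.ConstantData` becomes `pm.Data`, presumably tracking the deprecation of the `ConstantData`/`MutableData` containers in newer PyMC releases. A minimal sketch of the pattern, using illustrative data rather than anything from the package:

```python
import numpy as np
import pymc as pm

# illustrative stand-ins for a chromatogram time grid and signal
time = np.linspace(0, 10, 100)
intensity = np.exp(-0.5 * ((time - 5.0) / 0.3) ** 2)

with pm.Model() as pmodel:
    # 0.7.1 spelling: pm.ConstantData("time", time)
    # 0.7.3 spelling: register the observations as named Data containers
    pm.Data("time", time)
    pm.Data("intensity", intensity)
```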
@@ -351,13 +351,13 @@ def define_model_double_normal(time: np.ndarray, intensity: np.ndarray) -> pm.Model:
     slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
     coords = {"subpeak": [0, 1]}
     with pm.Model(coords=coords) as pmodel:
-        # add observations to the pmodel as ConstantData
-        pm.ConstantData("time", time)
-        pm.ConstantData("intensity", intensity)
-        # add guesses to the pmodel as ConstantData
-        pm.ConstantData("intercept_guess", intercept_guess)
-        pm.ConstantData("slope_guess", slope_guess)
-        noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
+        # add observations to the pmodel as Data
+        pm.Data("time", time)
+        pm.Data("intensity", intensity)
+        # add guesses to the pmodel as Data
+        pm.Data("intercept_guess", intercept_guess)
+        pm.Data("slope_guess", slope_guess)
+        noise_guess = pm.Data("noise_width_guess", noise_width_guess)
 
         # priors
         baseline_intercept = pm.Normal(
@@ -559,13 +559,13 @@ def define_model_skew(time: np.ndarray, intensity: np.ndarray) -> pm.Model:
     """
     slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
     with pm.Model() as pmodel:
-        # add observations to the pmodel as ConstantData
-        pm.ConstantData("time", time)
-        pm.ConstantData("intensity", intensity)
-        # add guesses to the pmodel as ConstantData
-        pm.ConstantData("intercept_guess", intercept_guess)
-        pm.ConstantData("slope_guess", slope_guess)
-        noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
+        # add observations to the pmodel as Data
+        pm.Data("time", time)
+        pm.Data("intensity", intensity)
+        # add guesses to the pmodel as Data
+        pm.Data("intercept_guess", intercept_guess)
+        pm.Data("slope_guess", slope_guess)
+        noise_guess = pm.Data("noise_width_guess", noise_width_guess)
 
         # priors plus error handling in case of mathematically impermissible values
         baseline_intercept = pm.Normal(
@@ -675,13 +675,13 @@ def define_model_double_skew_normal(time: np.ndarray, intensity: np.ndarray) -> pm.Model:
     slope_guess, intercept_guess, noise_width_guess = initial_guesses(time, intensity)
     coords = {"subpeak": [0, 1]}
    with pm.Model(coords=coords) as pmodel:
-        # add observations to the pmodel as ConstantData
-        pm.ConstantData("time", time)
-        pm.ConstantData("intensity", intensity)
-        # add guesses to the pmodel as ConstantData
-        pm.ConstantData("intercept_guess", intercept_guess)
-        pm.ConstantData("slope_guess", slope_guess)
-        noise_guess = pm.ConstantData("noise_width_guess", noise_width_guess)
+        # add observations to the pmodel as Data
+        pm.Data("time", time)
+        pm.Data("intensity", intensity)
+        # add guesses to the pmodel as Data
+        pm.Data("intercept_guess", intercept_guess)
+        pm.Data("slope_guess", slope_guess)
+        noise_guess = pm.Data("noise_width_guess", noise_width_guess)
 
         # priors plus error handling in case of mathematically impermissible values
         baseline_intercept = pm.Normal(
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
-Name: peak-performance
-Version: 0.7.1
+Metadata-Version: 2.4
+Name: peak_performance
+Version: 0.7.3
 Summary: A Python toolbox to fit chromatography peaks with uncertainty.
 Author-email: Jochen Nießer <j.niesser@fz-juelich.de>, Michael Osthege <m.osthege@fz-juelich.de>
 License: AGPLv3
@@ -17,17 +17,20 @@ Description-Content-Type: text/markdown
 License-File: LICENSE.md
 Requires-Dist: arviz
 Requires-Dist: matplotlib
-Requires-Dist: numpy
+Requires-Dist: numpy<3
 Requires-Dist: pandas
 Requires-Dist: pymc>=5.9.1
 Requires-Dist: pytensor
 Requires-Dist: scipy
 Requires-Dist: openpyxl
+Requires-Dist: numpy
+Dynamic: license-file
 
 [![PyPI version](https://img.shields.io/pypi/v/peak-performance)](https://pypi.org/project/peak-performance/)
 [![pipeline](https://github.com/jubiotech/peak-performance/workflows/pipeline/badge.svg)](https://github.com/JuBiotech/peak-performance/actions)
 [![coverage](https://codecov.io/gh/jubiotech/peak-performance/branch/main/graph/badge.svg)](https://app.codecov.io/gh/JuBiotech/peak-performance)
 [![documentation](https://readthedocs.org/projects/peak-performance/badge/?version=latest)](https://peak-performance.readthedocs.io/en/latest)
+[![DOI](https://joss.theoj.org/papers/10.21105/joss.07313/status.svg)](https://doi.org/10.21105/joss.07313)
 [![DOI](https://zenodo.org/badge/713469041.svg)](https://zenodo.org/doi/10.5281/zenodo.10255543)
 
 # About PeakPerformance
@@ -35,6 +38,28 @@ PeakPerformance employs Bayesian modeling for chromatographic peak data fitting.
 This has the innate advantage of providing uncertainty quantification while jointly estimating all peak parameters united in a single peak model.
 As Markov Chain Monte Carlo (MCMC) methods are utilized to infer the posterior probability distribution, convergence checks and the aformentioned uncertainty quantification are applied as novel quality metrics for a robust peak recognition.
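The uncertainty quantification and convergence checks mentioned in this paragraph are the standard MCMC workflow of the PyMC/ArviZ stack the package depends on (see `Requires-Dist: pymc>=5.9.1` and `arviz` above). A generic sketch of that workflow, not PeakPerformance's own pipeline API:

```python
import arviz as az
import pymc as pm

with pm.Model():
    # toy stand-in for a peak parameter, not the package's actual model
    mean = pm.Normal("mean", mu=0.0, sigma=10.0)
    pm.Normal("obs", mu=mean, sigma=1.0, observed=[4.8, 5.1, 5.0])
    idata = pm.sample()  # MCMC sampling of the posterior

summary = az.summary(idata)             # posterior means and HDIs: uncertainty quantification
print(summary[["r_hat", "ess_bulk"]])   # convergence diagnostics used as quality metrics
```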
 
+# Installation
+
+It is highly recommended to follow the following steps and install ``PeakPerformance`` in a fresh Python environment:
+1. Install the package manager [Mamba](https://github.com/conda-forge/miniforge/releases).
+Choose the latest installer at the top of the page, click on "show all assets", and download an installer denominated by "Mambaforge-version number-name of your OS.exe", so e.g. "Mambaforge-23.3.1-1-Windows-x86_64.exe" for a Windows 64 bit operating system. Then, execute the installer to install mamba and activate the option "Add Mambaforge to my PATH environment variable".
+
+⚠ If you have already installed Miniconda, you can install Mamba on top of it but there are compatibility issues with Anaconda.
+
+ℹ The newest conda version should also work, just replace `mamba` with `conda` in step 2.
+
+2. Create a new Python environment in the command line using the provided [`environment.yml`](https://github.com/JuBiotech/peak-performance/blob/main/environment.yml) file from the repo.
+Download `environment.yml` first, then navigate to its location on the command line interface and run the following command:
+```
+mamba env create -f environment.yml
+```
+
+Naturally, it is alternatively possible to just install ``PeakPerformance`` via pip:
+
+```bash
+pip install peak-performance
+```
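After either installation route, a quick sanity check is to import the modules that ship in the wheel; the module names below are taken from the RECORD hunk further down in this diff, and the check simply assumes the install above succeeded:

```python
# smoke test only: import the modules listed in the wheel's RECORD
import peak_performance
from peak_performance import models, pipeline, plots
```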
+
 # First steps
 Be sure to check out our thorough [documentation](https://peak-performance.readthedocs.io/en/latest). It contains not only information on how to install PeakPerformance and prepare raw data for its application but also detailed treatises about the implemented model structures, validation with both synthetic and experimental data against a commercially available vendor software, exemplary usage of diagnostic plots and investigation of various effects.
 Furthermore, you will find example notebooks and data sets showcasing different aspects of PeakPerformance.
@@ -45,4 +70,5 @@ When contributing code, create a local clone of PeakPerformance, create a new br
 
 # How to cite
 Head over to Zenodo to [generate a BibTeX citation](https://doi.org/10.5281/zenodo.10255543) for the latest release.
-A publication has just been submitted to a scientific journal. Once published, this section will be updated.
+In addition to the utilized software version, please cite our scientific publication over at the Journal of Open Source Software (JOSS).
+A detailed citation can be found in CITATION.cff and in the sidebar.
@@ -1,13 +1,13 @@
 peak_performance/__init__.py,sha256=yTq4THYewbWRnrs2Qkv4nCd-7MyvDlu_t0fPeWeKxQc,261
-peak_performance/models.py,sha256=m32qCkEW00E3WV5d8xDlcMVHvdmcLH0fRnziPLsgDMk,27755
+peak_performance/models.py,sha256=C_SoYqA0_fGTTh0LSlVmsQWfPfUYddDWudowzOmW22M,27531
 peak_performance/pipeline.py,sha256=O38AtmtGTA4fFYj78S836TgcFa1nuyf6npsbIM7DGec,64456
 peak_performance/plots.py,sha256=JToIsNxGF-uh09t8IJvN9cWRTsL3opjDE8DMqGocYJQ,9528
 peak_performance/test_main.py,sha256=xQiLDjhldxZzY5sp3RyIJUTtXxX46auWY9Qy7nuifxw,97
 peak_performance/test_models.py,sha256=r6kqAVBtAbycf4IoRaXcSCZp6Lras3afK6o9qcLZbH8,11592
 peak_performance/test_pipeline.py,sha256=gTZAxcJEVwJ0XW4IewmIWGLmx1n7KaK8egrovKHsCFI,22961
 peak_performance/test_plots.py,sha256=lGwPWzezAhzEnyu_NMx2lFtyzzb1wxy-jnRMtOaaniY,4100
-peak_performance-0.7.1.dist-info/LICENSE.md,sha256=zj-4LZ7oChyw5Uj5sFYOrVI3juK06Cb9lFm0rPcHXYk,32387
-peak_performance-0.7.1.dist-info/METADATA,sha256=62R5sa4j-zdBzwVQRBaUD9cfS6pXvfPqUBpEUx0rVmk,3388
-peak_performance-0.7.1.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-peak_performance-0.7.1.dist-info/top_level.txt,sha256=-lZSmgn2fZA-xPVmddLwaRt2hQeeWj7TYVefOk7_T58,17
-peak_performance-0.7.1.dist-info/RECORD,,
+peak_performance-0.7.3.dist-info/licenses/LICENSE.md,sha256=zj-4LZ7oChyw5Uj5sFYOrVI3juK06Cb9lFm0rPcHXYk,32387
+peak_performance-0.7.3.dist-info/METADATA,sha256=VHpCXKMg2gDAD47fzkoJy6iiIErBoQrUA0djGnAoxyM,4953
+peak_performance-0.7.3.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+peak_performance-0.7.3.dist-info/top_level.txt,sha256=-lZSmgn2fZA-xPVmddLwaRt2hQeeWj7TYVefOk7_T58,17
+peak_performance-0.7.3.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.44.0)
+Generator: setuptools (79.0.1)
 Root-Is-Purelib: true
 Tag: py3-none-any
 