msreport 0.0.26__py3-none-any.whl → 0.0.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
msreport/__init__.py CHANGED
@@ -10,4 +10,4 @@ import msreport.normalize
  import msreport.plot
  import msreport.reader

- __version__ = "0.0.26"
+ __version__ = "0.0.27"
msreport/analyze.py CHANGED
@@ -9,7 +9,7 @@ import pandas as pd

  import msreport.normalize
  import msreport.rinterface
- from msreport.qtable import Qtable
+ from msreport.helper import find_sample_columns


  class Transformer(Protocol):
@@ -33,8 +33,8 @@ class CategoryTransformer(Protocol):
  def transform(self, table: pd.DataFrame) -> pd.DataFrame:
  """Transform values in 'table'."""

- def get_category_column(self, table: pd.DataFrame) -> pd.DataFrame:
- """Returns the specified category column."""
+ def get_category_column(self) -> str:
+ """Returns the name of the category column."""


  def analyze_missingness(qtable: Qtable) -> None:
@@ -75,6 +75,7 @@ def analyze_missingness(qtable: Qtable) -> None:
  def validate_proteins(
  qtable: Qtable,
  min_peptides: int = 0,
+ min_spectral_counts: int = 0,
  remove_contaminants: bool = True,
  min_events: Optional[int] = None,
  max_missing: Optional[int] = None,
@@ -84,12 +85,13 @@ def validate_proteins(
  Adds an additional column "Valid" to the qtable, containing Boolean values.

  Requires expression columns to be set. Depending on the arguments requires the
- columns "Total peptides", "Potential contaminant", and the experiment columns
- "Missing experiment_name" and "Events experiment_name".
+ columns "Total peptides", "Spectral count Combined", "Potential contaminant", and
+ the experiment columns "Missing experiment_name" and "Events experiment_name".

  Args:
  qtable: A Qtable instance.
  min_peptides: Minimum number of unique peptides, default 0.
+ min_spectral_counts: Minimum number of combined spectral counts, default 0.
  remove_contaminants: If true, the "Potential contaminant" column is used to
  remove invalid entries, default True. If no "Potential contaminant" column
  is present 'remove_contaminants' is ignored.
@@ -107,6 +109,16 @@ def validate_proteins(
  [valid_entries, qtable["Total peptides"] >= min_peptides], axis=0
  )

+ if min_spectral_counts > 0:
+ if "Spectral count Combined" not in qtable:
+ raise KeyError(
+ "'Spectral count Combined' column not present in qtable.data"
+ )
+ valid_entries = np.all(
+ [valid_entries, qtable["Spectral count Combined"] >= min_spectral_counts],
+ axis=0,
+ )
+
  # TODO: not tested from here #
  if remove_contaminants:
  if "Potential contaminant" not in qtable:
@@ -138,6 +150,50 @@ def validate_proteins(
  qtable["Valid"] = valid_entries


+ def apply_transformer(
+ qtable: msreport.Qtable,
+ transformer: Transformer,
+ tag: str,
+ exclude_invalid: bool,
+ remove_invalid: bool,
+ new_tag: Optional[str] = None,
+ ) -> None:
+ """Applies a transformer to the values of a Qtable selected with the tag parameter.
+
+ Args:
+ qtable: A Qtable instance, to which the transformer is applied.
+ transformer: The transformer to apply.
+ tag: The tag used to identify the columns for applying the transformer.
+ exclude_invalid: Exclude invalid values from the transformation.
+ remove_invalid: Remove invalid values from the table after the transformation.
+ new_tag: Optional, if specified than the tag is replaced with this value in the
+ column names and the transformed data is stored to these new columns.
+ """
+ valid = qtable.data["Valid"]
+ samples = qtable.get_samples()
+ sample_columns = find_sample_columns(qtable.data, tag, samples)
+
+ if not sample_columns:
+ raise ValueError(f"No sample columns found for tag '{tag}'.")
+
+ if new_tag is not None:
+ sample_columns = [c.replace(tag, new_tag) for c in sample_columns]
+ column_mapping = dict(zip(samples, sample_columns))
+
+ data_table = qtable.make_sample_table(tag, samples_as_columns=True)
+
+ if exclude_invalid:
+ data_table[valid] = transformer.transform(data_table[valid])
+ else:
+ data_table = transformer.transform(data_table)
+
+ if remove_invalid:
+ data_table[~valid] = np.nan
+
+ data_table.columns = [column_mapping[s] for s in data_table.columns]
+ qtable.data[data_table.columns] = data_table
+
+
  def normalize_expression(
  qtable: Qtable,
  normalizer: Transformer,
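
A hedged sketch of how the new apply_transformer helper might be called; the "Intensity" tag, the new column tag, and the choice of transformer are illustrative assumptions:

    # Illustrative sketch: scale the "Intensity" sample columns of an existing
    # Qtable and write the results to new "Relative intensity" columns.
    from msreport.analyze import apply_transformer
    from msreport.normalize import PercentageScaler

    apply_transformer(
        qtable,
        transformer=PercentageScaler(),
        tag="Intensity",
        exclude_invalid=True,   # transform only rows flagged as "Valid"
        remove_invalid=False,   # keep the untransformed values of invalid rows
        new_tag="Relative intensity",
    )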
@@ -168,11 +224,9 @@ def normalize_expression(
  raw_data = table[sample_columns]
  if not normalizer.is_fitted():
  if exclude_invalid:
- valid_mask = table["Valid"]
+ normalizer.fit(raw_data[table["Valid"]])
  else:
- valid_mask = np.ones_like(table["Valid"], dtype=bool)
- fit_data = raw_data[valid_mask]
- normalizer = normalizer.fit(fit_data)
+ normalizer = normalizer.fit(raw_data)

  transformed_data = normalizer.transform(raw_data)
  qtable[expression_columns] = transformed_data[sample_columns]
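
The fit is now restricted to rows flagged as valid through plain boolean indexing; a tiny self-contained sketch of that selection on toy data:

    # Self-contained sketch of the row selection used for the normalizer fit:
    # only rows whose "Valid" flag is True are passed to fit().
    import pandas as pd

    table = pd.DataFrame({"Valid": [True, False, True], "Sample_A": [10.0, 99.0, 12.0]})
    raw_data = table[["Sample_A"]]
    fit_data = raw_data[table["Valid"]]  # keeps rows 0 and 2 only
    print(fit_data)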
@@ -421,7 +475,7 @@ def two_group_comparison(

  def calculate_multi_group_limma(
  qtable: Qtable,
- experiment_pairs: list[list[str, str]],
+ experiment_pairs: Iterable[Iterable[str]],
  exclude_invalid: bool = True,
  batch: bool = False,
  limma_trend: bool = True,
@@ -454,6 +508,14 @@ def calculate_multi_group_limma(
  limma_trend: If true, an intensity-dependent trend is fitted to the prior
  variance during calculation of the moderated t-statistics, refer to
  limma.eBayes for details; default True.
+
+ Raises:
+ KeyError: If the "Batch" column is not present in the qtable.design when
+ 'batch' is set to True.
+ ValueError: If all values from qtable.design["Batch"] are identical when 'batch'
+ is set to True.
+ ValueError: If the same experiment pair has been specified multiple times in
+ 'experiment_pairs'.
  """
  # TODO: not tested #
  if batch and "Batch" not in qtable.get_design():
@@ -466,6 +528,11 @@ def calculate_multi_group_limma(
  "When using calculate_multi_group_limma(batch=True), not all values from"
  ' qtable.design["Batch"] are allowed to be identical.'
  )
+ if len(list(experiment_pairs)) != len(set(experiment_pairs)):
+ raise ValueError(
+ "The same experiment pair has been specified multiple times."
+ " Each entry in the `experiment_pairs` argument must be unique."
+ )

  design = qtable.get_design()
  table = qtable.make_expression_table(
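
A minimal call sketch matching the relaxed experiment_pairs typing and the new uniqueness check; the experiment names and the qtable variable are illustrative assumptions:

    # Illustrative sketch: each inner pair names two experiments to compare.
    # Using tuples keeps the pairs hashable for the duplicate check, and
    # repeating the same pair would now raise a ValueError.
    from msreport.analyze import calculate_multi_group_limma

    experiment_pairs = [("Treated", "Control"), ("Mutant", "Control")]
    calculate_multi_group_limma(qtable, experiment_pairs, exclude_invalid=True)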
@@ -504,7 +571,7 @@ def calculate_multi_group_limma(
  limma_result.rename(columns=mapping, inplace=True)

  limma_table = pd.DataFrame(index=table.index)
- limma_table = limma_table.join(limma_results.values())
+ limma_table = limma_table.join(list(limma_results.values()))
  limma_table.fillna(np.nan, inplace=True)
  qtable.add_expression_features(limma_table)

@@ -516,7 +583,7 @@ def calculate_multi_group_limma(

  def calculate_two_group_limma(
  qtable: Qtable,
- experiment_pair: list[str, str],
+ experiment_pair: list[str],
  exclude_invalid: bool = True,
  limma_trend: bool = True,
  ) -> None:
@@ -582,5 +649,3 @@ def calculate_two_group_limma(
  mapping = {col: f"{col} {comparison_group}" for col in limma_table.columns}
  limma_table.rename(columns=mapping, inplace=True)
  qtable.add_expression_features(limma_table)
-
- return limma_result
msreport/helper/calc.py CHANGED
@@ -19,22 +19,28 @@ def mode(values: Iterable) -> float:
  Returns:
  The estimated mode. If no finite values are present, returns nan.
  """
- values = np.array(values)
+ values = np.asarray(values)
  finite_values = values[np.isfinite(values)]
  if len(finite_values) == 0:
- mode = np.nan
+ return np.nan
  elif len(np.unique(finite_values)) == 1:
- mode = np.unique(finite_values)[0]
- else:
- median = np.median(finite_values)
- bounds = (median - 1.5, median + 1.5)
- kde = scipy.stats.gaussian_kde(finite_values)
- optimize_result = scipy.optimize.minimize_scalar(
- lambda x: -kde(x)[0], method="Bounded", bounds=bounds
- )
- mode = optimize_result.x
- # Maybe add fallback function if optimize was not successful
- return mode
+ return np.unique(finite_values)[0]
+
+ kde = scipy.stats.gaussian_kde(finite_values)
+ minimum_function = lambda x: -kde(x)[0]
+
+ min_slice, max_sclice = np.percentile(finite_values, (2, 98))
+ slice_step = 0.2
+ brute_optimize_result = scipy.optimize.brute(
+ minimum_function, [slice(min_slice, max_sclice + slice_step, slice_step)]
+ )
+ rough_minimum = brute_optimize_result[0]
+
+ local_optimize_result = scipy.optimize.minimize(
+ minimum_function, x0=rough_minimum, method="BFGS"
+ )
+ fine_minimum = local_optimize_result.x[0]
+ return fine_minimum


  def calculate_tryptic_ibaq_peptides(protein_sequence: str) -> int:
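
The reworked mode estimate replaces the bounded search around the median with a two-stage search over the kernel density: a coarse grid over the central value range followed by a local refinement. A self-contained sketch of that approach on synthetic data:

    # Self-contained sketch of the two-stage KDE mode estimate shown above:
    # brute-force grid search between the 2nd and 98th percentile, then a
    # local BFGS refinement starting from the best grid point.
    import numpy as np
    import scipy.optimize
    import scipy.stats

    values = np.random.default_rng(0).normal(loc=20.0, scale=1.5, size=500)
    kde = scipy.stats.gaussian_kde(values)

    def neg_density(x):
        return -kde(x)[0]

    low, high = np.percentile(values, (2, 98))
    coarse = scipy.optimize.brute(neg_density, [slice(low, high + 0.2, 0.2)])
    refined = scipy.optimize.minimize(neg_density, x0=coarse[0], method="BFGS")
    print(refined.x[0])  # close to 20.0 for this synthetic sample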
msreport/impute.py CHANGED
@@ -1,5 +1,5 @@
  from __future__ import annotations
- from typing import Optional
+ from typing import Optional, Any

  import numpy as np
  import pandas as pd
@@ -18,7 +18,7 @@ class FixedValueImputer:
  def __init__(
  self,
  strategy: str,
- fill_value: Optional[float] = None,
+ fill_value: float = 0.0,
  column_wise: bool = True,
  ):
  """Initializes the FixedValueImputer.
@@ -51,13 +51,11 @@ class FixedValueImputer:
  Returns the fitted FixedValueImputer instance.
  """
  if self.strategy == "constant":
- # if not isinstance(self.fill_value, (float, int)):
- # raise Excpetion()
  fill_values = {column: self.fill_value for column in table.columns}
  elif self.strategy == "below":
  if self.column_wise:
  fill_values = {}
- for column in table:
+ for column in table.columns:
  fill_values[column] = _calculate_integer_below_min(table[column])
  else:
  int_below_min = _calculate_integer_below_min(table)
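
A hedged usage sketch of the imputer whose fit logic changed above; the toy table is an assumption, and the transform call assumes the imputer exposes the usual fit/transform pair alongside the fit shown here:

    # Illustrative sketch: fill missing values with an integer just below each
    # column's minimum ("below" strategy, column-wise), on a toy table.
    import numpy as np
    import pandas as pd
    from msreport.impute import FixedValueImputer

    table = pd.DataFrame(
        {"Sample_A": [20.1, np.nan, 23.4], "Sample_B": [np.nan, 21.7, 22.0]}
    )
    imputer = FixedValueImputer(strategy="below", column_wise=True).fit(table)
    imputed = imputer.transform(table)  # transform() is assumed to mirror fit()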
@@ -240,7 +238,7 @@ class PerseusImputer:
  return _table


- def confirm_is_fitted(imputer: any, msg: Optional[str] = None) -> None:
+ def confirm_is_fitted(imputer: Any, msg: Optional[str] = None) -> None:
  """Perform is_fitted validation for imputer instances.

  Checks if the imputer is fitted by verifying the presence of fitted attributes
msreport/normalize.py CHANGED
@@ -1,7 +1,6 @@
  from __future__ import annotations
- import abc
- import itertools
- from typing import Callable, Iterable, Optional
+ from typing import Callable, Iterable, Optional, Protocol
+ from typing_extensions import Self

  import numpy as np
  import pandas as pd
@@ -12,27 +11,18 @@ import msreport.helper.maxlfq as MAXLFQ
  from msreport.errors import NotFittedError


- class BaseSampleNormalizer(abc.ABC):
- """Base class for all sample normalizers."""
+ class AbstractTransformer(Protocol):
+ def fit(self, table: pd.DataFrame) -> Self:
+ """Fits the Transformer and returns a fitted Transformer instance."""

- @abc.abstractmethod
- def fit(self, table: pd.DataFrame) -> BaseSampleNormalizer:
- ...
-
- @abc.abstractmethod
  def is_fitted(self) -> bool:
- ...
-
- @abc.abstractmethod
- def get_fits(self) -> dict[...]:
- ...
+ """Returns True if the Transformer has been fitted."""

- @abc.abstractmethod
  def transform(self, table: pd.DataFrame) -> pd.DataFrame:
- ...
+ """Transform values in table."""


- class FixedValueNormalizer(BaseSampleNormalizer):
+ class FixedValueNormalizer:
  """Normalization by a constant normalization factor for each sample.

  Expects log transformed intensity values.
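
Because the normalizers are now checked structurally against this Protocol instead of inheriting from a base class, any object that provides fit, is_fitted, and transform is accepted. A minimal sketch of a custom, duck-typed transformer under that assumption (not part of msreport):

    # Minimal sketch of a transformer compatible with the AbstractTransformer
    # protocol above: per-column median centering.
    import numpy as np
    import pandas as pd

    class MedianCenterer:
        def __init__(self) -> None:
            self._fits: dict = {}

        def fit(self, table: pd.DataFrame) -> "MedianCenterer":
            self._fits = {col: float(np.nanmedian(table[col])) for col in table.columns}
            return self

        def is_fitted(self) -> bool:
            return bool(self._fits)

        def transform(self, table: pd.DataFrame) -> pd.DataFrame:
            # Subtracting a Series aligns on column names and centers each column.
            return table - pd.Series(self._fits)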
@@ -58,11 +48,11 @@ class FixedValueNormalizer(BaseSampleNormalizer):
  f'"comparison" = {comparison} not allowed. '
  'Must be either "paired" or "reference".'
  )
- self._comparison_mode = comparison
- self._fit_function = center_function
- self._sample_fits = None
+ self._comparison_mode: str = comparison
+ self._fit_function: Callable = center_function
+ self._sample_fits: dict[str, float] = {}

- def fit(self, table: pd.DataFrame) -> BaseSampleNormalizer:
+ def fit(self, table: pd.DataFrame) -> Self:
  """Fits the FixedValueNormalizer.

  Args:
@@ -80,7 +70,7 @@ class FixedValueNormalizer(BaseSampleNormalizer):

  def is_fitted(self) -> bool:
  """Returns True if the FixedValueNormalizer has been fitted."""
- return self._sample_fits is not None
+ return True if self._sample_fits else False

  def get_fits(self) -> dict[str, float]:
  """Returns a dictionary containing the fitted center values per sample.
@@ -159,13 +149,13 @@ class FixedValueNormalizer(BaseSampleNormalizer):
  self._sample_fits[sample] = sample_fit


- class ValueDependentNormalizer(BaseSampleNormalizer):
+ class ValueDependentNormalizer:
  """Normalization with a value dependent fit for each sample.

  Expects log transformed intensity values.
  """

- def __init__(self, fit_function: Callable):
+ def __init__(self, fit_function: Callable[[Iterable, Iterable], np.ndarray]):
  """Initializes the ValueDependentNormalizer.

  Args:
@@ -175,10 +165,10 @@ class ValueDependentNormalizer(BaseSampleNormalizer):
  with two columns. The first column contains the values and the second
  column the fitted deviations.
  """
- self._sample_fits = None
+ self._sample_fits: dict[str, np.ndarray] = {}
  self._fit_function = fit_function

- def fit(self, table: pd.DataFrame) -> BaseSampleNormalizer:
+ def fit(self, table: pd.DataFrame) -> Self:
  """Fits the ValueDependentNormalizer.

  Args:
@@ -192,9 +182,9 @@ class ValueDependentNormalizer(BaseSampleNormalizer):

  def is_fitted(self) -> bool:
  """Returns True if the ValueDependentNormalizer has been fitted."""
- return self._sample_fits is not None
+ return True if self._sample_fits else False

- def get_fits(self) -> dict[str, Iterable[float, float]]:
+ def get_fits(self) -> dict[str, np.ndarray]:
  """Returns a dictionary containing lists of fitting data per sample.

  Returns:
@@ -324,14 +314,14 @@ class CategoricalNormalizer:
  column must be present in the reference table and the table to be
  transformed.
  """
- self._fitted_table = None
- self._category_column = category_column
+ self._fitted_table: pd.DataFrame = pd.DataFrame()
+ self._category_column: str = category_column

  def is_fitted(self) -> bool:
  """Returns True if the CategoricalNormalizer has been fitted."""
- return self._fitted_table is not None
+ return not self._fitted_table.empty

- def fit(self, reference_table: pd.DataFrame) -> BaseSampleNormalizer:
+ def fit(self, reference_table: pd.DataFrame) -> Self:
  """Fits the CategoricalNormalizer to a reference table.

  Args:
@@ -397,7 +387,34 @@ class CategoricalNormalizer:
  return transformed_table


- class ZscoreScaler(BaseSampleNormalizer):
+ class PercentageScaler:
+ """Transform column values to percentages by dividing them with the column sum."""
+
+ def fit(self, table: pd.DataFrame) -> Self:
+ """Returns the instance itself."""
+ return self
+
+ def is_fitted(self) -> bool:
+ """Always returns True because the ZscoreScaler does not need to be fitted."""
+ return True
+
+ def get_fits(self) -> dict:
+ """Returns a dictionary containing the parameters 'with_mean' and 'with_std'."""
+ return {}
+
+ def transform(self, table: pd.DataFrame) -> pd.DataFrame:
+ """Transforms column values into percentages by devision with the column sum.
+
+ Args:
+ table: The table used to scale row values.
+
+ Returns:
+ A copy of the table containing the scaled values.
+ """
+ return table.divide(table.sum(axis=0), axis=1)
+
+
+ class ZscoreScaler:
  """Normalize samples by z-score scaling."""

  def __init__(self, with_mean: bool = True, with_std: bool = True):
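
A short toy example of the new PercentageScaler (the column names and values are illustrative): each column is divided by its column sum, so the scaled values of every sample column add up to 1.

    import pandas as pd
    from msreport.normalize import PercentageScaler

    table = pd.DataFrame({"Sample_A": [10.0, 30.0, 60.0], "Sample_B": [5.0, 5.0, 10.0]})
    scaled = PercentageScaler().transform(table)
    print(scaled)  # Sample_A -> 0.10, 0.30, 0.60; Sample_B -> 0.25, 0.25, 0.50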
@@ -410,7 +427,7 @@ class ZscoreScaler(BaseSampleNormalizer):
  self._with_mean = with_mean
  self._with_std = with_std

- def fit(self, table: pd.DataFrame) -> BaseSampleNormalizer:
+ def fit(self, table: pd.DataFrame) -> Self:
  """Returns the instance itself."""
  return self

@@ -440,7 +457,7 @@ class ZscoreScaler(BaseSampleNormalizer):


  def confirm_is_fitted(
- normalizer: BaseSampleNormalizer, msg: Optional[str] = None
+ normalizer: AbstractTransformer, msg: Optional[str] = None
  ) -> None:
  """Perform is_fitted validation for normalizer instances.

msreport/plot.py CHANGED
@@ -703,16 +703,18 @@ def volcano_ma(
  y_col = " ".join([y_variable, comparison_group])
  x_values = data[x_col]
  y_values = data[y_col]
- ax.grid(axis="both", linestyle="dotted", linewidth=1)
+ xy_labels = data[annotation_column]

- mask = masks["default"]
- ax.scatter(x_values[mask], y_values[mask], **params["default"])
+ valid_values = np.isfinite(x_values) & np.isfinite(y_values)
+ mask_default = masks["default"] & valid_values
+ mask_special = masks["highlight"] & valid_values

- mask = masks["highlight"]
+ ax.grid(axis="both", linestyle="dotted", linewidth=1)
+ ax.scatter(x_values[mask_default], y_values[mask_default], **params["default"])
  _annotated_scatter(
- x_values=data[x_col][mask],
- y_values=data[y_col][mask],
- labels=data[annotation_column][mask],
+ x_values=x_values[mask_special],
+ y_values=y_values[mask_special],
+ labels=xy_labels[mask_special],
  ax=ax,
  scatter_kws=params["highlight"],
  )
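
The plotting change boils down to masking out non-finite points before either scatter call; a standalone sketch of that pattern on toy data, independent of the volcano_ma internals:

    # Standalone sketch: drop rows whose x or y value is NaN/inf before
    # combining the finite mask with the highlight selection.
    import numpy as np
    import pandas as pd

    data = pd.DataFrame(
        {"x": [1.2, np.nan, -0.8, 2.5], "y": [2.0, 1.4, np.inf, 3.1]}
    )
    highlight = pd.Series([True, True, False, True])

    valid_values = np.isfinite(data["x"]) & np.isfinite(data["y"])
    mask_default = ~highlight & valid_values
    mask_special = highlight & valid_values
    print(data[mask_special])  # rows 0 and 3 remain for annotation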
msreport-0.0.26.dist-info/METADATA → msreport-0.0.27.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: msreport
- Version: 0.0.26
+ Version: 0.0.27
  Summary: Post processing and analysis of quantitative proteomics data
  Author-email: "David M. Hollenstein" <hollenstein.david@gmail.com>
  License: Apache-2.0
@@ -24,6 +24,7 @@ Requires-Dist: scikit-learn>=1.0.0
  Requires-Dist: scipy>=1.9.1
  Requires-Dist: seaborn>=0.12.0
  Requires-Dist: statsmodels>=0.13.2
+ Requires-Dist: typing_extensions>=4
  Dynamic: license-file

  [![Project Status: WIP – Initial development is in progress, but there has not yet been a stable, usable release suitable for the public.](https://www.repostatus.org/badges/latest/wip.svg)](https://www.repostatus.org/#wip)
msreport-0.0.26.dist-info/RECORD → msreport-0.0.27.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
- msreport/__init__.py,sha256=plg-xX45MtHFaMW2qKkNZMTku2gH1Eu5iY4mmFdYSa8,343
- msreport/analyze.py,sha256=JBLuFLYJjI-8NIIwzq2gEJKJYFmKyuLUwf8NgSzC9_c,25350
+ msreport/__init__.py,sha256=RPN5o7ubtWIQgnnnG7kAq9pBQzM40dSeeMlr_TYp1n8,343
+ msreport/analyze.py,sha256=ihdHqqmaRrCI8NXJP-jUfmvNAtSRUwVU7ixOkpgW9z4,28048
  msreport/errors.py,sha256=qJan3lzRX6uwnByg9LfAfGKEKC2tm9SYUBvk3TdH_AM,300
  msreport/export.py,sha256=pyB8D1VjvfJWO6ka6uMnHsbc6SRlmCyhUB2sfBlhYDQ,20095
  msreport/fasta.py,sha256=oS7n_wNu-4num5nMUiL5cTpMRU3dBe4Xurz09bGm9ZE,1083
- msreport/impute.py,sha256=UJXfni8xjfIrdaq-tIa9MOyklHLQxq-LHRlbHi7G5IM,10441
+ msreport/impute.py,sha256=cBFAaMTlzjdG99ABDHtiN6ZcHpddnx5lfqUp-A4Vr3M,10341
  msreport/isobar.py,sha256=H6AJA81AJQ-gGoc8NEFpNAe6ofwzoK4BGnxQHZ80GlA,6551
- msreport/normalize.py,sha256=fQx1QQxusrfddcA_oATz88_ojOqClLQVxNNoJxdOOX4,19334
+ msreport/normalize.py,sha256=Dm3loQjNqFWG1D-k5lAhwKzB_2G6ufa4R5cF5gqqYuE,20158
  msreport/peptidoform.py,sha256=C0IWyvnKivGyizkrtpA_i_lLgw8YZgIxuFBtURrE6rw,11879
- msreport/plot.py,sha256=MUWYSzNKRct9JYDQE7uL_zLxlG8vjTS2so9e4EXyvHw,44005
+ msreport/plot.py,sha256=gyMCMy5uDuqdGH71wRW_jJdohB5yaWeYbX5jvA0kSWc,44186
  msreport/qtable.py,sha256=g9hrVyUdQbiNkBnnFNp4JXNiEAEb_yddrefy34gYMgM,22947
  msreport/reader.py,sha256=PngSiFqX0uFcXCwyU9lwKANGgG8PeoejpcMB3EmlvQc,104972
  msreport/aggregate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -15,7 +15,7 @@ msreport/aggregate/condense.py,sha256=sNBEsSqCej-zvzd8QeyaScBxUnP_UcaZWzQx9dvRAD
  msreport/aggregate/pivot.py,sha256=tCxdhSdXAgEobnsp-n2DX91PwTiNYbx_FYgY6SOef4M,5079
  msreport/aggregate/summarize.py,sha256=aI0YP9Kg4ZTVjS6c9gzu3xqvLOTsYJ5ZT3FcUJyg1vk,12155
  msreport/helper/__init__.py,sha256=8B7gdXn5tFeZATBF30TMaWu0ppc7xFa72JLxMYt_qUM,535
- msreport/helper/calc.py,sha256=ReMP1jA7bz-a7J1DdlAVATN9zjSXENqbw652WfjSEkY,4088
+ msreport/helper/calc.py,sha256=yZ-mp2V9_XrCf75mpOlmyZV3Q3lVINqROMVp33TXW6A,4246
  msreport/helper/maxlfq.py,sha256=DE9yX4vjMRSmolffrpC9UkCXmSs-rCN15cnlNzma4zw,14938
  msreport/helper/table.py,sha256=N-veDU3vQ2NeOfKcxUop5SfCv2iW038akkSKxU2yjzg,11464
  msreport/helper/temp.py,sha256=jNulgDATf9sKXEFWMXAhjflciOZPAqlxg_7QZS7IkW8,3736
@@ -23,8 +23,8 @@ msreport/rinterface/__init__.py,sha256=Z1I-4buERXTPIZmhdaTKZWfbJv_qVcqnUVqIJZx1D
  msreport/rinterface/limma.py,sha256=ATtsVyGSnqUGkyodQTgdpIFhEb4dJZ6wvSjrRsCrNRY,5421
  msreport/rinterface/rinstaller.py,sha256=sm6CJD0-XSxdDpUnL9EAI_CAIgo1NucYxPI9gL93zqw,1377
  msreport/rinterface/rscripts/limma.R,sha256=gr_yjMm_YoG45irDhWOo6gkRQSTwj_7uU_p3NBRHPm8,4331
- msreport-0.0.26.dist-info/licenses/LICENSE.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560
- msreport-0.0.26.dist-info/METADATA,sha256=0OpBlUdIaAqhdzEK9Mkq12rVJpeJHuYflFI4griTxqE,5339
- msreport-0.0.26.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
- msreport-0.0.26.dist-info/top_level.txt,sha256=Drl8mCckJHFIw-Ovh5AnyjKnqvLJltDOBUr1JAcHAlI,9
- msreport-0.0.26.dist-info/RECORD,,
+ msreport-0.0.27.dist-info/licenses/LICENSE.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560
+ msreport-0.0.27.dist-info/METADATA,sha256=zieStsBUYoc7mBg3Xx1SyUuQgd7I99CrgKMimJxIeb0,5376
+ msreport-0.0.27.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
+ msreport-0.0.27.dist-info/top_level.txt,sha256=Drl8mCckJHFIw-Ovh5AnyjKnqvLJltDOBUr1JAcHAlI,9
+ msreport-0.0.27.dist-info/RECORD,,