mlquantify 0.1.9__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,7 +6,7 @@ import numpy as np
  # =====================================================

  def getHist(scores, nbins):
- """
+ r"""
  Calculate histogram-like bin probabilities for a given set of scores.

  This function divides the score range into equal bins and computes the proportion
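As context for the hunk above, a minimal sketch of what a histogram-of-proportions helper like `getHist` computes; only the name and signature come from the diff, while the `[0, 1]` score range and the normalization are assumptions:

```python
import numpy as np

def get_hist_sketch(scores, nbins):
    # Proportion of scores falling in each of `nbins` equal-width bins
    # over an assumed [0, 1] score range.
    counts, _ = np.histogram(scores, bins=nbins, range=(0.0, 1.0))
    return counts / max(len(scores), 1)  # normalize counts to bin probabilities

print(get_hist_sketch(np.array([0.05, 0.1, 0.9, 0.95]), nbins=2))  # [0.5 0.5]
```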
@@ -42,7 +42,7 @@ def getHist(scores, nbins):


  def ternary_search(left: float, right: float, func, tol: float = 1e-4) -> float:
- """
+ r"""
  Ternary search to find the minimum of a unimodal function in [left, right].

  Parameters
@@ -53,8 +53,8 @@ def ternary_search(left: float, right: float, func, tol: float = 1e-4) -> float:
  Right bound.
  func : callable
  Function to minimize.
- tol : float
- Tolerance for termination.
+ tol : float, optional
+ Tolerance for termination. Default is 1e-4.

  Returns
  -------
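The docstring fully describes the algorithm, so a runnable sketch is easy to give; the library's actual implementation is not shown in this diff and may differ in details:

```python
def ternary_search_sketch(left: float, right: float, func, tol: float = 1e-4) -> float:
    # Shrink [left, right] around the minimum of a unimodal function
    # until the interval is narrower than tol.
    while right - left > tol:
        m1 = left + (right - left) / 3
        m2 = right - (right - left) / 3
        if func(m1) < func(m2):
            right = m2  # the minimum cannot lie in (m2, right]
        else:
            left = m1   # the minimum cannot lie in [left, m1)
    return (left + right) / 2

print(ternary_search_sketch(0.0, 1.0, lambda x: (x - 0.3) ** 2))  # ~0.3
```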
@@ -73,10 +73,23 @@ def ternary_search(left: float, right: float, func, tol: float = 1e-4) -> float:


  def topsoe(p: np.ndarray, q: np.ndarray) -> float:
- """
+ r"""
  Topsoe distance between two probability distributions.

- D_T(p, q) = sum( p*log(2p/(p+q)) + q*log(2q/(p+q)) )
+ .. math::
+ D_T(p, q) = \sum \left( p \log \frac{2p}{p + q} + q \log \frac{2q}{p + q} \right)
+
+ Parameters
+ ----------
+ p : np.ndarray
+ First probability distribution.
+ q : np.ndarray
+ Second probability distribution.
+
+ Returns
+ -------
+ float
+ The Topsoe distance.
  """
  p = np.maximum(p, 1e-20)
  q = np.maximum(q, 1e-20)
@@ -84,10 +97,23 @@ def topsoe(p: np.ndarray, q: np.ndarray) -> float:


  def probsymm(p: np.ndarray, q: np.ndarray) -> float:
- """
+ r"""
  Probabilistic Symmetric distance.

- D_PS(p, q) = sum( (p - q) * log(p / q) )
+ .. math::
+ D_{PS}(p, q) = \sum (p - q) \log \frac{p}{q}
+
+ Parameters
+ ----------
+ p : np.ndarray
+ First probability distribution.
+ q : np.ndarray
+ Second probability distribution.
+
+ Returns
+ -------
+ float
+ The Probabilistic Symmetric distance.
  """
  p = np.maximum(p, 1e-20)
  q = np.maximum(q, 1e-20)
@@ -95,10 +121,23 @@ def probsymm(p: np.ndarray, q: np.ndarray) -> float:


  def hellinger(p: np.ndarray, q: np.ndarray) -> float:
- """
+ r"""
  Hellinger distance between two probability distributions.

- H(p, q) = (1/sqrt(2)) * sqrt( sum( (sqrt(p) - sqrt(q))^2 ) )
+ .. math::
+ H(p, q) = \frac{1}{\sqrt{2}} \sqrt{\sum \left( \sqrt{p} - \sqrt{q} \right)^2}
+
+ Parameters
+ ----------
+ p : np.ndarray
+ First probability distribution.
+ q : np.ndarray
+ Second probability distribution.
+
+ Returns
+ -------
+ float
+ The Hellinger distance.
  """
  p = np.maximum(p, 1e-20)
  q = np.maximum(q, 1e-20)
@@ -106,7 +145,19 @@ def hellinger(p: np.ndarray, q: np.ndarray) -> float:


  def sqEuclidean(p: np.ndarray, q: np.ndarray) -> float:
- """
+ r"""
  Squared Euclidean distance between two vectors.
+
+ Parameters
+ ----------
+ p : np.ndarray
+ First vector.
+ q : np.ndarray
+ Second vector.
+
+ Returns
+ -------
+ float
+ The squared Euclidean distance.
  """
  return np.sum((p - q) ** 2)
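The four distance hunks document the formulas and show the `1e-20` clipping applied before each computation; a hedged sketch of two of them for readers who want to reproduce the numbers (the full function bodies are not shown in this diff):

```python
import numpy as np

def topsoe_sketch(p, q):
    # Same clipping floor the diff shows before each distance computation.
    p, q = np.maximum(p, 1e-20), np.maximum(q, 1e-20)
    return np.sum(p * np.log(2 * p / (p + q)) + q * np.log(2 * q / (p + q)))

def hellinger_sketch(p, q):
    p, q = np.maximum(p, 1e-20), np.maximum(q, 1e-20)
    return np.sqrt(np.sum((np.sqrt(p) - np.sqrt(q)) ** 2)) / np.sqrt(2)

p, q = np.array([0.2, 0.8]), np.array([0.5, 0.5])
print(topsoe_sketch(p, q), hellinger_sketch(p, q))
```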
@@ -15,7 +15,7 @@ import numpy as np


  class BaseProtocol(ProtocolMixin, BaseQuantifier):
- """Base class for evaluation protocols.
+ r"""Base class for evaluation protocols.

  Parameters
  ----------
@@ -76,7 +76,7 @@ class BaseProtocol(ProtocolMixin, BaseQuantifier):


  def split(self, X: np.ndarray, y: np.ndarray):
- """
+ r"""
  Split the data into samples for evaluation.

  Parameters
@@ -117,7 +117,7 @@ class BaseProtocol(ProtocolMixin, BaseQuantifier):


  class APP(BaseProtocol):
- """
+ r"""
  Artificial Prevalence Protocol (APP) for exhaustive prevalent batch evaluation.

  Generates batches with artificially imposed prevalences across all possible
@@ -185,7 +185,7 @@ class APP(BaseProtocol):


  class NPP(BaseProtocol):
- """
+ r"""
  Natural Prevalence Protocol (NPP) that samples data without imposing prevalence constraints.

  This protocol simply samples batches randomly with replacement,
@@ -230,7 +230,7 @@ class NPP(BaseProtocol):


  class UPP(BaseProtocol):
- """
+ r"""
  Uniform Prevalence Protocol (UPP) for uniform sampling of artificial prevalences.

  Similar to APP, but uses uniform prevalence distribution generation
@@ -310,7 +310,7 @@ class UPP(BaseProtocol):


  class PPP(BaseProtocol):
- """
+ r"""
  Personalized Prevalence Protocol (PPP) for targeted prevalence batch generation.

  Generates batches with user-specified prevalence distributions, allowing for
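To make the APP description concrete, a hedged sketch of grid-based prevalence enumeration; the protocol's real knobs (batch sizes, repetitions, sampling strategy) are not visible in this diff:

```python
import itertools
import numpy as np

def prevalence_grid_sketch(n_classes, step=0.5):
    # Every vector on a grid of the simplex whose entries are multiples
    # of `step` and sum to 1 — the space APP-style protocols sweep over.
    ticks = np.round(np.arange(0.0, 1.0 + step, step), 10)
    for combo in itertools.product(ticks, repeat=n_classes):
        if abs(sum(combo) - 1.0) < 1e-9:
            yield combo

print(list(prevalence_grid_sketch(2)))  # [(0.0, 1.0), (0.5, 0.5), (1.0, 0.0)]
```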
@@ -18,7 +18,7 @@ from mlquantify.model_selection import (
  )

  class GridSearchQ(MetaquantifierMixin, BaseQuantifier):
- """
+ r"""
  Grid Search for Quantifiers with evaluation protocols.

  This class automates the hyperparameter search over a grid of parameter
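For orientation only, a generic sketch of what a quantifier grid search does; this is not GridSearchQ's actual API, and `make_quantifier` and `evaluate_under_protocol` are hypothetical callables standing in for the class's internals:

```python
from sklearn.model_selection import ParameterGrid

def grid_search_sketch(make_quantifier, param_grid, evaluate_under_protocol):
    # Evaluate each parameter combination under a protocol; keep the best.
    best_params, best_score = None, float("inf")
    for params in ParameterGrid(param_grid):
        quantifier = make_quantifier(**params)
        score = evaluate_under_protocol(quantifier)  # e.g. mean error over APP batches
        if score < best_score:
            best_params, best_score = params, score
    return best_params, best_score
```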
@@ -13,94 +13,64 @@ from mlquantify.utils._validation import validate_prevalences
  EPS = 1e-12

  class BaseKDE(SoftLearnerQMixin, AggregationMixin, BaseQuantifier):
- r"""
- Base class for KDEy quantification methods.
-
- KDEy methods model the class-conditional densities of posterior probabilities
- using Kernel Density Estimation (KDE) in the probability simplex space.
- Given a probabilistic classifier's posterior outputs, each class distribution
- is approximated as a smooth density function via KDE. Class prevalences in
- the test set are estimated as the mixture weights of these densities that best
- explain the test posterior distribution.
-
- Formally, KDEy approximates the test posterior distribution as:
+ r"""Base class for KDEy quantification methods.

- \[
- p_{test}(x) \approx \sum_{k=1}^K \alpha_k p_k(x),
- \]
+ KDEy models the class-conditional densities of posterior probabilities using Kernel Density Estimation (KDE)
+ on the probability simplex. Given posterior outputs from a probabilistic classifier, each class distribution
+ is approximated as a smooth KDE. Test set class prevalences correspond to mixture weights that best explain
+ the overall test posterior distribution.

- where \( p_k(x) \) is the KDE of the posterior scores of class \( k \) on training data,
- and \( \alpha_k \) are the unknown class prevalences to be estimated under:
+ Mathematically, the test posterior distribution is approximated as:

- \[
- \alpha_k \geq 0, \quad \sum_{k=1}^K \alpha_k = 1.
- \]
-
- The quantification task is then to find the vector \( \boldsymbol{\alpha} = (\alpha_1,\dots,\alpha_K) \)
- minimizing an objective function defined on the mixture density and the test posteriors,
- subject to the simplex constraints on \( \boldsymbol{\alpha} \).
+ .. math::
+
+ p_{\mathrm{test}}(x) \approx \sum_{k=1}^K \alpha_k p_k(x),
+
+ where \(p_k(x)\) is the KDE of class \(k\) posteriors from training data, and \(\alpha_k\) are the unknown class
+ prevalences subject to:
+
+ .. math::
+
+ \alpha_k \geq 0, \quad \sum_{k=1}^K \alpha_k = 1.
+
+ The quantification minimizes an objective \(\mathcal{L}\) over \(\boldsymbol{\alpha} = (\alpha_1, \dots, \alpha_K)\) in the simplex:
+
+ .. math::
+
+ \min_{\boldsymbol{\alpha} \in \Delta^{K-1}} \mathcal{L} \left( \sum_{k=1}^K \alpha_k p_k(x), \hat{p}(x) \right),
+
+ where \(\hat{p}(x)\) is the test posterior distribution (empirical KDE or direct predictions).
+
+ This problem is typically solved using numerical constrained optimization methods.

  Attributes
  ----------
  learner : estimator
- The underlying probabilistic classifier yielding posterior predictions.
+ Probabilistic classifier generating posterior predictions.
  bandwidth : float
- Bandwidth (smoothing parameter) for the KDE models.
+ KDE bandwidth (smoothing parameter).
  kernel : str
- Kernel type used in KDE (e.g., 'gaussian').
+ KDE kernel type (e.g., 'gaussian').
  _precomputed : bool
- Indicates whether KDE models have been fitted on training data.
+ Indicates if KDE models have been fitted.
  best_distance : float or None
- Stores the best value of the objective (distance or loss) achieved.
-
- Methods
- -------
- fit(X, y, learner_fitted=False)
- Fits KDE models for each class using posterior predictions of the learner.
- predict(X)
- Aggregates learner’s posterior predictions on X to estimate class prevalences.
- aggregate(predictions, train_predictions, train_y_values)
- Core estimation method that validates inputs, ensures KDE precomputation,
- and calls `_solve_prevalences` implemented by subclasses.
- _fit_kde_models(train_predictions, train_y_values)
- Fits KDE model per class on training data posteriors.
- _solve_prevalences(predictions)
- Abstract method to estimate prevalence vector \( \boldsymbol{\alpha} \) for given posteriors.
- Must be implemented by subclasses.
+ Best objective value found during estimation.

  Examples
  --------
- To implement a new KDEy quantifier, subclass BaseKDE and implement the method
- `_solve_prevalences`, which receives posterior predictions and returns a tuple
-
- (estimated prevalences \(\boldsymbol{\alpha}\), objective value).
+ Subclasses should implement `_solve_prevalences` method returning estimated prevalences and objective value:

  >>> class KDEyExample(BaseKDE):
  ... def _solve_prevalences(self, predictions):
- ... # Example: simple uniform prevalences, replace with actual optimization
  ... n_classes = len(self._class_kdes)
  ... alpha = np.ones(n_classes) / n_classes
- ... obj_val = 0.0 # Replace with actual objective computation
+ ... obj_val = 0.0 # Placeholder, replace with actual objective
  ... return alpha, obj_val

- Mathematical formulation for prevalence estimation typically involves optimizing:
-
- \[
- \min_{\boldsymbol{\alpha} \in \Delta^{K-1}} \mathcal{L} \bigg( \sum_{k=1}^K \alpha_k p_k(x), \hat{p}(x) \bigg),
- \]
-
- where \(\hat{p}(x)\) is the test posterior distribution (empirical KDE or direct predictions),
- \(\Delta^{K-1}\) is the probability simplex defined by the constraints on \(\boldsymbol{\alpha}\),
- and \(\mathcal{L}\) is an appropriate divergence or loss function, e.g., negative log-likelihood,
- Hellinger distance, or Cauchy–Schwarz divergence.
-
- This optimization is typically solved numerically with constrained methods such as
- sequential quadratic programming or projected gradient descent.
-
  References
  ----------
- [1] Moreo, A., et al. (2023). Kernel Density Quantification methods and applications.
- In *Learning to Quantify*, Springer.
+ .. [1] Moreo, A., et al. (2023). Kernel Density Quantification methods and applications.
+ In *Learning to Quantify*, Springer.
  """

  _parameter_constraints = {
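The rewritten docstring above states the whole KDEy recipe; here is a self-contained sketch of the maximum-likelihood variant, as an illustration of the math rather than mlquantify's implementation:

```python
import numpy as np
from scipy.optimize import minimize
from sklearn.neighbors import KernelDensity

def kdey_ml_sketch(train_post, train_y, test_post, bandwidth=0.1):
    # Fit one KDE per class on training posteriors, then find the mixture
    # weights alpha on the simplex that best explain the test posteriors.
    classes = np.unique(train_y)
    kdes = [KernelDensity(bandwidth=bandwidth).fit(train_post[train_y == c])
            for c in classes]
    # densities[i, k] = p_k(x_i) for each test posterior x_i
    densities = np.exp(np.stack([k.score_samples(test_post) for k in kdes], axis=1))

    def neg_log_likelihood(alpha):
        return -np.sum(np.log(np.maximum(densities @ alpha, 1e-12)))

    n = len(classes)
    result = minimize(neg_log_likelihood, np.full(n, 1.0 / n), method="SLSQP",
                      bounds=[(0.0, 1.0)] * n,
                      constraints={"type": "eq", "fun": lambda a: a.sum() - 1.0})
    return result.x  # estimated prevalences alpha
```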
@@ -10,7 +10,7 @@ from mlquantify.utils._validation import validate_prevalences


  class PWK(BaseQuantifier):
- """
+ r"""
  Probabilistic Weighted k-Nearest Neighbor (PWK) Quantifier.

  This quantifier leverages the PWKCLF classifier to perform quantification by estimating
@@ -47,15 +47,6 @@ class PWK(BaseQuantifier):
  learner : PWKCLF
  Underlying probabilistic weighted k-NN classifier.

- Methods
- -------
- fit(X, y)
- Fits the quantifier by training the internal PWKCLF and wrapping it with
- Classify & Count quantification.
- predict(X)
- Predicts class prevalences for input data using the trained model.
- classify(X)
- Returns label predictions by applying the trained PWKCLF classifier.


  Examples
@@ -5,8 +5,7 @@ from sklearn.neighbors import NearestNeighbors


  class PWKCLF:
- """
- Probabilistic Weighted k-Nearest Neighbor Classifier (PWKCLF).
+ r"""Probabilistic Weighted k-Nearest Neighbor Classifier (PWKCLF).

  A weighted k-nearest neighbor classifier that assigns class probabilities to
  instances based on neighbor counts weighted by class-specific inverse frequency
@@ -29,22 +28,16 @@ class PWKCLF:
  y_train : ndarray
  Labels of training samples.

- Methods
- -------
- fit(X, y)
- Fits the k-NN structure and computes class weights.
- predict(X)
- Predicts class labels by weighted voting among neighbors.

  Notes
  -----
  The class weights are defined as:

- \[
- w_c = \left( \frac{N_c}{\min_{c'} N_{c'}} \right)^{-\frac{1}{\alpha}},
- \]
+ .. math::
+
+ w_c = \left( \frac{N_c}{\min_{c'} N_{c'}} \right)^{-\frac{1}{\alpha}},

- where \( N_c \) is the count of class \( c \) in the training set.
+ where :math:`N_c` is the count of class :math:`c` in the training set.

  This weighting scheme reduces bias towards majority classes by downweighting them
  in the voting process.
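The weight formula above is complete enough to evaluate directly; an illustrative reading (not the library's code):

```python
import numpy as np

def pwk_class_weights_sketch(y, alpha=2.0):
    # w_c = (N_c / min_c' N_c')^(-1/alpha): majority classes get smaller weight.
    _, counts = np.unique(y, return_counts=True)
    return (counts / counts.min()) ** (-1.0 / alpha)

# A 9:1 imbalanced binary problem downweights the majority class:
print(pwk_class_weights_sketch(np.array([0] * 90 + [1] * 10)))  # [~0.33, 1.0]
```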
@@ -16,17 +16,16 @@ from scipy.optimize import minimize
  # ============================================================

  def _optimize_on_simplex(objective, n_classes, x0=None):
- """
- Optimize an objective function over the probability simplex.
+ r"""Optimize an objective function over the probability simplex.

  This function performs constrained optimization to find the mixture weights
- \( \alpha \) on the simplex \( \Delta^{n-1} = \{ \alpha \in \mathbb{R}^n : \alpha_i \geq 0, \sum_i \alpha_i = 1 \} \)
+ :math:`\alpha` on the simplex :math:`\Delta^{n-1} = \{ \alpha \in \mathbb{R}^n : \alpha_i \geq 0, \sum_i \alpha_i = 1 \}`
  that minimize the given objective function.

  Parameters
  ----------
  objective : callable
- Function from \( \mathbb{R}^n \to \mathbb{R} \) to minimize.
+ Function from :math:`\mathbb{R}^n \to \mathbb{R}` to minimize.
  n_classes : int
  Dimensionality of the simplex (number of classes).
  x0 : array-like, optional
@@ -59,7 +58,7 @@ def _optimize_on_simplex(objective, n_classes, x0=None):
  # ============================================================

  class KDEyML(BaseKDE):
- """KDEy Maximum Likelihood quantifier.
+ r"""KDEy Maximum Likelihood quantifier.

  Models class-conditional densities of posterior probabilities via Kernel Density
  Estimation (KDE) and estimates class prevalences by maximizing the likelihood of
@@ -80,13 +79,13 @@ class KDEyML(BaseKDE):
  """

  def _precompute_training(self, train_predictions, train_y_values):
- """
+ r"""
  Fit KDE models on class-specific training posterior predictions.
  """
  super()._fit_kde_models(train_predictions, train_y_values)

  def _solve_prevalences(self, predictions):
- """
+ r"""
  Estimate class prevalences by maximizing log-likelihood under KDE mixture.

  Parameters
@@ -208,8 +207,7 @@ class KDEyHD(BaseKDE):
  # ============================================================

  class KDEyCS(BaseKDE):
- """
- KDEy Cauchy-Schwarz Divergence quantifier.
+ r"""KDEy Cauchy-Schwarz Divergence quantifier.

  Uses a closed-form solution for minimizing the Cauchy-Schwarz (CS) divergence between
  Gaussian Mixture Models representing class-conditional densities fitted via KDE.
@@ -11,17 +11,16 @@ EPS = 1e-12
  # ============================================================

  def gaussian_kernel(X, Y, bandwidth):
- """
- Compute the Gaussian kernel matrix K(x, y) with specified bandwidth.
+ r"""Compute the Gaussian kernel matrix K(x, y) with specified bandwidth.

  This kernel matrix represents the similarity between each pair of points in X and Y,
  computed using the Gaussian (RBF) kernel function:

- \[
- K(x, y) = \frac{1}{(2 \pi)^{D/2} h^D} \exp\left(- \frac{\|x - y\|^2}{2 h^2}\right)
- \]
+ .. math::
+
+ K(x, y) = \frac{1}{(2 \pi)^{D/2} h^D} \exp\left(- \frac{\|x - y\|^2}{2 h^2}\right)

- where \( h \) is the bandwidth (smoothing parameter), and \( D \) is the dimensionality
+ where :math:`h` is the bandwidth (smoothing parameter), and :math:`D` is the dimensionality
  of the input feature space.

  Parameters
@@ -31,7 +30,7 @@ def gaussian_kernel(X, Y, bandwidth):
  Y : array-like of shape (n_samples_Y, n_features) or None
  Input data points for kernel computation. If None, defaults to X.
  bandwidth : float
- Kernel bandwidth parameter \( h \).
+ Kernel bandwidth parameter :math:`h`.

  Returns
  -------
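A sketch implementing the documented kernel formula as written; the function body itself is not part of this diff:

```python
import numpy as np
from scipy.spatial.distance import cdist

def gaussian_kernel_sketch(X, Y=None, bandwidth=0.1):
    Y = X if Y is None else Y                    # Y defaults to X, as documented
    D = X.shape[1]
    sq_dists = cdist(X, Y, "sqeuclidean")        # ||x - y||^2 for every pair
    norm = (2 * np.pi) ** (D / 2) * bandwidth ** D
    return np.exp(-sq_dists / (2 * bandwidth ** 2)) / norm
```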
@@ -50,14 +49,13 @@ def gaussian_kernel(X, Y, bandwidth):


  def negative_log_likelihood(mixture_likelihoods):
- """
- Compute the negative log-likelihood of given mixture likelihoods in a numerically stable way.
+ r"""Compute the negative log-likelihood of given mixture likelihoods in a numerically stable way.
+
+ Given mixture likelihood values :math:`p_i` for samples, the negative log-likelihood is:

- Given mixture likelihood values \( p_i \) for samples, the negative log-likelihood is:
+ .. math::

- \[
- - \sum_i \log(p_i)
- \]
+ - \sum_i \log(p_i)

  Numerical stability is achieved by clipping likelihoods below a small epsilon.

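A sketch matching the documented formula and the stability note, using the `EPS = 1e-12` constant this module declares in the context above:

```python
import numpy as np

def negative_log_likelihood_sketch(mixture_likelihoods, eps=1e-12):
    # Clip likelihoods below eps so log never sees zero.
    p = np.maximum(np.asarray(mixture_likelihoods, dtype=float), eps)
    return -np.sum(np.log(p))

print(negative_log_likelihood_sketch([0.5, 0.25, 0.0]))  # clipping avoids -inf
```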
@@ -76,14 +74,13 @@ def negative_log_likelihood(mixture_likelihoods):


  def _simplex_constraints(n):
- """
- Define constraints and bounds for optimization over the probability simplex.
+ r"""Define constraints and bounds for optimization over the probability simplex.
+
+ The simplex is defined as all vectors :math:`\alpha \in \mathbb{R}^n` such that:

- The simplex is defined as all vectors \( \alpha \in \mathbb{R}^n \) such that:
+ .. math::

- \[
- \alpha_i \geq 0, \quad \sum_{i=1}^n \alpha_i = 1
- \]
+ \alpha_i \geq 0, \quad \sum_{i=1}^n \alpha_i = 1

  Parameters
  ----------
@@ -103,8 +100,7 @@ def _simplex_constraints(n):


  def _optimize_on_simplex(objective, n, x0=None):
- """
- Minimize an objective function over the probability simplex.
+ r"""Minimize an objective function over the probability simplex.

  This function solves for mixture weights \( \boldsymbol{\alpha} \) that minimize the
  objective function under the constraints \(\alpha_i \geq 0\) and \(\sum_i \alpha_i = 1\).
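A hedged sketch of what these two helpers amount to in scipy terms: bounds plus one equality constraint define the simplex, and SLSQP minimizes over it (illustrative, not the module's code):

```python
import numpy as np
from scipy.optimize import minimize

def simplex_constraints_sketch(n):
    # scipy-compatible bounds and equality constraint for the simplex.
    bounds = [(0.0, 1.0)] * n
    constraints = [{"type": "eq", "fun": lambda a: np.sum(a) - 1.0}]
    return bounds, constraints

def optimize_on_simplex_sketch(objective, n, x0=None):
    bounds, constraints = simplex_constraints_sketch(n)
    x0 = np.full(n, 1.0 / n) if x0 is None else x0  # uniform start by default
    return minimize(objective, x0, method="SLSQP",
                    bounds=bounds, constraints=constraints).x

# Minimizing squared distance to (0.7, 0.3) recovers that point:
print(optimize_on_simplex_sketch(lambda a: np.sum((a - np.array([0.7, 0.3])) ** 2), 2))
```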
@@ -3,7 +3,7 @@ import pandas as pd
  from collections import defaultdict


- def get_prev_from_labels(y) -> dict:
+ def get_prev_from_labels(y, format="dict") -> dict:
  """
  Get the real prevalence of each class in the target array.

@@ -19,6 +19,9 @@ def get_prev_from_labels(y) -> dict:
  """
  if isinstance(y, np.ndarray):
  y = pd.Series(y)
+ if format == "array":
+ prevalences = y.value_counts(normalize=True).sort_index().values
+ return prevalences
  real_prevs = y.value_counts(normalize=True).to_dict()
  real_prevs = dict(sorted(real_prevs.items()))
  return real_prevs
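The only behavioral change in this file is the new `format` argument: per the added lines, `format="array"` returns class-sorted prevalences as a NumPy array instead of a dict. A short usage example (the import path is inferred from the RECORD listing below and may differ from the public re-export):

```python
import numpy as np
from mlquantify.utils.prevalence import get_prev_from_labels

y = np.array(["cat", "dog", "dog", "dog"])
print(get_prev_from_labels(y))                  # {'cat': 0.25, 'dog': 0.75}
print(get_prev_from_labels(y, format="array"))  # [0.25 0.75]
```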
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mlquantify
- Version: 0.1.9
+ Version: 0.1.10
  Summary: Quantification Library
  Home-page: https://github.com/luizfernandolj/QuantifyML/tree/master
  Maintainer: Luiz Fernando Luth Junior
@@ -0,0 +1,53 @@
+ mlquantify/__init__.py,sha256=O03s8PZhkTy7V8lOrmHIsGAwI3RPUlUPfeNTVTnU-Q8,326
+ mlquantify/base.py,sha256=o7IaKODocyi4tEmCvGmHKQ8F4ZJsaEh4kymsNcLyHAg,5077
+ mlquantify/base_aggregative.py,sha256=uqfhpUmgv5pNLLvqgROCWHfjs3sj_2jfwOTyzUySuGo,7545
+ mlquantify/calibration.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
+ mlquantify/confidence.py,sha256=QkEWr6s-Su3Nbinia_TRQbBeTM6ymDPe7Bv204XBKKA,10799
+ mlquantify/multiclass.py,sha256=Jux0fvL5IBZA3DXLCuqUEE77JYYBGAcW6GaEH9srmu4,11747
+ mlquantify/adjust_counting/__init__.py,sha256=AWio99zeaUULQq9vKggkFhnq-tqgXxasQt167NdcNVY,307
+ mlquantify/adjust_counting/_adjustment.py,sha256=JYfPj-x0tw6aLt6m3YehsuKXF6FMUIIvjXImbiqXkGI,23130
+ mlquantify/adjust_counting/_base.py,sha256=-nxH0seDXmEW9eGoqrH69JaIUCeNjFKBa8pDDH8u0Tg,9342
+ mlquantify/adjust_counting/_counting.py,sha256=7Ip7-XHQJcTWcWVDaLzEIM6WYcp8k5axsCIyD3QPWZE,5572
+ mlquantify/adjust_counting/_utils.py,sha256=DEPNzvcr0KszCnfUJaRzBilwWzuNVMSdy5eV7aQ_JPE,2907
+ mlquantify/likelihood/__init__.py,sha256=3dC5uregNmquUKz0r0-3aPspfjZjKGn3TRBoZPO1uFs,53
+ mlquantify/likelihood/_base.py,sha256=seu_Vb58QttcGbFjHKAplMYGZcVbIHqkyTXEK2cax9A,5830
+ mlquantify/likelihood/_classes.py,sha256=PZ31cAwO8q5X3O2_oSmQ1FM6bY4EsB8hWEcAgcEmWXQ,14731
+ mlquantify/meta/__init__.py,sha256=GzdGw4ky_kmd5VNWiLBULy06IdN_MLCDAuJKbnMOx4s,62
+ mlquantify/meta/_classes.py,sha256=3twKSrm4mF_AXZ1FP0V0hoOo-ceJulGKKgSEBvU8Vt0,30631
+ mlquantify/metrics/__init__.py,sha256=3bzzjSYTgrZIJsfAgJidQlB-bnjInwVYUvJ34bPhZxY,186
+ mlquantify/metrics/_oq.py,sha256=koXDKeHWksl_vHpZuhc2pAps8wvu_MOgEztlSr04MmE,3544
+ mlquantify/metrics/_rq.py,sha256=3yiEmGaRAGpzL29Et3tNqkJ3RMsLXwUX3uL9RoIgi40,3034
+ mlquantify/metrics/_slq.py,sha256=JZceO2LR3mjbT_0zVcl9xI6jf8pn3tIcpP3vP3Luf9I,6817
+ mlquantify/mixture/__init__.py,sha256=_KKhpFuvi3vYwxydm5nOy9MKwmIU4eyZDN9Pe00hqtk,70
+ mlquantify/mixture/_base.py,sha256=1-yW64FPQXB_d9hH9KjSlDnmFtW9FY7S2hppXAd1DBg,5645
+ mlquantify/mixture/_classes.py,sha256=uYtWh6oTx0M3rTG71gfO6RWt3QVXH6KN5F-J4YKN0TM,16329
+ mlquantify/mixture/_utils.py,sha256=CKlC081nrkJ8Pil7lrPZvNZC_xfpXV8SsuQq3M_LHgA,4037
+ mlquantify/model_selection/__init__.py,sha256=98I0uf8k6lbWAjazGyGjbOdPOvzU8aMRLqC3I7D3jzk,113
+ mlquantify/model_selection/_protocol.py,sha256=2k0M_7YwZf7YLoQ8ElR2xMvLySVgtE_EvWieMXTIzTA,12499
+ mlquantify/model_selection/_search.py,sha256=1UoP3tZ-pdfM25C-gOS89qjGKcDgQEeU7GTbwtsLKHU,10695
+ mlquantify/model_selection/_split.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
+ mlquantify/neighbors/__init__.py,sha256=rIOuSaUhjqEXsUN9HNZ62P53QG0N7lJ3j1pvf8kJzms,93
+ mlquantify/neighbors/_base.py,sha256=ZKU2r19mJHUNXT9Y5fDPOib7zUsT5H9cgDEE2MPa0DM,6582
+ mlquantify/neighbors/_classes.py,sha256=VxvuULA8O9hL3p0PxeDZkrZmUNbRYen_HsX_BkkzGd0,5234
+ mlquantify/neighbors/_classification.py,sha256=8xNqaTQXUGg_dbQd6SqwKWb07BM2QM0uwZeXZ5C_DMs,4136
+ mlquantify/neighbors/_kde.py,sha256=g0D8DlebJ5OZETW3SaMWyJUe1AZIytV--yIX3RCu46o,9931
+ mlquantify/neighbors/_utils.py,sha256=CozcKtmd6ZDluMT4bvOj4QI7xwORF_vCIJRucPEzJJo,4123
+ mlquantify/neural/__init__.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
+ mlquantify/utils/__init__.py,sha256=fCozxFABSv5L7lbD16-J370dbc_xHien3w0crYKPLTc,1344
+ mlquantify/utils/_artificial.py,sha256=6tqMoAuxUULFGHXtMez56re4DZ7d2Q6tK55LPGeEiO8,713
+ mlquantify/utils/_constraints.py,sha256=r1WDJuqsO3OS2Q45IBKJGtB6iUjcAXMW8USaEakyvCI,5600
+ mlquantify/utils/_context.py,sha256=25QmzmfSiuF_hwCjY_7db_XfCnj1dVe4mIbDycVTHf8,661
+ mlquantify/utils/_decorators.py,sha256=yYtnPBh1sLSN6wTY-7ZVAV0j--qbpJxBsgncm794JPc,1205
+ mlquantify/utils/_exceptions.py,sha256=C3BQSv3-7QDLaorKcV-ANxnBcSaxHQSlCc6YSZrPK6c,392
+ mlquantify/utils/_get_scores.py,sha256=VlTvgg_t4D9MzcgsH7YvP_wIL5AZ8XmEtGpbFivdVJk,5280
+ mlquantify/utils/_load.py,sha256=cMGXIs-8mUB4blAmagyDNNvAaV2hysRgeInQMl5fDHg,303
+ mlquantify/utils/_parallel.py,sha256=XotpX9nsj6nW-tNCmZ-ahTcRztgnn9oQKP2cl1rLdYM,196
+ mlquantify/utils/_random.py,sha256=7F3nyy7Pa_kN8xP8P1L6MOM4WFu4BirE7bOfGTZ1Spk,1275
+ mlquantify/utils/_sampling.py,sha256=QQxE2WKLdiCFUfPF6fKgzyrsOUIWYf74w_w8fbYVc2c,8409
+ mlquantify/utils/_tags.py,sha256=Rz78TLpxgVxBKS0mKTlC9Qo_kn6HaEwVKNXh8pxFT7M,1095
+ mlquantify/utils/_validation.py,sha256=dE7NYLy6C5UWf8tXIhQeWLTz2-rej_gr8-aAIwgJTPk,16762
+ mlquantify/utils/prevalence.py,sha256=FXLCJViQb2yDbyTXeGZt8WsPPnSZINhorQYZTKXOn14,1772
+ mlquantify-0.1.10.dist-info/METADATA,sha256=qvy3E7u4daj9ZSZnrza7ZtNHcs46xx63wMWbeq4R3T8,5193
+ mlquantify-0.1.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ mlquantify-0.1.10.dist-info/top_level.txt,sha256=tGEkYkbbFElwULvqENjam3u1uXtyC1J9dRmibsq8_n0,11
+ mlquantify-0.1.10.dist-info/RECORD,,
@@ -1,53 +0,0 @@
- mlquantify/__init__.py,sha256=P48iiVlcAeKeE6wr6yZGMTKwmtCOvQYO4ZUVCKAQMwM,52
- mlquantify/base.py,sha256=o7IaKODocyi4tEmCvGmHKQ8F4ZJsaEh4kymsNcLyHAg,5077
- mlquantify/base_aggregative.py,sha256=uqfhpUmgv5pNLLvqgROCWHfjs3sj_2jfwOTyzUySuGo,7545
- mlquantify/calibration.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
- mlquantify/confidence.py,sha256=IUF6sLVmDi4XxX5NvbSFl4-cBbl1mdedtDMFqV-GA48,10918
- mlquantify/multiclass.py,sha256=Jux0fvL5IBZA3DXLCuqUEE77JYYBGAcW6GaEH9srmu4,11747
- mlquantify/adjust_counting/__init__.py,sha256=8qQtTzRAoRiIux_R8wCXopdi6dOg1ESd8oPWv-LvUC0,191
- mlquantify/adjust_counting/_adjustment.py,sha256=BdFwYTwWhwdSxBgu98yTsVyyxgPz_Xm53YEjMxXI8f8,12824
- mlquantify/adjust_counting/_base.py,sha256=abF0lo3fetR77JP87MS7Hy204jF7NYdxwyJWPE5hNyE,9344
- mlquantify/adjust_counting/_counting.py,sha256=n4pBdyntrrxZTu7dWMnCsgN5kz6npU7CNIgRPQLY-nA,5266
- mlquantify/adjust_counting/_utils.py,sha256=wlBrihWKPzzxXmIqowreZ_lN6buD6hFCH98qA3H6s5s,2636
- mlquantify/likelihood/__init__.py,sha256=3dC5uregNmquUKz0r0-3aPspfjZjKGn3TRBoZPO1uFs,53
- mlquantify/likelihood/_base.py,sha256=J6ze15i-TlMMEVl4KvE2_wdam-fq0ZqWl7pSkas35qs,6075
- mlquantify/likelihood/_classes.py,sha256=Xp0hU83mYmfs1AOlmGEYLLsBPZBjPoi2xTx-2H4ztuI,15111
- mlquantify/meta/__init__.py,sha256=GzdGw4ky_kmd5VNWiLBULy06IdN_MLCDAuJKbnMOx4s,62
- mlquantify/meta/_classes.py,sha256=msivgTXvPw6Duq2Uv_odoayX-spZPtuWtD0FQ_8UFdw,29824
- mlquantify/metrics/__init__.py,sha256=3bzzjSYTgrZIJsfAgJidQlB-bnjInwVYUvJ34bPhZxY,186
- mlquantify/metrics/_oq.py,sha256=qTLyKpQkdnyzNOmWjplnLnr7nMDNqlBtfnddo5XHJ48,3542
- mlquantify/metrics/_rq.py,sha256=v0FUepNF-Wj0f1MdB1-9TXSNDze-J0BXUqaTCo5gnUA,3032
- mlquantify/metrics/_slq.py,sha256=nigIpZtPhPYVe5GU3qf1TOxGIkmKOrrhLXAm_tDPaCQ,6808
- mlquantify/mixture/__init__.py,sha256=_KKhpFuvi3vYwxydm5nOy9MKwmIU4eyZDN9Pe00hqtk,70
- mlquantify/mixture/_base.py,sha256=VDAOY6vFM2OayQxN4APysZ-ZycfrUwUS5Zzjr5v2t04,6076
- mlquantify/mixture/_classes.py,sha256=BbBrMIFKWoaP5CjW35agecwl3TE6ZmmBh8kwUyp72Ig,14012
- mlquantify/mixture/_utils.py,sha256=3507D13aw6Xl5Ki5bcC1j8yZH4EqawmtRke9m4AouT4,3049
- mlquantify/model_selection/__init__.py,sha256=98I0uf8k6lbWAjazGyGjbOdPOvzU8aMRLqC3I7D3jzk,113
- mlquantify/model_selection/_protocol.py,sha256=J-96OPJCkwtwk96P962qeztENRwypO__SbDLxM-Myvo,12493
- mlquantify/model_selection/_search.py,sha256=YXeSSJXQVrKjwxfKOKJ9amkXZ1mOPJWKh2x2SQNO5rM,10694
- mlquantify/model_selection/_split.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
- mlquantify/neighbors/__init__.py,sha256=rIOuSaUhjqEXsUN9HNZ62P53QG0N7lJ3j1pvf8kJzms,93
- mlquantify/neighbors/_base.py,sha256=tYgq_yjEuqv0dipo2hZW99GxlF09mwpTxlMLV2gYUHo,8258
- mlquantify/neighbors/_classes.py,sha256=zWPn9zhY_Xw3AgC1A112DUO3LrbuKOMvkZU2Cx0elYU,5577
- mlquantify/neighbors/_classification.py,sha256=wzqz6eZpE4AAyBmxv8cjheluoUB_RDYse_mfLCVXfzI,4310
- mlquantify/neighbors/_kde.py,sha256=URCq_o-bSb94_-1jX1Ag-1ZLkP0sBa-D3obAgqN6YYg,9930
- mlquantify/neighbors/_utils.py,sha256=rAu2VuW13rBj935z_m-u0MpfQbLQC0Iq_1WPSnAXZCk,4114
- mlquantify/neural/__init__.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
- mlquantify/utils/__init__.py,sha256=fCozxFABSv5L7lbD16-J370dbc_xHien3w0crYKPLTc,1344
- mlquantify/utils/_artificial.py,sha256=6tqMoAuxUULFGHXtMez56re4DZ7d2Q6tK55LPGeEiO8,713
- mlquantify/utils/_constraints.py,sha256=r1WDJuqsO3OS2Q45IBKJGtB6iUjcAXMW8USaEakyvCI,5600
- mlquantify/utils/_context.py,sha256=25QmzmfSiuF_hwCjY_7db_XfCnj1dVe4mIbDycVTHf8,661
- mlquantify/utils/_decorators.py,sha256=yYtnPBh1sLSN6wTY-7ZVAV0j--qbpJxBsgncm794JPc,1205
- mlquantify/utils/_exceptions.py,sha256=C3BQSv3-7QDLaorKcV-ANxnBcSaxHQSlCc6YSZrPK6c,392
- mlquantify/utils/_get_scores.py,sha256=VlTvgg_t4D9MzcgsH7YvP_wIL5AZ8XmEtGpbFivdVJk,5280
- mlquantify/utils/_load.py,sha256=cMGXIs-8mUB4blAmagyDNNvAaV2hysRgeInQMl5fDHg,303
- mlquantify/utils/_parallel.py,sha256=XotpX9nsj6nW-tNCmZ-ahTcRztgnn9oQKP2cl1rLdYM,196
- mlquantify/utils/_random.py,sha256=7F3nyy7Pa_kN8xP8P1L6MOM4WFu4BirE7bOfGTZ1Spk,1275
- mlquantify/utils/_sampling.py,sha256=QQxE2WKLdiCFUfPF6fKgzyrsOUIWYf74w_w8fbYVc2c,8409
- mlquantify/utils/_tags.py,sha256=Rz78TLpxgVxBKS0mKTlC9Qo_kn6HaEwVKNXh8pxFT7M,1095
- mlquantify/utils/_validation.py,sha256=dE7NYLy6C5UWf8tXIhQeWLTz2-rej_gr8-aAIwgJTPk,16762
- mlquantify/utils/prevalence.py,sha256=9chdjfUyac7Omxv50Rb_HmfkQFrHfTjGiQPdbVH7FXc,1631
- mlquantify-0.1.9.dist-info/METADATA,sha256=QZUSlEfWxeFGjI8R1QzJx5Y4DeyCiWjFqxFHQzYEIz0,5192
- mlquantify-0.1.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- mlquantify-0.1.9.dist-info/top_level.txt,sha256=tGEkYkbbFElwULvqENjam3u1uXtyC1J9dRmibsq8_n0,11
- mlquantify-0.1.9.dist-info/RECORD,,