mlquantify 0.1.15__tar.gz → 0.1.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mlquantify-0.1.15/mlquantify.egg-info → mlquantify-0.1.17}/PKG-INFO +1 -1
- mlquantify-0.1.17/VERSION.txt +1 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/adjust_counting/_adjustment.py +8 -7
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/adjust_counting/_counting.py +3 -2
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_validation.py +32 -6
- {mlquantify-0.1.15 → mlquantify-0.1.17/mlquantify.egg-info}/PKG-INFO +1 -1
- mlquantify-0.1.15/VERSION.txt +0 -1
- {mlquantify-0.1.15 → mlquantify-0.1.17}/MANIFEST.in +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/README.md +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/adjust_counting/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/adjust_counting/_base.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/adjust_counting/_utils.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/base.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/base_aggregative.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/calibration.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/confidence.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/likelihood/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/likelihood/_base.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/likelihood/_classes.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/meta/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/meta/_classes.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/metrics/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/metrics/_oq.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/metrics/_rq.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/metrics/_slq.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/mixture/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/mixture/_base.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/mixture/_classes.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/mixture/_utils.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/model_selection/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/model_selection/_protocol.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/model_selection/_search.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/model_selection/_split.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/multiclass.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/neighbors/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/neighbors/_base.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/neighbors/_classes.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/neighbors/_classification.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/neighbors/_kde.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/neighbors/_utils.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/neural/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/__init__.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_artificial.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_constraints.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_context.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_decorators.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_exceptions.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_get_scores.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_load.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_parallel.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_random.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_sampling.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/_tags.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify/utils/prevalence.py +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify.egg-info/SOURCES.txt +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify.egg-info/dependency_links.txt +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify.egg-info/requires.txt +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/mlquantify.egg-info/top_level.txt +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/setup.cfg +0 -0
- {mlquantify-0.1.15 → mlquantify-0.1.17}/setup.py +0 -0
mlquantify-0.1.17/VERSION.txt ADDED
@@ -0,0 +1 @@
+0.1.17
mlquantify/adjust_counting/_adjustment.py
@@ -105,7 +105,8 @@ class ThresholdAdjustment(SoftLearnerQMixin, BaseAdjustCount):
         thresholds, tprs, fprs = evaluate_thresholds(train_y_values, positive_scores)
         threshold, tpr, fpr = self.get_best_threshold(thresholds, tprs, fprs)

-        cc_predictions = CC(threshold=threshold).aggregate(predictions, train_y_values)
+        cc_predictions = CC(threshold=threshold).aggregate(predictions, train_y_values)
+        cc_predictions = list(cc_predictions.values())[1]

         if tpr - fpr == 0:
             prevalence = cc_predictions
@@ -204,13 +205,13 @@ class MatrixAdjustment(BaseAdjustCount):
         self.CM = np.zeros((n_class, n_class))

         if self.solver == 'optim':
-            priors = np.array(list(CC().aggregate(train_y_pred).values()))
+            priors = np.array(list(CC().aggregate(train_y_pred, train_y_values).values()))
             self.CM = self._compute_confusion_matrix(train_y_pred, train_y_values, priors)
-            prevs_estim = self._get_estimations(predictions > priors)
+            prevs_estim = self._get_estimations(predictions > priors, train_y_values)
             prevalence = self._solve_optimization(prevs_estim, priors)
         else:
             self.CM = self._compute_confusion_matrix(train_y_pred, train_y_values)
-            prevs_estim = self._get_estimations(predictions)
+            prevs_estim = self._get_estimations(predictions, train_y_values)
             prevalence = self._solve_linear(prevs_estim)

         return prevalence
@@ -261,11 +262,11 @@ class MatrixAdjustment(BaseAdjustCount):
         result = minimize(objective, init, constraints=constraints, bounds=bounds)
         return result.x if result.success else priors

-    def _get_estimations(self, predictions):
+    def _get_estimations(self, predictions, train_y_values):
         """Return prevalence estimates using CC (crisp) or PCC (probabilistic)."""
         if uses_soft_predictions(self):
             return np.array(list(PCC().aggregate(predictions).values()))
-        return np.array(list(CC().aggregate(predictions).values()))
+        return np.array(list(CC().aggregate(predictions, train_y_values).values()))

     @abstractmethod
     def _compute_confusion_matrix(self, predictions, *args):
@@ -609,7 +610,7 @@ class MS(ThresholdAdjustment):
         prevs = []
         for thr, tpr, fpr in zip(thresholds, tprs, fprs):
             cc_predictions = CC(threshold=thr).aggregate(predictions, train_y_values)
-            cc_predictions = cc_predictions[1]
+            cc_predictions = list(cc_predictions.values())[1]

             if tpr - fpr == 0:
                 prevalence = cc_predictions
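The recurring change in the _adjustment.py hunks above is that CC.aggregate is now given train_y_values and its result is treated as a mapping from class label to prevalence, so the threshold-based quantifiers (ThresholdAdjustment, MS) unpack the positive-class rate with list(cc_predictions.values())[1] instead of indexing the result positionally. A minimal sketch of that unpacking, using a hypothetical result dict in place of a real CC call:

```python
# Hypothetical output of CC(threshold=thr).aggregate(predictions, train_y_values):
# a dict mapping each training class label to its estimated prevalence.
cc_predictions = {0: 0.72, 1: 0.28}

# New pattern in ThresholdAdjustment and MS: take the positive-class prevalence,
# i.e. the second entry of the dict's values, rather than cc_predictions[1].
positive_prevalence = list(cc_predictions.values())[1]
print(positive_prevalence)  # 0.28
```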
mlquantify/adjust_counting/_counting.py
@@ -76,14 +76,15 @@ class CC(CrispLearnerQMixin, BaseCount):
         self.threshold = threshold

     def aggregate(self, predictions, train_y_values=None):
-        predictions = validate_predictions(self, predictions, self.threshold)
+        predictions = validate_predictions(self, predictions, self.threshold, train_y_values)

         if train_y_values is None:
             train_y_values = np.unique(predictions)
+
         self.classes_ = check_classes_attribute(self, np.unique(train_y_values))
         class_counts = np.array([np.count_nonzero(predictions == _class) for _class in self.classes_])
         prevalences = class_counts / len(predictions)
-
+
         prevalences = validate_prevalences(self, prevalences, self.classes_)
         return prevalences

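The _counting.py hunk above also touches the public call pattern: CC.aggregate now forwards train_y_values into validate_predictions, so soft scores can be binarized against the actual training labels before counting. A minimal usage sketch under stated assumptions (the import path and the dict-like return are inferred from this diff, not verified against the installed package):

```python
import numpy as np

from mlquantify.adjust_counting import CC  # assumed import path

# Soft positive-class scores and the training labels they refer to.
scores = np.array([0.91, 0.15, 0.67, 0.42, 0.08])
train_y = np.array(["neg", "pos", "neg", "pos", "neg"])

# With train_y_values supplied, validation can map the thresholded scores
# back onto the label set {"neg", "pos"} instead of bare 0/1 indices.
prevalences = CC(threshold=0.5).aggregate(scores, train_y)

# Per the callers in _adjustment.py, the result behaves like a dict of
# class label -> estimated prevalence.
print(list(prevalences.values()))
```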
mlquantify/utils/_validation.py
@@ -96,23 +96,49 @@ def validate_y(quantifier: Any, y: np.ndarray) -> None:

 def _get_valid_crisp_predictions(predictions, threshold=0.5):
     predictions = np.asarray(predictions)
-
     dimensions = predictions.ndim

+    if train_y_values is not None:
+        classes = np.unique(train_y_values)
+    else:
+        classes = None
+
     if dimensions > 2:
-
+        # Assuming the last dimension contains class probabilities
+        crisp_indices = np.argmax(predictions, axis=-1)
+        if classes is not None:
+            predictions = classes[crisp_indices]
+        else:
+            predictions = crisp_indices
     elif dimensions == 2:
-
+        # Binary or multi-class probabilities (N, C)
+        if classes is not None and len(classes) == 2:
+            # Binary case with explicit classes
+            predictions = np.where(predictions[:, 1] >= threshold, classes[1], classes[0])
+        elif classes is not None and len(classes) > 2:
+            # Multi-class case with explicit classes
+            crisp_indices = np.argmax(predictions, axis=1)
+            predictions = classes[crisp_indices]
+        else:
+            # Default binary (0 or 1) or multi-class (0 to C-1)
+            if predictions.shape[1] == 2:
+                predictions = (predictions[:, 1] >= threshold).astype(int)
+            else:
+                predictions = np.argmax(predictions, axis=1)
     elif dimensions == 1:
+        # 1D probabilities (e.g., probability of positive class)
         if np.issubdtype(predictions.dtype, np.floating):
-
+            if classes is not None and len(classes) == 2:
+                predictions = np.where(predictions >= threshold, classes[1], classes[0])
+            else:
+                predictions = (predictions >= threshold).astype(int)
     else:
         raise ValueError(f"Predictions array has an invalid number of dimensions. Expected 1 or more dimensions, got {predictions.ndim}.")

     return predictions


-def validate_predictions(quantifier: Any, predictions: np.ndarray, threshold: float = 0.5) -> np.ndarray:
+def validate_predictions(quantifier: Any, predictions: np.ndarray, threshold: float = 0.5, train_y_values=None) -> np.ndarray:
     """
     Validate predictions using the quantifier's declared output tags.
     Raises InputValidationError if inconsistent with tags.
@@ -132,7 +158,7 @@ def validate_predictions(quantifier: Any, predictions: np.ndarray, threshold: fl
             f"Soft predictions for {quantifier.__class__.__name__} must be float, got dtype {predictions.dtype}."
         )
     elif estimator_type == "crisp" and np.issubdtype(predictions.dtype, np.floating):
-        predictions = _get_valid_crisp_predictions(predictions, threshold)
+        predictions = _get_valid_crisp_predictions(predictions, train_y_values, threshold)
     return predictions

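For context on the _validation.py rewrite above: when train_y_values is available, the crisp-conversion helper now maps probabilistic predictions onto the actual training labels instead of returning bare 0-based indices. The following standalone numpy sketch mirrors the two main added branches for illustration only; it does not call the private helper itself:

```python
import numpy as np

threshold = 0.5
classes = np.array(["neg", "pos"])          # np.unique(train_y_values)

# Binary case, shape (N, 2): threshold the positive-class column and map
# the boolean outcome onto the explicit class labels.
proba_bin = np.array([[0.8, 0.2], [0.3, 0.7], [0.45, 0.55]])
crisp_bin = np.where(proba_bin[:, 1] >= threshold, classes[1], classes[0])
print(crisp_bin)                            # ['neg' 'pos' 'pos']

# Multi-class case, shape (N, C): argmax gives column indices, which are
# then looked up in the class array rather than returned as raw 0..C-1.
classes_mc = np.array(["a", "b", "c"])
proba_mc = np.array([[0.1, 0.7, 0.2], [0.6, 0.1, 0.3]])
crisp_mc = classes_mc[np.argmax(proba_mc, axis=1)]
print(crisp_mc)                             # ['b' 'a']
```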
mlquantify-0.1.15/VERSION.txt DELETED
@@ -1 +0,0 @@
-0.1.15