valor-lite 0.33.13__py3-none-any.whl → 0.33.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -9,15 +9,9 @@ from valor_lite.semantic_segmentation.computation import (
     compute_intermediate_confusion_matrices,
     compute_metrics,
 )
-from valor_lite.semantic_segmentation.metric import (
-    F1,
-    Accuracy,
-    ConfusionMatrix,
-    IoU,
-    MetricType,
-    Precision,
-    Recall,
-    mIoU,
+from valor_lite.semantic_segmentation.metric import Metric, MetricType
+from valor_lite.semantic_segmentation.utilities import (
+    unpack_precision_recall_iou_into_metric_lists,
 )
 
 """
@@ -193,7 +187,6 @@ class Evaluator:
     def compute_precision_recall_iou(
         self,
         filter_: Filter | None = None,
-        as_dict: bool = False,
     ) -> dict[MetricType, list]:
         """
         Performs an evaluation and returns metrics.
@@ -202,8 +195,6 @@ class Evaluator:
         ----------
         filter_ : Filter, optional
             An optional filter object.
-        as_dict : bool, default=False
-            An option to return metrics as dictionaries.
 
         Returns
         -------
@@ -220,112 +211,22 @@ class Evaluator:
         label_metadata = filter_.label_metadata
         n_pixels = filter_.n_pixels
 
-        (
-            precision,
-            recall,
-            f1_score,
-            accuracy,
-            ious,
-            hallucination_ratios,
-            missing_prediction_ratios,
-        ) = compute_metrics(
+        results = compute_metrics(
             data=data,
             label_metadata=label_metadata,
             n_pixels=n_pixels,
         )
 
-        metrics = defaultdict(list)
-
-        metrics[MetricType.Accuracy] = [
-            Accuracy(
-                value=float(accuracy),
-            )
-        ]
-
-        metrics[MetricType.ConfusionMatrix] = [
-            ConfusionMatrix(
-                confusion_matrix={
-                    self.index_to_label[gt_label_idx]: {
-                        self.index_to_label[pd_label_idx]: {
-                            "iou": float(ious[gt_label_idx, pd_label_idx])
-                        }
-                        for pd_label_idx in range(self.n_labels)
-                        if label_metadata[pd_label_idx, 0] > 0
-                    }
-                    for gt_label_idx in range(self.n_labels)
-                    if label_metadata[gt_label_idx, 0] > 0
-                },
-                hallucinations={
-                    self.index_to_label[pd_label_idx]: {
-                        "ratio": float(hallucination_ratios[pd_label_idx])
-                    }
-                    for pd_label_idx in range(self.n_labels)
-                    if label_metadata[pd_label_idx, 0] > 0
-                },
-                missing_predictions={
-                    self.index_to_label[gt_label_idx]: {
-                        "ratio": float(missing_prediction_ratios[gt_label_idx])
-                    }
-                    for gt_label_idx in range(self.n_labels)
-                    if label_metadata[gt_label_idx, 0] > 0
-                },
-            )
-        ]
-
-        metrics[MetricType.mIoU] = [
-            mIoU(
-                value=float(ious.diagonal().mean()),
-            )
-        ]
-
-        for label_idx, label in self.index_to_label.items():
-
-            kwargs = {
-                "label": label,
-            }
-
-            # if no groundtruths exists for a label, skip it.
-            if label_metadata[label_idx, 0] == 0:
-                continue
-
-            metrics[MetricType.Precision].append(
-                Precision(
-                    value=float(precision[label_idx]),
-                    **kwargs,
-                )
-            )
-            metrics[MetricType.Recall].append(
-                Recall(
-                    value=float(recall[label_idx]),
-                    **kwargs,
-                )
-            )
-            metrics[MetricType.F1].append(
-                F1(
-                    value=float(f1_score[label_idx]),
-                    **kwargs,
-                )
-            )
-            metrics[MetricType.IoU].append(
-                IoU(
-                    value=float(ious[label_idx, label_idx]),
-                    **kwargs,
-                )
-            )
-
-        if as_dict:
-            return {
-                mtype: [metric.to_dict() for metric in mvalues]
-                for mtype, mvalues in metrics.items()
-            }
-
-        return metrics
+        return unpack_precision_recall_iou_into_metric_lists(
+            results=results,
+            label_metadata=label_metadata,
+            index_to_label=self.index_to_label,
+        )
 
     def evaluate(
         self,
         filter_: Filter | None = None,
-        as_dict: bool = False,
-    ) -> dict[MetricType, list]:
+    ) -> dict[MetricType, list[Metric]]:
         """
         Computes all available metrics.
 
@@ -333,17 +234,13 @@ class Evaluator:
         ----------
         filter_ : Filter, optional
             An optional filter object.
-        as_dict : bool, default=False
-            An option to return metrics as dictionaries.
 
         Returns
        -------
-        dict[MetricType, list]
-            A dictionary mapping metric type to lists of metrics.
+        dict[MetricType, list[Metric]]
+            Lists of metrics organized by metric type.
         """
-        return self.compute_precision_recall_iou(
-            filter_=filter_, as_dict=as_dict
-        )
+        return self.compute_precision_recall_iou(filter_=filter_)
 
 
 class DataLoader:
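
The hunks above remove the as_dict flag from compute_precision_recall_iou() and evaluate(); both now return dict[MetricType, list[Metric]]. A minimal caller-side sketch of how the old dictionary output could be reproduced (the evaluator construction is elided, and the availability of Metric.to_dict() via valor_lite.schemas.BaseMetric is an assumption, not something this diff shows):

    # Assumes `evaluator` is an already-finalized semantic segmentation Evaluator.
    metrics = evaluator.evaluate()  # dict[MetricType, list[Metric]] as of 0.33.14

    # Rough replacement for the removed `as_dict=True` path, assuming the new
    # Metric objects expose to_dict() like the old per-metric classes did.
    metrics_as_dicts = {
        mtype: [m.to_dict() for m in mvalues]
        for mtype, mvalues in metrics.items()
    }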
@@ -1,7 +1,6 @@
-from dataclasses import dataclass
 from enum import Enum
 
-from valor_lite.schemas import Metric
+from valor_lite.schemas import BaseMetric
 
 
 class MetricType(Enum):
@@ -9,270 +8,248 @@ class MetricType(Enum):
     Recall = "Recall"
     Accuracy = "Accuracy"
     F1 = "F1"
-    IoU = "IoU"
-    mIoU = "mIoU"
+    IOU = "IOU"
+    mIOU = "mIOU"
     ConfusionMatrix = "ConfusionMatrix"
 
-    @classmethod
-    def base(cls):
-        return [
-            cls.Precision,
-            cls.Recall,
-            cls.Accuracy,
-            cls.F1,
-            cls.IoU,
-            cls.mIoU,
-            cls.ConfusionMatrix,
-        ]
-
-
-@dataclass
-class _LabelValue:
-    value: float
-    label: str
-
-    def to_metric(self) -> Metric:
-        return Metric(
-            type=type(self).__name__,
-            value=self.value,
-            parameters={
-                "label": self.label,
-            },
-        )
-
-    def to_dict(self) -> dict:
-        return self.to_metric().to_dict()
-
-
-class Precision(_LabelValue):
-    """
-    Precision metric for a specific class label.
-
-    Precision is calulated using the number of true-positive pixels divided by
-    the sum of all true-positive and false-positive pixels.
-
-    Attributes
-    ----------
-    value : float
-        The computed precision value.
-    label : str
-        The label for which the precision is calculated.
-
-    Methods
-    -------
-    to_metric()
-        Converts the instance to a generic `Metric` object.
-    to_dict()
-        Converts the instance to a dictionary representation.
-    """
-
-    pass
-
-
-class Recall(_LabelValue):
-    """
-    Recall metric for a specific class label.
-
-    Recall is calulated using the number of true-positive pixels divided by
-    the sum of all true-positive and false-negative pixels.
-
-    Attributes
-    ----------
-    value : float
-        The computed recall value.
-    label : str
-        The label for which the recall is calculated.
-
-    Methods
-    -------
-    to_metric()
-        Converts the instance to a generic `Metric` object.
-    to_dict()
-        Converts the instance to a dictionary representation.
-    """
-
-    pass
-
-
-class F1(_LabelValue):
-    """
-    F1 score for a specific class label.
-
-    Attributes
-    ----------
-    value : float
-        The computed F1 score.
-    label : str
-        The label for which the F1 score is calculated.
-
-    Methods
-    -------
-    to_metric()
-        Converts the instance to a generic `Metric` object.
-    to_dict()
-        Converts the instance to a dictionary representation.
-    """
 
-    pass
-
-
-class IoU(_LabelValue):
+class Metric(BaseMetric):
     """
-    Intersection over Union (IoU) ratio for a specific class label.
+    Semantic Segmentation Metric.
 
     Attributes
     ----------
-    value : float
-        The computed IoU ratio.
-    label : str
-        The label for which the IoU is calculated.
-
-    Methods
-    -------
-    to_metric()
-        Converts the instance to a generic `Metric` object.
-    to_dict()
-        Converts the instance to a dictionary representation.
+    type : str
+        The metric type.
+    value : int | float | dict
+        The metric value.
+    parameters : dict[str, Any]
+        A dictionary containing metric parameters.
     """
 
-    pass
-
-
-@dataclass
-class _Value:
-    value: float
-
-    def to_metric(self) -> Metric:
-        return Metric(
-            type=type(self).__name__,
-            value=self.value,
-            parameters={},
+    @classmethod
+    def precision(
+        cls,
+        value: float,
+        label: str,
+    ):
+        """
+        Precision metric for a specific class label.
+
+        Precision is calulated using the number of true-positive pixels divided by
+        the sum of all true-positive and false-positive pixels.
+
+        Parameters
+        ----------
+        value : float
+            The computed precision value.
+        label : str
+            The label for which the precision is calculated.
+
+        Returns
+        -------
+        Metric
+        """
+        return cls(
+            type=MetricType.Precision.value,
+            value=value,
+            parameters={
+                "label": label,
+            },
         )
 
-    def to_dict(self) -> dict:
-        return self.to_metric().to_dict()
-
+    @classmethod
+    def recall(
+        cls,
+        value: float,
+        label: str,
+    ):
+        """
+        Recall metric for a specific class label.
+
+        Recall is calulated using the number of true-positive pixels divided by
+        the sum of all true-positive and false-negative pixels.
+
+        Parameters
+        ----------
+        value : float
+            The computed recall value.
+        label : str
+            The label for which the recall is calculated.
+
+        Returns
+        -------
+        Metric
+        """
+        return cls(
+            type=MetricType.Recall.value,
+            value=value,
+            parameters={
+                "label": label,
+            },
+        )
 
-class Accuracy(_Value):
-    """
-    Accuracy metric computed over all labels.
+    @classmethod
+    def f1_score(
+        cls,
+        value: float,
+        label: str,
+    ):
+        """
+        F1 score for a specific class label.
+
+        Parameters
+        ----------
+        value : float
+            The computed F1 score.
+        label : str
+            The label for which the F1 score is calculated.
+
+        Returns
+        -------
+        Metric
+        """
+        return cls(
+            type=MetricType.F1.value,
+            value=value,
+            parameters={
+                "label": label,
+            },
+        )
 
-    Attributes
-    ----------
-    value : float
-        The accuracy value.
-
-    Methods
-    -------
-    to_metric()
-        Converts the instance to a generic `Metric` object.
-    to_dict()
-        Converts the instance to a dictionary representation.
-    """
+    @classmethod
+    def iou(
+        cls,
+        value: float,
+        label: str,
+    ):
+        """
+        Intersection over Union (IOU) ratio for a specific class label.
+
+        Parameters
+        ----------
+        value : float
+            The computed IOU ratio.
+        label : str
+            The label for which the IOU is calculated.
+
+        Returns
+        -------
+        Metric
+        """
+        return cls(
+            type=MetricType.IOU.value,
+            value=value,
+            parameters={
+                "label": label,
+            },
+        )
 
-    pass
+    @classmethod
+    def mean_iou(cls, value: float):
+        """
+        Mean Intersection over Union (mIOU) ratio.
 
+        The mIOU value is computed by averaging IOU over all labels.
 
-class mIoU(_Value):
-    """
-    Mean Intersection over Union (mIoU) ratio.
+        Parameters
+        ----------
+        value : float
+            The mIOU value.
 
-    The mIoU value is computed by averaging IoU over all labels.
+        Returns
+        -------
+        Metric
+        """
+        return cls(type=MetricType.mIOU.value, value=value, parameters={})
 
-    Attributes
-    ----------
-    value : float
-        The mIoU value.
-
-    Methods
-    -------
-    to_metric()
-        Converts the instance to a generic `Metric` object.
-    to_dict()
-        Converts the instance to a dictionary representation.
-    """
+    @classmethod
+    def accuracy(cls, value: float):
+        """
+        Accuracy metric computed over all labels.
 
-    pass
+        Parameters
+        ----------
+        value : float
+            The accuracy value.
 
+        Returns
+        -------
+        Metric
+        """
+        return cls(type=MetricType.Accuracy.value, value=value, parameters={})
 
-@dataclass
-class ConfusionMatrix:
-    """
-    The confusion matrix and related metrics for semantic segmentation tasks.
-
-    This class encapsulates detailed information about the model's performance, including correct
-    predictions, misclassifications, hallucinations (false positives), and missing predictions
-    (false negatives). It provides counts for each category to facilitate in-depth analysis.
+    @classmethod
+    def confusion_matrix(
+        cls,
+        confusion_matrix: dict[
+            str,  # ground truth label value
+            dict[
+                str,  # prediction label value
+                dict[str, float],  # iou
+            ],
+        ],
+        hallucinations: dict[
+            str,  # prediction label value
+            dict[str, float],  # pixel ratio
+        ],
+        missing_predictions: dict[
+            str,  # ground truth label value
+            dict[str, float],  # pixel ratio
+        ],
+    ):
+        """
+        The confusion matrix and related metrics for semantic segmentation tasks.
+
+        This class encapsulates detailed information about the model's performance, including correct
+        predictions, misclassifications, hallucinations (false positives), and missing predictions
+        (false negatives). It provides counts for each category to facilitate in-depth analysis.
+
+        Confusion Matrix Format:
+        {
+            <ground truth label>: {
+                <prediction label>: {
+                    'iou': <float>,
+                },
+            },
+        }
 
-    Confusion Matrix Format:
-    {
-        <ground truth label>: {
+        Hallucinations Format:
+        {
            <prediction label>: {
                'iou': <float>,
            },
-        },
-    }
-
-    Hallucinations Format:
-    {
-        <prediction label>: {
-            'iou': <float>,
-        },
-    }
-
-    Missing Predictions Format:
-    {
-        <ground truth label>: {
-            'iou': <float>,
-        },
-    }
+        }
 
-    Attributes
-    ----------
-    confusion_matrix : dict
-        Nested dictionaries representing the Intersection over Union (IoU) scores for each
-        ground truth label and prediction label pair.
-    hallucinations : dict
-        Dictionary representing the pixel ratios for predicted labels that do not correspond
-        to any ground truth labels (false positives).
-    missing_predictions : dict
-        Dictionary representing the pixel ratios for ground truth labels that were not predicted
-        (false negatives).
-
-    Methods
-    -------
-    to_metric()
-        Converts the instance to a generic `Metric` object.
-    to_dict()
-        Converts the instance to a dictionary representation.
-    """
-
-    confusion_matrix: dict[
-        str,  # ground truth label value
-        dict[
-            str,  # prediction label value
-            dict[str, float],  # iou
-        ],
-    ]
-    hallucinations: dict[
-        str,  # prediction label value
-        dict[str, float],  # pixel ratio
-    ]
-    missing_predictions: dict[
-        str,  # ground truth label value
-        dict[str, float],  # pixel ratio
-    ]
-
-    def to_metric(self) -> Metric:
-        return Metric(
-            type=type(self).__name__,
+        Missing Predictions Format:
+        {
+            <ground truth label>: {
+                'iou': <float>,
+            },
+        }
+
+        Parameters
+        ----------
+        confusion_matrix : dict
+            Nested dictionaries representing the Intersection over Union (IOU) scores for each
+            ground truth label and prediction label pair.
+        hallucinations : dict
+            Dictionary representing the pixel ratios for predicted labels that do not correspond
+            to any ground truth labels (false positives).
+        missing_predictions : dict
+            Dictionary representing the pixel ratios for ground truth labels that were not predicted
+            (false negatives).
+
+        Returns
+        -------
+        Metric
+        """
+        return cls(
+            type=MetricType.ConfusionMatrix.value,
            value={
-                "confusion_matrix": self.confusion_matrix,
-                "hallucinations": self.hallucinations,
-                "missing_predictions": self.missing_predictions,
+                "confusion_matrix": confusion_matrix,
+                "hallucinations": hallucinations,
+                "missing_predictions": missing_predictions,
            },
            parameters={},
        )
-
-    def to_dict(self) -> dict:
-        return self.to_metric().to_dict()
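
This hunk replaces the per-metric dataclasses (Precision, Recall, F1, IoU, mIoU, Accuracy, ConfusionMatrix) with a single Metric class that exposes one classmethod constructor per metric type. A small usage sketch of the constructors shown above (the numeric values and the "road" label are illustrative only):

    from valor_lite.semantic_segmentation.metric import Metric, MetricType

    precision = Metric.precision(value=0.87, label="road")
    miou = Metric.mean_iou(value=0.72)

    # Per the Metric docstring above, each instance carries a type, a value, and parameters.
    assert precision.type == MetricType.Precision.value
    assert precision.parameters == {"label": "road"}
    assert miou.parameters == {}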