valor-lite 0.33.7__py3-none-any.whl → 0.33.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -46,7 +46,7 @@ class Counts:
     tp: int
     fp: int
     fn: int
-    label: tuple[str, str]
+    label: str
     iou_threshold: float
     score_threshold: float
 
@@ -62,10 +62,7 @@ class Counts:
             parameters={
                 "iou_threshold": self.iou_threshold,
                 "score_threshold": self.score_threshold,
-                "label": {
-                    "key": self.label[0],
-                    "value": self.label[1],
-                },
+                "label": self.label,
             },
         )
 
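Note: throughout the detection metrics in this file, the `label` field changes from a `(key, value)` tuple to a plain string, and the serialized `"label"` parameter is flattened to match. A minimal sketch of the before/after payload (the threshold and label values here are hypothetical, chosen only for illustration):

    # 0.33.7: label serialized as a key/value pair
    old_parameters = {
        "iou_threshold": 0.5,
        "score_threshold": 0.5,
        "label": {"key": "class", "value": "dog"},
    }

    # 0.33.8: label serialized as a single string
    new_parameters = {
        "iou_threshold": 0.5,
        "score_threshold": 0.5,
        "label": "dog",
    }

Consumers that previously read `parameters["label"]["value"]` would need to read the string directly under this change.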
@@ -76,7 +73,7 @@ class Counts:
 @dataclass
 class ClassMetric:
     value: float
-    label: tuple[str, str]
+    label: str
     iou_threshold: float
     score_threshold: float
 
@@ -88,10 +85,7 @@ class ClassMetric:
             parameters={
                 "iou_threshold": self.iou_threshold,
                 "score_threshold": self.score_threshold,
-                "label": {
-                    "key": self.label[0],
-                    "value": self.label[1],
-                },
+                "label": self.label,
             },
         )
 
@@ -119,7 +113,7 @@ class F1(ClassMetric):
 class AP:
     value: float
     iou_threshold: float
-    label: tuple[str, str]
+    label: str
 
     @property
     def metric(self) -> Metric:
@@ -128,10 +122,7 @@ class AP:
             value=self.value,
             parameters={
                 "iou_threshold": self.iou_threshold,
-                "label": {
-                    "key": self.label[0],
-                    "value": self.label[1],
-                },
+                "label": self.label,
             },
         )
 
@@ -143,7 +134,6 @@ class AP:
 class mAP:
     value: float
     iou_threshold: float
-    label_key: str
 
     @property
     def metric(self) -> Metric:
@@ -152,7 +142,6 @@ class mAP:
             value=self.value,
             parameters={
                 "iou_threshold": self.iou_threshold,
-                "label_key": self.label_key,
             },
         )
 
@@ -164,7 +153,7 @@ class mAP:
 class APAveragedOverIOUs:
     value: float
     iou_thresholds: list[float]
-    label: tuple[str, str]
+    label: str
 
     @property
     def metric(self) -> Metric:
@@ -173,10 +162,7 @@ class APAveragedOverIOUs:
             value=self.value,
             parameters={
                 "iou_thresholds": self.iou_thresholds,
-                "label": {
-                    "key": self.label[0],
-                    "value": self.label[1],
-                },
+                "label": self.label,
             },
         )
 
@@ -188,7 +174,6 @@ class APAveragedOverIOUs:
 class mAPAveragedOverIOUs:
     value: float
     iou_thresholds: list[float]
-    label_key: str
 
     @property
     def metric(self) -> Metric:
@@ -197,7 +182,6 @@ class mAPAveragedOverIOUs:
             value=self.value,
             parameters={
                 "iou_thresholds": self.iou_thresholds,
-                "label_key": self.label_key,
             },
         )
 
@@ -210,7 +194,7 @@ class AR:
     value: float
     score_threshold: float
     iou_thresholds: list[float]
-    label: tuple[str, str]
+    label: str
 
     @property
     def metric(self) -> Metric:
@@ -220,10 +204,7 @@ class AR:
             parameters={
                 "score_threshold": self.score_threshold,
                 "iou_thresholds": self.iou_thresholds,
-                "label": {
-                    "key": self.label[0],
-                    "value": self.label[1],
-                },
+                "label": self.label,
             },
         )
 
@@ -236,7 +217,6 @@ class mAR:
     value: float
     score_threshold: float
     iou_thresholds: list[float]
-    label_key: str
 
     @property
     def metric(self) -> Metric:
@@ -246,7 +226,6 @@ class mAR:
             parameters={
                 "score_threshold": self.score_threshold,
                 "iou_thresholds": self.iou_thresholds,
-                "label_key": self.label_key,
             },
         )
 
@@ -259,7 +238,7 @@ class ARAveragedOverScores:
     value: float
     score_thresholds: list[float]
     iou_thresholds: list[float]
-    label: tuple[str, str]
+    label: str
 
     @property
     def metric(self) -> Metric:
@@ -269,10 +248,7 @@ class ARAveragedOverScores:
             parameters={
                 "score_thresholds": self.score_thresholds,
                 "iou_thresholds": self.iou_thresholds,
-                "label": {
-                    "key": self.label[0],
-                    "value": self.label[1],
-                },
+                "label": self.label,
             },
         )
 
@@ -285,7 +261,6 @@ class mARAveragedOverScores:
     value: float
     score_thresholds: list[float]
    iou_thresholds: list[float]
-    label_key: str
 
     @property
     def metric(self) -> Metric:
@@ -295,7 +270,6 @@ class mARAveragedOverScores:
             parameters={
                 "score_thresholds": self.score_thresholds,
                 "iou_thresholds": self.iou_thresholds,
-                "label_key": self.label_key,
             },
         )
 
@@ -311,7 +285,7 @@ class PrecisionRecallCurve:
 
     precision: list[float]
     iou_threshold: float
-    label: tuple[str, str]
+    label: str
 
     @property
     def metric(self) -> Metric:
@@ -320,7 +294,7 @@ class PrecisionRecallCurve:
             value=self.precision,
             parameters={
                 "iou_threshold": self.iou_threshold,
-                "label": {"key": self.label[0], "value": self.label[1]},
+                "label": self.label,
             },
         )
 
@@ -341,8 +315,8 @@ class ConfusionMatrix:
                 dict[
                     str,  # either `datum`, `groundtruth`, `prediction` or score
                     str  # datum uid
-                    | tuple[
-                        float, float, float, float
+                    | dict[
+                        str, float
                     ]  # bounding box (xmin, xmax, ymin, ymax)
                     | float,  # prediction score
                 ]
@@ -360,8 +334,8 @@ class ConfusionMatrix:
                     str,  # either `datum`, `prediction` or score
                     str  # datum uid
                     | float  # prediction score
-                    | tuple[
-                        float, float, float, float
+                    | dict[
+                        str, float
                     ],  # bounding box (xmin, xmax, ymin, ymax)
                 ]
             ],
@@ -376,8 +350,8 @@ class ConfusionMatrix:
                 dict[
                     str,  # either `datum` or `groundtruth`
                     str  # datum uid
-                    | tuple[
-                        float, float, float, float
+                    | dict[
+                        str, float
                     ],  # bounding box (xmin, xmax, ymin, ymax)
                 ]
             ],
@@ -385,7 +359,6 @@ class ConfusionMatrix:
     ]
     score_threshold: float
     iou_threshold: float
-    label_key: str
     number_of_examples: int
 
     @property
@@ -400,7 +373,6 @@ class ConfusionMatrix:
             parameters={
                 "score_threshold": self.score_threshold,
                 "iou_threshold": self.iou_threshold,
-                "label_key": self.label_key,
             },
         )
 
@@ -0,0 +1,27 @@
+from .annotation import Bitmask, Segmentation
+from .manager import DataLoader, Evaluator
+from .metric import (
+    F1,
+    Accuracy,
+    ConfusionMatrix,
+    IoU,
+    MetricType,
+    Precision,
+    Recall,
+    mIoU,
+)
+
+__all__ = [
+    "DataLoader",
+    "Evaluator",
+    "Segmentation",
+    "Bitmask",
+    "MetricType",
+    "Precision",
+    "Recall",
+    "Accuracy",
+    "F1",
+    "IoU",
+    "mIoU",
+    "ConfusionMatrix",
+]
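The hunk above adds a package `__init__.py` that re-exports the public API of a new semantic segmentation module. A usage sketch follows; the module path `valor_lite.semantic_segmentation` is an assumption (the diff does not show file paths), and only the exported names are taken from the diff:

    from valor_lite.semantic_segmentation import (  # module path assumed
        Bitmask,
        DataLoader,
        Evaluator,
        MetricType,
        Segmentation,
    )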
@@ -0,0 +1,49 @@
+from dataclasses import dataclass, field
+
+import numpy as np
+from numpy.typing import NDArray
+
+
+@dataclass
+class Bitmask:
+    mask: NDArray[np.bool_]
+    label: str
+
+    def __post_init__(self):
+        if self.mask.dtype != np.bool_:
+            raise ValueError(
+                f"Bitmask received mask with dtype `{self.mask.dtype}`."
+            )
+
+
+@dataclass
+class Segmentation:
+    uid: str
+    groundtruths: list[Bitmask]
+    predictions: list[Bitmask]
+    shape: tuple[int, ...] = field(default_factory=lambda: (0, 0))
+    size: int = field(default=0)
+
+    def __post_init__(self):
+
+        groundtruth_shape = {
+            groundtruth.mask.shape for groundtruth in self.groundtruths
+        }
+        prediction_shape = {
+            prediction.mask.shape for prediction in self.predictions
+        }
+        if len(groundtruth_shape) == 0:
+            raise ValueError("The segmentation is missing ground truths.")
+        elif len(prediction_shape) == 0:
+            raise ValueError("The segmentation is missing predictions.")
+        elif (
+            len(groundtruth_shape) != 1
+            or len(prediction_shape) != 1
+            or groundtruth_shape != prediction_shape
+        ):
+            raise ValueError(
+                "A shape mismatch exists within the segmentation."
+            )
+
+        self.shape = groundtruth_shape.pop()
+        self.size = int(np.prod(np.array(self.shape)))
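A minimal sketch of constructing the new annotation types defined above, using a hypothetical 2x2 mask and label; field names and validation behaviour follow the dataclasses in the hunk (the import path for `Bitmask` and `Segmentation` is not shown in this diff):

    import numpy as np

    # masks must be boolean arrays; a non-bool dtype raises ValueError in __post_init__
    mask = np.array([[True, False], [False, True]])

    segmentation = Segmentation(
        uid="datum0",                                    # hypothetical datum uid
        groundtruths=[Bitmask(mask=mask, label="cat")],  # hypothetical label
        predictions=[Bitmask(mask=mask, label="cat")],
    )

    # shape and size are derived in __post_init__ from the (matching) mask shapes
    assert segmentation.shape == (2, 2)
    assert segmentation.size == 4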
@@ -0,0 +1,186 @@
+import numpy as np
+from numpy.typing import NDArray
+
+
+def compute_intermediate_confusion_matrices(
+    groundtruths: NDArray[np.bool_],
+    predictions: NDArray[np.bool_],
+    groundtruth_labels: NDArray[np.int32],
+    prediction_labels: NDArray[np.int32],
+    n_labels: int,
+) -> NDArray[np.int32]:
+    """
+    Computes an intermediate confusion matrix containing label counts.
+
+    Parameters
+    ----------
+    groundtruths : NDArray[np.bool_]
+        A 2-D array containing flattened bitmasks for each label.
+    predictions : NDArray[np.bool_]
+        A 2-D array containing flattened bitmasks for each label.
+    groundtruth_labels : NDArray[np.int32]
+        A 1-D array containing label indices.
+    prediction_labels : NDArray[np.int32]
+        A 1-D array containing label indices.
+    n_labels : int
+        The number of unique labels.
+
+    Returns
+    -------
+    NDArray[np.int32]
+        A 2-D confusion matrix with shape (n_labels + 1, n_labels + 1).
+    """
+
+    n_gt_labels = groundtruth_labels.size
+    n_pd_labels = prediction_labels.size
+
+    groundtruth_counts = groundtruths.sum(axis=1)
+    prediction_counts = predictions.sum(axis=1)
+
+    background_counts = np.logical_not(
+        groundtruths.any(axis=0) | predictions.any(axis=0)
+    ).sum()
+
+    intersection_counts = np.logical_and(
+        groundtruths.reshape(n_gt_labels, 1, -1),
+        predictions.reshape(1, n_pd_labels, -1),
+    ).sum(axis=2)
+
+    intersected_groundtruth_counts = intersection_counts.sum(axis=0)
+    intersected_prediction_counts = intersection_counts.sum(axis=1)
+
+    confusion_matrix = np.zeros((n_labels + 1, n_labels + 1), dtype=np.int32)
+    confusion_matrix[0, 0] = background_counts
+    for gidx in range(n_gt_labels):
+        gt_label_idx = groundtruth_labels[gidx]
+        for pidx in range(n_pd_labels):
+            pd_label_idx = prediction_labels[pidx]
+            confusion_matrix[
+                gt_label_idx + 1,
+                pd_label_idx + 1,
+            ] = intersection_counts[gidx, pidx]
+
+            if gidx == 0:
+                confusion_matrix[0, pd_label_idx + 1] = (
+                    prediction_counts[pidx]
+                    - intersected_prediction_counts[pidx]
+                )
+
+        confusion_matrix[gt_label_idx + 1, 0] = (
+            groundtruth_counts[gidx] - intersected_groundtruth_counts[gidx]
+        )
+
+    return confusion_matrix
+
+
+def compute_metrics(
+    data: NDArray[np.float64],
+    label_metadata: NDArray[np.int32],
+    n_pixels: int,
+) -> tuple[
+    NDArray[np.float64],
+    NDArray[np.float64],
+    NDArray[np.float64],
+    float,
+    NDArray[np.float64],
+    NDArray[np.float64],
+    NDArray[np.float64],
+]:
+    """
+    Computes semantic segmentation metrics.
+
+    Takes a stack of per-datum confusion matrices with shape
+    (n_datums, n_labels + 1, n_labels + 1).
+
+    Parameters
+    ----------
+    data : NDArray[np.float64]
+        A 3-D array containing confusion matrices for each datum.
+    label_metadata : NDArray[np.int32]
+        A 2-D array containing label metadata.
+    n_pixels : int
+        The total number of pixels evaluated.
+
+    Returns
+    -------
+    NDArray[np.float64]
+        Precision.
+    NDArray[np.float64]
+        Recall.
+    NDArray[np.float64]
+        F1 Score.
+    float
+        Accuracy.
+    NDArray[np.float64]
+        Confusion matrix containing IoU values.
+    NDArray[np.float64]
+        Hallucination ratios.
+    NDArray[np.float64]
+        Missing prediction ratios.
+    """
+    n_labels = label_metadata.shape[0]
+    gt_counts = label_metadata[:, 0]
+    pd_counts = label_metadata[:, 1]
+
+    counts = data.sum(axis=0)
+
+    # compute iou, missing_predictions and hallucinations
+    intersection_ = counts[1:, 1:]
+    union_ = (
+        gt_counts[:, np.newaxis] + pd_counts[np.newaxis, :] - intersection_
+    )
+
+    ious = np.zeros((n_labels, n_labels), dtype=np.float64)
+    np.divide(
+        intersection_,
+        union_,
+        where=union_ > 1e-9,
+        out=ious,
+    )
+
+    hallucination_ratio = np.zeros((n_labels), dtype=np.float64)
+    np.divide(
+        counts[0, 1:],
+        pd_counts,
+        where=pd_counts > 1e-9,
+        out=hallucination_ratio,
+    )
+
+    missing_prediction_ratio = np.zeros((n_labels), dtype=np.float64)
+    np.divide(
+        counts[1:, 0],
+        gt_counts,
+        where=gt_counts > 1e-9,
+        out=missing_prediction_ratio,
+    )
+
+    # compute precision, recall, f1
+    tp_counts = counts.diagonal()[1:]
+
+    precision = np.zeros(n_labels, dtype=np.float64)
+    np.divide(tp_counts, pd_counts, where=pd_counts > 1e-9, out=precision)
+
+    recall = np.zeros_like(precision)
+    np.divide(tp_counts, gt_counts, where=gt_counts > 1e-9, out=recall)
+
+    f1_score = np.zeros_like(precision)
+    np.divide(
+        2 * (precision * recall),
+        (precision + recall),
+        where=(precision + recall) > 0,
+        out=f1_score,
+    )
+
+    # compute accuracy
+    tp_count = counts[1:, 1:].diagonal().sum()
+    background_count = counts[0, 0]
+    accuracy = (
+        (tp_count + background_count) / n_pixels if n_pixels > 0 else 0.0
+    )
+
+    return (
+        precision,
+        recall,
+        f1_score,
+        accuracy,
+        ious,
+        hallucination_ratio,
+        missing_prediction_ratio,
+    )