valor-lite 0.33.11__py3-none-any.whl → 0.33.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -182,9 +182,9 @@ def compute_metrics(
          out=precision,
      )

-     accuracy = np.zeros_like(recall)
+     accuracy = np.zeros(n_scores, dtype=np.float64)
      np.divide(
-         (counts[:, :, 0] + counts[:, :, 3]),
+         counts[:, :, 0].sum(axis=1),
          float(n_datums),
          out=accuracy,
      )
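In 0.33.11 this computed a per-label accuracy of (TP + TN) / n_datums; 0.33.13 replaces it with a single multiclass accuracy per score threshold, i.e. true positives summed across labels divided by the number of datums. A minimal NumPy sketch of the new calculation, using a hypothetical `counts` array whose first channel is assumed to hold true positives per label:

import numpy as np

# Hypothetical counts array, shape (n_scores, n_labels, 4); channel 0 is
# assumed to hold true positives per label at each score threshold.
counts = np.array(
    [
        [[8.0, 1.0, 2.0, 0.0], [5.0, 2.0, 1.0, 0.0]],  # score threshold 0.5
        [[6.0, 0.0, 4.0, 0.0], [3.0, 1.0, 3.0, 0.0]],  # score threshold 0.75
    ]
)
n_datums = 20
n_scores = counts.shape[0]

# New behaviour: one multiclass accuracy per score threshold.
accuracy = np.zeros(n_scores, dtype=np.float64)
np.divide(counts[:, :, 0].sum(axis=1), float(n_datums), out=accuracy)
print(accuracy)  # [0.65 0.45]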
@@ -367,6 +367,14 @@ class Evaluator:
          )
      ]

+     metrics[MetricType.Accuracy] = [
+         Accuracy(
+             value=accuracy.astype(float).tolist(),
+             score_thresholds=score_thresholds,
+             hardmax=hardmax,
+         )
+     ]
+
      for label_idx, label in self.index_to_label.items():

          kwargs = {
@@ -401,12 +409,6 @@ class Evaluator:
              **kwargs,
          )
      )
-     metrics[MetricType.Accuracy].append(
-         Accuracy(
-             value=accuracy[:, label_idx].astype(float).tolist(),
-             **kwargs,
-         )
-     )
      metrics[MetricType.F1].append(
          F1(
              value=f1_score[:, label_idx].astype(float).tolist(),
@@ -158,24 +158,23 @@ class Recall(_ThresholdValue):
      pass


- class Accuracy(_ThresholdValue):
+ class F1(_ThresholdValue):
      """
-     Accuracy metric for a specific class label.
+     F1 score for a specific class label.

-     This class calculates the accuracy at various score thresholds for a binary
-     classification task. Accuracy is defined as the ratio of the sum of true positives and
-     true negatives over all predictions.
+     This class calculates the F1 score at various score thresholds for a binary
+     classification task.

      Attributes
      ----------
      value : list[float]
-         Accuracy values computed at each score threshold.
+         F1 scores computed at each score threshold.
      score_thresholds : list[float]
-         Score thresholds at which the accuracy values are computed.
+         Score thresholds at which the F1 scores are computed.
      hardmax : bool
          Indicates whether hardmax thresholding was used.
      label : str
-         The class label for which the accuracy is computed.
+         The class label for which the F1 score is computed.

      Methods
      -------
@@ -188,23 +187,21 @@ class Accuracy(_ThresholdValue):
      pass


- class F1(_ThresholdValue):
+ @dataclass
+ class Accuracy:
      """
-     F1 score for a specific class label.
+     Multiclass accuracy metric.

-     This class calculates the F1 score at various score thresholds for a binary
-     classification task.
+     This class calculates the accuracy at various score thresholds.

      Attributes
      ----------
      value : list[float]
-         F1 scores computed at each score threshold.
+         Accuracy values computed at each score threshold.
      score_thresholds : list[float]
-         Score thresholds at which the F1 scores are computed.
+         Score thresholds at which the accuracy values are computed.
      hardmax : bool
          Indicates whether hardmax thresholding was used.
-     label : str
-         The class label for which the F1 score is computed.

      Methods
      -------
@@ -214,7 +211,22 @@ class F1(_ThresholdValue):
          Converts the instance to a dictionary representation.
      """

-     pass
+     value: list[float]
+     score_thresholds: list[float]
+     hardmax: bool
+
+     def to_metric(self) -> Metric:
+         return Metric(
+             type=type(self).__name__,
+             value=self.value,
+             parameters={
+                 "score_thresholds": self.score_thresholds,
+                 "hardmax": self.hardmax,
+             },
+         )
+
+     def to_dict(self) -> dict:
+         return self.to_metric().to_dict()


  @dataclass
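The classification Accuracy metric is now a plain dataclass rather than a per-label _ThresholdValue, so its serialized form no longer carries a label. A hypothetical standalone sketch of the same serialization shape (the real class delegates to valor_lite's Metric type; the dictionary layout shown is an assumption for illustration):

from dataclasses import dataclass

# Hypothetical stand-in for the new dataclass; the real class builds a
# valor_lite Metric and calls its to_dict().
@dataclass
class Accuracy:
    value: list[float]
    score_thresholds: list[float]
    hardmax: bool

    def to_dict(self) -> dict:
        return {
            "type": type(self).__name__,
            "value": self.value,
            "parameters": {
                "score_thresholds": self.score_thresholds,
                "hardmax": self.hardmax,
            },
        }

metric = Accuracy(value=[0.65, 0.45], score_thresholds=[0.5, 0.75], hardmax=True)
print(metric.to_dict())
# {'type': 'Accuracy', 'value': [0.65, 0.45],
#  'parameters': {'score_thresholds': [0.5, 0.75], 'hardmax': True}}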
@@ -282,6 +282,7 @@ def compute_metrics(
      ],
      NDArray[np.float64],
      NDArray[np.float64],
+     NDArray[np.float64],
  ]:
      """
      Computes Object Detection metrics.
@@ -309,13 +310,15 @@ def compute_metrics(

      Returns
      -------
-     tuple[NDArray, NDArray, NDArray, float]
+     tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
          Average Precision results.
-     tuple[NDArray, NDArray, NDArray, float]
+     tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
          Average Recall results.
-     np.ndarray
-         Precision, Recall, TP, FP, FN, F1 Score, Accuracy.
-     np.ndarray
+     NDArray[np.float64]
+         Accuracy.
+     NDArray[np.float64]
+         Precision, Recall, TP, FP, FN, F1 Score.
+     NDArray[np.float64]
          Interpolated Precision-Recall Curves.
      """

@@ -329,9 +332,10 @@ def compute_metrics(
      elif n_scores == 0:
          raise ValueError("At least one score threshold must be passed.")

-     average_precision = np.zeros((n_ious, n_labels))
-     average_recall = np.zeros((n_scores, n_labels))
-     counts = np.zeros((n_ious, n_scores, n_labels, 7))
+     average_precision = np.zeros((n_ious, n_labels), dtype=np.float64)
+     average_recall = np.zeros((n_scores, n_labels), dtype=np.float64)
+     accuracy = np.zeros((n_ious, n_scores), dtype=np.float64)
+     counts = np.zeros((n_ious, n_scores, n_labels, 6), dtype=np.float64)

      pd_labels = data[:, 5].astype(np.int32)
      scores = data[:, 6]
@@ -417,14 +421,6 @@ def compute_metrics(
          out=f1_score,
      )

-     accuracy = np.zeros_like(tp_count)
-     np.divide(
-         tp_count,
-         (gt_count + pd_count),
-         where=(gt_count + pd_count) > 1e-9,
-         out=accuracy,
-     )
-
      counts[iou_idx][score_idx] = np.concatenate(
          (
              tp_count[:, np.newaxis],
@@ -433,11 +429,18 @@ def compute_metrics(
              precision[:, np.newaxis],
              recall[:, np.newaxis],
              f1_score[:, np.newaxis],
-             accuracy[:, np.newaxis],
          ),
          axis=1,
      )

+     # calculate accuracy
+     total_pd_count = label_metadata[:, 1].sum()
+     accuracy[iou_idx, score_idx] = (
+         (tp_count.sum() / total_pd_count)
+         if total_pd_count > 1e-9
+         else 0.0
+     )
+
      # calculate recall for AR
      average_recall[score_idx] += recall

@@ -552,6 +555,7 @@ def compute_metrics(
      return (
          ap_results,
          ar_results,
+         accuracy,
          counts,
          pr_curve,
      )
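The accuracy array added to the return value above holds one scalar per (IoU threshold, score threshold) pair: true positives summed over labels, divided by the total prediction count taken from label_metadata. A small sketch with made-up numbers (the [ground truth count, prediction count] column layout of label_metadata is assumed here; only column 1 is confirmed by the diff):

import numpy as np

# Hypothetical per-label true-positive counts at one (iou_idx, score_idx).
tp_count = np.array([12.0, 7.0, 3.0])

# label_metadata columns assumed as [ground truth count, prediction count]
# per label; column 1 is what the new code sums into total_pd_count.
label_metadata = np.array([[15, 20], [10, 12], [5, 8]], dtype=np.float64)

total_pd_count = label_metadata[:, 1].sum()  # 40 predictions overall
accuracy = tp_count.sum() / total_pd_count if total_pd_count > 1e-9 else 0.0
print(accuracy)  # 0.55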
@@ -506,6 +506,7 @@ class Evaluator:
          average_recall_averaged_over_scores,
          mean_average_recall_averaged_over_scores,
      ),
+     accuracy,
      precision_recall,
      pr_curves,
  ) = compute_metrics(
@@ -593,6 +594,16 @@ class Evaluator:
          )
      ]

+     metrics[MetricType.Accuracy] = [
+         Accuracy(
+             value=float(accuracy[iou_idx, score_idx]),
+             iou_threshold=iou_thresholds[iou_idx],
+             score_threshold=score_thresholds[score_idx],
+         )
+         for iou_idx in range(accuracy.shape[0])
+         for score_idx in range(accuracy.shape[1])
+     ]
+
      metrics[MetricType.PrecisionRecallCurve] = [
          PrecisionRecallCurve(
              precisions=pr_curves[iou_idx, label_idx, :, 0]
@@ -650,12 +661,6 @@ class Evaluator:
              **kwargs,
          )
      )
-     metrics[MetricType.Accuracy].append(
-         Accuracy(
-             value=float(row[6]),
-             **kwargs,
-         )
-     )

      if as_dict:
          return {
@@ -160,9 +160,9 @@ class Recall(_ClassMetric):
      pass


- class Accuracy(_ClassMetric):
+ class F1(_ClassMetric):
      """
-     Accuracy metric for a specific class label in object detection.
+     F1 score for a specific class label in object detection.

      This class encapsulates a metric value for a particular class label,
      along with the associated Intersection over Union (IoU) threshold and
@@ -190,20 +190,18 @@ class Accuracy(_ClassMetric):
      pass


- class F1(_ClassMetric):
+ @dataclass
+ class Accuracy:
      """
-     F1 score for a specific class label in object detection.
+     Accuracy metric for the object detection task type.

-     This class encapsulates a metric value for a particular class label,
-     along with the associated Intersection over Union (IoU) threshold and
-     confidence score threshold.
+     This class encapsulates a metric value at a specific Intersection
+     over Union (IoU) threshold and confidence score threshold.

      Attributes
      ----------
      value : float
          The metric value.
-     label : str
-         The class label for which the metric is calculated.
      iou_threshold : float
          The IoU threshold used to determine matches between predicted and ground truth boxes.
      score_threshold : float
@@ -217,7 +215,22 @@ class F1(_ClassMetric):
          Converts the instance to a dictionary representation.
      """

-     pass
+     value: float
+     iou_threshold: float
+     score_threshold: float
+
+     def to_metric(self) -> Metric:
+         return Metric(
+             type=type(self).__name__,
+             value=self.value,
+             parameters={
+                 "iou_threshold": self.iou_threshold,
+                 "score_threshold": self.score_threshold,
+             },
+         )
+
+     def to_dict(self) -> dict:
+         return self.to_metric().to_dict()


  @dataclass
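Downstream code that reads serialized detection Accuracy metrics should therefore expect iou_threshold/score_threshold parameters instead of a per-label entry. A hypothetical consumer-side sketch under that assumption, with made-up values:

# Hypothetical serialized metrics illustrating the new parameter set.
metrics = [
    {"type": "Accuracy", "value": 0.55,
     "parameters": {"iou_threshold": 0.5, "score_threshold": 0.5}},
    {"type": "Accuracy", "value": 0.40,
     "parameters": {"iou_threshold": 0.75, "score_threshold": 0.5}},
]

# Group by IoU threshold instead of by class label.
by_iou = {m["parameters"]["iou_threshold"]: m["value"] for m in metrics}
print(by_iou)  # {0.5: 0.55, 0.75: 0.4}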
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: valor-lite
- Version: 0.33.11
+ Version: 0.33.13
  Summary: Compute valor metrics locally.
  License: MIT License

@@ -3,22 +3,22 @@ valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
  valor_lite/classification/__init__.py,sha256=2wmmziIzUATm7MbmAcPNLXrEX5l4oeD7XBwPd9bWM3Q,506
  valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113
- valor_lite/classification/computation.py,sha256=qd9K7CcSGmMm_7shfX47_ZIuB-uE2LLiLMZSS_3NJTk,12093
- valor_lite/classification/manager.py,sha256=7NKk4syQHH5hBEUDWTD0zIFkJSNdOMzJn8a8GzfBnDc,23205
- valor_lite/classification/metric.py,sha256=m9_zD82YGl0QhuMql943YNKg67NZ6bsrR8ggs6_JZms,11728
+ valor_lite/classification/computation.py,sha256=pMePRFKCikYiGDgR-ZB8TmrzAts5ZIz4EywCT-XL42g,12100
+ valor_lite/classification/manager.py,sha256=fwb5z84SzgJ-ud1kTY3oYbUJLbA7R0cdWqqcaAIUcWs,23222
+ valor_lite/classification/metric.py,sha256=JjY9x6Sq1Hr_2agGnyT9EhVI5wXKQcMmEwxIK32yhGw,11903
  valor_lite/object_detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
  valor_lite/object_detection/annotation.py,sha256=o6VfiRobiB0ljqsNBLAYMXgi32RSIR7uTA-dgxq6zBI,8248
- valor_lite/object_detection/computation.py,sha256=ZW83XT-aemRg-5ZdISmrj0bRD9wWmYCU3gkSlfXlNZc,27747
- valor_lite/object_detection/manager.py,sha256=vb4JpynNF0JcnFwNmReFjls9UGAquigN2hpEbG89J04,38991
- valor_lite/object_detection/metric.py,sha256=tHRVnpBqw_w1VwnNkTCmu1yv7Max9FRlf5uh0wYew4s,24046
+ valor_lite/object_detection/computation.py,sha256=dbwyBgRQqG47R8FINd0vQq10b85rsq0jZH4M81KQT24,28017
+ valor_lite/object_detection/manager.py,sha256=gjKpytNldF51V_xUktJHrLRDQme-AkZ3HpiL8uMnYJY,39156
+ valor_lite/object_detection/metric.py,sha256=SS3U-HV3QgHoN3hcY2DmLl5GzK4KyvC78vjXTIa7XAU,24330
  valor_lite/semantic_segmentation/__init__.py,sha256=IdarTHKUuUMDvMBmInQu12Mm_NMCbql6Hf0nL5b56Ak,424
  valor_lite/semantic_segmentation/annotation.py,sha256=CujYFdHS3fgr4Y7mEDs_u1XBmbPJzNU2CdqvjCT_d_A,2938
  valor_lite/semantic_segmentation/computation.py,sha256=iJkEmTNmw9HwQCxSnpJkQsAdVcFriGhhu_WMks6D7tU,5122
  valor_lite/semantic_segmentation/manager.py,sha256=aJk6edWZWKqrzl6hVmEUSZVYhHLuyihxWgAIXsCXkZ0,17361
  valor_lite/semantic_segmentation/metric.py,sha256=Y8M3z92SaABEe9TwBUN37TFsh9DR5WoIxO-TfXVwz8I,6289
  valor_lite/text_generation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- valor_lite-0.33.11.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
- valor_lite-0.33.11.dist-info/METADATA,sha256=QniFV86iMnaqPtJElufV4tkF3-kI1sS6EXKRzupWavc,5632
- valor_lite-0.33.11.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- valor_lite-0.33.11.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
- valor_lite-0.33.11.dist-info/RECORD,,
+ valor_lite-0.33.13.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+ valor_lite-0.33.13.dist-info/METADATA,sha256=4WsoZ-i3KETNN5d1F333ZJvhGPj1tiGEAzSTjrOW7yk,5632
+ valor_lite-0.33.13.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+ valor_lite-0.33.13.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+ valor_lite-0.33.13.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.1.0)
+ Generator: setuptools (75.2.0)
  Root-Is-Purelib: true
  Tag: py3-none-any