valor-lite 0.33.12__py3-none-any.whl → 0.33.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -282,6 +282,7 @@ def compute_metrics(
282
282
  ],
283
283
  NDArray[np.float64],
284
284
  NDArray[np.float64],
285
+ NDArray[np.float64],
285
286
  ]:
286
287
  """
287
288
  Computes Object Detection metrics.
@@ -309,13 +310,15 @@ def compute_metrics(
309
310
 
310
311
  Returns
311
312
  -------
312
- tuple[NDArray, NDArray, NDArray, float]
313
+ tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
313
314
  Average Precision results.
314
- tuple[NDArray, NDArray, NDArray, float]
315
+ tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
315
316
  Average Recall results.
316
- np.ndarray
317
- Precision, Recall, TP, FP, FN, F1 Score, Accuracy.
318
- np.ndarray
317
+ NDArray[np.float64]
318
+ Accuracy.
319
+ NDArray[np.float64]
320
+ Precision, Recall, TP, FP, FN, F1 Score.
321
+ NDArray[np.float64]
319
322
  Interpolated Precision-Recall Curves.
320
323
  """
321
324
 
@@ -329,9 +332,10 @@ def compute_metrics(
329
332
  elif n_scores == 0:
330
333
  raise ValueError("At least one score threshold must be passed.")
331
334
 
332
- average_precision = np.zeros((n_ious, n_labels))
333
- average_recall = np.zeros((n_scores, n_labels))
334
- counts = np.zeros((n_ious, n_scores, n_labels, 7))
335
+ average_precision = np.zeros((n_ious, n_labels), dtype=np.float64)
336
+ average_recall = np.zeros((n_scores, n_labels), dtype=np.float64)
337
+ accuracy = np.zeros((n_ious, n_scores), dtype=np.float64)
338
+ counts = np.zeros((n_ious, n_scores, n_labels, 6), dtype=np.float64)
335
339
 
336
340
  pd_labels = data[:, 5].astype(np.int32)
337
341
  scores = data[:, 6]
@@ -417,14 +421,6 @@ def compute_metrics(
417
421
  out=f1_score,
418
422
  )
419
423
 
420
- accuracy = np.zeros_like(tp_count)
421
- np.divide(
422
- tp_count,
423
- (gt_count + pd_count),
424
- where=(gt_count + pd_count) > 1e-9,
425
- out=accuracy,
426
- )
427
-
428
424
  counts[iou_idx][score_idx] = np.concatenate(
429
425
  (
430
426
  tp_count[:, np.newaxis],
@@ -433,11 +429,18 @@ def compute_metrics(
433
429
  precision[:, np.newaxis],
434
430
  recall[:, np.newaxis],
435
431
  f1_score[:, np.newaxis],
436
- accuracy[:, np.newaxis],
437
432
  ),
438
433
  axis=1,
439
434
  )
440
435
 
436
+ # calculate accuracy
437
+ total_pd_count = label_metadata[:, 1].sum()
438
+ accuracy[iou_idx, score_idx] = (
439
+ (tp_count.sum() / total_pd_count)
440
+ if total_pd_count > 1e-9
441
+ else 0.0
442
+ )
443
+
441
444
  # calculate recall for AR
442
445
  average_recall[score_idx] += recall
443
446
 
@@ -552,6 +555,7 @@ def compute_metrics(
552
555
  return (
553
556
  ap_results,
554
557
  ar_results,
558
+ accuracy,
555
559
  counts,
556
560
  pr_curve,
557
561
  )
@@ -506,6 +506,7 @@ class Evaluator:
506
506
  average_recall_averaged_over_scores,
507
507
  mean_average_recall_averaged_over_scores,
508
508
  ),
509
+ accuracy,
509
510
  precision_recall,
510
511
  pr_curves,
511
512
  ) = compute_metrics(
@@ -593,6 +594,16 @@ class Evaluator:
593
594
  )
594
595
  ]
595
596
 
597
+ metrics[MetricType.Accuracy] = [
598
+ Accuracy(
599
+ value=float(accuracy[iou_idx, score_idx]),
600
+ iou_threshold=iou_thresholds[iou_idx],
601
+ score_threshold=score_thresholds[score_idx],
602
+ )
603
+ for iou_idx in range(accuracy.shape[0])
604
+ for score_idx in range(accuracy.shape[1])
605
+ ]
606
+
596
607
  metrics[MetricType.PrecisionRecallCurve] = [
597
608
  PrecisionRecallCurve(
598
609
  precisions=pr_curves[iou_idx, label_idx, :, 0]
@@ -650,12 +661,6 @@ class Evaluator:
650
661
  **kwargs,
651
662
  )
652
663
  )
653
- metrics[MetricType.Accuracy].append(
654
- Accuracy(
655
- value=float(row[6]),
656
- **kwargs,
657
- )
658
- )
659
664
 
660
665
  if as_dict:
661
666
  return {
@@ -160,9 +160,9 @@ class Recall(_ClassMetric):
160
160
  pass
161
161
 
162
162
 
163
- class Accuracy(_ClassMetric):
163
+ class F1(_ClassMetric):
164
164
  """
165
- Accuracy metric for a specific class label in object detection.
165
+ F1 score for a specific class label in object detection.
166
166
 
167
167
  This class encapsulates a metric value for a particular class label,
168
168
  along with the associated Intersection over Union (IoU) threshold and
@@ -190,20 +190,18 @@ class Accuracy(_ClassMetric):
190
190
  pass
191
191
 
192
192
 
193
- class F1(_ClassMetric):
193
+ @dataclass
194
+ class Accuracy:
194
195
  """
195
- F1 score for a specific class label in object detection.
196
+ Accuracy metric for the object detection task type.
196
197
 
197
- This class encapsulates a metric value for a particular class label,
198
- along with the associated Intersection over Union (IoU) threshold and
199
- confidence score threshold.
198
+ This class encapsulates a metric value at a specific Intersection
199
+ over Union (IoU) threshold and confidence score threshold.
200
200
 
201
201
  Attributes
202
202
  ----------
203
203
  value : float
204
204
  The metric value.
205
- label : str
206
- The class label for which the metric is calculated.
207
205
  iou_threshold : float
208
206
  The IoU threshold used to determine matches between predicted and ground truth boxes.
209
207
  score_threshold : float
@@ -217,7 +215,22 @@ class F1(_ClassMetric):
217
215
  Converts the instance to a dictionary representation.
218
216
  """
219
217
 
220
- pass
218
+ value: float
219
+ iou_threshold: float
220
+ score_threshold: float
221
+
222
+ def to_metric(self) -> Metric:
223
+ return Metric(
224
+ type=type(self).__name__,
225
+ value=self.value,
226
+ parameters={
227
+ "iou_threshold": self.iou_threshold,
228
+ "score_threshold": self.score_threshold,
229
+ },
230
+ )
231
+
232
+ def to_dict(self) -> dict:
233
+ return self.to_metric().to_dict()
221
234
 
222
235
 
223
236
  @dataclass
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: valor-lite
3
- Version: 0.33.12
3
+ Version: 0.33.13
4
4
  Summary: Compute valor metrics locally.
5
5
  License: MIT License
6
6
 
@@ -8,17 +8,17 @@ valor_lite/classification/manager.py,sha256=fwb5z84SzgJ-ud1kTY3oYbUJLbA7R0cdWqqc
8
8
  valor_lite/classification/metric.py,sha256=JjY9x6Sq1Hr_2agGnyT9EhVI5wXKQcMmEwxIK32yhGw,11903
9
9
  valor_lite/object_detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
10
10
  valor_lite/object_detection/annotation.py,sha256=o6VfiRobiB0ljqsNBLAYMXgi32RSIR7uTA-dgxq6zBI,8248
11
- valor_lite/object_detection/computation.py,sha256=ZW83XT-aemRg-5ZdISmrj0bRD9wWmYCU3gkSlfXlNZc,27747
12
- valor_lite/object_detection/manager.py,sha256=vb4JpynNF0JcnFwNmReFjls9UGAquigN2hpEbG89J04,38991
13
- valor_lite/object_detection/metric.py,sha256=tHRVnpBqw_w1VwnNkTCmu1yv7Max9FRlf5uh0wYew4s,24046
11
+ valor_lite/object_detection/computation.py,sha256=dbwyBgRQqG47R8FINd0vQq10b85rsq0jZH4M81KQT24,28017
12
+ valor_lite/object_detection/manager.py,sha256=gjKpytNldF51V_xUktJHrLRDQme-AkZ3HpiL8uMnYJY,39156
13
+ valor_lite/object_detection/metric.py,sha256=SS3U-HV3QgHoN3hcY2DmLl5GzK4KyvC78vjXTIa7XAU,24330
14
14
  valor_lite/semantic_segmentation/__init__.py,sha256=IdarTHKUuUMDvMBmInQu12Mm_NMCbql6Hf0nL5b56Ak,424
15
15
  valor_lite/semantic_segmentation/annotation.py,sha256=CujYFdHS3fgr4Y7mEDs_u1XBmbPJzNU2CdqvjCT_d_A,2938
16
16
  valor_lite/semantic_segmentation/computation.py,sha256=iJkEmTNmw9HwQCxSnpJkQsAdVcFriGhhu_WMks6D7tU,5122
17
17
  valor_lite/semantic_segmentation/manager.py,sha256=aJk6edWZWKqrzl6hVmEUSZVYhHLuyihxWgAIXsCXkZ0,17361
18
18
  valor_lite/semantic_segmentation/metric.py,sha256=Y8M3z92SaABEe9TwBUN37TFsh9DR5WoIxO-TfXVwz8I,6289
19
19
  valor_lite/text_generation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
20
- valor_lite-0.33.12.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
21
- valor_lite-0.33.12.dist-info/METADATA,sha256=Sak_wCIniTYTNYjNrF8sy-ITbFdG_j_zFUq-jz1PYLk,5632
22
- valor_lite-0.33.12.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
23
- valor_lite-0.33.12.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
24
- valor_lite-0.33.12.dist-info/RECORD,,
20
+ valor_lite-0.33.13.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
21
+ valor_lite-0.33.13.dist-info/METADATA,sha256=4WsoZ-i3KETNN5d1F333ZJvhGPj1tiGEAzSTjrOW7yk,5632
22
+ valor_lite-0.33.13.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
23
+ valor_lite-0.33.13.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
24
+ valor_lite-0.33.13.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.1.0)
2
+ Generator: setuptools (75.2.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5