valor-lite 0.34.1__tar.gz → 0.34.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {valor_lite-0.34.1 → valor_lite-0.34.3}/PKG-INFO +1 -1
  2. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/classification/computation.py +3 -1
  3. valor_lite-0.34.3/valor_lite/classification/numpy_compatibility.py +13 -0
  4. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/computation.py +9 -21
  5. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/metric.py +0 -36
  6. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/utilities.py +0 -12
  7. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite.egg-info/PKG-INFO +1 -1
  8. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite.egg-info/SOURCES.txt +1 -0
  9. {valor_lite-0.34.1 → valor_lite-0.34.3}/README.md +0 -0
  10. {valor_lite-0.34.1 → valor_lite-0.34.3}/pyproject.toml +0 -0
  11. {valor_lite-0.34.1 → valor_lite-0.34.3}/setup.cfg +0 -0
  12. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/LICENSE +0 -0
  13. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/__init__.py +0 -0
  14. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/classification/__init__.py +0 -0
  15. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/classification/annotation.py +0 -0
  16. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/classification/manager.py +0 -0
  17. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/classification/metric.py +0 -0
  18. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/classification/utilities.py +0 -0
  19. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/__init__.py +0 -0
  20. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/annotation.py +0 -0
  21. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/manager.py +0 -0
  22. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/profiling.py +0 -0
  23. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/schemas.py +0 -0
  24. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/__init__.py +0 -0
  25. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/annotation.py +0 -0
  26. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/benchmark.py +0 -0
  27. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/computation.py +0 -0
  28. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/manager.py +0 -0
  29. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/metric.py +0 -0
  30. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/utilities.py +0 -0
  31. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/__init__.py +0 -0
  32. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/annotation.py +0 -0
  33. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/computation.py +0 -0
  34. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/llm/__init__.py +0 -0
  35. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/llm/exceptions.py +0 -0
  36. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/llm/generation.py +0 -0
  37. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/llm/instructions.py +0 -0
  38. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/llm/integrations.py +0 -0
  39. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/llm/utilities.py +0 -0
  40. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/llm/validators.py +0 -0
  41. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/manager.py +0 -0
  42. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/text_generation/metric.py +0 -0
  43. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite.egg-info/dependency_links.txt +0 -0
  44. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite.egg-info/requires.txt +0 -0
  45. {valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite.egg-info/top_level.txt +0 -0
{valor_lite-0.34.1 → valor_lite-0.34.3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: valor-lite
-Version: 0.34.1
+Version: 0.34.3
 Summary: Evaluate machine learning models.
 Project-URL: homepage, https://www.striveworks.com
 Requires-Python: >=3.10
{valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/classification/computation.py
@@ -1,6 +1,8 @@
 import numpy as np
 from numpy.typing import NDArray
 
+import valor_lite.classification.numpy_compatibility as npc
+
 
 def _compute_rocauc(
     data: NDArray[np.float64],
@@ -56,7 +58,7 @@ def _compute_rocauc(
     np.maximum.accumulate(tpr, axis=1, out=tpr)
 
     # compute rocauc
-    rocauc = np.trapezoid(x=fpr, y=tpr, axis=1)
+    rocauc = npc.trapezoid(x=fpr, y=tpr, axis=1)
 
     # compute mean rocauc
     mean_rocauc = rocauc.mean()
valor_lite-0.34.3/valor_lite/classification/numpy_compatibility.py
@@ -0,0 +1,13 @@
+import numpy as np
+from numpy.typing import NDArray
+
+try:
+    _numpy_trapezoid = np.trapezoid  # numpy v2
+except AttributeError:
+    _numpy_trapezoid = np.trapz  # numpy v1
+
+
+def trapezoid(
+    x: NDArray[np.float64], y: NDArray[np.float64], axis: int
+) -> NDArray[np.float64]:
+    return _numpy_trapezoid(x=x, y=y, axis=axis)  # type: ignore - NumPy compatibility
{valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/computation.py
@@ -282,7 +282,6 @@ def compute_precion_recall(
         ],
         NDArray[np.float64],
         NDArray[np.float64],
-        NDArray[np.float64],
     ]:
     """
     Computes Object Detection metrics.
@@ -314,8 +313,6 @@ def compute_precion_recall(
         Average Precision results.
     tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
         Average Recall results.
-    NDArray[np.float64]
-        Accuracy.
     NDArray[np.float64]
         Precision, Recall, TP, FP, FN, F1 Score.
     NDArray[np.float64]
@@ -334,7 +331,6 @@ def compute_precion_recall(
 
     average_precision = np.zeros((n_ious, n_labels), dtype=np.float64)
     average_recall = np.zeros((n_scores, n_labels), dtype=np.float64)
-    accuracy = np.zeros((n_ious, n_scores), dtype=np.float64)
     counts = np.zeros((n_ious, n_scores, n_labels, 6), dtype=np.float64)
 
     pd_labels = data[:, 5].astype(np.int32)
@@ -383,37 +379,38 @@ def compute_precion_recall(
             )
             mask_gt_unique = np.zeros(tp_candidates.shape[0], dtype=np.bool_)
             mask_gt_unique[indices_gt_unique] = True
+
             true_positives_mask = np.zeros(n_rows, dtype=np.bool_)
             true_positives_mask[mask_tp_inner] = mask_gt_unique
 
+            mask_fp_inner |= mask_tp_inner & ~true_positives_mask
+
             # calculate intermediates
-            pd_count = np.bincount(pd_labels, minlength=n_labels).astype(
-                np.float64
-            )
             tp_count = np.bincount(
                 pd_labels,
                 weights=true_positives_mask,
                 minlength=n_labels,
             ).astype(np.float64)
-
             fp_count = np.bincount(
                 pd_labels[mask_fp_inner],
                 minlength=n_labels,
             ).astype(np.float64)
-
             fn_count = np.bincount(
                 pd_labels[mask_fn_inner],
                 minlength=n_labels,
             )
 
+            fn_count = gt_count - tp_count
+            tp_fp_count = tp_count + fp_count
+
             # calculate component metrics
             recall = np.zeros_like(tp_count)
             np.divide(tp_count, gt_count, where=gt_count > 1e-9, out=recall)
 
             precision = np.zeros_like(tp_count)
-            np.divide(tp_count, pd_count, where=pd_count > 1e-9, out=precision)
-
-            fn_count = gt_count - tp_count
+            np.divide(
+                tp_count, tp_fp_count, where=tp_fp_count > 1e-9, out=precision
+            )
 
             f1_score = np.zeros_like(precision)
             np.divide(
@@ -436,14 +433,6 @@ def compute_precion_recall(
                 axis=1,
             )
 
-            # caluculate accuracy
-            total_pd_count = label_metadata[:, 1].sum()
-            accuracy[iou_idx, score_idx] = (
-                (tp_count.sum() / total_pd_count)
-                if total_pd_count > 1e-9
-                else 0.0
-            )
-
             # calculate recall for AR
             average_recall[score_idx] += recall
 
@@ -562,7 +551,6 @@ def compute_precion_recall(
     return (
         ap_results,  # type: ignore[reportReturnType]
         ar_results,
-        accuracy,
         counts,
         pr_curve,
     )
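Beyond dropping the accuracy output, the substance of the computation.py change is that the false-positive mask now also absorbs true-positive candidates that lose the per-ground-truth uniqueness check, and precision is computed as TP / (TP + FP) instead of TP / (total predictions per label), with false negatives derived as gt_count - tp_count. A minimal, self-contained sketch of that per-label counting (all values are made up; in the package the masks come from the IoU/score thresholding loop):

    import numpy as np

    n_labels = 3
    pd_labels = np.array([0, 0, 1, 2, 2, 2])              # predicted label per detection
    true_positives_mask = np.array([1, 0, 1, 1, 0, 0], dtype=np.bool_)
    mask_fp_inner = ~true_positives_mask                   # unmatched or duplicate predictions
    gt_count = np.array([2.0, 1.0, 1.0])                   # ground truths per label

    tp_count = np.bincount(
        pd_labels,
        weights=true_positives_mask.astype(np.float64),
        minlength=n_labels,
    )
    fp_count = np.bincount(pd_labels[mask_fp_inner], minlength=n_labels).astype(np.float64)

    # precision = TP / (TP + FP), guarded against empty denominators
    tp_fp_count = tp_count + fp_count
    precision = np.zeros_like(tp_count)
    np.divide(tp_count, tp_fp_count, where=tp_fp_count > 1e-9, out=precision)

    # false negatives derived from the ground-truth counts
    fn_count = gt_count - tp_count

With these inputs, precision comes out as [0.5, 1.0, 0.333...] and fn_count as [1.0, 0.0, 0.0].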
{valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/metric.py
@@ -6,7 +6,6 @@ from valor_lite.schemas import BaseMetric
 
 class MetricType(str, Enum):
     Counts = "Counts"
-    Accuracy = "Accuracy"
     Precision = "Precision"
     Recall = "Recall"
     F1 = "F1"
@@ -175,41 +174,6 @@ class Metric(BaseMetric):
         },
     )
 
-    @classmethod
-    def accuracy(
-        cls,
-        value: float,
-        iou_threshold: float,
-        score_threshold: float,
-    ):
-        """
-        Accuracy metric for the object detection task type.
-
-        This class encapsulates a metric value at a specific Intersection
-        over Union (IOU) threshold and confidence score threshold.
-
-        Parameters
-        ----------
-        value : float
-            The metric value.
-        iou_threshold : float
-            The IOU threshold used to determine matches between predicted and ground truth boxes.
-        score_threshold : float
-            The confidence score threshold above which predictions are considered.
-
-        Returns
-        -------
-        Metric
-        """
-        return cls(
-            type=MetricType.Accuracy.value,
-            value=value,
-            parameters={
-                "iou_threshold": iou_threshold,
-                "score_threshold": score_threshold,
-            },
-        )
-
     @classmethod
     def average_precision(
         cls,
{valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite/object_detection/utilities.py
@@ -22,7 +22,6 @@ def unpack_precision_recall_into_metric_lists(
         ],
         NDArray[np.float64],
         NDArray[np.float64],
-        NDArray[np.float64],
     ],
     iou_thresholds: list[float],
     score_thresholds: list[float],
@@ -42,7 +41,6 @@ def unpack_precision_recall_into_metric_lists(
            average_recall_averaged_over_scores,
            mean_average_recall_averaged_over_scores,
        ),
-       accuracy,
        precision_recall,
        pr_curves,
    ) = results
@@ -125,16 +123,6 @@ def unpack_precision_recall_into_metric_lists(
        )
    ]
 
-    metrics[MetricType.Accuracy] = [
-        Metric.accuracy(
-            value=float(accuracy[iou_idx, score_idx]),
-            iou_threshold=iou_threshold,
-            score_threshold=score_threshold,
-        )
-        for iou_idx, iou_threshold in enumerate(iou_thresholds)
-        for score_idx, score_threshold in enumerate(score_thresholds)
-    ]
-
    metrics[MetricType.PrecisionRecallCurve] = [
        Metric.precision_recall_curve(
            precisions=pr_curves[iou_idx, label_idx, :, 0].tolist(),  # type: ignore[reportArgumentType]
{valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: valor-lite
-Version: 0.34.1
+Version: 0.34.3
 Summary: Evaluate machine learning models.
 Project-URL: homepage, https://www.striveworks.com
 Requires-Python: >=3.10
{valor_lite-0.34.1 → valor_lite-0.34.3}/valor_lite.egg-info/SOURCES.txt
@@ -14,6 +14,7 @@ valor_lite/classification/annotation.py
 valor_lite/classification/computation.py
 valor_lite/classification/manager.py
 valor_lite/classification/metric.py
+valor_lite/classification/numpy_compatibility.py
 valor_lite/classification/utilities.py
 valor_lite/object_detection/__init__.py
 valor_lite/object_detection/annotation.py