valor-lite 0.34.2__tar.gz → 0.34.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {valor_lite-0.34.2 → valor_lite-0.34.3}/PKG-INFO +1 -1
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/object_detection/computation.py +9 -21
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/object_detection/metric.py +0 -36
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/object_detection/utilities.py +0 -12
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite.egg-info/PKG-INFO +1 -1
- {valor_lite-0.34.2 → valor_lite-0.34.3}/README.md +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/pyproject.toml +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/setup.cfg +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/LICENSE +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/__init__.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/classification/__init__.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/classification/annotation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/classification/computation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/classification/manager.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/classification/metric.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/classification/numpy_compatibility.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/classification/utilities.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/object_detection/__init__.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/object_detection/annotation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/object_detection/manager.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/profiling.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/schemas.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/__init__.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/annotation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/benchmark.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/computation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/manager.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/metric.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/semantic_segmentation/utilities.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/__init__.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/annotation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/computation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/llm/__init__.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/llm/exceptions.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/llm/generation.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/llm/instructions.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/llm/integrations.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/llm/utilities.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/llm/validators.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/manager.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite/text_generation/metric.py +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite.egg-info/SOURCES.txt +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite.egg-info/dependency_links.txt +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite.egg-info/requires.txt +0 -0
- {valor_lite-0.34.2 → valor_lite-0.34.3}/valor_lite.egg-info/top_level.txt +0 -0
valor_lite/object_detection/computation.py

@@ -282,7 +282,6 @@ def compute_precion_recall(
     ],
     NDArray[np.float64],
     NDArray[np.float64],
-    NDArray[np.float64],
 ]:
     """
     Computes Object Detection metrics.

@@ -314,8 +313,6 @@ def compute_precion_recall(
         Average Precision results.
     tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
         Average Recall results.
-    NDArray[np.float64]
-        Accuracy.
     NDArray[np.float64]
         Precision, Recall, TP, FP, FN, F1 Score.
     NDArray[np.float64]

@@ -334,7 +331,6 @@ def compute_precion_recall(

     average_precision = np.zeros((n_ious, n_labels), dtype=np.float64)
     average_recall = np.zeros((n_scores, n_labels), dtype=np.float64)
-    accuracy = np.zeros((n_ious, n_scores), dtype=np.float64)
     counts = np.zeros((n_ious, n_scores, n_labels, 6), dtype=np.float64)

     pd_labels = data[:, 5].astype(np.int32)
@@ -383,37 +379,38 @@ def compute_precion_recall(
         )
         mask_gt_unique = np.zeros(tp_candidates.shape[0], dtype=np.bool_)
         mask_gt_unique[indices_gt_unique] = True
+
         true_positives_mask = np.zeros(n_rows, dtype=np.bool_)
         true_positives_mask[mask_tp_inner] = mask_gt_unique

+        mask_fp_inner |= mask_tp_inner & ~true_positives_mask
+
         # calculate intermediates
-        pd_count = np.bincount(pd_labels, minlength=n_labels).astype(
-            np.float64
-        )
         tp_count = np.bincount(
             pd_labels,
             weights=true_positives_mask,
             minlength=n_labels,
         ).astype(np.float64)
-
         fp_count = np.bincount(
             pd_labels[mask_fp_inner],
             minlength=n_labels,
         ).astype(np.float64)
-
         fn_count = np.bincount(
             pd_labels[mask_fn_inner],
             minlength=n_labels,
         )

+        fn_count = gt_count - tp_count
+        tp_fp_count = tp_count + fp_count
+
         # calculate component metrics
         recall = np.zeros_like(tp_count)
         np.divide(tp_count, gt_count, where=gt_count > 1e-9, out=recall)

         precision = np.zeros_like(tp_count)
-        np.divide(
-            tp_count, pd_count, where=pd_count > 1e-9, out=precision
-        )
+        np.divide(
+            tp_count, tp_fp_count, where=tp_fp_count > 1e-9, out=precision
+        )

         f1_score = np.zeros_like(precision)
         np.divide(
@@ -436,14 +433,6 @@ def compute_precion_recall(
             axis=1,
         )

-        # caluculate accuracy
-        total_pd_count = label_metadata[:, 1].sum()
-        accuracy[iou_idx, score_idx] = (
-            (tp_count.sum() / total_pd_count)
-            if total_pd_count > 1e-9
-            else 0.0
-        )
-
         # calculate recall for AR
         average_recall[score_idx] += recall

@@ -562,7 +551,6 @@ def compute_precion_recall(
     return (
         ap_results,  # type: ignore[reportReturnType]
         ar_results,
-        accuracy,
         counts,
         pr_curve,
     )
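Taken together, these hunks remove the accuracy array from compute_precion_recall and rework the intermediate counts: false negatives become gt_count - tp_count, and precision divides true positives by tp_count + fp_count instead of a separately binned prediction count. Below is a minimal sketch of the new arithmetic with made-up per-label counts; the variable names mirror the diff, but the values and the F1 form are illustrative assumptions, not taken from the library.

```python
import numpy as np

# Made-up per-label counts standing in for the bincount results inside
# compute_precion_recall (one entry per label).
gt_count = np.array([10.0, 5.0, 0.0])  # ground truths per label
tp_count = np.array([7.0, 4.0, 0.0])   # unique true positives per label
fp_count = np.array([2.0, 3.0, 1.0])   # false positives per label

# 0.34.3: false negatives are the unmatched ground truths, and precision
# divides by TP + FP rather than the raw prediction count.
fn_count = gt_count - tp_count
tp_fp_count = tp_count + fp_count

recall = np.zeros_like(tp_count)
np.divide(tp_count, gt_count, where=gt_count > 1e-9, out=recall)

precision = np.zeros_like(tp_count)
np.divide(tp_count, tp_fp_count, where=tp_fp_count > 1e-9, out=precision)

# Standard harmonic-mean F1; the diff truncates before showing the
# library's exact arguments, so treat this form as an assumption.
f1_score = np.zeros_like(precision)
np.divide(
    2.0 * precision * recall,
    precision + recall,
    where=(precision + recall) > 1e-9,
    out=f1_score,
)

print(precision)  # [0.77777778 0.57142857 0.        ]
print(recall)     # [0.7 0.8 0. ]
print(f1_score)   # [0.73684211 0.66666667 0.        ]
```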
valor_lite/object_detection/metric.py

@@ -6,7 +6,6 @@ from valor_lite.schemas import BaseMetric

 class MetricType(str, Enum):
     Counts = "Counts"
-    Accuracy = "Accuracy"
     Precision = "Precision"
     Recall = "Recall"
     F1 = "F1"

@@ -175,41 +174,6 @@ class Metric(BaseMetric):
             },
         )

-    @classmethod
-    def accuracy(
-        cls,
-        value: float,
-        iou_threshold: float,
-        score_threshold: float,
-    ):
-        """
-        Accuracy metric for the object detection task type.
-
-        This class encapsulates a metric value at a specific Intersection
-        over Union (IOU) threshold and confidence score threshold.
-
-        Parameters
-        ----------
-        value : float
-            The metric value.
-        iou_threshold : float
-            The IOU threshold used to determine matches between predicted and ground truth boxes.
-        score_threshold : float
-            The confidence score threshold above which predictions are considered.
-
-        Returns
-        -------
-        Metric
-        """
-        return cls(
-            type=MetricType.Accuracy.value,
-            value=value,
-            parameters={
-                "iou_threshold": iou_threshold,
-                "score_threshold": score_threshold,
-            },
-        )
-
     @classmethod
     def average_precision(
         cls,
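With the Accuracy enum member and the accuracy classmethod removed, object-detection evaluations no longer emit an Accuracy metric. A small, hypothetical compatibility check, assuming the module path shown in the file list above (the requested list is an example, not an API of the package):

```python
from valor_lite.object_detection.metric import MetricType

# As of 0.34.3, "Accuracy" is no longer a member of the object-detection
# MetricType enum, so validate any metric names you filter on.
requested = ["Accuracy", "Precision", "Recall", "F1"]
available = {m.value for m in MetricType}

supported = [name for name in requested if name in available]
dropped = [name for name in requested if name not in available]

print("supported:", supported)  # ['Precision', 'Recall', 'F1']
print("dropped:", dropped)      # ['Accuracy']
```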
valor_lite/object_detection/utilities.py

@@ -22,7 +22,6 @@ def unpack_precision_recall_into_metric_lists(
         ],
         NDArray[np.float64],
         NDArray[np.float64],
-        NDArray[np.float64],
     ],
     iou_thresholds: list[float],
     score_thresholds: list[float],

@@ -42,7 +41,6 @@ def unpack_precision_recall_into_metric_lists(
            average_recall_averaged_over_scores,
            mean_average_recall_averaged_over_scores,
        ),
-        accuracy,
        precision_recall,
        pr_curves,
    ) = results

@@ -125,16 +123,6 @@ def unpack_precision_recall_into_metric_lists(
         )
     ]

-    metrics[MetricType.Accuracy] = [
-        Metric.accuracy(
-            value=float(accuracy[iou_idx, score_idx]),
-            iou_threshold=iou_threshold,
-            score_threshold=score_threshold,
-        )
-        for iou_idx, iou_threshold in enumerate(iou_thresholds)
-        for score_idx, score_threshold in enumerate(score_thresholds)
-    ]
-
     metrics[MetricType.PrecisionRecallCurve] = [
         Metric.precision_recall_curve(
             precisions=pr_curves[iou_idx, label_idx, :, 0].tolist(),  # type: ignore[reportArgumentType]
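These hunks keep unpack_precision_recall_into_metric_lists aligned with the new return value of compute_precion_recall: the results tuple it unpacks has one fewer element, and no MetricType.Accuracy list is built. A rough sketch of the unpacking change, using placeholder objects rather than the real arrays; only the tuple arity is taken from the diff.

```python
# Placeholders standing in for the real computation outputs; only the
# number of elements matters here, not the contents or shapes.
ap_results = ("ap", "map", "ap_avg", "map_avg")
ar_results = ("ar", "mar", "ar_avg", "mar_avg")
precision_recall = "counts-and-component-metrics"
pr_curves = "interpolated-pr-curves"

# 0.34.2 returned (ap_results, ar_results, accuracy, precision_recall, pr_curves);
# 0.34.3 drops the accuracy array, so downstream unpacking loses one slot.
results = (ap_results, ar_results, precision_recall, pr_curves)

(
    ap_results,
    ar_results,
    precision_recall,
    pr_curves,
) = results
```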