valor-lite 0.33.1-py3-none-any.whl → 0.33.2-py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
- valor_lite/detection/computation.py +142 -21
- valor_lite/detection/manager.py +90 -5
- {valor_lite-0.33.1.dist-info → valor_lite-0.33.2.dist-info}/METADATA +1 -1
- valor_lite-0.33.2.dist-info/RECORD +12 -0
- valor_lite-0.33.1.dist-info/RECORD +0 -12
- {valor_lite-0.33.1.dist-info → valor_lite-0.33.2.dist-info}/LICENSE +0 -0
- {valor_lite-0.33.1.dist-info → valor_lite-0.33.2.dist-info}/WHEEL +0 -0
- {valor_lite-0.33.1.dist-info → valor_lite-0.33.2.dist-info}/top_level.txt +0 -0
valor_lite/detection/computation.py  CHANGED

@@ -1,16 +1,38 @@
 import numpy as np
 from numpy.typing import NDArray

-# datum id 0
-# gt 1
-# pd 2
-# iou 3
-# gt label 4
-# pd label 5
-# score 6
-

 def compute_iou(data: NDArray[np.floating]) -> NDArray[np.floating]:
+    """
+    Computes intersection-over-union (IoU) for axis-aligned bounding boxes.
+
+    Takes data with shape (N, 8):
+
+    Index 0 - xmin for Box 1
+    Index 1 - xmax for Box 1
+    Index 2 - ymin for Box 1
+    Index 3 - ymax for Box 1
+    Index 4 - xmin for Box 2
+    Index 5 - xmax for Box 2
+    Index 6 - ymin for Box 2
+    Index 7 - ymax for Box 2
+
+    Returns data with shape (N, 1):
+
+    Index 0 - IoU
+
+    Parameters
+    ----------
+    data : NDArray[np.floating]
+        A sorted array of classification pairs.
+    label_metadata : NDArray[np.int32]
+        An array containing metadata related to labels.
+
+    Returns
+    -------
+    NDArray[np.floating]
+        Compute IoU's.
+    """

     xmin1, xmax1, ymin1, ymax1 = (
         data[:, 0],
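The new docstring pins down the (N, 8) box layout that compute_iou consumes. As a quick orientation aid, here is a minimal standalone sketch, not the package's implementation, that computes IoU under that same column convention (box 1 in columns 0-3 as xmin/xmax/ymin/ymax, box 2 in columns 4-7):

```python
import numpy as np
from numpy.typing import NDArray


def iou_sketch(data: NDArray[np.floating]) -> NDArray[np.floating]:
    # Columns 0-3: box 1 (xmin, xmax, ymin, ymax); columns 4-7: box 2.
    xmin1, xmax1, ymin1, ymax1 = data[:, 0], data[:, 1], data[:, 2], data[:, 3]
    xmin2, xmax2, ymin2, ymax2 = data[:, 4], data[:, 5], data[:, 6], data[:, 7]

    # Overlap extents clip to zero when the boxes are disjoint.
    iw = np.clip(np.minimum(xmax1, xmax2) - np.maximum(xmin1, xmin2), 0.0, None)
    ih = np.clip(np.minimum(ymax1, ymax2) - np.maximum(ymin1, ymin2), 0.0, None)
    intersection = iw * ih

    area1 = (xmax1 - xmin1) * (ymax1 - ymin1)
    area2 = (xmax2 - xmin2) * (ymax2 - ymin2)
    union = area1 + area2 - intersection

    # Guard against zero-area unions; output shape (N, 1) as documented.
    iou = np.zeros_like(union)
    np.divide(intersection, union, out=iou, where=union > 0)
    return iou.reshape(-1, 1)


# Identical boxes -> 1.0, disjoint boxes -> 0.0.
boxes = np.array(
    [
        [0, 2, 0, 2, 0, 2, 0, 2],
        [0, 1, 0, 1, 5, 6, 5, 6],
    ],
    dtype=np.float64,
)
print(iou_sketch(boxes))  # [[1.], [0.]]
```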
@@ -93,6 +115,33 @@ def compute_ranked_pairs(
     data: list[NDArray[np.floating]],
     label_metadata: NDArray[np.integer],
 ) -> NDArray[np.floating]:
+    """
+    Performs pair ranking on input data.
+
+    Takes data with shape (N, 7):
+
+    Index 0 - Datum Index
+    Index 1 - GroundTruth Index
+    Index 2 - Prediction Index
+    Index 3 - IoU
+    Index 4 - GroundTruth Label Index
+    Index 5 - Prediction Label Index
+    Index 6 - Score
+
+    Returns data with shape (N - M, 7)
+
+    Parameters
+    ----------
+    data : NDArray[np.floating]
+        A sorted array of classification pairs.
+    label_metadata : NDArray[np.int32]
+        An array containing metadata related to labels.
+
+    Returns
+    -------
+    NDArray[np.floating]
+        A filtered array containing only ranked pairs.
+    """
     pairs = np.concatenate(
         [
             _compute_ranked_pairs_for_datum(
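The same (N, 7) row layout is shared by compute_ranked_pairs, compute_metrics, and compute_detailed_counts below. A purely illustrative example of one pair row under that convention (the values are made up, not taken from the package):

```python
import numpy as np

# Column order documented above: datum index, groundtruth index, prediction
# index, IoU, groundtruth label index, prediction label index, score.
pairs = np.array(
    [
        [0.0, 1.0, 2.0, 0.83, 4.0, 4.0, 0.91],
    ],
    dtype=np.float64,
)

print(int(pairs[0, 0]))  # 0    -> Datum Index
print(pairs[0, 3])       # 0.83 -> IoU
print(pairs[0, 6])       # 0.91 -> Score
```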
@@ -136,6 +185,27 @@ def compute_metrics(
     """
     Computes Object Detection metrics.

+    Takes data with shape (N, 7):
+
+    Index 0 - Datum Index
+    Index 1 - GroundTruth Index
+    Index 2 - Prediction Index
+    Index 3 - IoU
+    Index 4 - GroundTruth Label Index
+    Index 5 - Prediction Label Index
+    Index 6 - Score
+
+    Parameters
+    ----------
+    data : NDArray[np.floating]
+        A sorted array of classification pairs.
+    label_metadata : NDArray[np.int32]
+        An array containing metadata related to labels.
+    iou_thresholds : NDArray[np.floating]
+        A 1-D array containing IoU thresholds.
+    score_thresholds : NDArray[np.floating]
+        A 1-D array containing score thresholds.
+
     Returns
     -------
     tuple[NDArray, NDArray, NDArray NDArray]

@@ -155,7 +225,7 @@ def compute_metrics(

     average_precision = np.zeros((n_ious, n_labels))
     average_recall = np.zeros((n_scores, n_labels))
-
+    counts = np.zeros((n_ious, n_scores, n_labels, 7))

     pd_labels = data[:, 5].astype(int)
     unique_pd_labels = np.unique(pd_labels)

@@ -245,7 +315,7 @@ def compute_metrics(
            out=accuracy,
        )

-
+        counts[iou_idx][score_idx] = np.concatenate(
            (
                tp_count[:, np.newaxis],
                fp_count[:, np.newaxis],
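The counts tensor introduced above stores, for every IoU threshold, score threshold, and label, a row of seven values assembled from per-label count vectors. The assembly pattern itself is plain NumPy: promote each (n_labels,) vector to a column with [:, np.newaxis] and concatenate along axis 1. A minimal sketch with made-up counts, showing only two of the seven columns:

```python
import numpy as np

n_labels = 3
tp_count = np.array([5, 2, 0])  # hypothetical true-positive counts per label
fp_count = np.array([1, 0, 4])  # hypothetical false-positive counts per label

# Each [:, np.newaxis] yields an (n_labels, 1) column; concatenation along
# axis=1 produces an (n_labels, 2) block that could fill part of a counts slice.
block = np.concatenate(
    (
        tp_count[:, np.newaxis],
        fp_count[:, np.newaxis],
    ),
    axis=1,
)
print(block.shape)  # (3, 2)
print(block[0])     # [5 1] -> counts for label index 0
```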
@@ -353,7 +423,7 @@ def compute_metrics(
     return (
         ap_results,
         ar_results,
-
+        counts,
         pr_curve,
     )


@@ -365,16 +435,49 @@ def compute_detailed_counts(
     score_thresholds: np.ndarray,
     n_samples: int,
 ) -> np.ndarray:
-
     """
-
-
-
-
-
-
-
+    Compute detailed counts.
+
+    Takes data with shape (N, 7):
+
+    Index 0 - Datum Index
+    Index 1 - GroundTruth Index
+    Index 2 - Prediction Index
+    Index 3 - IoU
+    Index 4 - GroundTruth Label Index
+    Index 5 - Prediction Label Index
+    Index 6 - Score
+
+    Outputs an array with shape (N_IoUs, N_Score, N_Labels, 5 * n_samples + 5):
+
+    Index 0 - True Positive Count
+    ... Datum ID Examples
+    Index n_samples + 1 - False Positive Misclassification Count
+    ... Datum ID Examples
+    Index 2 * n_samples + 2 - False Positive Hallucination Count
+    ... Datum ID Examples
+    Index 3 * n_samples + 3 - False Negative Misclassification Count
+    ... Datum ID Examples
+    Index 4 * n_samples + 4 - False Negative Missing Prediction Count
+    ... Datum ID Examples
+
+    Parameters
+    ----------
+    data : NDArray[np.floating]
+        A sorted array of classification pairs.
+    label_metadata : NDArray[np.int32]
+        An array containing metadata related to labels.
+    iou_thresholds : NDArray[np.floating]
+        A 1-D array containing IoU thresholds.
+    score_thresholds : NDArray[np.floating]
+        A 1-D array containing score thresholds.
+    n_samples : int
+        The number of examples to return per count.
+
+    Returns
+    -------
+    NDArray[np.floating]
+        The detailed counts with optional examples.
     """

     n_labels = label_metadata.shape[0]

@@ -466,12 +569,30 @@ def compute_detailed_counts(
         | mask_groundtruths_without_passing_score
     )

-
+    tp_pds = np.unique(data[mask_tp][:, [0, 2, 5]], axis=0)
+    tp_gts = np.unique(data[mask_tp][:, [0, 1, 4]], axis=0)
     fp_misclf = np.unique(data[mask_fp_misclf][:, [0, 2, 5]], axis=0)
     fp_halluc = np.unique(data[mask_fp_halluc][:, [0, 2, 5]], axis=0)
     fn_misclf = np.unique(data[mask_fn_misclf][:, [0, 1, 4]], axis=0)
     fn_misprd = np.unique(data[mask_fn_misprd][:, [0, 1, 4]], axis=0)

+    mask_fp_misclf_is_tp = (
+        (fp_misclf.reshape(-1, 1, 3) == tp_pds.reshape(1, -1, 3))
+        .all(axis=2)
+        .any(axis=1)
+    )
+    mask_fn_misclf_is_tp = (
+        (fn_misclf.reshape(-1, 1, 3) == tp_gts.reshape(1, -1, 3))
+        .all(axis=2)
+        .any(axis=1)
+    )
+
+    tp = tp_pds
+    fp_misclf = fp_misclf[~mask_fp_misclf_is_tp]
+    fp_halluc = fp_halluc
+    fn_misclf = fn_misclf[~mask_fn_misclf_is_tp]
+    fn_misprd = fn_misprd
+
     tp_count = np.bincount(tp[:, 2].astype(int), minlength=n_labels)
     fp_misclf_count = np.bincount(
         fp_misclf[:, 2].astype(int), minlength=n_labels
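The new masks above de-duplicate misclassification candidates against the true-positive sets using a broadcasting row-membership test: reshape one (K, 3) array to (K, 1, 3) and the other to (1, M, 3), compare elementwise, require all three columns to match (all(axis=2)), then ask whether any reference row matched (any(axis=1)). The same trick in isolation, with hypothetical triples:

```python
import numpy as np

# Hypothetical (datum, annotation, label) triples.
candidates = np.array([[0, 2, 5], [1, 3, 5], [0, 2, 5]], dtype=np.float64)
reference = np.array([[0, 2, 5], [7, 8, 9]], dtype=np.float64)

# (K, 1, 3) == (1, M, 3) broadcasts to (K, M, 3). all(axis=2) flags exact row
# matches; any(axis=1) reduces to "does this candidate appear in reference?"
is_member = (
    (candidates.reshape(-1, 1, 3) == reference.reshape(1, -1, 3))
    .all(axis=2)
    .any(axis=1)
)

print(is_member)               # [ True False  True]
print(candidates[~is_member])  # [[1. 3. 5.]] -> rows kept after de-duplication
```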
valor_lite/detection/manager.py  CHANGED

@@ -58,6 +58,10 @@ class Filter:


 class Evaluator:
+    """
+    Object Detection Evaluator
+    """
+
     def __init__(self):

         # metadata

@@ -87,6 +91,9 @@ class Evaluator:

     @property
     def ignored_prediction_labels(self) -> list[tuple[str, str]]:
+        """
+        Prediction labels that are not present in the ground truth set.
+        """
         glabels = set(np.where(self._label_metadata[:, 0] > 0)[0])
         plabels = set(np.where(self._label_metadata[:, 1] > 0)[0])
         return [

@@ -95,6 +102,9 @@ class Evaluator:

     @property
     def missing_prediction_labels(self) -> list[tuple[str, str]]:
+        """
+        Ground truth labels that are not present in the prediction set.
+        """
         glabels = set(np.where(self._label_metadata[:, 0] > 0)[0])
         plabels = set(np.where(self._label_metadata[:, 1] > 0)[0])
         return [
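Both properties reduce to set differences over the two columns of _label_metadata read above. A self-contained sketch, assuming column 0 counts ground-truth occurrences and column 1 counts prediction occurrences per label index (consistent with how the properties use it):

```python
import numpy as np

# Assumed layout: one row per label index,
# column 0 = ground-truth count, column 1 = prediction count.
label_metadata = np.array(
    [
        [3, 2],  # label 0: annotated and predicted
        [0, 4],  # label 1: predicted but never annotated
        [5, 0],  # label 2: annotated but never predicted
    ]
)

glabels = set(np.where(label_metadata[:, 0] > 0)[0])
plabels = set(np.where(label_metadata[:, 1] > 0)[0])

ignored = sorted(int(i) for i in plabels - glabels)
missing = sorted(int(i) for i in glabels - plabels)
print(ignored)  # [1] -> ignored prediction label indices
print(missing)  # [2] -> missing prediction label indices
```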
@@ -103,6 +113,9 @@ class Evaluator:

     @property
     def metadata(self) -> dict:
+        """
+        Evaluation metadata.
+        """
         return {
             "n_datums": self.n_datums,
             "n_groundtruths": self.n_groundtruths,

@@ -216,16 +229,21 @@ class Evaluator:
         filter_: Filter | None = None,
     ) -> dict[MetricType, list]:
         """
-
+        Performs an evaluation and returns metrics.

         Parameters
         ----------
         iou_thresholds : list[float]
-            A list of
+            A list of IoU thresholds to compute metrics over.
         score_thresholds : list[float]
-            A list of score thresholds to compute over.
-
-
+            A list of score thresholds to compute metrics over.
+        filter_ : Filter, optional
+            An optional filter object.
+
+        Returns
+        -------
+        dict[MetricType, list]
+            A dictionary mapping MetricType enumerations to lists of computed metrics.
         """

         data = self._ranked_pairs
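Taken together with the DataLoader additions below, the expected flow is: build a loader, add detections, finalize into an Evaluator, then call this method with threshold lists. A hedged usage sketch; the method names add_data and evaluate and the import path are assumptions, since they are not visible in this diff:

```python
from valor_lite.detection import DataLoader  # assumed import path

detections: list = []  # placeholder; a real run needs Detection objects with annotations

loader = DataLoader()
loader.add_data(detections, show_progress=True)  # assumed name for the list[Detection] loader method
evaluator = loader.finalize()

# Metric computation over the documented threshold lists.
metrics = evaluator.evaluate(
    iou_thresholds=[0.5, 0.75],
    score_thresholds=[0.25, 0.5],
)

# Return type documented above: dict[MetricType, list].
for metric_type, values in metrics.items():
    print(metric_type, len(values))
```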
@@ -360,6 +378,10 @@ class Evaluator:
         for label_idx, label in self.index_to_label.items():
             for score_idx, score_threshold in enumerate(score_thresholds):
                 for iou_idx, iou_threshold in enumerate(iou_thresholds):
+
+                    if label_metadata[label_idx, 0] == 0:
+                        continue
+
                     row = precision_recall[iou_idx][score_idx][label_idx]
                     kwargs = {
                         "label": label,

@@ -374,6 +396,7 @@ class Evaluator:
                             **kwargs,
                         )
                     )
+
                     metrics[MetricType.Precision].append(
                         Precision(
                             value=row[3],

@@ -532,6 +555,10 @@ class Evaluator:


 class DataLoader:
+    """
+    Object Detection DataLoader
+    """
+
     def __init__(self):
         self._evaluator = Evaluator()
         self.pairs = list()

@@ -539,6 +566,19 @@ class DataLoader:
         self.prediction_count = defaultdict(lambda: defaultdict(int))

     def _add_datum(self, uid: str) -> int:
+        """
+        Helper function for adding a datum to the cache.
+
+        Parameters
+        ----------
+        uid : str
+            The datum uid.
+
+        Returns
+        -------
+        int
+            The datum index.
+        """
         if uid not in self._evaluator.uid_to_index:
             index = len(self._evaluator.uid_to_index)
             self._evaluator.uid_to_index[uid] = index

@@ -546,6 +586,22 @@ class DataLoader:
         return self._evaluator.uid_to_index[uid]

     def _add_label(self, label: tuple[str, str]) -> tuple[int, int]:
+        """
+        Helper function for adding a label to the cache.
+
+        Parameters
+        ----------
+        label : tuple[str, str]
+            The label as a tuple in format (key, value).
+
+        Returns
+        -------
+        int
+            Label index.
+        int
+            Label key index.
+        """
+
         label_id = len(self._evaluator.index_to_label)
         label_key_id = len(self._evaluator.index_to_label_key)
         if label not in self._evaluator.label_to_index:
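_add_datum and _add_label both follow the same first-seen interning pattern: the first time a uid or label appears it is assigned the next free integer index, and subsequent calls return the cached value. The pattern in isolation (simplified, not the library's code verbatim):

```python
uid_to_index: dict[str, int] = {}


def add_datum(uid: str) -> int:
    # Assign the next free index the first time a uid is seen.
    if uid not in uid_to_index:
        uid_to_index[uid] = len(uid_to_index)
    return uid_to_index[uid]


print(add_datum("img_001"))  # 0 (newly interned)
print(add_datum("img_002"))  # 1
print(add_datum("img_001"))  # 0 (cached)
```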
@@ -573,6 +629,16 @@ class DataLoader:
         detections: list[Detection],
         show_progress: bool = False,
     ):
+        """
+        Adds detections to the cache.
+
+        Parameters
+        ----------
+        detections : list[Detection]
+            A list of Detection objects.
+        show_progress : bool, default=False
+            Toggle for tqdm progress bar.
+        """
         disable_tqdm = not show_progress
         for detection in tqdm(detections, disable=disable_tqdm):

@@ -688,6 +754,17 @@ class DataLoader:
         detections: list[tuple[dict, dict]],
         show_progress: bool = False,
     ):
+        """
+        Adds Valor-format detections to the cache.
+
+        Parameters
+        ----------
+        detections : list[tuple[dict, dict]]
+            A list of groundtruth, prediction pairs in Valor-format dictionaries.
+        show_progress : bool, default=False
+            Toggle for tqdm progress bar.
+        """
+
         def _get_bbox_extrema(
             data: list[list[list[float]]],
         ) -> tuple[float, float, float, float]:

@@ -809,6 +886,14 @@ class DataLoader:
         self.pairs.append(np.array(pairs))

     def finalize(self) -> Evaluator:
+        """
+        Performs data finalization and some preprocessing steps.
+
+        Returns
+        -------
+        Evaluator
+            A ready-to-use evaluator object.
+        """

         self.pairs = [pair for pair in self.pairs if pair.size > 0]
         if len(self.pairs) == 0:

valor_lite-0.33.2.dist-info/RECORD  ADDED

@@ -0,0 +1,12 @@
+valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
+valor_lite/detection/__init__.py,sha256=WHLHwHoKzXTBjkjC6E1_lhqB7gRWkiGWVWPqkKn-yK8,997
+valor_lite/detection/annotation.py,sha256=ON9iVa33pxysUmZVTCb0wNz-eFX6MDOqDhGDz-ouymc,1466
+valor_lite/detection/computation.py,sha256=L8FIwZ-qxOQnoT7mxgNzLyNyI-Bvga0i-gtbow3hN-o,22575
+valor_lite/detection/manager.py,sha256=Y45Wy3PWi7dQ0VnDERdtpOixUbKVXTZxBcCR92ny0QY,34278
+valor_lite/detection/metric.py,sha256=hHqClS7c71ztoUnfoaW3T7RmGYaVNU1SlM6vUs1P08I,8809
+valor_lite-0.33.2.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+valor_lite-0.33.2.dist-info/METADATA,sha256=fe-Sj568DB-E9cyC5P8GA_lLjmM1t3MZUHj1f0JF6fM,1842
+valor_lite-0.33.2.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+valor_lite-0.33.2.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.33.2.dist-info/RECORD,,

valor_lite-0.33.1.dist-info/RECORD  REMOVED

@@ -1,12 +0,0 @@
-valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
-valor_lite/detection/__init__.py,sha256=WHLHwHoKzXTBjkjC6E1_lhqB7gRWkiGWVWPqkKn-yK8,997
-valor_lite/detection/annotation.py,sha256=ON9iVa33pxysUmZVTCb0wNz-eFX6MDOqDhGDz-ouymc,1466
-valor_lite/detection/computation.py,sha256=2FnVw6_dcAOvwCqpU9bIkeD7gPqDzfW48WSARnvKeOg,18873
-valor_lite/detection/manager.py,sha256=HyODoIkmj92Kfspnpojp1pUY7noAw3FuCgQ36r6vMa4,32356
-valor_lite/detection/metric.py,sha256=hHqClS7c71ztoUnfoaW3T7RmGYaVNU1SlM6vUs1P08I,8809
-valor_lite-0.33.1.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
-valor_lite-0.33.1.dist-info/METADATA,sha256=W36vWkCaas8e0H5RqfGwwlh5FritdeNO7bBj8r-lf6s,1842
-valor_lite-0.33.1.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-valor_lite-0.33.1.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
-valor_lite-0.33.1.dist-info/RECORD,,

{valor_lite-0.33.1.dist-info → valor_lite-0.33.2.dist-info}/LICENSE: file without changes
{valor_lite-0.33.1.dist-info → valor_lite-0.33.2.dist-info}/WHEEL: file without changes
{valor_lite-0.33.1.dist-info → valor_lite-0.33.2.dist-info}/top_level.txt: file without changes