valor-lite 0.33.5__py3-none-any.whl → 0.33.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valor_lite/classification/__init__.py +30 -0
- valor_lite/classification/annotation.py +13 -0
- valor_lite/classification/computation.py +411 -0
- valor_lite/classification/manager.py +844 -0
- valor_lite/classification/metric.py +191 -0
- valor_lite/detection/manager.py +19 -8
- {valor_lite-0.33.5.dist-info → valor_lite-0.33.7.dist-info}/METADATA +1 -1
- valor_lite-0.33.7.dist-info/RECORD +17 -0
- valor_lite-0.33.5.dist-info/RECORD +0 -12
- {valor_lite-0.33.5.dist-info → valor_lite-0.33.7.dist-info}/LICENSE +0 -0
- {valor_lite-0.33.5.dist-info → valor_lite-0.33.7.dist-info}/WHEEL +0 -0
- {valor_lite-0.33.5.dist-info → valor_lite-0.33.7.dist-info}/top_level.txt +0 -0
valor_lite/classification/metric.py
ADDED

@@ -0,0 +1,191 @@
+from dataclasses import dataclass
+from enum import Enum
+
+from valor_lite.schemas import Metric
+
+
+class MetricType(Enum):
+    Counts = "Counts"
+    ROCAUC = "ROCAUC"
+    mROCAUC = "mROCAUC"
+    Precision = "Precision"
+    Recall = "Recall"
+    Accuracy = "Accuracy"
+    F1 = "F1"
+    ConfusionMatrix = "ConfusionMatrix"
+
+    @classmethod
+    def base(cls):
+        return [
+            cls.Counts,
+            cls.ROCAUC,
+            cls.mROCAUC,
+            cls.Precision,
+            cls.Recall,
+            cls.Accuracy,
+            cls.F1,
+        ]
+
+
+@dataclass
+class Counts:
+    tp: list[int]
+    fp: list[int]
+    fn: list[int]
+    tn: list[int]
+    score_thresholds: list[float]
+    hardmax: bool
+    label: tuple[str, str]
+
+    @property
+    def metric(self) -> Metric:
+        return Metric(
+            type=type(self).__name__,
+            value={
+                "tp": self.tp,
+                "fp": self.fp,
+                "fn": self.fn,
+                "tn": self.tn,
+            },
+            parameters={
+                "score_thresholds": self.score_thresholds,
+                "hardmax": self.hardmax,
+                "label": {
+                    "key": self.label[0],
+                    "value": self.label[1],
+                },
+            },
+        )
+
+    def to_dict(self) -> dict:
+        return self.metric.to_dict()
+
+
+@dataclass
+class _ThresholdValue:
+    value: list[float]
+    score_thresholds: list[float]
+    hardmax: bool
+    label: tuple[str, str]
+
+    @property
+    def metric(self) -> Metric:
+        return Metric(
+            type=type(self).__name__,
+            value=self.value,
+            parameters={
+                "score_thresholds": self.score_thresholds,
+                "hardmax": self.hardmax,
+                "label": {
+                    "key": self.label[0],
+                    "value": self.label[1],
+                },
+            },
+        )
+
+    def to_dict(self) -> dict:
+        return self.metric.to_dict()
+
+
+class Precision(_ThresholdValue):
+    pass
+
+
+class Recall(_ThresholdValue):
+    pass
+
+
+class Accuracy(_ThresholdValue):
+    pass
+
+
+class F1(_ThresholdValue):
+    pass
+
+
+@dataclass
+class ROCAUC:
+    value: float
+    label: tuple[str, str]
+
+    @property
+    def metric(self) -> Metric:
+        return Metric(
+            type=type(self).__name__,
+            value=self.value,
+            parameters={
+                "label": {
+                    "key": self.label[0],
+                    "value": self.label[1],
+                },
+            },
+        )
+
+    def to_dict(self) -> dict:
+        return self.metric.to_dict()
+
+
+@dataclass
+class mROCAUC:
+    value: float
+    label_key: str
+
+    @property
+    def metric(self) -> Metric:
+        return Metric(
+            type=type(self).__name__,
+            value=self.value,
+            parameters={
+                "label_key": self.label_key,
+            },
+        )
+
+    def to_dict(self) -> dict:
+        return self.metric.to_dict()
+
+
+@dataclass
+class ConfusionMatrix:
+    confusion_matrix: dict[
+        str,  # ground truth label value
+        dict[
+            str,  # prediction label value
+            dict[
+                str,  # either `count` or `examples`
+                int
+                | list[
+                    dict[
+                        str,  # either `datum` or `score`
+                        str | float,  # datum uid  # prediction score
+                    ]
+                ],
+            ],
+        ],
+    ]
+    missing_predictions: dict[
+        str,  # ground truth label value
+        dict[
+            str,  # either `count` or `examples`
+            int | list[dict[str, str]],  # count or datum examples
+        ],
+    ]
+    score_threshold: float
+    label_key: str
+    number_of_examples: int
+
+    @property
+    def metric(self) -> Metric:
+        return Metric(
+            type=type(self).__name__,
+            value={
+                "confusion_matrix": self.confusion_matrix,
+                "missing_predictions": self.missing_predictions,
+            },
+            parameters={
+                "score_threshold": self.score_threshold,
+                "label_key": self.label_key,
+            },
+        )
+
+    def to_dict(self) -> dict:
+        return self.metric.to_dict()
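For orientation, a minimal usage sketch of the metric dataclasses added above. It assumes valor-lite 0.33.7 is installed; the `Metric` class lives in valor_lite/schemas.py, which is unchanged in this diff, so the commented output shape is an inference from the `type`, `value`, and `parameters` arguments forwarded to it here, not verified output.

# Hypothetical sketch: build a Counts metric and serialize it via to_dict().
from valor_lite.classification.metric import Counts, ROCAUC

counts = Counts(
    tp=[10],
    fp=[2],
    fn=[1],
    tn=[37],
    score_thresholds=[0.5],
    hardmax=True,
    label=("animal", "dog"),
)
print(counts.to_dict())
# assumed shape: {"type": "Counts",
#                 "value": {"tp": [10], "fp": [2], "fn": [1], "tn": [37]},
#                 "parameters": {"score_thresholds": [0.5], "hardmax": True,
#                                "label": {"key": "animal", "value": "dog"}}}

roc = ROCAUC(value=0.92, label=("animal", "dog"))
print(roc.to_dict())  # same pattern, with value=0.92 and only the label parameter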
valor_lite/detection/manager.py
CHANGED

@@ -342,6 +342,7 @@ class Evaluator:
         score_thresholds: list[float] = [0.5],
         number_of_examples: int = 0,
         filter_: Filter | None = None,
+        as_dict: bool = False,
     ) -> dict[MetricType, list]:
         """
         Performs an evaluation and returns metrics.
@@ -358,6 +359,8 @@
             Maximum number of annotation examples to return in ConfusionMatrix.
         filter_ : Filter, optional
             An optional filter object.
+        as_dict : bool, default=False
+            An option to return metrics as dictionaries.

         Returns
         -------
@@ -559,6 +562,12 @@
             if metric not in metrics_to_return:
                 del metrics[metric]

+        if as_dict:
+            return {
+                mtype: [metric.to_dict() for metric in mvalues]
+                for mtype, mvalues in metrics.items()
+            }
+
         return metrics

     def _unpack_confusion_matrix(
@@ -830,22 +839,24 @@
         self,
         data: NDArray[np.floating],
         label_metadata: NDArray[np.int32],
-        iou_thresholds: list[float]
-        score_thresholds: list[float]
-
-        ],
-        number_of_examples: int = 0,
+        iou_thresholds: list[float],
+        score_thresholds: list[float],
+        number_of_examples: int,
     ) -> list[ConfusionMatrix]:
         """
         Computes detailed counting metrics.

         Parameters
         ----------
-
+        data : NDArray[np.floating]
+            An array containing detailed pairs of detections.
+        label_metadata : NDArray[np.int32]
+            An array containing label metadata.
+        iou_thresholds : list[float]
            List of IoU thresholds to compute metrics for.
-        score_thresholds : list[float]
+        score_thresholds : list[float]
            List of confidence thresholds to compute metrics for.
-        number_of_examples : int
+        number_of_examples : int
            Maximum number of annotation examples to return per metric.

         Returns
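The new `as_dict` flag is a convenience: the hunk above shows it mapping `to_dict()` over the grouped metric objects before returning. Below is a sketch of the equivalent conversion done by hand, using the classification metric classes added in this release; the `MetricType` key and metric values are illustrative stand-ins, not the output of a real evaluation.

# Sketch of the conversion performed when as_dict=True; stand-in data only.
from valor_lite.classification.metric import MetricType, Precision

metrics = {
    MetricType.Precision: [
        Precision(
            value=[0.8, 0.6],
            score_thresholds=[0.25, 0.5],
            hardmax=True,
            label=("animal", "dog"),
        )
    ],
}

# Same dict comprehension that Evaluator.evaluate() now applies internally.
as_dicts = {
    mtype: [metric.to_dict() for metric in mvalues]
    for mtype, mvalues in metrics.items()
}

Returning plain dictionaries this way makes the results straightforward to JSON-serialize without callers needing to know the dataclass types.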
valor_lite-0.33.7.dist-info/RECORD
ADDED

@@ -0,0 +1,17 @@
+valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
+valor_lite/classification/__init__.py,sha256=2wmmziIzUATm7MbmAcPNLXrEX5l4oeD7XBwPd9bWM3Q,506
+valor_lite/classification/annotation.py,sha256=rMDTvPHdAlvJ6_M2kRrnJQnj1oqKe-lxbncWC7Q50RE,345
+valor_lite/classification/computation.py,sha256=pqAPX6zFlaWyYBnve4sdgJLba_m7smeaqZAsEBvi1no,12776
+valor_lite/classification/manager.py,sha256=Tx6SpEBnV17V-rT6b4MG5jQN-fqG2dlau2-aBnzF_mI,27965
+valor_lite/classification/metric.py,sha256=00qmagf-zQXUZ1qJW_UmN1k35aaYK_7GEM292Tc_cBE,4256
+valor_lite/detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
+valor_lite/detection/annotation.py,sha256=BspLc3SjWXj6qYlGGpzDPHEZ8j7CiFzIL5cNlk0WCAM,2732
+valor_lite/detection/computation.py,sha256=HDFfPTFQN2obm-g570KKDf7SP9V-h09OyMtFEJXsoTA,26323
+valor_lite/detection/manager.py,sha256=BnLqDGaP5h1aC5D_Vm6-oUYFlz-1yuQqlJAnQ1zztSI,53160
+valor_lite/detection/metric.py,sha256=RYKN17nEFRIZIqmotQa6OyNnU0nkjXyfFIclX_5hGpY,9933
+valor_lite-0.33.7.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+valor_lite-0.33.7.dist-info/METADATA,sha256=EyxuCPqIDbQa4PAQ0utdpb0TVmZUn8TfwSTHnvoKXBc,1865
+valor_lite-0.33.7.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+valor_lite-0.33.7.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.33.7.dist-info/RECORD,,
valor_lite-0.33.5.dist-info/RECORD
DELETED

@@ -1,12 +0,0 @@
-valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
-valor_lite/detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
-valor_lite/detection/annotation.py,sha256=BspLc3SjWXj6qYlGGpzDPHEZ8j7CiFzIL5cNlk0WCAM,2732
-valor_lite/detection/computation.py,sha256=HDFfPTFQN2obm-g570KKDf7SP9V-h09OyMtFEJXsoTA,26323
-valor_lite/detection/manager.py,sha256=ld2ytAw96UOO25iTwnfvAI1D2UY2Z1AGmP7cyCrT-V4,52801
-valor_lite/detection/metric.py,sha256=RYKN17nEFRIZIqmotQa6OyNnU0nkjXyfFIclX_5hGpY,9933
-valor_lite-0.33.5.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
-valor_lite-0.33.5.dist-info/METADATA,sha256=WL0LQR2fT4CO4MuV0aXIkLPt3zQW2SsBS4MwcA_kHJY,1865
-valor_lite-0.33.5.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-valor_lite-0.33.5.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
-valor_lite-0.33.5.dist-info/RECORD,,
{valor_lite-0.33.5.dist-info → valor_lite-0.33.7.dist-info}/LICENSE
File without changes

{valor_lite-0.33.5.dist-info → valor_lite-0.33.7.dist-info}/WHEEL
File without changes

{valor_lite-0.33.5.dist-info → valor_lite-0.33.7.dist-info}/top_level.txt
File without changes