valor-lite 0.33.17-py3-none-any.whl → 0.33.19-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of valor-lite might be problematic.
- valor_lite/classification/computation.py +6 -6
- valor_lite/classification/metric.py +6 -6
- valor_lite/classification/utilities.py +10 -8
- valor_lite/object_detection/computation.py +14 -14
- valor_lite/object_detection/manager.py +6 -2
- valor_lite/object_detection/metric.py +12 -12
- valor_lite/object_detection/utilities.py +21 -19
- valor_lite/profiling.py +374 -0
- valor_lite/semantic_segmentation/__init__.py +2 -1
- valor_lite/semantic_segmentation/annotation.py +84 -1
- valor_lite/semantic_segmentation/benchmark.py +151 -0
- valor_lite/semantic_segmentation/computation.py +20 -33
- valor_lite/semantic_segmentation/manager.py +6 -2
- valor_lite/semantic_segmentation/metric.py +10 -10
- valor_lite/semantic_segmentation/utilities.py +6 -6
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/METADATA +9 -9
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/RECORD +20 -18
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/WHEEL +1 -1
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/LICENSE +0 -0
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/top_level.txt +0 -0
valor_lite/semantic_segmentation/computation.py

@@ -31,9 +31,6 @@ def compute_intermediate_confusion_matrices(
         A 2-D confusion matrix with shape (n_labels + 1, n_labels + 1).
     """

-    n_gt_labels = groundtruth_labels.size
-    n_pd_labels = prediction_labels.size
-
     groundtruth_counts = groundtruths.sum(axis=1)
     prediction_counts = predictions.sum(axis=1)

@@ -42,33 +39,23 @@ def compute_intermediate_confusion_matrices(
     ).sum()

     intersection_counts = np.logical_and(
-        groundtruths
-        predictions
+        groundtruths[:, None, :],
+        predictions[None, :, :],
     ).sum(axis=2)
-
     intersected_groundtruth_counts = intersection_counts.sum(axis=1)
     intersected_prediction_counts = intersection_counts.sum(axis=0)

     confusion_matrix = np.zeros((n_labels + 1, n_labels + 1), dtype=np.int32)
     confusion_matrix[0, 0] = background_counts
-
-
-
-
-
-
-
-
-
-        if gidx == 0:
-            confusion_matrix[0, pd_label_idx + 1] = (
-                prediction_counts[pidx]
-                - intersected_prediction_counts[pidx]
-            )
-
-        confusion_matrix[gt_label_idx + 1, 0] = (
-            groundtruth_counts[gidx] - intersected_groundtruth_counts[gidx]
-        )
+    confusion_matrix[
+        np.ix_(groundtruth_labels + 1, prediction_labels + 1)
+    ] = intersection_counts
+    confusion_matrix[0, prediction_labels + 1] = (
+        prediction_counts - intersected_prediction_counts
+    )
+    confusion_matrix[groundtruth_labels + 1, 0] = (
+        groundtruth_counts - intersected_groundtruth_counts
+    )

     return confusion_matrix

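The change above replaces a per-pair Python loop with one broadcast intersection and an np.ix_ scatter. A minimal, self-contained sketch of that pattern; the masks and label indices below are made up for illustration and are not the library's data:

import numpy as np

# Hypothetical flattened boolean masks: one row per label present in the image.
groundtruths = np.array([[1, 1, 0, 0, 0],
                         [0, 0, 1, 1, 0]], dtype=bool)
predictions = np.array([[1, 0, 0, 0, 0],
                        [0, 0, 1, 0, 1]], dtype=bool)
groundtruth_labels = np.array([0, 2])  # label index of each ground truth row
prediction_labels = np.array([0, 2])   # label index of each prediction row
n_labels = 4

# Broadcast to (n_gt, n_pd, n_pixels), then count overlapping pixels per pair.
intersection_counts = np.logical_and(
    groundtruths[:, None, :],
    predictions[None, :, :],
).sum(axis=2)

# Row/column 0 is reserved for unmatched (background) pixels.
confusion_matrix = np.zeros((n_labels + 1, n_labels + 1), dtype=np.int32)
confusion_matrix[
    np.ix_(groundtruth_labels + 1, prediction_labels + 1)
] = intersection_counts
confusion_matrix[0, prediction_labels + 1] = (
    predictions.sum(axis=1) - intersection_counts.sum(axis=0)
)
confusion_matrix[groundtruth_labels + 1, 0] = (
    groundtruths.sum(axis=1) - intersection_counts.sum(axis=1)
)
print(confusion_matrix)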
@@ -111,9 +98,9 @@ def compute_metrics(
     NDArray[np.float64]
         Confusion matrix containing IOU values.
     NDArray[np.float64]
-
+        Unmatched prediction ratios.
     NDArray[np.float64]
-
+        Unmatched ground truth ratios.
     """
     n_labels = label_metadata.shape[0]
     gt_counts = label_metadata[:, 0]
@@ -121,7 +108,7 @@ def compute_metrics(

     counts = data.sum(axis=0)

-    # compute iou,
+    # compute iou, unmatched_ground_truth and unmatched predictions
     intersection_ = counts[1:, 1:]
     union_ = (
         gt_counts[:, np.newaxis] + pd_counts[np.newaxis, :] - intersection_
@@ -135,20 +122,20 @@ def compute_metrics(
         out=ious,
     )

-
+    unmatched_prediction_ratio = np.zeros((n_labels), dtype=np.float64)
     np.divide(
         counts[0, 1:],
         pd_counts,
         where=pd_counts > 1e-9,
-        out=
+        out=unmatched_prediction_ratio,
     )

-
+    unmatched_ground_truth_ratio = np.zeros((n_labels), dtype=np.float64)
     np.divide(
         counts[1:, 0],
         gt_counts,
         where=gt_counts > 1e-9,
-        out=
+        out=unmatched_ground_truth_ratio,
     )

     # compute precision, recall, f1
@@ -181,6 +168,6 @@ def compute_metrics(
         f1_score,
         accuracy,
         ious,
-
-
+        unmatched_prediction_ratio,
+        unmatched_ground_truth_ratio,
     )
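The new ratio code relies on np.divide's where/out guard: the output array is pre-filled with zeros, and entries whose denominator is ~zero are simply left untouched instead of producing inf or nan. A tiny sketch with illustrative numbers:

import numpy as np

pd_counts = np.array([10.0, 0.0, 20.0])       # one label has no predicted pixels
unmatched_pixels = np.array([3.0, 0.0, 5.0])  # plays the role of counts[0, 1:]

unmatched_prediction_ratio = np.zeros(3, dtype=np.float64)
np.divide(
    unmatched_pixels,
    pd_counts,
    where=pd_counts > 1e-9,  # skip labels with no predictions
    out=unmatched_prediction_ratio,
)
print(unmatched_prediction_ratio)  # [0.3  0.   0.25]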
valor_lite/semantic_segmentation/manager.py

@@ -243,6 +243,10 @@ class Evaluator:
         return self.compute_precision_recall_iou(filter_=filter_)


+def defaultdict_int():
+    return defaultdict(int)
+
+
 class DataLoader:
     """
     Segmentation DataLoader.
@@ -250,8 +254,8 @@ class DataLoader:

     def __init__(self):
         self._evaluator = Evaluator()
-        self.groundtruth_count = defaultdict(
-        self.prediction_count = defaultdict(
+        self.groundtruth_count = defaultdict(defaultdict_int)
+        self.prediction_count = defaultdict(defaultdict_int)
         self.matrices = list()
         self.pixel_count = list()

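Switching the default factory to a module-level defaultdict_int makes it an importable name, which is what pickle requires; presumably this keeps DataLoader instances serializable (for example, when handed to worker processes). A sketch of the difference, not the library's code:

import pickle
from collections import defaultdict

def defaultdict_int():
    return defaultdict(int)

counts = defaultdict(defaultdict_int)
counts["image_1"]["road"] += 1
restored = pickle.loads(pickle.dumps(counts))  # round-trips fine
print(restored["image_1"]["road"])  # 1

# A lambda-based factory cannot be pickled:
# pickle.dumps(defaultdict(lambda: defaultdict(int)))  # raises an error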
valor_lite/semantic_segmentation/metric.py

@@ -209,11 +209,11 @@ class Metric(BaseMetric):
                 dict[str, float],  # iou
             ],
         ],
-
+        unmatched_predictions: dict[
             str,  # prediction label value
             dict[str, float],  # pixel ratio
         ],
-
+        unmatched_ground_truths: dict[
             str,  # ground truth label value
             dict[str, float],  # pixel ratio
         ],
@@ -222,8 +222,8 @@ class Metric(BaseMetric):
         The confusion matrix and related metrics for semantic segmentation tasks.

         This class encapsulates detailed information about the model's performance, including correct
-        predictions, misclassifications,
-        (false negatives). It provides counts for each category to facilitate in-depth analysis.
+        predictions, misclassifications, unmatched_predictions (subset of false positives), and unmatched ground truths
+        (subset of false negatives). It provides counts for each category to facilitate in-depth analysis.

         Confusion Matrix Format:
         {
@@ -234,14 +234,14 @@ class Metric(BaseMetric):
             },
         }

-
+        Unmatched Predictions Format:
         {
             <prediction label>: {
                 'iou': <float>,
             },
         }

-
+        Unmatched Ground Truths Format:
         {
             <ground truth label>: {
                 'iou': <float>,
@@ -253,10 +253,10 @@ class Metric(BaseMetric):
         confusion_matrix : dict
             Nested dictionaries representing the Intersection over Union (IOU) scores for each
             ground truth label and prediction label pair.
-
+        unmatched_predictions : dict
             Dictionary representing the pixel ratios for predicted labels that do not correspond
             to any ground truth labels (false positives).
-
+        unmatched_ground_truths : dict
             Dictionary representing the pixel ratios for ground truth labels that were not predicted
             (false negatives).

@@ -268,8 +268,8 @@ class Metric(BaseMetric):
             type=MetricType.ConfusionMatrix.value,
             value={
                 "confusion_matrix": confusion_matrix,
-                "
-                "
+                "unmatched_predictions": unmatched_predictions,
+                "unmatched_ground_truths": unmatched_ground_truths,
             },
             parameters={},
         )
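For reference, a hypothetical ConfusionMatrix payload with the renamed keys, shaped per the formats documented above and the "ratio" key that utilities.py emits (all values invented):

value = {
    "confusion_matrix": {
        "road": {"road": {"iou": 0.82}, "sky": {"iou": 0.01}},
        "sky": {"road": {"iou": 0.02}, "sky": {"iou": 0.91}},
    },
    "unmatched_predictions": {"road": {"ratio": 0.05}},
    "unmatched_ground_truths": {"sky": {"ratio": 0.03}},
}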
valor_lite/semantic_segmentation/utilities.py

@@ -18,8 +18,8 @@ def unpack_precision_recall_iou_into_metric_lists(
         f1_score,
         accuracy,
         ious,
-
-
+        unmatched_prediction_ratios,
+        unmatched_ground_truth_ratios,
     ) = results

     metrics = defaultdict(list)
@@ -43,16 +43,16 @@ def unpack_precision_recall_iou_into_metric_lists(
             for gt_label_idx in range(n_labels)
             if label_metadata[gt_label_idx, 0] > 0
         },
-
+        unmatched_predictions={
             index_to_label[pd_label_idx]: {
-                "ratio": float(
+                "ratio": float(unmatched_prediction_ratios[pd_label_idx])
             }
             for pd_label_idx in range(n_labels)
             if label_metadata[pd_label_idx, 0] > 0
         },
-
+        unmatched_ground_truths={
             index_to_label[gt_label_idx]: {
-                "ratio": float(
+                "ratio": float(unmatched_ground_truth_ratios[gt_label_idx])
             }
             for gt_label_idx in range(n_labels)
             if label_metadata[gt_label_idx, 0] > 0
{valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: valor-lite
-Version: 0.33.17
+Version: 0.33.19
 Summary: Compute valor metrics locally.
 License: MIT License

@@ -29,22 +29,22 @@ Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: evaluate
+Requires-Dist: importlib_metadata; python_version < "3.8"
 Requires-Dist: nltk
 Requires-Dist: numpy
-Requires-Dist: Pillow
+Requires-Dist: Pillow>=9.1.0
 Requires-Dist: requests
-Requires-Dist:
+Requires-Dist: rouge_score
 Requires-Dist: shapely
 Requires-Dist: tqdm
-Requires-Dist: importlib-metadata ; python_version < "3.8"
 Provides-Extra: mistral
-Requires-Dist: mistralai
+Requires-Dist: mistralai>=1.0; extra == "mistral"
 Provides-Extra: openai
-Requires-Dist: openai
+Requires-Dist: openai; extra == "openai"
 Provides-Extra: test
-Requires-Dist: pytest
-Requires-Dist: coverage
-Requires-Dist: pre-commit
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: coverage; extra == "test"
+Requires-Dist: pre-commit; extra == "test"

 # valor-lite: Fast, local machine learning evaluation.

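One way to confirm these metadata changes against an installed copy of the 0.33.19 wheel, using only the standard library (output should mirror the METADATA above):

from importlib.metadata import metadata, requires

print(metadata("valor-lite")["Version"])  # expect: 0.33.19
for req in requires("valor-lite") or []:
    print(req)  # expect entries like: pytest; extra == "test"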
{valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/RECORD

@@ -1,24 +1,26 @@
 valor_lite/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
 valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valor_lite/profiling.py,sha256=TLIROA1qccFw9NoEkMeQcrvvGGO75c4K5yTIWoCUix8,11746
 valor_lite/schemas.py,sha256=pB0MrPx5qFLbwBWDiOUUm-vmXdWvbJLFCBmKgbcbI5g,198
 valor_lite/classification/__init__.py,sha256=8MI8bGwCxYGqRP7KxG7ezhYv4qQ5947XGvvlF8WPM5g,392
 valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113
-valor_lite/classification/computation.py,sha256=
+valor_lite/classification/computation.py,sha256=XU-55bzR2JPKDRr8CvCbHHavM5vjBw1TrUX1mOIm_TY,12121
 valor_lite/classification/manager.py,sha256=8GXZECSx4CBbG5NfPrA19BPENqmrjo-wZBmaulWHY20,16676
-valor_lite/classification/metric.py,sha256=
-valor_lite/classification/utilities.py,sha256=
+valor_lite/classification/metric.py,sha256=_mW3zynmpW8jUIhK2OeX4usdftHgHM9_l7EAbEe2N3w,12288
+valor_lite/classification/utilities.py,sha256=7P3H2wsUR_qmK4WJBFPkViOvhhKVMkCHvDKJbeentMM,6963
 valor_lite/object_detection/__init__.py,sha256=Ql8rju2q7y0Zd9zFvtBJDRhgQFDm1RSYkTsyH3ZE6pA,648
 valor_lite/object_detection/annotation.py,sha256=x9bsl8b75yvkMByXXiIYI9d9T03olDqtykSvKJc3aFw,7729
-valor_lite/object_detection/computation.py,sha256=
-valor_lite/object_detection/manager.py,sha256=
-valor_lite/object_detection/metric.py,sha256=
-valor_lite/object_detection/utilities.py,sha256=
-valor_lite/semantic_segmentation/__init__.py,sha256=
-valor_lite/semantic_segmentation/annotation.py,sha256=
-valor_lite/semantic_segmentation/
-valor_lite/semantic_segmentation/
-valor_lite/semantic_segmentation/
-valor_lite/semantic_segmentation/
+valor_lite/object_detection/computation.py,sha256=70XkgyxAbvZC8FcTKqdAATD5Muebw_jGjoTKPaecwjE,28141
+valor_lite/object_detection/manager.py,sha256=utdILUUCx04EWC0_bHGpEPaxcCOhmsOx5lxT9qU1a9s,23033
+valor_lite/object_detection/metric.py,sha256=npK2sxiwCUTKlRlFym1AlZTvP9herf9lakbsBDwljGM,24901
+valor_lite/object_detection/utilities.py,sha256=F0sfhkledX3h3jXLDpLro7zHbUF9ig7r35J7QbogQVY,16437
+valor_lite/semantic_segmentation/__init__.py,sha256=BhTUbwbdJa1FdS4ZA3QSIZ8TuJmdGGLGCd5hX6SzKa4,297
+valor_lite/semantic_segmentation/annotation.py,sha256=xd2qJyIeTW8CT_Goyu3Kvl_51b9b6D3WvUfqwShR0Sk,4990
+valor_lite/semantic_segmentation/benchmark.py,sha256=iVdxUo9LgDbbXUa6eRhZ49LOYw-yyr2W4p9FP3KHg0k,3848
+valor_lite/semantic_segmentation/computation.py,sha256=l98h8s9RTWQOB_eg2rconqGL1ZbTS4GMtz69vbyEdQ0,4741
+valor_lite/semantic_segmentation/manager.py,sha256=TtwJI7Bsn3zHL2ECOqCmymG-JqREo7I6qxYtycbz54Y,14322
+valor_lite/semantic_segmentation/metric.py,sha256=T9RfPJf4WgqGQTXYvSy08vJG5bjXXJnyYZeW0mlxMa8,7132
+valor_lite/semantic_segmentation/utilities.py,sha256=y9SgArhrk-u_r54SgQxGEKrDjow5KvPG8Zj1GeZefqY,2958
 valor_lite/text_generation/__init__.py,sha256=pGhpWCSZjLM0pPHCtPykAfos55B8ie3mi9EzbNxfj-U,356
 valor_lite/text_generation/annotation.py,sha256=O5aXiwCS4WjA-fqn4ly-O0MsTHoIOmqxqCaAp9IeI3M,1270
 valor_lite/text_generation/computation.py,sha256=cG35qMpxNPEYHXN2fz8wcanESriSHoWMl1idpm9-ous,18638
@@ -31,8 +33,8 @@ valor_lite/text_generation/llm/instructions.py,sha256=fz2onBZZWcl5W8iy7zEWkPGU9N
 valor_lite/text_generation/llm/integrations.py,sha256=-rTfdAjq1zH-4ixwYuMQEOQ80pIFzMTe0BYfroVx3Pg,6974
 valor_lite/text_generation/llm/utilities.py,sha256=bjqatGgtVTcl1PrMwiDKTYPGJXKrBrx7PDtzIblGSys,1178
 valor_lite/text_generation/llm/validators.py,sha256=Wzr5RlfF58_2wOU-uTw7C8skan_fYdhy4Gfn0jSJ8HM,2700
-valor_lite-0.33.
-valor_lite-0.33.
-valor_lite-0.33.
-valor_lite-0.33.
-valor_lite-0.33.
+valor_lite-0.33.19.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+valor_lite-0.33.19.dist-info/METADATA,sha256=lwmxYzLPlKkCZM0mVkepQYYy4mjbg34UEOeeqM7ZE2k,5880
+valor_lite-0.33.19.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+valor_lite-0.33.19.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.33.19.dist-info/RECORD,,
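RECORD rows have the form path,sha256=<digest>,<size>, where the digest is an unpadded urlsafe base64 encoding of the file's SHA-256 (per the wheel spec). A sketch for recomputing one row from an unpacked wheel; the path argument is whichever file you want to check:

import base64
import hashlib
from pathlib import Path

def record_row(path: Path) -> str:
    data = path.read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# e.g. the new file added in this release:
print(record_row(Path("valor_lite/profiling.py")))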