valor-lite 0.35.0__py3-none-any.whl → 0.36.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valor_lite/classification/computation.py +147 -38
- valor_lite/classification/manager.py +221 -235
- valor_lite/classification/metric.py +5 -8
- valor_lite/classification/utilities.py +18 -14
- valor_lite/exceptions.py +15 -0
- valor_lite/object_detection/__init__.py +2 -1
- valor_lite/object_detection/computation.py +83 -10
- valor_lite/object_detection/manager.py +313 -315
- valor_lite/semantic_segmentation/__init__.py +3 -3
- valor_lite/semantic_segmentation/annotation.py +32 -103
- valor_lite/semantic_segmentation/benchmark.py +87 -1
- valor_lite/semantic_segmentation/computation.py +96 -14
- valor_lite/semantic_segmentation/manager.py +193 -221
- valor_lite/semantic_segmentation/utilities.py +3 -3
- {valor_lite-0.35.0.dist-info → valor_lite-0.36.1.dist-info}/METADATA +2 -2
- {valor_lite-0.35.0.dist-info → valor_lite-0.36.1.dist-info}/RECORD +18 -17
- {valor_lite-0.35.0.dist-info → valor_lite-0.36.1.dist-info}/WHEEL +1 -1
- {valor_lite-0.35.0.dist-info → valor_lite-0.36.1.dist-info}/top_level.txt +0 -0
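The headline change for `valor_lite.semantic_segmentation` is the reworked filtering API visible in the manager.py diff below: `Evaluator.create_filter` now takes `datum_ids` and `labels` keyword arguments and returns a `Filter` carrying boolean masks plus a `Metadata` summary. A minimal usage sketch, assuming `valor_lite.semantic_segmentation` exports `DataLoader` and `MetricType` and that `segmentations` is a user-provided list of `Segmentation` objects (both assumptions, mirroring the package's own docstring example):

    from valor_lite.semantic_segmentation import DataLoader, MetricType

    # build an evaluator from user-provided Segmentation objects (assumed to exist)
    loader = DataLoader()
    loader.add_data(segmentations)
    evaluator = loader.finalize()

    # unfiltered metrics
    metrics = evaluator.evaluate()
    f1_metrics = metrics[MetricType.F1]

    # 0.36.x filtering: select datums by uid, then re-evaluate
    filter_mask = evaluator.create_filter(datum_ids=["uid1", "uid2"])
    filtered_metrics = evaluator.evaluate(filter_mask)  # the parameter is named `filter_` in the signature below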
valor_lite/semantic_segmentation/manager.py

@@ -1,14 +1,16 @@
-from
-from dataclasses import dataclass
+from dataclasses import asdict, dataclass
 
 import numpy as np
 from numpy.typing import NDArray
 from tqdm import tqdm
 
+from valor_lite.exceptions import EmptyEvaluatorException, EmptyFilterException
 from valor_lite.semantic_segmentation.annotation import Segmentation
 from valor_lite.semantic_segmentation.computation import (
     compute_intermediate_confusion_matrices,
+    compute_label_metadata,
     compute_metrics,
+    filter_cache,
 )
 from valor_lite.semantic_segmentation.metric import Metric, MetricType
 from valor_lite.semantic_segmentation.utilities import (
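The new `valor_lite.exceptions` module supplies typed failure modes used further down in this file: `DataLoader.finalize()` raises `EmptyEvaluatorException` when nothing was loaded, and `Filter.__post_init__` raises `EmptyFilterException` when a filter would drop every datum or every label. A hedged sketch of how a caller might guard against the former (the empty `loader` here is hypothetical):

    from valor_lite.exceptions import EmptyEvaluatorException
    from valor_lite.semantic_segmentation import DataLoader

    loader = DataLoader()  # nothing added yet
    try:
        evaluator = loader.finalize()
    except EmptyEvaluatorException:
        # finalize() now signals "no matrices to evaluate" with a dedicated exception
        evaluator = None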
@@ -31,16 +33,52 @@ metrics = evaluator.evaluate()
 f1_metrics = metrics[MetricType.F1]
 accuracy_metrics = metrics[MetricType.Accuracy]
 
-filter_mask = evaluator.create_filter(
+filter_mask = evaluator.create_filter(datum_ids=["uid1", "uid2"])
 filtered_metrics = evaluator.evaluate(filter_mask=filter_mask)
 """
 
 
+@dataclass
+class Metadata:
+    number_of_labels: int = 0
+    number_of_pixels: int = 0
+    number_of_datums: int = 0
+    number_of_ground_truths: int = 0
+    number_of_predictions: int = 0
+
+    @classmethod
+    def create(
+        cls,
+        confusion_matrices: NDArray[np.int64],
+    ):
+        if confusion_matrices.size == 0:
+            return cls()
+        return cls(
+            number_of_labels=confusion_matrices.shape[1] - 1,
+            number_of_pixels=confusion_matrices.sum(),
+            number_of_datums=confusion_matrices.shape[0],
+            number_of_ground_truths=confusion_matrices[:, 1:, :].sum(),
+            number_of_predictions=confusion_matrices[:, :, 1:].sum(),
+        )
+
+    def to_dict(self) -> dict[str, int | bool]:
+        return asdict(self)
+
+
 @dataclass
 class Filter:
-
-
-
+    datum_mask: NDArray[np.bool_]
+    label_mask: NDArray[np.bool_]
+    metadata: Metadata
+
+    def __post_init__(self):
+        # validate datum mask
+        if not self.datum_mask.any():
+            raise EmptyFilterException("filter removes all datums")
+
+        # validate label mask
+        if self.label_mask.all():
+            raise EmptyFilterException("filter removes all labels")
 
 
 class Evaluator:
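The new `Metadata.create` classmethod derives all of its counts from the cached confusion-matrix stack of shape `(n_datums, n_labels + 1, n_labels + 1)`, where index 0 appears to be reserved for the unmatched row/column. A small numpy sketch of that bookkeeping with toy values (not library code):

    import numpy as np

    # two datums, two labels -> stack shape (2, 3, 3); index 0 is the "unmatched" slot
    confusion_matrices = np.zeros((2, 3, 3), dtype=np.int64)
    confusion_matrices[0, 1, 1] = 100  # datum 0: label 0 predicted correctly on 100 pixels
    confusion_matrices[1, 2, 1] = 40   # datum 1: label 1 pixels predicted as label 0

    number_of_labels = confusion_matrices.shape[1] - 1            # 2
    number_of_datums = confusion_matrices.shape[0]                # 2
    number_of_pixels = confusion_matrices.sum()                   # 140
    number_of_ground_truths = confusion_matrices[:, 1:, :].sum()  # 140 (rows 1.. are ground-truth labels)
    number_of_predictions = confusion_matrices[:, :, 1:].sum()    # 140 (cols 1.. are predicted labels)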
@@ -49,29 +87,21 @@ class Evaluator:
     """
 
     def __init__(self):
+        """Initializes evaluator caches."""
+        # external references
+        self.datum_id_to_index: dict[str, int] = {}
+        self.index_to_datum_id: list[str] = []
+        self.label_to_index: dict[str, int] = {}
+        self.index_to_label: list[str] = []
+
+        # internal caches
+        self._confusion_matrices = np.array([], dtype=np.int64)
+        self._label_metadata = np.array([], dtype=np.int64)
+        self._metadata = Metadata()
 
-
-
-        self.
-        self.n_predictions = 0
-        self.n_pixels = 0
-        self.n_groundtruth_pixels = 0
-        self.n_prediction_pixels = 0
-        self.n_labels = 0
-
-        # datum reference
-        self.uid_to_index: dict[str, int] = dict()
-        self.index_to_uid: dict[int, str] = dict()
-
-        # label reference
-        self.label_to_index: dict[str, int] = dict()
-        self.index_to_label: dict[int, str] = dict()
-
-        # computation caches
-        self._confusion_matrices = np.array([])
-        self._label_metadata = np.array([], dtype=np.int32)
-        self._label_metadata_per_datum = np.array([], dtype=np.int32)
-        self._n_pixels_per_datum = np.array([], dtype=np.int32)
+    @property
+    def metadata(self) -> Metadata:
+        return self._metadata
 
     @property
     def ignored_prediction_labels(self) -> list[str]:
@@ -95,129 +125,130 @@ class Evaluator:
             self.index_to_label[label_id] for label_id in (glabels - plabels)
         ]
 
-    @property
-    def metadata(self) -> dict:
-        """
-        Evaluation metadata.
-        """
-        return {
-            "number_of_datums": self.n_datums,
-            "number_of_groundtruths": self.n_groundtruths,
-            "number_of_predictions": self.n_predictions,
-            "number_of_groundtruth_pixels": self.n_groundtruth_pixels,
-            "number_of_prediction_pixels": self.n_prediction_pixels,
-            "number_of_labels": self.n_labels,
-            "ignored_prediction_labels": self.ignored_prediction_labels,
-            "missing_prediction_labels": self.missing_prediction_labels,
-        }
-
     def create_filter(
         self,
-
-        labels: list[str] |
+        datum_ids: list[str] | None = None,
+        labels: list[str] | None = None,
     ) -> Filter:
         """
-        Creates a
+        Creates a filter for use with the evaluator.
 
         Parameters
         ----------
-
-            An optional list of string uids
-        labels : list[
-            An optional list of labels
+        datum_ids : list[str], optional
+            An optional list of string uids representing datums.
+        labels : list[str], optional
+            An optional list of labels.
 
         Returns
         -------
         Filter
-
+            The filter object containing a mask and metadata.
         """
-
-
-
-
-        mask_labels = np.ones(n_labels, dtype=np.bool_)
+        datum_mask = np.ones(self._confusion_matrices.shape[0], dtype=np.bool_)
+        label_mask = np.zeros(
+            self.metadata.number_of_labels + 1, dtype=np.bool_
+        )
 
-        if
-        if
-
-
-
+        if datum_ids is not None:
+            if not datum_ids:
+                return Filter(
+                    datum_mask=np.zeros_like(datum_mask),
+                    label_mask=label_mask,
+                    metadata=Metadata(),
                 )
-
-
-
-
-
-
-            ).
-
+            datum_id_array = np.array(
+                [self.datum_id_to_index[uid] for uid in datum_ids],
+                dtype=np.int64,
+            )
+            datum_id_array.sort()
+            mask_valid_datums = (
+                np.arange(self._confusion_matrices.shape[0]).reshape(-1, 1)
+                == datum_id_array.reshape(1, -1)
+            ).any(axis=1)
+            datum_mask[~mask_valid_datums] = False
 
         if labels is not None:
-            if
-
-
-
+            if not labels:
+                return Filter(
+                    datum_mask=datum_mask,
+                    label_mask=np.ones_like(label_mask),
+                    metadata=Metadata(),
                 )
-
-
-
-            mask = (
-                np.arange(n_labels).reshape(-1, 1) == labels.reshape(1, -1)
-            ).any(axis=1)
-            mask_labels[~mask] = False
-
-        mask = mask_datums[:, np.newaxis] & mask_labels[np.newaxis, :]
-        label_metadata_per_datum = self._label_metadata_per_datum.copy()
-        label_metadata_per_datum[:, ~mask] = 0
-
-        label_metadata = np.zeros_like(self._label_metadata, dtype=np.int32)
-        label_metadata = np.transpose(
-            np.sum(
-                label_metadata_per_datum,
-                axis=1,
+            labels_id_array = np.array(
+                [self.label_to_index[label] for label in labels] + [-1],
+                dtype=np.int64,
            )
+            label_range = np.arange(self.metadata.number_of_labels + 1) - 1
+            mask_valid_labels = (
+                label_range.reshape(-1, 1) == labels_id_array.reshape(1, -1)
+            ).any(axis=1)
+            label_mask[~mask_valid_labels] = True
+
+        filtered_confusion_matrices, _ = filter_cache(
+            confusion_matrices=self._confusion_matrices.copy(),
+            datum_mask=datum_mask,
+            label_mask=label_mask,
+            number_of_labels=self.metadata.number_of_labels,
            )
-        n_datums = int(np.sum(label_metadata[:, 0]))
 
         return Filter(
-
-
-
+            datum_mask=datum_mask,
+            label_mask=label_mask,
+            metadata=Metadata.create(
+                confusion_matrices=filtered_confusion_matrices,
+            ),
         )
 
-    def
-        self,
-
-    ) -> dict[MetricType, list]:
+    def filter(
+        self, filter_: Filter
+    ) -> tuple[NDArray[np.int64], NDArray[np.int64]]:
         """
-        Performs
+        Performs the filter operation over the internal cache.
 
         Parameters
         ----------
-        filter_ : Filter
-            An
+        filter_ : Filter
+            An object describing the filter operation.
+
+        Returns
+        -------
+        NDArray[int64]
+            Filtered confusion matrices.
+        NDArray[int64]
+            Filtered label metadata
+        """
+        return filter_cache(
+            confusion_matrices=self._confusion_matrices.copy(),
+            datum_mask=filter_.datum_mask,
+            label_mask=filter_.label_mask,
+            number_of_labels=self.metadata.number_of_labels,
+        )
+
+    def compute_precision_recall_iou(
+        self, filter_: Filter | None = None
+    ) -> dict[MetricType, list]:
+        """
+        Performs an evaluation and returns metrics.
 
         Returns
         -------
         dict[MetricType, list]
             A dictionary mapping MetricType enumerations to lists of computed metrics.
         """
-
-        # apply filters
-        data = self._confusion_matrices
-        label_metadata = self._label_metadata
-        n_pixels = self.n_pixels
         if filter_ is not None:
-
-
-
+            confusion_matrices, label_metadata = self.filter(filter_)
+            n_pixels = filter_.metadata.number_of_pixels
+        else:
+            confusion_matrices = self._confusion_matrices
+            label_metadata = self._label_metadata
+            n_pixels = self.metadata.number_of_pixels
 
         results = compute_metrics(
-
+            confusion_matrices=confusion_matrices,
             label_metadata=label_metadata,
             n_pixels=n_pixels,
         )
-
         return unpack_precision_recall_iou_into_metric_lists(
             results=results,
             label_metadata=label_metadata,
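`create_filter` now builds plain boolean masks instead of recomputing per-datum label metadata; the datum mask is an index-membership test against the rows of the confusion-matrix cache. A standalone sketch of that comparison with toy sizes (not library code):

    import numpy as np

    n_datums = 4
    datum_id_array = np.array([0, 2], dtype=np.int64)  # indices resolved from datum uids

    datum_mask = np.ones(n_datums, dtype=np.bool_)
    mask_valid_datums = (
        np.arange(n_datums).reshape(-1, 1) == datum_id_array.reshape(1, -1)
    ).any(axis=1)
    datum_mask[~mask_valid_datums] = False  # -> [True, False, True, False]

Note the label mask has the opposite polarity: `True` marks a label to drop, which is why `Filter.__post_init__` above rejects a label mask that is all `True`.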
@@ -225,17 +256,11 @@ class Evaluator:
         )
 
     def evaluate(
-        self,
-        filter_: Filter | None = None,
+        self, filter_: Filter | None = None
     ) -> dict[MetricType, list[Metric]]:
         """
         Computes all available metrics.
 
-        Parameters
-        ----------
-        filter_ : Filter, optional
-            An optional filter object.
-
         Returns
         -------
         dict[MetricType, list[Metric]]
@@ -244,10 +269,6 @@ class Evaluator:
         return self.compute_precision_recall_iou(filter_=filter_)
 
 
-def defaultdict_int():
-    return defaultdict(int)
-
-
 class DataLoader:
     """
     Segmentation DataLoader.
@@ -255,10 +276,7 @@ class DataLoader:
 
     def __init__(self):
         self._evaluator = Evaluator()
-        self.groundtruth_count = defaultdict(defaultdict_int)
-        self.prediction_count = defaultdict(defaultdict_int)
         self.matrices = list()
-        self.pixel_count = list()
 
     def _add_datum(self, uid: str) -> int:
         """
@@ -274,11 +292,11 @@ class DataLoader:
         int
             The datum index.
         """
-        if uid in self._evaluator.
-            raise ValueError(f"Datum with uid `{uid}`
-        index = len(self._evaluator.
-        self._evaluator.
-        self._evaluator.
+        if uid in self._evaluator.datum_id_to_index:
+            raise ValueError(f"Datum with uid `{uid}` already exists.")
+        index = len(self._evaluator.datum_id_to_index)
+        self._evaluator.datum_id_to_index[uid] = index
+        self._evaluator.index_to_datum_id.append(uid)
         return index
 
     def _add_label(self, label: str) -> int:
@@ -298,7 +316,7 @@ class DataLoader:
         if label not in self._evaluator.label_to_index:
             label_id = len(self._evaluator.index_to_label)
             self._evaluator.label_to_index[label] = label_id
-            self._evaluator.index_to_label
+            self._evaluator.index_to_label.append(label)
         return self._evaluator.label_to_index[label]
 
     def add_data(
@@ -319,56 +337,50 @@ class DataLoader:
 
         disable_tqdm = not show_progress
         for segmentation in tqdm(segmentations, disable=disable_tqdm):
-
-            # update metadata
-            self._evaluator.n_datums += 1
-            self._evaluator.n_groundtruths += len(segmentation.groundtruths)
-            self._evaluator.n_predictions += len(segmentation.predictions)
-            self._evaluator.n_pixels += segmentation.size
-            self._evaluator.n_groundtruth_pixels += segmentation.size * len(
-                segmentation.groundtruths
-            )
-            self._evaluator.n_prediction_pixels += segmentation.size * len(
-                segmentation.predictions
-            )
-
             # update datum cache
-
+            self._add_datum(segmentation.uid)
 
-            groundtruth_labels = np.
-                len(segmentation.groundtruths),
+            groundtruth_labels = -1 * np.ones(
+                len(segmentation.groundtruths), dtype=np.int64
             )
             for idx, groundtruth in enumerate(segmentation.groundtruths):
                 label_idx = self._add_label(groundtruth.label)
                 groundtruth_labels[idx] = label_idx
-                self.groundtruth_count[label_idx][
-                    uid_index
-                ] += groundtruth.mask.sum()
 
-            prediction_labels = np.
-                len(segmentation.predictions),
+            prediction_labels = -1 * np.ones(
+                len(segmentation.predictions), dtype=np.int64
             )
             for idx, prediction in enumerate(segmentation.predictions):
                 label_idx = self._add_label(prediction.label)
                 prediction_labels[idx] = label_idx
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            if segmentation.groundtruths:
+                combined_groundtruths = np.stack(
+                    [
+                        groundtruth.mask.flatten()
+                        for groundtruth in segmentation.groundtruths
+                    ],
+                    axis=0,
+                )
+            else:
+                combined_groundtruths = np.zeros(
+                    (1, segmentation.shape[0] * segmentation.shape[1]),
+                    dtype=np.bool_,
+                )
+
+            if segmentation.predictions:
+                combined_predictions = np.stack(
+                    [
+                        prediction.mask.flatten()
+                        for prediction in segmentation.predictions
+                    ],
+                    axis=0,
+                )
+            else:
+                combined_predictions = np.zeros(
+                    (1, segmentation.shape[0] * segmentation.shape[1]),
+                    dtype=np.bool_,
+                )
 
             self.matrices.append(
                 compute_intermediate_confusion_matrices(
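`add_data` now flattens and stacks the per-annotation boolean masks itself, falling back to a single all-False row when a datum has no ground truths or no predictions, before handing them to `compute_intermediate_confusion_matrices`. A toy version of that stacking step (standalone, not library code):

    import numpy as np

    shape = (4, 5)  # H, W of a hypothetical datum
    masks = [
        np.zeros(shape, dtype=np.bool_),  # annotation 0
        np.ones(shape, dtype=np.bool_),   # annotation 1
    ]

    if masks:
        combined = np.stack([m.flatten() for m in masks], axis=0)  # (n_annotations, H*W)
    else:
        combined = np.zeros((1, shape[0] * shape[1]), dtype=np.bool_)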
@@ -379,7 +391,6 @@ class DataLoader:
                     n_labels=len(self._evaluator.index_to_label),
                 )
             )
-            self.pixel_count.append(segmentation.size)
 
     def finalize(self) -> Evaluator:
         """
@@ -392,60 +403,21 @@ class DataLoader:
         """
 
         if len(self.matrices) == 0:
-            raise
+            raise EmptyEvaluatorException()
 
-        n_datums = self._evaluator.n_datums
         n_labels = len(self._evaluator.index_to_label)
-
-        self._evaluator.n_labels = n_labels
-
-        self._evaluator._label_metadata_per_datum = np.zeros(
-            (2, n_datums, n_labels), dtype=np.int32
-        )
-        for datum_idx in range(n_datums):
-            for label_idx in range(n_labels):
-                gt_count = (
-                    self.groundtruth_count[label_idx].get(datum_idx, 0)
-                    if label_idx in self.groundtruth_count
-                    else 0
-                )
-                pd_count = (
-                    self.prediction_count[label_idx].get(datum_idx, 0)
-                    if label_idx in self.prediction_count
-                    else 0
-                )
-                self._evaluator._label_metadata_per_datum[
-                    :, datum_idx, label_idx
-                ] = np.array([gt_count, pd_count])
-
-        self._evaluator._label_metadata = np.array(
-            [
-                [
-                    np.sum(
-                        self._evaluator._label_metadata_per_datum[
-                            0, :, label_idx
-                        ]
-                    ),
-                    np.sum(
-                        self._evaluator._label_metadata_per_datum[
-                            1, :, label_idx
-                        ]
-                    ),
-                ]
-                for label_idx in range(n_labels)
-            ],
-            dtype=np.int32,
-        )
-
-        self._evaluator._n_pixels_per_datum = np.array(
-            self.pixel_count, dtype=np.int32
-        )
-
+        n_datums = len(self._evaluator.index_to_datum_id)
         self._evaluator._confusion_matrices = np.zeros(
-            (n_datums, n_labels + 1, n_labels + 1), dtype=np.
+            (n_datums, n_labels + 1, n_labels + 1), dtype=np.int64
        )
         for idx, matrix in enumerate(self.matrices):
             h, w = matrix.shape
             self._evaluator._confusion_matrices[idx, :h, :w] = matrix
-
+        self._evaluator._label_metadata = compute_label_metadata(
+            confusion_matrices=self._evaluator._confusion_matrices,
+            n_labels=n_labels,
+        )
+        self._evaluator._metadata = Metadata.create(
+            confusion_matrices=self._evaluator._confusion_matrices,
+        )
         return self._evaluator
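In `finalize()` the per-datum intermediate matrices can be smaller than the final `(n_labels + 1, n_labels + 1)` shape, presumably because some were computed before every label had been registered, so each one is copied into the top-left corner of a zero-initialized int64 cache. A minimal sketch of that padding loop with toy values:

    import numpy as np

    n_labels = 3
    matrices = [
        np.ones((2, 2), dtype=np.int64),  # computed when only one label was known
        np.ones((4, 4), dtype=np.int64),  # computed once all three labels were known
    ]

    confusion_matrices = np.zeros(
        (len(matrices), n_labels + 1, n_labels + 1), dtype=np.int64
    )
    for idx, matrix in enumerate(matrices):
        h, w = matrix.shape
        confusion_matrices[idx, :h, :w] = matrix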
valor_lite/semantic_segmentation/utilities.py

@@ -8,8 +8,8 @@ from valor_lite.semantic_segmentation.metric import Metric, MetricType
 
 def unpack_precision_recall_iou_into_metric_lists(
     results: tuple,
-    label_metadata: NDArray[np.
-    index_to_label:
+    label_metadata: NDArray[np.int64],
+    index_to_label: list[str],
 ) -> dict[MetricType, list[Metric]]:
 
     n_labels = len(index_to_label)

@@ -67,7 +67,7 @@ def unpack_precision_recall_iou_into_metric_lists(
            )
        ]
 
-    for label_idx, label in index_to_label
+    for label_idx, label in enumerate(index_to_label):
 
        kwargs = {
            "label": label,
{valor_lite-0.35.0.dist-info → valor_lite-0.36.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: valor-lite
-Version: 0.
+Version: 0.36.1
 Summary: Evaluate machine learning models.
 Project-URL: homepage, https://www.striveworks.com
 Requires-Python: >=3.10

@@ -13,7 +13,7 @@ Requires-Dist: evaluate; extra == "nlp"
 Requires-Dist: nltk; extra == "nlp"
 Requires-Dist: rouge_score; extra == "nlp"
 Provides-Extra: mistral
-Requires-Dist: mistralai
+Requires-Dist: mistralai<1.8.0,>=1.0.0; extra == "mistral"
 Provides-Extra: openai
 Requires-Dist: openai; extra == "openai"
 Provides-Extra: docs
{valor_lite-0.35.0.dist-info → valor_lite-0.36.1.dist-info}/RECORD

@@ -1,27 +1,28 @@
 valor_lite/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
 valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valor_lite/exceptions.py,sha256=S32XtowA3cUtErZMOXFKl73jquzBH-h8l2LdMYaCSnI,397
 valor_lite/profiling.py,sha256=TLIROA1qccFw9NoEkMeQcrvvGGO75c4K5yTIWoCUix8,11746
 valor_lite/schemas.py,sha256=pB0MrPx5qFLbwBWDiOUUm-vmXdWvbJLFCBmKgbcbI5g,198
 valor_lite/classification/__init__.py,sha256=8MI8bGwCxYGqRP7KxG7ezhYv4qQ5947XGvvlF8WPM5g,392
 valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113
-valor_lite/classification/computation.py,sha256=
-valor_lite/classification/manager.py,sha256=
-valor_lite/classification/metric.py,sha256=
+valor_lite/classification/computation.py,sha256=B5Y5K_ksbRnCCvkemYb23PKojKmhvSb2sF5JWpIdgD8,16271
+valor_lite/classification/manager.py,sha256=7yPcjyHLVAimTRwQla0IkRZXP6GxhOyrUjv0TAwvEDo,16267
+valor_lite/classification/metric.py,sha256=BJn82GZ7h-350ugXdRKYNPczidtjW_dvdNE194_i7BM,11905
 valor_lite/classification/numpy_compatibility.py,sha256=roqtTetsm1_HxuaejrthQdydjsRIy-FpXpGb86cLh_E,365
-valor_lite/classification/utilities.py,sha256=
-valor_lite/object_detection/__init__.py,sha256=
+valor_lite/classification/utilities.py,sha256=awKz-OdT2y5ydJuwDi5Y9FCFL1gdWyiGBZpqQxWER2A,7063
+valor_lite/object_detection/__init__.py,sha256=y3aQbjP2Y5XXSbzGkf1E1c0c0RuonGUamsQZ-lBvtdM,317
 valor_lite/object_detection/annotation.py,sha256=LVec-rIk408LuFxcOoIkPk0QZMWSSxbmsady4wapC1s,7007
-valor_lite/object_detection/computation.py,sha256=
-valor_lite/object_detection/manager.py,sha256=
+valor_lite/object_detection/computation.py,sha256=njLN-1_yql56NSVxY4KGKohxJUIStPYczVTpEpj5geA,24478
+valor_lite/object_detection/manager.py,sha256=FrHobYKk9ioYRs42jeeDk1Z64wQTiv4UjCl53d2xU1Y,27534
 valor_lite/object_detection/metric.py,sha256=sUYSZwXYfIyfmXG6_7Tje1_ZL_QwvecPq85jrGmwOWE,22739
 valor_lite/object_detection/utilities.py,sha256=tNdv5dL7JhzOamGQkZ8x3ocZoTwPI6K8rcRAGMhp2nc,11217
-valor_lite/semantic_segmentation/__init__.py,sha256=
-valor_lite/semantic_segmentation/annotation.py,sha256=
-valor_lite/semantic_segmentation/benchmark.py,sha256=
-valor_lite/semantic_segmentation/computation.py,sha256=
-valor_lite/semantic_segmentation/manager.py,sha256=
+valor_lite/semantic_segmentation/__init__.py,sha256=_BOClAErxZCpSnnl4C_ofigQfs_9Ak_AFh7EZECBW_I,267
+valor_lite/semantic_segmentation/annotation.py,sha256=XRMV32Sx9A1bAVMFQdBGc3tN5Xz2RfmlyKGXCzdee7A,3705
+valor_lite/semantic_segmentation/benchmark.py,sha256=uxd0SiDY3npsgU5pdeT4HvNP_au9GVRWzoqT6br9DtM,5961
+valor_lite/semantic_segmentation/computation.py,sha256=ZO0qAFmq8lN73UjCyiynSv18qQDtn35FNOmvuXY4rOw,7380
+valor_lite/semantic_segmentation/manager.py,sha256=QmKRCy2_dJUD3h1seReVEJ1ImOfcBcmFF6kOZPO-gN0,13319
 valor_lite/semantic_segmentation/metric.py,sha256=T9RfPJf4WgqGQTXYvSy08vJG5bjXXJnyYZeW0mlxMa8,7132
-valor_lite/semantic_segmentation/utilities.py,sha256=
+valor_lite/semantic_segmentation/utilities.py,sha256=zgVmV8nyKWQK-T4Ov8cZFQzOmTKc5EL7errKFvc2H0g,2957
 valor_lite/text_generation/__init__.py,sha256=pGhpWCSZjLM0pPHCtPykAfos55B8ie3mi9EzbNxfj-U,356
 valor_lite/text_generation/annotation.py,sha256=O5aXiwCS4WjA-fqn4ly-O0MsTHoIOmqxqCaAp9IeI3M,1270
 valor_lite/text_generation/computation.py,sha256=hGDkPfzWY9SDTdozd-nArexJ3ZSNlCIWqHGoD8vO2Cc,18652

@@ -34,7 +35,7 @@ valor_lite/text_generation/llm/instructions.py,sha256=fz2onBZZWcl5W8iy7zEWkPGU9N
 valor_lite/text_generation/llm/integrations.py,sha256=-rTfdAjq1zH-4ixwYuMQEOQ80pIFzMTe0BYfroVx3Pg,6974
 valor_lite/text_generation/llm/utilities.py,sha256=bjqatGgtVTcl1PrMwiDKTYPGJXKrBrx7PDtzIblGSys,1178
 valor_lite/text_generation/llm/validators.py,sha256=Wzr5RlfF58_2wOU-uTw7C8skan_fYdhy4Gfn0jSJ8HM,2700
-valor_lite-0.
-valor_lite-0.
-valor_lite-0.
-valor_lite-0.
+valor_lite-0.36.1.dist-info/METADATA,sha256=joZphouShgVGkl9mrPvoKB34_MxAA0XqBla-fnFC3vM,5071
+valor_lite-0.36.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+valor_lite-0.36.1.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.36.1.dist-info/RECORD,,

{valor_lite-0.35.0.dist-info → valor_lite-0.36.1.dist-info}/top_level.txt
File without changes