valor-lite 0.33.6__py3-none-any.whl → 0.33.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,532 @@
1
+ from collections import defaultdict
2
+ from dataclasses import dataclass
3
+
4
+ import numpy as np
5
+ from numpy.typing import NDArray
6
+ from tqdm import tqdm
7
+ from valor_lite.segmentation.annotation import Segmentation
8
+ from valor_lite.segmentation.computation import (
9
+ compute_intermediate_confusion_matrices,
10
+ compute_metrics,
11
+ )
12
+ from valor_lite.segmentation.metric import (
13
+ F1,
14
+ Accuracy,
15
+ ConfusionMatrix,
16
+ IoU,
17
+ MetricType,
18
+ Precision,
19
+ Recall,
20
+ mIoU,
21
+ )
22
+
23
+ """
24
+ Usage
25
+ -----
26
+
27
+ manager = DataLoader()
28
+ manager.add_data(
29
+ groundtruths=groundtruths,
30
+ predictions=predictions,
31
+ )
32
+ evaluator = manager.finalize()
33
+
34
+ metrics = evaluator.evaluate()
35
+
36
+ f1_metrics = metrics[MetricType.F1]
37
+ accuracy_metrics = metrics[MetricType.Accuracy]
38
+
39
+ filter_mask = evaluator.create_filter(datum_uids=["uid1", "uid2"])
40
+ filtered_metrics = evaluator.evaluate(filter_mask=filter_mask)
41
+ """
42
+
43
+
44
@dataclass
class Filter:
    """
    Selection produced by ``Evaluator.create_filter`` and consumed by
    ``Evaluator.evaluate`` to restrict an evaluation to a subset of
    datums and labels.
    """

    # indices of the datums that survive the filter (from np.where on the datum mask)
    indices: NDArray[np.int32]
    # aggregated per-label counts after filtering, shape (n_labels, 2):
    # column 0 = groundtruth pixel count, column 1 = prediction pixel count
    label_metadata: NDArray[np.int32]
    # total pixel count summed over the selected datums
    n_pixels: int
49
+
50
+
51
class Evaluator:
    """
    Segmentation Evaluator.

    Consumes the caches built by ``DataLoader.finalize`` (per-datum confusion
    matrices and label pixel counts) and computes segmentation metrics,
    optionally restricted by a ``Filter``.
    """

    def __init__(self):

        # metadata counters
        self.n_datums = 0
        self.n_groundtruths = 0
        self.n_predictions = 0
        self.n_pixels = 0
        self.n_groundtruth_pixels = 0
        self.n_prediction_pixels = 0
        self.n_labels = 0

        # datum reference
        self.uid_to_index: dict[str, int] = dict()
        self.index_to_uid: dict[int, str] = dict()

        # label reference
        self.label_to_index: dict[str, int] = dict()
        self.index_to_label: dict[int, str] = dict()

        # computation caches (populated by DataLoader.finalize)
        self._confusion_matrices = np.array([])
        # shape (n_labels, 2): column 0 = groundtruth count, column 1 = prediction count
        self._label_metadata = np.array([], dtype=np.int32)
        # shape (2, n_datums, n_labels): axis 0 is [groundtruth, prediction]
        self._label_metadata_per_datum = np.array([], dtype=np.int32)
        self._n_pixels_per_datum = np.array([], dtype=np.int32)

    @property
    def ignored_prediction_labels(self) -> list[str]:
        """
        Prediction labels that are not present in the ground truth set.
        """
        glabels = set(np.where(self._label_metadata[:, 0] > 0)[0])
        plabels = set(np.where(self._label_metadata[:, 1] > 0)[0])
        return [
            self.index_to_label[label_id] for label_id in (plabels - glabels)
        ]

    @property
    def missing_prediction_labels(self) -> list[str]:
        """
        Ground truth labels that are not present in the prediction set.
        """
        glabels = set(np.where(self._label_metadata[:, 0] > 0)[0])
        plabels = set(np.where(self._label_metadata[:, 1] > 0)[0])
        return [
            self.index_to_label[label_id] for label_id in (glabels - plabels)
        ]

    @property
    def metadata(self) -> dict:
        """
        Evaluation metadata.
        """
        return {
            "number_of_datums": self.n_datums,
            "number_of_groundtruths": self.n_groundtruths,
            "number_of_predictions": self.n_predictions,
            "number_of_groundtruth_pixels": self.n_groundtruth_pixels,
            "number_of_prediction_pixels": self.n_prediction_pixels,
            "number_of_labels": self.n_labels,
            "ignored_prediction_labels": self.ignored_prediction_labels,
            "missing_prediction_labels": self.missing_prediction_labels,
        }

    def create_filter(
        self,
        datum_uids: list[str] | NDArray[np.int32] | None = None,
        labels: list[str] | NDArray[np.int32] | None = None,
    ) -> Filter:
        """
        Creates a boolean mask that can be passed to an evaluation.

        Parameters
        ----------
        datum_uids : list[str] | NDArray[np.int32], optional
            An optional list of string uids or a numpy array of uid indices.
        labels : list[str] | NDArray[np.int32], optional
            An optional list of string labels or a numpy array of label
            indices.

        Returns
        -------
        Filter
            A filter object that can be passed to the `evaluate` method.
        """
        n_datums = self._label_metadata_per_datum.shape[1]
        n_labels = self._label_metadata_per_datum.shape[2]

        mask_datums = np.ones(n_datums, dtype=np.bool_)
        mask_labels = np.ones(n_labels, dtype=np.bool_)

        if datum_uids is not None:
            if isinstance(datum_uids, list):
                datum_uids = np.array(
                    [self.uid_to_index[uid] for uid in datum_uids],
                    dtype=np.int32,
                )
            if datum_uids.size == 0:
                # an explicitly empty selection removes every datum
                mask_datums[:] = False
            else:
                keep = np.isin(np.arange(n_datums), datum_uids)
                mask_datums[~keep] = False

        if labels is not None:
            if isinstance(labels, list):
                labels = np.array(
                    [self.label_to_index[label] for label in labels],
                    dtype=np.int32,
                )
            if labels.size == 0:
                # an explicitly empty selection removes every label
                mask_labels[:] = False
            else:
                keep = np.isin(np.arange(n_labels), labels)
                mask_labels[~keep] = False

        # zero out per-datum label metadata outside the (datum x label) selection
        mask = mask_datums[:, np.newaxis] & mask_labels[np.newaxis, :]
        label_metadata_per_datum = self._label_metadata_per_datum.copy()
        label_metadata_per_datum[:, ~mask] = 0

        # aggregate over datums back to shape (n_labels, 2)
        label_metadata = np.transpose(
            np.sum(
                label_metadata_per_datum,
                axis=1,
            )
        )

        return Filter(
            indices=np.where(mask_datums)[0],
            label_metadata=label_metadata,
            # cast numpy scalar to the declared `int`
            n_pixels=int(self._n_pixels_per_datum[mask_datums].sum()),
        )

    def evaluate(
        self,
        metrics_to_return: list[MetricType] | None = None,
        filter_: Filter | None = None,
        as_dict: bool = False,
    ) -> dict[MetricType, list]:
        """
        Performs an evaluation and returns metrics.

        Parameters
        ----------
        metrics_to_return : list[MetricType], optional
            A list of metrics to return in the results. Defaults to
            ``MetricType.base()``.
        filter_ : Filter, optional
            An optional filter object.
        as_dict : bool, default=False
            If True, metric objects are converted to plain dictionaries.

        Returns
        -------
        dict[MetricType, list]
            A dictionary mapping MetricType enumerations to lists of computed metrics.
        """
        # `None` sentinel instead of `MetricType.base()` in the signature:
        # a default evaluated at def-time would be a single shared mutable list.
        if metrics_to_return is None:
            metrics_to_return = MetricType.base()

        # apply filters
        data = self._confusion_matrices
        label_metadata = self._label_metadata
        n_pixels = self.n_pixels
        if filter_ is not None:
            data = data[filter_.indices]
            label_metadata = filter_.label_metadata
            n_pixels = filter_.n_pixels

        (
            precision,
            recall,
            f1_score,
            accuracy,
            ious,
            hallucination_ratios,
            missing_prediction_ratios,
        ) = compute_metrics(
            data=data,
            label_metadata=label_metadata,
            n_pixels=n_pixels,
        )

        metrics = defaultdict(list)

        metrics[MetricType.Accuracy] = [
            Accuracy(
                value=accuracy,
            )
        ]

        # NOTE(review): prediction-keyed entries below are filtered on the
        # groundtruth-count column (index 0) rather than the prediction-count
        # column (index 1) — confirm this is intentional.
        metrics[MetricType.ConfusionMatrix] = [
            ConfusionMatrix(
                confusion_matrix={
                    self.index_to_label[gt_label_idx]: {
                        self.index_to_label[pd_label_idx]: {
                            "iou": float(ious[gt_label_idx, pd_label_idx])
                        }
                        for pd_label_idx in range(self.n_labels)
                        if label_metadata[pd_label_idx, 0] > 0
                    }
                    for gt_label_idx in range(self.n_labels)
                    if label_metadata[gt_label_idx, 0] > 0
                },
                hallucinations={
                    self.index_to_label[pd_label_idx]: {
                        "percent": float(hallucination_ratios[pd_label_idx])
                    }
                    for pd_label_idx in range(self.n_labels)
                    if label_metadata[pd_label_idx, 0] > 0
                },
                missing_predictions={
                    self.index_to_label[gt_label_idx]: {
                        "percent": float(
                            missing_prediction_ratios[gt_label_idx]
                        )
                    }
                    for gt_label_idx in range(self.n_labels)
                    if label_metadata[gt_label_idx, 0] > 0
                },
            )
        ]

        metrics[MetricType.mIoU] = [
            mIoU(
                # mean of the IoU diagonal over all labels, including labels
                # without groundtruths — presumably intentional; verify.
                value=float(ious.diagonal().mean()),
            )
        ]

        for label_idx, label in self.index_to_label.items():

            # if no groundtruths exist for a label, skip it.
            if label_metadata[label_idx, 0] == 0:
                continue

            kwargs = {
                "label": label,
            }

            metrics[MetricType.Precision].append(
                Precision(
                    value=float(precision[label_idx]),
                    **kwargs,
                )
            )
            metrics[MetricType.Recall].append(
                Recall(
                    value=float(recall[label_idx]),
                    **kwargs,
                )
            )
            metrics[MetricType.F1].append(
                F1(
                    value=float(f1_score[label_idx]),
                    **kwargs,
                )
            )
            metrics[MetricType.IoU].append(
                IoU(
                    value=float(ious[label_idx, label_idx]),
                    **kwargs,
                )
            )

        # drop metric families the caller did not request
        # (iterate a list copy since we delete while iterating)
        for mtype in list(metrics):
            if mtype not in metrics_to_return:
                del metrics[mtype]

        if as_dict:
            return {
                mtype: [metric.to_dict() for metric in mvalues]
                for mtype, mvalues in metrics.items()
            }

        return metrics
330
+
331
+
332
class DataLoader:
    """
    Segmentation DataLoader.

    Accumulates per-datum intermediate confusion matrices and per-label pixel
    counts via ``add_data``, then assembles a ready-to-use ``Evaluator`` in
    ``finalize``.
    """

    def __init__(self):
        self._evaluator = Evaluator()
        # label index -> datum index -> groundtruth pixel count
        self.groundtruth_count = defaultdict(lambda: defaultdict(int))
        # label index -> datum index -> prediction pixel count
        self.prediction_count = defaultdict(lambda: defaultdict(int))
        # one intermediate confusion matrix per datum; sizes may differ
        # because the label set grows as more data is added
        self.matrices = list()
        # pixel count (segmentation.size) per datum, in insertion order
        self.pixel_count = list()

    def _add_datum(self, uid: str) -> int:
        """
        Helper function for adding a datum to the cache.

        Parameters
        ----------
        uid : str
            The datum uid.

        Returns
        -------
        int
            The datum index.

        Raises
        ------
        ValueError
            If the uid has already been added.
        """
        if uid in self._evaluator.uid_to_index:
            raise ValueError(f"Datum with uid `{uid}` has already been added.")
        # indices are assigned densely in insertion order
        index = len(self._evaluator.uid_to_index)
        self._evaluator.uid_to_index[uid] = index
        self._evaluator.index_to_uid[index] = uid
        return index

    def _add_label(self, label: str) -> int:
        """
        Helper function for adding a label to the cache.

        Parameters
        ----------
        label : str
            A string label.

        Returns
        -------
        int
            The label's index.
        """
        # register the label on first sighting; re-adding is a no-op
        if label not in self._evaluator.label_to_index:
            label_id = len(self._evaluator.index_to_label)
            self._evaluator.label_to_index[label] = label_id
            self._evaluator.index_to_label[label_id] = label
        return self._evaluator.label_to_index[label]

    def add_data(
        self,
        segmentations: list[Segmentation],
        show_progress: bool = False,
    ):
        """
        Adds segmentations to the cache.

        Parameters
        ----------
        segmentations : list[Segmentation]
            A list of Segmentation objects.
        show_progress : bool, default=False
            Toggle for tqdm progress bar.
        """

        disable_tqdm = not show_progress
        for segmentation in tqdm(segmentations, disable=disable_tqdm):

            # update metadata
            self._evaluator.n_datums += 1
            self._evaluator.n_groundtruths += len(segmentation.groundtruths)
            self._evaluator.n_predictions += len(segmentation.predictions)
            self._evaluator.n_pixels += segmentation.size
            # each annotation covers the full datum, so pixel totals scale
            # with the number of annotations
            self._evaluator.n_groundtruth_pixels += segmentation.size * len(
                segmentation.groundtruths
            )
            self._evaluator.n_prediction_pixels += segmentation.size * len(
                segmentation.predictions
            )

            # update datum cache
            uid_index = self._add_datum(segmentation.uid)

            # map each groundtruth annotation to its label index
            groundtruth_labels = np.full(
                len(segmentation.groundtruths), fill_value=-1
            )
            for idx, groundtruth in enumerate(segmentation.groundtruths):
                label_idx = self._add_label(groundtruth.label)
                groundtruth_labels[idx] = label_idx
                # assumes mask is boolean so sum() counts covered pixels
                # — TODO confirm against the annotation module
                self.groundtruth_count[label_idx][
                    uid_index
                ] += groundtruth.mask.sum()

            # map each prediction annotation to its label index
            prediction_labels = np.full(
                len(segmentation.predictions), fill_value=-1
            )
            for idx, prediction in enumerate(segmentation.predictions):
                label_idx = self._add_label(prediction.label)
                prediction_labels[idx] = label_idx
                self.prediction_count[label_idx][
                    uid_index
                ] += prediction.mask.sum()

            # flatten every mask into a (n_annotations, n_pixels) array
            # NOTE(review): np.stack raises on an empty annotation list —
            # presumably Segmentation guarantees at least one of each; verify.
            combined_groundtruths = np.stack(
                [
                    groundtruth.mask.flatten()
                    for groundtruth in segmentation.groundtruths
                ],
                axis=0,
            )
            combined_predictions = np.stack(
                [
                    prediction.mask.flatten()
                    for prediction in segmentation.predictions
                ],
                axis=0,
            )

            # matrix size reflects the label count at this point in time;
            # finalize() pads all matrices to a common shape
            self.matrices.append(
                compute_intermediate_confusion_matrices(
                    groundtruths=combined_groundtruths,
                    predictions=combined_predictions,
                    groundtruth_labels=groundtruth_labels,
                    prediction_labels=prediction_labels,
                    n_labels=len(self._evaluator.index_to_label),
                )
            )
            self.pixel_count.append(segmentation.size)

    def finalize(self) -> Evaluator:
        """
        Performs data finalization and some preprocessing steps.

        Returns
        -------
        Evaluator
            A ready-to-use evaluator object.

        Raises
        ------
        ValueError
            If no data has been added.
        """

        if len(self.matrices) == 0:
            raise ValueError("No data available to create evaluator.")

        n_datums = self._evaluator.n_datums
        n_labels = len(self._evaluator.index_to_label)

        self._evaluator.n_labels = n_labels

        # per-datum label metadata: axis 0 is [groundtruth, prediction]
        self._evaluator._label_metadata_per_datum = np.zeros(
            (2, n_datums, n_labels), dtype=np.int32
        )
        for datum_idx in range(n_datums):
            for label_idx in range(n_labels):
                # membership test first so the defaultdict is not mutated
                # by the lookup
                gt_count = (
                    self.groundtruth_count[label_idx].get(datum_idx, 0)
                    if label_idx in self.groundtruth_count
                    else 0
                )
                pd_count = (
                    self.prediction_count[label_idx].get(datum_idx, 0)
                    if label_idx in self.prediction_count
                    else 0
                )
                self._evaluator._label_metadata_per_datum[
                    :, datum_idx, label_idx
                ] = np.array([gt_count, pd_count])

        # aggregate over datums to per-label [groundtruth, prediction] totals
        self._evaluator._label_metadata = np.array(
            [
                [
                    np.sum(
                        self._evaluator._label_metadata_per_datum[
                            0, :, label_idx
                        ]
                    ),
                    np.sum(
                        self._evaluator._label_metadata_per_datum[
                            1, :, label_idx
                        ]
                    ),
                ]
                for label_idx in range(n_labels)
            ],
            dtype=np.int32,
        )

        self._evaluator._n_pixels_per_datum = np.array(
            self.pixel_count, dtype=np.int32
        )

        # pad the per-datum matrices (built with a growing label set) into a
        # common (n_labels + 1) square shape; the extra row/column presumably
        # accounts for the unlabeled case — confirm in the computation module
        self._evaluator._confusion_matrices = np.zeros(
            (n_datums, n_labels + 1, n_labels + 1), dtype=np.int32
        )
        for idx, matrix in enumerate(self.matrices):
            h, w = matrix.shape
            self._evaluator._confusion_matrices[idx, :h, :w] = matrix

        return self._evaluator
@@ -0,0 +1,119 @@
1
+ from dataclasses import dataclass
2
+ from enum import Enum
3
+
4
+ from valor_lite.schemas import Metric
5
+
6
+
7
class MetricType(Enum):
    """Enumeration of the metric families produced by the segmentation evaluator."""

    Precision = "Precision"
    Recall = "Recall"
    Accuracy = "Accuracy"
    F1 = "F1"
    IoU = "IoU"
    mIoU = "mIoU"
    ConfusionMatrix = "ConfusionMatrix"

    @classmethod
    def base(cls) -> list["MetricType"]:
        """Return the default metric selection: every member, in definition order."""
        return list(cls)
27
+
28
+
29
@dataclass
class _LabelValue:
    """Base class for metrics carrying one float value tied to one label."""

    value: float
    label: str

    @property
    def metric(self) -> Metric:
        """Package this metric as a generic ``Metric`` object."""
        # the concrete subclass name (e.g. "Precision") becomes the type tag
        return Metric(
            type=type(self).__name__,
            value=self.value,
            parameters={"label": self.label},
        )

    def to_dict(self) -> dict:
        """Serialize this metric to a plain dictionary."""
        return self.metric.to_dict()
46
+
47
+
48
class Precision(_LabelValue):
    """Precision metric for a single label."""
50
+
51
+
52
class Recall(_LabelValue):
    """Recall metric for a single label."""
54
+
55
+
56
class F1(_LabelValue):
    """F1 score metric for a single label."""
58
+
59
+
60
class IoU(_LabelValue):
    """Intersection-over-union metric for a single label."""
62
+
63
+
64
@dataclass
class _Value:
    """Base class for metrics carrying a single unlabeled float value."""

    value: float

    @property
    def metric(self) -> Metric:
        """Package this metric as a generic ``Metric`` object."""
        # the concrete subclass name (e.g. "Accuracy") becomes the type tag
        return Metric(type=type(self).__name__, value=self.value, parameters={})

    def to_dict(self) -> dict:
        """Serialize this metric to a plain dictionary."""
        return self.metric.to_dict()
78
+
79
+
80
class Accuracy(_Value):
    """Overall accuracy metric."""
82
+
83
+
84
class mIoU(_Value):
    """Mean intersection-over-union metric."""
86
+
87
+
88
@dataclass
class ConfusionMatrix:
    """
    Confusion-matrix metric with hallucination and missing-prediction
    summaries.

    Attributes
    ----------
    confusion_matrix:
        ground-truth label -> prediction label -> {"iou": value}.
    hallucinations:
        prediction label -> {"percent": pixel percentage}.
    missing_predictions:
        ground-truth label -> {"percent": pixel percentage}.
    """

    confusion_matrix: dict[str, dict[str, dict[str, float]]]
    hallucinations: dict[str, dict[str, float]]
    missing_predictions: dict[str, dict[str, float]]

    @property
    def metric(self) -> Metric:
        """Package this metric as a generic ``Metric`` object."""
        payload = {
            "confusion_matrix": self.confusion_matrix,
            "hallucinations": self.hallucinations,
            "missing_predictions": self.missing_predictions,
        }
        return Metric(
            type=type(self).__name__,
            value=payload,
            parameters={},
        )

    def to_dict(self) -> dict:
        """Serialize this metric to a plain dictionary."""
        return self.metric.to_dict()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: valor-lite
3
- Version: 0.33.6
3
+ Version: 0.33.8
4
4
  Summary: Compute valor metrics directly in your client.
5
5
  License: MIT License
6
6
 
@@ -0,0 +1,22 @@
1
+ valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
3
+ valor_lite/classification/__init__.py,sha256=2wmmziIzUATm7MbmAcPNLXrEX5l4oeD7XBwPd9bWM3Q,506
4
+ valor_lite/classification/annotation.py,sha256=efgwbkOYoujkg9r0CuzMIRfl93DuZV2ldQeW0CBVfeU,478
5
+ valor_lite/classification/computation.py,sha256=qd9K7CcSGmMm_7shfX47_ZIuB-uE2LLiLMZSS_3NJTk,12093
6
+ valor_lite/classification/manager.py,sha256=6YlkqCwuTcAoa40LU0EfXQyZFTwPyRnt5W19qs-T3Xk,24718
7
+ valor_lite/classification/metric.py,sha256=tAxeuTvO5M9gsQWk47hT_Yea6lbfExe16FgkWYmjKGw,3763
8
+ valor_lite/detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
9
+ valor_lite/detection/annotation.py,sha256=kuW8PF4bitPA6cd5Unr5zqgw8nLnvM4BY0jZW14WFAI,4396
10
+ valor_lite/detection/computation.py,sha256=7rOfVlYDadXcJ1_S0FJRF3IPigcsR7guk_0rXeIdAOE,26919
11
+ valor_lite/detection/manager.py,sha256=QzSkiGUYJ4Z6o1QZKPe36Yc74u052h8g36-fUowRlv0,42620
12
+ valor_lite/detection/metric.py,sha256=Wz4xHg0A7E7BFdUPMnrKnz63P6sD7pwHYM1UQ9_dsgY,8872
13
+ valor_lite/segmentation/__init__.py,sha256=IdarTHKUuUMDvMBmInQu12Mm_NMCbql6Hf0nL5b56Ak,424
14
+ valor_lite/segmentation/annotation.py,sha256=8m5ZsRAcDwQVEvn576Ec1DiNwvtxdKQJXpYoToRugfk,1412
15
+ valor_lite/segmentation/computation.py,sha256=iJkEmTNmw9HwQCxSnpJkQsAdVcFriGhhu_WMks6D7tU,5122
16
+ valor_lite/segmentation/manager.py,sha256=zn-l5NyrkaRck8dq5VOKCJFOtylSXsWuiQ_eBV8m8JM,16921
17
+ valor_lite/segmentation/metric.py,sha256=hNn3lB-XJK5YW3itOWvvRMl6hZyWHMlC6smyeVhxeJE,2275
18
+ valor_lite-0.33.8.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
19
+ valor_lite-0.33.8.dist-info/METADATA,sha256=MRi4I1Bu8ojuc9aUSWzkaaH9l5JMRUmenvHdo6bh50o,1865
20
+ valor_lite-0.33.8.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
21
+ valor_lite-0.33.8.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
22
+ valor_lite-0.33.8.dist-info/RECORD,,
@@ -1,17 +0,0 @@
1
- valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
3
- valor_lite/classification/__init__.py,sha256=2wmmziIzUATm7MbmAcPNLXrEX5l4oeD7XBwPd9bWM3Q,506
4
- valor_lite/classification/annotation.py,sha256=rMDTvPHdAlvJ6_M2kRrnJQnj1oqKe-lxbncWC7Q50RE,345
5
- valor_lite/classification/computation.py,sha256=pqAPX6zFlaWyYBnve4sdgJLba_m7smeaqZAsEBvi1no,12776
6
- valor_lite/classification/manager.py,sha256=qAEGBwb6_Kj2Q0-B3NnRiSfJvS_gBSDJYsT6r8X-g_o,27870
7
- valor_lite/classification/metric.py,sha256=00qmagf-zQXUZ1qJW_UmN1k35aaYK_7GEM292Tc_cBE,4256
8
- valor_lite/detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
9
- valor_lite/detection/annotation.py,sha256=BspLc3SjWXj6qYlGGpzDPHEZ8j7CiFzIL5cNlk0WCAM,2732
10
- valor_lite/detection/computation.py,sha256=HDFfPTFQN2obm-g570KKDf7SP9V-h09OyMtFEJXsoTA,26323
11
- valor_lite/detection/manager.py,sha256=dHDGNtYRd_u9iCOTrLpqssdHrepi2N3dlx415kaeCM4,52860
12
- valor_lite/detection/metric.py,sha256=RYKN17nEFRIZIqmotQa6OyNnU0nkjXyfFIclX_5hGpY,9933
13
- valor_lite-0.33.6.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
14
- valor_lite-0.33.6.dist-info/METADATA,sha256=pdZDGSu9gKinRjZo9G-qFmYVLwBw8mqVb0gs6IJVmZE,1865
15
- valor_lite-0.33.6.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
16
- valor_lite-0.33.6.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
17
- valor_lite-0.33.6.dist-info/RECORD,,