valor-lite 0.34.2__py3-none-any.whl → 0.35.0__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective registries. It is provided for informational purposes only and reflects the changes between the two versions.

This version of valor-lite has been flagged as potentially problematic.

@@ -3,6 +3,7 @@ from collections import defaultdict
  import numpy as np
  from numpy.typing import NDArray

+ from valor_lite.object_detection.computation import PairClassification
  from valor_lite.object_detection.metric import Metric, MetricType


@@ -11,38 +12,28 @@ def unpack_precision_recall_into_metric_lists(
  tuple[
  NDArray[np.float64],
  NDArray[np.float64],
- NDArray[np.float64],
- float,
  ],
  tuple[
  NDArray[np.float64],
  NDArray[np.float64],
- NDArray[np.float64],
- float,
  ],
  NDArray[np.float64],
  NDArray[np.float64],
- NDArray[np.float64],
  ],
  iou_thresholds: list[float],
  score_thresholds: list[float],
- index_to_label: dict[int, str],
+ index_to_label: list[str],
  label_metadata: NDArray[np.int32],
  ):
  (
  (
  average_precision,
  mean_average_precision,
- average_precision_average_over_ious,
- mean_average_precision_average_over_ious,
  ),
  (
  average_recall,
  mean_average_recall,
- average_recall_averaged_over_scores,
- mean_average_recall_averaged_over_scores,
  ),
- accuracy,
  precision_recall,
  pr_curves,
  ) = results
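The signature change above swaps index_to_label from a dict keyed by integer index to a plain list, so every loop over labels moves from .items() to enumerate(). A minimal sketch of the two iteration styles, using hypothetical label names:

# Minimal sketch with hypothetical labels: 0.34.2 kept a dict keyed by index,
# 0.35.0 stores labels positionally, so iteration moves from .items() to enumerate().
index_to_label_old = {0: "cat", 1: "dog"}  # dict[int, str]
index_to_label_new = ["cat", "dog"]        # list[str]

old_pairs = list(index_to_label_old.items())
new_pairs = list(enumerate(index_to_label_new))
assert old_pairs == new_pairs  # both yield [(0, "cat"), (1, "dog")]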
@@ -56,7 +47,7 @@ def unpack_precision_recall_into_metric_lists(
  label=label,
  )
  for iou_idx, iou_threshold in enumerate(iou_thresholds)
- for label_idx, label in index_to_label.items()
+ for label_idx, label in enumerate(index_to_label)
  if int(label_metadata[label_idx, 0]) > 0
  ]

@@ -68,19 +59,21 @@ def unpack_precision_recall_into_metric_lists(
  for iou_idx, iou_threshold in enumerate(iou_thresholds)
  ]

+ # TODO - (c.zaloom) will be removed in the future
  metrics[MetricType.APAveragedOverIOUs] = [
  Metric.average_precision_averaged_over_IOUs(
- value=float(average_precision_average_over_ious[label_idx]),
+ value=float(average_precision.mean(axis=0)[label_idx]),
  iou_thresholds=iou_thresholds,
  label=label,
  )
- for label_idx, label in index_to_label.items()
+ for label_idx, label in enumerate(index_to_label)
  if int(label_metadata[label_idx, 0]) > 0
  ]

+ # TODO - (c.zaloom) will be removed in the future
  metrics[MetricType.mAPAveragedOverIOUs] = [
  Metric.mean_average_precision_averaged_over_IOUs(
- value=float(mean_average_precision_average_over_ious),
+ value=float(mean_average_precision.mean()),
  iou_thresholds=iou_thresholds,
  )
  ]
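In 0.35.0 the averaged-over-IOUs values are no longer returned precomputed; they are derived on the fly by averaging the per-IOU results. A small NumPy sketch of that reduction, assuming average_precision is shaped (n_ious, n_labels) and mean_average_precision is shaped (n_ious,), as the indexing above implies:

import numpy as np

# Sketch only: array shapes are assumptions inferred from the diff above.
average_precision = np.array([[0.9, 0.5], [0.7, 0.3]])  # 2 IOU thresholds x 2 labels
mean_average_precision = average_precision.mean(axis=1)  # per-IOU mAP

ap_over_ious = average_precision.mean(axis=0)  # per-label, feeds APAveragedOverIOUs
map_over_ious = mean_average_precision.mean()  # scalar, feeds mAPAveragedOverIOUs

assert np.allclose(ap_over_ious, [0.8, 0.4])
assert np.isclose(map_over_ious, 0.6)

The same pattern covers the recall hunks below, where average_recall.mean(axis=0) and mean_average_recall.mean() replace the previously precomputed averaged-over-scores arrays.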
@@ -93,7 +86,7 @@ def unpack_precision_recall_into_metric_lists(
  label=label,
  )
  for score_idx, score_threshold in enumerate(score_thresholds)
- for label_idx, label in index_to_label.items()
+ for label_idx, label in enumerate(index_to_label)
  if int(label_metadata[label_idx, 0]) > 0
  ]

@@ -106,35 +99,27 @@ def unpack_precision_recall_into_metric_lists(
  for score_idx, score_threshold in enumerate(score_thresholds)
  ]

+ # TODO - (c.zaloom) will be removed in the future
  metrics[MetricType.ARAveragedOverScores] = [
  Metric.average_recall_averaged_over_scores(
- value=float(average_recall_averaged_over_scores[label_idx]),
+ value=float(average_recall.mean(axis=0)[label_idx]),
  score_thresholds=score_thresholds,
  iou_thresholds=iou_thresholds,
  label=label,
  )
- for label_idx, label in index_to_label.items()
+ for label_idx, label in enumerate(index_to_label)
  if int(label_metadata[label_idx, 0]) > 0
  ]

+ # TODO - (c.zaloom) will be removed in the future
  metrics[MetricType.mARAveragedOverScores] = [
  Metric.mean_average_recall_averaged_over_scores(
- value=float(mean_average_recall_averaged_over_scores),
+ value=float(mean_average_recall.mean()),
  score_thresholds=score_thresholds,
  iou_thresholds=iou_thresholds,
  )
  ]

- metrics[MetricType.Accuracy] = [
- Metric.accuracy(
- value=float(accuracy[iou_idx, score_idx]),
- iou_threshold=iou_threshold,
- score_threshold=score_threshold,
- )
- for iou_idx, iou_threshold in enumerate(iou_thresholds)
- for score_idx, score_threshold in enumerate(score_thresholds)
- ]
-
  metrics[MetricType.PrecisionRecallCurve] = [
  Metric.precision_recall_curve(
  precisions=pr_curves[iou_idx, label_idx, :, 0].tolist(), # type: ignore[reportArgumentType]
@@ -143,12 +128,11 @@ def unpack_precision_recall_into_metric_lists(
  label=label,
  )
  for iou_idx, iou_threshold in enumerate(iou_thresholds)
- for label_idx, label in index_to_label.items()
+ for label_idx, label in enumerate(index_to_label)
  if label_metadata[label_idx, 0] > 0
  ]

- for label_idx, label in index_to_label.items():
-
+ for label_idx, label in enumerate(index_to_label):
  if label_metadata[label_idx, 0] == 0:
  continue

@@ -192,315 +176,148 @@ def unpack_precision_recall_into_metric_lists(
  return metrics


- def _convert_example_to_dict(box: NDArray[np.float16]) -> dict[str, float]:
- """
- Converts a cached bounding box example to dictionary format.
- """
- return {
- "xmin": float(box[0]),
- "xmax": float(box[1]),
- "ymin": float(box[2]),
- "ymax": float(box[3]),
- }
-
-
- def _unpack_confusion_matrix_value(
- confusion_matrix: NDArray[np.float64],
- number_of_labels: int,
- number_of_examples: int,
- index_to_uid: dict[int, str],
- index_to_label: dict[int, str],
- groundtruth_examples: dict[int, NDArray[np.float16]],
- prediction_examples: dict[int, NDArray[np.float16]],
- ) -> dict[
- str,
- dict[
- str,
- dict[
- str,
- int
- | list[
- dict[
- str,
- str | dict[str, float] | float,
- ]
- ],
- ],
- ],
- ]:
- """
- Unpacks a numpy array of confusion matrix counts and examples.
- """
-
- datum_idx = lambda gt_label_idx, pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
- confusion_matrix[
- gt_label_idx,
- pd_label_idx,
- example_idx * 4 + 1,
- ]
+ def _create_empty_confusion_matrix(index_to_labels: list[str]):
+ unmatched_ground_truths = dict()
+ unmatched_predictions = dict()
+ confusion_matrix = dict()
+ for label in index_to_labels:
+ unmatched_ground_truths[label] = {"count": 0, "examples": []}
+ unmatched_predictions[label] = {"count": 0, "examples": []}
+ confusion_matrix[label] = {}
+ for plabel in index_to_labels:
+ confusion_matrix[label][plabel] = {"count": 0, "examples": []}
+ return (
+ confusion_matrix,
+ unmatched_predictions,
+ unmatched_ground_truths,
  )

- groundtruth_idx = lambda gt_label_idx, pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
- confusion_matrix[
- gt_label_idx,
- pd_label_idx,
- example_idx * 4 + 2,
- ]
- )

- prediction_idx = lambda gt_label_idx, pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
- confusion_matrix[
- gt_label_idx,
- pd_label_idx,
- example_idx * 4 + 3,
- ]
- )
+ def _unpack_confusion_matrix(
+ ids: NDArray[np.int32],
+ mask_matched: NDArray[np.bool_],
+ mask_fp_unmatched: NDArray[np.bool_],
+ mask_fn_unmatched: NDArray[np.bool_],
+ index_to_datum_id: list[str],
+ index_to_groundtruth_id: list[str],
+ index_to_prediction_id: list[str],
+ index_to_label: list[str],
+ iou_threhsold: float,
+ score_threshold: float,
+ ):
+ (
+ confusion_matrix,
+ unmatched_predictions,
+ unmatched_ground_truths,
+ ) = _create_empty_confusion_matrix(index_to_label)

- score_idx = lambda gt_label_idx, pd_label_idx, example_idx: float( # noqa: E731 - lambda fn
- confusion_matrix[
- gt_label_idx,
- pd_label_idx,
- example_idx * 4 + 4,
- ]
+ unique_matches = np.unique(
+ ids[np.ix_(mask_matched, (0, 1, 2, 3, 4))], axis=0 # type: ignore - numpy ix_ typing
  )
-
- return {
- index_to_label[gt_label_idx]: {
- index_to_label[pd_label_idx]: {
- "count": max(
- int(confusion_matrix[gt_label_idx, pd_label_idx, 0]),
- 0,
- ),
- "examples": [
- {
- "datum": index_to_uid[
- datum_idx(gt_label_idx, pd_label_idx, example_idx)
- ],
- "groundtruth": _convert_example_to_dict(
- groundtruth_examples[
- datum_idx(
- gt_label_idx,
- pd_label_idx,
- example_idx,
- )
- ][
- groundtruth_idx(
- gt_label_idx,
- pd_label_idx,
- example_idx,
- )
- ]
- ),
- "prediction": _convert_example_to_dict(
- prediction_examples[
- datum_idx(
- gt_label_idx,
- pd_label_idx,
- example_idx,
- )
- ][
- prediction_idx(
- gt_label_idx,
- pd_label_idx,
- example_idx,
- )
- ]
- ),
- "score": score_idx(
- gt_label_idx, pd_label_idx, example_idx
- ),
- }
- for example_idx in range(number_of_examples)
- if datum_idx(gt_label_idx, pd_label_idx, example_idx) >= 0
- ],
- }
- for pd_label_idx in range(number_of_labels)
- }
- for gt_label_idx in range(number_of_labels)
- }
-
-
- def _unpack_unmatched_predictions_value(
- unmatched_predictions: NDArray[np.float64],
- number_of_labels: int,
- number_of_examples: int,
- index_to_uid: dict[int, str],
- index_to_label: dict[int, str],
- prediction_examples: dict[int, NDArray[np.float16]],
- ) -> dict[
- str,
- dict[
- str,
- int | list[dict[str, str | float | dict[str, float]]],
- ],
- ]:
- """
- Unpacks a numpy array of unmatched_prediction counts and examples.
- """
-
- datum_idx = (
- lambda pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
- unmatched_predictions[
- pd_label_idx,
- example_idx * 3 + 1,
- ]
- )
+ unique_unmatched_predictions = np.unique(
+ ids[np.ix_(mask_fp_unmatched, (0, 2, 4))], axis=0 # type: ignore - numpy ix_ typing
  )
-
- prediction_idx = (
- lambda pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
- unmatched_predictions[
- pd_label_idx,
- example_idx * 3 + 2,
- ]
- )
+ unique_unmatched_groundtruths = np.unique(
+ ids[np.ix_(mask_fn_unmatched, (0, 1, 3))], axis=0 # type: ignore - numpy ix_ typing
  )

- score_idx = (
- lambda pd_label_idx, example_idx: float( # noqa: E731 - lambda fn
- unmatched_predictions[
- pd_label_idx,
- example_idx * 3 + 3,
- ]
- )
- )
+ n_matched = unique_matches.shape[0]
+ n_unmatched_predictions = unique_unmatched_predictions.shape[0]
+ n_unmatched_groundtruths = unique_unmatched_groundtruths.shape[0]
+ n_max = max(n_matched, n_unmatched_groundtruths, n_unmatched_predictions)

- return {
- index_to_label[pd_label_idx]: {
- "count": max(
- int(unmatched_predictions[pd_label_idx, 0]),
- 0,
- ),
- "examples": [
+ for idx in range(n_max):
+ if idx < n_unmatched_groundtruths:
+ label = index_to_label[unique_unmatched_groundtruths[idx, 2]]
+ unmatched_ground_truths[label]["count"] += 1
+ unmatched_ground_truths[label]["examples"].append(
  {
- "datum": index_to_uid[
- datum_idx(pd_label_idx, example_idx)
+ "datum_id": index_to_datum_id[
+ unique_unmatched_groundtruths[idx, 0]
+ ],
+ "ground_truth_id": index_to_groundtruth_id[
+ unique_unmatched_groundtruths[idx, 1]
  ],
- "prediction": _convert_example_to_dict(
- prediction_examples[
- datum_idx(pd_label_idx, example_idx)
- ][prediction_idx(pd_label_idx, example_idx)]
- ),
- "score": score_idx(pd_label_idx, example_idx),
  }
- for example_idx in range(number_of_examples)
- if datum_idx(pd_label_idx, example_idx) >= 0
- ],
- }
- for pd_label_idx in range(number_of_labels)
- }
-
-
- def _unpack_unmatched_ground_truths_value(
- unmatched_ground_truths: NDArray[np.int32],
- number_of_labels: int,
- number_of_examples: int,
- index_to_uid: dict[int, str],
- index_to_label: dict[int, str],
- groundtruth_examples: dict[int, NDArray[np.float16]],
- ) -> dict[str, dict[str, int | list[dict[str, str | dict[str, float]]]]]:
- """
- Unpacks a numpy array of unmatched ground truth counts and examples.
- """
-
- datum_idx = (
- lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
- unmatched_ground_truths[
- gt_label_idx,
- example_idx * 2 + 1,
- ]
- )
- )
-
- groundtruth_idx = (
- lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
- unmatched_ground_truths[
- gt_label_idx,
- example_idx * 2 + 2,
- ]
- )
- )
-
- return {
- index_to_label[gt_label_idx]: {
- "count": max(
- int(unmatched_ground_truths[gt_label_idx, 0]),
- 0,
- ),
- "examples": [
+ )
+ if idx < n_unmatched_predictions:
+ label = index_to_label[unique_unmatched_predictions[idx, 2]]
+ unmatched_predictions[label]["count"] += 1
+ unmatched_predictions[label]["examples"].append(
  {
- "datum": index_to_uid[
- datum_idx(gt_label_idx, example_idx)
+ "datum_id": index_to_datum_id[
+ unique_unmatched_predictions[idx, 0]
+ ],
+ "prediction_id": index_to_prediction_id[
+ unique_unmatched_predictions[idx, 1]
  ],
- "groundtruth": _convert_example_to_dict(
- groundtruth_examples[
- datum_idx(gt_label_idx, example_idx)
- ][groundtruth_idx(gt_label_idx, example_idx)]
- ),
  }
- for example_idx in range(number_of_examples)
- if datum_idx(gt_label_idx, example_idx) >= 0
- ],
- }
- for gt_label_idx in range(number_of_labels)
- }
+ )
+ if idx < n_matched:
+ glabel = index_to_label[unique_matches[idx, 3]]
+ plabel = index_to_label[unique_matches[idx, 4]]
+ confusion_matrix[glabel][plabel]["count"] += 1
+ confusion_matrix[glabel][plabel]["examples"].append(
+ {
+ "datum_id": index_to_datum_id[unique_matches[idx, 0]],
+ "ground_truth_id": index_to_groundtruth_id[
+ unique_matches[idx, 1]
+ ],
+ "prediction_id": index_to_prediction_id[
+ unique_matches[idx, 2]
+ ],
+ }
+ )
+
+ return Metric.confusion_matrix(
+ confusion_matrix=confusion_matrix,
+ unmatched_ground_truths=unmatched_ground_truths,
+ unmatched_predictions=unmatched_predictions,
+ iou_threshold=iou_threhsold,
+ score_threshold=score_threshold,
+ )


  def unpack_confusion_matrix_into_metric_list(
- results: tuple[
- NDArray[np.float64],
- NDArray[np.float64],
- NDArray[np.int32],
- ],
+ results: NDArray[np.uint8],
+ detailed_pairs: NDArray[np.float64],
  iou_thresholds: list[float],
  score_thresholds: list[float],
- number_of_examples: int,
- index_to_label: dict[int, str],
- index_to_uid: dict[int, str],
- groundtruth_examples: dict[int, NDArray[np.float16]],
- prediction_examples: dict[int, NDArray[np.float16]],
+ index_to_datum_id: list[str],
+ index_to_groundtruth_id: list[str],
+ index_to_prediction_id: list[str],
+ index_to_label: list[str],
  ) -> list[Metric]:
- (
- confusion_matrix,
- unmatched_predictions,
- unmatched_ground_truths,
- ) = results
- n_labels = len(index_to_label)
+
+ ids = detailed_pairs[:, :5].astype(np.int32)
+
+ mask_matched = (
+ np.bitwise_and(
+ results, PairClassification.TP | PairClassification.FP_FN_MISCLF
+ )
+ > 0
+ )
+ mask_fp_unmatched = (
+ np.bitwise_and(results, PairClassification.FP_UNMATCHED) > 0
+ )
+ mask_fn_unmatched = (
+ np.bitwise_and(results, PairClassification.FN_UNMATCHED) > 0
+ )
+
  return [
- Metric.confusion_matrix(
- iou_threshold=iou_threshold,
+ _unpack_confusion_matrix(
+ ids=ids,
+ mask_matched=mask_matched[iou_idx, score_idx],
+ mask_fp_unmatched=mask_fp_unmatched[iou_idx, score_idx],
+ mask_fn_unmatched=mask_fn_unmatched[iou_idx, score_idx],
+ index_to_datum_id=index_to_datum_id,
+ index_to_groundtruth_id=index_to_groundtruth_id,
+ index_to_prediction_id=index_to_prediction_id,
+ index_to_label=index_to_label,
+ iou_threhsold=iou_threshold,
  score_threshold=score_threshold,
- maximum_number_of_examples=number_of_examples,
- confusion_matrix=_unpack_confusion_matrix_value(
- confusion_matrix=confusion_matrix[iou_idx, score_idx, :, :, :],
- number_of_labels=n_labels,
- number_of_examples=number_of_examples,
- index_to_label=index_to_label,
- index_to_uid=index_to_uid,
- groundtruth_examples=groundtruth_examples,
- prediction_examples=prediction_examples,
- ),
- unmatched_predictions=_unpack_unmatched_predictions_value(
- unmatched_predictions=unmatched_predictions[
- iou_idx, score_idx, :, :
- ],
- number_of_labels=n_labels,
- number_of_examples=number_of_examples,
- index_to_label=index_to_label,
- index_to_uid=index_to_uid,
- prediction_examples=prediction_examples,
- ),
- unmatched_ground_truths=_unpack_unmatched_ground_truths_value(
- unmatched_ground_truths=unmatched_ground_truths[
- iou_idx, score_idx, :, :
- ],
- number_of_labels=n_labels,
- number_of_examples=number_of_examples,
- index_to_label=index_to_label,
- index_to_uid=index_to_uid,
- groundtruth_examples=groundtruth_examples,
- ),
  )
  for iou_idx, iou_threshold in enumerate(iou_thresholds)
  for score_idx, score_threshold in enumerate(score_thresholds)
+ if (results[iou_idx, score_idx] != -1).any()
  ]
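The rewritten unpacking works from two arrays rather than precomputed example tensors: results appears to hold a PairClassification bit flag for every candidate ground-truth/prediction pair at each IOU and score threshold, and the indexing in _unpack_confusion_matrix implies that the first five columns of detailed_pairs carry integer ids (datum, ground truth, prediction, ground-truth label, prediction label). Selecting pairs then becomes a bitwise test against the flags followed by de-duplication with np.unique. A minimal sketch of that filtering step, using hypothetical flag values since the PairClassification enum itself lives in computation.py and is not shown in this diff:

import numpy as np
from enum import IntFlag

# Hypothetical stand-in for valor_lite.object_detection.computation.PairClassification;
# the real flag values are defined in computation.py, which this diff does not show.
class PairClassification(IntFlag):
    TP = 1
    FP_FN_MISCLF = 2
    FP_UNMATCHED = 4
    FN_UNMATCHED = 8

# One classification flag per candidate pair at a single IOU/score threshold.
results = np.array([1, 2, 4, 8, 0], dtype=np.uint8)

# Same filtering pattern as the new unpack_confusion_matrix_into_metric_list:
mask_matched = np.bitwise_and(
    results, PairClassification.TP | PairClassification.FP_FN_MISCLF
) > 0
mask_fp_unmatched = np.bitwise_and(results, PairClassification.FP_UNMATCHED) > 0

print(mask_matched)       # [ True  True False False False]
print(mask_fp_unmatched)  # [False False  True False False]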
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: valor-lite
- Version: 0.34.2
+ Version: 0.35.0
  Summary: Evaluate machine learning models.
  Project-URL: homepage, https://www.striveworks.com
  Requires-Python: >=3.10
@@ -9,12 +9,12 @@ valor_lite/classification/manager.py,sha256=cZ6-DKao59QqF0JF_U26tBoydpCElAAH8rRy
  valor_lite/classification/metric.py,sha256=_mW3zynmpW8jUIhK2OeX4usdftHgHM9_l7EAbEe2N3w,12288
  valor_lite/classification/numpy_compatibility.py,sha256=roqtTetsm1_HxuaejrthQdydjsRIy-FpXpGb86cLh_E,365
  valor_lite/classification/utilities.py,sha256=eG-Qhd213uf2GXuuqsPxCgBRBFV-z_ADbzneF1kE368,6964
- valor_lite/object_detection/__init__.py,sha256=Ql8rju2q7y0Zd9zFvtBJDRhgQFDm1RSYkTsyH3ZE6pA,648
- valor_lite/object_detection/annotation.py,sha256=x9bsl8b75yvkMByXXiIYI9d9T03olDqtykSvKJc3aFw,7729
- valor_lite/object_detection/computation.py,sha256=zfVTl_TDK3rho3282VcruTvBK6DqbxduP7tE7esMFUY,28345
- valor_lite/object_detection/manager.py,sha256=uo9o0gWBQUkTTgwTluhXk0ouVDW8qiyrqTwJD6PJDKE,23043
- valor_lite/object_detection/metric.py,sha256=npK2sxiwCUTKlRlFym1AlZTvP9herf9lakbsBDwljGM,24901
- valor_lite/object_detection/utilities.py,sha256=42RRyP6L3eWtDY_f7qs7f0WTjhcibmUBu2I4yAwupF0,16456
+ valor_lite/object_detection/__init__.py,sha256=fk1f1i-r7cu-2JioP89XJt3ZNlpH02V5g5jgxFg3_W4,295
+ valor_lite/object_detection/annotation.py,sha256=LVec-rIk408LuFxcOoIkPk0QZMWSSxbmsady4wapC1s,7007
+ valor_lite/object_detection/computation.py,sha256=P4GD-Ho1n4dUiiUMLVCiDvIDJoeE3e3U1ZPzXUG4GE4,22335
+ valor_lite/object_detection/manager.py,sha256=1jGbSoaf5XCvUz716CWnmx8YIF6WJCAiFOW1UUksSz0,28172
+ valor_lite/object_detection/metric.py,sha256=sUYSZwXYfIyfmXG6_7Tje1_ZL_QwvecPq85jrGmwOWE,22739
+ valor_lite/object_detection/utilities.py,sha256=tNdv5dL7JhzOamGQkZ8x3ocZoTwPI6K8rcRAGMhp2nc,11217
  valor_lite/semantic_segmentation/__init__.py,sha256=BhTUbwbdJa1FdS4ZA3QSIZ8TuJmdGGLGCd5hX6SzKa4,297
  valor_lite/semantic_segmentation/annotation.py,sha256=xd2qJyIeTW8CT_Goyu3Kvl_51b9b6D3WvUfqwShR0Sk,4990
  valor_lite/semantic_segmentation/benchmark.py,sha256=iVdxUo9LgDbbXUa6eRhZ49LOYw-yyr2W4p9FP3KHg0k,3848
@@ -34,7 +34,7 @@ valor_lite/text_generation/llm/instructions.py,sha256=fz2onBZZWcl5W8iy7zEWkPGU9N
  valor_lite/text_generation/llm/integrations.py,sha256=-rTfdAjq1zH-4ixwYuMQEOQ80pIFzMTe0BYfroVx3Pg,6974
  valor_lite/text_generation/llm/utilities.py,sha256=bjqatGgtVTcl1PrMwiDKTYPGJXKrBrx7PDtzIblGSys,1178
  valor_lite/text_generation/llm/validators.py,sha256=Wzr5RlfF58_2wOU-uTw7C8skan_fYdhy4Gfn0jSJ8HM,2700
- valor_lite-0.34.2.dist-info/METADATA,sha256=hHwCwG9A_jQzo4tTo_LABVpK3eaFvWExPqEgpT1nQLQ,5062
- valor_lite-0.34.2.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- valor_lite-0.34.2.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
- valor_lite-0.34.2.dist-info/RECORD,,
+ valor_lite-0.35.0.dist-info/METADATA,sha256=MjRfyaj-XYIoJrzF7GhHKUy0tiWY6L5hzWvzu3uC-3U,5062
+ valor_lite-0.35.0.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+ valor_lite-0.35.0.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+ valor_lite-0.35.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.0)
+ Generator: setuptools (80.8.0)
  Root-Is-Purelib: true
  Tag: py3-none-any