valor-lite 0.33.16-py3-none-any.whl → 0.33.18-py3-none-any.whl
- valor_lite/object_detection/annotation.py +0 -24
- valor_lite/object_detection/manager.py +77 -82
- valor_lite/profiling.py +374 -0
- valor_lite/semantic_segmentation/__init__.py +2 -1
- valor_lite/semantic_segmentation/annotation.py +84 -1
- valor_lite/semantic_segmentation/benchmark.py +151 -0
- valor_lite/semantic_segmentation/computation.py +13 -26
- valor_lite/semantic_segmentation/manager.py +6 -2
- {valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/METADATA +1 -1
- {valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/RECORD +13 -11
- {valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/WHEEL +1 -1
- {valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/LICENSE +0 -0
- {valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/top_level.txt +0 -0
valor_lite/object_detection/annotation.py

@@ -142,18 +142,6 @@ class Polygon:
         xmin, ymin, xmax, ymax = self.shape.bounds
         return (xmin, xmax, ymin, ymax)
 
-    @property
-    def annotation(self) -> ShapelyPolygon:
-        """
-        Returns the annotation's data representation.
-
-        Returns
-        -------
-        shapely.geometry.Polygon
-            The polygon shape.
-        """
-        return self.shape
-
 
 @dataclass
 class Bitmask:

@@ -222,18 +210,6 @@ class Bitmask:
         rows, cols = np.nonzero(self.mask)
         return (cols.min(), cols.max(), rows.min(), rows.max())
 
-    @property
-    def annotation(self) -> NDArray[np.bool_]:
-        """
-        Returns the annotation's data representation.
-
-        Returns
-        -------
-        NDArray[np.bool_]
-            The binary mask array.
-        """
-        return self.mask
-
 
 @dataclass
 class Detection:
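These removals pair with the manager rewrite below: the `annotation` property existed only so the DataLoader could fetch each annotation's raw geometry when computing IOUs, and the refactored loaders now read `gt.extrema`, `gt.shape`, and `gt.mask` directly instead.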
valor_lite/object_detection/manager.py

@@ -1,17 +1,10 @@
 from collections import defaultdict
 from dataclasses import dataclass
-from typing import Type
 
 import numpy as np
-import valor_lite.object_detection.annotation as annotation
 from numpy.typing import NDArray
 from tqdm import tqdm
-from valor_lite.object_detection.annotation import (
-    Bitmask,
-    BoundingBox,
-    Detection,
-    Polygon,
-)
+from valor_lite.object_detection.annotation import Detection
 from valor_lite.object_detection.computation import (
     compute_bbox_iou,
     compute_bitmask_iou,
@@ -341,6 +334,10 @@ class Evaluator:
         return metrics
 
 
+def defaultdict_int():
+    return defaultdict(int)
+
+
 class DataLoader:
     """
     Object Detection DataLoader
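Both DataLoaders in this release swap their inline `defaultdict(...)` factories for this module-level `defaultdict_int`. A likely motivation (an assumption; the diff does not state it) is that the release also adds spawn-based subprocess profiling, and `pickle` can serialize a module-level function by reference but not a lambda:

```python
# Sketch of the assumed rationale: a lambda default factory makes the
# whole defaultdict unpicklable, a module-level function does not.
import pickle
from collections import defaultdict

def defaultdict_int():
    return defaultdict(int)

pickle.dumps(defaultdict(defaultdict_int))  # fine: pickled by qualified name
try:
    pickle.dumps(defaultdict(lambda: defaultdict(int)))
except (pickle.PicklingError, AttributeError) as e:
    print(e)  # lambdas cannot be pickled
```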
@@ -349,8 +346,8 @@ class DataLoader:
     def __init__(self):
         self._evaluator = Evaluator()
         self.pairs: list[NDArray[np.float64]] = list()
-        self.groundtruth_count = defaultdict(
-        self.prediction_count = defaultdict(
+        self.groundtruth_count = defaultdict(defaultdict_int)
+        self.prediction_count = defaultdict(defaultdict_int)
 
     def _add_datum(self, uid: str) -> int:
         """
@@ -396,74 +393,47 @@ class DataLoader:
 
         return self._evaluator.label_to_index[label]
 
-    def
+    def _cache_pairs(
         self,
         uid_index: int,
         groundtruths: list,
         predictions: list,
-
+        ious: NDArray[np.float64],
     ) -> None:
         """
         Compute IOUs between groundtruths and preditions before storing as pairs.
 
         Parameters
         ----------
-        uid_index: int
+        uid_index : int
             The index of the detection.
-        groundtruths: list
+        groundtruths : list
             A list of groundtruths.
-        predictions: list
+        predictions : list
             A list of predictions.
-
-
+        ious : NDArray[np.float64]
+            An array with shape (n_preds, n_gts) containing IOUs.
         """
 
-        pairs = list()
-        n_predictions = len(predictions)
-        n_groundtruths = len(groundtruths)
-
-        all_pairs = np.array(
-            [
-                np.array([gann, pann])
-                for _, _, _, pann in predictions
-                for _, _, gann in groundtruths
-            ]
-        )
-
-        match annotation_type:
-            case annotation.BoundingBox:
-                ious = compute_bbox_iou(all_pairs)
-            case annotation.Polygon:
-                ious = compute_polygon_iou(all_pairs)
-            case annotation.Bitmask:
-                ious = compute_bitmask_iou(all_pairs)
-            case _:
-                raise ValueError(
-                    f"Invalid annotation type `{annotation_type}`."
-                )
-
-        ious = ious.reshape(n_predictions, n_groundtruths)
         predictions_with_iou_of_zero = np.where((ious < 1e-9).all(axis=1))[0]
         groundtruths_with_iou_of_zero = np.where((ious < 1e-9).all(axis=0))[0]
 
-        pairs
-            ]
-        )
+        pairs = [
+            np.array(
+                [
+                    float(uid_index),
+                    float(gidx),
+                    float(pidx),
+                    ious[pidx, gidx],
+                    float(glabel),
+                    float(plabel),
+                    float(score),
+                ]
+            )
+            for pidx, plabel, score in predictions
+            for gidx, glabel in groundtruths
+            if ious[pidx, gidx] >= 1e-9
+        ]
         pairs.extend(
             [
                 np.array(
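Each cached pair is now a flat float64 row; the layout, taken directly from the comprehension above, is:

```python
# One cached pair row, as assembled by the new _cache_pairs (illustrative values):
import numpy as np

pair = np.array(
    [
        0.0,   # datum uid index
        2.0,   # groundtruth index (gidx)
        5.0,   # prediction index (pidx)
        0.73,  # ious[pidx, gidx]
        1.0,   # groundtruth label index
        1.0,   # prediction label index
        0.88,  # prediction score
    ]
)
```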
@@ -496,13 +466,12 @@ class DataLoader:
                 for index in groundtruths_with_iou_of_zero
             ]
         )
-
         self.pairs.append(np.array(pairs))
 
     def _add_data(
         self,
         detections: list[Detection],
-
+        detection_ious: list[NDArray[np.float64]],
         show_progress: bool = False,
     ):
         """
@@ -512,13 +481,15 @@ class DataLoader:
         ----------
         detections : list[Detection]
             A list of Detection objects.
-
-
+        detection_ious : list[NDArray[np.float64]]
+            A list of arrays containing IOUs per detection.
         show_progress : bool, default=False
             Toggle for tqdm progress bar.
         """
         disable_tqdm = not show_progress
-        for detection in tqdm(
+        for detection, ious in tqdm(
+            zip(detections, detection_ious), disable=disable_tqdm
+        ):
 
             # update metadata
             self._evaluator.n_datums += 1
@@ -541,11 +512,6 @@ class DataLoader:
             predictions = list()
 
             for gidx, gann in enumerate(detection.groundtruths):
-                if not isinstance(gann, annotation_type):
-                    raise ValueError(
-                        f"Expected {annotation_type}, but annotation is of type {type(gann)}."
-                    )
-
                 self._evaluator.groundtruth_examples[uid_index][
                     gidx
                 ] = gann.extrema
@@ -556,16 +522,10 @@ class DataLoader:
                     (
                         gidx,
                         label_idx,
-                        gann.annotation,
                     )
                 )
 
             for pidx, pann in enumerate(detection.predictions):
-                if not isinstance(pann, annotation_type):
-                    raise ValueError(
-                        f"Expected {annotation_type}, but annotation is of type {type(pann)}."
-                    )
-
                 self._evaluator.prediction_examples[uid_index][
                     pidx
                 ] = pann.extrema
@@ -577,15 +537,14 @@ class DataLoader:
                         pidx,
                         label_idx,
                         pscore,
-                        pann.annotation,
                     )
                 )
 
-        self.
+            self._cache_pairs(
                 uid_index=uid_index,
                 groundtruths=groundtruths,
                 predictions=predictions,
-
+                ious=ious,
             )
 
     def add_bounding_boxes(
@@ -603,10 +562,22 @@ class DataLoader:
         show_progress : bool, default=False
             Toggle for tqdm progress bar.
         """
+        ious = [
+            compute_bbox_iou(
+                np.array(
+                    [
+                        [gt.extrema, pd.extrema]
+                        for pd in detection.predictions
+                        for gt in detection.groundtruths
+                    ]
+                )
+            ).reshape(len(detection.predictions), len(detection.groundtruths))
+            for detection in detections
+        ]
         return self._add_data(
             detections=detections,
+            detection_ious=ious,
             show_progress=show_progress,
-            annotation_type=BoundingBox,
         )
 
     def add_polygons(
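The comprehension pairs every prediction with every groundtruth in prediction-major order, which is what makes the trailing `.reshape(n_preds, n_gts)` valid. A small sketch of that ordering:

```python
# Element (pidx, gidx) of the reshaped array is IOU(prediction pidx, groundtruth gidx).
import numpy as np

n_preds, n_gts = 2, 3
flat = np.array([p * 10 + g for p in range(n_preds) for g in range(n_gts)])
grid = flat.reshape(n_preds, n_gts)
assert grid[1, 2] == 12  # row = prediction index, column = groundtruth index
```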
@@ -624,10 +595,22 @@ class DataLoader:
         show_progress : bool, default=False
             Toggle for tqdm progress bar.
         """
+        ious = [
+            compute_polygon_iou(
+                np.array(
+                    [
+                        [gt.shape, pd.shape]  # type: ignore - using the AttributeError as a validator
+                        for pd in detection.predictions
+                        for gt in detection.groundtruths
+                    ]
+                )
+            ).reshape(len(detection.predictions), len(detection.groundtruths))
+            for detection in detections
+        ]
         return self._add_data(
             detections=detections,
+            detection_ious=ious,
             show_progress=show_progress,
-            annotation_type=Polygon,
         )
 
     def add_bitmasks(
@@ -645,10 +628,22 @@ class DataLoader:
         show_progress : bool, default=False
             Toggle for tqdm progress bar.
         """
+        ious = [
+            compute_bitmask_iou(
+                np.array(
+                    [
+                        [gt.mask, pd.mask]  # type: ignore - using the AttributeError as a validator
+                        for pd in detection.predictions
+                        for gt in detection.groundtruths
+                    ]
+                )
+            ).reshape(len(detection.predictions), len(detection.groundtruths))
+            for detection in detections
+        ]
         return self._add_data(
             detections=detections,
+            detection_ious=ious,
             show_progress=show_progress,
-            annotation_type=Bitmask,
         )
 
     def finalize(self) -> Evaluator:
valor_lite/profiling.py
ADDED

@@ -0,0 +1,374 @@
import json
import math
import multiprocessing as mp
import resource
import time
from collections import deque
from multiprocessing import Queue
from typing import Any

from tqdm import tqdm


class BenchmarkError(Exception):
    def __init__(
        self, benchmark: str, error_type: str, error_message: str
    ) -> None:
        super().__init__(
            f"'{benchmark}' raised '{error_type}' with the following message: {error_message}"
        )


def _timeit_subprocess(*args, __fn, __queue: Queue, **kwargs):
    """
    Multiprocessing subprocess that reports either runtime or errors.

    This is handled within a subprocess to protect the benchmark against OOM errors.
    """
    try:
        timer_start = time.perf_counter()
        __fn(*args, **kwargs)
        timer_end = time.perf_counter()
        __queue.put(timer_end - timer_start)
    except Exception as e:
        __queue.put(e)


def create_runtime_profiler(
    time_limit: float | None,
    repeat: int = 1,
):
    """
    Creates a runtime profiler as a decorating function.

    The profiler reports runtime of the wrapped function from a subprocess to protect against OOM errors.

    Parameters
    ----------
    time_limit : float, optional
        An optional time limit to constrain the benchmark.
    repeat : int, default=1
        The number of times to repeat the benchmark to produce an average runtime.
    """
    ctx = mp.get_context("spawn")

    def decorator(fn):
        def wrapper(*args, **kwargs):
            # Record average runtime over repeated runs.
            elapsed = 0
            for _ in range(repeat):
                q = ctx.Queue()
                p = ctx.Process(
                    target=_timeit_subprocess,
                    args=args,
                    kwargs={"__fn": fn, "__queue": q, **kwargs},
                )
                p.start()
                p.join(timeout=time_limit)

                # Check if computation finishes within the timeout
                if p.is_alive():
                    p.terminate()
                    p.join()
                    q.close()
                    q.join_thread()
                    raise TimeoutError(
                        f"Function '{fn.__name__}' did not complete within {time_limit} seconds."
                    )

                # Retrieve the result
                result = q.get(timeout=1)
                if isinstance(result, Exception):
                    raise result
                elif isinstance(result, float):
                    elapsed += result
                else:
                    raise TypeError(type(result).__name__)

            return elapsed / repeat

        return wrapper

    return decorator


def pretty_print_results(results: tuple):
    valid, invalid, permutations = results

    print(
        "====================================================================="
    )
    print("Details")
    print(json.dumps(permutations, indent=4))

    if len(valid) > 0:
        print()
        print("Passed")
        keys = ["complexity", "runtime", *valid[0]["details"].keys()]
        header = " | ".join(f"{header:^15}" for header in keys)
        print(header)
        print("-" * len(header))
        for entry in valid:
            values = [
                entry["complexity"],
                round(entry["runtime"], 4),
                *entry["details"].values(),
            ]
            row = " | ".join(f"{str(value):^15}" for value in values)
            print(row)

    if len(invalid) > 0:
        print()
        print("Failed")
        keys = ["complexity", "error", *invalid[0]["details"].keys(), "msg"]
        header = " | ".join(f"{header:^15}" for header in keys)
        print(header)
        print("-" * len(header))
        for entry in invalid:
            values = [
                entry["complexity"],
                entry["error"],
                *entry["details"].values(),
                entry["msg"],
            ]
            row = " | ".join(f"{str(value):^15}" for value in values)
            print(row)


def _calculate_complexity(params: list[int | tuple[int]]) -> int:
    """
    Basic metric of benchmark complexity.
    """
    flattened_params = [
        math.prod(p) if isinstance(p, tuple) else p for p in params
    ]
    return math.prod(flattened_params)


class Benchmark:
    def __init__(
        self,
        time_limit: float | None,
        memory_limit: int | None,
        *_,
        repeat: int | None = 1,
        verbose: bool = False,
    ):
        self.time_limit = time_limit
        self.memory_limit = memory_limit
        self.repeat = repeat
        self.verbose = verbose

    def get_limits(
        self,
        *_,
        readable: bool = True,
        memory_unit: str = "GB",
        time_unit: str = "seconds",
    ) -> dict[str, str | int | float | None]:
        """
        Returns a dictionary of benchmark limits.

        Parameters
        ----------
        readable : bool, default=True
            Toggles whether the output should be human readable.
        memory_unit : str, default="GB"
            Toggles what unit to display the memory limit with when 'readable=True'.
        time_unit : str, default="seconds"
            Toggles what unit to display the time limit with when 'readable=True'.

        Returns
        -------
        dict[str, str | int | float | None]
            The benchmark limits.
        """

        memory_value = self.memory_limit
        if readable and memory_value is not None:
            match memory_unit:
                case "TB":
                    memory_value /= 1024**4
                case "GB":
                    memory_value /= 1024**3
                case "MB":
                    memory_value /= 1024**2
                case "KB":
                    memory_value /= 1024
                case "B":
                    pass
                case _:
                    valid_set = {"TB", "GB", "MB", "KB", "B"}
                    raise ValueError(
                        f"Expected memory unit to be in the set {valid_set}, received '{memory_unit}'."
                    )
            memory_value = f"{memory_value} {memory_unit}"

        time_value = self.time_limit
        if readable and time_value is not None:
            match time_unit:
                case "minutes":
                    time_value /= 60
                case "seconds":
                    pass
                case "milliseconds":
                    time_value *= 1000
                case _:
                    valid_set = {"minutes", "seconds", "milliseconds"}
                    raise ValueError(
                        f"Expected time unit to be in the set {valid_set}, received '{time_unit}'."
                    )
            time_value = f"{time_value} {time_unit}"

        return {
            "memory_limit": memory_value,
            "time_limit": time_value,
            "repeat": self.repeat,
        }

    @property
    def memory_limit(self) -> int | None:
        """
        The memory limit in bytes (B).
        """
        return self._memory_limit

    @memory_limit.setter
    def memory_limit(self, limit: int | None):
        """
        Stores the memory limit and restricts resources.
        """
        self._memory_limit = limit
        if limit is not None:
            _, hard = resource.getrlimit(resource.RLIMIT_AS)
            resource.setrlimit(resource.RLIMIT_AS, (limit, hard))

    def run(
        self,
        benchmark,
        **kwargs: list[Any],
    ):
        """
        Runs a benchmark with ranges of parameters.

        Parameters
        ----------
        benchmark : Callable
            The benchmark function.
        **kwargs : list[Any]
            Keyword arguments passing lists of parameters to benchmark. The values should be sorted in
            decreasing complexity. For example, if the number of labels is a parameter then a higher
            number of unique labels would be considered "more" complex.

        Example
        -------
        >>> b = Benchmark(
        ...     time_limit=10.0,
        ...     memory_limit=8 * (1024**3),
        ...     repeat=1,
        ...     verbose=False,
        ... )
        >>> results = b.run(
        ...     benchmark=semseg_add_data,
        ...     n_labels=[
        ...         100,
        ...         10,
        ...     ],
        ...     shape=[
        ...         (1000, 1000),
        ...         (100, 100),
        ...     ],
        ... )
        """

        nvars = len(kwargs)
        keys = tuple(kwargs.keys())
        vars = tuple(kwargs[key] for key in keys)

        initial_indices = tuple(0 for _ in range(nvars))
        max_indices = tuple(len(v) for v in vars)
        permutations = math.prod(max_indices)

        # Initialize queue with the starting index (0, ...)
        queue = deque()
        queue.append(initial_indices)

        # Keep track of explored combinations to avoid duplicates
        explored = set()
        explored.add(initial_indices)

        # Store valid combinations that finish within the time limit
        valid_combinations = []
        invalid_combinations = []

        pbar = tqdm(total=math.prod(max_indices), disable=(not self.verbose))
        prev_count = 0
        while queue:

            current_indices = queue.popleft()
            parameters = {
                k: v[current_indices[idx]]
                for idx, (k, v) in enumerate(zip(keys, vars))
            }
            complexity = _calculate_complexity(list(parameters.values()))

            details: dict = {k: str(v) for k, v in parameters.items()}

            # update terminal with status
            count = len(valid_combinations) + len(invalid_combinations)
            pbar.update(count - prev_count)
            prev_count = count

            try:
                runtime = benchmark(
                    time_limit=self.time_limit,
                    repeat=self.repeat,
                    **parameters,
                )
                valid_combinations.append(
                    {
                        "complexity": complexity,
                        "runtime": runtime,
                        "details": details,
                    }
                )
                continue
            except Exception as e:
                invalid_combinations.append(
                    {
                        "complexity": complexity,
                        "error": type(e).__name__,
                        "msg": str(e),
                        "details": details,
                    }
                )

            for idx in range(nvars):
                new_indices = list(current_indices)
                if new_indices[idx] + 1 < max_indices[idx]:
                    new_indices[idx] += 1
                    new_indices_tuple = tuple(new_indices)
                    if new_indices_tuple not in explored:
                        queue.append(new_indices_tuple)
                        explored.add(new_indices_tuple)

        valid_combinations.sort(key=lambda x: -x["complexity"])
        invalid_combinations.sort(key=lambda x: -x["complexity"])

        # clear terminal and display results
        results = (
            valid_combinations,
            invalid_combinations,
            {
                "benchmark": benchmark.__name__,
                "limits": self.get_limits(readable=True),
                "passed": permutations - len(invalid_combinations),
                "failed": len(invalid_combinations),
                "total": permutations,
            },
        )
        pbar.close()
        if self.verbose:
            pretty_print_results(results)

        return results
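A minimal usage sketch of the new profiler, mirroring how the semantic segmentation benchmarks below call it (`slow_op` is a hypothetical stand-in for the function under test):

```python
from valor_lite.profiling import create_runtime_profiler

def slow_op(n: int) -> int:
    # module-level so the "spawn" context can pickle it
    return sum(i * i for i in range(n))

if __name__ == "__main__":  # required when using the "spawn" start method
    profile = create_runtime_profiler(time_limit=5.0, repeat=3)
    average_seconds = profile(slow_op)(1_000_000)  # TimeoutError if a run exceeds 5 s
```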
valor_lite/semantic_segmentation/__init__.py

@@ -1,4 +1,4 @@
-from .annotation import Bitmask, Segmentation
+from .annotation import Bitmask, Segmentation, generate_segmentation
 from .manager import DataLoader, Evaluator
 from .metric import Metric, MetricType
 

@@ -9,4 +9,5 @@ __all__ = [
     "Bitmask",
     "Metric",
     "MetricType",
+    "generate_segmentation",
 ]
valor_lite/semantic_segmentation/annotation.py

@@ -29,7 +29,7 @@ class Bitmask:
     def __post_init__(self):
         if self.mask.dtype != np.bool_:
             raise ValueError(
-                f"Bitmask recieved mask with dtype
+                f"Bitmask recieved mask with dtype '{self.mask.dtype}'."
             )
 
 

@@ -94,3 +94,86 @@ class Segmentation:
 
         self.shape = groundtruth_shape.pop()
         self.size = int(np.prod(np.array(self.shape)))
+
+
+def generate_segmentation(
+    datum_uid: str,
+    number_of_unique_labels: int,
+    mask_height: int,
+    mask_width: int,
+) -> Segmentation:
+    """
+    Generates a semantic segmentation annotation.
+
+    Parameters
+    ----------
+    datum_uid : str
+        The datum UID for the generated segmentation.
+    number_of_unique_labels : int
+        The number of unique labels.
+    mask_height : int
+        The height of the mask in pixels.
+    mask_width : int
+        The width of the mask in pixels.
+
+    Returns
+    -------
+    Segmentation
+        A generated semantic segmenatation annotation.
+    """
+
+    if number_of_unique_labels > 1:
+        common_proba = 0.4 / (number_of_unique_labels - 1)
+        min_proba = min(common_proba, 0.1)
+        labels = [str(i) for i in range(number_of_unique_labels)] + [None]
+        proba = (
+            [0.5]
+            + [common_proba for _ in range(number_of_unique_labels - 1)]
+            + [0.1]
+        )
+    elif number_of_unique_labels == 1:
+        labels = ["0", None]
+        proba = [0.9, 0.1]
+        min_proba = 0.1
+    else:
+        raise ValueError(
+            "The number of unique labels should be greater than zero."
+        )
+
+    probabilities = np.array(proba, dtype=np.float64)
+    weights = (probabilities / min_proba).astype(np.int32)
+
+    indices = np.random.choice(
+        np.arange(len(weights)),
+        size=(mask_height * 2, mask_width),
+        p=probabilities,
+    )
+
+    N = len(labels)
+
+    masks = np.arange(N)[:, None, None] == indices
+
+    gts = []
+    pds = []
+    for lidx in range(N):
+        label = labels[lidx]
+        if label is None:
+            continue
+        gts.append(
+            Bitmask(
+                mask=masks[lidx, :mask_height, :],
+                label=label,
+            )
+        )
+        pds.append(
+            Bitmask(
+                mask=masks[lidx, mask_height:, :],
+                label=label,
+            )
+        )
+
+    return Segmentation(
+        uid=datum_uid,
+        groundtruths=gts,
+        predictions=pds,
+    )
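Two details of the generator worth noting: the index grid is sampled at twice the requested height and then split, top half becoming groundtruth masks and bottom half prediction masks; and the label distribution always sums to one, with label "0" dominant at 0.5, the remaining labels sharing 0.4 evenly, and 0.1 left unlabeled (`None`):

```python
# Sanity check of the sampling distribution used by generate_segmentation:
n = 7
proba = [0.5] + [0.4 / (n - 1)] * (n - 1) + [0.1]
assert abs(sum(proba) - 1.0) < 1e-12
```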
valor_lite/semantic_segmentation/benchmark.py
ADDED

@@ -0,0 +1,151 @@
from valor_lite.profiling import create_runtime_profiler
from valor_lite.semantic_segmentation import DataLoader, generate_segmentation


def benchmark_add_data(
    n_labels: int,
    shape: tuple[int, int],
    time_limit: float | None,
    repeat: int = 1,
) -> float:
    """
    Benchmarks 'Dataloader.add_data' for semantic segmentation.

    Parameters
    ----------
    n_labels : int
        The number of unique labels to generate.
    shape : tuple[int, int]
        The size (h,w) of the mask to generate.
    time_limit : float, optional
        An optional time limit to constrain the benchmark.
    repeat : int
        The number of times to run the benchmark to produce a runtime average.

    Returns
    -------
    float
        The average runtime.
    """

    profile = create_runtime_profiler(
        time_limit=time_limit,
        repeat=repeat,
    )

    elapsed = 0
    for _ in range(repeat):
        data = generate_segmentation(
            datum_uid="uid",
            number_of_unique_labels=n_labels,
            mask_height=shape[0],
            mask_width=shape[1],
        )
        loader = DataLoader()
        elapsed += profile(loader.add_data)([data])
    return elapsed / repeat


def benchmark_finalize(
    n_datums: int,
    n_labels: int,
    time_limit: float | None,
    repeat: int = 1,
):
    """
    Benchmarks 'Dataloader.finalize' for semantic segmentation.

    Parameters
    ----------
    n_datums : int
        The number of datums to generate.
    n_labels : int
        The number of unique labels to generate.
    time_limit : float, optional
        An optional time limit to constrain the benchmark.
    repeat : int
        The number of times to run the benchmark to produce a runtime average.

    Returns
    -------
    float
        The average runtime.
    """

    profile = create_runtime_profiler(
        time_limit=time_limit,
        repeat=repeat,
    )

    elapsed = 0
    for _ in range(repeat):

        data = [
            generate_segmentation(
                datum_uid=str(i),
                number_of_unique_labels=n_labels,
                mask_height=5,
                mask_width=5,
            )
            for i in range(10)
        ]
        loader = DataLoader()
        for datum_idx in range(n_datums):
            segmentation = data[datum_idx % 10]
            segmentation.uid = str(datum_idx)
            loader.add_data([segmentation])
        elapsed += profile(loader.finalize)()
    return elapsed / repeat


def benchmark_evaluate(
    n_datums: int,
    n_labels: int,
    time_limit: float | None,
    repeat: int = 1,
):
    """
    Benchmarks 'Evaluator.evaluate' for semantic segmentation.

    Parameters
    ----------
    n_datums : int
        The number of datums to generate.
    n_labels : int
        The number of unique labels to generate.
    time_limit : float, optional
        An optional time limit to constrain the benchmark.
    repeat : int
        The number of times to run the benchmark to produce a runtime average.

    Returns
    -------
    float
        The average runtime.
    """

    profile = create_runtime_profiler(
        time_limit=time_limit,
        repeat=repeat,
    )

    elapsed = 0
    for _ in range(repeat):

        data = [
            generate_segmentation(
                datum_uid=str(i),
                number_of_unique_labels=n_labels,
                mask_height=5,
                mask_width=5,
            )
            for i in range(10)
        ]
        loader = DataLoader()
        for datum_idx in range(n_datums):
            segmentation = data[datum_idx % 10]
            segmentation.uid = str(datum_idx)
            loader.add_data([segmentation])
        evaluator = loader.finalize()
        elapsed += profile(evaluator.evaluate)()
    return elapsed / repeat
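These benchmark functions are designed to plug into the new `Benchmark` harness; a sketch based on the example in `Benchmark.run`'s docstring:

```python
from valor_lite.profiling import Benchmark
from valor_lite.semantic_segmentation.benchmark import benchmark_add_data

if __name__ == "__main__":
    b = Benchmark(
        time_limit=10.0,
        memory_limit=8 * (1024**3),  # 8 GB
        repeat=1,
        verbose=True,
    )
    valid, invalid, summary = b.run(
        benchmark=benchmark_add_data,
        n_labels=[100, 10],                  # sorted most to least complex
        shape=[(1000, 1000), (100, 100)],
    )
```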
valor_lite/semantic_segmentation/computation.py

@@ -31,9 +31,6 @@ def compute_intermediate_confusion_matrices(
         A 2-D confusion matrix with shape (n_labels + 1, n_labels + 1).
     """
 
-    n_gt_labels = groundtruth_labels.size
-    n_pd_labels = prediction_labels.size
-
     groundtruth_counts = groundtruths.sum(axis=1)
     prediction_counts = predictions.sum(axis=1)
 

@@ -42,33 +39,23 @@ def compute_intermediate_confusion_matrices(
     ).sum()
 
     intersection_counts = np.logical_and(
-        groundtruths
-        predictions
+        groundtruths[:, None, :],
+        predictions[None, :, :],
     ).sum(axis=2)
-
-
-    intersected_prediction_counts = intersection_counts.sum(axis=1)
+    intersected_groundtruth_counts = intersection_counts.sum(axis=1)
+    intersected_prediction_counts = intersection_counts.sum(axis=0)
 
     confusion_matrix = np.zeros((n_labels + 1, n_labels + 1), dtype=np.int32)
     confusion_matrix[0, 0] = background_counts
-        if gidx == 0:
-            confusion_matrix[0, pd_label_idx + 1] = (
-                prediction_counts[pidx]
-                - intersected_prediction_counts[pidx]
-            )
-
-        confusion_matrix[gt_label_idx + 1, 0] = (
-            groundtruth_counts[gidx] - intersected_groundtruth_counts[gidx]
-        )
+    confusion_matrix[
+        np.ix_(groundtruth_labels + 1, prediction_labels + 1)
+    ] = intersection_counts
+    confusion_matrix[0, prediction_labels + 1] = (
+        prediction_counts - intersected_prediction_counts
+    )
+    confusion_matrix[groundtruth_labels + 1, 0] = (
+        groundtruth_counts - intersected_groundtruth_counts
+    )
 
     return confusion_matrix
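This hunk replaces a per-pair Python loop with a single broadcast and one `np.ix_` assignment. A small check of the broadcasting step: `logical_and` over `(n_gt, 1, n_pixels)` and `(1, n_pd, n_pixels)` yields pairwise pixel intersections between every groundtruth mask and every prediction mask:

```python
import numpy as np

groundtruths = np.array([[1, 1, 0, 0], [0, 0, 1, 0]], dtype=bool)  # 2 gt masks
predictions = np.array([[1, 0, 0, 0], [0, 1, 1, 0]], dtype=bool)   # 2 pd masks
intersection_counts = np.logical_and(
    groundtruths[:, None, :], predictions[None, :, :]
).sum(axis=2)
assert intersection_counts.tolist() == [[1, 1], [0, 1]]  # rows: gt, cols: pd
```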
valor_lite/semantic_segmentation/manager.py

@@ -243,6 +243,10 @@ class Evaluator:
         return self.compute_precision_recall_iou(filter_=filter_)
 
 
+def defaultdict_int():
+    return defaultdict(int)
+
+
 class DataLoader:
     """
     Segmentation DataLoader.

@@ -250,8 +254,8 @@ class DataLoader:
 
     def __init__(self):
         self._evaluator = Evaluator()
-        self.groundtruth_count = defaultdict(
-        self.prediction_count = defaultdict(
+        self.groundtruth_count = defaultdict(defaultdict_int)
+        self.prediction_count = defaultdict(defaultdict_int)
         self.matrices = list()
         self.pixel_count = list()
{valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/RECORD

@@ -1,5 +1,6 @@
 valor_lite/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
 valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valor_lite/profiling.py,sha256=TLIROA1qccFw9NoEkMeQcrvvGGO75c4K5yTIWoCUix8,11746
 valor_lite/schemas.py,sha256=pB0MrPx5qFLbwBWDiOUUm-vmXdWvbJLFCBmKgbcbI5g,198
 valor_lite/classification/__init__.py,sha256=8MI8bGwCxYGqRP7KxG7ezhYv4qQ5947XGvvlF8WPM5g,392
 valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113

@@ -8,15 +9,16 @@ valor_lite/classification/manager.py,sha256=8GXZECSx4CBbG5NfPrA19BPENqmrjo-wZBma
 valor_lite/classification/metric.py,sha256=fkAo-_3s4EIRSkyn3owBSf4_Gp6lBK9xdToDYMWmT8A,12236
 valor_lite/classification/utilities.py,sha256=PmQar06Vt-ew4Jvnn0IM63mq730QVTsdRtFdVu1HMFU,6885
 valor_lite/object_detection/__init__.py,sha256=Ql8rju2q7y0Zd9zFvtBJDRhgQFDm1RSYkTsyH3ZE6pA,648
-valor_lite/object_detection/annotation.py,sha256=
+valor_lite/object_detection/annotation.py,sha256=x9bsl8b75yvkMByXXiIYI9d9T03olDqtykSvKJc3aFw,7729
 valor_lite/object_detection/computation.py,sha256=P5ijxEBuZ3mxYjBQy24TiQpGxRmPuS40Gwn44uv0J7M,28064
-valor_lite/object_detection/manager.py,sha256=
+valor_lite/object_detection/manager.py,sha256=utdILUUCx04EWC0_bHGpEPaxcCOhmsOx5lxT9qU1a9s,23033
 valor_lite/object_detection/metric.py,sha256=8QhdauuaRrzE39idetkFYTPxA12wrBalQDIR4IUzEbg,24794
 valor_lite/object_detection/utilities.py,sha256=98VSW-g8EYI8Cdd9KHLHdm6F4fI89jaX5I4z99zny4s,16271
-valor_lite/semantic_segmentation/__init__.py,sha256=
-valor_lite/semantic_segmentation/annotation.py,sha256=
-valor_lite/semantic_segmentation/
-valor_lite/semantic_segmentation/
+valor_lite/semantic_segmentation/__init__.py,sha256=BhTUbwbdJa1FdS4ZA3QSIZ8TuJmdGGLGCd5hX6SzKa4,297
+valor_lite/semantic_segmentation/annotation.py,sha256=xd2qJyIeTW8CT_Goyu3Kvl_51b9b6D3WvUfqwShR0Sk,4990
+valor_lite/semantic_segmentation/benchmark.py,sha256=iVdxUo9LgDbbXUa6eRhZ49LOYw-yyr2W4p9FP3KHg0k,3848
+valor_lite/semantic_segmentation/computation.py,sha256=myHjJZ70f2Xc-PGHx3DcLWvXXRu_H8w9z20n7qV-Abo,4687
+valor_lite/semantic_segmentation/manager.py,sha256=TtwJI7Bsn3zHL2ECOqCmymG-JqREo7I6qxYtycbz54Y,14322
 valor_lite/semantic_segmentation/metric.py,sha256=aJv3wPEl6USLhZ3c4yz6prnBU-EaG4Kz16f0BXcodd4,7046
 valor_lite/semantic_segmentation/utilities.py,sha256=vZM66YNMz9VJclhuKvcWp74nF65s6bscnnD5U9iDW7Q,2925
 valor_lite/text_generation/__init__.py,sha256=pGhpWCSZjLM0pPHCtPykAfos55B8ie3mi9EzbNxfj-U,356

@@ -31,8 +33,8 @@ valor_lite/text_generation/llm/instructions.py,sha256=fz2onBZZWcl5W8iy7zEWkPGU9N
 valor_lite/text_generation/llm/integrations.py,sha256=-rTfdAjq1zH-4ixwYuMQEOQ80pIFzMTe0BYfroVx3Pg,6974
 valor_lite/text_generation/llm/utilities.py,sha256=bjqatGgtVTcl1PrMwiDKTYPGJXKrBrx7PDtzIblGSys,1178
 valor_lite/text_generation/llm/validators.py,sha256=Wzr5RlfF58_2wOU-uTw7C8skan_fYdhy4Gfn0jSJ8HM,2700
-valor_lite-0.33.
-valor_lite-0.33.
-valor_lite-0.33.
-valor_lite-0.33.
-valor_lite-0.33.
+valor_lite-0.33.18.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+valor_lite-0.33.18.dist-info/METADATA,sha256=oo3sEQQvJJvAIelgFRB1Me2Jmkk-nb_dkphL2k4wo7Y,5888
+valor_lite-0.33.18.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+valor_lite-0.33.18.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.33.18.dist-info/RECORD,,
{valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/LICENSE: file without changes
{valor_lite-0.33.16.dist-info → valor_lite-0.33.18.dist-info}/top_level.txt: file without changes