valor-lite 0.33.17-py3-none-any.whl → 0.33.18-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

valor_lite/object_detection/manager.py
@@ -334,6 +334,10 @@ class Evaluator:
         return metrics
 
 
+def defaultdict_int():
+    return defaultdict(int)
+
+
 class DataLoader:
     """
     Object Detection DataLoader
@@ -342,8 +346,8 @@ class DataLoader:
    def __init__(self):
         self._evaluator = Evaluator()
         self.pairs: list[NDArray[np.float64]] = list()
-        self.groundtruth_count = defaultdict(lambda: defaultdict(int))
-        self.prediction_count = defaultdict(lambda: defaultdict(int))
+        self.groundtruth_count = defaultdict(defaultdict_int)
+        self.prediction_count = defaultdict(defaultdict_int)
 
    def _add_datum(self, uid: str) -> int:
         """

valor_lite/profiling.py (new file)
@@ -0,0 +1,374 @@
+import json
+import math
+import multiprocessing as mp
+import resource
+import time
+from collections import deque
+from multiprocessing import Queue
+from typing import Any
+
+from tqdm import tqdm
+
+
+class BenchmarkError(Exception):
+    def __init__(
+        self, benchmark: str, error_type: str, error_message: str
+    ) -> None:
+        super().__init__(
+            f"'{benchmark}' raised '{error_type}' with the following message: {error_message}"
+        )
+
+
+def _timeit_subprocess(*args, __fn, __queue: Queue, **kwargs):
+    """
+    Multiprocessing subprocess that reports either runtime or errors.
+
+    This is handled within a subprocess to protect the benchmark against OOM errors.
+    """
+    try:
+        timer_start = time.perf_counter()
+        __fn(*args, **kwargs)
+        timer_end = time.perf_counter()
+        __queue.put(timer_end - timer_start)
+    except Exception as e:
+        __queue.put(e)
+
+
+def create_runtime_profiler(
+    time_limit: float | None,
+    repeat: int = 1,
+):
+    """
+    Creates a runtime profiler as a decorating function.
+
+    The profiler reports runtime of the wrapped function from a subprocess to protect against OOM errors.
+
+    Parameters
+    ----------
+    time_limit : float, optional
+        An optional time limit to constrain the benchmark.
+    repeat : int, default=1
+        The number of times to repeat the benchmark to produce an average runtime.
+    """
+    ctx = mp.get_context("spawn")
+
+    def decorator(fn):
+        def wrapper(*args, **kwargs):
+            # Record average runtime over repeated runs.
+            elapsed = 0
+            for _ in range(repeat):
+                q = ctx.Queue()
+                p = ctx.Process(
+                    target=_timeit_subprocess,
+                    args=args,
+                    kwargs={"__fn": fn, "__queue": q, **kwargs},
+                )
+                p.start()
+                p.join(timeout=time_limit)
+
+                # Check if computation finishes within the timeout
+                if p.is_alive():
+                    p.terminate()
+                    p.join()
+                    q.close()
+                    q.join_thread()
+                    raise TimeoutError(
+                        f"Function '{fn.__name__}' did not complete within {time_limit} seconds."
+                    )
+
+                # Retrieve the result
+                result = q.get(timeout=1)
+                if isinstance(result, Exception):
+                    raise result
+                elif isinstance(result, float):
+                    elapsed += result
+                else:
+                    raise TypeError(type(result).__name__)
+
+            return elapsed / repeat
+
+        return wrapper
+
+    return decorator
+
+
+def pretty_print_results(results: tuple):
+    valid, invalid, permutations = results
+
+    print(
+        "====================================================================="
+    )
+    print("Details")
+    print(json.dumps(permutations, indent=4))
+
+    if len(valid) > 0:
+        print()
+        print("Passed")
+        keys = ["complexity", "runtime", *valid[0]["details"].keys()]
+        header = " | ".join(f"{header:^15}" for header in keys)
+        print(header)
+        print("-" * len(header))
+        for entry in valid:
+            values = [
+                entry["complexity"],
+                round(entry["runtime"], 4),
+                *entry["details"].values(),
+            ]
+            row = " | ".join(f"{str(value):^15}" for value in values)
+            print(row)
+
+    if len(invalid) > 0:
+        print()
+        print("Failed")
+        keys = ["complexity", "error", *invalid[0]["details"].keys(), "msg"]
+        header = " | ".join(f"{header:^15}" for header in keys)
+        print(header)
+        print("-" * len(header))
+        for entry in invalid:
+            values = [
+                entry["complexity"],
+                entry["error"],
+                *entry["details"].values(),
+                entry["msg"],
+            ]
+            row = " | ".join(f"{str(value):^15}" for value in values)
+            print(row)
+
+
+def _calculate_complexity(params: list[int | tuple[int]]) -> int:
+    """
+    Basic metric of benchmark complexity.
+    """
+    flattened_params = [
+        math.prod(p) if isinstance(p, tuple) else p for p in params
+    ]
+    return math.prod(flattened_params)
+
+
+class Benchmark:
+    def __init__(
+        self,
+        time_limit: float | None,
+        memory_limit: int | None,
+        *_,
+        repeat: int | None = 1,
+        verbose: bool = False,
+    ):
+        self.time_limit = time_limit
+        self.memory_limit = memory_limit
+        self.repeat = repeat
+        self.verbose = verbose
+
+    def get_limits(
+        self,
+        *_,
+        readable: bool = True,
+        memory_unit: str = "GB",
+        time_unit: str = "seconds",
+    ) -> dict[str, str | int | float | None]:
+        """
+        Returns a dictionary of benchmark limits.
+
+        Parameters
+        ----------
+        readable : bool, default=True
+            Toggles whether the output should be human readable.
+        memory_unit : str, default="GB"
+            Toggles what unit to display the memory limit with when 'readable=True'.
+        time_unit : str, default="seconds"
+            Toggles what unit to display the time limit with when 'readable=True'.
+
+        Returns
+        -------
+        dict[str, str | int | float | None]
+            The benchmark limits.
+        """
+
+        memory_value = self.memory_limit
+        if readable and memory_value is not None:
+            match memory_unit:
+                case "TB":
+                    memory_value /= 1024**4
+                case "GB":
+                    memory_value /= 1024**3
+                case "MB":
+                    memory_value /= 1024**2
+                case "KB":
+                    memory_value /= 1024
+                case "B":
+                    pass
+                case _:
+                    valid_set = {"TB", "GB", "MB", "KB", "B"}
+                    raise ValueError(
+                        f"Expected memory unit to be in the set {valid_set}, received '{memory_unit}'."
+                    )
+            memory_value = f"{memory_value} {memory_unit}"
+
+        time_value = self.time_limit
+        if readable and time_value is not None:
+            match time_unit:
+                case "minutes":
+                    time_value /= 60
+                case "seconds":
+                    pass
+                case "milliseconds":
+                    time_value *= 1000
+                case _:
+                    valid_set = {"minutes", "seconds", "milliseconds"}
+                    raise ValueError(
+                        f"Expected time unit to be in the set {valid_set}, received '{time_unit}'."
+                    )
+            time_value = f"{time_value} {time_unit}"
+
+        return {
+            "memory_limit": memory_value,
+            "time_limit": time_value,
+            "repeat": self.repeat,
+        }
+
+    @property
+    def memory_limit(self) -> int | None:
+        """
+        The memory limit in bytes (B).
+        """
+        return self._memory_limit
+
+    @memory_limit.setter
+    def memory_limit(self, limit: int | None):
+        """
+        Stores the memory limit and restricts resources.
+        """
+        self._memory_limit = limit
+        if limit is not None:
+            _, hard = resource.getrlimit(resource.RLIMIT_AS)
+            resource.setrlimit(resource.RLIMIT_AS, (limit, hard))
+
+    def run(
+        self,
+        benchmark,
+        **kwargs: list[Any],
+    ):
+        """
+        Runs a benchmark with ranges of parameters.
+
+        Parameters
+        ----------
+        benchmark : Callable
+            The benchmark function.
+        **kwargs : list[Any]
+            Keyword arguments passing lists of parameters to benchmark. The values should be sorted in
+            decreasing complexity. For example, if the number of labels is a parameter then a higher
+            number of unique labels would be considered "more" complex.
+
+        Example
+        -------
+        >>> b = Benchmark(
+        ...     time_limit=10.0,
+        ...     memory_limit=8 * (1024**3),
+        ...     repeat=1,
+        ...     verbose=False,
+        ... )
+        >>> results = b.run(
+        ...     benchmark=semseg_add_data,
+        ...     n_labels=[
+        ...         100,
+        ...         10,
+        ...     ],
+        ...     shape=[
+        ...         (1000, 1000),
+        ...         (100, 100),
+        ...     ],
+        ... )
+        """
+
+        nvars = len(kwargs)
+        keys = tuple(kwargs.keys())
+        vars = tuple(kwargs[key] for key in keys)
+
+        initial_indices = tuple(0 for _ in range(nvars))
+        max_indices = tuple(len(v) for v in vars)
+        permutations = math.prod(max_indices)
+
+        # Initialize queue with the starting index (0, ...)
+        queue = deque()
+        queue.append(initial_indices)
+
+        # Keep track of explored combinations to avoid duplicates
+        explored = set()
+        explored.add(initial_indices)
+
+        # Store valid combinations that finish within the time limit
+        valid_combinations = []
+        invalid_combinations = []
+
+        pbar = tqdm(total=math.prod(max_indices), disable=(not self.verbose))
+        prev_count = 0
+        while queue:
+
+            current_indices = queue.popleft()
+            parameters = {
+                k: v[current_indices[idx]]
+                for idx, (k, v) in enumerate(zip(keys, vars))
+            }
+            complexity = _calculate_complexity(list(parameters.values()))
+
+            details: dict = {k: str(v) for k, v in parameters.items()}
+
+            # update terminal with status
+            count = len(valid_combinations) + len(invalid_combinations)
+            pbar.update(count - prev_count)
+            prev_count = count
+
+            try:
+                runtime = benchmark(
+                    time_limit=self.time_limit,
+                    repeat=self.repeat,
+                    **parameters,
+                )
+                valid_combinations.append(
+                    {
+                        "complexity": complexity,
+                        "runtime": runtime,
+                        "details": details,
+                    }
+                )
+                continue
+            except Exception as e:
+                invalid_combinations.append(
+                    {
+                        "complexity": complexity,
+                        "error": type(e).__name__,
+                        "msg": str(e),
+                        "details": details,
+                    }
+                )
+
+            for idx in range(nvars):
+                new_indices = list(current_indices)
+                if new_indices[idx] + 1 < max_indices[idx]:
+                    new_indices[idx] += 1
+                    new_indices_tuple = tuple(new_indices)
+                    if new_indices_tuple not in explored:
+                        queue.append(new_indices_tuple)
+                        explored.add(new_indices_tuple)
+
+        valid_combinations.sort(key=lambda x: -x["complexity"])
+        invalid_combinations.sort(key=lambda x: -x["complexity"])
+
+        # clear terminal and display results
+        results = (
+            valid_combinations,
+            invalid_combinations,
+            {
+                "benchmark": benchmark.__name__,
+                "limits": self.get_limits(readable=True),
+                "passed": permutations - len(invalid_combinations),
+                "failed": len(invalid_combinations),
+                "total": permutations,
+            },
+        )
+        pbar.close()
+        if self.verbose:
+            pretty_print_results(results)
+
+        return results
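
Note: create_runtime_profiler returns a decorator, so profiling a call reads as profile(fn)(*args). A minimal usage sketch — the work function and its argument are hypothetical, not part of the package:

    from valor_lite.profiling import create_runtime_profiler

    def work(n: int) -> int:
        # Hypothetical CPU-bound workload, used only for illustration.
        return sum(i * i for i in range(n))

    if __name__ == "__main__":
        # The 'spawn' context pickles the target and its arguments,
        # so both must be importable/picklable at module level.
        profile = create_runtime_profiler(time_limit=5.0, repeat=3)
        print(profile(work)(10_000_000))  # average seconds over 3 runs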

valor_lite/semantic_segmentation/__init__.py
@@ -1,4 +1,4 @@
-from .annotation import Bitmask, Segmentation
+from .annotation import Bitmask, Segmentation, generate_segmentation
 from .manager import DataLoader, Evaluator
 from .metric import Metric, MetricType
 
@@ -9,4 +9,5 @@ __all__ = [
     "Bitmask",
     "Metric",
     "MetricType",
+    "generate_segmentation",
 ]

valor_lite/semantic_segmentation/annotation.py
@@ -29,7 +29,7 @@ class Bitmask:
    def __post_init__(self):
         if self.mask.dtype != np.bool_:
             raise ValueError(
-                f"Bitmask recieved mask with dtype `{self.mask.dtype}`."
+                f"Bitmask recieved mask with dtype '{self.mask.dtype}'."
             )
 
 
@@ -94,3 +94,86 @@ class Segmentation:
 
         self.shape = groundtruth_shape.pop()
         self.size = int(np.prod(np.array(self.shape)))
+
+
+def generate_segmentation(
+    datum_uid: str,
+    number_of_unique_labels: int,
+    mask_height: int,
+    mask_width: int,
+) -> Segmentation:
+    """
+    Generates a semantic segmentation annotation.
+
+    Parameters
+    ----------
+    datum_uid : str
+        The datum UID for the generated segmentation.
+    number_of_unique_labels : int
+        The number of unique labels.
+    mask_height : int
+        The height of the mask in pixels.
+    mask_width : int
+        The width of the mask in pixels.
+
+    Returns
+    -------
+    Segmentation
+        A generated semantic segmenatation annotation.
+    """
+
+    if number_of_unique_labels > 1:
+        common_proba = 0.4 / (number_of_unique_labels - 1)
+        min_proba = min(common_proba, 0.1)
+        labels = [str(i) for i in range(number_of_unique_labels)] + [None]
+        proba = (
+            [0.5]
+            + [common_proba for _ in range(number_of_unique_labels - 1)]
+            + [0.1]
+        )
+    elif number_of_unique_labels == 1:
+        labels = ["0", None]
+        proba = [0.9, 0.1]
+        min_proba = 0.1
+    else:
+        raise ValueError(
+            "The number of unique labels should be greater than zero."
+        )
+
+    probabilities = np.array(proba, dtype=np.float64)
+    weights = (probabilities / min_proba).astype(np.int32)
+
+    indices = np.random.choice(
+        np.arange(len(weights)),
+        size=(mask_height * 2, mask_width),
+        p=probabilities,
+    )
+
+    N = len(labels)
+
+    masks = np.arange(N)[:, None, None] == indices
+
+    gts = []
+    pds = []
+    for lidx in range(N):
+        label = labels[lidx]
+        if label is None:
+            continue
+        gts.append(
+            Bitmask(
+                mask=masks[lidx, :mask_height, :],
+                label=label,
+            )
+        )
+        pds.append(
+            Bitmask(
+                mask=masks[lidx, mask_height:, :],
+                label=label,
+            )
+        )
+
+    return Segmentation(
+        uid=datum_uid,
+        groundtruths=gts,
+        predictions=pds,
+    )
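
Note: the generator draws a single (2 * mask_height, mask_width) field of label indices and splits it in half, so the ground-truth and prediction masks share a label distribution but disagree pixel-by-pixel. A quick usage sketch (the seed is only for reproducibility):

    import numpy as np
    from valor_lite.semantic_segmentation import generate_segmentation

    np.random.seed(0)  # generation uses np.random.choice internally
    seg = generate_segmentation(
        datum_uid="datum0",
        number_of_unique_labels=3,
        mask_height=100,
        mask_width=100,
    )
    # One Bitmask per non-background label on each side.
    print(seg.shape, len(seg.groundtruths), len(seg.predictions))  # (100, 100) 3 3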

valor_lite/semantic_segmentation/benchmark.py (new file)
@@ -0,0 +1,151 @@
+from valor_lite.profiling import create_runtime_profiler
+from valor_lite.semantic_segmentation import DataLoader, generate_segmentation
+
+
+def benchmark_add_data(
+    n_labels: int,
+    shape: tuple[int, int],
+    time_limit: float | None,
+    repeat: int = 1,
+) -> float:
+    """
+    Benchmarks 'Dataloader.add_data' for semantic segmentation.
+
+    Parameters
+    ----------
+    n_labels : int
+        The number of unique labels to generate.
+    shape : tuple[int, int]
+        The size (h,w) of the mask to generate.
+    time_limit : float, optional
+        An optional time limit to constrain the benchmark.
+    repeat : int
+        The number of times to run the benchmark to produce a runtime average.
+
+    Returns
+    -------
+    float
+        The average runtime.
+    """
+
+    profile = create_runtime_profiler(
+        time_limit=time_limit,
+        repeat=repeat,
+    )
+
+    elapsed = 0
+    for _ in range(repeat):
+        data = generate_segmentation(
+            datum_uid="uid",
+            number_of_unique_labels=n_labels,
+            mask_height=shape[0],
+            mask_width=shape[1],
+        )
+        loader = DataLoader()
+        elapsed += profile(loader.add_data)([data])
+    return elapsed / repeat
+
+
+def benchmark_finalize(
+    n_datums: int,
+    n_labels: int,
+    time_limit: float | None,
+    repeat: int = 1,
+):
+    """
+    Benchmarks 'Dataloader.finalize' for semantic segmentation.
+
+    Parameters
+    ----------
+    n_datums : int
+        The number of datums to generate.
+    n_labels : int
+        The number of unique labels to generate.
+    time_limit : float, optional
+        An optional time limit to constrain the benchmark.
+    repeat : int
+        The number of times to run the benchmark to produce a runtime average.
+
+    Returns
+    -------
+    float
+        The average runtime.
+    """
+
+    profile = create_runtime_profiler(
+        time_limit=time_limit,
+        repeat=repeat,
+    )
+
+    elapsed = 0
+    for _ in range(repeat):
+
+        data = [
+            generate_segmentation(
+                datum_uid=str(i),
+                number_of_unique_labels=n_labels,
+                mask_height=5,
+                mask_width=5,
+            )
+            for i in range(10)
+        ]
+        loader = DataLoader()
+        for datum_idx in range(n_datums):
+            segmentation = data[datum_idx % 10]
+            segmentation.uid = str(datum_idx)
+            loader.add_data([segmentation])
+        elapsed += profile(loader.finalize)()
+    return elapsed / repeat
+
+
+def benchmark_evaluate(
+    n_datums: int,
+    n_labels: int,
+    time_limit: float | None,
+    repeat: int = 1,
+):
+    """
+    Benchmarks 'Evaluator.evaluate' for semantic segmentation.
+
+    Parameters
+    ----------
+    n_datums : int
+        The number of datums to generate.
+    n_labels : int
+        The number of unique labels to generate.
+    time_limit : float, optional
+        An optional time limit to constrain the benchmark.
+    repeat : int
+        The number of times to run the benchmark to produce a runtime average.
+
+    Returns
+    -------
+    float
+        The average runtime.
+    """
+
+    profile = create_runtime_profiler(
+        time_limit=time_limit,
+        repeat=repeat,
+    )
+
+    elapsed = 0
+    for _ in range(repeat):
+
+        data = [
+            generate_segmentation(
+                datum_uid=str(i),
+                number_of_unique_labels=n_labels,
+                mask_height=5,
+                mask_width=5,
+            )
+            for i in range(10)
+        ]
+        loader = DataLoader()
+        for datum_idx in range(n_datums):
+            segmentation = data[datum_idx % 10]
+            segmentation.uid = str(datum_idx)
+            loader.add_data([segmentation])
+        evaluator = loader.finalize()
+        elapsed += profile(evaluator.evaluate)()
+    return elapsed / repeat
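
Note: these entry points accept time_limit and repeat keywords so they can be driven directly by Benchmark.run from valor_lite.profiling. A sketch of how they compose, mirroring the docstring example in profiling.py (the limits chosen here are arbitrary):

    from valor_lite.profiling import Benchmark
    from valor_lite.semantic_segmentation.benchmark import benchmark_add_data

    if __name__ == "__main__":
        b = Benchmark(
            time_limit=10.0,
            memory_limit=4 * (1024**3),  # 4 GB, expressed in bytes
            repeat=1,
            verbose=True,
        )
        # Parameter lists are ordered most complex first, as Benchmark.run expects.
        b.run(
            benchmark=benchmark_add_data,
            n_labels=[100, 10],
            shape=[(1000, 1000), (100, 100)],
        )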

valor_lite/semantic_segmentation/computation.py
@@ -31,9 +31,6 @@ def compute_intermediate_confusion_matrices(
         A 2-D confusion matrix with shape (n_labels + 1, n_labels + 1).
     """
 
-    n_gt_labels = groundtruth_labels.size
-    n_pd_labels = prediction_labels.size
-
     groundtruth_counts = groundtruths.sum(axis=1)
     prediction_counts = predictions.sum(axis=1)
 
@@ -42,33 +39,23 @@ def compute_intermediate_confusion_matrices(
     ).sum()
 
     intersection_counts = np.logical_and(
-        groundtruths.reshape(n_gt_labels, 1, -1),
-        predictions.reshape(1, n_pd_labels, -1),
+        groundtruths[:, None, :],
+        predictions[None, :, :],
     ).sum(axis=2)
-
     intersected_groundtruth_counts = intersection_counts.sum(axis=1)
     intersected_prediction_counts = intersection_counts.sum(axis=0)
 
     confusion_matrix = np.zeros((n_labels + 1, n_labels + 1), dtype=np.int32)
     confusion_matrix[0, 0] = background_counts
-    for gidx in range(n_gt_labels):
-        gt_label_idx = groundtruth_labels[gidx]
-        for pidx in range(n_pd_labels):
-            pd_label_idx = prediction_labels[pidx]
-            confusion_matrix[
-                gt_label_idx + 1,
-                pd_label_idx + 1,
-            ] = intersection_counts[gidx, pidx]
-
-            if gidx == 0:
-                confusion_matrix[0, pd_label_idx + 1] = (
-                    prediction_counts[pidx]
-                    - intersected_prediction_counts[pidx]
-                )
-
-        confusion_matrix[gt_label_idx + 1, 0] = (
-            groundtruth_counts[gidx] - intersected_groundtruth_counts[gidx]
-        )
+    confusion_matrix[
+        np.ix_(groundtruth_labels + 1, prediction_labels + 1)
+    ] = intersection_counts
+    confusion_matrix[0, prediction_labels + 1] = (
+        prediction_counts - intersected_prediction_counts
+    )
+    confusion_matrix[groundtruth_labels + 1, 0] = (
+        groundtruth_counts - intersected_groundtruth_counts
+    )
 
     return confusion_matrix
 
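
Note: the vectorized rewrite leans on np.ix_, which builds an open mesh from the row and column index arrays so an entire block of the confusion matrix is assigned at once rather than via nested loops. A small self-contained illustration with hypothetical label indices:

    import numpy as np

    # Hypothetical label index arrays, shaped like the function's inputs.
    groundtruth_labels = np.array([0, 2])
    prediction_labels = np.array([1, 2])
    intersection_counts = np.array([[5, 1],
                                    [0, 7]])

    cm = np.zeros((4, 4), dtype=np.int32)
    # np.ix_ selects the cross product of rows (1, 3) and columns (2, 3),
    # assigning the whole 2x2 block in one statement.
    cm[np.ix_(groundtruth_labels + 1, prediction_labels + 1)] = intersection_counts
    print(cm)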

valor_lite/semantic_segmentation/manager.py
@@ -243,6 +243,10 @@ class Evaluator:
         return self.compute_precision_recall_iou(filter_=filter_)
 
 
+def defaultdict_int():
+    return defaultdict(int)
+
+
 class DataLoader:
     """
     Segmentation DataLoader.
@@ -250,8 +254,8 @@ class DataLoader:
 
    def __init__(self):
         self._evaluator = Evaluator()
-        self.groundtruth_count = defaultdict(lambda: defaultdict(int))
-        self.prediction_count = defaultdict(lambda: defaultdict(int))
+        self.groundtruth_count = defaultdict(defaultdict_int)
+        self.prediction_count = defaultdict(defaultdict_int)
         self.matrices = list()
         self.pixel_count = list()
 

valor_lite-0.33.18.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: valor-lite
-Version: 0.33.17
+Version: 0.33.18
 Summary: Compute valor metrics locally.
 License: MIT License
 

valor_lite-0.33.18.dist-info/RECORD
@@ -1,5 +1,6 @@
 valor_lite/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
 valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valor_lite/profiling.py,sha256=TLIROA1qccFw9NoEkMeQcrvvGGO75c4K5yTIWoCUix8,11746
 valor_lite/schemas.py,sha256=pB0MrPx5qFLbwBWDiOUUm-vmXdWvbJLFCBmKgbcbI5g,198
 valor_lite/classification/__init__.py,sha256=8MI8bGwCxYGqRP7KxG7ezhYv4qQ5947XGvvlF8WPM5g,392
 valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113
@@ -10,13 +11,14 @@ valor_lite/classification/utilities.py,sha256=PmQar06Vt-ew4Jvnn0IM63mq730QVTsdRt
 valor_lite/object_detection/__init__.py,sha256=Ql8rju2q7y0Zd9zFvtBJDRhgQFDm1RSYkTsyH3ZE6pA,648
 valor_lite/object_detection/annotation.py,sha256=x9bsl8b75yvkMByXXiIYI9d9T03olDqtykSvKJc3aFw,7729
 valor_lite/object_detection/computation.py,sha256=P5ijxEBuZ3mxYjBQy24TiQpGxRmPuS40Gwn44uv0J7M,28064
-valor_lite/object_detection/manager.py,sha256=I1AwelhxeOA7GJ31eCw8ubNvmprIEGRmbxONozQMsC4,22998
+valor_lite/object_detection/manager.py,sha256=utdILUUCx04EWC0_bHGpEPaxcCOhmsOx5lxT9qU1a9s,23033
 valor_lite/object_detection/metric.py,sha256=8QhdauuaRrzE39idetkFYTPxA12wrBalQDIR4IUzEbg,24794
 valor_lite/object_detection/utilities.py,sha256=98VSW-g8EYI8Cdd9KHLHdm6F4fI89jaX5I4z99zny4s,16271
-valor_lite/semantic_segmentation/__init__.py,sha256=HQQkr3iBPQfdUrsu0uvx-Uyv9SYmumU1B3slbWOnpNY,245
-valor_lite/semantic_segmentation/annotation.py,sha256=CujYFdHS3fgr4Y7mEDs_u1XBmbPJzNU2CdqvjCT_d_A,2938
-valor_lite/semantic_segmentation/computation.py,sha256=471Pl-0TCFBdkgZMvYDFs4aa6Ak5kv31xarK_USl3pU,5122
-valor_lite/semantic_segmentation/manager.py,sha256=pMepH3zk_fApyFtC9tLrmEYuCbg1n5TLh1J8QRadE44,14287
+valor_lite/semantic_segmentation/__init__.py,sha256=BhTUbwbdJa1FdS4ZA3QSIZ8TuJmdGGLGCd5hX6SzKa4,297
+valor_lite/semantic_segmentation/annotation.py,sha256=xd2qJyIeTW8CT_Goyu3Kvl_51b9b6D3WvUfqwShR0Sk,4990
+valor_lite/semantic_segmentation/benchmark.py,sha256=iVdxUo9LgDbbXUa6eRhZ49LOYw-yyr2W4p9FP3KHg0k,3848
+valor_lite/semantic_segmentation/computation.py,sha256=myHjJZ70f2Xc-PGHx3DcLWvXXRu_H8w9z20n7qV-Abo,4687
+valor_lite/semantic_segmentation/manager.py,sha256=TtwJI7Bsn3zHL2ECOqCmymG-JqREo7I6qxYtycbz54Y,14322
 valor_lite/semantic_segmentation/metric.py,sha256=aJv3wPEl6USLhZ3c4yz6prnBU-EaG4Kz16f0BXcodd4,7046
 valor_lite/semantic_segmentation/utilities.py,sha256=vZM66YNMz9VJclhuKvcWp74nF65s6bscnnD5U9iDW7Q,2925
 valor_lite/text_generation/__init__.py,sha256=pGhpWCSZjLM0pPHCtPykAfos55B8ie3mi9EzbNxfj-U,356
@@ -31,8 +33,8 @@ valor_lite/text_generation/llm/instructions.py,sha256=fz2onBZZWcl5W8iy7zEWkPGU9N
 valor_lite/text_generation/llm/integrations.py,sha256=-rTfdAjq1zH-4ixwYuMQEOQ80pIFzMTe0BYfroVx3Pg,6974
 valor_lite/text_generation/llm/utilities.py,sha256=bjqatGgtVTcl1PrMwiDKTYPGJXKrBrx7PDtzIblGSys,1178
 valor_lite/text_generation/llm/validators.py,sha256=Wzr5RlfF58_2wOU-uTw7C8skan_fYdhy4Gfn0jSJ8HM,2700
-valor_lite-0.33.17.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
-valor_lite-0.33.17.dist-info/METADATA,sha256=7YPVo6unLWsA7zNV7LcKnwBJm6AqCWjfaeM5nVyVKho,5888
-valor_lite-0.33.17.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-valor_lite-0.33.17.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
-valor_lite-0.33.17.dist-info/RECORD,,
+valor_lite-0.33.18.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+valor_lite-0.33.18.dist-info/METADATA,sha256=oo3sEQQvJJvAIelgFRB1Me2Jmkk-nb_dkphL2k4wo7Y,5888
+valor_lite-0.33.18.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+valor_lite-0.33.18.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.33.18.dist-info/RECORD,,

valor_lite-0.33.18.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.3.0)
+Generator: setuptools (75.5.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 