valor-lite 0.33.8__py3-none-any.whl → 0.33.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,96 @@
+ from dataclasses import dataclass, field
+
+ import numpy as np
+ from numpy.typing import NDArray
+
+
+ @dataclass
+ class Bitmask:
+     """
+     Represents a binary mask with an associated semantic label.
+
+     Parameters
+     ----------
+     mask : NDArray[np.bool_]
+         A NumPy array of boolean values representing the mask.
+     label : str
+         The semantic label associated with the mask.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> mask = np.array([[True, False], [False, True]], dtype=np.bool_)
+     >>> bitmask = Bitmask(mask=mask, label='ocean')
+     """
+
+     mask: NDArray[np.bool_]
+     label: str
+
+     def __post_init__(self):
+         if self.mask.dtype != np.bool_:
+             raise ValueError(
+                 f"Bitmask received mask with dtype `{self.mask.dtype}`."
+             )
+
+
+ @dataclass
+ class Segmentation:
+     """
+     Segmentation data structure holding ground truth and prediction bitmasks for semantic segmentation tasks.
+
+     Parameters
+     ----------
+     uid : str
+         Unique identifier for the image or sample.
+     groundtruths : List[Bitmask]
+         List of ground truth bitmasks.
+     predictions : List[Bitmask]
+         List of predicted bitmasks.
+     shape : tuple of int, optional
+         The shape of the segmentation masks. This is set automatically after initialization.
+     size : int, optional
+         The total number of pixels in the masks. This is set automatically after initialization.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> mask1 = np.array([[True, False], [False, True]], dtype=np.bool_)
+     >>> groundtruth = Bitmask(mask=mask1, label='object')
+     >>> mask2 = np.array([[False, True], [True, False]], dtype=np.bool_)
+     >>> prediction = Bitmask(mask=mask2, label='object')
+     >>> segmentation = Segmentation(
+     ...     uid='123',
+     ...     groundtruths=[groundtruth],
+     ...     predictions=[prediction]
+     ... )
+     """
+
+     uid: str
+     groundtruths: list[Bitmask]
+     predictions: list[Bitmask]
+     shape: tuple[int, ...] = field(default_factory=lambda: (0, 0))
+     size: int = field(default=0)
+
+     def __post_init__(self):
+
+         groundtruth_shape = {
+             groundtruth.mask.shape for groundtruth in self.groundtruths
+         }
+         prediction_shape = {
+             prediction.mask.shape for prediction in self.predictions
+         }
+         if len(groundtruth_shape) == 0:
+             raise ValueError("The segmentation is missing ground truths.")
+         elif len(prediction_shape) == 0:
+             raise ValueError("The segmentation is missing predictions.")
+         elif (
+             len(groundtruth_shape) != 1
+             or len(prediction_shape) != 1
+             or groundtruth_shape != prediction_shape
+         ):
+             raise ValueError(
+                 "A shape mismatch exists within the segmentation."
+             )
+
+         self.shape = groundtruth_shape.pop()
+         self.size = int(np.prod(np.array(self.shape)))
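Editorial note on the new annotation hunk above: the two `__post_init__` hooks enforce a boolean dtype on every mask and a single shared shape across ground truths and predictions, then derive `shape` and `size` automatically. A minimal sketch of that behavior (not part of the package diff; it assumes `Bitmask` and `Segmentation` are importable from `valor_lite.semantic_segmentation`, as the README further below shows):

```python
# Illustrative sketch only -- exercises the validation rules defined in the hunk above.
import numpy as np
from valor_lite.semantic_segmentation import Bitmask, Segmentation

# dtype check: a non-boolean mask is rejected.
try:
    Bitmask(mask=np.zeros((4, 4), dtype=np.uint8), label="sky")
except ValueError as e:
    print(e)  # Bitmask received mask with dtype `uint8`.

# shape check: ground truth and prediction masks must share one shape.
gt = Bitmask(mask=np.ones((4, 4), dtype=np.bool_), label="sky")
pd = Bitmask(mask=np.ones((8, 8), dtype=np.bool_), label="sky")
try:
    Segmentation(uid="uid0", groundtruths=[gt], predictions=[pd])
except ValueError as e:
    print(e)  # A shape mismatch exists within the segmentation.

# when shapes agree, shape and size are filled in after initialization.
ok = Segmentation(
    uid="uid1",
    groundtruths=[gt],
    predictions=[Bitmask(mask=np.zeros((4, 4), dtype=np.bool_), label="sky")],
)
print(ok.shape, ok.size)  # (4, 4) 16
```

Callers never pass `shape` or `size`; both are derived from the ground truth masks once validation passes.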
@@ -4,12 +4,12 @@ from dataclasses import dataclass
  import numpy as np
  from numpy.typing import NDArray
  from tqdm import tqdm
- from valor_lite.segmentation.annotation import Segmentation
- from valor_lite.segmentation.computation import (
+ from valor_lite.semantic_segmentation.annotation import Segmentation
+ from valor_lite.semantic_segmentation.computation import (
      compute_intermediate_confusion_matrices,
      compute_metrics,
  )
- from valor_lite.segmentation.metric import (
+ from valor_lite.semantic_segmentation.metric import (
      F1,
      Accuracy,
      ConfusionMatrix,
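For downstream users, the practical effect of this hunk is a module rename: the semantic segmentation evaluator now lives under `valor_lite.semantic_segmentation` rather than `valor_lite.segmentation`. A migration sketch (editorial, not part of the diff; it assumes 0.33.8 exposed the same names under the old path):

```python
# Before (0.33.8), assuming the old module path exported the same names:
# from valor_lite.segmentation import Bitmask, DataLoader, MetricType, Segmentation

# After (0.33.9), matching the import path shown in the README below:
from valor_lite.semantic_segmentation import Bitmask, DataLoader, MetricType, Segmentation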
@@ -190,9 +190,8 @@ class Evaluator:
              n_pixels=self._n_pixels_per_datum[mask_datums].sum(),
          )
 
-     def evaluate(
+     def compute_precision_recall_iou(
          self,
-         metrics_to_return: list[MetricType] = MetricType.base(),
          filter_: Filter | None = None,
          as_dict: bool = False,
      ) -> dict[MetricType, list]:
@@ -201,10 +200,10 @@ class Evaluator:
 
          Parameters
          ----------
-         metrics_to_return : list[MetricType]
-             A list of metrics to return in the results.
          filter_ : Filter, optional
              An optional filter object.
+         as_dict : bool, default=False
+             An option to return metrics as dictionaries.
 
          Returns
          -------
@@ -239,7 +238,7 @@ class Evaluator:
 
          metrics[MetricType.Accuracy] = [
              Accuracy(
-                 value=accuracy,
+                 value=float(accuracy),
              )
          ]
 
@@ -258,16 +257,14 @@ class Evaluator:
              },
              hallucinations={
                  self.index_to_label[pd_label_idx]: {
-                     "percent": float(hallucination_ratios[pd_label_idx])
+                     "ratio": float(hallucination_ratios[pd_label_idx])
                  }
                  for pd_label_idx in range(self.n_labels)
                  if label_metadata[pd_label_idx, 0] > 0
              },
              missing_predictions={
                  self.index_to_label[gt_label_idx]: {
-                     "percent": float(
-                         missing_prediction_ratios[gt_label_idx]
-                     )
+                     "ratio": float(missing_prediction_ratios[gt_label_idx])
                  }
                  for gt_label_idx in range(self.n_labels)
                  if label_metadata[gt_label_idx, 0] > 0
@@ -316,10 +313,6 @@ class Evaluator:
                  )
              )
 
-         for metric in set(metrics.keys()):
-             if metric not in metrics_to_return:
-                 del metrics[metric]
-
          if as_dict:
              return {
                  mtype: [metric.to_dict() for metric in mvalues]
@@ -328,6 +321,30 @@ class Evaluator:
 
          return metrics
 
+     def evaluate(
+         self,
+         filter_: Filter | None = None,
+         as_dict: bool = False,
+     ) -> dict[MetricType, list]:
+         """
+         Computes all available metrics.
+
+         Parameters
+         ----------
+         filter_ : Filter, optional
+             An optional filter object.
+         as_dict : bool, default=False
+             An option to return metrics as dictionaries.
+
+         Returns
+         -------
+         dict[MetricType, list]
+             A dictionary mapping metric type to lists of metrics.
+         """
+         return self.compute_precision_recall_iou(
+             filter_=filter_, as_dict=as_dict
+         )
+
 
  class DataLoader:
      """
@@ -0,0 +1,278 @@
+ from dataclasses import dataclass
+ from enum import Enum
+
+ from valor_lite.schemas import Metric
+
+
+ class MetricType(Enum):
+     Precision = "Precision"
+     Recall = "Recall"
+     Accuracy = "Accuracy"
+     F1 = "F1"
+     IoU = "IoU"
+     mIoU = "mIoU"
+     ConfusionMatrix = "ConfusionMatrix"
+
+     @classmethod
+     def base(cls):
+         return [
+             cls.Precision,
+             cls.Recall,
+             cls.Accuracy,
+             cls.F1,
+             cls.IoU,
+             cls.mIoU,
+             cls.ConfusionMatrix,
+         ]
+
+
+ @dataclass
+ class _LabelValue:
+     value: float
+     label: str
+
+     def to_metric(self) -> Metric:
+         return Metric(
+             type=type(self).__name__,
+             value=self.value,
+             parameters={
+                 "label": self.label,
+             },
+         )
+
+     def to_dict(self) -> dict:
+         return self.to_metric().to_dict()
+
+
+ class Precision(_LabelValue):
+     """
+     Precision metric for a specific class label.
+
+     Precision is calculated using the number of true-positive pixels divided by
+     the sum of all true-positive and false-positive pixels.
+
+     Attributes
+     ----------
+     value : float
+         The computed precision value.
+     label : str
+         The label for which the precision is calculated.
+
+     Methods
+     -------
+     to_metric()
+         Converts the instance to a generic `Metric` object.
+     to_dict()
+         Converts the instance to a dictionary representation.
+     """
+
+     pass
+
+
+ class Recall(_LabelValue):
+     """
+     Recall metric for a specific class label.
+
+     Recall is calculated using the number of true-positive pixels divided by
+     the sum of all true-positive and false-negative pixels.
+
+     Attributes
+     ----------
+     value : float
+         The computed recall value.
+     label : str
+         The label for which the recall is calculated.
+
+     Methods
+     -------
+     to_metric()
+         Converts the instance to a generic `Metric` object.
+     to_dict()
+         Converts the instance to a dictionary representation.
+     """
+
+     pass
+
+
+ class F1(_LabelValue):
+     """
+     F1 score for a specific class label.
+
+     Attributes
+     ----------
+     value : float
+         The computed F1 score.
+     label : str
+         The label for which the F1 score is calculated.
+
+     Methods
+     -------
+     to_metric()
+         Converts the instance to a generic `Metric` object.
+     to_dict()
+         Converts the instance to a dictionary representation.
+     """
+
+     pass
+
+
+ class IoU(_LabelValue):
+     """
+     Intersection over Union (IoU) ratio for a specific class label.
+
+     Attributes
+     ----------
+     value : float
+         The computed IoU ratio.
+     label : str
+         The label for which the IoU is calculated.
+
+     Methods
+     -------
+     to_metric()
+         Converts the instance to a generic `Metric` object.
+     to_dict()
+         Converts the instance to a dictionary representation.
+     """
+
+     pass
+
+
+ @dataclass
+ class _Value:
+     value: float
+
+     def to_metric(self) -> Metric:
+         return Metric(
+             type=type(self).__name__,
+             value=self.value,
+             parameters={},
+         )
+
+     def to_dict(self) -> dict:
+         return self.to_metric().to_dict()
+
+
+ class Accuracy(_Value):
+     """
+     Accuracy metric computed over all labels.
+
+     Attributes
+     ----------
+     value : float
+         The accuracy value.
+
+     Methods
+     -------
+     to_metric()
+         Converts the instance to a generic `Metric` object.
+     to_dict()
+         Converts the instance to a dictionary representation.
+     """
+
+     pass
+
+
+ class mIoU(_Value):
+     """
+     Mean Intersection over Union (mIoU) ratio.
+
+     The mIoU value is computed by averaging IoU over all labels.
+
+     Attributes
+     ----------
+     value : float
+         The mIoU value.
+
+     Methods
+     -------
+     to_metric()
+         Converts the instance to a generic `Metric` object.
+     to_dict()
+         Converts the instance to a dictionary representation.
+     """
+
+     pass
+
+
+ @dataclass
+ class ConfusionMatrix:
+     """
+     The confusion matrix and related metrics for semantic segmentation tasks.
+
+     This class encapsulates detailed information about the model's performance, including correct
+     predictions, misclassifications, hallucinations (false positives), and missing predictions
+     (false negatives). It provides counts for each category to facilitate in-depth analysis.
+
+     Confusion Matrix Format:
+     {
+         <ground truth label>: {
+             <prediction label>: {
+                 'iou': <float>,
+             },
+         },
+     }
+
+     Hallucinations Format:
+     {
+         <prediction label>: {
+             'iou': <float>,
+         },
+     }
+
+     Missing Predictions Format:
+     {
+         <ground truth label>: {
+             'iou': <float>,
+         },
+     }
+
+     Attributes
+     ----------
+     confusion_matrix : dict
+         Nested dictionaries representing the Intersection over Union (IoU) scores for each
+         ground truth label and prediction label pair.
+     hallucinations : dict
+         Dictionary representing the pixel ratios for predicted labels that do not correspond
+         to any ground truth labels (false positives).
+     missing_predictions : dict
+         Dictionary representing the pixel ratios for ground truth labels that were not predicted
+         (false negatives).
+
+     Methods
+     -------
+     to_metric()
+         Converts the instance to a generic `Metric` object.
+     to_dict()
+         Converts the instance to a dictionary representation.
+     """
+
+     confusion_matrix: dict[
+         str,  # ground truth label value
+         dict[
+             str,  # prediction label value
+             dict[str, float],  # iou
+         ],
+     ]
+     hallucinations: dict[
+         str,  # prediction label value
+         dict[str, float],  # pixel ratio
+     ]
+     missing_predictions: dict[
+         str,  # ground truth label value
+         dict[str, float],  # pixel ratio
+     ]
+
+     def to_metric(self) -> Metric:
+         return Metric(
+             type=type(self).__name__,
+             value={
+                 "confusion_matrix": self.confusion_matrix,
+                 "hallucinations": self.hallucinations,
+                 "missing_predictions": self.missing_predictions,
+             },
+             parameters={},
+         )
+
+     def to_dict(self) -> dict:
+         return self.to_metric().to_dict()
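Editorial note on the new metric hunk above: the per-label dataclasses only carry a value and a label; their docstrings define those values in terms of pixel counts (e.g. precision = TP / (TP + FP)). A plain-numpy sketch of that arithmetic (illustrative only; the package's actual computation lives in `valor_lite.semantic_segmentation.computation`, and the re-export of these classes from the package root is an assumption):

```python
# Illustrative sketch -- relates the per-label metric values to pixel counts.
import numpy as np
from valor_lite.semantic_segmentation import IoU, Precision, Recall  # assumed re-exports

groundtruth = np.array([[True, True], [False, False]])
prediction = np.array([[True, False], [True, False]])

tp = np.count_nonzero(groundtruth & prediction)   # true-positive pixels -> 1
fp = np.count_nonzero(~groundtruth & prediction)  # false-positive pixels -> 1
fn = np.count_nonzero(groundtruth & ~prediction)  # false-negative pixels -> 1

precision = Precision(value=tp / (tp + fp), label="sky")  # 0.5
recall = Recall(value=tp / (tp + fn), label="sky")        # 0.5
iou = IoU(value=tp / (tp + fp + fn), label="sky")         # 1/3

# Assuming valor_lite.schemas.Metric serializes to the type/value/parameters
# layout shown in the README, this prints roughly:
# {'type': 'Precision', 'value': 0.5, 'parameters': {'label': 'sky'}}
print(precision.to_dict())
```

The `Accuracy` and `mIoU` classes follow the same pattern without a `label` parameter, since they are computed over all labels.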
File without changes
@@ -0,0 +1,179 @@
+ Metadata-Version: 2.1
+ Name: valor-lite
+ Version: 0.33.9
+ Summary: Compute valor metrics locally.
+ License: MIT License
+
+ Copyright (c) 2023 Striveworks
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ Project-URL: homepage, https://www.striveworks.com
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: Pillow >=9.1.0
+ Requires-Dist: tqdm
+ Requires-Dist: requests
+ Requires-Dist: numpy
+ Requires-Dist: shapely
+ Requires-Dist: importlib-metadata ; python_version < "3.8"
+ Provides-Extra: test
+ Requires-Dist: pytest ; extra == 'test'
+ Requires-Dist: coverage ; extra == 'test'
+
+ # valor-lite: Fast, local machine learning evaluation.
+
+ valor-lite is a lightweight, numpy-based library designed for fast and seamless evaluation of machine learning models. It is optimized for environments where quick, responsive evaluations are essential, whether as part of a larger service or embedded within user-facing tools.
+
+ valor-lite is maintained by Striveworks, a cutting-edge MLOps company based in Austin, Texas. If you'd like to learn more or have questions, we invite you to connect with us on [Slack](https://striveworks-public.slack.com/join/shared_invite/zt-1a0jx768y-2J1fffN~b4fXYM8GecvOhA#/shared-invite/email) or explore our [GitHub repository](https://github.com/striveworks/valor).
+
+ For additional details, be sure to check out our user [documentation](https://striveworks.github.io/valor/). We're excited to support you in making the most of Valor!
+
+ ## Usage
+
+ ### Classification
+
+ ```python
+ from valor_lite.classification import DataLoader, Classification, MetricType
+
+ classifications = [
+     Classification(
+         uid="uid0",
+         groundtruth="dog",
+         predictions=["dog", "cat", "bird"],
+         scores=[0.75, 0.2, 0.05],
+     ),
+     Classification(
+         uid="uid1",
+         groundtruth="cat",
+         predictions=["dog", "cat", "bird"],
+         scores=[0.41, 0.39, 0.1],
+     ),
+ ]
+
+ loader = DataLoader()
+ loader.add_data(classifications)
+ evaluator = loader.finalize()
+
+ metrics = evaluator.evaluate()
+
+ assert metrics[MetricType.Precision][0].to_dict() == {
+     'type': 'Precision',
+     'value': [0.5],
+     'parameters': {
+         'score_thresholds': [0.0],
+         'hardmax': True,
+         'label': 'dog'
+     }
+ }
+ ```
+
+ ### Object Detection
+
+ ```python
+ from valor_lite.object_detection import DataLoader, Detection, BoundingBox, MetricType
+
+ detections = [
+     Detection(
+         uid="uid0",
+         groundtruths=[
+             BoundingBox(
+                 xmin=0, xmax=10,
+                 ymin=0, ymax=10,
+                 labels=["dog"]
+             ),
+             BoundingBox(
+                 xmin=20, xmax=30,
+                 ymin=20, ymax=30,
+                 labels=["cat"]
+             ),
+         ],
+         predictions=[
+             BoundingBox(
+                 xmin=1, xmax=11,
+                 ymin=1, ymax=11,
+                 labels=["dog", "cat", "bird"],
+                 scores=[0.85, 0.1, 0.05]
+             ),
+             BoundingBox(
+                 xmin=21, xmax=31,
+                 ymin=21, ymax=31,
+                 labels=["dog", "cat", "bird"],
+                 scores=[0.34, 0.33, 0.33]
+             ),
+         ],
+     ),
+ ]
+
+ loader = DataLoader()
+ loader.add_bounding_boxes(detections)
+ evaluator = loader.finalize()
+
+ metrics = evaluator.evaluate()
+
+ assert metrics[MetricType.Precision][0].to_dict() == {
+     'type': 'Precision',
+     'value': 0.5,
+     'parameters': {
+         'iou_threshold': 0.5,
+         'score_threshold': 0.5,
+         'label': 'dog'
+     }
+ }
+ ```
+
+ ### Semantic Segmentation
+
+ ```python
+ import numpy as np
+ from valor_lite.semantic_segmentation import DataLoader, Segmentation, Bitmask, MetricType
+
+ segmentations = [
+     Segmentation(
+         uid="uid0",
+         groundtruths=[
+             Bitmask(
+                 mask=np.random.randint(2, size=(10,10), dtype=np.bool_),
+                 label="sky",
+             ),
+             Bitmask(
+                 mask=np.random.randint(2, size=(10,10), dtype=np.bool_),
+                 label="ground",
+             )
+         ],
+         predictions=[
+             Bitmask(
+                 mask=np.random.randint(2, size=(10,10), dtype=np.bool_),
+                 label="sky",
+             ),
+             Bitmask(
+                 mask=np.random.randint(2, size=(10,10), dtype=np.bool_),
+                 label="ground",
+             )
+         ]
+     ),
+ ]
+
+ loader = DataLoader()
+ loader.add_data(segmentations)
+ evaluator = loader.finalize()
+ metrics = evaluator.evaluate()
+
+ print(metrics[MetricType.Precision][0])
+ ```
@@ -0,0 +1,24 @@
+ valor_lite/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+ valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
+ valor_lite/classification/__init__.py,sha256=2wmmziIzUATm7MbmAcPNLXrEX5l4oeD7XBwPd9bWM3Q,506
+ valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113
+ valor_lite/classification/computation.py,sha256=qd9K7CcSGmMm_7shfX47_ZIuB-uE2LLiLMZSS_3NJTk,12093
+ valor_lite/classification/manager.py,sha256=7NKk4syQHH5hBEUDWTD0zIFkJSNdOMzJn8a8GzfBnDc,23205
+ valor_lite/classification/metric.py,sha256=m9_zD82YGl0QhuMql943YNKg67NZ6bsrR8ggs6_JZms,11728
+ valor_lite/object_detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
+ valor_lite/object_detection/annotation.py,sha256=o6VfiRobiB0ljqsNBLAYMXgi32RSIR7uTA-dgxq6zBI,8248
+ valor_lite/object_detection/computation.py,sha256=7rOfVlYDadXcJ1_S0FJRF3IPigcsR7guk_0rXeIdAOE,26919
+ valor_lite/object_detection/manager.py,sha256=k8VRqmlfWGKj1IuijbG49jXkMelE8v59pTQTCwkSMKk,38833
+ valor_lite/object_detection/metric.py,sha256=nWSqIQSBQrpl3Stz_xe2-AYoo2nrATeMuFVFmREjSNA,23833
+ valor_lite/semantic_segmentation/__init__.py,sha256=IdarTHKUuUMDvMBmInQu12Mm_NMCbql6Hf0nL5b56Ak,424
+ valor_lite/semantic_segmentation/annotation.py,sha256=CujYFdHS3fgr4Y7mEDs_u1XBmbPJzNU2CdqvjCT_d_A,2938
+ valor_lite/semantic_segmentation/computation.py,sha256=iJkEmTNmw9HwQCxSnpJkQsAdVcFriGhhu_WMks6D7tU,5122
+ valor_lite/semantic_segmentation/manager.py,sha256=aJk6edWZWKqrzl6hVmEUSZVYhHLuyihxWgAIXsCXkZ0,17361
+ valor_lite/semantic_segmentation/metric.py,sha256=Y8M3z92SaABEe9TwBUN37TFsh9DR5WoIxO-TfXVwz8I,6289
+ valor_lite/text_generation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ valor_lite-0.33.9.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+ valor_lite-0.33.9.dist-info/METADATA,sha256=dXS7Nt_WHKBaIARWZ3Ek27i26-pWyatewb3eFEnYor8,5631
+ valor_lite-0.33.9.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ valor_lite-0.33.9.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+ valor_lite-0.33.9.dist-info/RECORD,,