valor-lite 0.34.0__py3-none-any.whl → 0.34.2__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.
Potentially problematic release.
This version of valor-lite might be problematic.
- valor_lite/classification/computation.py +5 -3
- valor_lite/classification/numpy_compatibility.py +13 -0
- valor_lite/object_detection/computation.py +2 -2
- valor_lite/object_detection/manager.py +9 -9
- valor_lite/text_generation/computation.py +5 -4
- {valor_lite-0.34.0.dist-info → valor_lite-0.34.2.dist-info}/METADATA +13 -10
- {valor_lite-0.34.0.dist-info → valor_lite-0.34.2.dist-info}/RECORD +9 -8
- {valor_lite-0.34.0.dist-info → valor_lite-0.34.2.dist-info}/WHEEL +1 -1
- {valor_lite-0.34.0.dist-info → valor_lite-0.34.2.dist-info}/top_level.txt +0 -0
valor_lite/classification/computation.py

@@ -1,6 +1,8 @@
 import numpy as np
 from numpy.typing import NDArray
 
+import valor_lite.classification.numpy_compatibility as npc
+
 
 def _compute_rocauc(
     data: NDArray[np.float64],
@@ -9,7 +11,7 @@ def _compute_rocauc(
     n_labels: int,
     mask_matching_labels: NDArray[np.bool_],
     pd_labels: NDArray[np.int32],
-):
+) -> tuple[NDArray[np.float64], float]:
     """
     Compute ROCAUC and mean ROCAUC.
     """
@@ -56,12 +58,12 @@ def _compute_rocauc(
     np.maximum.accumulate(tpr, axis=1, out=tpr)
 
     # compute rocauc
-    rocauc =
+    rocauc = npc.trapezoid(x=fpr, y=tpr, axis=1)
 
     # compute mean rocauc
     mean_rocauc = rocauc.mean()
 
-    return rocauc, mean_rocauc
+    return rocauc, mean_rocauc  # type: ignore[reportReturnType]
 
 
 def compute_precision_recall_rocauc(
valor_lite/classification/numpy_compatibility.py (new file)

@@ -0,0 +1,13 @@
+import numpy as np
+from numpy.typing import NDArray
+
+try:
+    _numpy_trapezoid = np.trapezoid  # numpy v2
+except AttributeError:
+    _numpy_trapezoid = np.trapz  # numpy v1
+
+
+def trapezoid(
+    x: NDArray[np.float64], y: NDArray[np.float64], axis: int
+) -> NDArray[np.float64]:
+    return _numpy_trapezoid(x=x, y=y, axis=axis)  # type: ignore - NumPy compatibility
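For context, a minimal usage sketch of the new shim (not part of the wheel; the FPR/TPR values below are illustrative only):

import numpy as np

import valor_lite.classification.numpy_compatibility as npc

# one ROC curve with three points; the trapezoid rule gives
# 0.5*(0.0 + 0.8)*0.5 + 0.5*(0.8 + 1.0)*0.5 = 0.65
fpr = np.array([[0.0, 0.5, 1.0]])
tpr = np.array([[0.0, 0.8, 1.0]])

# npc.trapezoid resolves to np.trapezoid on NumPy 2.x and falls back to np.trapz on
# NumPy 1.x, so the ROCAUC code above works regardless of the installed NumPy major version.
print(npc.trapezoid(x=fpr, y=tpr, axis=1))  # [0.65]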
valor_lite/object_detection/computation.py

@@ -560,12 +560,12 @@ def compute_precion_recall(
     )
 
     return (
-        ap_results,
+        ap_results,  # type: ignore[reportReturnType]
         ar_results,
         accuracy,
         counts,
         pr_curve,
-    )
+    )
 
 
 def _count_with_examples(
valor_lite/object_detection/manager.py

@@ -195,8 +195,8 @@ class Evaluator:
 
     def compute_precision_recall(
         self,
-        iou_thresholds: list[float]
-        score_thresholds: list[float]
+        iou_thresholds: list[float],
+        score_thresholds: list[float],
         filter_: Filter | None = None,
     ) -> dict[MetricType, list[Metric]]:
         """
@@ -240,9 +240,9 @@ class Evaluator:
 
     def compute_confusion_matrix(
         self,
-        iou_thresholds: list[float]
-        score_thresholds: list[float]
-        number_of_examples: int
+        iou_thresholds: list[float],
+        score_thresholds: list[float],
+        number_of_examples: int,
         filter_: Filter | None = None,
     ) -> list[Metric]:
         """
@@ -254,7 +254,7 @@ class Evaluator:
             A list of IOU thresholds to compute metrics over.
         score_thresholds : list[float]
             A list of score thresholds to compute metrics over.
-        number_of_examples : int
+        number_of_examples : int
            Maximum number of annotation examples to return in ConfusionMatrix.
        filter_ : Filter, optional
            An optional filter object.
@@ -295,7 +295,7 @@ class Evaluator:
 
     def evaluate(
         self,
-        iou_thresholds: list[float] = [0.
+        iou_thresholds: list[float] = [0.1, 0.5, 0.75],
         score_thresholds: list[float] = [0.5],
         number_of_examples: int = 0,
         filter_: Filter | None = None,
@@ -305,9 +305,9 @@ class Evaluator:
 
         Parameters
         ----------
-        iou_thresholds : list[float]
+        iou_thresholds : list[float], default=[0.1, 0.5, 0.75]
             A list of IOU thresholds to compute metrics over.
-        score_thresholds : list[float]
+        score_thresholds : list[float], default=[0.5]
             A list of score thresholds to compute metrics over.
         number_of_examples : int, default=0
             Maximum number of annotation examples to return in ConfusionMatrix.
valor_lite/text_generation/computation.py

@@ -1,7 +1,3 @@
-import evaluate
-from nltk.tokenize import RegexpTokenizer
-from nltk.translate import bleu_score
-
 from valor_lite.text_generation.llm.generation import (
     generate_answer_correctness_verdicts,
     generate_answer_relevance_verdicts,
@@ -551,6 +547,8 @@ calculate_rouge_scores(
     use_stemmer: bool, default=False
         If True, uses Porter stemmer to strip word suffixes. Defaults to False.
     """
+    import evaluate
+
     rouge = evaluate.load("rouge")
 
     metrics = rouge.compute(
@@ -589,6 +587,9 @@ calculate_sentence_bleu(
         higher/lower order ngrams, use customized weights. Example: when accounting
         for up to 5-grams with uniform weights (this is called BLEU-5) use [1/5]*5
     """
+    from nltk.tokenize import RegexpTokenizer
+    from nltk.translate import bleu_score
+
     if len(weights) == 0:
         raise ValueError("At least one weight should be defined.")
 
{valor_lite-0.34.0.dist-info → valor_lite-0.34.2.dist-info}/METADATA

@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: valor-lite
-Version: 0.34.0
+Version: 0.34.2
 Summary: Evaluate machine learning models.
 Project-URL: homepage, https://www.striveworks.com
 Requires-Python: >=3.10
@@ -8,17 +8,14 @@ Description-Content-Type: text/markdown
 Requires-Dist: numpy
 Requires-Dist: tqdm
 Requires-Dist: shapely
-
-Requires-Dist:
-Requires-Dist:
+Provides-Extra: nlp
+Requires-Dist: evaluate; extra == "nlp"
+Requires-Dist: nltk; extra == "nlp"
+Requires-Dist: rouge_score; extra == "nlp"
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.0; extra == "mistral"
 Provides-Extra: openai
 Requires-Dist: openai; extra == "openai"
-Provides-Extra: test
-Requires-Dist: pytest; extra == "test"
-Requires-Dist: coverage; extra == "test"
-Requires-Dist: pre-commit; extra == "test"
 Provides-Extra: docs
 Requires-Dist: mkdocs; extra == "docs"
 Requires-Dist: mkdocs-material; extra == "docs"
@@ -26,8 +23,14 @@ Requires-Dist: mkdocstrings; extra == "docs"
 Requires-Dist: mkdocstrings-python; extra == "docs"
 Requires-Dist: mkdocs-include-dir-to-nav; extra == "docs"
 Requires-Dist: mkdocs-swagger-ui-tag; extra == "docs"
+Provides-Extra: test
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: coverage; extra == "test"
+Requires-Dist: pre-commit; extra == "test"
+Provides-Extra: benchmark
+Requires-Dist: requests; extra == "benchmark"
 Provides-Extra: dev
-Requires-Dist: valor-lite[docs,mistral,openai,test]; extra == "dev"
+Requires-Dist: valor-lite[benchmark,docs,mistral,nlp,openai,test]; extra == "dev"
 
 # valor-lite: Fast, local machine learning evaluation.
 
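Together with the deferred imports in text_generation/computation.py, the new "nlp" extra makes the ROUGE/BLEU dependencies opt-in. A hedged sketch of typical consumer usage (the extra names come from the metadata above; the exact commands and import path are assumptions, not documented behavior):

# pip install valor-lite               -> base install, no evaluate/nltk/rouge_score
# pip install "valor-lite[nlp]"        -> adds evaluate, nltk, rouge_score
import valor_lite.text_generation.computation as tg  # imports without the NLP extras installed

# evaluate and nltk are only imported once these functions are actually called:
# tg.calculate_rouge_scores(...)
# tg.calculate_sentence_bleu(...)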
{valor_lite-0.34.0.dist-info → valor_lite-0.34.2.dist-info}/RECORD

@@ -4,14 +4,15 @@ valor_lite/profiling.py,sha256=TLIROA1qccFw9NoEkMeQcrvvGGO75c4K5yTIWoCUix8,11746
 valor_lite/schemas.py,sha256=pB0MrPx5qFLbwBWDiOUUm-vmXdWvbJLFCBmKgbcbI5g,198
 valor_lite/classification/__init__.py,sha256=8MI8bGwCxYGqRP7KxG7ezhYv4qQ5947XGvvlF8WPM5g,392
 valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113
-valor_lite/classification/computation.py,sha256=
+valor_lite/classification/computation.py,sha256=CyfeDuxupQXnoRL7t3F6UMll03ZXhNRfZSq0s8QrzAc,12256
 valor_lite/classification/manager.py,sha256=cZ6-DKao59QqF0JF_U26tBoydpCElAAH8rRyX_Kc6bc,16618
 valor_lite/classification/metric.py,sha256=_mW3zynmpW8jUIhK2OeX4usdftHgHM9_l7EAbEe2N3w,12288
+valor_lite/classification/numpy_compatibility.py,sha256=roqtTetsm1_HxuaejrthQdydjsRIy-FpXpGb86cLh_E,365
 valor_lite/classification/utilities.py,sha256=eG-Qhd213uf2GXuuqsPxCgBRBFV-z_ADbzneF1kE368,6964
 valor_lite/object_detection/__init__.py,sha256=Ql8rju2q7y0Zd9zFvtBJDRhgQFDm1RSYkTsyH3ZE6pA,648
 valor_lite/object_detection/annotation.py,sha256=x9bsl8b75yvkMByXXiIYI9d9T03olDqtykSvKJc3aFw,7729
-valor_lite/object_detection/computation.py,sha256=
-valor_lite/object_detection/manager.py,sha256=
+valor_lite/object_detection/computation.py,sha256=zfVTl_TDK3rho3282VcruTvBK6DqbxduP7tE7esMFUY,28345
+valor_lite/object_detection/manager.py,sha256=uo9o0gWBQUkTTgwTluhXk0ouVDW8qiyrqTwJD6PJDKE,23043
 valor_lite/object_detection/metric.py,sha256=npK2sxiwCUTKlRlFym1AlZTvP9herf9lakbsBDwljGM,24901
 valor_lite/object_detection/utilities.py,sha256=42RRyP6L3eWtDY_f7qs7f0WTjhcibmUBu2I4yAwupF0,16456
 valor_lite/semantic_segmentation/__init__.py,sha256=BhTUbwbdJa1FdS4ZA3QSIZ8TuJmdGGLGCd5hX6SzKa4,297
@@ -23,7 +24,7 @@ valor_lite/semantic_segmentation/metric.py,sha256=T9RfPJf4WgqGQTXYvSy08vJG5bjXXJ
 valor_lite/semantic_segmentation/utilities.py,sha256=UD0X-iCWMR8Rmw2YaP4HM3lxwhYwo-yNGzF-taAJ8RA,2959
 valor_lite/text_generation/__init__.py,sha256=pGhpWCSZjLM0pPHCtPykAfos55B8ie3mi9EzbNxfj-U,356
 valor_lite/text_generation/annotation.py,sha256=O5aXiwCS4WjA-fqn4ly-O0MsTHoIOmqxqCaAp9IeI3M,1270
-valor_lite/text_generation/computation.py,sha256=
+valor_lite/text_generation/computation.py,sha256=hGDkPfzWY9SDTdozd-nArexJ3ZSNlCIWqHGoD8vO2Cc,18652
 valor_lite/text_generation/manager.py,sha256=C4QwvronGHXmYSkaRmUGy7TN0C0aeyDx9Hb-ClNYXK4,24810
 valor_lite/text_generation/metric.py,sha256=C9gbWejjOJ23JVLecuUhYW5rkx30NUCfRtgsM46uMds,10409
 valor_lite/text_generation/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -33,7 +34,7 @@ valor_lite/text_generation/llm/instructions.py,sha256=fz2onBZZWcl5W8iy7zEWkPGU9N
 valor_lite/text_generation/llm/integrations.py,sha256=-rTfdAjq1zH-4ixwYuMQEOQ80pIFzMTe0BYfroVx3Pg,6974
 valor_lite/text_generation/llm/utilities.py,sha256=bjqatGgtVTcl1PrMwiDKTYPGJXKrBrx7PDtzIblGSys,1178
 valor_lite/text_generation/llm/validators.py,sha256=Wzr5RlfF58_2wOU-uTw7C8skan_fYdhy4Gfn0jSJ8HM,2700
-valor_lite-0.34.
-valor_lite-0.34.
-valor_lite-0.34.
-valor_lite-0.34.
+valor_lite-0.34.2.dist-info/METADATA,sha256=hHwCwG9A_jQzo4tTo_LABVpK3eaFvWExPqEgpT1nQLQ,5062
+valor_lite-0.34.2.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+valor_lite-0.34.2.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.34.2.dist-info/RECORD,,
{valor_lite-0.34.0.dist-info → valor_lite-0.34.2.dist-info}/top_level.txt

File without changes