valor-lite 0.34.0__tar.gz → 0.34.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {valor_lite-0.34.0 → valor_lite-0.34.2}/PKG-INFO +13 -10
  2. {valor_lite-0.34.0 → valor_lite-0.34.2}/pyproject.toml +4 -5
  3. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/classification/computation.py +5 -3
  4. valor_lite-0.34.2/valor_lite/classification/numpy_compatibility.py +13 -0
  5. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/computation.py +2 -2
  6. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/manager.py +9 -9
  7. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/computation.py +5 -4
  8. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/PKG-INFO +13 -10
  9. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/SOURCES.txt +1 -0
  10. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/requires.txt +9 -4
  11. {valor_lite-0.34.0 → valor_lite-0.34.2}/README.md +0 -0
  12. {valor_lite-0.34.0 → valor_lite-0.34.2}/setup.cfg +0 -0
  13. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/LICENSE +0 -0
  14. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/__init__.py +0 -0
  15. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/classification/__init__.py +0 -0
  16. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/classification/annotation.py +0 -0
  17. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/classification/manager.py +0 -0
  18. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/classification/metric.py +0 -0
  19. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/classification/utilities.py +0 -0
  20. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/__init__.py +0 -0
  21. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/annotation.py +0 -0
  22. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/metric.py +0 -0
  23. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/utilities.py +0 -0
  24. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/profiling.py +0 -0
  25. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/schemas.py +0 -0
  26. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/semantic_segmentation/__init__.py +0 -0
  27. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/semantic_segmentation/annotation.py +0 -0
  28. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/semantic_segmentation/benchmark.py +0 -0
  29. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/semantic_segmentation/computation.py +0 -0
  30. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/semantic_segmentation/manager.py +0 -0
  31. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/semantic_segmentation/metric.py +0 -0
  32. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/semantic_segmentation/utilities.py +0 -0
  33. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/__init__.py +0 -0
  34. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/annotation.py +0 -0
  35. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/llm/__init__.py +0 -0
  36. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/llm/exceptions.py +0 -0
  37. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/llm/generation.py +0 -0
  38. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/llm/instructions.py +0 -0
  39. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/llm/integrations.py +0 -0
  40. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/llm/utilities.py +0 -0
  41. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/llm/validators.py +0 -0
  42. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/manager.py +0 -0
  43. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/metric.py +0 -0
  44. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/dependency_links.txt +0 -0
  45. {valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/top_level.txt +0 -0
{valor_lite-0.34.0 → valor_lite-0.34.2}/PKG-INFO

@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: valor-lite
-Version: 0.34.0
+Version: 0.34.2
 Summary: Evaluate machine learning models.
 Project-URL: homepage, https://www.striveworks.com
 Requires-Python: >=3.10
@@ -8,17 +8,14 @@ Description-Content-Type: text/markdown
 Requires-Dist: numpy
 Requires-Dist: tqdm
 Requires-Dist: shapely
-Requires-Dist: evaluate
-Requires-Dist: nltk
-Requires-Dist: rouge_score
+Provides-Extra: nlp
+Requires-Dist: evaluate; extra == "nlp"
+Requires-Dist: nltk; extra == "nlp"
+Requires-Dist: rouge_score; extra == "nlp"
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.0; extra == "mistral"
 Provides-Extra: openai
 Requires-Dist: openai; extra == "openai"
-Provides-Extra: test
-Requires-Dist: pytest; extra == "test"
-Requires-Dist: coverage; extra == "test"
-Requires-Dist: pre-commit; extra == "test"
 Provides-Extra: docs
 Requires-Dist: mkdocs; extra == "docs"
 Requires-Dist: mkdocs-material; extra == "docs"
@@ -26,8 +23,14 @@ Requires-Dist: mkdocstrings; extra == "docs"
 Requires-Dist: mkdocstrings-python; extra == "docs"
 Requires-Dist: mkdocs-include-dir-to-nav; extra == "docs"
 Requires-Dist: mkdocs-swagger-ui-tag; extra == "docs"
+Provides-Extra: test
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: coverage; extra == "test"
+Requires-Dist: pre-commit; extra == "test"
+Provides-Extra: benchmark
+Requires-Dist: requests; extra == "benchmark"
 Provides-Extra: dev
-Requires-Dist: valor-lite[docs,mistral,openai,test]; extra == "dev"
+Requires-Dist: valor-lite[benchmark,docs,mistral,nlp,openai,test]; extra == "dev"
 
 # valor-lite: Fast, local machine learning evaluation.
 
{valor_lite-0.34.0 → valor_lite-0.34.2}/pyproject.toml

@@ -9,9 +9,6 @@ dependencies = [
     "numpy",
     "tqdm",
     "shapely",
-    "evaluate",
-    "nltk",
-    "rouge_score",
 ]
 
 [project.urls]
@@ -22,9 +19,9 @@ requires = ["setuptools>=61.0", "setuptools_scm[toml]>=6.2"]
 build-backend = "setuptools.build_meta"
 
 [project.optional-dependencies]
+nlp = ["evaluate", "nltk", "rouge_score"]
 mistral = ["mistralai >= 1.0"]
 openai = ["openai"]
-test = ["pytest", "coverage", "pre-commit"]
 docs = [
     "mkdocs",
     "mkdocs-material",
@@ -33,7 +30,9 @@ docs = [
     "mkdocs-include-dir-to-nav",
     "mkdocs-swagger-ui-tag",
 ]
-dev = ["valor-lite[openai, mistral, test, docs]"]
+test = ["pytest", "coverage", "pre-commit"]
+benchmark = ["requests"]
+dev = ["valor-lite[nlp, openai, mistral, benchmark, test, docs]"]
 
 [tool.black]
 line-length = 79
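
Downstream effect of this split (not part of the diff itself): a bare "pip install valor-lite" no longer brings in evaluate, nltk, or rouge_score; they now arrive only via the new nlp extra (pip install "valor-lite[nlp]"), and the test and benchmark dependencies are likewise opt-in. A minimal, hypothetical guard a consumer of the text-generation metrics might add; the check and its message are illustrative, not valor-lite API:

    # Hypothetical fail-fast check for the optional "nlp" extra.
    import importlib.util

    if importlib.util.find_spec("evaluate") is None:
        raise RuntimeError("missing optional deps: pip install 'valor-lite[nlp]'")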
{valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/classification/computation.py

@@ -1,6 +1,8 @@
 import numpy as np
 from numpy.typing import NDArray
 
+import valor_lite.classification.numpy_compatibility as npc
+
 
 def _compute_rocauc(
     data: NDArray[np.float64],
@@ -9,7 +11,7 @@ def _compute_rocauc(
     n_labels: int,
     mask_matching_labels: NDArray[np.bool_],
     pd_labels: NDArray[np.int32],
-):
+) -> tuple[NDArray[np.float64], float]:
     """
     Compute ROCAUC and mean ROCAUC.
     """
@@ -56,12 +58,12 @@ def _compute_rocauc(
     np.maximum.accumulate(tpr, axis=1, out=tpr)
 
     # compute rocauc
-    rocauc = np.trapz(x=fpr, y=tpr, axis=1)  # type: ignore - numpy will be switching to `trapezoid` in the future.
+    rocauc = npc.trapezoid(x=fpr, y=tpr, axis=1)
 
     # compute mean rocauc
     mean_rocauc = rocauc.mean()
 
-    return rocauc, mean_rocauc
+    return rocauc, mean_rocauc  # type: ignore[reportReturnType]
 
 
 def compute_precision_recall_rocauc(
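
Background for this hunk: NumPy 2.0 removed np.trapz in favor of np.trapezoid, so the old call fails on NumPy 2.x; the new npc.trapezoid shim (added in the next file) picks whichever name exists. A self-contained sketch of the same integration for a single ROC curve; the fpr/tpr values are invented for illustration:

    # Illustrative trapezoidal ROC-AUC for one label's curve.
    import numpy as np

    fpr = np.array([0.0, 0.25, 0.5, 1.0])  # non-decreasing false-positive rates
    tpr = np.array([0.0, 0.6, 0.8, 1.0])   # matching true-positive rates
    try:
        trapezoid = np.trapezoid  # NumPy >= 2.0
    except AttributeError:
        trapezoid = np.trapz      # NumPy 1.x
    print(trapezoid(y=tpr, x=fpr))  # 0.7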
valor_lite-0.34.2/valor_lite/classification/numpy_compatibility.py (new file)

@@ -0,0 +1,13 @@
+import numpy as np
+from numpy.typing import NDArray
+
+try:
+    _numpy_trapezoid = np.trapezoid  # numpy v2
+except AttributeError:
+    _numpy_trapezoid = np.trapz  # numpy v1
+
+
+def trapezoid(
+    x: NDArray[np.float64], y: NDArray[np.float64], axis: int
+) -> NDArray[np.float64]:
+    return _numpy_trapezoid(x=x, y=y, axis=axis)  # type: ignore - NumPy compatibility
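
Assumed usage of the new shim, exercising only names that appear in the diff; the result should be identical on NumPy 1.x and 2.x:

    import numpy as np
    import valor_lite.classification.numpy_compatibility as npc

    x = np.array([[0.0, 0.5, 1.0]])
    y = np.array([[0.0, 1.0, 1.0]])
    print(npc.trapezoid(x=x, y=y, axis=1))  # [0.75]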
{valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/computation.py

@@ -560,12 +560,12 @@ def compute_precion_recall(
     )
 
     return (
-        ap_results,
+        ap_results,  # type: ignore[reportReturnType]
         ar_results,
         accuracy,
         counts,
         pr_curve,
-    )  # type: ignore[reportReturnType]
+    )
 
 
 def _count_with_examples(
{valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/object_detection/manager.py

@@ -195,8 +195,8 @@ class Evaluator:
 
     def compute_precision_recall(
         self,
-        iou_thresholds: list[float] = [0.5, 0.75, 0.9],
-        score_thresholds: list[float] = [0.5],
+        iou_thresholds: list[float],
+        score_thresholds: list[float],
         filter_: Filter | None = None,
     ) -> dict[MetricType, list[Metric]]:
         """
@@ -240,9 +240,9 @@ class Evaluator:
 
     def compute_confusion_matrix(
         self,
-        iou_thresholds: list[float] = [0.5, 0.75, 0.9],
-        score_thresholds: list[float] = [0.5],
-        number_of_examples: int = 0,
+        iou_thresholds: list[float],
+        score_thresholds: list[float],
+        number_of_examples: int,
         filter_: Filter | None = None,
     ) -> list[Metric]:
         """
@@ -254,7 +254,7 @@ class Evaluator:
             A list of IOU thresholds to compute metrics over.
         score_thresholds : list[float]
             A list of score thresholds to compute metrics over.
-        number_of_examples : int, default=0
+        number_of_examples : int
            Maximum number of annotation examples to return in ConfusionMatrix.
         filter_ : Filter, optional
             An optional filter object.
@@ -295,7 +295,7 @@ class Evaluator:
 
     def evaluate(
         self,
-        iou_thresholds: list[float] = [0.5, 0.75, 0.9],
+        iou_thresholds: list[float] = [0.1, 0.5, 0.75],
         score_thresholds: list[float] = [0.5],
         number_of_examples: int = 0,
         filter_: Filter | None = None,
@@ -305,9 +305,9 @@ class Evaluator:
 
         Parameters
        ----------
-        iou_thresholds : list[float]
+        iou_thresholds : list[float], default=[0.1, 0.5, 0.75]
             A list of IOU thresholds to compute metrics over.
-        score_thresholds : list[float]
+        score_thresholds : list[float], default=[0.5]
             A list of score thresholds to compute metrics over.
         number_of_examples : int, default=0
             Maximum number of annotation examples to return in ConfusionMatrix.
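
Two caller-visible changes here: compute_precision_recall and compute_confusion_matrix lose their default thresholds and now require them explicitly, and evaluate() keeps defaults but its IoU thresholds change from [0.5, 0.75, 0.9] to [0.1, 0.5, 0.75]. A hedged migration sketch; "evaluator" stands in for an already-constructed Evaluator instance:

    # Sketch: pass the old 0.34.0 defaults explicitly to preserve behavior.
    metrics = evaluator.compute_precision_recall(
        iou_thresholds=[0.5, 0.75, 0.9],
        score_thresholds=[0.5],
    )
    matrices = evaluator.compute_confusion_matrix(
        iou_thresholds=[0.5, 0.75, 0.9],
        score_thresholds=[0.5],
        number_of_examples=0,
    )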
{valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite/text_generation/computation.py

@@ -1,7 +1,3 @@
-import evaluate
-from nltk.tokenize import RegexpTokenizer
-from nltk.translate import bleu_score
-
 from valor_lite.text_generation.llm.generation import (
     generate_answer_correctness_verdicts,
     generate_answer_relevance_verdicts,
@@ -551,6 +547,8 @@ calculate_rouge_scores(
     use_stemmer: bool, default=False
         If True, uses Porter stemmer to strip word suffixes. Defaults to False.
     """
+    import evaluate
+
     rouge = evaluate.load("rouge")
 
     metrics = rouge.compute(
@@ -589,6 +587,9 @@ def calculate_sentence_bleu(
     higher/lower order ngrams, use customized weights. Example: when accounting
     for up to 5-grams with uniform weights (this is called BLEU-5) use [1/5]*5
     """
+    from nltk.tokenize import RegexpTokenizer
+    from nltk.translate import bleu_score
+
     if len(weights) == 0:
         raise ValueError("At least one weight should be defined.")
 
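These hunks defer the evaluate and nltk imports to call time, matching the new nlp extra: importing valor_lite.text_generation now works without those packages installed, and an ImportError is raised only when a metric that needs them runs. A minimal sketch of the same pattern; rouge_like_metric is a hypothetical name, while evaluate.load and rouge.compute mirror the calls in the diff:

    # Sketch: optional heavy dependency imported inside the function.
    def rouge_like_metric(prediction: str, reference: str) -> dict:
        import evaluate  # ImportError surfaces here if the "nlp" extra is absent

        rouge = evaluate.load("rouge")
        return rouge.compute(predictions=[prediction], references=[reference])
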
{valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: valor-lite
-Version: 0.34.0
+Version: 0.34.2
 Summary: Evaluate machine learning models.
 Project-URL: homepage, https://www.striveworks.com
 Requires-Python: >=3.10
@@ -8,17 +8,14 @@ Description-Content-Type: text/markdown
 Requires-Dist: numpy
 Requires-Dist: tqdm
 Requires-Dist: shapely
-Requires-Dist: evaluate
-Requires-Dist: nltk
-Requires-Dist: rouge_score
+Provides-Extra: nlp
+Requires-Dist: evaluate; extra == "nlp"
+Requires-Dist: nltk; extra == "nlp"
+Requires-Dist: rouge_score; extra == "nlp"
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.0; extra == "mistral"
 Provides-Extra: openai
 Requires-Dist: openai; extra == "openai"
-Provides-Extra: test
-Requires-Dist: pytest; extra == "test"
-Requires-Dist: coverage; extra == "test"
-Requires-Dist: pre-commit; extra == "test"
 Provides-Extra: docs
 Requires-Dist: mkdocs; extra == "docs"
 Requires-Dist: mkdocs-material; extra == "docs"
@@ -26,8 +23,14 @@ Requires-Dist: mkdocstrings; extra == "docs"
 Requires-Dist: mkdocstrings-python; extra == "docs"
 Requires-Dist: mkdocs-include-dir-to-nav; extra == "docs"
 Requires-Dist: mkdocs-swagger-ui-tag; extra == "docs"
+Provides-Extra: test
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: coverage; extra == "test"
+Requires-Dist: pre-commit; extra == "test"
+Provides-Extra: benchmark
+Requires-Dist: requests; extra == "benchmark"
 Provides-Extra: dev
-Requires-Dist: valor-lite[docs,mistral,openai,test]; extra == "dev"
+Requires-Dist: valor-lite[benchmark,docs,mistral,nlp,openai,test]; extra == "dev"
 
 # valor-lite: Fast, local machine learning evaluation.
 
{valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/SOURCES.txt

@@ -14,6 +14,7 @@ valor_lite/classification/annotation.py
 valor_lite/classification/computation.py
 valor_lite/classification/manager.py
 valor_lite/classification/metric.py
+valor_lite/classification/numpy_compatibility.py
 valor_lite/classification/utilities.py
 valor_lite/object_detection/__init__.py
 valor_lite/object_detection/annotation.py
{valor_lite-0.34.0 → valor_lite-0.34.2}/valor_lite.egg-info/requires.txt

@@ -1,12 +1,12 @@
 numpy
 tqdm
 shapely
-evaluate
-nltk
-rouge_score
+
+[benchmark]
+requests
 
 [dev]
-valor-lite[docs,mistral,openai,test]
+valor-lite[benchmark,docs,mistral,nlp,openai,test]
 
 [docs]
 mkdocs
@@ -19,6 +19,11 @@ mkdocs-swagger-ui-tag
 [mistral]
 mistralai>=1.0
 
+[nlp]
+evaluate
+nltk
+rouge_score
+
 [openai]
 openai
 