eval-ai-library 0.2.1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of eval-ai-library might be problematic.
Files changed (29)
  1. eval_ai_library-0.3.0.dist-info/METADATA +1042 -0
  2. eval_ai_library-0.3.0.dist-info/RECORD +34 -0
  3. eval_lib/__init__.py +19 -6
  4. eval_lib/agent_metrics/knowledge_retention_metric/knowledge_retention.py +8 -3
  5. eval_lib/agent_metrics/role_adherence_metric/role_adherence.py +12 -4
  6. eval_lib/agent_metrics/task_success_metric/task_success_rate.py +23 -23
  7. eval_lib/agent_metrics/tools_correctness_metric/tool_correctness.py +8 -2
  8. eval_lib/datagenerator/datagenerator.py +208 -12
  9. eval_lib/datagenerator/document_loader.py +29 -29
  10. eval_lib/evaluate.py +0 -22
  11. eval_lib/llm_client.py +223 -78
  12. eval_lib/metric_pattern.py +208 -152
  13. eval_lib/metrics/answer_precision_metric/answer_precision.py +8 -3
  14. eval_lib/metrics/answer_relevancy_metric/answer_relevancy.py +7 -2
  15. eval_lib/metrics/bias_metric/bias.py +12 -2
  16. eval_lib/metrics/contextual_precision_metric/contextual_precision.py +9 -4
  17. eval_lib/metrics/contextual_recall_metric/contextual_recall.py +7 -3
  18. eval_lib/metrics/contextual_relevancy_metric/contextual_relevancy.py +8 -2
  19. eval_lib/metrics/custom_metric/custom_eval.py +237 -204
  20. eval_lib/metrics/faithfulness_metric/faithfulness.py +7 -2
  21. eval_lib/metrics/geval/geval.py +8 -2
  22. eval_lib/metrics/restricted_refusal_metric/restricted_refusal.py +7 -3
  23. eval_lib/metrics/toxicity_metric/toxicity.py +8 -2
  24. eval_lib/utils.py +44 -29
  25. eval_ai_library-0.2.1.dist-info/METADATA +0 -753
  26. eval_ai_library-0.2.1.dist-info/RECORD +0 -34
  27. {eval_ai_library-0.2.1.dist-info → eval_ai_library-0.3.0.dist-info}/WHEEL +0 -0
  28. {eval_ai_library-0.2.1.dist-info → eval_ai_library-0.3.0.dist-info}/licenses/LICENSE +0 -0
  29. {eval_ai_library-0.2.1.dist-info → eval_ai_library-0.3.0.dist-info}/top_level.txt +0 -0
eval_lib/utils.py CHANGED
@@ -4,7 +4,7 @@ Utility functions for metrics evaluation
 import re
 import json
 from typing import List
-from math import exp
+import math


 """
@@ -12,47 +12,62 @@ Utility functions for metrics evaluation
 """


+def _map_temperature_to_p(
+    temperature: float,
+    t_min: float = 0.1,
+    t_max: float = 1.0,
+    p_min: float = -8.0,
+    p_max: float = 12.25,  # chosen so that t=0.5 -> p=1
+) -> float:
+    """
+    Map temperature in [t_min, t_max] linearly to power exponent p, with:
+        t=0.1 -> p=-8     (very strict)
+        t=0.5 -> p=+1     (arithmetic mean)
+        t=1.0 -> p=+12.25 (very lenient)
+    """
+    t = max(t_min, min(t_max, temperature))
+    alpha = (t - t_min) / (t_max - t_min)  # in [0, 1]
+    return p_min + alpha * (p_max - p_min)
+
+
 def score_agg(
     scores: List[float],
     temperature: float = 0.5,
-    penalty: float = 0.1
+    penalty: float = 0.1,
+    eps_for_neg_p: float = 1e-9
 ) -> float:
     """
-    Compute a softmax-weighted aggregate of scores with penalty for low-scoring items.
-
-    This function applies softmax weighting (higher scores get more weight) and then
-    applies a penalty proportional to the number of low-scoring items.
+    Aggregate verdict scores with temperature-controlled strictness via power mean.

-    Args:
-        scores: List of scores (0.0 to 1.0) to aggregate
-        temperature: Controls strictness of aggregation
-            - Lower (0.1-0.3): Strict - high scores dominate
-            - Medium (0.4-0.6): Balanced - default behavior
-            - Higher (0.8-1.5): Lenient - closer to arithmetic mean
-        penalty: Penalty factor for low-scoring items (default 0.1)
-            - Applied to scores <= 0.4
-
-    Returns:
-        Aggregated score between 0.0 and 1.0
+    - Low temperature (~0.1): strict (p negative) -> close to min
+    - Medium temperature (=0.5): balanced (p=1) -> arithmetic mean
+    - High temperature (=1.0): lenient (large positive p) -> close to max

-    Example:
-        >>> scores = [1.0, 0.9, 0.7, 0.3, 0.0]
-        >>> score_agg(scores, temperature=0.5)
-        0.73
+    Applies a penalty for "none" verdicts (0.0) only.
     """
     if not scores:
         return 0.0

-    # Compute softmax weights
-    exp_scores = [exp(s / temperature) for s in scores]
-    total = sum(exp_scores)
-    softmax_score = sum(s * e / total for s, e in zip(scores, exp_scores))
+    p = _map_temperature_to_p(temperature)
+
+    # For negative p, clamp zeros to a small epsilon to avoid 0**p blowing up
+    base = [(s if s > 0.0 else eps_for_neg_p)
+            for s in scores] if p < 0 else scores
+
+    # Power mean: M_p = ((Σ s_i^p) / n)^(1/p)
+    if abs(p) < 1e-12:
+        # Limit p -> 0 is the geometric mean
+        logs = [math.log(s if s > 0 else eps_for_neg_p) for s in base]
+        agg = math.exp(sum(logs) / len(logs))
+    else:
+        mean_pow = sum(s ** p for s in base) / len(base)
+        agg = mean_pow ** (1.0 / p)

-    # Apply penalty if many statements have low scores (≤ 0.4)
-    irrelevant = sum(1 for s in scores if s <= 0.4)
-    penalty_factor = max(0.0, 1 - penalty * irrelevant)
+    # Apply penalty for "none" verdicts only
+    none_count = sum(1 for s in scores if s == 0.0)
+    penalty_factor = max(0.0, 1 - penalty * none_count)

-    return round(softmax_score * penalty_factor, 4)
+    return round(agg * penalty_factor, 4)


 def extract_json_block(text: str) -> str:
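
To illustrate the behaviour change, here is a minimal usage sketch against the new score_agg from eval_lib/utils.py. The call signature comes from the diff above; the sample scores are made up, and the commented results are approximate values worked out from the power-mean formula, not taken from the package's own tests.

from eval_lib.utils import score_agg

# Hypothetical per-verdict scores from a metric (1.0 = fully supported, 0.0 = "none")
verdict_scores = [1.0, 0.9, 0.7, 0.3, 0.0]

# temperature=0.5 maps to p=1 (arithmetic mean 0.58); the single 0.0 verdict
# then applies a 1 - 0.1*1 = 0.9 penalty factor, so the result is ≈ 0.522
print(score_agg(verdict_scores, temperature=0.5))   # ≈ 0.522

# temperature=0.1 maps to p=-8; the clamped zero dominates the power mean,
# so the aggregate collapses towards the minimum and rounds to 0.0
print(score_agg(verdict_scores, temperature=0.1))   # ≈ 0.0

# temperature=1.0 maps to p=12.25; the high scores dominate, so the aggregate
# is pulled towards the maximum (roughly 0.8 after the same 0.9 penalty)
print(score_agg(verdict_scores, temperature=1.0))   # ≈ 0.8

Compared with the removed softmax weighting, the power mean makes low temperatures behave like a near-minimum (any zero verdict is heavily punished) and high temperatures like a near-maximum, while temperature 0.5 reduces to the plain average.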