azure-ai-evaluation 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.

Potentially problematic release.

This version of azure-ai-evaluation might be problematic.
Files changed (31)
  1. azure/ai/evaluation/_azure/_clients.py +24 -8
  2. azure/ai/evaluation/_azure/_models.py +2 -2
  3. azure/ai/evaluation/_constants.py +18 -0
  4. azure/ai/evaluation/_evaluate/_batch_run/__init__.py +2 -1
  5. azure/ai/evaluation/_evaluate/_eval_run.py +3 -1
  6. azure/ai/evaluation/_evaluate/_evaluate.py +69 -12
  7. azure/ai/evaluation/_evaluate/_utils.py +27 -0
  8. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +46 -25
  9. azure/ai/evaluation/_evaluators/_common/__init__.py +2 -0
  10. azure/ai/evaluation/_evaluators/_common/_base_eval.py +69 -4
  11. azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +61 -0
  12. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +7 -1
  13. azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py +49 -0
  14. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +5 -42
  15. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +2 -0
  16. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +2 -0
  17. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +2 -0
  18. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +2 -0
  19. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +61 -68
  20. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +45 -23
  21. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +55 -34
  22. azure/ai/evaluation/_evaluators/_qa/_qa.py +32 -27
  23. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +44 -23
  24. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +41 -81
  25. azure/ai/evaluation/_version.py +1 -1
  26. azure/ai/evaluation/simulator/_simulator.py +21 -13
  27. {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.2.0.dist-info}/METADATA +71 -7
  28. {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.2.0.dist-info}/RECORD +31 -29
  29. {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.2.0.dist-info}/NOTICE.txt +0 -0
  30. {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.2.0.dist-info}/WHEEL +0 -0
  31. {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.2.0.dist-info}/top_level.txt +0 -0

azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py (new file)
@@ -0,0 +1,61 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from concurrent.futures import as_completed
+from typing import TypeVar, Dict, List
+
+from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
+from typing_extensions import override
+
+from azure.ai.evaluation._evaluators._common import EvaluatorBase
+
+T = TypeVar("T")
+
+
+class MultiEvaluatorBase(EvaluatorBase[T]):
+    """
+    Base class for evaluators that contain and run multiple other evaluators to produce a
+    suite of metrics.
+
+    Child classes still need to implement the __call__ methods, but they shouldn't need a _do_eval.
+
+    :param evaluators: The list of evaluators to run when this evaluator is called.
+    :type evaluators: List[~azure.ai.evaluation._evaluators._common.EvaluatorBase]
+    :param kwargs: Additional arguments to pass to the evaluator.
+    :type kwargs: Any
+    :return: An evaluator that runs multiple other evaluators and combines their results.
+    """
+
+    def __init__(self, evaluators: List[EvaluatorBase[T]], **kwargs):
+        super().__init__()
+        self._parallel = kwargs.pop("_parallel", True)
+        self._evaluators = evaluators
+
+    @override
+    async def _do_eval(self, eval_input: Dict) -> Dict[str, T]:
+        """Run each evaluator, possibly in parallel, and combine the results into
+        a single large dictionary containing each evaluation. Inputs are passed
+        directly to each evaluator without additional processing.
+
+
+        :param eval_input: The input to the evaluation function.
+        :type eval_input: Dict
+        :return: The evaluation result.
+        :rtype: Dict
+        """
+        results: Dict[str, T] = {}
+        if self._parallel:
+            with ThreadPoolExecutor() as executor:
+                # pylint: disable=no-value-for-parameter
+                futures = {executor.submit(evaluator, **eval_input): evaluator for evaluator in self._evaluators}
+
+                for future in as_completed(futures):
+                    results.update(future.result())
+        else:
+            for evaluator in self._evaluators:
+                result = evaluator(**eval_input)
+                # Ignore is to avoid mypy getting upset over the amount of duck-typing
+                # that's going on to shove evaluators around like this.
+                results.update(result)  # type: ignore[arg-type]
+
+        return results
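
To make the new base class concrete, here is a minimal sketch of a hypothetical composite evaluator built on it, mirroring the pattern ContentSafetyEvaluator adopts later in this diff; the class name and the choice of BLEU and GLEU as child evaluators are illustrative assumptions, not part of the release.

    # Illustrative sketch only; not part of the package. Assumes azure-ai-evaluation 1.2.0 is installed.
    from typing import Dict, Union

    from typing_extensions import overload, override

    from azure.ai.evaluation import BleuScoreEvaluator, GleuScoreEvaluator
    from azure.ai.evaluation._evaluators._common import MultiEvaluatorBase


    class TextOverlapEvaluator(MultiEvaluatorBase[Union[str, float]]):
        """Hypothetical composite evaluator that runs BLEU and GLEU together."""

        def __init__(self, **kwargs):
            evaluators = [BleuScoreEvaluator(), GleuScoreEvaluator()]
            super().__init__(evaluators=evaluators, **kwargs)

        @overload  # type: ignore
        def __call__(self, *, response: str, ground_truth: str) -> Dict[str, float]: ...

        @override
        def __call__(self, *args, **kwargs):
            # MultiEvaluatorBase._do_eval fans the inputs out to each child evaluator
            # (in parallel unless _parallel=False is passed) and merges the result dicts.
            return super().__call__(*args, **kwargs)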

azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py
@@ -15,6 +15,7 @@ from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service, e
 from azure.ai.evaluation._common.utils import validate_azure_ai_project
 from azure.ai.evaluation._exceptions import EvaluationException
 from azure.ai.evaluation._common.utils import validate_conversation
+from azure.ai.evaluation._constants import _AggregationType
 from azure.core.credentials import TokenCredential
 
 from . import EvaluatorBase
@@ -35,6 +36,10 @@ class RaiServiceEvaluatorBase(EvaluatorBase[T]):
     aggregated. Per-turn results are still be available in the output via the "evaluation_per_turn" key
     when this occurs. Default is False, resulting full conversation evaluation and aggregation.
     :type eval_last_turn: bool
+    :param conversation_aggregation_type: The type of aggregation to perform on the per-turn results of a conversation
+        to produce a single result.
+        Default is ~azure.ai.evaluation._AggregationType.MEAN.
+    :type conversation_aggregation_type: ~azure.ai.evaluation._AggregationType
     """
 
     @override
@@ -44,8 +49,9 @@ class RaiServiceEvaluatorBase(EvaluatorBase[T]):
         azure_ai_project: dict,
         credential: TokenCredential,
         eval_last_turn: bool = False,
+        conversation_aggregation_type: _AggregationType = _AggregationType.MEAN,
     ):
-        super().__init__(eval_last_turn=eval_last_turn)
+        super().__init__(eval_last_turn=eval_last_turn, conversation_aggregation_type=conversation_aggregation_type)
         self._eval_metric = eval_metric
         self._azure_ai_project = validate_azure_ai_project(azure_ai_project)
         self._credential = credential
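
The practical effect of the new knob: given per-turn scores, the default MEAN reports the average while MAX reports the worst turn. A tiny illustration with made-up numbers:

    # Made-up per-turn severity scores for a three-turn conversation.
    turn_scores = [2.0, 5.0, 3.0]
    mean_aggregate = sum(turn_scores) / len(turn_scores)  # 3.33..., the MEAN default
    max_aggregate = max(turn_scores)                      # 5.0, what _AggregationType.MAX reports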

azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py (new file)
@@ -0,0 +1,49 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Callable, List
+from azure.ai.evaluation._common.math import list_mean
+from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
+from azure.ai.evaluation._constants import _AggregationType
+
+
+def GetAggregator(aggregation_type: _AggregationType) -> Callable[[List[float]], float]:
+    if aggregation_type == _AggregationType.SUM:
+        return sum
+    if aggregation_type == _AggregationType.MEAN:
+        return list_mean
+    if aggregation_type == _AggregationType.MAX:
+        return max
+    if aggregation_type == _AggregationType.MIN:
+        return min
+    if aggregation_type == _AggregationType.CUSTOM:
+        msg = (
+            "Cannot 'get' aggregator function associated with custom aggregation enum."
+            + " This enum value should only be outputted as an indicator of an injected"
+            + " aggregation function, not inputted directly"
+        )
+        raise EvaluationException(
+            message=msg,
+            blame=ErrorBlame.UNKNOWN,
+            category=ErrorCategory.INVALID_VALUE,
+            target=ErrorTarget.EVALUATE,
+        )
+    raise EvaluationException(
+        message=f"Unaccounted for aggregation type: {aggregation_type}",
+        blame=ErrorBlame.UNKNOWN,
+        category=ErrorCategory.INVALID_VALUE,
+        target=ErrorTarget.EVALUATE,
+    )
+
+
+def GetAggregatorType(aggregation_function: Callable) -> _AggregationType:
+    if aggregation_function == sum:  # pylint: disable=comparison-with-callable
+        return _AggregationType.SUM
+    if aggregation_function == list_mean:  # pylint: disable=comparison-with-callable
+        return _AggregationType.MEAN
+    if aggregation_function == max:  # pylint: disable=comparison-with-callable
+        return _AggregationType.MAX
+    if aggregation_function == min:  # pylint: disable=comparison-with-callable
+        return _AggregationType.MIN
+    return _AggregationType.CUSTOM
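
A quick sketch of how the two helpers behave; the import path is the private module added above, and the sample scores are made up.

    # Illustrative only; this imports a private module, so treat it as an internal detail.
    from azure.ai.evaluation._constants import _AggregationType
    from azure.ai.evaluation._evaluators._common._conversation_aggregators import (
        GetAggregator,
        GetAggregatorType,
    )

    aggregate = GetAggregator(_AggregationType.MAX)   # returns the built-in max
    print(aggregate([0.2, 0.8, 0.5]))                 # 0.8

    print(GetAggregatorType(max))                     # _AggregationType.MAX
    print(GetAggregatorType(lambda s: s[0]))          # _AggregationType.CUSTOM for unrecognized callables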

azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py
@@ -1,13 +1,11 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from concurrent.futures import as_completed
-from typing import Callable, Dict, List, Union
+from typing import Dict, List, Union
 
-from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
 from typing_extensions import overload, override
 
-from azure.ai.evaluation._evaluators._common import EvaluatorBase
+from azure.ai.evaluation._evaluators._common import MultiEvaluatorBase
 from azure.ai.evaluation._model_configurations import Conversation
 from azure.ai.evaluation._common._experimental import experimental
 
@@ -18,7 +16,7 @@ from ._violence import ViolenceEvaluator
 
 
 @experimental
-class ContentSafetyEvaluator(EvaluatorBase[Union[str, float]]):
+class ContentSafetyEvaluator(MultiEvaluatorBase[Union[str, float]]):
     """
     Initialize a content safety evaluator configured to evaluate content safety metrics for QA scenario.
 
@@ -44,16 +42,14 @@ class ContentSafetyEvaluator(EvaluatorBase[Union[str, float]]):
     id = "content_safety"
     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
-    # TODO address 3579092 to re-enabled parallel evals.
     def __init__(self, credential, azure_ai_project, **kwargs):
-        super().__init__()
-        self._parallel = kwargs.pop("_parallel", True)
-        self._evaluators: List[Callable[..., Dict[str, Union[str, float]]]] = [
+        evaluators = [
             ViolenceEvaluator(credential, azure_ai_project),
             SexualEvaluator(credential, azure_ai_project),
             SelfHarmEvaluator(credential, azure_ai_project),
             HateUnfairnessEvaluator(credential, azure_ai_project),
         ]
+        super().__init__(evaluators=evaluators, **kwargs)
 
     @overload
     def __call__(
@@ -109,36 +105,3 @@ class ContentSafetyEvaluator(EvaluatorBase[Union[str, float]]):
         :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
         """
         return super().__call__(*args, **kwargs)
-
-    @override
-    async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[str, float]]:
-        """Perform the evaluation using the Azure AI RAI service.
-        The exact evaluation performed is determined by the evaluation metric supplied
-        by the child class initializer.
-
-        :param eval_input: The input to the evaluation function.
-        :type eval_input: Dict
-        :return: The evaluation result.
-        :rtype: Dict
-        """
-        query = eval_input.get("query", None)
-        response = eval_input.get("response", None)
-        conversation = eval_input.get("conversation", None)
-        results: Dict[str, Union[str, float]] = {}
-        # TODO fix this to not explode on empty optional inputs (PF SKD error)
-        if self._parallel:
-            with ThreadPoolExecutor() as executor:
-                # pylint: disable=no-value-for-parameter
-                futures = {
-                    executor.submit(evaluator, query=query, response=response, conversation=conversation): evaluator
-                    for evaluator in self._evaluators
-                }
-
-                for future in as_completed(futures):
-                    results.update(future.result())
-        else:
-            for evaluator in self._evaluators:
-                result = evaluator(query=query, response=response, conversation=conversation)
-                results.update(result)
-
-        return results
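
The refactor does not change the public call pattern; construction and invocation stay as before. A minimal usage sketch with placeholder project values:

    # Minimal usage sketch; the project values are placeholders.
    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import ContentSafetyEvaluator

    azure_ai_project = {
        "subscription_id": "<subscription-id>",
        "resource_group_name": "<resource-group>",
        "project_name": "<project-name>",
    }

    safety_eval = ContentSafetyEvaluator(
        credential=DefaultAzureCredential(),
        azure_ai_project=azure_ai_project,
    )

    # One merged dict with the violence, sexual, self-harm, and hate/unfairness results.
    result = safety_eval(
        query="What is the capital of France?",
        response="Paris is the capital of France.",
    )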

azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py
@@ -9,6 +9,7 @@ from azure.ai.evaluation._common._experimental import experimental
 from azure.ai.evaluation._common.constants import EvaluationMetrics
 from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 from azure.ai.evaluation._model_configurations import Conversation
+from azure.ai.evaluation._constants import _AggregationType
 
 
 @experimental
@@ -71,6 +72,7 @@ class HateUnfairnessEvaluator(RaiServiceEvaluatorBase[Union[str, float]]):
             eval_metric=EvaluationMetrics.HATE_FAIRNESS,
             azure_ai_project=azure_ai_project,
             credential=credential,
+            conversation_aggregation_type=_AggregationType.MAX,
         )
 
     @overload

azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py
@@ -9,6 +9,7 @@ from azure.ai.evaluation._common._experimental import experimental
 from azure.ai.evaluation._common.constants import EvaluationMetrics
 from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 from azure.ai.evaluation._model_configurations import Conversation
+from azure.ai.evaluation._constants import _AggregationType
 
 
 @experimental
@@ -65,6 +66,7 @@ class SelfHarmEvaluator(RaiServiceEvaluatorBase[Union[str, float]]):
             eval_metric=EvaluationMetrics.SELF_HARM,
             azure_ai_project=azure_ai_project,
             credential=credential,
+            conversation_aggregation_type=_AggregationType.MAX,
         )
 
     @overload

azure/ai/evaluation/_evaluators/_content_safety/_sexual.py
@@ -9,6 +9,7 @@ from azure.ai.evaluation._common._experimental import experimental
 from azure.ai.evaluation._common.constants import EvaluationMetrics
 from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 from azure.ai.evaluation._model_configurations import Conversation
+from azure.ai.evaluation._constants import _AggregationType
 
 
 @experimental
@@ -67,6 +68,7 @@ class SexualEvaluator(RaiServiceEvaluatorBase[Union[str, float]]):
             eval_metric=EvaluationMetrics.SEXUAL,
             azure_ai_project=azure_ai_project,
             credential=credential,
+            conversation_aggregation_type=_AggregationType.MAX,
        )
 
     @overload

azure/ai/evaluation/_evaluators/_content_safety/_violence.py
@@ -9,6 +9,7 @@ from azure.ai.evaluation._common._experimental import experimental
 from azure.ai.evaluation._common.constants import EvaluationMetrics
 from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 from azure.ai.evaluation._model_configurations import Conversation
+from azure.ai.evaluation._constants import _AggregationType
 
 
 @experimental
@@ -67,6 +68,7 @@ class ViolenceEvaluator(RaiServiceEvaluatorBase[Union[str, float]]):
             eval_metric=EvaluationMetrics.VIOLENCE,
             azure_ai_project=azure_ai_project,
             credential=credential,
+            conversation_aggregation_type=_AggregationType.MAX,
         )
 
     @overload
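
With all four safety evaluators now passing _AggregationType.MAX, a multi-turn conversation is summarized by its worst-scoring turn rather than the per-turn average, while "evaluation_per_turn" still carries the individual results. A hedged conversation-mode sketch (the messages are made up and the project setup is a placeholder):

    # Conversation-mode sketch; azure_ai_project and the credential are assumed to be
    # set up as in the earlier ContentSafetyEvaluator example.
    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import ViolenceEvaluator

    violence_eval = ViolenceEvaluator(
        credential=DefaultAzureCredential(),
        azure_ai_project=azure_ai_project,
    )

    conversation = {
        "messages": [
            {"role": "user", "content": "Tell me about your day."},
            {"role": "assistant", "content": "It was uneventful but pleasant."},
        ]
    }

    # The top-level violence score now reflects the maximum across turns instead of the mean.
    result = violence_eval(conversation=conversation)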

azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py
@@ -3,45 +3,44 @@
 # ---------------------------------------------------------
 
 from collections import Counter
-from typing import List
+from typing import List, Dict
+from typing_extensions import overload, override
 
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from azure.ai.evaluation._evaluators._common import EvaluatorBase
 
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
 
+class F1ScoreEvaluator(EvaluatorBase):
+    """
+    Calculates the F1 score for a given response and ground truth or a multi-turn conversation.
 
-class _AsyncF1ScoreEvaluator:
-    def __init__(self):
-        pass
+    F1 Scores range from 0 to 1, with 1 being the best possible score.
 
-    async def __call__(self, *, response: str, ground_truth: str, **kwargs):
-        """
-        Evaluate F1 score.
+    The F1-score computes the ratio of the number of shared words between the model generation and
+    the ground truth. Ratio is computed over the individual words in the generated response against those in the ground
+    truth answer. The number of shared words between the generation and the truth is the basis of the F1 score:
+    precision is the ratio of the number of shared words to the total number of words in the generation, and recall
+    is the ratio of the number of shared words to the total number of words in the ground truth.
 
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :keyword ground_truth: The ground truth to be evaluated.
-        :paramtype ground_truth: str
-        :return: The F1 score.
-        :rtype: Dict[str, float]
-        """
-        # Validate inputs
-        if not (response and response.strip() and response != "None") or not (
-            ground_truth and ground_truth.strip() and ground_truth != "None"
-        ):
-            msg = "Both 'response' and 'ground_truth' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.F1_EVALUATOR,
-            )
+    Use the F1 score when you want a single comprehensive metric that combines both recall and precision in your
+    model's responses. It provides a balanced evaluation of your model's performance in terms of capturing accurate
+    information in the response.
 
-        # Run f1 score computation.
-        f1_result = self._compute_f1_score(response=response, ground_truth=ground_truth)
 
-        return {"f1_score": f1_result}
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START f1_score_evaluator]
+            :end-before: [END f1_score_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call an F1ScoreEvaluator.
+    """
+
+    id = "azureml://registries/azureml/models/F1Score-Evaluator/versions/3"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+    def __init__(self):
+        super().__init__()
 
     @classmethod
     def _compute_f1_score(cls, response: str, ground_truth: str) -> float:
@@ -103,41 +102,24 @@ class _AsyncF1ScoreEvaluator:
 
         return f1
 
+    @override
+    async def _do_eval(self, eval_input: Dict) -> Dict[str, float]:
+        """Produce an f1 score evaluation result.
 
-
-class F1ScoreEvaluator:
-    """
-    Calculates the F1 score for a given response and ground truth or a multi-turn conversation.
-
-    F1 Scores range from 0 to 1, with 1 being the best possible score.
-
-    The F1-score computes the ratio of the number of shared words between the model generation and
-    the ground truth. Ratio is computed over the individual words in the generated response against those in the ground
-    truth answer. The number of shared words between the generation and the truth is the basis of the F1 score:
-    precision is the ratio of the number of shared words to the total number of words in the generation, and recall
-    is the ratio of the number of shared words to the total number of words in the ground truth.
-
-    Use the F1 score when you want a single comprehensive metric that combines both recall and precision in your
-    model's responses. It provides a balanced evaluation of your model's performance in terms of capturing accurate
-    information in the response.
-
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
-            :start-after: [START f1_score_evaluator]
-            :end-before: [END f1_score_evaluator]
-            :language: python
-            :dedent: 8
-            :caption: Initialize and call an F1ScoreEvaluator.
-    """
-
-    id = "azureml://registries/azureml/models/F1Score-Evaluator/versions/3"
-    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+        :param eval_input: The input to the evaluation function.
+        :type eval_input: Dict
+        :return: The evaluation result.
+        :rtype: Dict
+        """
+        ground_truth = eval_input["ground_truth"]
+        response = eval_input["response"]
+        # Run f1 score computation.
+        f1_result = self._compute_f1_score(response=response, ground_truth=ground_truth)
 
-    def __init__(self):
-        self._async_evaluator = _AsyncF1ScoreEvaluator()
+        return {"f1_score": f1_result}
 
-    def __call__(self, *, response: str, ground_truth: str, **kwargs):
+    @overload  # type: ignore
+    def __call__(self, *, response: str, ground_truth: str) -> Dict[str, float]:
         """
         Evaluate F1 score.
 
@@ -149,9 +131,20 @@ class F1ScoreEvaluator:
         :rtype: Dict[str, float]
         """
 
-        return async_run_allowing_running_loop(
-            self._async_evaluator, response=response, ground_truth=ground_truth, **kwargs
-        )
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
+        **kwargs,
+    ):
+        """
+        Evaluate F1 score.
 
-    def _to_async(self):
-        return self._async_evaluator
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :keyword ground_truth: The ground truth to be evaluated.
+        :paramtype ground_truth: str
+        :return: The F1 score.
+        :rtype: Dict[str, float]
+        """
+        return super().__call__(*args, **kwargs)
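
Call-site behavior should be unchanged by the move onto EvaluatorBase; a minimal usage sketch:

    # Minimal usage sketch for the refactored F1 evaluator.
    from azure.ai.evaluation import F1ScoreEvaluator

    f1_eval = F1ScoreEvaluator()
    result = f1_eval(
        response="The capital of Japan is Tokyo.",
        ground_truth="Tokyo is the capital of Japan.",
    )
    print(result["f1_score"])  # a float between 0 and 1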

azure/ai/evaluation/_evaluators/_gleu/_gleu.py
@@ -1,28 +1,16 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
+from typing import Dict
 from nltk.translate.gleu_score import sentence_gleu
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from typing_extensions import overload, override
 
 from azure.ai.evaluation._common.utils import nltk_tokenize
 
+from azure.ai.evaluation._evaluators._common import EvaluatorBase
 
-class _AsyncGleuScoreEvaluator:
-    def __init__(self):
-        pass
-
-    async def __call__(self, *, ground_truth: str, response: str, **kwargs):
-        reference_tokens = nltk_tokenize(ground_truth)
-        hypothesis_tokens = nltk_tokenize(response)
-
-        score = sentence_gleu([reference_tokens], hypothesis_tokens)
-
-        return {
-            "gleu_score": score,
-        }
 
-
-class GleuScoreEvaluator:
+class GleuScoreEvaluator(EvaluatorBase):
     """
     Calculates the GLEU (Google-BLEU) score between a response and the ground truth.
 
@@ -47,10 +35,32 @@ class GleuScoreEvaluator:
     id = "azureml://registries/azureml/models/Gleu-Score-Evaluator/versions/3"
     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
+    @override
     def __init__(self):
-        self._async_evaluator = _AsyncGleuScoreEvaluator()
+        super().__init__()
+
+    @override
+    async def _do_eval(self, eval_input: Dict) -> Dict[str, float]:
+        """Produce a glue score evaluation result.
+
+        :param eval_input: The input to the evaluation function.
+        :type eval_input: Dict
+        :return: The evaluation result.
+        :rtype: Dict
+        """
+        ground_truth = eval_input["ground_truth"]
+        response = eval_input["response"]
+        reference_tokens = nltk_tokenize(ground_truth)
+        hypothesis_tokens = nltk_tokenize(response)
 
-    def __call__(self, *, ground_truth: str, response: str, **kwargs):
+        score = sentence_gleu([reference_tokens], hypothesis_tokens)
+
+        return {
+            "gleu_score": score,
+        }
+
+    @overload  # type: ignore
+    def __call__(self, *, ground_truth: str, response: str):
         """
         Evaluate the GLEU score between the response and the ground truth.
 
@@ -61,9 +71,21 @@ class GleuScoreEvaluator:
         :return: The GLEU score.
         :rtype: Dict[str, float]
         """
-        return async_run_allowing_running_loop(
-            self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
-        )
 
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
+        **kwargs,
+    ):
+        """
+        Evaluate the GLEU score between the response and the ground truth.
+
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :keyword ground_truth: The ground truth to be compared against.
+        :paramtype ground_truth: str
+        :return: The GLEU score.
+        :rtype: Dict[str, float]
+        """
+        return super().__call__(*args, **kwargs)
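
The same pattern applies to the GLEU evaluator; a minimal usage sketch:

    # Minimal usage sketch for the refactored GLEU evaluator (requires nltk).
    from azure.ai.evaluation import GleuScoreEvaluator

    gleu_eval = GleuScoreEvaluator()
    result = gleu_eval(
        response="The cat sat on the mat.",
        ground_truth="The cat is sitting on the mat.",
    )
    print(result["gleu_score"])  # sentence-level Google-BLEU, between 0 and 1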

azure/ai/evaluation/_evaluators/_meteor/_meteor.py
@@ -1,38 +1,16 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
+from typing import Dict
+
 from nltk.translate.meteor_score import meteor_score
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from typing_extensions import overload, override
 
 from azure.ai.evaluation._common.utils import nltk_tokenize, ensure_nltk_data_downloaded
+from azure.ai.evaluation._evaluators._common import EvaluatorBase
 
 
-class _AsyncMeteorScoreEvaluator:
-    def __init__(self, alpha: float = 0.9, beta: float = 3.0, gamma: float = 0.5):
-        self._alpha = alpha
-        self._beta = beta
-        self._gamma = gamma
-
-        ensure_nltk_data_downloaded()
-
-    async def __call__(self, *, ground_truth: str, response: str, **kwargs):
-        reference_tokens = nltk_tokenize(ground_truth)
-        hypothesis_tokens = nltk_tokenize(response)
-
-        score = meteor_score(
-            [reference_tokens],
-            hypothesis_tokens,
-            alpha=self._alpha,
-            beta=self._beta,
-            gamma=self._gamma,
-        )
-
-        return {
-            "meteor_score": score,
-        }
-
-
-class MeteorScoreEvaluator:
+class MeteorScoreEvaluator(EvaluatorBase):
     """
     Calculates the METEOR score for a given response and ground truth.
 
@@ -68,10 +46,41 @@ class MeteorScoreEvaluator:
     id = "azureml://registries/azureml/models/Meteor-Score-Evaluator/versions/3"
     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
+    @override
     def __init__(self, alpha: float = 0.9, beta: float = 3.0, gamma: float = 0.5):
-        self._async_evaluator = _AsyncMeteorScoreEvaluator(alpha=alpha, beta=beta, gamma=gamma)
+        self._alpha = alpha
+        self._beta = beta
+        self._gamma = gamma
+        ensure_nltk_data_downloaded()
+        super().__init__()
 
-    def __call__(self, *, ground_truth: str, response: str, **kwargs):
+    @override
+    async def _do_eval(self, eval_input: Dict) -> Dict[str, float]:
+        """Produce a meteor score evaluation result.
+
+        :param eval_input: The input to the evaluation function.
+        :type eval_input: Dict
+        :return: The evaluation result.
+        :rtype: Dict
+        """
+        ground_truth = eval_input["ground_truth"]
+        response = eval_input["response"]
+        reference_tokens = nltk_tokenize(ground_truth)
+        hypothesis_tokens = nltk_tokenize(response)
+        score = meteor_score(
+            [reference_tokens],
+            hypothesis_tokens,
+            alpha=self._alpha,
+            beta=self._beta,
+            gamma=self._gamma,
+        )
+
+        return {
+            "meteor_score": score,
+        }
+
+    @overload  # type: ignore
+    def __call__(self, *, ground_truth: str, response: str) -> Dict[str, float]:
         """
         Evaluate the METEOR score between the response and the ground truth.
 
@@ -82,9 +91,21 @@ class MeteorScoreEvaluator:
         :return: The METEOR score.
        :rtype: Dict[str, float]
         """
-        return async_run_allowing_running_loop(
-            self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
-        )
 
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
+        **kwargs,
+    ):
+        """
+        Evaluate the METEOR score between the response and the ground truth.
+
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :keyword ground_truth: The ground truth to be compared against.
+        :paramtype ground_truth: str
+        :return: The METEOR score.
+        :rtype: Dict[str, float]
+        """
+        return super().__call__(*args, **kwargs)
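
And likewise for the METEOR evaluator, whose alpha/beta/gamma parameters keep their previous defaults; a minimal usage sketch:

    # Minimal usage sketch for the refactored METEOR evaluator (requires nltk data).
    from azure.ai.evaluation import MeteorScoreEvaluator

    meteor_eval = MeteorScoreEvaluator(alpha=0.9, beta=3.0, gamma=0.5)
    result = meteor_eval(
        response="The cat sat on the mat.",
        ground_truth="The cat is sitting on the mat.",
    )
    print(result["meteor_score"])  # a float between 0 and 1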